file_name | content
---|---
0005_vote.py | # Generated by Django 3.2.3 on 2021-05-19 19:52
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('posts', '0004_alter_comment_reply'),
]
operations = [
migrations.CreateModel(
name='Vote',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='votes', to='posts.post')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='votes', to=settings.AUTH_USER_MODEL)),
],
),
    ] |
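For orientation, a minimal sketch of the model this migration creates; it is reconstructed from the CreateModel operation above rather than taken from the original project:

# posts/models.py (hypothetical reconstruction)
from django.conf import settings
from django.db import models

class Vote(models.Model):
    # Both foreign keys cascade on delete and share the related_name
    # 'votes', matching the migration's ForeignKey definitions.
    post = models.ForeignKey('posts.Post', on_delete=models.CASCADE, related_name='votes')
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='votes')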
policy_hmac_auth.go | //go:build go1.16
// +build go1.16
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package azappconfig
import (
"bytes"
"crypto/hmac"
"crypto/sha256"
"encoding/base64"
"errors"
"fmt"
"io/ioutil"
"net/http"
"strings"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
)
type hmacAuthenticationPolicy struct {
credential string
secret []byte
}
func newHmacAuthenticationPolicy(credential string, secret []byte) *hmacAuthenticationPolicy {
return &hmacAuthenticationPolicy{
credential: credential,
secret: secret,
}
}
func (p *hmacAuthenticationPolicy) Do(request *policy.Request) (*http.Response, error) {
	req := request.Raw()
	id := p.credential
	key := p.secret
method := req.Method
host := req.URL.Host
pathAndQuery := req.URL.Path
if req.URL.RawQuery != "" {
pathAndQuery = pathAndQuery + "?" + req.URL.RawQuery
}
var content []byte
if req.Body != nil {
var err error
if content, err = ioutil.ReadAll(req.Body); err != nil {
return nil, err
}
}
req.Body = ioutil.NopCloser(bytes.NewBuffer(content))
timestamp := time.Now().UTC().Format(http.TimeFormat)
contentHash, err1 := getContentHashBase64(content)
if err1 != nil {
return nil, err1
}
stringToSign := fmt.Sprintf("%s\n%s\n%s;%s;%s", strings.ToUpper(method), pathAndQuery, timestamp, host, contentHash)
signature, err2 := getHmac(stringToSign, key)
if err2 != nil {
return nil, err2
}
req.Header.Set("x-ms-content-sha256", contentHash)
req.Header.Set("Date", timestamp)
req.Header.Set("Authorization", "HMAC-SHA256 Credential="+id+", SignedHeaders=date;host;x-ms-content-sha256, Signature="+signature)
return request.Next()
}
func getContentHashBase64(content []byte) (string, error) {
hasher := sha256.New()
_, err := hasher.Write(content)
if err != nil {
return "", err
}
return base64.StdEncoding.EncodeToString(hasher.Sum(nil)), nil
}
func getHmac(content string, key []byte) (string, error) {
	mac := hmac.New(sha256.New, key)
	_, err := mac.Write([]byte(content))
	if err != nil {
		return "", err
	}
	return base64.StdEncoding.EncodeToString(mac.Sum(nil)), nil
}
func parseConnectionString(connectionString string) (endpoint string, credential string, secret []byte, err error) {
const connectionStringEndpointPrefix = "Endpoint="
const connectionStringCredentialPrefix = "Id="
const connectionStringSecretPrefix = "Secret="
var er error = errors.New("error parsing connection string")
var ept *string
var cred *string
var sec *[]byte
for _, seg := range strings.Split(connectionString, ";") {
if strings.HasPrefix(seg, connectionStringEndpointPrefix) {
if ept != nil {
return "", "", []byte{}, er
}
			ep := strings.TrimPrefix(seg, connectionStringEndpointPrefix)
			ept = &ep
} else if strings.HasPrefix(seg, connectionStringCredentialPrefix) {
if cred != nil {
return "", "", []byte{}, er
}
c := strings.TrimPrefix(seg, connectionStringCredentialPrefix)
cred = &c
} else if strings.HasPrefix(seg, connectionStringSecretPrefix) {
if sec != nil {
return "", "", []byte{}, er
}
s, e := base64.StdEncoding.DecodeString(strings.TrimPrefix(seg, connectionStringSecretPrefix))
if e != nil {
return "", "", []byte{}, e
}
sec = &s
}
}
if ept == nil || cred == nil || sec == nil {
return "", "", []byte{}, er
}
return *ept, *cred, *sec, nil
} |
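The string-to-sign above is METHOD, path-and-query, and timestamp;host;contentHash joined by newlines, signed with HMAC-SHA256 and base64-encoded. A minimal Python sketch of the same scheme for cross-checking, assuming the secret is already base64-decoded (function and variable names here are illustrative):

import base64, hashlib, hmac
from email.utils import formatdate

def sign_request(method, host, path_and_query, body, credential, secret):
    # base64-encoded SHA-256 of the request body, as in getContentHashBase64.
    content_hash = base64.b64encode(hashlib.sha256(body).digest()).decode()
    timestamp = formatdate(usegmt=True)  # RFC 1123 date, like Go's http.TimeFormat
    string_to_sign = f"{method.upper()}\n{path_and_query}\n{timestamp};{host};{content_hash}"
    signature = base64.b64encode(
        hmac.new(secret, string_to_sign.encode(), hashlib.sha256).digest()
    ).decode()
    return {
        "x-ms-content-sha256": content_hash,
        "Date": timestamp,
        "Authorization": "HMAC-SHA256 Credential=" + credential
                         + ", SignedHeaders=date;host;x-ms-content-sha256, Signature=" + signature,
    }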
render_macro_matchers.rs | use rustc_ast::token::{self, BinOpToken, Delimiter};
use rustc_ast::tokenstream::{TokenStream, TokenTree};
use rustc_ast_pretty::pprust::state::State as Printer;
use rustc_ast_pretty::pprust::PrintState;
use rustc_middle::ty::TyCtxt;
use rustc_session::parse::ParseSess;
use rustc_span::source_map::FilePathMapping;
use rustc_span::symbol::{kw, Ident, Symbol};
use rustc_span::Span;
/// Render a macro matcher in a format suitable for displaying to the user
/// as part of an item declaration.
pub(super) fn render_macro_matcher(tcx: TyCtxt<'_>, matcher: &TokenTree) -> String {
if let Some(snippet) = snippet_equal_to_token(tcx, matcher) {
// If the original source code is known, we display the matcher exactly
// as present in the source code.
return snippet;
}
// If the matcher is macro-generated or some other reason the source code
// snippet is not available, we attempt to nicely render the token tree.
let mut printer = Printer::new();
// If the inner ibox fits on one line, we get:
//
// macro_rules! macroname {
// (the matcher) => {...};
// }
//
// If the inner ibox gets wrapped, the cbox will break and get indented:
//
// macro_rules! macroname {
// (
// the matcher ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~!
// ) => {...};
// }
printer.cbox(8);
printer.word("(");
printer.zerobreak();
printer.ibox(0);
match matcher {
TokenTree::Delimited(_span, _delim, tts) => print_tts(&mut printer, tts),
// Matcher which is not a Delimited is unexpected and should've failed
// to compile, but we render whatever it is wrapped in parens.
TokenTree::Token(_) => print_tt(&mut printer, matcher),
}
printer.end();
printer.break_offset_if_not_bol(0, -4);
printer.word(")");
printer.end();
printer.s.eof()
}
/// Find the source snippet for this token's Span, reparse it, and return the
/// snippet if the reparsed TokenTree matches the argument TokenTree.
fn snippet_equal_to_token(tcx: TyCtxt<'_>, matcher: &TokenTree) -> Option<String> {
// Find what rustc thinks is the source snippet.
// This may not actually be anything meaningful if this matcher was itself
// generated by a macro.
let source_map = tcx.sess.source_map();
let span = matcher.span();
let snippet = source_map.span_to_snippet(span).ok()?;
// Create a Parser.
let sess = ParseSess::new(FilePathMapping::empty());
let file_name = source_map.span_to_filename(span);
let mut parser =
match rustc_parse::maybe_new_parser_from_source_str(&sess, file_name, snippet.clone()) {
Ok(parser) => parser,
Err(diagnostics) => {
drop(diagnostics);
return None;
}
};
// Reparse a single token tree.
let mut reparsed_trees = match parser.parse_all_token_trees() {
Ok(reparsed_trees) => reparsed_trees,
Err(diagnostic) => {
diagnostic.cancel();
return None;
}
};
if reparsed_trees.len() != 1 {
return None;
}
let reparsed_tree = reparsed_trees.pop().unwrap();
// Compare against the original tree.
if reparsed_tree.eq_unspanned(matcher) { Some(snippet) } else { None }
}
fn print_tt(printer: &mut Printer<'_>, tt: &TokenTree) {
match tt {
TokenTree::Token(token) => {
let token_str = printer.token_to_string(token);
printer.word(token_str);
if let token::DocComment(..) = token.kind {
printer.hardbreak()
}
}
TokenTree::Delimited(_span, delim, tts) => {
let open_delim = printer.token_kind_to_string(&token::OpenDelim(*delim));
printer.word(open_delim);
if !tts.is_empty() {
if *delim == Delimiter::Brace {
printer.space();
}
print_tts(printer, tts);
if *delim == Delimiter::Brace {
printer.space();
}
}
let close_delim = printer.token_kind_to_string(&token::CloseDelim(*delim));
printer.word(close_delim);
}
}
}
fn print_tts(printer: &mut Printer<'_>, tts: &TokenStream) {
#[derive(Copy, Clone, PartialEq)]
enum State {
Start,
Dollar,
DollarIdent,
DollarIdentColon,
DollarParen,
DollarParenSep,
Pound,
PoundBang,
Ident,
Other,
}
use State::*;
let mut state = Start;
for tt in tts.trees() {
let (needs_space, next_state) = match &tt {
TokenTree::Token(tt) => match (state, &tt.kind) {
(Dollar, token::Ident(..)) => (false, DollarIdent),
(DollarIdent, token::Colon) => (false, DollarIdentColon),
(DollarIdentColon, token::Ident(..)) => (false, Other),
(
DollarParen,
token::BinOp(BinOpToken::Plus | BinOpToken::Star) | token::Question,
) => (false, Other),
(DollarParen, _) => (false, DollarParenSep),
(DollarParenSep, token::BinOp(BinOpToken::Plus | BinOpToken::Star)) => {
(false, Other)
}
(Pound, token::Not) => (false, PoundBang),
(_, token::Ident(symbol, /* is_raw */ false))
if !usually_needs_space_between_keyword_and_open_delim(*symbol, tt.span) =>
{
(true, Ident)
}
(_, token::Comma | token::Semi) => (false, Other),
(_, token::Dollar) => (true, Dollar),
(_, token::Pound) => (true, Pound),
(_, _) => (true, Other),
},
TokenTree::Delimited(_, delim, _) => match (state, delim) {
(Dollar, Delimiter::Parenthesis) => (false, DollarParen),
(Pound | PoundBang, Delimiter::Bracket) => (false, Other),
(Ident, Delimiter::Parenthesis | Delimiter::Bracket) => (false, Other),
(_, _) => (true, Other),
},
};
if state != Start && needs_space {
printer.space();
}
print_tt(printer, tt);
state = next_state;
}
}
fn usually_needs_space_between_keyword_and_open_delim(symbol: Symbol, span: Span) -> bool {
let ident = Ident { name: symbol, span };
let is_keyword = ident.is_used_keyword() || ident.is_unused_keyword();
if !is_keyword {
// An identifier that is not a keyword usually does not need a space
// before an open delim. For example: `f(0)` or `f[0]`.
return false;
}
match symbol {
// No space after keywords that are syntactically an expression. For
// example: a tuple struct created with `let _ = Self(0, 0)`, or if
// someone has `impl Index<MyStruct> for bool` then `true[MyStruct]`.
kw::False | kw::SelfLower | kw::SelfUpper | kw::True => false,
// No space, as in `let _: fn();`
kw::Fn => false,
// No space, as in `pub(crate) type T;`
kw::Pub => false,
// No space for keywords that can end an expression, as in `fut.await()`
// where fut's Output type is `fn()`.
kw::Await => false,
// Otherwise space after keyword. Some examples:
//
// `expr as [T; 2]`
// ^
// `box (tuple,)`
// ^
// `break (tuple,)`
// ^
// `type T = dyn (Fn() -> dyn Trait) + Send;`
// ^
// `for (tuple,) in iter {}`
// ^
// `if (tuple,) == v {}`
// ^
// `impl [T] {}`
// ^
// `for x in [..] {}`
// ^
// `let () = unit;`
// ^
// `match [x, y] {...}`
// ^
// `&mut (x as T)`
// ^
// `return [];`
// ^
// `fn f<T>() where (): Into<T>`
// ^
// `while (a + b).what() {}`
// ^
// `yield [];`
// ^
_ => true,
}
} |
__util__.py | from functools import wraps
from inspect import Parameter, signature
from itertools import chain
from typing import Mapping, Sequence
class CallParamDict:
def __init__(
self,
func,
args: tuple,
kwargs: dict,
pos_param_names: Sequence[str],
all_params: Mapping[str, Parameter],
):
        self.func = func
        self.args = args
        self.kwargs = kwargs
        self.pos_param_names = pos_param_names
        self.all_params = all_params
        self.all_arguments = dict(zip(pos_param_names, args))
        self.all_arguments.update(kwargs)
def __str__(self):
return f"""{self.func.__name__}({
",".join(chain(
(repr(a) for a in self.args),
(f'{k}={v!r}' for (k, v) in self.kwargs.items()))
)
})"""
def __getitem__(self, item):
if item in self.all_arguments:
return self.all_arguments[item]
p = self.all_params[item]
if p.default != Parameter.empty:
return p.default
raise KeyError(item)
def __setitem__(self, key, value):
prev = key in self.all_arguments
self.all_arguments[key] = value
        if prev:
            # key was already set explicitly; nothing more needs changing
            return
def with_param_dict(kwarg_name="_params"):
def decorator(func):
all_parameters: Mapping[str, Parameter] = signature(func).parameters
dest_param = all_parameters.get(kwarg_name)
if not dest_param or dest_param.kind != Parameter.KEYWORD_ONLY:
raise NameError(
"function must contain keyword-only parameter named " + kwarg_name
)
pos_names = []
for n, p in all_parameters.items():
if p.kind == Parameter.VAR_POSITIONAL:
                raise TypeError(
                    f"with_param_dict can't wrap a function with a variadic positional parameter ({p})"
                )
if p.kind not in (
Parameter.POSITIONAL_ONLY,
Parameter.POSITIONAL_OR_KEYWORD,
):
break
pos_names.append(n)
@wraps(func)
def wrapper(*args, **kwargs):
if kwarg_name in kwargs:
raise TypeError(f"{kwarg_name} cannot be specified outside the wrapper")
params = dict(zip(pos_names, args))
params.update(kwargs)
for k, p in all_parameters.items():
if k == kwarg_name:
continue
params.setdefault(k, p.default)
kwargs[kwarg_name] = params
return func(*args, **kwargs)
return wrapper
return decorator
|
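A quick usage sketch of with_param_dict (the decorated function is hypothetical): the wrapper injects a dict of every resolved argument, defaults included, into the keyword-only _params parameter:

@with_param_dict()
def connect(host, port=5432, *, timeout=10, _params):
    print(_params)

connect("db.local")
# prints: {'host': 'db.local', 'port': 5432, 'timeout': 10}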
flaskish.py | from __future__ import print_function
from functools import wraps
import logging
try:
import ujson as json
except ImportError:
import json
from flask import Flask as _Flask
from flask.globals import _request_ctx_stack
from werkzeug.wrappers import Response
from werkzeug.datastructures import Headers
from werkzeug.exceptions import HTTPException
_Request = _Flask.request_class
class cached_property(object):
def __init__(self, func):
self.__doc__ = getattr(func, '__doc__')
self.func = func
def __get__(self, obj, cls):
if obj is None:
return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
class ApiError(Exception):
status_code = 500
error = 'internal-error'
def __init__(self, error=None, status_code=None, **kwargs):
self.status_code = status_code or self.status_code
self.error = error or self.error
self.details = kwargs
def to_json(self):
data = {'error': self.error}
        if self.details:
            data.update(self.details)
return data
class Request(_Request):
def __init__(self, *args, **kwargs):
_Request.__init__(self, *args, **kwargs)
self._response = None
@cached_property
def response(self):
self._response = HeaderResponse()
return self._response
    def process_response(self, response):
headers = self._response and self._response.headers
if headers:
response.headers._list.extend(headers)
return response
class HeaderResponse(Response):
def __init__(self):
self.headers = Headers()
class Flask(_Flask):
request_class = Request
def __init__(self, *args, **kwargs):
_Flask.__init__(self, *args, **kwargs)
self.url_map.strict_slashes = False
self.endpoint_counter = 0
self._logger = logging.getLogger(self.logger_name)
def route(self, rule, endpoint=None, weight=None, **options):
if weight is not None:
weight = False, -9999, weight
def decorator(func):
lendpoint = endpoint
if not lendpoint:
lendpoint = '{}_{}'.format(func.__name__, self.endpoint_counter)
self.endpoint_counter += 1
self.add_url_rule(rule, lendpoint, func, **options)
if weight:
self.url_map._rules[-1].match_compare_key = lambda: weight
return func
return decorator
def api(self, *args, **kwargs):
def decorator(func):
@wraps(func)
def inner(*args, **kwargs):
try:
result = func(*args, **kwargs)
except ApiError as e:
result = e
except HTTPException as e:
result = e
except Exception:
self.logger.exception('Unhandled error')
result = ApiError()
if isinstance(result, Response):
return result
elif isinstance(result, ApiError):
code = result.status_code
result = result.to_json()
else:
code = 200
return self.response_class(json.dumps(result, ensure_ascii=False), code,
content_type='application/json')
return self.route(*args, **kwargs)(inner)
return decorator
def process_response(self, response):
response = _request_ctx_stack.top.request.process_response(response)
return _Flask.process_response(self, response)
def print_routes(self, sort=False):
rules = self.url_map.iter_rules()
if sort:
rules = sorted(rules, key=lambda r: r.rule)
for rule in rules:
func = self.view_functions[rule.endpoint]
print('{:10} {}\t{}.{}'.format(
','.join(rule.methods),
rule.rule,
func.__module__,
func.__name__))
|
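A usage sketch of the api decorator above (route and payload are illustrative): handlers may return a plain object, raise ApiError, or return a Response; anything else is serialized to JSON:

app = Flask(__name__)

@app.api('/users/<int:user_id>')
def get_user(user_id):
    if user_id != 42:
        raise ApiError('not-found', status_code=404)
    return {'id': user_id, 'name': 'demo'}

# A dict becomes a 200 application/json response; the ApiError becomes
# {"error": "not-found"} with status 404.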
image.go | /*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package images
import (
"context"
"encoding/json"
"sort"
"strings"
"time"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/log"
"github.com/containerd/containerd/platforms"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
// Image provides the model for how containerd views container images.
type Image struct {
// Name of the image.
//
// To be pulled, it must be a reference compatible with resolvers.
//
// This field is required.
Name string
// Labels provide runtime decoration for the image record.
//
// There is no default behavior for how these labels are propagated. They
// only decorate the static metadata object.
//
// This field is optional.
Labels map[string]string
// Target describes the root content for this image. Typically, this is
// a manifest, index or manifest list.
Target ocispec.Descriptor
CreatedAt, UpdatedAt time.Time
}
// DeleteOptions provide options on image delete
type DeleteOptions struct {
Synchronous bool
}
// DeleteOpt allows configuring a delete operation
type DeleteOpt func(context.Context, *DeleteOptions) error
// SynchronousDelete is used to indicate that an image deletion and removal of
// the image resources should occur synchronously before returning a result.
func SynchronousDelete() DeleteOpt {
return func(ctx context.Context, o *DeleteOptions) error {
o.Synchronous = true
return nil
}
}
// Store and interact with images
type Store interface {
Get(ctx context.Context, name string) (Image, error)
List(ctx context.Context, filters ...string) ([]Image, error)
Create(ctx context.Context, image Image) (Image, error)
// Update will replace the data in the store with the provided image. If
// one or more fieldpaths are provided, only those fields will be updated.
Update(ctx context.Context, image Image, fieldpaths ...string) (Image, error)
Delete(ctx context.Context, name string, opts ...DeleteOpt) error
}
// TODO(stevvooe): Many of these functions make strong platform assumptions,
// which are untrue in a lot of cases. More refactoring must be done here to
// make this work in all cases.
// Config resolves the image configuration descriptor.
//
// The caller can then use the descriptor to resolve and process the
// configuration of the image.
func (image *Image) Config(ctx context.Context, provider content.Provider, platform platforms.MatchComparer) (ocispec.Descriptor, error) {
return Config(ctx, provider, image.Target, platform)
}
// RootFS returns the unpacked diffids that make up an image's rootfs.
//
// These are used to verify that a set of layers unpacked to the expected
// values.
func (image *Image) RootFS(ctx context.Context, provider content.Provider, platform platforms.MatchComparer) ([]digest.Digest, error) {
desc, err := image.Config(ctx, provider, platform)
if err != nil {
return nil, err
}
return RootFS(ctx, provider, desc)
}
// Size returns the total size of an image's packed resources.
func (image *Image) Size(ctx context.Context, provider content.Provider, platform platforms.MatchComparer) (int64, error) {
var size int64
return size, Walk(ctx, Handlers(HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
if desc.Size < 0 {
return nil, errors.Errorf("invalid size %v in %v (%v)", desc.Size, desc.Digest, desc.MediaType)
}
size += desc.Size
return nil, nil
}), FilterPlatforms(ChildrenHandler(provider), platform)), image.Target)
}
type platformManifest struct {
p *ocispec.Platform
m *ocispec.Manifest
}
// Manifest resolves a manifest from the image for the given platform.
//
// When a manifest descriptor inside of a manifest index does not have
// a platform defined, the platform from the image config is considered.
//
// If the descriptor points to a non-index manifest, then the manifest is
// unmarshalled and returned without considering the platform inside of the
// config.
//
// TODO(stevvooe): This violates the current platform agnostic approach to this
// package by returning a specific manifest type. We'll need to refactor this
// to return a manifest descriptor or decide that we want to bring the API in
// this direction because this abstraction is not needed.
func Manifest(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform platforms.MatchComparer) (ocispec.Manifest, error) {
var (
m []platformManifest
wasIndex bool
)
if err := Walk(ctx, HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
switch desc.MediaType {
case MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
p, err := content.ReadBlob(ctx, provider, desc)
if err != nil {
return nil, err
}
var manifest ocispec.Manifest
if err := json.Unmarshal(p, &manifest); err != nil {
return nil, err
}
if desc.Digest != image.Digest && platform != nil {
if desc.Platform != nil && !platform.Match(*desc.Platform) {
return nil, nil
}
if desc.Platform == nil {
p, err := content.ReadBlob(ctx, provider, manifest.Config)
if err != nil {
return nil, err
}
var image ocispec.Image
if err := json.Unmarshal(p, &image); err != nil {
return nil, err
}
if !platform.Match(platforms.Normalize(ocispec.Platform{OS: image.OS, Architecture: image.Architecture})) {
return nil, nil
}
}
}
m = append(m, platformManifest{
p: desc.Platform,
m: &manifest,
})
return nil, nil
case MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
p, err := content.ReadBlob(ctx, provider, desc)
if err != nil {
return nil, err
}
var idx ocispec.Index
if err := json.Unmarshal(p, &idx); err != nil {
return nil, err
}
if platform == nil {
return idx.Manifests, nil
}
var descs []ocispec.Descriptor
for _, d := range idx.Manifests {
if d.Platform == nil || platform.Match(*d.Platform) {
descs = append(descs, d)
}
}
wasIndex = true
return descs, nil
}
return nil, errors.Wrapf(errdefs.ErrNotFound, "unexpected media type %v for %v", desc.MediaType, desc.Digest)
}), image); err != nil {
return ocispec.Manifest{}, err
}
if len(m) == 0 {
err := errors.Wrapf(errdefs.ErrNotFound, "manifest %v", image.Digest)
if wasIndex {
err = errors.Wrapf(errdefs.ErrNotFound, "no match for platform in manifest %v", image.Digest)
}
return ocispec.Manifest{}, err
}
sort.SliceStable(m, func(i, j int) bool {
if m[i].p == nil {
return false
}
if m[j].p == nil {
return true
}
return platform.Less(*m[i].p, *m[j].p)
})
return *m[0].m, nil
}
// Config resolves the image configuration descriptor using a content provider
// to resolve child resources on the image.
//
// The caller can then use the descriptor to resolve and process the
// configuration of the image.
func Config(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform platforms.MatchComparer) (ocispec.Descriptor, error) {
	manifest, err := Manifest(ctx, provider, image, platform)
	if err != nil {
		return ocispec.Descriptor{}, err
}
return manifest.Config, err
}
// Platforms returns one or more platforms supported by the image.
func Platforms(ctx context.Context, provider content.Provider, image ocispec.Descriptor) ([]ocispec.Platform, error) {
var platformSpecs []ocispec.Platform
return platformSpecs, Walk(ctx, Handlers(HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
if desc.Platform != nil {
platformSpecs = append(platformSpecs, *desc.Platform)
return nil, ErrSkipDesc
}
switch desc.MediaType {
case MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig:
p, err := content.ReadBlob(ctx, provider, desc)
if err != nil {
return nil, err
}
var image ocispec.Image
if err := json.Unmarshal(p, &image); err != nil {
return nil, err
}
platformSpecs = append(platformSpecs,
platforms.Normalize(ocispec.Platform{OS: image.OS, Architecture: image.Architecture}))
}
return nil, nil
}), ChildrenHandler(provider)), image)
}
// Check returns nil if all components of an image are available in the
// provider for the specified platform.
//
// If available is true, the caller can assume that required represents the
// complete set of content required for the image.
//
// missing will have the components that are part of required but not available
// in the provider.
//
// If there is a problem resolving content, an error will be returned.
func Check(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform platforms.MatchComparer) (available bool, required, present, missing []ocispec.Descriptor, err error) {
mfst, err := Manifest(ctx, provider, image, platform)
if err != nil {
if errdefs.IsNotFound(err) {
return false, []ocispec.Descriptor{image}, nil, []ocispec.Descriptor{image}, nil
}
return false, nil, nil, nil, errors.Wrapf(err, "failed to check image %v", image.Digest)
}
// TODO(stevvooe): It is possible that referenced components could have
// children, but this is rare. For now, we ignore this and only verify
// that manifest components are present.
required = append([]ocispec.Descriptor{mfst.Config}, mfst.Layers...)
for _, desc := range required {
ra, err := provider.ReaderAt(ctx, desc)
if err != nil {
if errdefs.IsNotFound(err) {
missing = append(missing, desc)
continue
} else {
return false, nil, nil, nil, errors.Wrapf(err, "failed to check image %v", desc.Digest)
}
}
ra.Close()
present = append(present, desc)
}
return true, required, present, missing, nil
}
// Children returns the immediate children of content described by the descriptor.
func Children(ctx context.Context, provider content.Provider, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
var descs []ocispec.Descriptor
switch desc.MediaType {
case MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
p, err := content.ReadBlob(ctx, provider, desc)
if err != nil {
return nil, err
}
// TODO(stevvooe): We just assume oci manifest, for now. There may be
// subtle differences from the docker version.
var manifest ocispec.Manifest
if err := json.Unmarshal(p, &manifest); err != nil {
return nil, err
}
descs = append(descs, manifest.Config)
descs = append(descs, manifest.Layers...)
case MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
p, err := content.ReadBlob(ctx, provider, desc)
if err != nil {
return nil, err
}
var index ocispec.Index
if err := json.Unmarshal(p, &index); err != nil {
return nil, err
}
descs = append(descs, index.Manifests...)
case MediaTypeDockerSchema2Layer, MediaTypeDockerSchema2LayerGzip,
MediaTypeDockerSchema2LayerForeign, MediaTypeDockerSchema2LayerForeignGzip,
MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig,
ocispec.MediaTypeImageLayer, ocispec.MediaTypeImageLayerGzip,
ocispec.MediaTypeImageLayerNonDistributable, ocispec.MediaTypeImageLayerNonDistributableGzip,
MediaTypeContainerd1Checkpoint, MediaTypeContainerd1CheckpointConfig:
// childless data types.
return nil, nil
default:
log.G(ctx).Warnf("encountered unknown type %v; children may not be fetched", desc.MediaType)
}
return descs, nil
}
// RootFS returns the unpacked diffids that make up an image's rootfs.
//
// These are used to verify that a set of layers unpacked to the expected
// values.
func RootFS(ctx context.Context, provider content.Provider, configDesc ocispec.Descriptor) ([]digest.Digest, error) {
p, err := content.ReadBlob(ctx, provider, configDesc)
if err != nil {
return nil, err
}
var config ocispec.Image
if err := json.Unmarshal(p, &config); err != nil {
return nil, err
}
return config.RootFS.DiffIDs, nil
}
// IsCompressedDiff returns true if mediaType is a known compressed diff media type.
// It returns false if the media type is a diff, but not compressed. If the media type
// is not a known diff type, it returns errdefs.ErrNotImplemented
func IsCompressedDiff(ctx context.Context, mediaType string) (bool, error) {
switch mediaType {
case ocispec.MediaTypeImageLayer, MediaTypeDockerSchema2Layer:
case ocispec.MediaTypeImageLayerGzip, MediaTypeDockerSchema2LayerGzip:
return true, nil
default:
// Still apply all generic media types *.tar[.+]gzip and *.tar
if strings.HasSuffix(mediaType, ".tar.gzip") || strings.HasSuffix(mediaType, ".tar+gzip") {
return true, nil
} else if !strings.HasSuffix(mediaType, ".tar") {
return false, errdefs.ErrNotImplemented
}
}
return false, nil
} |
ajouterMotifs.js | var base_url = window.location.toString();
var tabUrl = base_url.split("public");
function cacherToutMotif(){
$(function(){
$('#motif1').toggle(false);
$('#motif2').toggle(false);
$('#motif3').toggle(false);
$('#motif4').toggle(false);
$('#motif5').toggle(false);
});
}
var nbChampMotif;
function afficherMotif(nbmotif_admission) {
$(function(){
for(var i = 1; i<=nbmotif_admission ; i++){
$('#motif'+i).toggle(true);
}
});
nbChampMotif = nbmotif_admission;
if(nbChampMotif == 1){
$('#supprimer_motif_img').toggle(false);
}
if(nbChampMotif == 5){
$('#ajouter_motif_img').toggle(false);
}
if(nbChampMotif == 1){
$(".supprimerMotif1" ).replaceWith(
"<img class='supprimerMotif' src='"+tabUrl[0]+"public/images/images/sup2.png' />"
);
}
ajouterMotif();
supprimerMotif();
supprimerLeMotif1();
}
function ajouterMotif(){
$('#ajouter_motif_img').click(function(){
nbChampMotif++;
$('#motif'+(nbChampMotif)).toggle(true);
if(nbChampMotif == 5){
$('#ajouter_motif_img').toggle(false);
}
if(nbChampMotif == 2){
$('#supprimer_motif_img').toggle(true);
$(".supprimerMotif" ).replaceWith(
"<img class='supprimerMotif1' style='cursor: pointer;' src='"+tabUrl[0]+"public/images/images/sup.png' title='supprimer' />"
);
supprimerLeMotif1();
}
});
if(nbChampMotif == 5){
$('#ajouter_motif_img').toggle(false);
}
}
function supprimerMotif(){
$('#supprimer_motif_img').click(function(){
$("#motif_admission"+nbChampMotif).val('');
$('#motif'+nbChampMotif).toggle(false);
nbChampMotif--;
if(nbChampMotif == 1){
$('#supprimer_motif_img').toggle(false);
$(".supprimerMotif1" ).replaceWith(
"<img class='supprimerMotif' src='"+tabUrl[0]+"public/images/images/sup2.png' />"
);
}
if(nbChampMotif == 4){
$('#ajouter_motif_img').toggle(true);
}
});
}
function supprimerLeMotif1(){
$(".supprimerMotif1").click(function(){
for(var i=1; i<nbChampMotif; i++){
$("#motif_admission"+i).val( $("#motif_admission"+(i+1)).val() );
}
$("#motif_admission"+i).val('');
$('#motif'+i).toggle(false);
if(nbChampMotif == 5){
$('#ajouter_motif_img').toggle(true);
}
if(nbChampMotif == 2){
$('#supprimer_motif_img').toggle(false);
$(".supprimerMotif1" ).replaceWith(
"<img class='supprimerMotif' src='"+tabUrl[0]+"public/images/images/sup2.png' />"
);
}
nbChampMotif--;
return false;
});
}
function supprimerUnMotif(){
$(".supprimerMotif2").click(function(){
for(var i=2; i<nbChampMotif; i++){
$("#motif_admission"+i).val( $("#motif_admission"+(i+1)).val() );
}
$("#motif_admission"+i).val('');
$('#motif'+i).toggle(false);
if(nbChampMotif == 5){
$('#ajouter_motif_img').toggle(true);
}
if(nbChampMotif == 2){
$('#supprimer_motif_img').toggle(false);
$(".supprimerMotif1" ).replaceWith(
"<img class='supprimerMotif' src='"+tabUrl[0]+"public/images/images/sup2.png' />"
);
}
nbChampMotif--;
return false;
});
$(".supprimerMotif3").click(function(){
for(var i=3; i<nbChampMotif; i++){
$("#motif_admission"+i).val( $("#motif_admission"+(i+1)).val() );
}
$("#motif_admission"+i).val('');
$('#motif'+i).toggle(false);
if(nbChampMotif == 5){
$('#ajouter_motif_img').toggle(true);
}
if(nbChampMotif == 2){
$('#supprimer_motif_img').toggle(false);
$(".supprimerMotif1" ).replaceWith(
"<img class='supprimerMotif' src='"+tabUrl[0]+"public/images/images/sup2.png' />"
);
}
nbChampMotif--;
return false;
});
$(".supprimerMotif4").click(function(){
for(var i=4; i<nbChampMotif; i++){
$("#motif_admission"+i).val( $("#motif_admission"+(i+1)).val() );
}
$("#motif_admission"+i).val('');
$('#motif'+i).toggle(false);
if(nbChampMotif == 5){
$('#ajouter_motif_img').toggle(true);
}
if(nbChampMotif == 2){
$('#supprimer_motif_img').toggle(false);
$(".supprimerMotif1" ).replaceWith(
"<img class='supprimerMotif' src='"+tabUrl[0]+"public/images/images/sup2.png' />"
);
}
nbChampMotif--;
return false;
});
	$(".supprimerMotif5").click(function(){
		for(var i=5; i<nbChampMotif; i++){
$("#motif_admission"+i).val( $("#motif_admission"+(i+1)).val() );
}
$("#motif_admission"+i).val('');
$('#motif'+i).toggle(false);
if(nbChampMotif == 5){
$('#ajouter_motif_img').toggle(true);
}
if(nbChampMotif == 2){
$('#supprimer_motif_img').toggle(false);
$(".supprimerMotif1" ).replaceWith(
"<img class='supprimerMotif' src='"+tabUrl[0]+"public/images/images/sup2.png' />"
);
}
nbChampMotif--;
return false;
});
}
//********************* motif_admission *****************************
$(function(){
var motif1 = $("#motif_admission1");
var motif2 = $("#motif_admission2");
var motif3 = $("#motif_admission3");
var motif4 = $("#motif_admission4");
var motif5 = $("#motif_admission5");
	//Initially, hide the "modifier" (edit) button
	$("#bouton_motif_modifier").toggle(false);
	//Initially, show the "valider" (submit) button
	$("#bouton_motif_valider").toggle(true);
	//Initially, leave all fields editable
	motif1.attr('readonly', false);
	motif2.attr('readonly', false);
	motif3.attr('readonly', false);
	motif4.attr('readonly', false);
	motif5.attr('readonly', false);
$("#bouton_motif_valider").click(function(){
motif1.attr( 'readonly', true);
motif2.attr( 'readonly', true);
motif3.attr( 'readonly', true);
motif4.attr( 'readonly', true);
motif5.attr( 'readonly', true);
$("#bouton_motif_modifier").toggle(true);
$("#bouton_motif_valider").toggle(false);
$('#ajouter_motif_img').toggle(false);
$('#supprimer_motif_img').toggle(false);
$('.supprimerMotif1, .supprimerMotif2, .supprimerMotif3, .supprimerMotif4, .supprimerMotif5').toggle(false);
return false;
});
$("#bouton_motif_modifier").click(function(){
motif1.attr( 'readonly', false);
motif2.attr( 'readonly', false);
motif3.attr( 'readonly', false);
motif4.attr( 'readonly', false);
motif5.attr( 'readonly', false);
$("#bouton_motif_modifier").toggle(false);
$("#bouton_motif_valider").toggle(true);
if(nbChampMotif != 5) { $('#ajouter_motif_img').toggle(true); }
if(nbChampMotif != 1) { $('#supprimer_motif_img').toggle(true); }
$('.supprimerMotif1, .supprimerMotif2, .supprimerMotif3, .supprimerMotif4, .supprimerMotif5').toggle(true);
return false;
});
}); |
aodh_handlers.py | # Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import charms.reactive as reactive
import charmhelpers.core.hookenv as hookenv
# This charm's library contains all of the handler code associated with
# aodh
import charm.openstack.aodh as aodh
# Minimal interfaces required for operation
MINIMAL_INTERFACES = [
'shared-db.available',
'identity-service.available',
'amqp.available',
]
# Use a synthetic state to ensure that the charm gets installed independent of
# the install hook.
@reactive.when_not('charm.installed')
def install_packages():
aodh.install()
reactive.set_state('charm.installed')
@reactive.when('amqp.connected')
def setup_amqp_req(amqp):
"""Use the amqp interface to request access to the amqp broker using our
local configuration.
"""
amqp.request_access(username='aodh',
vhost='openstack')
aodh.assess_status()
@reactive.when('shared-db.connected')
def setup_database(database):
    """On receiving database credentials, configure the database on the
    interface.
    """
    database.configure('aodh', 'aodh', hookenv.unit_private_ip())
    aodh.assess_status()
@reactive.when('identity-service.connected')
def setup_endpoint(keystone):
aodh.setup_endpoint(keystone)
aodh.assess_status()
def render(*args):
aodh.render_configs(args)
reactive.set_state('config.complete')
aodh.assess_status()
@reactive.when('charm.installed')
@reactive.when_not('cluster.available')
@reactive.when(*MINIMAL_INTERFACES)
def render_unclustered(*args):
render(*args)
@reactive.when('charm.installed')
@reactive.when('cluster.available',
*MINIMAL_INTERFACES)
def render_clustered(*args):
render(*args)
@reactive.when('charm.installed')
@reactive.when('config.complete')
@reactive.when_not('db.synced')
def run_db_migration():
aodh.db_sync()
aodh.restart_all()
reactive.set_state('db.synced')
aodh.assess_status()
@reactive.when('ha.connected')
def cluster_connected(hacluster):
aodh.configure_ha_resources(hacluster)
@reactive.hook('upgrade-charm')
def upgrade_charm():
aodh.install()
|
__init__.py | import os
from importlib import import_module
def get_providers():
for provider_file in os.listdir(os.path.dirname(os.path.abspath(__file__))):
if provider_file[0] != '$':
continue
provider = provider_file.replace('.py', '')
yield import_module(f'{__package__}.{provider}')
def get_provider(name, *args, **kwargs):
provider_module = import_module(f'{__name__}.${name}')
return provider_module.run(*args, **kwargs)
|
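Each provider is evidently a '$'-prefixed module inside this package exposing a run() entry point; a hypothetical provider module for illustration:

# $mail.py -- hypothetical provider module next to this __init__.py
def run(recipient, message):
    # A real provider would deliver the message; this one just echoes it.
    print(f'mail to {recipient}: {message}')

# Callers would then invoke it via: get_provider('mail', 'ops@example.com', 'disk full')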
dingding.py | # -*- coding:utf-8 -*-
"""
DingTalk robot API.
Author: HuangTao
Date: 2018/08/04
Update: 2018/12/24 1. Added markdown-format message push;
"""
from quant.utils import logger
from quant.utils.http_client import AsyncHttpRequests
class DingTalk:
    """ DingTalk robot API.
    """
BASE_URL = 'https://oapi.dingtalk.com/robot/send?access_token='
@classmethod
    async def send_text_msg(cls, access_token, content, phones=None, is_at_all=False):
        """ Send a text message.
        @param access_token DingTalk robot access_token
        @param content message content
        @param phones list of group members' phone numbers to @mention
        @param is_at_all whether to @mention everyone, default False
        """
body = {
'msgtype': 'text',
'text': {
'content': content
}
}
if is_at_all:
body['at'] = {'isAtAll': True}
if phones:
assert isinstance(phones, list)
body['at'] = {'atMobiles': phones}
url = cls.BASE_URL + access_token
headers = {'Content-Type': 'application/json'}
result = await AsyncHttpRequests.post(url, data=body, headers=headers)
logger.info('url:', url, 'body:', body, 'result:', result, caller=cls)
@classmethod
    async def send_markdown_msg(cls, access_token, title, text, phones=None, is_at_all=False):
        """ Send a markdown message.
        @param access_token DingTalk robot access_token
        @param title display content shown in the first-screen conversation preview
        @param text message body in markdown format
        @param phones list of group members' phone numbers to @mention
        @param is_at_all whether to @mention everyone, default False
        """
body = {
'msgtype': 'markdown',
'markdown': {
'title': title,
'text': text
}
}
if is_at_all:
body['at'] = {'isAtAll': True}
if phones:
assert isinstance(phones, list)
body['at'] = {'atMobiles': phones}
url = cls.BASE_URL + access_token
headers = {'Content-Type': 'application/json'}
result = await AsyncHttpRequests.post(url, data=body, headers=headers)
logger.info('url:', url, 'body:', body, 'result:', result, caller=cls)
|
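A usage sketch (the token and phone number are placeholders), sending a text alert from an asyncio program:

import asyncio

async def alert():
    await DingTalk.send_text_msg(
        access_token='xxxx',  # placeholder robot token
        content='BTC/USDT moved more than 5% in one minute',
        phones=['13800000000'],  # group member to @mention
    )

asyncio.get_event_loop().run_until_complete(alert())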
resp.rs | use std::fmt::Display;
use std::fmt::Formatter;
use crate::Bytes;
thread_local! {
pub static MSG_OK: Message = Message::String("OK".into());
}
thread_local!{
pub static INT_BYTES: Vec<Bytes> = {
let mut m = Vec::new();
for i in 0..10001 {
m.push(format!("{}", i-1).into());
}
m
};
}
pub fn get_int_bytes(n: i64) -> Bytes {
if n >= -1 && n < 10000 {
INT_BYTES.with(|x| x[(n+1) as usize].clone())
} else {
format!("{}", n).into()
}
}
#[inline]
pub fn new_msg_ok() -> Message {
MSG_OK.with(|x| x.clone())
}
#[derive(Debug, Clone)]
pub enum Message {
None,
Nil,
String(Bytes),
Integer(i64),
Error(Bytes),
BulkString(Bytes),
Array(Vec<Message>)
}
impl Display for Message {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
Message::None => f.write_str("[NONE]"),
Message::Nil => f.write_str("(nil)"),
Message::Array(msgs) => {
f.write_str("Arr(")?;
for msg in msgs {
msg.fmt(f)?;
f.write_str(" ")?;
}
f.write_str(")")
}
Message::String(s) => {
f.write_str("Str(")?;
f.write_fmt(format_args!("\x1b[1;33m{}\x1b[0m", String::from(s.clone())))?;
f.write_str(")")
},
Message::Integer(s) => {
f.write_str("Int(")?;
//f.write_str(format!("{}", *s).as_str());
f.write_fmt(format_args!("\x1b[1;33m{}\x1b[0m", *s))?;
f.write_str(")")
},
Message::Error(s) => {
f.write_str("ERR(")?;
//f.write_str(String::from(s.clone()).as_str());
f.write_fmt(format_args!("\x1b[1;33m{}\x1b[0m", String::from(s.clone())))?;
f.write_str(")")
},
Message::BulkString(s) => {
f.write_str("Bulk(")?;
f.write_fmt(format_args!("\x1b[1;33m{}\x1b[0m", String::from(s.clone())))?;
f.write_str(")")
},
}
}
}
impl PartialEq for Message {
    fn eq(&self, other: &Self) -> bool {
        match (self, other) {
            (Message::None, Message::None) => true,
            (Message::String(l), Message::String(r)) => l == r,
            (Message::BulkString(l), Message::BulkString(r)) => l == r,
            (Message::Integer(l), Message::Integer(r)) => l == r,
            (Message::Error(l), Message::Error(r)) => l == r,
            (Message::Array(l), Message::Array(r)) => l == r,
            (Message::Nil, Message::Nil) => true,
            _ => false,
        }
    }
}
impl Message {
pub fn size(&self) -> usize {
match self {
Message::None => 0,
Message::Nil => 0,
Message::String(b) => b.len(),
Message::Error(e) => e.len(),
Message::BulkString(b) => b.len(),
Message::Integer(_) => 8,
Message::Array(args) => args.iter().map(|x| x.size()).sum(),
}
}
pub fn raw_size(&self) -> usize {
match self {
Message::None => 0,
Message::Nil => 5,
Message::String(b) => b.len() + 3,
Message::Error(e) => e.len() + 3,
Message::BulkString(b) => {
let l = b.len();
get_int_bytes(l as i64).len() + l + 5
},
Message::Integer(i) => get_int_bytes(*i).len() + 3,
Message::Array(args) => {
let l = args.len();
let ds: usize = args.iter().map(|x| x.size()).sum();
get_int_bytes(l as i64).len() + 3 + ds
},
}
}
}
#[macro_export]
macro_rules! mkcmd {
($cmd:expr) => ({
        let args = vec![Message::BulkString(format!("{}", $cmd).into())];
Message::Array(args)
});
($cmd:expr, $($arg:expr),*) => ({
let mut args = vec![Message::BulkString(format!("{}", $cmd).into())];
$(
args.push(Message::BulkString(format!("{}", $arg).into()));
)*
Message::Array(args)
})
} |
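The +3/+5 constants in raw_size fall directly out of the RESP wire format ("+OK\r\n", "$5\r\nhello\r\n", and so on). A small Python sketch of the same size arithmetic, for cross-checking:

def resp_raw_size(kind, value):
    # Mirrors Message::raw_size above; value is bytes, an int, or a list of (kind, value).
    if kind == 'nil':                         # "$-1\r\n"
        return 5
    if kind in ('string', 'error'):           # "+<data>\r\n" / "-<data>\r\n"
        return len(value) + 3
    if kind == 'int':                         # ":<digits>\r\n"
        return len(str(value)) + 3
    if kind == 'bulk':                        # "$<len>\r\n<data>\r\n"
        return len(str(len(value))) + len(value) + 5
    if kind == 'array':                       # "*<len>\r\n" + elements
        return len(str(len(value))) + 3 + sum(resp_raw_size(k, v) for k, v in value)

Note one asymmetry in the Rust version: the Array arm sums the elements' size() rather than raw_size(), so nested framing bytes are not counted; the sketch above uses the full recursive size.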
main.js | import { ACTIONS, Container } from '../../Container.js'
import { Cursor } from './cursor.js'
import { TextSize } from './TextSize.js'
import { FontManager } from './fonts.js'
import { Keyboard } from '../utils/keyboard.js'
import { EVENTS as ClipboardEvents, Clipboard } from '../utils/clipboard.js'
import { ACCESS_REQUIREMENT } from '../utils/InputAccessManager.mjs'
import { findCommonStyleSubset } from '../utils/common.js'
import { queueWork } from '../../YeldingExecutor.js'
//import { getSelection } from '../utils/common.js'
//TODO:
//fix reverse selection (sometimes it is smaller than it should be)
//forward delete doesn't work well (via delete key)
//better line comprehension in text manipulation:
// - fixed line heights
//span compaction (combine if style is identical)
//unicode & escaped character support
//serialize text units as rich text
//line spacing
//wrapping - add in logic to support:
/*
1. resizing container as text is typed (both width and height)
2. automatically wrapping a line that is too long
-> cursor support for this (depends on knowing char height and width)
Types of fields:
1. Just a text field
width: auto
height: auto
- self adjusting
or fitVisibleContent with expandOnly = false
2. Fixed width text field
width: x px
height: auto
- need some function to break the line
- needs auto line breaking
3. Multi content text field
width: x px
height: y px
- container.fitVisibleContent()
*/
const textItemPerms = {}//, "container.edit":{"*":false}}
textItemPerms[ACTIONS.setPosition] = {"*":false}
textItemPerms[ACTIONS.create] = {"*":false}
const textLinePerms = {}
textLinePerms[ACTIONS.setPosition] = {"*":false}
export class ContainerTextInjector {
appId = "container.edit.text"
container = null;
target = null;
newLineChar = '\n';
tabCharacter = '\t';
tabInSpaces = ' ';
#textSize = null;
#interface = null;
#keyboard = null;
#clipboard = null;
	#fontManager = null;
	// The original declarations here fall in a gap in the source; these two
	// initializers are reconstructed from how the fields are used below.
	#handlers = {};
	#enabled = false;
	#toggleSelectionModify = false;
#anchorCursor = {}
#debug = false;
#cursorDiv = null
defaultTextBoxDescriptor = {
nodeName:"DIV",
computedStyle:{
"position":"absolute",
"background-color": "transparent",
"width":"auto",
"height":"auto",
"min-width":"32px",
"min-height":"32px"
},
data:{}//[TODO]
}
cursorDescriptor = {
nodeName:"DIV",
className: "text-document-cursor",
computedStyle:{"position":"absolute"},
data:{
ignore:true,
containerPermissions:{
"container.broadcast":{"*":false},
"container.bridge":{"*":false}
}
}
}
lineDescriptor = {
nodeName: "DIV",
className: "text-document-line",
"data":{
//"containerActions":[{"trigger":"click","call":"container.edit.text.onLineClick","params":[]}],
"containerPermissions":textLinePerms
}
}
textUnitDescriptor = {
nodeName: "SPAN",
className: "text-document-unit",
computedStyle:{},
"data":{
"containerActions":[{"trigger":"click","call":"container.edit.text.onTextUnitClick","params":[]}],
"containerPermissions":textItemPerms
}
}
state = {
control:false,
bold:'normal',
italic:'normal',
underlined: '',
textColor: undefined,
highlightColor: undefined,
fontFam: "Arial",
fontSize: "15px"
}
cursor = null
constructor (container, debug) {
this.container = container;
container.registerComponent(this);
this.#debug = debug
if(debug) {
this.lineDescriptor['computedStyle'] = {
"border-width": "3px",
"border-color": "red",
"border-style": "dotted"
}
this.textUnitDescriptor['computedStyle'] = this.lineDescriptor.computedStyle;
console.log("Text editor running in debug mode")
}
this.cursor = new Cursor(this)
this.#textSize = new TextSize(container, this)
this.#keyboard = new Keyboard(this.appId, container, ACCESS_REQUIREMENT.EXCLUSIVE)
this.initKeyboard();
this.#clipboard = new Clipboard(this.appId, container);
this.#clipboard.setAction(ClipboardEvents.paste,
(event) => this.paste(event.detail.originalEvent),
ACCESS_REQUIREMENT.EXCLUSIVE)
this.#clipboard.setAction(ClipboardEvents.cut,
(event) => this.cut(event.detail.originalEvent),
ACCESS_REQUIREMENT.EXCLUSIVE)
this.#clipboard.setAction(ClipboardEvents.copy,
(event) => {}, //noop
ACCESS_REQUIREMENT.EXCLUSIVE)
this.#fontManager = new FontManager(container)
this.#handlers['selectionchange'] = (e) => this.onSelectionChange(e)
//create interface holder
this.#interface = this.container.createFromSerializable(document.body, {
"nodeName":"div",
"computedStyle":{
"top":"0px",
"left":"0px",
"position":"absolute"
},
"data":{
"ignore":true,
"containerPermissions":{
"container.broadcast":{"*":false},
"container.bridge":{"*":false}
}
}
},
null,
this.appId)
this.container.hide(this.#interface, this.appId)
//load interface style and html
this.container.loadStyle("style.css", this.appId)
this.container.loadHtml(this.#interface, "interface.html", this.appId)
.then(e => {
this.#loadFontsInInterface()
})
//create cursor pointer
this.#cursorDiv = this.container.createFromSerializable(document.body, this.cursorDescriptor, null, this.appId)
this.container.hide(this.#cursorDiv, this.appId)
}
enable() {
if (!this.#enabled) {
for (const [key, value] of Object.entries(this.#handlers)) {
this.container.addEventListener(key, value, this.appId)
}
document.addEventListener("selectionchange", this.#handlers['selectionchange'])
this.#enabled = true
}
}
disable() {
if (this.#enabled) {
this.stop();
for (const [key, value] of Object.entries(this.#handlers)) {
this.container.removeEventListener(key, value, this.appId)
}
document.removeEventListener("selectionchange", this.#handlers['selectionchange'])
this.container.hide(this.#interface, this.appId)
this.#enabled = false
}
}
isEnabled() {
return this.#enabled
}
initKeyboard () {
this.#keyboard.onPritable(this, (key) => this.addPrintable(key), true)
this.#keyboard.setAction(new Set(['Backspace']), this, (key) => this.removePrintable(-1), true)
this.#keyboard.setAction(new Set(['Delete']), this, (key) => this.removePrintable(1), true)
this.#keyboard.setAction(new Set(['Enter']), this, (key) => this.newLine(), true)
this.#keyboard.setAction(new Set(['Escape']), this, (key) => this.stop(), true)
this.#keyboard.setAction(new Set(['Tab']), this, (key) => this.tab(), true, true)
this.#keyboard.setAction(new Set(['Shift']), this, (key) => {
this.#toggleSelectionModify = true
this.#anchorCursor = this.cursor.get()
}, true)
this.#keyboard.setKeyUpAction(new Set(['Shift']), this, (key) => this.#toggleSelectionModify = false, true)
this.#keyboard.setAction(new Set(['Down']), this, (key) => this.cursorDown(), true)
this.#keyboard.setAction(new Set(['ArrowDown']), this, (key) => this.cursorDown(), true)
this.#keyboard.setAction(new Set(['Up']), this, (key) => this.cursorUp(), true)
this.#keyboard.setAction(new Set(['ArrowUp']), this, (key) => this.cursorUp(), true)
this.#keyboard.setAction(new Set(['Left']), this, (key) => this.cursorLeft(), true)
this.#keyboard.setAction(new Set(['ArrowLeft']), this, (key) => this.cursorLeft(), true)
this.#keyboard.setAction(new Set(['Right']), this, (key) => this.cursorRight(), true)
this.#keyboard.setAction(new Set(['ArrowRight']), this, (key) => this.cursorRight(), true)
this.#keyboard.setAction(new Set(['End']), this, (key) => this.toLineEnd(), true)
this.#keyboard.setAction(new Set(['Home']), this, (key) => this.toLineStart(), true)
this.#keyboard.setAction(new Set(['Control','u']), this, (key) => this.underlined(), true)
this.#keyboard.setAction(new Set(['Control','i']), this, (key) => this.italic(), true)
this.#keyboard.setAction(new Set(['Control','b']), this, (key) => this.bold(), true)
this.#keyboard.setAction(new Set(['Control','a']), this, (key) => this.selectAll(), true)
this.#keyboard.setAction(new Set(['Control','1']), this, (key) => this.align('left'), true)
this.#keyboard.setAction(new Set(['Control','2']), this, (key) => this.align('center'), true)
this.#keyboard.setAction(new Set(['Control','3']), this, (key) => this.align('right'), true)
this.#keyboard.setAction(new Set(['Control','4']), this, (key) => this.align('justify'), true)
this.#keyboard.setAction(new Set(['Control','+']), this, (key) => this.changeFontSize(1), true)
this.#keyboard.setAction(new Set(['Control','-']), this, (key) => this.changeFontSize(-1), true)
this.#keyboard.setAction(new Set(['Control','/']), this, (key) => this.underlined(), true)
this.#keyboard.setAction(new Set(['Control',';']), this, (key) => this.underlined(), true)
this.#keyboard.setAction(new Set(['Control','c']), this, (key) => {}, false)
this.#keyboard.setAction(new Set(['Control','v']), this, (key) => {}, false)
}
#loadFontsInInterface () {
let fonts = this.#fontManager.listFonts()
let root = this.container.lookup("ns-text-editor-font")
for (const fontName of fonts) {
let fontRow = document.createElement('option')
fontRow.value = fontName
fontRow.innerHTML = fontName
root.appendChild(fontRow)
}
}
start (target, overrideNewBoxPos) {
if (!target || this.target == target || !this.#enabled) {
return;
}
this.container.componentStartedWork(this.appId, {})
console.log(`${this.appId} start text editing ${target}`)
this.target = this.findFirstDivParent(this.container.lookup(target));
console.log(`${this.appId} Set text edit target to ${target.id}`)
try {
this.container.isOperationAllowed('container.edit', this.target, this.appId)
} catch (e) {
console.log(`${this.appId} - container does not allow editing at all. Aborting`)
this.stop();
return;
}
if (!this.isTargetTextEditable(this.target)) {
console.log(`${this.appId} - container not suitable for text editing. Creating a transparent container to type in.`)
this.target = this.container.createFromSerializable(this.target, this.defaultTextBoxDescriptor, null, this.appId)
if (overrideNewBoxPos) {
this.container.setPosition(this.target, overrideNewBoxPos, this.appId)
}
}
this.cursor.setTarget(this.target)
this.container.show(this.#interface, this.appId)
let pos = this.container.getPosition(this.target)
//set interface position
pos.originX = 0.0
pos.originY = 1.0
this.container.setPosition(this.#interface, pos, this.appId)
this.#interface.style['min-width'] = this.container.getWidth(this.target)
//bring up interface
this.container.bringToFront(this.#interface, this.appId)
this.container.show(this.#cursorDiv, this.appId)
this.container.bringToFront(this.#cursorDiv, this.appId)
//TODO use cascading and local permissions
//these need to be ephemeral state, not sent to the server and propagated...
this.container.setMetadata(this.target, 'text-editing', true)
this.container.setPermission(this.target, ACTIONS.cascade, '*', false, this.appId, true)
//this.container.setPermission(this.target, ACTIONS.delete, 'container.create', false, this.appId)
this.#keyboard.enable();
this.#clipboard.enable();
if (this.container.camera) {
this.container.camera.focusOn(this.target)
}
}
stop () {
if (this.target) {
console.log(`${this.appId} stop text editing`)
this.container.componentStoppedWork(this.appId)
this.#keyboard.disable()
this.#clipboard.disable()
//this.container.removePermission(this.target, ACTIONS.delete, 'container.create', false, this.appId)
this.container.removeMetadata(this.target, 'text-editing')
this.container.removePermission(this.target, ACTIONS.cascade, '*', this.appId)
this.cursor.setTarget(null);
this.container.hide(this.#cursorDiv, this.appId)
this.container.hide(this.#interface, this.appId)
this.target = null;
}
}
getEditTarget() {
return this.target
}
//[TODO]: make efficient, should not loop over every time
getLinesCount(target = {childNodes:[]}) {
let count = 0
for (const child of target.childNodes) {
if (this.isLine(child) && this.isVisible(child)) {
count++
}
}
return count
}
//[TODO]: make efficient, should not seek every time this is called
getLine(target = {childNodes:[]}, lineNumber) {
let count = 0;
for (const child of target.childNodes) {
if (this.isLine(child) && this.isVisible(child)) {
if (count == lineNumber) {
return child
}
count++;
}
}
return null;
}
isVisible(target = {style:{display:'none'}}) {
return target.style.display !== 'none'
}
static isPrintableCharacter(key) {
return key.length === 1;
}
findFirstDivParent(elem) {
if (!elem)
return null;
if (this.isTextUnit(elem)) {
return elem.parentNode.parentNode
}
if (elem.nodeName == 'DIV') {
if (this.isLine(elem)) {
return elem.parentNode
}
return elem
}
return this.findFirstDivParent(elem.parentNode)
}
//[TODO]: reconsider criteria
isTargetTextEditable(target) {
if (target === this.container.parent) {
console.log(`${this.appId} - currently not allowing adding text to root container`)
return false;
}
for (const child of target.children) {
if (this.isLine(child)) {
console.log(`${this.appId} - found a line. Can text edit`)
return true;
}
}
if (!target.childNodes || this.getLinesCount(this.target) == 0) {
return true;
}
console.log(`${this.appId} - comprised only of other type of containers. Cannot text edit`)
return false;
}
#detectClipboardFormat(controller) {
console.log(`CLIPBOARD_TYPES: ${JSON.stringify(controller.types)}`)
//let rtf = controller.getData('text/html')
//console.log(`RTF_CLIPBOARD: ${rtf}`)
}
//doesn't support rich text yet
paste (event) {
if (!this.target) {
return;
}
let clipboardController = (event.clipboardData || window.clipboardData)
let paste = clipboardController.getData('text');
this.addPrintable(paste)
event.preventDefault();
//[TODO]: integrate
this.#detectClipboardFormat(clipboardController)
}
cut (event) {
if (!this.target) {
return;
}
let selection = this.getSelected()
if ( selection ) {
event.clipboardData.setData("text/plain", this.serializeTextUnits(selection.units))
event.preventDefault();
}
this.deleteSelection();
}
isLine(elem) {
return elem && elem.className && elem.className.includes(this.lineDescriptor.className);
}
isNewLine(elem) {
return this.isLine(elem) && elem.children.length == 0;
}
isTextUnit(elem) {
return elem && elem.className && elem.className.includes(this.textUnitDescriptor.className);
}
isTextUnitInCurrentTarget(unit) {
while(unit) {
if(unit == this.target) {
return true;
}
unit = unit.parentNode
}
return false;
}
serializeTextUnits(units) {
let result = ""
for (const unit of units) {
result += unit.innerHTML
}
return result
}
cursorUp () {
if (!this.target) {
return;
}
let curStat = this.cursor.getPosition()
this.cursor.putAt(curStat.lineNumber - 1, curStat.charNumber)
if (this.#toggleSelectionModify) {
this.modifySelection()
} else {
this.clearSelection();
}
this.cursorUpdateVisible(this.#cursorDiv)
}
cursorDown () {
if (!this.target) {
return;
}
let curStat = this.cursor.getPosition()
this.cursor.putAt(curStat.lineNumber + 1, curStat.charNumber)
if (this.#toggleSelectionModify) {
this.modifySelection()
} else {
this.clearSelection();
}
this.cursorUpdateVisible(this.#cursorDiv)
}
cursorLeft () {
if (!this.target) {
return;
}
this.cursor.move(-1)
if (this.#toggleSelectionModify) {
this.modifySelection()
} else {
this.clearSelection();
}
this.cursorUpdateVisible(this.#cursorDiv)
}
cursorRight () {
if (!this.target) {
return;
}
this.cursor.move(1)
if (this.#toggleSelectionModify) {
this.modifySelection()
} else {
this.clearSelection();
}
this.cursorUpdateVisible(this.#cursorDiv)
}
toLineEnd() {
this.cursor.putAtLineEnd()
if (this.#toggleSelectionModify) {
this.modifySelection()
} else {
this.clearSelection();
}
this.cursorUpdateVisible(this.#cursorDiv)
}
toLineStart() {
this.cursor.putAtLineStart()
if (this.#toggleSelectionModify) {
this.modifySelection()
} else {
this.clearSelection();
}
this.cursorUpdateVisible(this.#cursorDiv)
}
makeNewLine(insertAt) {
let lineBefore = undefined;
if (insertAt >= 0 && insertAt < this.getLinesCount(this.target)) {
lineBefore = this.getLine(this.target, insertAt)
}
return this.container.createFromSerializable(this.target.id, this.lineDescriptor, lineBefore, this.appId)
}
#updateTextUnitDescriptorFromState() {
this.textUnitDescriptor.computedStyle['color'] = this.state.textColor;
//this.textUnitDescriptor.computedStyle['background-color'] = this.state.highlightColor;
this.textUnitDescriptor.computedStyle['font-family'] = this.state.fontFam;
this.textUnitDescriptor.computedStyle['font-size'] = this.state.fontSize;
this.textUnitDescriptor.computedStyle['font-weight'] = this.state.bold;
this.textUnitDescriptor.computedStyle['font-style'] = this.state.italic;
this.textUnitDescriptor.computedStyle['text-decoration-line'] = this.state.underlined;
}
makeNewTextChild (line, before) {
this.#updateTextUnitDescriptorFromState();
let unit = this.container.createFromSerializable(line.id, this.textUnitDescriptor, before, this.appId)
return unit
}
styleTarget() {
if ( this.target ) {
this.container.fitVisibleContent(this.target, true)
}
}
//[TODO]: NOT USED?
findClosestTextUnit(line, direction) {
let pointer = line
let skippedLines = new Set([])
while (this.isNewLine(pointer)) {
skippedLines.add(pointer)
if (direction < 0) {
pointer = pointer.previousSibling
} else {
pointer = pointer.nextSibling
}
}
let result = null
if (pointer && !this.isNewLine(pointer)) {
if (direction < 0){
result = pointer.childNodes[pointer.childNodes.length - 1]
} else {
result = pointer.childNodes[0]
}
}
return {textUnit: result, skippedLines: skippedLines}
}
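    // findBetween collects the lines and text units spanning from start to end
    // in document order. A sketch of the contract, as inferred from the callers:
    // start and end may each be a text unit, a line, or omitted; when start is
    // omitted the walk begins at end's line, when end is omitted it stops at the
    // end of start's line; stopAtEOL cuts the walk at the first line boundary.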
findBetween (start, end, stopAtEOL) {
if (!start && !end) {
return {lines:new Set([]), units: new Set([])}
}
let currentLine = null
let currentTextUnit = null
if (this.isTextUnit(start)) {
currentTextUnit = start
currentLine = start.parentNode
} else if (this.isLine(start)) {
currentLine = start
currentTextUnit = start.firstChild
} else if (start) {
            throw new Error('Find only works using text units and lines; a different kind of DOM element was provided for start')
}
let endTextUnit = null
let endLine = null
if (this.isTextUnit(end)) {
endTextUnit = end
endLine = end.parentNode
} else if (this.isLine(end)) {
endLine = end
endTextUnit = end.lastChild
} else if(end) {
            throw new Error('Find only works using text units and lines; a different kind of DOM element was provided for end')
}
if (!start) {
currentLine = endLine
currentTextUnit = currentLine.firstChild
}
if (!end) {
endLine = currentLine
endTextUnit = currentLine.lastChild
}
let units = new Set([])
let lines = new Set([])
while (currentLine) {
if (this.isLine(currentLine) && this.isVisible(currentLine)) {
if (!currentTextUnit) {
currentTextUnit = currentLine.firstChild
}
lines.add(currentLine)
while (currentTextUnit) {
if (this.isTextUnit(currentTextUnit)) {
units.add(currentTextUnit)
if (currentTextUnit == endTextUnit) {
return {lines:lines, units:units}
}
}
currentTextUnit = currentTextUnit.nextSibling
}
if (stopAtEOL) {
break;
}
if (currentLine == endLine) {
break;
}
}
currentLine = currentLine.nextSibling
}
return {lines:lines, units:units}
}
makeSelection(start, end, startOffset = 0, endOffset = 0) {
let range = new Range();
if (this.isTextUnitBefore(end, start)) {
range.setStart(end.lastChild, (endOffset > 0) ? endOffset : end.lastChild.length);
range.setEnd(start.firstChild, startOffset);
} else {
range.setStart(start.firstChild, startOffset);
range.setEnd(end.lastChild, (endOffset > 0) ? endOffset : end.lastChild.length);
}
let sel = this.clearSelection()
sel.addRange(range)
}
clearSelection () {
let sel = document.getSelection();
sel.removeAllRanges();
return sel;
}
onSelectionChange(e) {
let docSelect = document.getSelection();
if (!docSelect.focusNode) {
return;
}
let focusTextUnit = docSelect.focusNode.parentNode
let offset = docSelect.focusOffset;
this.cursor.putOn(focusTextUnit, offset)
this.cursorUpdateVisible(this.#cursorDiv)
}
getSelected () {
let docSelect = document.getSelection();
//figure out if selection belongs to target
if (docSelect
&& docSelect.focusNode
&& docSelect.anchorNode) {
let focusTunitInTarget = this.isTextUnitInCurrentTarget(docSelect.focusNode.parentNode)
let anchorTunitInTarget = this.isTextUnitInCurrentTarget(docSelect.anchorNode.parentNode)
if (focusTunitInTarget && anchorTunitInTarget) {
var start = docSelect.anchorNode.parentNode
var startOffset = docSelect.anchorOffset
var end = docSelect.focusNode.parentNode
var endOffset = docSelect.focusOffset
if (start == end && startOffset == endOffset) {
this.clearSelection();
return null;
}
if (this.isTextUnitBefore(end, start)) {
let aux = start;
start = end;
end = aux;
}
if (start == end && (startOffset > endOffset)){
let aux = endOffset;
endOffset = startOffset;
startOffset = aux;
}
let startSplit = this.splitTextUnit(start, startOffset)
let startNode = startSplit[1]
if (start == end) {
endOffset -= startOffset
end = startNode
}
let endSplit = this.splitTextUnit(end, endOffset)
let endNode = endSplit[0]
this.makeSelection(startNode, endNode)
if (startSplit[0]) {
this.cursor.putOn(startSplit[0], startSplit[0].innerHTML.length)
} else if(endSplit[1]) {
this.cursor.putOn(endSplit[1], 0)
}
return this.findBetween(startNode, endNode)
}
}
return null;
}
modifySelection () {
let curPos = this.cursor.get()
let docSelect = document.getSelection();
if (docSelect && docSelect.anchorNode && docSelect.focusNode) {
docSelect.extend(curPos.textUnit.firstChild, curPos.localCharNumber)
} else {
this.makeSelection(this.#anchorCursor.textUnit, curPos.textUnit,
this.#anchorCursor.localCharNumber, curPos.localCharNumber)
}
}
selectAll() {
if (this.target) {
this.makeSelection(this.target.firstChild.firstChild, this.target.lastChild.lastChild)
}
}
// onLineClick(e) {
// let line = e.target
// this.start(e.target.parentNode)
// }
//cursor logic
onTextUnitClick(e) {
let textUnit = e.target
let textStyle = this.container.toSerializableStyle(textUnit)
let offset = this.#textSize.positionToCharNumber(textUnit.innerHTML, textStyle, 0, e.offsetX)
this.start(e.target.parentNode.parentNode)
this.cursor.putOn(textUnit, offset)
this.cursorUpdateVisible(this.#cursorDiv)
}
cursorUpdateVisible(blinker) {
if (!blinker) {
blinker = this.#cursorDiv
}
let curStat = this.cursor.get()
let localText = ""
let localTarget = curStat.line
let textUnit = curStat.textUnit
if (textUnit) {
localText = textUnit.innerHTML
localTarget = textUnit
}
let localStyle = this.container.toSerializableStyle(localTarget)
let localPosition = this.#textSize.charNumberToPosition(localText, localStyle, curStat.localCharNumber)
let globalPosition = this.container.localToGlobalPosition(localTarget, localPosition.left, localPosition.top)
let unitHeight = this.container.getHeight(localTarget)
this.container.show(blinker, this.appId)
this.container.setPosition(blinker, globalPosition, this.appId)
this.container.setHeight(blinker, unitHeight, this.appId)
this.container.bringToFront(blinker, this.appId)
}
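    // splitTextUnit splits one text unit in two at the given character offset
    // and returns the [left, right] pair. A quick sketch of the edge cases:
    //   splitTextUnit(unit, 0)                     -> [unit.previousSibling, unit]
    //   splitTextUnit(unit, unit.innerHTML.length) -> [unit, unit.nextSibling]
    //   splitTextUnit(unit, 2) on "hello"          -> [unit /*"he"*/, right /*"llo"*/]
    // The right half is created as a new sibling with the same serialized
    // style and permissions as the original.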
splitTextUnit(unit, offset) {
if (!this.isTextUnit(unit)) {
            throw new Error('splitTextUnit can only be used on text units')
}
if (offset == 0) {
return [unit.previousSibling, unit]
}
if (offset == unit.innerHTML.length) {
return [unit, unit.nextSibling]
}
let rightText = unit.innerHTML.substring(offset, unit.innerHTML.length)
unit.innerHTML = unit.innerHTML.substring(0, offset)
let descriptor = this.container.toSerializable(unit.id)
delete descriptor.id;
descriptor.innerHTML = rightText;
descriptor.data.containerPermissions = JSON.stringify(textItemPerms)
this.container.notifyUpdate(unit, this.appId)
let right = this.container.createFromSerializable(unit.parentNode.id, descriptor, unit.nextSibling, this.appId)
return [unit, right]
}
deleteSelection() {
let selection = this.getSelected();
this.clearSelection()
if (selection && selection.units && selection.units.size > 0) {
this.deleteTextUnits(selection.units, true);
return true;
}
return false;
}
deleteLines(lines) {
for (const line of lines) {
this.container.delete(line, this.appId)
}
}
deleteTextUnits(units, andLines) {
for (const unit of units) {
let parent = unit.parentNode
this.container.delete(unit, this.appId)
if (parent.children.length == 0 && andLines) {
this.container.delete(parent, this.appId)
}
}
}
insertTextBlockAsLine(descriptor, lineBefore) {
let line = this.container.createFromSerializable(this.target.id, this.lineDescriptor, lineBefore, this.appId)
        this.container.createFromSerializable(line.id, descriptor, null, this.appId)
}
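    // addPrintable inserts plain text at the cursor. The first pasted line is
    // spliced into the current text unit; any further lines are queued as new
    // line containers below the cursor (tabs are expanded to spaces first).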
addPrintable(text) {
if (!this.target) {
return;
}
//if there's a selection delete it first
this.deleteSelection();
let curStat = this.cursor.get()
if (!curStat.line) {
this.makeNewLine(0)
curStat = this.cursor.get()
}
if (!curStat.textUnit) {
this.makeNewTextChild(curStat.line)
curStat = this.cursor.get()
}
let textLines = text.split(this.newLineChar)
        console.log(`${this.appId} - adding ${textLines.length} line(s) of text`)
let textUnit = curStat.textUnit;
let existing = textUnit.innerHTML
let before = existing.substring(0, curStat.localCharNumber)
let after = existing.substring(curStat.localCharNumber, existing.length)
let firstLine = textLines[0]
textUnit.innerHTML = `${before}${firstLine.replaceAll(this.tabCharacter, this.tabInSpaces)}${after}`
this.cursor.move(firstLine.length)
this.cursorUpdateVisible(this.#cursorDiv)
this.container.notifyUpdate(textUnit.id, this.appId)
if (textLines.length > 1) {
let lineBefore = this.getLine(this.target, curStat.lineNumber + 1)
for (var i = 1; i < textLines.length; ++i) {
let descriptor = Container.clone(this.textUnitDescriptor)
descriptor.innerHTML = textLines[i].replaceAll(this.tabCharacter, this.tabInSpaces)
queueWork(this.insertTextBlockAsLine, this, [descriptor, lineBefore])
}
}
queueWork(this.styleTarget, this)
}
newLine() {
if (!this.target) {
return;
}
this.clearSelection()
let moveToNewLine = new Set([])
let curStat = this.cursor.get()
if (curStat.textUnit) {
let split = this.splitTextUnit(curStat.textUnit, curStat.localCharNumber)
if (split[1]) {
moveToNewLine = this.findBetween(split[1]).units
}
}
//make new line and pull in items
let line = this.makeNewLine(curStat.lineNumber + 1)
for (const textUnit of moveToNewLine) {
this.container.setParent(textUnit, line, this.appId)
}
//update cursor
this.cursor.putAt(curStat.lineNumber + 1, 0)
this.cursorUpdateVisible(this.#cursorDiv)
this.styleTarget()
}
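    // removePrintable deletes `count` characters relative to the cursor:
    // count > 0 deletes forward, count < 0 deletes backward. Both directions
    // split the boundary text units, delete everything in between, then carry
    // the surviving units of the far line back onto the current line.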
//BUG: fix forward deletion
removePrintable(count) {
if (!this.target) {
return;
}
if (!count) {
return;
}
//ToDo: update cursor in deleteSelection
if (this.deleteSelection()) {
return;
}
let start = undefined
let startLine = undefined
let end = undefined
let endLine = undefined
let cursorStart = this.cursor.get()
let cursorEnd = null
let currentLine = null
let carryOver = null
if (count > 0) {
//completely untested flow
if (cursorStart.textUnit) {
start = this.splitTextUnit(cursorStart.textUnit, cursorStart.localCharNumber)
carryOver = this.findBetween(null,start[0]).units
}
startLine = cursorStart.line
cursorEnd = this.cursor.move(count)
endLine = cursorEnd.line
currentLine = endLine
if (cursorEnd.textUnit) {
end = this.splitTextUnit(cursorEnd.textUnit, cursorEnd.localCharNumber)
}
if(start && end && start[1] && start[1] == end[1]) { //split happened on the same text unit
start[1] = end[0]
}
//todo replicate below case
} else {
if (cursorStart.textUnit) {
end = this.splitTextUnit(cursorStart.textUnit, cursorStart.localCharNumber)
carryOver = this.findBetween(end[1], null).units
}
endLine = cursorStart.line
cursorEnd = this.cursor.move(count)
startLine = cursorEnd.line
currentLine = startLine
if (cursorEnd.textUnit) {
start = this.splitTextUnit(cursorEnd.textUnit, cursorEnd.localCharNumber)
}
if (end && start && cursorEnd.textUnit == end[0]) {
//this is a special annoying case
end[0] = start[1]
}
}
//computing delete list
let startElem = (start) ? (start[1] || start[0]) : startLine
let endElem = (end) ? (end[0] || end[1]) : endLine
let delList = this.findBetween( startElem, endElem )
let toDelete = delList.units
if (start && !start[1]) {
toDelete.delete(start[0])
}
if (end && !end[0]) {
toDelete.delete(end[1])
}
let linesToDelete = delList.lines
linesToDelete.delete(currentLine)
this.deleteTextUnits(toDelete, false)
if ( carryOver ) {
for (const carryOverUnit of carryOver) {
                try {
                    this.container.setParent(carryOverUnit, currentLine, this.appId)
                } catch (e) {
                    console.error(`${this.appId} - invalid element in carry over set`, e)
                }
}
}
this.deleteLines(linesToDelete)
this.cursorUpdateVisible(this.#cursorDiv)
}
tabLineStart (line) {
let firstTextUnit = line.firstChild || this.makeNewTextChild(line)
firstTextUnit.innerHTML = `${this.tabInSpaces}${firstTextUnit.innerHTML}`
}
tab () {
let selection = this.getSelected();
if (selection && selection.lines.size > 0) {
for (const line of selection.lines) {
queueWork(this.tabLineStart, this, [line])
}
} else {
this.addPrintable(this.tabInSpaces)
}
queueWork(this.styleTarget, this)
}
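    // getTextUnitDistanceFromRoot approximates document order by counting hops
    // while walking previous siblings (and up to the parent when a text unit is
    // the first child of its line). isTextUnitBefore compares two of these
    // distances, so it is only meaningful for units under the same target.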
getTextUnitDistanceFromRoot(textUnit) {
let dist = 0;
let node = textUnit
while (node) {
if (this.isTextUnit(node) && !node.previousSibling) {
node = node.parentNode
} else {
node = node.previousSibling
}
            dist++;
}
return dist;
}
isTextUnitBefore(textUnit1, textUnit2) {
return this.getTextUnitDistanceFromRoot(textUnit1) < this.getTextUnitDistanceFromRoot(textUnit2)
}
styleTextUnits(style, textUnits) {
for (const unit of textUnits) {
if (this.isTextUnit(unit)) {
this.container.styleChild(unit, style, this.appId)
}
}
}
styleLines (style, lines) {
for (const unit of lines) {
if (this.isLine(unit)) {
this.container.styleChild(unit, style, this.appId)
}
}
}
getAllTextUnits() {
let result = []
for (const line of this.target.children) {
if (this.isLine(line)) {
for(const unit of line.children) {
if(this.isTextUnit(unit)) {
result.push(unit)
}
}
}
}
return result;
}
newTextUnitAtCursor () {
let cursor = this.cursor.get()
if (cursor && cursor.textUnit) {
if (cursor.textUnit.innerHTML.length == 0) {
this.#updateTextUnitDescriptorFromState();
this.container.styleChild(cursor.textUnit, this.textUnitDescriptor.computedStyle, this.appId)
} else {
let split = this.splitTextUnit(cursor.textUnit, cursor.localCharNumber)
this.makeNewTextChild(cursor.line, split[1]);
}
}
}
changeFontSize (delta) {
let selection = this.getSelected()
if (!selection || selection.units.size == 0) {
selection = {units:this.getAllTextUnits()}
}
        let modFunc = function(e) {
            //parseInt never throws, so guard against NaN instead of try/catch
            let fsize = parseInt(e, 10)
            if (isNaN(fsize)) {
                return e
            }
            return (fsize + delta) + "px"
        }
this.styleTextUnits({"font-size": modFunc}, selection.units)
this.cursorUpdateVisible(this.#cursorDiv)
this.styleTarget()
}
setFontSize (fontSize) {
let selection = this.getSelected()
if (!selection || selection.units.size == 0) {
this.newTextUnitAtCursor()
return;
}
this.styleTextUnits({"font-size": fontSize}, selection.units)
this.cursorUpdateVisible(this.#cursorDiv)
this.styleTarget()
}
uiSelectFontSize() {
let fsize = document.getElementById('ns-text-editor-font-select').value
this.state.fontSize = `${fsize}px`
this.setFontSize(this.state.fontSize)
}
fontUp (e) {
e.preventDefault();
this.changeFontSize(1)
        let fontSelect = document.getElementById('ns-text-editor-font-select')
        if (fontSelect && !isNaN(parseInt(fontSelect.value, 10))) {
            fontSelect.value = parseInt(fontSelect.value, 10) + 1
        }
this.styleTarget()
}
fontDown (e) {
e.preventDefault();
this.changeFontSize(-1)
        let fontSelect = document.getElementById('ns-text-editor-font-select')
        if (fontSelect && !isNaN(parseInt(fontSelect.value, 10))) {
            fontSelect.value = parseInt(fontSelect.value, 10) - 1
        }
this.styleTarget()
}
setFont(fontFam) {
this.state.fontFam = fontFam
let selection = this.getSelected()
if (selection && selection.units.size > 0) {
this.styleTextUnits({"font-family": fontFam}, selection.units);
} else {
this.newTextUnitAtCursor()
}
this.cursorUpdateVisible(this.#cursorDiv)
this.styleTarget()
}
align (alignment) {
let selection = this.getSelected()
if (!selection || selection.lines.size == 0) {
selection = {lines:this.target.children}
}
this.styleLines({"text-align": alignment, "text-justify": "inter-word"}, selection.lines)
this.cursorUpdateVisible(this.#cursorDiv)
}
bold () {
let toggleFunc = function(e) {
if (e == "bold") {
return "normal"
}
return "bold"
}
let selection = this.getSelected()
if (selection && selection.units.size > 0) {
let commonStyle = findCommonStyleSubset(this.container, selection.units)
let currentValue = commonStyle["font-weight"] || "normal"
this.styleTextUnits({"font-weight": toggleFunc(currentValue)}, selection.units)
} else {
this.state.bold = toggleFunc(this.state.bold);
this.newTextUnitAtCursor()
}
this.clearSelection()
this.cursorUpdateVisible(this.#cursorDiv)
this.styleTarget()
}
italic (textUnits) {
let toggleFunc = function(e) {
if (e == 'italic') {
return 'normal'
}
return 'italic'
}
let selection = this.getSelected()
if (selection && selection.units.size > 0) {
let commonStyle = findCommonStyleSubset(this.container, selection.units)
let currentValue = commonStyle["font-style"] || "normal"
this.styleTextUnits({"font-style": toggleFunc(currentValue)}, selection.units)
} else {
this.state.italic = toggleFunc(this.state.italic);
this.newTextUnitAtCursor()
}
this.clearSelection()
this.cursorUpdateVisible(this.#cursorDiv)
this.styleTarget()
}
underlined (textUnits) {
let toggleFunc = function(e) {
if (e == 'underline') {
return ''
}
return 'underline'
}
let selection = this.getSelected()
if (selection && selection.units.size > 0) {
let commonStyle = findCommonStyleSubset(this.container, selection.units)
let currentValue = commonStyle["text-decoration-line"] || ""
this.styleTextUnits({"text-decoration-line": toggleFunc(currentValue)}, selection.units)
} else {
this.state.underlined = toggleFunc(this.state.underlined);
this.newTextUnitAtCursor()
}
this.clearSelection()
this.cursorUpdateVisible(this.#cursorDiv)
this.styleTarget()
}
#setColor(clr) {
this.state.textColor = clr;
let selection = this.getSelected()
if (selection && selection.units.size > 0) {
this.styleTextUnits({"color": clr}, selection.units)
} else {
this.newTextUnitAtCursor()
}
}
setColor(e) {
this.#setColor(e.target.value)
}
#setHighlightColor(clr) {
this.state.highlightColor = clr;
let selection = this.getSelected()
if (selection && selection.units.size > 0) {
this.styleTextUnits({"background-color": clr}, selection.units)
}
}
setHighlightColor(e) {
this.#setHighlightColor(e.target.value)
}
changeFont(e) {
console.log(`${this.appId} setting font family to: ${e.target.value}`)
this.setFont(e.target.value)
}
updateInterface() {
document.getElementById('ns-text-editor-font').value = this.state.fontSize
}
} | #enabled = false
#handlers = {};
//shift select |
logger.go | package logger
import (
"context"
c "eventers-marketplace-backend/context"
"fmt"
"os"
"regexp"
"github.com/sirupsen/logrus"
)
type Logger struct {
logrus.Logger
}
var logger *logrus.Logger
const CorrelationId = "correlation_id"
func init() {
logger = logrus.New()
logger.SetOutput(os.Stdout)
}
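// Usage sketch (assuming the correlation id is stored directly under
// c.ContextKeyCorrelationID, which is what GetContextValue reads back;
// orderID is illustrative):
//
//	ctx := context.WithValue(context.Background(), c.ContextKeyCorrelationID, "abc-123")
//	Infof(ctx, "processing order %d", orderID)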
func Fatalf(ctx context.Context, format string, args ...interface{}) {
logger.WithField(CorrelationId, c.GetContextValue(ctx, c.ContextKeyCorrelationID)).Fatalf(format, args...)
}
func Infof(ctx context.Context, format string, args ...interface{}) {
logger.WithField(CorrelationId, c.GetContextValue(ctx, c.ContextKeyCorrelationID)).Infof(format, args...)
}
func Info(ctx context.Context, msg string) {
logger.WithField(CorrelationId, c.GetContextValue(ctx, c.ContextKeyCorrelationID)).Info(msg)
}
func Debugf(ctx context.Context, format string, args ...interface{}) {
formattedError := escapeString(format, args...)
logger.WithField(CorrelationId, c.GetContextValue(ctx, c.ContextKeyCorrelationID)).Debug(formattedError)
}
| logger.WithField(CorrelationId, c.GetContextValue(ctx, c.ContextKeyCorrelationID)).Warnf(format, args...)
}
func Errorf(ctx context.Context, format string, args ...interface{}) {
formattedError := escapeString(format, args...)
logger.WithField(CorrelationId, c.GetContextValue(ctx, c.ContextKeyCorrelationID)).Error(formattedError)
}
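// escapeString flattens the formatted message onto a single line by replacing
// CR/LF sequences, so each entry stays one record in line-oriented log
// collectors and injected newlines cannot forge extra log lines.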
func escapeString(format string, args ...interface{}) string {
errorMessage := fmt.Sprintf(format, args...)
re := regexp.MustCompile(`(\n)|(\r\n)`)
formattedError := re.ReplaceAllString(errorMessage, "\\n ")
return formattedError
} | func Warnf(ctx context.Context, format string, args ...interface{}) { |
azure_standard_test.go | // +build !providerless
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"fmt"
"net/http"
"strconv"
"testing"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute"
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network"
"github.com/Azure/go-autorest/autorest/to"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
cloudprovider "k8s.io/cloud-provider"
"k8s.io/legacy-cloud-providers/azure/clients/interfaceclient/mockinterfaceclient"
"k8s.io/legacy-cloud-providers/azure/clients/publicipclient/mockpublicipclient"
"k8s.io/legacy-cloud-providers/azure/clients/vmclient/mockvmclient"
"k8s.io/legacy-cloud-providers/azure/retry"
)
const (
networkResourceTenantID = "networkResourceTenantID"
networkResourceSubscriptionID = "networkResourceSubscriptionID"
)
func TestIsMasterNode(t *testing.T) {
if isMasterNode(&v1.Node{}) {
t.Errorf("Empty node should not be master!")
}
if isMasterNode(&v1.Node{
ObjectMeta: meta.ObjectMeta{
Labels: map[string]string{
nodeLabelRole: "worker",
},
},
}) {
t.Errorf("Node labelled 'worker' should not be master!")
}
if !isMasterNode(&v1.Node{
ObjectMeta: meta.ObjectMeta{
Labels: map[string]string{
nodeLabelRole: "master",
},
},
}) {
t.Errorf("Node should be master!")
}
}
func TestGetLastSegment(t *testing.T) {
tests := []struct {
ID string
separator string
expected string
expectErr bool
}{
{
ID: "",
separator: "/",
expected: "",
expectErr: true,
},
{
ID: "foo/",
separator: "/",
expected: "",
expectErr: true,
},
{
ID: "foo/bar",
separator: "/",
expected: "bar",
expectErr: false,
},
{
ID: "foo/bar/baz",
separator: "/",
expected: "baz",
expectErr: false,
},
{
ID: "k8s-agentpool-36841236-vmss_1",
separator: "_",
expected: "1",
expectErr: false,
},
}
for _, test := range tests {
s, e := getLastSegment(test.ID, test.separator)
if test.expectErr && e == nil {
t.Errorf("Expected err, but it was nil")
continue
}
if !test.expectErr && e != nil {
t.Errorf("Unexpected error: %v", e)
continue
}
if s != test.expected {
t.Errorf("expected: %s, got %s", test.expected, s)
}
}
}
func TestGenerateStorageAccountName(t *testing.T) {
tests := []struct {
prefix string
}{
{
prefix: "",
},
{
prefix: "pvc",
},
{
prefix: "1234512345123451234512345",
},
}
for _, test := range tests {
accountName := generateStorageAccountName(test.prefix)
if len(accountName) > storageAccountNameMaxLength || len(accountName) < 3 {
t.Errorf("input prefix: %s, output account name: %s, length not in [3,%d]", test.prefix, accountName, storageAccountNameMaxLength)
}
for _, char := range accountName {
if (char < 'a' || char > 'z') && (char < '0' || char > '9') {
t.Errorf("input prefix: %s, output account name: %s, there is non-digit or non-letter(%q)", test.prefix, accountName, char)
break
}
}
}
}
func TestMapLoadBalancerNameToVMSet(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
az := GetTestCloud(ctrl)
az.PrimaryAvailabilitySetName = "primary"
cases := []struct {
description string
lbName string
useStandardLB bool
clusterName string
expectedVMSet string
}{
{
description: "default external LB should map to primary vmset",
lbName: "azure",
clusterName: "azure",
expectedVMSet: "primary",
},
{
description: "default internal LB should map to primary vmset",
lbName: "azure-internal",
clusterName: "azure",
expectedVMSet: "primary",
},
{
description: "non-default external LB should map to its own vmset",
			lbName:        "azuretest",
clusterName: "azure",
expectedVMSet: "azuretest",
},
{
description: "non-default internal LB should map to its own vmset",
lbName: "azuretest-internal",
clusterName: "azure",
expectedVMSet: "azuretest",
},
}
for _, c := range cases {
if c.useStandardLB {
az.Config.LoadBalancerSku = loadBalancerSkuStandard
} else {
az.Config.LoadBalancerSku = loadBalancerSkuBasic
}
vmset := az.mapLoadBalancerNameToVMSet(c.lbName, c.clusterName)
assert.Equal(t, c.expectedVMSet, vmset, c.description)
}
}
func TestGetAzureLoadBalancerName(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
az := GetTestCloud(ctrl)
az.PrimaryAvailabilitySetName = "primary"
cases := []struct {
description string
vmSet string
isInternal bool
useStandardLB bool
clusterName string
lbName string
expected string
}{
{
description: "prefix of loadBalancerName should be az.LoadBalancerName if az.LoadBalancerName is not nil",
vmSet: "primary",
clusterName: "azure",
lbName: "azurelb",
expected: "azurelb",
},
{
description: "default external LB should get primary vmset",
vmSet: "primary",
clusterName: "azure",
expected: "azure",
},
{
description: "default internal LB should get primary vmset",
vmSet: "primary",
clusterName: "azure",
isInternal: true,
expected: "azure-internal",
},
{
description: "non-default external LB should get its own vmset",
vmSet: "as",
clusterName: "azure",
expected: "as",
},
{
description: "non-default internal LB should get its own vmset",
vmSet: "as",
clusterName: "azure",
isInternal: true,
expected: "as-internal",
},
{
description: "default standard external LB should get cluster name",
vmSet: "primary",
useStandardLB: true,
clusterName: "azure",
expected: "azure",
},
{
description: "default standard internal LB should get cluster name",
vmSet: "primary",
useStandardLB: true,
isInternal: true,
clusterName: "azure",
expected: "azure-internal",
},
{
description: "non-default standard external LB should get cluster-name",
vmSet: "as",
useStandardLB: true,
clusterName: "azure",
expected: "azure",
},
{
description: "non-default standard internal LB should get cluster-name",
vmSet: "as",
useStandardLB: true,
isInternal: true,
clusterName: "azure",
expected: "azure-internal",
},
}
for _, c := range cases {
if c.useStandardLB {
az.Config.LoadBalancerSku = loadBalancerSkuStandard
} else {
az.Config.LoadBalancerSku = loadBalancerSkuBasic
}
az.Config.LoadBalancerName = c.lbName
loadbalancerName := az.getAzureLoadBalancerName(c.clusterName, c.vmSet, c.isInternal)
assert.Equal(t, c.expected, loadbalancerName, c.description)
}
}
func TestGetLoadBalancingRuleName(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
az := GetTestCloud(ctrl)
az.PrimaryAvailabilitySetName = "primary"
svc := &v1.Service{
ObjectMeta: meta.ObjectMeta{
Annotations: map[string]string{},
UID: "257b9655-5137-4ad2-b091-ef3f07043ad3",
},
}
cases := []struct {
description string
subnetName string
isInternal bool
useStandardLB bool
protocol v1.Protocol
port int32
expected string
}{
{
description: "internal lb should have subnet name on the rule name",
subnetName: "shortsubnet",
isInternal: true,
useStandardLB: true,
protocol: v1.ProtocolTCP,
port: 9000,
expected: "a257b965551374ad2b091ef3f07043ad-shortsubnet-TCP-9000",
},
{
description: "internal standard lb should have subnet name on the rule name but truncated to 80 characters",
subnetName: "averylonnnngggnnnnnnnnnnnnnnnnnnnnnngggggggggggggggggggggggggggggggggggggsubet",
isInternal: true,
useStandardLB: true,
protocol: v1.ProtocolTCP,
port: 9000,
expected: "a257b965551374ad2b091ef3f07043ad-averylonnnngggnnnnnnnnnnnnnnnnnnnnnngg-TCP-9000",
},
{
description: "internal basic lb should have subnet name on the rule name but truncated to 80 characters",
subnetName: "averylonnnngggnnnnnnnnnnnnnnnnnnnnnngggggggggggggggggggggggggggggggggggggsubet",
isInternal: true,
useStandardLB: false,
protocol: v1.ProtocolTCP,
port: 9000,
expected: "a257b965551374ad2b091ef3f07043ad-averylonnnngggnnnnnnnnnnnnnnnnnnnnnngg-TCP-9000",
},
{
description: "external standard lb should not have subnet name on the rule name",
subnetName: "shortsubnet",
isInternal: false,
useStandardLB: true,
protocol: v1.ProtocolTCP,
port: 9000,
expected: "a257b965551374ad2b091ef3f07043ad-TCP-9000",
},
{
description: "external basic lb should not have subnet name on the rule name",
subnetName: "shortsubnet",
isInternal: false,
useStandardLB: false,
protocol: v1.ProtocolTCP,
port: 9000,
expected: "a257b965551374ad2b091ef3f07043ad-TCP-9000",
},
}
for _, c := range cases {
if c.useStandardLB {
az.Config.LoadBalancerSku = loadBalancerSkuStandard
} else {
az.Config.LoadBalancerSku = loadBalancerSkuBasic
}
svc.Annotations[ServiceAnnotationLoadBalancerInternalSubnet] = c.subnetName
svc.Annotations[ServiceAnnotationLoadBalancerInternal] = strconv.FormatBool(c.isInternal)
loadbalancerRuleName := az.getLoadBalancerRuleName(svc, c.protocol, c.port)
assert.Equal(t, c.expected, loadbalancerRuleName, c.description)
}
}
func TestGetFrontendIPConfigName(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
az := GetTestCloud(ctrl)
az.PrimaryAvailabilitySetName = "primary"
svc := &v1.Service{
ObjectMeta: meta.ObjectMeta{
Annotations: map[string]string{
ServiceAnnotationLoadBalancerInternalSubnet: "subnet",
ServiceAnnotationLoadBalancerInternal: "true",
},
UID: "257b9655-5137-4ad2-b091-ef3f07043ad3",
},
}
cases := []struct {
description string
subnetName string
isInternal bool
useStandardLB bool
expected string
}{
{
description: "internal lb should have subnet name on the frontend ip configuration name",
subnetName: "shortsubnet",
isInternal: true,
useStandardLB: true,
expected: "a257b965551374ad2b091ef3f07043ad-shortsubnet",
},
{
description: "internal standard lb should have subnet name on the frontend ip configuration name but truncated to 80 characters",
subnetName: "averylonnnngggnnnnnnnnnnnnnnnnnnnnnngggggggggggggggggggggggggggggggggggggsubet",
isInternal: true,
useStandardLB: true,
expected: "a257b965551374ad2b091ef3f07043ad-averylonnnngggnnnnnnnnnnnnnnnnnnnnnnggggggggggg",
},
{
description: "internal basic lb should have subnet name on the frontend ip configuration name but truncated to 80 characters",
subnetName: "averylonnnngggnnnnnnnnnnnnnnnnnnnnnngggggggggggggggggggggggggggggggggggggsubet",
isInternal: true,
useStandardLB: false,
expected: "a257b965551374ad2b091ef3f07043ad-averylonnnngggnnnnnnnnnnnnnnnnnnnnnnggggggggggg",
},
{
description: "external standard lb should not have subnet name on the frontend ip configuration name",
subnetName: "shortsubnet",
isInternal: false,
useStandardLB: true,
expected: "a257b965551374ad2b091ef3f07043ad",
},
{
description: "external basic lb should not have subnet name on the frontend ip configuration name",
subnetName: "shortsubnet",
isInternal: false,
useStandardLB: false,
expected: "a257b965551374ad2b091ef3f07043ad",
},
}
for _, c := range cases {
if c.useStandardLB {
az.Config.LoadBalancerSku = loadBalancerSkuStandard
} else {
az.Config.LoadBalancerSku = loadBalancerSkuBasic
}
svc.Annotations[ServiceAnnotationLoadBalancerInternalSubnet] = c.subnetName
svc.Annotations[ServiceAnnotationLoadBalancerInternal] = strconv.FormatBool(c.isInternal)
ipconfigName := az.getDefaultFrontendIPConfigName(svc)
assert.Equal(t, c.expected, ipconfigName, c.description)
}
}
func TestGetFrontendIPConfigID(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
az := GetTestCloud(ctrl)
testGetLoadBalancerSubResourceID(t, az, az.getFrontendIPConfigID, frontendIPConfigIDTemplate)
}
func TestGetBackendPoolID(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
az := GetTestCloud(ctrl)
testGetLoadBalancerSubResourceID(t, az, az.getBackendPoolID, backendPoolIDTemplate)
}
func | (t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
az := GetTestCloud(ctrl)
testGetLoadBalancerSubResourceID(t, az, az.getLoadBalancerProbeID, loadBalancerProbeIDTemplate)
}
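// testGetLoadBalancerSubResourceID is the shared body for the frontend IP
// config, backend pool and probe ID tests above: it runs the same cases
// through the supplied ID-builder function and checks the result against the
// expected template, once with the cluster subscription and once with a
// separate network-resource subscription.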
func testGetLoadBalancerSubResourceID(
t *testing.T,
az *Cloud,
getLoadBalancerSubResourceID func(string, string, string) string,
expectedResourceIDTemplate string) {
cases := []struct {
description string
loadBalancerName string
resourceGroupName string
subResourceName string
useNetworkResourceInDifferentTenant bool
expected string
}{
{
description: "resource id should contain NetworkResourceSubscriptionID when using network resources in different subscription",
loadBalancerName: "lbName",
resourceGroupName: "rgName",
subResourceName: "subResourceName",
useNetworkResourceInDifferentTenant: true,
},
{
description: "resource id should contain SubscriptionID when not using network resources in different subscription",
loadBalancerName: "lbName",
resourceGroupName: "rgName",
subResourceName: "subResourceName",
useNetworkResourceInDifferentTenant: false,
},
}
for _, c := range cases {
if c.useNetworkResourceInDifferentTenant {
az.NetworkResourceTenantID = networkResourceTenantID
az.NetworkResourceSubscriptionID = networkResourceSubscriptionID
c.expected = fmt.Sprintf(
expectedResourceIDTemplate,
az.NetworkResourceSubscriptionID,
c.resourceGroupName,
c.loadBalancerName,
c.subResourceName)
} else {
az.NetworkResourceTenantID = ""
az.NetworkResourceSubscriptionID = ""
c.expected = fmt.Sprintf(
expectedResourceIDTemplate,
az.SubscriptionID,
c.resourceGroupName,
c.loadBalancerName,
c.subResourceName)
}
subResourceID := getLoadBalancerSubResourceID(c.loadBalancerName, c.resourceGroupName, c.subResourceName)
assert.Equal(t, c.expected, subResourceID, c.description)
}
}
func TestGetProtocolsFromKubernetesProtocol(t *testing.T) {
testcases := []struct {
Name string
protocol v1.Protocol
expectedTransportProto network.TransportProtocol
expectedSecurityGroupProto network.SecurityRuleProtocol
expectedProbeProto network.ProbeProtocol
nilProbeProto bool
expectedErrMsg error
}{
{
Name: "getProtocolsFromKubernetesProtocol should get TCP protocol",
protocol: v1.ProtocolTCP,
expectedTransportProto: network.TransportProtocolTCP,
expectedSecurityGroupProto: network.SecurityRuleProtocolTCP,
expectedProbeProto: network.ProbeProtocolTCP,
},
{
Name: "getProtocolsFromKubernetesProtocol should get UDP protocol",
protocol: v1.ProtocolUDP,
expectedTransportProto: network.TransportProtocolUDP,
expectedSecurityGroupProto: network.SecurityRuleProtocolUDP,
nilProbeProto: true,
},
{
Name: "getProtocolsFromKubernetesProtocol should report error",
protocol: v1.ProtocolSCTP,
expectedErrMsg: fmt.Errorf("only TCP and UDP are supported for Azure LoadBalancers"),
},
}
for _, test := range testcases {
transportProto, securityGroupProto, probeProto, err := getProtocolsFromKubernetesProtocol(test.protocol)
assert.Equal(t, test.expectedTransportProto, *transportProto, test.Name)
assert.Equal(t, test.expectedSecurityGroupProto, *securityGroupProto, test.Name)
if test.nilProbeProto {
assert.Nil(t, probeProto, test.Name)
} else {
assert.Equal(t, test.expectedProbeProto, *probeProto, test.Name)
}
assert.Equal(t, test.expectedErrMsg, err, test.Name)
}
}
func TestGetStandardVMPrimaryInterfaceID(t *testing.T) {
testcases := []struct {
name string
vm compute.VirtualMachine
expectedNicID string
expectedErrMsg error
}{
{
name: "GetPrimaryInterfaceID should get the only NIC ID",
vm: compute.VirtualMachine{
Name: to.StringPtr("vm1"),
VirtualMachineProperties: &compute.VirtualMachineProperties{
NetworkProfile: &compute.NetworkProfile{
NetworkInterfaces: &[]compute.NetworkInterfaceReference{
{
ID: to.StringPtr("/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/networkInterfaces/nic"),
},
},
},
},
},
expectedNicID: "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/networkInterfaces/nic",
},
{
name: "GetPrimaryInterfaceID should get primary NIC ID",
vm: compute.VirtualMachine{
Name: to.StringPtr("vm2"),
VirtualMachineProperties: &compute.VirtualMachineProperties{
NetworkProfile: &compute.NetworkProfile{
NetworkInterfaces: &[]compute.NetworkInterfaceReference{
{
NetworkInterfaceReferenceProperties: &compute.NetworkInterfaceReferenceProperties{
Primary: to.BoolPtr(true),
},
ID: to.StringPtr("/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/networkInterfaces/nic1"),
},
{
NetworkInterfaceReferenceProperties: &compute.NetworkInterfaceReferenceProperties{
Primary: to.BoolPtr(false),
},
ID: to.StringPtr("/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/networkInterfaces/nic2"),
},
},
},
},
},
expectedNicID: "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/networkInterfaces/nic1",
},
{
			name: "GetPrimaryInterfaceID should report error if the node doesn't have a primary NIC",
vm: compute.VirtualMachine{
Name: to.StringPtr("vm3"),
VirtualMachineProperties: &compute.VirtualMachineProperties{
NetworkProfile: &compute.NetworkProfile{
NetworkInterfaces: &[]compute.NetworkInterfaceReference{
{
NetworkInterfaceReferenceProperties: &compute.NetworkInterfaceReferenceProperties{
Primary: to.BoolPtr(false),
},
ID: to.StringPtr("/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/networkInterfaces/nic1"),
},
{
NetworkInterfaceReferenceProperties: &compute.NetworkInterfaceReferenceProperties{
Primary: to.BoolPtr(false),
},
ID: to.StringPtr("/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/networkInterfaces/nic2"),
},
},
},
},
},
expectedErrMsg: fmt.Errorf("failed to find a primary nic for the vm. vmname=%q", "vm3"),
},
}
for _, test := range testcases {
primaryNicID, err := getPrimaryInterfaceID(test.vm)
assert.Equal(t, test.expectedNicID, primaryNicID, test.name)
assert.Equal(t, test.expectedErrMsg, err, test.name)
}
}
func TestGetPrimaryIPConfig(t *testing.T) {
testcases := []struct {
name string
nic network.Interface
expectedIPConfig *network.InterfaceIPConfiguration
expectedErrMsg error
}{
{
name: "GetPrimaryIPConfig should get the only IP configuration",
nic: network.Interface{
Name: to.StringPtr("nic"),
InterfacePropertiesFormat: &network.InterfacePropertiesFormat{
IPConfigurations: &[]network.InterfaceIPConfiguration{
{
Name: to.StringPtr("ipconfig1"),
},
},
},
},
expectedIPConfig: &network.InterfaceIPConfiguration{
Name: to.StringPtr("ipconfig1"),
},
},
{
name: "GetPrimaryIPConfig should get the primary IP configuration",
nic: network.Interface{
Name: to.StringPtr("nic"),
InterfacePropertiesFormat: &network.InterfacePropertiesFormat{
IPConfigurations: &[]network.InterfaceIPConfiguration{
{
Name: to.StringPtr("ipconfig1"),
InterfaceIPConfigurationPropertiesFormat: &network.InterfaceIPConfigurationPropertiesFormat{
Primary: to.BoolPtr(true),
},
},
{
Name: to.StringPtr("ipconfig2"),
InterfaceIPConfigurationPropertiesFormat: &network.InterfaceIPConfigurationPropertiesFormat{
Primary: to.BoolPtr(false),
},
},
},
},
},
expectedIPConfig: &network.InterfaceIPConfiguration{
Name: to.StringPtr("ipconfig1"),
InterfaceIPConfigurationPropertiesFormat: &network.InterfaceIPConfigurationPropertiesFormat{
Primary: to.BoolPtr(true),
},
},
},
{
			name: "GetPrimaryIPConfig should report error if the nic doesn't have an IP configuration",
nic: network.Interface{
Name: to.StringPtr("nic"),
InterfacePropertiesFormat: &network.InterfacePropertiesFormat{},
},
expectedErrMsg: fmt.Errorf("nic.IPConfigurations for nic (nicname=%q) is nil", "nic"),
},
{
			name: "GetPrimaryIPConfig should report error if the node has more than one IP configuration and doesn't have a primary IP configuration",
nic: network.Interface{
Name: to.StringPtr("nic"),
InterfacePropertiesFormat: &network.InterfacePropertiesFormat{
IPConfigurations: &[]network.InterfaceIPConfiguration{
{
Name: to.StringPtr("ipconfig1"),
InterfaceIPConfigurationPropertiesFormat: &network.InterfaceIPConfigurationPropertiesFormat{
Primary: to.BoolPtr(false),
},
},
{
Name: to.StringPtr("ipconfig2"),
InterfaceIPConfigurationPropertiesFormat: &network.InterfaceIPConfigurationPropertiesFormat{
Primary: to.BoolPtr(false),
},
},
},
},
},
expectedErrMsg: fmt.Errorf("failed to determine the primary ipconfig. nicname=%q", "nic"),
},
}
for _, test := range testcases {
primaryIPConfig, err := getPrimaryIPConfig(test.nic)
assert.Equal(t, test.expectedIPConfig, primaryIPConfig, test.name)
assert.Equal(t, test.expectedErrMsg, err, test.name)
}
}
func TestGetIPConfigByIPFamily(t *testing.T) {
ipv4IPconfig := network.InterfaceIPConfiguration{
Name: to.StringPtr("ipconfig1"),
InterfaceIPConfigurationPropertiesFormat: &network.InterfaceIPConfigurationPropertiesFormat{
PrivateIPAddressVersion: network.IPv4,
PrivateIPAddress: to.StringPtr("10.10.0.12"),
},
}
ipv6IPconfig := network.InterfaceIPConfiguration{
Name: to.StringPtr("ipconfig2"),
InterfaceIPConfigurationPropertiesFormat: &network.InterfaceIPConfigurationPropertiesFormat{
PrivateIPAddressVersion: network.IPv6,
PrivateIPAddress: to.StringPtr("1111:11111:00:00:1111:1111:000:111"),
},
}
testNic := network.Interface{
Name: to.StringPtr("nic"),
InterfacePropertiesFormat: &network.InterfacePropertiesFormat{
IPConfigurations: &[]network.InterfaceIPConfiguration{ipv4IPconfig, ipv6IPconfig},
},
}
testcases := []struct {
name string
nic network.Interface
expectedIPConfig *network.InterfaceIPConfiguration
IPv6 bool
expectedErrMsg error
}{
{
			name:             "GetIPConfigByIPFamily should get the IPv4 IP configuration if IPv6 is false",
nic: testNic,
expectedIPConfig: &ipv4IPconfig,
},
{
			name:             "GetIPConfigByIPFamily should get the IPv6 IP configuration if IPv6 is true",
nic: testNic,
IPv6: true,
expectedIPConfig: &ipv6IPconfig,
},
{
			name:             "GetIPConfigByIPFamily should report error if the nic doesn't have an IP configuration",
nic: network.Interface{
Name: to.StringPtr("nic"),
InterfacePropertiesFormat: &network.InterfacePropertiesFormat{},
},
expectedErrMsg: fmt.Errorf("nic.IPConfigurations for nic (nicname=%q) is nil", "nic"),
},
{
			name:             "GetIPConfigByIPFamily should report error if the nic doesn't have an IPv6 configuration when IPv6 is true",
nic: network.Interface{
Name: to.StringPtr("nic"),
InterfacePropertiesFormat: &network.InterfacePropertiesFormat{
IPConfigurations: &[]network.InterfaceIPConfiguration{ipv4IPconfig},
},
},
IPv6: true,
expectedErrMsg: fmt.Errorf("failed to determine the ipconfig(IPv6=%v). nicname=%q", true, "nic"),
},
{
			name:             "GetIPConfigByIPFamily should report error if the nic doesn't have a PrivateIPAddress",
nic: network.Interface{
Name: to.StringPtr("nic"),
InterfacePropertiesFormat: &network.InterfacePropertiesFormat{
IPConfigurations: &[]network.InterfaceIPConfiguration{
{
Name: to.StringPtr("ipconfig1"),
InterfaceIPConfigurationPropertiesFormat: &network.InterfaceIPConfigurationPropertiesFormat{
PrivateIPAddressVersion: network.IPv4,
},
},
},
},
},
expectedErrMsg: fmt.Errorf("failed to determine the ipconfig(IPv6=%v). nicname=%q", false, "nic"),
},
}
for _, test := range testcases {
ipConfig, err := getIPConfigByIPFamily(test.nic, test.IPv6)
assert.Equal(t, test.expectedIPConfig, ipConfig, test.name)
assert.Equal(t, test.expectedErrMsg, err, test.name)
}
}
func TestGetBackendPoolName(t *testing.T) {
testcases := []struct {
name string
service v1.Service
clusterName string
expectedPoolName string
}{
{
name: "GetBackendPoolName should return <clusterName>-IPv6",
service: getTestService("test1", v1.ProtocolTCP, nil, true, 80),
clusterName: "azure",
expectedPoolName: "azure-IPv6",
},
{
name: "GetBackendPoolName should return <clusterName>",
service: getTestService("test1", v1.ProtocolTCP, nil, false, 80),
clusterName: "azure",
expectedPoolName: "azure",
},
}
for _, test := range testcases {
backPoolName := getBackendPoolName(test.clusterName, &test.service)
assert.Equal(t, test.expectedPoolName, backPoolName, test.name)
}
}
func TestGetStandardInstanceIDByNodeName(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
cloud := GetTestCloud(ctrl)
expectedVM := compute.VirtualMachine{
Name: to.StringPtr("vm1"),
ID: to.StringPtr("/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm1"),
}
	invalidResourceID := "/subscriptions/subscription/resourceGroups/rg/Microsoft.Compute/virtualMachines/vm4"
testcases := []struct {
name string
nodeName string
expectedID string
expectedErrMsg error
}{
{
name: "GetInstanceIDByNodeName should get instanceID as expected",
nodeName: "vm1",
expectedID: "/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm1",
},
{
			name:           "GetInstanceIDByNodeName should report error if the node doesn't exist",
nodeName: "vm2",
expectedErrMsg: fmt.Errorf("instance not found"),
},
{
			name:           "GetInstanceIDByNodeName should report error if an error is encountered when invoking mockVMClient.Get",
nodeName: "vm3",
expectedErrMsg: fmt.Errorf("Retriable: false, RetryAfter: 0s, HTTPStatusCode: 500, RawError: VMGet error"),
},
{
name: "GetInstanceIDByNodeName should report error if ResourceID is invalid",
nodeName: "vm4",
			expectedErrMsg: fmt.Errorf("%q isn't in Azure resource ID format %q", invalidResourceID, azureResourceGroupNameRE.String()),
},
}
for _, test := range testcases {
mockVMClient := cloud.VirtualMachinesClient.(*mockvmclient.MockInterface)
mockVMClient.EXPECT().Get(gomock.Any(), cloud.ResourceGroup, "vm1", gomock.Any()).Return(expectedVM, nil).AnyTimes()
mockVMClient.EXPECT().Get(gomock.Any(), cloud.ResourceGroup, "vm2", gomock.Any()).Return(compute.VirtualMachine{}, &retry.Error{HTTPStatusCode: http.StatusNotFound, RawError: cloudprovider.InstanceNotFound}).AnyTimes()
mockVMClient.EXPECT().Get(gomock.Any(), cloud.ResourceGroup, "vm3", gomock.Any()).Return(compute.VirtualMachine{}, &retry.Error{
HTTPStatusCode: http.StatusInternalServerError,
RawError: fmt.Errorf("VMGet error"),
}).AnyTimes()
mockVMClient.EXPECT().Get(gomock.Any(), cloud.ResourceGroup, "vm4", gomock.Any()).Return(compute.VirtualMachine{
Name: to.StringPtr("vm4"),
			ID:   to.StringPtr(invalidResourceID),
}, nil).AnyTimes()
instanceID, err := cloud.VMSet.GetInstanceIDByNodeName(test.nodeName)
assert.Equal(t, test.expectedErrMsg, err, test.name)
assert.Equal(t, test.expectedID, instanceID, test.name)
}
}
func TestGetStandardVMPowerStatusByNodeName(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
cloud := GetTestCloud(ctrl)
testcases := []struct {
name string
nodeName string
vm compute.VirtualMachine
expectedStatus string
getErr *retry.Error
expectedErrMsg error
}{
{
			name:     "GetPowerStatusByNodeName should report error if the node doesn't exist",
nodeName: "vm1",
vm: compute.VirtualMachine{},
getErr: &retry.Error{
HTTPStatusCode: http.StatusNotFound,
RawError: cloudprovider.InstanceNotFound,
},
expectedErrMsg: fmt.Errorf("instance not found"),
},
{
name: "GetPowerStatusByNodeName should get power status as expected",
nodeName: "vm2",
vm: compute.VirtualMachine{
Name: to.StringPtr("vm2"),
VirtualMachineProperties: &compute.VirtualMachineProperties{
InstanceView: &compute.VirtualMachineInstanceView{
Statuses: &[]compute.InstanceViewStatus{
{
Code: to.StringPtr("PowerState/Running"),
},
},
},
},
},
expectedStatus: "Running",
},
{
name: "GetPowerStatusByNodeName should get vmPowerStateStopped if vm.InstanceView is nil",
nodeName: "vm3",
vm: compute.VirtualMachine{
Name: to.StringPtr("vm3"),
VirtualMachineProperties: &compute.VirtualMachineProperties{},
},
expectedStatus: vmPowerStateStopped,
},
{
name: "GetPowerStatusByNodeName should get vmPowerStateStopped if vm.InstanceView.statuses is nil",
nodeName: "vm4",
vm: compute.VirtualMachine{
Name: to.StringPtr("vm4"),
VirtualMachineProperties: &compute.VirtualMachineProperties{
InstanceView: &compute.VirtualMachineInstanceView{},
},
},
expectedStatus: vmPowerStateStopped,
},
}
for _, test := range testcases {
mockVMClient := cloud.VirtualMachinesClient.(*mockvmclient.MockInterface)
mockVMClient.EXPECT().Get(gomock.Any(), cloud.ResourceGroup, test.nodeName, gomock.Any()).Return(test.vm, test.getErr).AnyTimes()
powerState, err := cloud.VMSet.GetPowerStatusByNodeName(test.nodeName)
assert.Equal(t, test.expectedErrMsg, err, test.name)
assert.Equal(t, test.expectedStatus, powerState, test.name)
}
}
func TestGetStandardVMZoneByNodeName(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
cloud := GetTestCloud(ctrl)
var faultDomain int32 = 3
testcases := []struct {
name string
nodeName string
vm compute.VirtualMachine
expectedZone cloudprovider.Zone
getErr *retry.Error
expectedErrMsg error
}{
{
			name:     "GetZoneByNodeName should report error if the node doesn't exist",
nodeName: "vm1",
vm: compute.VirtualMachine{},
getErr: &retry.Error{
HTTPStatusCode: http.StatusNotFound,
RawError: cloudprovider.InstanceNotFound,
},
expectedErrMsg: fmt.Errorf("instance not found"),
},
{
name: "GetZoneByNodeName should get zone as expected",
nodeName: "vm2",
vm: compute.VirtualMachine{
Name: to.StringPtr("vm2"),
Location: to.StringPtr("EASTUS"),
Zones: &[]string{"2"},
VirtualMachineProperties: &compute.VirtualMachineProperties{
InstanceView: &compute.VirtualMachineInstanceView{
PlatformFaultDomain: &faultDomain,
},
},
},
expectedZone: cloudprovider.Zone{
FailureDomain: "eastus-2",
Region: "eastus",
},
},
{
name: "GetZoneByNodeName should get FailureDomain as zone if zone is not used for node",
nodeName: "vm3",
vm: compute.VirtualMachine{
Name: to.StringPtr("vm3"),
Location: to.StringPtr("EASTUS"),
VirtualMachineProperties: &compute.VirtualMachineProperties{
InstanceView: &compute.VirtualMachineInstanceView{
PlatformFaultDomain: &faultDomain,
},
},
},
expectedZone: cloudprovider.Zone{
FailureDomain: "3",
Region: "eastus",
},
},
{
name: "GetZoneByNodeName should report error if zones is invalid",
nodeName: "vm4",
vm: compute.VirtualMachine{
Name: to.StringPtr("vm4"),
Location: to.StringPtr("EASTUS"),
Zones: &[]string{"a"},
VirtualMachineProperties: &compute.VirtualMachineProperties{
InstanceView: &compute.VirtualMachineInstanceView{
PlatformFaultDomain: &faultDomain,
},
},
},
expectedErrMsg: fmt.Errorf("failed to parse zone %q: strconv.Atoi: parsing %q: invalid syntax", []string{"a"}, "a"),
},
}
for _, test := range testcases {
mockVMClient := cloud.VirtualMachinesClient.(*mockvmclient.MockInterface)
mockVMClient.EXPECT().Get(gomock.Any(), cloud.ResourceGroup, test.nodeName, gomock.Any()).Return(test.vm, test.getErr).AnyTimes()
zone, err := cloud.VMSet.GetZoneByNodeName(test.nodeName)
assert.Equal(t, test.expectedErrMsg, err, test.name)
assert.Equal(t, test.expectedZone, zone, test.name)
}
}
func TestGetStandardVMSetNames(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
cloud := GetTestCloud(ctrl)
asID := "/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/availabilitySets/myAvailabilitySet"
testVM := compute.VirtualMachine{
Name: to.StringPtr("vm1"),
VirtualMachineProperties: &compute.VirtualMachineProperties{
AvailabilitySet: &compute.SubResource{ID: to.StringPtr(asID)},
},
}
testVMWithoutAS := compute.VirtualMachine{
Name: to.StringPtr("vm2"),
VirtualMachineProperties: &compute.VirtualMachineProperties{},
}
testCases := []struct {
name string
vm []compute.VirtualMachine
service *v1.Service
nodes []*v1.Node
expectedVMSetNames *[]string
expectedErrMsg error
}{
{
name: "GetVMSetNames should return the primary vm set name if the service has no mode annotation",
vm: []compute.VirtualMachine{testVM},
service: &v1.Service{},
expectedVMSetNames: &[]string{"as"},
},
{
name: "GetVMSetNames should return the correct as names if the service has auto mode annotation",
vm: []compute.VirtualMachine{testVM},
service: &v1.Service{
ObjectMeta: meta.ObjectMeta{Annotations: map[string]string{ServiceAnnotationLoadBalancerMode: ServiceAnnotationLoadBalancerAutoModeValue}},
},
nodes: []*v1.Node{
{
ObjectMeta: meta.ObjectMeta{
Name: "vm1",
},
},
},
expectedVMSetNames: &[]string{"myavailabilityset"},
},
{
			name: "GetVMSetNames should report an error if the node doesn't have an availability set",
vm: []compute.VirtualMachine{testVMWithoutAS},
service: &v1.Service{
ObjectMeta: meta.ObjectMeta{Annotations: map[string]string{ServiceAnnotationLoadBalancerMode: ServiceAnnotationLoadBalancerAutoModeValue}},
},
nodes: []*v1.Node{
{
ObjectMeta: meta.ObjectMeta{
Name: "vm2",
},
},
},
expectedErrMsg: fmt.Errorf("node (vm2) - has no availability sets"),
},
{
name: "GetVMSetNames should report the error if there's no such availability set",
vm: []compute.VirtualMachine{testVM},
service: &v1.Service{
ObjectMeta: meta.ObjectMeta{Annotations: map[string]string{ServiceAnnotationLoadBalancerMode: "vm2"}},
},
nodes: []*v1.Node{
{
ObjectMeta: meta.ObjectMeta{
Name: "vm1",
},
},
},
expectedErrMsg: fmt.Errorf("availability set (vm2) - not found"),
},
{
			name: "GetVMSetNames should return the correct vm set names if the service has an availability set mode annotation",
vm: []compute.VirtualMachine{testVM},
service: &v1.Service{
ObjectMeta: meta.ObjectMeta{Annotations: map[string]string{ServiceAnnotationLoadBalancerMode: "myAvailabilitySet"}},
},
nodes: []*v1.Node{
{
ObjectMeta: meta.ObjectMeta{
Name: "vm1",
},
},
},
expectedVMSetNames: &[]string{"myavailabilityset"},
},
}
for _, test := range testCases {
mockVMClient := cloud.VirtualMachinesClient.(*mockvmclient.MockInterface)
mockVMClient.EXPECT().List(gomock.Any(), cloud.ResourceGroup).Return(test.vm, nil).AnyTimes()
vmSetNames, err := cloud.VMSet.GetVMSetNames(test.service, test.nodes)
assert.Equal(t, test.expectedErrMsg, err, test.name)
assert.Equal(t, test.expectedVMSetNames, vmSetNames, test.name)
}
}
func TestExtractResourceGroupByNicID(t *testing.T) {
testCases := []struct {
name string
nicID string
expectedRG string
expectedErrMsg error
}{
{
name: "ExtractResourceGroupByNicID should return correct resource group",
nicID: "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/networkInterfaces/nic",
expectedRG: "rg",
},
{
name: "ExtractResourceGroupByNicID should report error if nicID is invalid",
nicID: "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Compute/networkInterfaces/nic",
expectedErrMsg: fmt.Errorf("error of extracting resourceGroup from nicID %q", "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Compute/networkInterfaces/nic"),
},
}
for _, test := range testCases {
rgName, err := extractResourceGroupByNicID(test.nicID)
assert.Equal(t, test.expectedErrMsg, err, test.name)
assert.Equal(t, test.expectedRG, rgName, test.name)
}
}
func TestStandardEnsureHostInPool(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
cloud := GetTestCloud(ctrl)
availabilitySetID := "/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/availabilitySets/myAvailabilitySet"
backendAddressPoolID := "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/loadBalancers/lb1-internal/backendAddressPools/backendpool-1"
testCases := []struct {
name string
service *v1.Service
nodeName types.NodeName
backendPoolID string
nicName string
nicID string
vmSetName string
nicProvisionState string
isStandardLB bool
expectedErrMsg error
}{
{
name: "EnsureHostInPool should return nil if node is not in VMSet",
service: &v1.Service{},
nodeName: "vm1",
nicName: "nic1",
nicID: "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/networkInterfaces/nic1",
vmSetName: "availabilityset-1",
},
{
name: "EnsureHostInPool should report error if last segment of nicID is nil",
service: &v1.Service{},
nodeName: "vm2",
nicName: "nic2",
nicID: "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/networkInterfaces/",
vmSetName: "availabilityset-1",
expectedErrMsg: fmt.Errorf("resource name was missing from identifier"),
},
{
name: "EnsureHostInPool should return nil if node's provisioning state is Failed",
service: &v1.Service{},
nodeName: "vm3",
nicName: "nic3",
nicID: "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/networkInterfaces/nic3",
nicProvisionState: nicFailedState,
vmSetName: "myAvailabilitySet",
},
{
name: "EnsureHostInPool should report error if service.Spec.ClusterIP is ipv6 but node don't have IPv6 address",
service: &v1.Service{Spec: v1.ServiceSpec{ClusterIP: "2001:0db8:85a3:0000:0000:8a2e:0370:7334"}},
nodeName: "vm4",
nicName: "nic4",
nicID: "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/networkInterfaces/nic4",
vmSetName: "myAvailabilitySet",
expectedErrMsg: fmt.Errorf("failed to determine the ipconfig(IPv6=true). nicname=%q", "nic4"),
},
{
name: "EnsureHostInPool should return nil if there is matched backend pool",
service: &v1.Service{},
backendPoolID: backendAddressPoolID,
nodeName: "vm5",
nicName: "nic5",
nicID: "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/networkInterfaces/nic5",
vmSetName: "myAvailabilitySet",
},
{
name: "EnsureHostInPool should return nil if there isn't matched backend pool",
service: &v1.Service{},
backendPoolID: "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/loadBalancers/lb1-internal/backendAddressPools/backendpool-2",
nodeName: "vm6",
nicName: "nic6",
nicID: "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/networkInterfaces/nic6",
vmSetName: "myAvailabilitySet",
},
{
name: "EnsureHostInPool should return nil if BackendPool is not on same LB",
service: &v1.Service{},
isStandardLB: true,
backendPoolID: "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/loadBalancers/lb2-internal/backendAddressPools/backendpool-3",
nodeName: "vm7",
nicName: "nic7",
nicID: "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/networkInterfaces/nic7",
vmSetName: "myAvailabilitySet",
},
{
name: "EnsureHostInPool should report error if the format of backendPoolID is invalid",
service: &v1.Service{},
isStandardLB: true,
backendPoolID: "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/loadBalancers/lb2-internal/backendAddressPool/backendpool-3",
nodeName: "vm8",
nicName: "nic8",
nicID: "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/networkInterfaces/nic7",
vmSetName: "myAvailabilitySet",
expectedErrMsg: fmt.Errorf("new backendPoolID %q is in wrong format", "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/loadBalancers/lb2-internal/backendAddressPool/backendpool-3"),
},
}
for _, test := range testCases {
if test.isStandardLB {
cloud.Config.LoadBalancerSku = loadBalancerSkuStandard
}
testVM := compute.VirtualMachine{
Name: to.StringPtr(string(test.nodeName)),
VirtualMachineProperties: &compute.VirtualMachineProperties{
AvailabilitySet: &compute.SubResource{ID: to.StringPtr(availabilitySetID)},
NetworkProfile: &compute.NetworkProfile{
NetworkInterfaces: &[]compute.NetworkInterfaceReference{
{
ID: to.StringPtr(test.nicID),
},
},
},
},
}
testNIC := network.Interface{
Name: to.StringPtr(test.nicName),
ID: to.StringPtr(test.nicID),
InterfacePropertiesFormat: &network.InterfacePropertiesFormat{
ProvisioningState: to.StringPtr(test.nicProvisionState),
IPConfigurations: &[]network.InterfaceIPConfiguration{
{
Name: to.StringPtr("ifconfig1"),
InterfaceIPConfigurationPropertiesFormat: &network.InterfaceIPConfigurationPropertiesFormat{
LoadBalancerBackendAddressPools: &[]network.BackendAddressPool{
{
ID: to.StringPtr(backendAddressPoolID),
},
},
},
},
},
},
}
mockVMClient := cloud.VirtualMachinesClient.(*mockvmclient.MockInterface)
mockVMClient.EXPECT().Get(gomock.Any(), cloud.ResourceGroup, string(test.nodeName), gomock.Any()).Return(testVM, nil).AnyTimes()
mockInterfaceClient := cloud.InterfacesClient.(*mockinterfaceclient.MockInterface)
mockInterfaceClient.EXPECT().Get(gomock.Any(), cloud.ResourceGroup, test.nicName, gomock.Any()).Return(testNIC, nil).AnyTimes()
mockInterfaceClient.EXPECT().CreateOrUpdate(gomock.Any(), cloud.ResourceGroup, gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
_, _, _, vm, err := cloud.VMSet.EnsureHostInPool(test.service, test.nodeName, test.backendPoolID, test.vmSetName, false)
assert.Equal(t, test.expectedErrMsg, err, test.name)
assert.Nil(t, vm, test.name)
}
}
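// TestStandardEnsureHostsInPool checks that master, external-resource-group, and unmanaged nodes are skipped before EnsureHostInPool is applied to the rest.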
func TestStandardEnsureHostsInPool(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
cloud := GetTestCloud(ctrl)
availabilitySetID := "/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/availabilitySets/myAvailabilitySet"
backendAddressPoolID := "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/loadBalancers/lb1-internal/backendAddressPools/backendpool-1"
testCases := []struct {
name string
service *v1.Service
nodes []*v1.Node
nodeName string
backendPoolID string
nicName string
nicID string
vmSetName string
expectedErr bool
expectedErrMsg string
}{
{
name: "EnsureHostsInPool should return nil if there's no error when invoke EnsureHostInPool",
service: &v1.Service{},
nodeName: "vm1",
nodes: []*v1.Node{
{
ObjectMeta: meta.ObjectMeta{
Name: "vm1",
},
},
},
nicName: "nic1",
nicID: "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/networkInterfaces/nic1",
backendPoolID: backendAddressPoolID,
vmSetName: "availabilityset-1",
},
{
name: "EnsureHostsInPool should skip if node is master node",
service: &v1.Service{},
nodeName: "vm2",
nodes: []*v1.Node{
{
ObjectMeta: meta.ObjectMeta{
Name: "vm2",
Labels: map[string]string{nodeLabelRole: "master"},
},
},
},
nicName: "nic2",
nicID: "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/networkInterfaces/",
vmSetName: "availabilityset-1",
},
{
name: "EnsureHostsInPool should skip if node is in external resource group",
service: &v1.Service{},
nodeName: "vm3",
nodes: []*v1.Node{
{
ObjectMeta: meta.ObjectMeta{
Name: "vm3",
Labels: map[string]string{externalResourceGroupLabel: "rg-external"},
},
},
},
nicName: "nic3",
nicID: "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/networkInterfaces/nic3",
vmSetName: "availabilityset-1",
},
{
name: "EnsureHostsInPool should skip if node is unmanaged",
service: &v1.Service{},
nodeName: "vm4",
nodes: []*v1.Node{
{
ObjectMeta: meta.ObjectMeta{
Name: "vm4",
Labels: map[string]string{managedByAzureLabel: "false"},
},
},
},
nicName: "nic4",
nicID: "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/networkInterfaces/nic4",
vmSetName: "availabilityset-1",
},
{
name: "EnsureHostsInPool should report error if service.Spec.ClusterIP is ipv6 but node don't have IPv6 address",
service: &v1.Service{
ObjectMeta: meta.ObjectMeta{
Name: "svc",
Namespace: "default",
},
Spec: v1.ServiceSpec{
ClusterIP: "2001:0db8:85a3:0000:0000:8a2e:0370:7334",
},
},
nodeName: "vm5",
nodes: []*v1.Node{
{
ObjectMeta: meta.ObjectMeta{
Name: "vm5",
},
},
},
nicName: "nic5",
backendPoolID: backendAddressPoolID,
nicID: "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/networkInterfaces/nic5",
vmSetName: "myAvailabilitySet",
expectedErr: true,
expectedErrMsg: fmt.Sprintf("ensure(default/svc): backendPoolID(%s) - failed to ensure host in pool: %q", backendAddressPoolID, fmt.Errorf("failed to determine the ipconfig(IPv6=true). nicname=%q", "nic5")),
},
}
for _, test := range testCases {
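		// Use the standard SKU and exclude masters so the master-node case is skipped rather than processed.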
cloud.Config.LoadBalancerSku = loadBalancerSkuStandard
cloud.Config.ExcludeMasterFromStandardLB = to.BoolPtr(true)
testVM := compute.VirtualMachine{
Name: to.StringPtr(string(test.nodeName)),
VirtualMachineProperties: &compute.VirtualMachineProperties{
AvailabilitySet: &compute.SubResource{ID: to.StringPtr(availabilitySetID)},
NetworkProfile: &compute.NetworkProfile{
NetworkInterfaces: &[]compute.NetworkInterfaceReference{
{
ID: to.StringPtr(test.nicID),
},
},
},
},
}
testNIC := network.Interface{
Name: to.StringPtr(test.nicName),
ID: to.StringPtr(test.nicID),
InterfacePropertiesFormat: &network.InterfacePropertiesFormat{
IPConfigurations: &[]network.InterfaceIPConfiguration{
{
Name: to.StringPtr("ifconfig1"),
InterfaceIPConfigurationPropertiesFormat: &network.InterfaceIPConfigurationPropertiesFormat{
LoadBalancerBackendAddressPools: &[]network.BackendAddressPool{
{
ID: to.StringPtr(backendAddressPoolID),
},
},
},
},
},
},
}
mockVMClient := cloud.VirtualMachinesClient.(*mockvmclient.MockInterface)
mockVMClient.EXPECT().Get(gomock.Any(), cloud.ResourceGroup, test.nodeName, gomock.Any()).Return(testVM, nil).AnyTimes()
mockInterfaceClient := cloud.InterfacesClient.(*mockinterfaceclient.MockInterface)
mockInterfaceClient.EXPECT().Get(gomock.Any(), cloud.ResourceGroup, test.nicName, gomock.Any()).Return(testNIC, nil).AnyTimes()
mockInterfaceClient.EXPECT().CreateOrUpdate(gomock.Any(), cloud.ResourceGroup, gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
err := cloud.VMSet.EnsureHostsInPool(test.service, test.nodes, test.backendPoolID, test.vmSetName, false)
if test.expectedErr {
assert.Equal(t, test.expectedErrMsg, err.Error(), test.name)
} else {
assert.Nil(t, err, test.name)
}
}
}
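// TestServiceOwnsFrontendIP covers primary and secondary ownership detection of a frontend IP configuration for a service.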
func TestServiceOwnsFrontendIP(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
cloud := GetTestCloud(ctrl)
testCases := []struct {
desc string
existingPIPs []network.PublicIPAddress
fip network.FrontendIPConfiguration
service *v1.Service
isOwned bool
isPrimary bool
expectedErr error
}{
{
desc: "serviceOwnsFrontendIP should detect the primary service",
fip: network.FrontendIPConfiguration{
Name: to.StringPtr("auid"),
},
service: &v1.Service{
ObjectMeta: meta.ObjectMeta{
UID: types.UID("uid"),
},
},
isOwned: true,
isPrimary: true,
},
{
desc: "serviceOwnsFrontendIP should return false if the secondary external service doesn't set it's loadBalancer IP",
fip: network.FrontendIPConfiguration{
Name: to.StringPtr("auid"),
},
service: &v1.Service{
ObjectMeta: meta.ObjectMeta{
UID: types.UID("secondary"),
},
},
},
{
desc: "serviceOwnsFrontendIP should report a not found error if there is no public IP " +
"found according to the external service's loadBalancer IP but do not return the error",
existingPIPs: []network.PublicIPAddress{
{
ID: to.StringPtr("pip"),
PublicIPAddressPropertiesFormat: &network.PublicIPAddressPropertiesFormat{
IPAddress: to.StringPtr("4.3.2.1"),
},
},
},
fip: network.FrontendIPConfiguration{
Name: to.StringPtr("auid"),
FrontendIPConfigurationPropertiesFormat: &network.FrontendIPConfigurationPropertiesFormat{
PublicIPAddress: &network.PublicIPAddress{
ID: to.StringPtr("pip"),
},
},
},
service: &v1.Service{
ObjectMeta: meta.ObjectMeta{
UID: types.UID("secondary"),
},
Spec: v1.ServiceSpec{
LoadBalancerIP: "1.2.3.4",
},
},
},
{
desc: "serviceOwnsFrontendIP should return false if there is a mismatch between the PIP's ID and " +
"the counterpart on the frontend IP config",
existingPIPs: []network.PublicIPAddress{
{
ID: to.StringPtr("pip"),
PublicIPAddressPropertiesFormat: &network.PublicIPAddressPropertiesFormat{
IPAddress: to.StringPtr("4.3.2.1"),
},
},
},
fip: network.FrontendIPConfiguration{
Name: to.StringPtr("auid"),
FrontendIPConfigurationPropertiesFormat: &network.FrontendIPConfigurationPropertiesFormat{
PublicIPAddress: &network.PublicIPAddress{
ID: to.StringPtr("pip1"),
},
},
},
service: &v1.Service{
ObjectMeta: meta.ObjectMeta{
UID: types.UID("secondary"),
},
Spec: v1.ServiceSpec{
LoadBalancerIP: "4.3.2.1",
},
},
},
{
desc: "serviceOwnsFrontendIP should detect the secondary external service",
existingPIPs: []network.PublicIPAddress{
{
ID: to.StringPtr("pip"),
PublicIPAddressPropertiesFormat: &network.PublicIPAddressPropertiesFormat{
IPAddress: to.StringPtr("4.3.2.1"),
},
},
},
fip: network.FrontendIPConfiguration{
Name: to.StringPtr("auid"),
FrontendIPConfigurationPropertiesFormat: &network.FrontendIPConfigurationPropertiesFormat{
PublicIPAddress: &network.PublicIPAddress{
ID: to.StringPtr("pip"),
},
},
},
service: &v1.Service{
ObjectMeta: meta.ObjectMeta{
UID: types.UID("secondary"),
},
Spec: v1.ServiceSpec{
LoadBalancerIP: "4.3.2.1",
},
},
isOwned: true,
},
{
desc: "serviceOwnsFrontendIP should detect the secondary internal service",
fip: network.FrontendIPConfiguration{
Name: to.StringPtr("auid"),
FrontendIPConfigurationPropertiesFormat: &network.FrontendIPConfigurationPropertiesFormat{
PrivateIPAddress: to.StringPtr("4.3.2.1"),
},
},
service: &v1.Service{
ObjectMeta: meta.ObjectMeta{
UID: types.UID("secondary"),
Annotations: map[string]string{ServiceAnnotationLoadBalancerInternal: "true"},
},
Spec: v1.ServiceSpec{
LoadBalancerIP: "4.3.2.1",
},
},
isOwned: true,
},
}
for _, test := range testCases {
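		// Rebuild the public IP client mock per case; List is consulted at most once when matching the service's LoadBalancerIP.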
mockPIPClient := mockpublicipclient.NewMockInterface(ctrl)
cloud.PublicIPAddressesClient = mockPIPClient
mockPIPClient.EXPECT().List(gomock.Any(), gomock.Any()).Return(test.existingPIPs, nil).MaxTimes(1)
isOwned, isPrimary, err := cloud.serviceOwnsFrontendIP(test.fip, test.service)
assert.Equal(t, test.expectedErr, err, test.desc)
assert.Equal(t, test.isOwned, isOwned, test.desc)
assert.Equal(t, test.isPrimary, isPrimary, test.desc)
}
}
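// TestStandardEnsureBackendPoolDeleted verifies that the backend pool reference is removed from the matching NIC IP configuration.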
func TestStandardEnsureBackendPoolDeleted(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
cloud := GetTestCloud(ctrl)
service := getTestService("test", v1.ProtocolTCP, nil, false, 80)
backendPoolID := "backendPoolID"
vmSetName := "AS"
tests := []struct {
desc string
backendAddressPools *[]network.BackendAddressPool
loadBalancerSKU string
existingVM compute.VirtualMachine
existingNIC network.Interface
}{
{
desc: "",
backendAddressPools: &[]network.BackendAddressPool{
{
ID: to.StringPtr(backendPoolID),
BackendAddressPoolPropertiesFormat: &network.BackendAddressPoolPropertiesFormat{
BackendIPConfigurations: &[]network.InterfaceIPConfiguration{
{
ID: to.StringPtr("/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Network/networkInterfaces/k8s-agentpool1-00000000-nic-1/ipConfigurations/ipconfig1"),
},
},
},
},
},
existingVM: compute.VirtualMachine{
VirtualMachineProperties: &compute.VirtualMachineProperties{
AvailabilitySet: &compute.SubResource{
ID: to.StringPtr("/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/availabilitySets/as"),
},
NetworkProfile: &compute.NetworkProfile{
NetworkInterfaces: &[]compute.NetworkInterfaceReference{
{
ID: to.StringPtr("/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Network/networkInterfaces/k8s-agentpool1-00000000-nic-1"),
},
},
},
},
},
existingNIC: network.Interface{
InterfacePropertiesFormat: &network.InterfacePropertiesFormat{
ProvisioningState: to.StringPtr("Succeeded"),
IPConfigurations: &[]network.InterfaceIPConfiguration{
{
InterfaceIPConfigurationPropertiesFormat: &network.InterfaceIPConfigurationPropertiesFormat{
Primary: to.BoolPtr(true),
LoadBalancerBackendAddressPools: &[]network.BackendAddressPool{
{
ID: to.StringPtr("/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Network/networkInterfaces/k8s-agentpool1-00000000-nic-1/ipConfigurations/ipconfig1"),
},
},
},
},
},
},
},
},
}
for _, test := range tests {
cloud.LoadBalancerSku = test.loadBalancerSKU
mockVMClient := mockvmclient.NewMockInterface(ctrl)
mockVMClient.EXPECT().Get(gomock.Any(), cloud.ResourceGroup, "k8s-agentpool1-00000000-1", gomock.Any()).Return(test.existingVM, nil)
cloud.VirtualMachinesClient = mockVMClient
mockNICClient := mockinterfaceclient.NewMockInterface(ctrl)
mockNICClient.EXPECT().Get(gomock.Any(), "rg", "k8s-agentpool1-00000000-nic-1", gomock.Any()).Return(test.existingNIC, nil)
mockNICClient.EXPECT().CreateOrUpdate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
cloud.InterfacesClient = mockNICClient
err := cloud.VMSet.EnsureBackendPoolDeleted(&service, backendPoolID, vmSetName, test.backendAddressPools)
assert.NoError(t, err, test.desc)
}
}
test_main.py
# -*- coding: utf-8 -*-
# Copyright (C) 2012 Christian Ledermann
#
# This library is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
try:
import unittest2 as unittest # Needed in Python 2.6
except ImportError:
import unittest
from fastkml import kml
from fastkml import styles
from fastkml import base
from fastkml import atom
from fastkml import config
from fastkml import gx # NOQA
import datetime
from dateutil.tz import tzutc, tzoffset
from fastkml.config import etree
from fastkml.geometry import Point, LineString, Polygon
from fastkml.geometry import MultiPoint, MultiLineString, MultiPolygon
from fastkml.geometry import LinearRing, GeometryCollection
from fastkml.geometry import Geometry
class BaseClassesTestCase(unittest.TestCase):
""" BaseClasses must raise a NotImplementedError on etree_element
and a TypeError on from_element """
def test_base_object(self):
bo = base._BaseObject(id='id0')
self.assertEqual(bo.id, 'id0')
self.assertEqual(bo.ns, config.NS)
self.assertEqual(bo.targetId, None)
self.assertEqual(bo.__name__, None)
bo.targetId = 'target'
self.assertEqual(bo.targetId, 'target')
bo.ns = ''
bo.id = None
self.assertEqual(bo.id, None)
self.assertEqual(bo.ns, '')
self.assertRaises(NotImplementedError, bo.etree_element)
element = etree.Element(config.NS + 'Base')
self.assertRaises(TypeError, bo.from_element)
self.assertRaises(TypeError, bo.from_element, element)
bo.__name__ = 'NotABaseObject'
self.assertRaises(TypeError, bo.from_element, element)
# Note that we can coax baseclasses not to throw errors
bo.__name__ = 'Base'
bo.ns = config.NS
bo.from_element(element)
self.assertEqual(bo.id, None)
self.assertEqual(bo.ns, config.NS)
        self.assertFalse(bo.etree_element())
self.assertTrue(len(bo.to_string()) > 1)
def test_feature(self):
f = kml._Feature(name='A Feature')
self.assertRaises(NotImplementedError, f.etree_element)
self.assertEqual(f.name, 'A Feature')
self.assertEqual(f.visibility, 1)
self.assertEqual(f.isopen, 0)
self.assertEqual(f._atom_author, None)
self.assertEqual(f._atom_link, None)
self.assertEqual(f.address, None)
# self.assertEqual(f.phoneNumber, None)
self.assertEqual(f._snippet, None)
self.assertEqual(f.description, None)
self.assertEqual(f._styleUrl, None)
self.assertEqual(f._styles, [])
self.assertEqual(f._time_span, None)
self.assertEqual(f._time_stamp, None)
# self.assertEqual(f.region, None)
# self.assertEqual(f.extended_data, None)
f.__name__ = 'Feature'
f.styleUrl = '#default'
self.assertTrue('Feature>' in str(f.to_string()))
self.assertTrue('#default' in str(f.to_string()))
def test_container(self):
f = kml._Container(name='A Container')
        # apparently you can add documents to containers
# d = kml.Document()
# self.assertRaises(TypeError, f.append, d)
p = kml.Placemark()
f.append(p)
self.assertRaises(NotImplementedError, f.etree_element)
def test_overlay(self):
o = kml._Overlay(name='An Overlay')
self.assertEqual(o._color, None)
self.assertEqual(o._drawOrder, None)
self.assertEqual(o._icon, None)
self.assertRaises(NotImplementedError, o.etree_element)
def test_atom_link(self):
ns = '{http://www.opengis.net/kml/2.2}'
        link = atom.Link(ns=ns)
        self.assertEqual(link.ns, ns)
def test_atom_person(self):
ns = '{http://www.opengis.net/kml/2.2}'
p = atom._Person(ns=ns)
self.assertEqual(p.ns, ns)
class BuildKmlTestCase(unittest.TestCase):
""" Build a simple KML File """
def test_kml(self):
""" kml file without contents """
k = kml.KML()
self.assertEqual(len(list(k.features())), 0)
if config.LXML:
self.assertEqual(
str(k.to_string())[:43],
                '<kml xmlns="http://www.opengis.net/kml/2.2"/>'[:43])
else:
if hasattr(etree, 'register_namespace'):
self.assertEqual(str(k.to_string())[:51], '<kml:kml xmlns:kml="http://www.opengis.net/kml/2.2" />'[:51])
else:
self.assertEqual(str(k.to_string())[:51], '<ns0:kml xmlns:ns0="http://www.opengis.net/kml/2.2" />'[:51])
k2 = kml.KML()
k2.from_string(k.to_string())
self.assertEqual(k.to_string(), k2.to_string())
def test_folder(self):
""" KML file with folders """
ns = '{http://www.opengis.net/kml/2.2}'
k = kml.KML()
f = kml.Folder(ns, 'id', 'name', 'description')
nf = kml.Folder(ns, 'nested-id', 'nested-name', 'nested-description')
f.append(nf)
k.append(f)
f2 = kml.Folder(ns, 'id2', 'name2', 'description2')
k.append(f2)
self.assertEqual(len(list(k.features())), 2)
self.assertEqual(len(list(list(k.features())[0].features())), 1)
k2 = kml.KML()
s = k.to_string()
k2.from_string(s)
self.assertEqual(s, k2.to_string())
def test_placemark(self):
ns = '{http://www.opengis.net/kml/2.2}'
k = kml.KML(ns=ns)
p = kml.Placemark(ns, 'id', 'name', 'description')
p.geometry = Point(0.0, 0.0, 0.0)
p2 = kml.Placemark(ns, 'id2', 'name2', 'description2')
p2.geometry = LineString([(0, 0, 0), (1, 1, 1)])
k.append(p)
k.append(p2)
self.assertEqual(len(list(k.features())), 2)
k2 = kml.KML()
k2.from_string(k.to_string(prettyprint=True))
self.assertEqual(k.to_string(), k2.to_string())
def test_schema(self):
ns = '{http://www.opengis.net/kml/2.2}'
self.assertRaises(ValueError, kml.Schema, ns)
s = kml.Schema(ns, 'some_id')
self.assertEqual(len(list(s.simple_fields)), 0)
s.append('int', 'Integer', 'An Integer')
self.assertEqual(list(s.simple_fields)[0]['type'], 'int')
self.assertEqual(list(s.simple_fields)[0]['name'], 'Integer')
self.assertEqual(list(s.simple_fields)[0]['displayName'], 'An Integer')
        s.simple_fields = None
        self.assertRaises(
TypeError, s.simple_fields, [('none', 'Integer', 'An Integer')])
self.assertRaises(
TypeError, s.simple_fields, ('int', 'Integer', 'An Integer'))
fields = {
'type': 'int',
'name': 'Integer',
'displayName': 'An Integer'
}
s.simple_fields = fields
self.assertEqual(list(s.simple_fields)[0]['type'], 'int')
self.assertEqual(list(s.simple_fields)[0]['name'], 'Integer')
self.assertEqual(list(s.simple_fields)[0]['displayName'], 'An Integer')
s.simple_fields = [['float', 'Float'], fields]
self.assertEqual(list(s.simple_fields)[0]['type'], 'float')
self.assertEqual(list(s.simple_fields)[0]['name'], 'Float')
self.assertEqual(list(s.simple_fields)[0]['displayName'], None)
self.assertEqual(list(s.simple_fields)[1]['type'], 'int')
self.assertEqual(list(s.simple_fields)[1]['name'], 'Integer')
self.assertEqual(list(s.simple_fields)[1]['displayName'], 'An Integer')
def test_schema_data(self):
ns = '{http://www.opengis.net/kml/2.2}'
self.assertRaises(ValueError, kml.SchemaData, ns)
self.assertRaises(ValueError, kml.SchemaData, ns, '')
sd = kml.SchemaData(ns, '#default')
sd.append_data('text', 'Some Text')
self.assertEqual(len(sd.data), 1)
sd.append_data(value=1, name='Integer')
self.assertEqual(len(sd.data), 2)
self.assertEqual(sd.data[0], {'value': 'Some Text', 'name': 'text'})
self.assertEqual(sd.data[1], {'value': 1, 'name': 'Integer'})
data = (('text', 'Some new Text'), {'value': 2, 'name': 'Integer'})
sd.data = data
self.assertEqual(len(sd.data), 2)
self.assertEqual(
sd.data[0], {'value': 'Some new Text',
'name': 'text'})
self.assertEqual(sd.data[1], {'value': 2, 'name': 'Integer'})
def test_untyped_extended_data(self):
ns = '{http://www.opengis.net/kml/2.2}'
k = kml.KML(ns=ns)
p = kml.Placemark(ns, 'id', 'name', 'description')
p.geometry = Point(0.0, 0.0, 0.0)
p.extended_data = kml.UntypedExtendedData(elements=[
kml.UntypedExtendedDataElement(
name='info',
value='so much to see'), kml.UntypedExtendedDataElement(
name='weather',
display_name='Weather',
value='blue skies')
])
self.assertEqual(len(p.extended_data.elements), 2)
k.append(p)
k2 = kml.KML()
k2.from_string(k.to_string(prettyprint=True))
k.to_string()
extended_data = list(k2.features())[0].extended_data
self.assertTrue(extended_data is not None)
        self.assertEqual(len(extended_data.elements), 2)
self.assertEqual(extended_data.elements[0].name, 'info')
self.assertEqual(extended_data.elements[0].value, 'so much to see')
self.assertEqual(extended_data.elements[0].display_name, None)
self.assertEqual(extended_data.elements[1].name, 'weather')
self.assertEqual(extended_data.elements[1].value, 'blue skies')
self.assertEqual(extended_data.elements[1].display_name, 'Weather')
def test_untyped_extended_data_nested(self):
ns = '{http://www.opengis.net/kml/2.2}'
k = kml.KML(ns=ns)
d = kml.Document(ns, 'docid', 'doc name', 'doc description')
d.extended_data = kml.UntypedExtendedData(elements=[
kml.UntypedExtendedDataElement(name='type',
value='Document')
])
f = kml.Folder(ns, 'fid', 'f name', 'f description')
f.extended_data = kml.UntypedExtendedData(elements=[
kml.UntypedExtendedDataElement(name='type',
value='Folder')
])
k.append(d)
d.append(f)
k2 = kml.KML()
k2.from_string(k.to_string())
document_data = list(k2.features())[0].extended_data
folder_data = list(list(k2.features())[0].features())[0].extended_data
self.assertEqual(document_data.elements[0].name, 'type')
self.assertEqual(document_data.elements[0].value, 'Document')
self.assertEqual(folder_data.elements[0].name, 'type')
self.assertEqual(folder_data.elements[0].value, 'Folder')
def test_document(self):
k = kml.KML()
ns = '{http://www.opengis.net/kml/2.2}'
d = kml.Document(ns, 'docid', 'doc name', 'doc description')
f = kml.Folder(ns, 'fid', 'f name', 'f description')
k.append(d)
d.append(f)
nf = kml.Folder(
ns, 'nested-fid', 'nested f name', 'nested f description')
f.append(nf)
f2 = kml.Folder(ns, 'id2', 'name2', 'description2')
d.append(f2)
p = kml.Placemark(ns, 'id', 'name', 'description')
p.geometry = Polygon([(0, 0, 0), (1, 1, 0), (1, 0, 1)])
p2 = kml.Placemark(ns, 'id2', 'name2', 'description2')
# p2 does not have a geometry!
f2.append(p)
nf.append(p2)
self.assertEqual(len(list(k.features())), 1)
self.assertEqual(len(list((list(k.features())[0].features()))), 2)
k2 = kml.KML()
k2.from_string(k.to_string())
self.assertEqual(k.to_string(), k2.to_string())
def test_author(self):
d = kml.Document()
d.author = 'Christian Ledermann'
self.assertTrue('Christian Ledermann' in str(d.to_string()))
a = atom.Author(
name='Nobody',
uri='http://localhost',
email='[email protected]')
d.author = a
self.assertEqual(d.author, 'Nobody')
self.assertFalse('Christian Ledermann' in str(d.to_string()))
self.assertTrue('Nobody' in str(d.to_string()))
self.assertTrue('http://localhost' in str(d.to_string()))
self.assertTrue('[email protected]' in str(d.to_string()))
d2 = kml.Document()
d2.from_string(d.to_string())
self.assertEqual(d.to_string(), d2.to_string())
d.author = None
def test_link(self):
d = kml.Document()
d.link = 'http://localhost'
self.assertTrue('http://localhost' in str(d.to_string()))
        link = atom.Link(href='#here')
        d.link = link
self.assertTrue('#here' in str(d.to_string()))
self.assertRaises(TypeError, d.link, object)
d2 = kml.Document()
d2.from_string(d.to_string())
self.assertEqual(d.to_string(), d2.to_string())
d.link = None
def test_address(self):
address = '1600 Amphitheatre Parkway, Mountain View, CA 94043, USA'
d = kml.Document()
d.address = address
self.assertTrue(address in str(d.to_string()))
self.assertTrue('address>' in str(d.to_string()))
def test_phone_number(self):
phone = '+1 234 567 8901'
d = kml.Document()
d.phoneNumber = phone
self.assertTrue(phone in str(d.to_string()))
self.assertTrue('phoneNumber>' in str(d.to_string()))
class KmlFromStringTestCase(unittest.TestCase):
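    """ Parse KML documents from strings and check they round-trip """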
def test_document(self):
doc = """<kml xmlns="http://www.opengis.net/kml/2.2">
<Document targetId="someTargetId">
<name>Document.kml</name>
<open>1</open>
<Style id="exampleStyleDocument">
<LabelStyle>
<color>ff0000cc</color>
</LabelStyle>
</Style>
<Placemark>
<name>Document Feature 1</name>
<styleUrl>#exampleStyleDocument</styleUrl>
<Point>
<coordinates>-122.371,37.816,0</coordinates>
</Point>
</Placemark>
<Placemark targetId="someTargetId">
<name>Document Feature 2</name>
<styleUrl>#exampleStyleDocument</styleUrl>
<Point>
<coordinates>-122.370,37.817,0</coordinates>
</Point>
</Placemark>
</Document>
</kml>"""
k = kml.KML()
k.from_string(doc)
self.assertEqual(len(list(k.features())), 1)
self.assertEqual(len(list(list(k.features())[0].features())), 2)
k2 = kml.KML()
k2.from_string(k.to_string())
self.assertEqual(k.to_string(), k2.to_string())
def test_document_booleans(self):
doc = """<kml xmlns="http://www.opengis.net/kml/2.2">
<Document targetId="someTargetId">
<name>Document.kml</name>
<visibility>true</visibility>
<open>1</open>
</Document>
</kml>"""
k = kml.KML()
k.from_string(doc)
self.assertEqual(list(k.features())[0].visibility, 1)
self.assertEqual(list(k.features())[0].isopen, 1)
doc = """<kml xmlns="http://www.opengis.net/kml/2.2">
<Document targetId="someTargetId">
<name>Document.kml</name>
<visibility>0</visibility>
<open>false</open>
</Document>
</kml>"""
k = kml.KML()
k.from_string(doc)
self.assertEqual(list(k.features())[0].visibility, 0)
self.assertEqual(list(k.features())[0].isopen, 0)
def test_folders(self):
doc = """<kml xmlns="http://www.opengis.net/kml/2.2">
<Folder>
<name>Folder.kml</name>
<open>1</open>
<description>
A folder is a container that can hold multiple other objects
</description>
<Placemark>
<name>Folder object 1 (Placemark)</name>
<Point>
<coordinates>-122.377588,37.830266,0</coordinates>
</Point>
</Placemark>
<Placemark>
<name>Folder object 2 (Polygon)</name>
<Polygon>
<outerBoundaryIs>
<LinearRing>
<coordinates>
-122.377830,37.830445,0
-122.377576,37.830631,0
-122.377840,37.830642,0
-122.377830,37.830445,0
</coordinates>
</LinearRing>
</outerBoundaryIs>
</Polygon>
</Placemark>
<Placemark>
<name>Folder object 3 (Path)</name>
<LineString>
<tessellate>1</tessellate>
<coordinates>
-122.378009,37.830128,0 -122.377885,37.830379,0
</coordinates>
</LineString>
</Placemark>
</Folder>
</kml>"""
k = kml.KML()
k.from_string(doc)
self.assertEqual(len(list(k.features())), 1)
self.assertEqual(len(list(list(k.features())[0].features())), 3)
k2 = kml.KML()
k2.from_string(k.to_string())
self.assertEqual(k.to_string(), k2.to_string())
def test_placemark(self):
doc = """<kml xmlns="http://www.opengis.net/kml/2.2">
<Placemark>
<name>Simple placemark</name>
<description>Attached to the ground. Intelligently places itself
at the height of the underlying terrain.</description>
<Point>
<coordinates>-122.0822035425683,37.42228990140251,0</coordinates>
</Point>
</Placemark>
</kml>"""
k = kml.KML()
k.from_string(doc)
self.assertEqual(len(list(k.features())), 1)
self.assertEqual(list(k.features())[0].name, "Simple placemark")
k2 = kml.KML()
k2.from_string(k.to_string())
self.assertEqual(k.to_string(), k2.to_string())
def test_extended_data(self):
doc = """<kml xmlns="http://www.opengis.net/kml/2.2">
<Placemark>
<name>Simple placemark</name>
<description></description>
<Point>
<coordinates>-122.0822035425683,37.42228990140251,0</coordinates>
</Point>
<ExtendedData>
<Data name="holeNumber">
<displayName><![CDATA[
<b>This is hole </b>
]]></displayName>
<value>1</value>
</Data>
<Data name="holePar">
<displayName><![CDATA[
<i>The par for this hole is </i>
]]></displayName>
<value>4</value>
</Data>
<SchemaData schemaUrl="#TrailHeadTypeId">
<SimpleData name="TrailHeadName">Mount Everest</SimpleData>
<SimpleData name="TrailLength">347.45</SimpleData>
<SimpleData name="ElevationGain">10000</SimpleData>
</SchemaData>
</ExtendedData>
</Placemark>
</kml>"""
k = kml.KML()
k.from_string(doc)
extended_data = list(k.features())[0].extended_data
self.assertEqual(extended_data.elements[0].name, 'holeNumber')
self.assertEqual(extended_data.elements[0].value, '1')
self.assertTrue(
'<b>This is hole </b>' in extended_data.elements[0].display_name)
self.assertEqual(extended_data.elements[1].name, 'holePar')
self.assertEqual(extended_data.elements[1].value, '4')
self.assertTrue(
'<i>The par for this hole is </i>' in
extended_data.elements[1].display_name)
sd = extended_data.elements[2]
self.assertEqual(sd.data[0]['name'], 'TrailHeadName')
self.assertEqual(sd.data[1]['value'], '347.45')
def test_polygon(self):
doc = """
<kml xmlns="http://www.opengis.net/kml/2.2">
<Placemark>
<name>South Africa</name>
<Polygon>
<outerBoundaryIs>
<LinearRing>
<coordinates>
31.521,-29.257,0
31.326,-29.402,0
30.902,-29.91,0
30.623,-30.424,0
30.056,-31.14,0
28.926,-32.172,0
28.22,-32.772,0
27.465,-33.227,0
26.419,-33.615,0
25.91,-33.667,0
25.781,-33.945,0
25.173,-33.797,0
24.678,-33.987,0
23.594,-33.794,0
22.988,-33.916,0
22.574,-33.864,0
21.543,-34.259,0
20.689,-34.417,0
20.071,-34.795,0
19.616,-34.819,0
19.193,-34.463,0
18.855,-34.444,0
18.425,-33.998,0
18.377,-34.137,0
18.244,-33.868,0
18.25,-33.281,0
17.925,-32.611,0
18.248,-32.429,0
18.222,-31.662,0
17.567,-30.726,0
17.064,-29.879,0
17.063,-29.876,0
16.345,-28.577,0
16.824,-28.082,0
17.219,-28.356,0
17.387,-28.784,0
17.836,-28.856,0
18.465,-29.045,0
19.002,-28.972,0
19.895,-28.461,0
19.896,-24.768,0
20.166,-24.918,0
20.759,-25.868,0
20.666,-26.477,0
20.89,-26.829,0
21.606,-26.727,0
22.106,-26.28,0
22.58,-25.979,0
22.824,-25.5,0
23.312,-25.269,0
23.734,-25.39,0
24.211,-25.67,0
25.025,-25.72,0
25.665,-25.487,0
25.766,-25.175,0
25.942,-24.696,0
26.486,-24.616,0
26.786,-24.241,0
27.119,-23.574,0
28.017,-22.828,0
29.432,-22.091,0
29.839,-22.102,0
30.323,-22.272,0
30.66,-22.152,0
31.191,-22.252,0
31.67,-23.659,0
31.931,-24.369,0
31.752,-25.484,0
31.838,-25.843,0
31.333,-25.66,0
31.044,-25.731,0
30.95,-26.023,0
30.677,-26.398,0
30.686,-26.744,0
31.283,-27.286,0
31.868,-27.178,0
32.072,-26.734,0
32.83,-26.742,0
32.58,-27.47,0
32.462,-28.301,0
32.203,-28.752,0
31.521,-29.257,0
</coordinates>
</LinearRing>
</outerBoundaryIs>
<innerBoundaryIs>
<LinearRing>
<coordinates>
28.978,-28.956,0
28.542,-28.648,0
28.074,-28.851,0
27.533,-29.243,0
26.999,-29.876,0
27.749,-30.645,0
28.107,-30.546,0
28.291,-30.226,0
28.848,-30.07,0
29.018,-29.744,0
29.325,-29.257,0
28.978,-28.956,0
</coordinates>
</LinearRing>
</innerBoundaryIs>
</Polygon>
</Placemark>
</kml>"""
k = kml.KML()
k.from_string(doc)
self.assertEqual(len(list(k.features())), 1)
self.assertTrue(isinstance(list(k.features())[0].geometry, Polygon))
k2 = kml.KML()
k2.from_string(k.to_string())
self.assertEqual(k.to_string(), k2.to_string())
def test_multipoints(self):
doc = """<kml xmlns="http://www.opengis.net/kml/2.2">
<Placemark id="feat_2">
<name>MultiPoint</name>
<styleUrl>#stylesel_9</styleUrl>
<MultiGeometry id="geom_0">
<Point id="geom_5">
<coordinates>16,-35,0.0</coordinates>
</Point>
<Point id="geom_6">
<coordinates>16,-33,0.0</coordinates>
</Point>
<Point id="geom_7">
<coordinates>16,-31,0.0</coordinates>
</Point>
<Point id="geom_8">
<coordinates>16,-29,0.0</coordinates>
</Point>
<Point id="geom_9">
<coordinates>16,-27,0.0</coordinates>
</Point>
<Point id="geom_10">
<coordinates>16,-25,0.0</coordinates>
</Point>
<Point id="geom_11">
<coordinates>16,-23,0.0</coordinates>
</Point>
<Point id="geom_12">
<coordinates>16,-21,0.0</coordinates>
</Point>
<Point id="geom_15">
<coordinates>18,-35,0.0</coordinates>
</Point>
<Point id="geom_16">
<coordinates>18,-33,0.0</coordinates>
</Point>
<Point id="geom_17">
<coordinates>18,-31,0.0</coordinates>
</Point>
<Point id="geom_18">
<coordinates>18,-29,0.0</coordinates>
</Point>
</MultiGeometry>
</Placemark></kml>"""
k = kml.KML()
k.from_string(doc)
self.assertEqual(len(list(k.features())), 1)
self.assertTrue(isinstance(list(k.features())[0].geometry, MultiPoint))
self.assertEqual(len(list(k.features())[0].geometry.geoms), 12)
k2 = kml.KML()
k2.from_string(k.to_string())
self.assertEqual(k.to_string(), k2.to_string())
def test_multilinestrings(self):
doc = """<kml xmlns="http://www.opengis.net/kml/2.2">
<Placemark>
<name>Dnipro (Dnieper)</name>
<MultiGeometry>
<LineString><coordinates>33.54,46.831,0 33.606,46.869,0 33.662,46.957,0 33.739,47.05,0 33.859,47.149,0 33.976,47.307,0 33.998,47.411,0 34.155,47.49,0 34.448,47.542,0 34.712,47.553,0 34.946,47.521,0 35.088,47.528,0 35.138,47.573,0 35.149,47.657,0 35.106,47.842,0 </coordinates></LineString>
<LineString><coordinates>33.194,49.094,0 32.884,49.225,0 32.603,49.302,0 31.886,49.555,0 </coordinates></LineString>
<LineString><coordinates>31.44,50,0 31.48,49.933,0 31.486,49.871,0 31.467,49.754,0 </coordinates></LineString>
<LineString><coordinates>30.508,51.217,0 30.478,50.904,0 30.479,50.749,0 30.515,50.597,0 </coordinates></LineString>
</MultiGeometry>
</Placemark> </kml>"""
k = kml.KML()
k.from_string(doc)
self.assertEqual(len(list(k.features())), 1)
self.assertTrue(
isinstance(list(k.features())[0].geometry, MultiLineString))
self.assertEqual(len(list(k.features())[0].geometry.geoms), 4)
k2 = kml.KML()
k2.from_string(k.to_string())
self.assertEqual(k.to_string(), k2.to_string())
def test_multipolygon(self):
doc = """<kml xmlns="http://www.opengis.net/kml/2.2">
<Placemark>
<name>Italy</name>
<MultiGeometry><Polygon><outerBoundaryIs><LinearRing><coordinates>12.621,35.492,0 12.611,35.489,0 12.603,35.491,0 12.598,35.494,0 12.594,35.494,0 12.556,35.508,0 12.536,35.513,0 12.526,35.517,0 12.534,35.522,0 12.556,35.521,0 12.567,35.519,0 12.613,35.515,0 12.621,35.513,0 12.624,35.512,0 12.622,35.51,0 12.621,35.508,0 12.624,35.502,0 12.621,35.492,0 </coordinates></LinearRing></outerBoundaryIs></Polygon><Polygon><outerBoundaryIs><LinearRing><coordinates>12.873,35.852,0 12.857,35.852,0 12.851,35.856,0 12.846,35.863,0 12.847,35.868,0 12.854,35.871,0 12.86,35.872,0 12.867,35.872,0 12.874,35.866,0 12.877,35.856,0 12.873,35.852,0 </coordinates></LinearRing></outerBoundaryIs></Polygon><Polygon><outerBoundaryIs><LinearRing><coordinates>11.981,36.827,0 11.988,36.824,0 11.994,36.825,0 12,36.836,0 12.038,36.806,0 12.052,36.79,0 12.054,36.767,0 12.031,36.741,0 11.997,36.745,0 11.962,36.765,0 11.938,36.789,0 11.934,36.795,0 11.926,36.812,0 11.923,36.828,0 11.935,36.836,0 11.939,36.837,0 11.947,36.841,0 11.952,36.843,0 11.958,36.84,0 11.968,36.831,0 11.972,36.829,0 11.981,36.827,0 </coordinates></LinearRing></outerBoundaryIs></Polygon><Polygon><outerBoundaryIs><LinearRing><coordinates>12.322,37.94,0 12.337,37.933,0 12.355,37.927,0 12.369,37.925,0 12.358,37.914,0 12.343,37.913,0 12.327,37.918,0 12.315,37.925,0 12.3,37.919,0 12.288,37.921,0 12.279,37.929,0 12.274,37.939,0 12.288,37.938,0 12.298,37.941,0 12.306,37.945,0 12.315,37.946,0 12.322,37.94,0 </coordinates></LinearRing></outerBoundaryIs></Polygon><Polygon><outerBoundaryIs><LinearRing><coordinates>12.078,37.96,0 12.079,37.95,0 12.065,37.951,0 12.048,37.961,0 12.037,37.974,0 12.03,37.984,0 12.036,37.991,0 12.054,37.992,0 12.065,37.986,0 12.072,37.968,0 12.078,37.96,0 </coordinates></LinearRing></outerBoundaryIs></Polygon><Polygon><outerBoundaryIs><LinearRing><coordinates>15.643,38.262,0 15.635,38.261,0 15.625,38.261,0 15.584,38.24,0 15.57,38.227,0 15.564,38.214,0 15.56,38.2,0 15.576,38.2,0 15.527,38.137,0 15.501,38.085,0 15.393,37.976,0 15.303,37.864,0 15.284,37.833,0 15.267,37.812,0 15.242,37.795,0 15.214,37.761,0 15.207,37.747,0 15.209,37.737,0 15.219,37.718,0 15.221,37.706,0 15.217,37.696,0 15.203,37.685,0 15.2,37.675,0 15.197,37.655,0 15.185,37.626,0 15.179,37.604,0 15.164,37.567,0 15.117,37.522,0 15.097,37.494,0 15.092,37.477,0 15.09,37.459,0 15.093,37.36,0 15.097,37.343,0 15.104,37.33,0 15.111,37.322,0 15.181,37.291,0 15.218,37.285,0 15.237,37.275,0 15.253,37.257,0 15.262,37.234,0 15.245,37.246,0 15.236,37.242,0 15.229,37.23,0 15.221,37.22,0 15.222,37.237,0 15.216,37.244,0 15.206,37.244,0 15.193,37.24,0 15.2,37.227,0 15.184,37.207,0 15.195,37.176,0 15.217,37.155,0 15.234,37.165,0 15.248,37.158,0 15.248,37.152,0 15.23,37.149,0 15.232,37.135,0 15.247,37.118,0 15.265,37.11,0 15.289,37.108,0 15.304,37.101,0 15.309,37.086,0 15.303,37.062,0 15.289,37.069,0 15.283,37.061,0 15.284,37.048,0 15.292,37.042,0 15.313,37.044,0 15.322,37.04,0 15.33,37.027,0 15.333,37.011,0 15.325,37.008,0 15.315,37.012,0 15.309,37.018,0 15.304,37.016,0 15.269,37,0 15.275,36.993,0 15.267,36.989,0 15.264,36.987,0 15.269,36.98,0 15.269,36.973,0 15.245,36.972,0 15.227,36.965,0 15.212,36.956,0 15.197,36.952,0 15.175,36.944,0 15.159,36.924,0 15.108,36.82,0 15.107,36.808,0 15.095,36.799,0 15.099,36.779,0 15.118,36.747,0 15.135,36.687,0 15.135,36.675,0 15.115,36.66,0 15.094,36.655,0 15.074,36.659,0 15.056,36.671,0 15.041,36.687,0 15.034,36.694,0 15.021,36.699,0 15.008,36.703,0 14.998,36.702,0 14.994,36.696,0 14.983,36.689,0 14.958,36.698,0 14.919,36.72,0 14.883,36.73,0 
14.847,36.726,0 14.781,36.699,0 14.777,36.707,0 14.774,36.71,0 14.761,36.706,0 14.745,36.719,0 14.685,36.726,0 14.672,36.744,0 14.659,36.754,0 14.601,36.772,0 14.583,36.781,0 14.566,36.778,0 14.488,36.793,0 14.476,36.805,0 14.395,36.945,0 14.37,36.973,0 14.279,37.044,0 14.209,37.081,0 14.127,37.112,0 14.089,37.117,0 13.977,37.11,0 13.968,37.108,0 13.949,37.099,0 13.939,37.096,0 13.895,37.101,0 13.833,37.139,0 13.795,37.152,0 13.752,37.159,0 13.716,37.171,0 13.684,37.189,0 13.599,37.256,0 13.57,37.273,0 13.535,37.282,0 13.489,37.288,0 13.453,37.299,0 13.422,37.314,0 13.373,37.346,0 13.33,37.366,0 13.312,37.381,0 13.303,37.386,0 13.29,37.389,0 13.279,37.393,0 13.254,37.432,0 13.248,37.436,0 13.226,37.446,0 13.215,37.458,0 13.207,37.464,0 13.195,37.466,0 13.19,37.469,0 13.18,37.484,0 13.175,37.487,0 13.052,37.5,0 13.037,37.495,0 13.027,37.493,0 13.017,37.497,0 13.011,37.507,0 13.005,37.527,0 13.001,37.535,0 12.975,37.557,0 12.943,37.568,0 12.863,37.576,0 12.781,37.574,0 12.698,37.563,0 12.66,37.565,0 12.637,37.582,0 12.595,37.638,0 12.578,37.652,0 12.564,37.658,0 12.524,37.658,0 12.507,37.665,0 12.49,37.682,0 12.475,37.703,0 12.466,37.72,0 12.461,37.734,0 12.46,37.748,0 12.457,37.76,0 12.449,37.771,0 12.437,37.783,0 12.428,37.797,0 12.428,37.809,0 12.445,37.816,0 12.447,37.812,0 12.461,37.819,0 12.466,37.823,0 12.464,37.825,0 12.471,37.853,0 12.473,37.854,0 12.478,37.872,0 12.479,37.881,0 12.477,37.886,0 12.468,37.897,0 12.466,37.906,0 12.465,37.913,0 12.465,37.914,0 12.468,37.916,0 12.491,37.954,0 12.497,37.98,0 12.503,37.997,0 12.505,38.011,0 12.493,38.021,0 12.524,38.031,0 12.55,38.055,0 12.577,38.072,0 12.609,38.062,0 12.639,38.079,0 12.652,38.091,0 12.657,38.107,0 12.663,38.116,0 12.677,38.116,0 12.692,38.112,0 12.705,38.111,0 12.726,38.126,0 12.725,38.15,0 12.72,38.175,0 12.732,38.193,0 12.738,38.181,0 12.75,38.182,0 12.761,38.181,0 12.767,38.162,0 12.791,38.117,0 12.819,38.078,0 12.829,38.07,0 12.858,38.058,0 12.869,38.051,0 12.87,38.042,0 12.902,38.028,0 12.945,38.033,0 13.028,38.062,0 13.062,38.083,0 13.07,38.091,0 13.072,38.095,0 13.07,38.101,0 13.069,38.114,0 13.067,38.123,0 13.057,38.133,0 13.055,38.142,0 13.09,38.166,0 13.084,38.174,0 13.09,38.183,0 13.102,38.19,0 13.113,38.193,0 13.123,38.191,0 13.158,38.179,0 13.18,38.176,0 13.208,38.176,0 13.231,38.184,0 13.239,38.207,0 13.255,38.202,0 13.267,38.205,0 13.278,38.21,0 13.297,38.214,0 13.311,38.219,0 13.319,38.22,0 13.324,38.218,0 13.326,38.211,0 13.327,38.205,0 13.329,38.2,0 13.367,38.179,0 13.372,38.173,0 13.374,38.14,0 13.377,38.131,0 13.392,38.103,0 13.514,38.11,0 13.542,38.094,0 13.54,38.077,0 13.542,38.067,0 13.548,38.056,0 13.558,38.049,0 13.588,38.039,0 13.623,38.015,0 13.652,38.001,0 13.698,37.993,0 13.712,37.988,0 13.708,37.985,0 13.708,37.984,0 13.706,37.98,0 13.727,37.981,0 13.791,37.973,0 13.813,37.978,0 13.858,37.996,0 13.899,38.004,0 13.913,38.012,0 13.925,38.022,0 13.939,38.029,0 14.008,38.038,0 14.021,38.049,0 14.063,38.03,0 14.084,38.024,0 14.107,38.021,0 14.122,38.022,0 14.152,38.029,0 14.274,38.015,0 14.332,38.018,0 14.385,38.029,0 14.433,38.049,0 14.465,38.037,0 14.512,38.044,0 14.635,38.081,0 14.668,38.099,0 14.696,38.121,0 14.734,38.157,0 14.745,38.161,0 14.778,38.159,0 14.799,38.16,0 14.875,38.175,0 14.889,38.182,0 14.898,38.186,0 14.908,38.187,0 14.936,38.186,0 14.945,38.182,0 14.963,38.163,0 14.97,38.159,0 14.982,38.158,0 15.008,38.152,0 15.04,38.153,0 15.049,38.152,0 15.054,38.148,0 15.064,38.135,0 15.069,38.131,0 15.088,38.128,0 15.106,38.133,0 15.123,38.141,0 15.178,38.156,0 15.204,38.183,0 
15.241,38.241,0 15.238,38.249,0 15.237,38.251,0 15.237,38.253,0 15.241,38.261,0 15.238,38.265,0 15.244,38.265,0 15.247,38.254,0 15.241,38.23,0 15.246,38.217,0 15.258,38.21,0 15.275,38.207,0 15.292,38.207,0 15.322,38.211,0 15.4,38.232,0 15.423,38.244,0 15.434,38.253,0 15.473,38.268,0 15.513,38.297,0 15.529,38.302,0 15.56,38.3,0 15.616,38.28,0 15.652,38.275,0 15.649,38.266,0 15.643,38.262,0 </coordinates></LinearRing></outerBoundaryIs></Polygon><Polygon><outerBoundaryIs><LinearRing><coordinates>14.999,38.371,0 14.987,38.364,0 14.964,38.381,0 14.949,38.396,0 14.946,38.412,0 14.96,38.433,0 14.967,38.433,0 14.967,38.418,0 14.983,38.412,0 14.994,38.403,0 15.002,38.391,0 15.008,38.378,0 14.999,38.371,0 </coordinates></LinearRing></outerBoundaryIs></Polygon><Polygon><outerBoundaryIs><LinearRing><coordinates>14.967,38.453,0 14.949,38.451,0 14.935,38.458,0 14.922,38.469,0 14.908,38.474,0 14.9,38.481,0 14.901,38.498,0 14.91,38.515,0 14.925,38.522,0 14.958,38.522,0 14.967,38.516,0 14.96,38.502,0 14.966,38.497,0 14.975,38.49,0 14.98,38.487,0 14.98,38.481,0 14.953,38.481,0 14.958,38.469,0 14.962,38.465,0 14.967,38.461,0 14.967,38.453,0 </coordinates></LinearRing></outerBoundaryIs></Polygon><Polygon><outerBoundaryIs><LinearRing><coordinates>14.361,38.539,0 14.346,38.535,0 14.343,38.547,0 14.357,38.551,0 14.361,38.539,0 </coordinates></LinearRing></outerBoundaryIs></Polygon><Polygon><outerBoundaryIs><LinearRing><coordinates>14.864,38.549,0 14.862,38.539,0 14.824,38.552,0 14.794,38.571,0 14.815,38.584,0 14.852,38.585,0 14.867,38.581,0 14.877,38.569,0 14.873,38.565,0 14.869,38.56,0 14.864,38.549,0 </coordinates></LinearRing></outerBoundaryIs></Polygon><Polygon><outerBoundaryIs><LinearRing><coordinates>14.585,38.557,0 14.574,38.557,0 14.552,38.562,0 14.544,38.575,0 14.543,38.587,0 14.546,38.588,0 14.564,38.585,0 14.576,38.577,0 14.58,38.566,0 14.585,38.561,0 14.585,38.557,0 </coordinates></LinearRing></outerBoundaryIs></Polygon><Polygon><outerBoundaryIs><LinearRing><coordinates>13.177,38.693,0 13.165,38.691,0 13.153,38.695,0 13.153,38.702,0 13.158,38.71,0 13.169,38.717,0 13.186,38.718,0 13.196,38.711,0 13.197,38.708,0 13.177,38.693,0 </coordinates></LinearRing></outerBoundaryIs></Polygon><Polygon><outerBoundaryIs><LinearRing><coordinates>15.225,38.777,0 15.217,38.773,0 15.206,38.775,0 15.187,38.789,0 15.187,38.793,0 15.194,38.798,0 15.204,38.802,0 15.209,38.806,0 15.212,38.81,0 15.219,38.812,0 15.228,38.81,0 15.235,38.808,0 15.239,38.804,0 15.237,38.796,0 15.232,38.789,0 15.23,38.783,0 15.225,38.777,0 </coordinates></LinearRing></outerBoundaryIs></Polygon><Polygon><outerBoundaryIs><LinearRing><coordinates>8.361,39.118,0 8.386,39.105,0 8.418,39.106,0 8.445,39.102,0 8.457,39.073,0 8.459,39.068,0 8.464,39.065,0 8.47,39.065,0 8.477,39.07,0 8.478,39.07,0 8.48,39.072,0 8.484,39.07,0 8.465,39.056,0 8.46,39.05,0 8.464,39.042,0 8.455,39.028,0 8.447,38.994,0 8.438,38.967,0 8.433,38.963,0 8.422,38.96,0 8.41,38.962,0 8.407,38.967,0 8.406,38.974,0 8.402,38.981,0 8.365,39.029,0 8.35,39.062,0 8.354,39.083,0 8.354,39.091,0 8.347,39.091,0 8.347,39.097,0 8.361,39.118,0 </coordinates></LinearRing></outerBoundaryIs></Polygon><Polygon><outerBoundaryIs><LinearRing><coordinates>8.306,39.104,0 8.291,39.099,0 8.27,39.1,0 8.255,39.107,0 8.258,39.118,0 8.258,39.124,0 8.233,39.144,0 8.225,39.157,0 8.231,39.173,0 8.246,39.181,0 8.291,39.188,0 8.306,39.193,0 8.307,39.161,0 8.313,39.12,0 8.306,39.104,0 </coordinates></LinearRing></outerBoundaryIs></Polygon><Polygon><outerBoundaryIs><LinearRing><coordinates>13.959,40.712,0 13.945,40.701,0 
13.935,40.705,0 13.92,40.704,0 13.904,40.7,0 13.891,40.694,0 13.882,40.699,0 13.86,40.707,0 13.85,40.715,0 13.857,40.735,0 13.862,40.744,0 13.871,40.749,0 13.868,40.752,0 13.863,40.762,0 13.884,40.762,0 13.947,40.745,0 13.966,40.735,0 13.963,40.729,0 13.963,40.723,0 13.966,40.715,0 13.959,40.712,0 </coordinates></LinearRing></outerBoundaryIs></Polygon><Polygon><outerBoundaryIs><LinearRing><coordinates>13.427,40.791,0 13.415,40.786,0 13.419,40.796,0 13.424,40.8,0 13.432,40.801,0 13.427,40.791,0 </coordinates></LinearRing></outerBoundaryIs></Polygon><Polygon><outerBoundaryIs><LinearRing><coordinates>8.333,41.105,0 8.343,41.098,0 8.345,41.086,0 8.342,41.074,0 8.333,41.064,0 8.275,41.057,0 8.252,41.043,0 8.252,41.016,0 8.247,40.993,0 8.21,40.996,0 8.218,41.005,0 8.222,41.014,0 8.224,41.024,0 8.224,41.033,0 8.229,41.042,0 8.242,41.052,0 8.261,41.064,0 8.276,41.07,0 8.278,41.081,0 8.276,41.095,0 8.278,41.105,0 8.285,41.107,0 8.303,41.105,0 8.306,41.109,0 8.309,41.114,0 8.314,41.118,0 8.327,41.126,0 8.326,41.118,0 8.328,41.112,0 8.333,41.105,0 </coordinates></LinearRing></outerBoundaryIs></Polygon><Polygon><outerBoundaryIs><LinearRing><coordinates>9.471,41.19,0 9.474,41.184,0 9.475,41.179,0 9.47,41.172,0 9.464,41.173,0 9.456,41.181,0 9.449,41.186,0 9.442,41.183,0 9.437,41.186,0 9.448,41.205,0 9.443,41.211,0 9.446,41.22,0 9.454,41.234,0 9.46,41.242,0 9.468,41.241,0 9.475,41.236,0 9.478,41.228,0 9.48,41.224,0 9.479,41.217,0 9.471,41.19,0 </coordinates></LinearRing></outerBoundaryIs></Polygon><Polygon><outerBoundaryIs><LinearRing><coordinates>9.239,41.249,0 9.247,41.248,0 9.258,41.249,0 9.269,41.236,0 9.268,41.202,0 9.279,41.195,0 9.275,41.199,0 9.274,41.205,0 9.275,41.212,0 9.279,41.221,0 9.286,41.221,0 9.29,41.209,0 9.289,41.205,0 9.286,41.201,0 9.286,41.195,0 9.3,41.196,0 9.306,41.198,0 9.313,41.201,0 9.317,41.196,0 9.334,41.187,0 9.336,41.211,0 9.353,41.207,0 9.389,41.181,0 9.389,41.187,0 9.397,41.184,0 9.405,41.181,0 9.413,41.181,0 9.423,41.181,0 9.423,41.174,0 9.417,41.171,0 9.415,41.168,0 9.413,41.164,0 9.409,41.16,0 9.421,41.156,0 9.427,41.149,0 9.433,41.14,0 9.443,41.133,0 9.438,41.125,0 9.437,41.115,0 9.443,41.092,0 9.455,41.112,0 9.461,41.12,0 9.471,41.126,0 9.467,41.13,0 9.466,41.134,0 9.463,41.137,0 9.457,41.14,0 9.47,41.146,0 9.482,41.145,0 9.495,41.142,0 9.509,41.14,0 9.514,41.143,0 9.519,41.148,0 9.524,41.15,0 9.533,41.14,0 9.525,41.133,0 9.535,41.128,0 9.541,41.123,0 9.547,41.121,0 9.553,41.126,0 9.56,41.126,0 9.562,41.122,0 9.562,41.121,0 9.564,41.121,0 9.567,41.119,0 9.566,41.107,0 9.563,41.097,0 9.557,41.088,0 9.546,41.077,0 9.544,41.082,0 9.541,41.087,0 9.54,41.092,0 9.522,41.031,0 9.512,41.016,0 9.533,41.016,0 9.525,41.03,0 9.544,41.037,0 9.555,41.034,0 9.558,41.025,0 9.553,41.009,0 9.558,41.009,0 9.559,41.011,0 9.559,41.013,0 9.56,41.016,0 9.566,41.011,0 9.569,41.009,0 9.574,41.009,0 9.589,41.02,0 9.616,41.019,0 9.645,41.011,0 9.663,41.002,0 9.652,40.991,0 9.637,40.992,0 9.62,40.999,0 9.605,41.002,0 9.588,40.996,0 9.583,40.98,0 9.579,40.962,0 9.567,40.948,0 9.572,40.935,0 9.558,40.931,0 9.512,40.934,0 9.512,40.929,0 9.513,40.928,0 9.505,40.927,0 9.512,40.915,0 9.521,40.915,0 9.53,40.919,0 9.54,40.92,0 9.55,40.917,0 9.568,40.908,0 9.574,40.906,0 9.593,40.91,0 9.608,40.918,0 9.623,40.924,0 9.643,40.92,0 9.638,40.911,0 9.632,40.905,0 9.624,40.9,0 9.615,40.899,0 9.615,40.893,0 9.651,40.879,0 9.656,40.876,0 9.658,40.864,0 9.664,40.858,0 9.672,40.859,0 9.684,40.865,0 9.69,40.856,0 9.7,40.85,0 9.712,40.847,0 9.725,40.845,0 9.691,40.836,0 9.682,40.829,0 9.69,40.817,0 9.69,40.811,0 
9.675,40.814,0 9.662,40.809,0 9.658,40.8,0 9.669,40.79,0 9.67,40.801,0 9.676,40.788,0 9.705,40.759,0 9.711,40.745,0 9.715,40.727,0 9.745,40.68,0 9.749,40.667,0 9.754,40.605,0 9.757,40.595,0 9.762,40.587,0 9.769,40.584,0 9.782,40.582,0 9.786,40.576,0 9.787,40.567,0 9.793,40.557,0 9.821,40.536,0 9.827,40.529,0 9.827,40.519,0 9.816,40.502,0 9.813,40.492,0 9.809,40.471,0 9.801,40.455,0 9.779,40.427,0 9.762,40.39,0 9.75,40.377,0 9.728,40.372,0 9.713,40.366,0 9.701,40.353,0 9.684,40.324,0 9.671,40.312,0 9.646,40.296,0 9.635,40.282,0 9.627,40.263,0 9.625,40.248,0 9.629,40.205,0 9.632,40.196,0 9.655,40.144,0 9.666,40.131,0 9.68,40.126,0 9.688,40.12,0 9.711,40.096,0 9.733,40.084,0 9.731,40.068,0 9.694,39.993,0 9.688,39.961,0 9.697,39.934,0 9.703,39.937,0 9.71,39.94,0 9.716,39.94,0 9.718,39.934,0 9.715,39.924,0 9.709,39.922,0 9.702,39.922,0 9.697,39.919,0 9.69,39.906,0 9.685,39.894,0 9.684,39.882,0 9.69,39.871,0 9.684,39.871,0 9.684,39.865,0 9.688,39.863,0 9.693,39.86,0 9.697,39.858,0 9.697,39.852,0 9.685,39.84,0 9.676,39.819,0 9.671,39.793,0 9.669,39.769,0 9.67,39.756,0 9.676,39.732,0 9.677,39.718,0 9.675,39.708,0 9.665,39.691,0 9.663,39.677,0 9.661,39.67,0 9.656,39.663,0 9.652,39.652,0 9.65,39.639,0 9.656,39.594,0 9.654,39.567,0 9.629,39.502,0 9.645,39.484,0 9.64,39.452,0 9.615,39.399,0 9.603,39.355,0 9.601,39.341,0 9.604,39.326,0 9.612,39.316,0 9.635,39.303,0 9.635,39.297,0 9.608,39.289,0 9.582,39.266,0 9.568,39.238,0 9.574,39.214,0 9.566,39.205,0 9.569,39.199,0 9.577,39.194,0 9.581,39.187,0 9.578,39.179,0 9.569,39.159,0 9.567,39.149,0 9.558,39.139,0 9.54,39.134,0 9.523,39.125,0 9.519,39.104,0 9.511,39.108,0 9.508,39.111,0 9.508,39.116,0 9.512,39.124,0 9.497,39.133,0 9.481,39.135,0 9.466,39.132,0 9.451,39.124,0 9.443,39.124,0 9.439,39.133,0 9.429,39.138,0 9.409,39.146,0 9.384,39.169,0 9.378,39.173,0 9.368,39.177,0 9.346,39.196,0 9.337,39.201,0 9.327,39.203,0 9.313,39.208,0 9.3,39.214,0 9.293,39.221,0 9.286,39.214,0 9.272,39.22,0 9.253,39.225,0 9.217,39.228,0 9.198,39.221,0 9.182,39.207,0 9.17,39.193,0 9.167,39.187,0 9.137,39.194,0 9.114,39.211,0 9.073,39.248,0 9.064,39.243,0 9.056,39.247,0 9.048,39.256,0 9.039,39.262,0 9.025,39.265,0 9.015,39.264,0 9.013,39.26,0 9.026,39.256,0 9.026,39.248,0 9.022,39.24,0 9.027,39.236,0 9.036,39.232,0 9.038,39.227,0 9.039,39.228,0 9.051,39.225,0 9.075,39.23,0 9.08,39.224,0 9.08,39.216,0 9.08,39.212,0 9.039,39.179,0 9.027,39.165,0 9.019,39.146,0 9.017,39.124,0 9.019,39.104,0 9.025,39.086,0 9.033,39.07,0 9.038,39.063,0 9.044,39.058,0 9.046,39.051,0 9.03,39.03,0 9.019,38.995,0 9.026,38.995,0 9.016,38.989,0 9.013,38.99,0 9.005,38.995,0 8.997,38.983,0 8.895,38.902,0 8.889,38.9,0 8.878,38.899,0 8.873,38.896,0 8.862,38.882,0 8.854,38.878,0 8.842,38.88,0 8.828,38.889,0 8.806,38.906,0 8.806,38.885,0 8.791,38.904,0 8.767,38.92,0 8.74,38.93,0 8.717,38.932,0 8.695,38.925,0 8.669,38.91,0 8.652,38.891,0 8.656,38.871,0 8.641,38.864,0 8.635,38.871,0 8.643,38.89,0 8.634,38.895,0 8.616,38.896,0 8.6,38.899,0 8.6,38.906,0 8.616,38.923,0 8.616,38.947,0 8.604,38.965,0 8.581,38.96,0 8.573,39.013,0 8.56,39.057,0 8.553,39.057,0 8.545,39.051,0 8.521,39.061,0 8.505,39.063,0 8.51,39.068,0 8.519,39.083,0 8.505,39.091,0 8.483,39.08,0 8.483,39.084,0 8.478,39.09,0 8.474,39.107,0 8.466,39.119,0 8.455,39.125,0 8.443,39.118,0 8.439,39.128,0 8.439,39.153,0 8.436,39.166,0 8.429,39.173,0 8.419,39.177,0 8.413,39.175,0 8.416,39.166,0 8.41,39.169,0 8.406,39.174,0 8.403,39.181,0 8.402,39.19,0 8.399,39.201,0 8.393,39.204,0 8.386,39.204,0 8.381,39.207,0 8.373,39.222,0 8.372,39.23,0 8.377,39.238,0 
8.427,39.283,0 8.433,39.302,0 8.416,39.323,0 8.418,39.339,0 8.383,39.359,0 8.375,39.379,0 8.379,39.388,0 8.396,39.404,0 8.402,39.412,0 8.406,39.427,0 8.404,39.436,0 8.39,39.462,0 8.387,39.465,0 8.387,39.47,0 8.395,39.481,0 8.422,39.508,0 8.436,39.525,0 8.452,39.558,0 8.464,39.577,0 8.457,39.584,0 8.465,39.598,0 8.463,39.617,0 8.45,39.659,0 8.447,39.704,0 8.443,39.714,0 8.443,39.721,0 8.447,39.731,0 8.445,39.757,0 8.447,39.762,0 8.46,39.76,0 8.469,39.755,0 8.5,39.716,0 8.518,39.702,0 8.539,39.696,0 8.566,39.701,0 8.515,39.713,0 8.505,39.721,0 8.507,39.738,0 8.521,39.755,0 8.536,39.771,0 8.546,39.783,0 8.539,39.783,0 8.536,39.776,0 8.531,39.77,0 8.525,39.766,0 8.519,39.762,0 8.53,39.772,0 8.541,39.789,0 8.549,39.807,0 8.553,39.821,0 8.556,39.852,0 8.554,39.864,0 8.546,39.878,0 8.524,39.899,0 8.495,39.912,0 8.464,39.914,0 8.436,39.899,0 8.443,39.893,0 8.446,39.898,0 8.45,39.899,0 8.456,39.898,0 8.464,39.899,0 8.452,39.893,0 8.445,39.883,0 8.436,39.858,0 8.429,39.865,0 8.438,39.877,0 8.432,39.885,0 8.419,39.892,0 8.404,39.903,0 8.401,39.903,0 8.399,39.905,0 8.395,39.912,0 8.394,39.92,0 8.397,39.927,0 8.4,39.933,0 8.402,39.94,0 8.394,39.977,0 8.395,39.988,0 8.407,40.01,0 8.408,40.022,0 8.395,40.036,0 8.381,40.03,0 8.378,40.033,0 8.385,40.042,0 8.402,40.05,0 8.405,40.049,0 8.435,40.051,0 8.453,40.056,0 8.46,40.057,0 8.469,40.062,0 8.48,40.074,0 8.488,40.089,0 8.491,40.104,0 8.486,40.118,0 8.468,40.144,0 8.464,40.163,0 8.46,40.216,0 8.477,40.262,0 8.477,40.292,0 8.463,40.314,0 8.442,40.331,0 8.416,40.345,0 8.409,40.338,0 8.387,40.352,0 8.384,40.372,0 8.395,40.424,0 8.391,40.442,0 8.38,40.468,0 8.366,40.492,0 8.35,40.502,0 8.332,40.51,0 8.324,40.531,0 8.32,40.555,0 8.313,40.578,0 8.292,40.595,0 8.268,40.594,0 8.217,40.57,0 8.196,40.578,0 8.206,40.598,0 8.217,40.612,0 8.194,40.617,0 8.177,40.606,0 8.167,40.586,0 8.162,40.564,0 8.154,40.578,0 8.148,40.593,0 8.141,40.619,0 8.141,40.625,0 8.158,40.632,0 8.174,40.641,0 8.186,40.656,0 8.189,40.68,0 8.192,40.68,0 8.196,40.685,0 8.198,40.691,0 8.193,40.694,0 8.18,40.695,0 8.174,40.697,0 8.168,40.701,0 8.154,40.719,0 8.146,40.726,0 8.134,40.729,0 8.21,40.865,0 8.216,40.881,0 8.217,40.899,0 8.21,40.914,0 8.193,40.92,0 8.179,40.928,0 8.183,40.945,0 8.194,40.963,0 8.203,40.975,0 8.21,40.975,0 8.213,40.963,0 8.221,40.962,0 8.229,40.962,0 8.237,40.955,0 8.236,40.946,0 8.232,40.934,0 8.23,40.921,0 8.234,40.91,0 8.278,40.865,0 8.311,40.85,0 8.422,40.839,0 8.478,40.826,0 8.501,40.824,0 8.521,40.827,0 8.599,40.853,0 8.619,40.866,0 8.635,40.881,0 8.641,40.896,0 8.71,40.92,0 8.734,40.921,0 8.752,40.919,0 8.765,40.914,0 8.823,40.947,0 8.84,40.961,0 8.876,41.008,0 8.889,41.016,0 8.887,41.02,0 8.887,41.021,0 8.886,41.022,0 8.882,41.023,0 8.914,41.032,0 8.923,41.037,0 8.93,41.043,0 8.941,41.061,0 8.947,41.064,0 8.959,41.07,0 8.976,41.082,0 8.991,41.097,0 9.006,41.122,0 9.025,41.129,0 9.094,41.135,0 9.108,41.139,0 9.136,41.16,0 9.142,41.153,0 9.158,41.169,0 9.164,41.184,0 9.163,41.225,0 9.172,41.243,0 9.191,41.251,0 9.213,41.256,0 9.231,41.262,0 9.233,41.253,0 9.239,41.249,0 </coordinates></LinearRing></outerBoundaryIs></Polygon><Polygon><outerBoundaryIs><LinearRing><coordinates>9.435,41.217,0 9.395,41.211,0 9.377,41.213,0 9.373,41.222,0 9.373,41.23,0 9.378,41.234,0 9.385,41.237,0 9.392,41.241,0 9.396,41.248,0 9.398,41.256,0 9.402,41.258,0 9.408,41.258,0 9.414,41.262,0 9.422,41.261,0 9.427,41.254,0 9.431,41.246,0 9.43,41.238,0 9.429,41.229,0 9.431,41.225,0 9.434,41.221,0 9.435,41.217,0 
</coordinates></LinearRing></outerBoundaryIs></Polygon><Polygon><outerBoundaryIs><LinearRing><coordinates>10.316,42.341,0 10.313,42.324,0 10.294,42.328,0 10.297,42.345,0 10.306,42.352,0 10.316,42.341,0 </coordinates></LinearRing></outerBoundaryIs></Polygon><Polygon><outerBoundaryIs><LinearRing><coordinates>10.922,42.334,0 10.909,42.325,0 10.874,42.36,0 10.862,42.366,0 10.871,42.376,0 10.877,42.387,0 10.884,42.392,0 10.896,42.386,0 10.907,42.378,0 10.919,42.356,0 10.931,42.346,0 10.926,42.339,0 10.922,42.334,0 </coordinates></LinearRing></outerBoundaryIs></Polygon><Polygon><outerBoundaryIs><LinearRing><coordinates>10.095,42.577,0 10.086,42.572,0 10.072,42.573,0 10.059,42.576,0 10.05,42.582,0 10.053,42.589,0 10.063,42.592,0 10.073,42.6,0 10.08,42.614,0 10.084,42.615,0 10.088,42.604,0 10.092,42.596,0 10.096,42.591,0 10.098,42.588,0 10.098,42.584,0 10.095,42.577,0 </coordinates></LinearRing></outerBoundaryIs></Polygon><Polygon><outerBoundaryIs><LinearRing><coordinates>10.431,42.816,0 10.437,42.804,0 10.431,42.787,0 10.421,42.776,0 10.407,42.769,0 10.389,42.763,0 10.408,42.757,0 10.426,42.741,0 10.431,42.722,0 10.416,42.709,0 10.411,42.718,0 10.404,42.719,0 10.394,42.718,0 10.382,42.722,0 10.378,42.728,0 10.368,42.746,0 10.365,42.75,0 10.352,42.755,0 10.338,42.765,0 10.326,42.765,0 10.314,42.743,0 10.305,42.76,0 10.266,42.744,0 10.246,42.757,0 10.241,42.742,0 10.236,42.736,0 10.23,42.735,0 10.148,42.737,0 10.125,42.743,0 10.107,42.757,0 10.102,42.784,0 10.112,42.801,0 10.134,42.812,0 10.159,42.817,0 10.18,42.819,0 10.19,42.817,0 10.213,42.808,0 10.225,42.804,0 10.243,42.803,0 10.266,42.804,0 10.266,42.809,0 10.265,42.81,0 10.263,42.81,0 10.26,42.812,0 10.273,42.819,0 10.273,42.826,0 10.273,42.827,0 10.29,42.825,0 10.327,42.826,0 10.323,42.811,0 10.333,42.806,0 10.348,42.806,0 10.355,42.808,0 10.359,42.817,0 10.366,42.823,0 10.375,42.827,0 10.382,42.832,0 10.393,42.858,0 10.401,42.869,0 10.413,42.873,0 10.422,42.871,0 10.432,42.864,0 10.439,42.855,0 10.444,42.845,0 10.437,42.838,0 10.432,42.828,0 10.431,42.816,0 </coordinates></LinearRing></outerBoundaryIs></Polygon><Polygon><outerBoundaryIs><LinearRing><coordinates>9.844,43.06,0 9.848,43.058,0 9.854,43.059,0 9.843,43.035,0 9.828,43.019,0 9.81,43.017,0 9.793,43.037,0 9.812,43.071,0 9.827,43.081,0 9.841,43.065,0 9.842,43.063,0 9.844,43.06,0 </coordinates></LinearRing></outerBoundaryIs></Polygon><Polygon><outerBoundaryIs><LinearRing><coordinates>12.122,46.972,0 12.128,46.949,0 12.135,46.937,0 12.142,46.928,0 12.142,46.919,0 12.127,46.909,0 12.137,46.906,0 12.161,46.903,0 12.172,46.899,0 12.184,46.891,0 12.189,46.885,0 12.195,46.88,0 12.209,46.877,0 12.251,46.876,0 12.267,46.868,0 12.276,46.846,0 12.276,46.834,0 12.273,46.827,0 12.27,46.82,0 12.267,46.808,0 12.267,46.795,0 12.269,46.789,0 12.275,46.785,0 12.284,46.78,0 12.305,46.774,0 12.326,46.772,0 12.343,46.765,0 12.351,46.743,0 12.37,46.711,0 12.405,46.69,0 12.446,46.679,0 12.5,46.672,0 12.531,46.658,0 12.547,46.652,0 12.562,46.651,0 12.62,46.656,0 12.67,46.653,0 12.679,46.65,0 12.697,46.641,0 12.707,46.638,0 12.716,46.638,0 12.732,46.642,0 12.74,46.643,0 12.774,46.635,0 12.83,46.61,0 13.065,46.598,0 13.146,46.585,0 13.21,46.558,0 13.231,46.552,0 13.271,46.551,0 13.373,46.566,0 13.417,46.56,0 13.478,46.564,0 13.485,46.562,0 13.499,46.551,0 13.507,46.547,0 13.549,46.546,0 13.67,46.519,0 13.685,46.518,0 13.701,46.52,0 13.701,46.512,0 13.699,46.505,0 13.695,46.499,0 13.69,46.493,0 13.688,46.468,0 13.677,46.452,0 13.659,46.445,0 13.634,46.446,0 13.6,46.443,0 13.576,46.427,0 13.554,46.406,0 
13.53,46.388,0 13.484,46.371,0 13.46,46.359,0 13.447,46.355,0 13.434,46.354,0 13.423,46.345,0 13.41,46.324,0 13.391,46.302,0 13.365,46.29,0 13.373,46.28,0 13.379,46.268,0 13.385,46.243,0 13.385,46.243,0 13.385,46.243,0 13.398,46.231,0 13.402,46.217,0 13.41,46.208,0 13.437,46.211,0 13.423,46.229,0 13.438,46.225,0 13.468,46.223,0 13.482,46.218,0 13.51,46.214,0 13.529,46.205,0 13.559,46.184,0 13.584,46.181,0 13.614,46.184,0 13.637,46.18,0 13.645,46.162,0 13.616,46.125,0 13.505,46.066,0 13.482,46.045,0 13.49,46.039,0 13.493,46.032,0 13.49,46.026,0 13.482,46.018,0 13.477,46.016,0 13.462,46.006,0 13.475,45.996,0 13.479,45.993,0 13.48,45.992,0 13.481,45.991,0 13.482,45.99,0 13.482,45.989,0 13.509,45.967,0 13.539,45.969,0 13.572,45.98,0 13.606,45.985,0 13.623,45.966,0 13.608,45.927,0 13.569,45.865,0 13.566,45.83,0 13.581,45.809,0 13.609,45.799,0 13.644,45.796,0 13.66,45.792,0 13.709,45.765,0 13.779,45.743,0 13.858,45.649,0 13.869,45.641,0 13.884,45.635,0 13.893,45.635,0 13.895,45.632,0 13.887,45.619,0 13.848,45.585,0 13.801,45.581,0 13.761,45.596,0 13.712,45.593,0 13.719,45.6,0 13.731,45.613,0 13.757,45.613,0 13.787,45.611,0 13.809,45.614,0 13.796,45.617,0 13.787,45.624,0 13.778,45.635,0 13.74,45.649,0 13.758,45.655,0 13.754,45.672,0 13.74,45.691,0 13.727,45.703,0 13.648,45.762,0 13.63,45.772,0 13.575,45.789,0 13.552,45.792,0 13.535,45.782,0 13.525,45.76,0 13.529,45.74,0 13.555,45.737,0 13.519,45.725,0 13.514,45.721,0 13.508,45.714,0 13.481,45.71,0 13.47,45.707,0 13.452,45.694,0 13.429,45.681,0 13.402,45.675,0 13.377,45.683,0 13.392,45.686,0 13.41,45.691,0 13.425,45.698,0 13.432,45.707,0 13.423,45.724,0 13.382,45.73,0 13.37,45.744,0 13.352,45.74,0 13.255,45.756,0 13.246,45.759,0 13.222,45.776,0 13.216,45.779,0 13.206,45.778,0 13.17,45.768,0 13.158,45.754,0 13.15,45.751,0 13.14,45.755,0 13.132,45.769,0 13.12,45.772,0 13.111,45.767,0 13.109,45.758,0 13.112,45.749,0 13.124,45.744,0 13.124,45.737,0 13.101,45.736,0 13.081,45.727,0 13.07,45.713,0 13.076,45.697,0 13.092,45.689,0 13.112,45.691,0 13.15,45.703,0 13.139,45.689,0 13.104,45.669,0 13.096,45.652,0 13.086,45.642,0 13.061,45.636,0 12.982,45.635,0 12.944,45.628,0 12.781,45.553,0 12.612,45.496,0 12.513,45.47,0 12.497,45.46,0 12.488,45.456,0 12.452,45.45,0 12.424,45.438,0 12.411,45.436,0 12.419,45.451,0 12.43,45.464,0 12.436,45.475,0 12.431,45.484,0 12.441,45.483,0 12.448,45.484,0 12.452,45.489,0 12.452,45.498,0 12.459,45.498,0 12.463,45.489,0 12.468,45.485,0 12.472,45.486,0 12.479,45.491,0 12.466,45.504,0 12.477,45.503,0 12.488,45.504,0 12.498,45.506,0 12.5,45.504,0 12.501,45.506,0 12.504,45.503,0 12.507,45.499,0 12.507,45.498,0 12.504,45.498,0 12.493,45.498,0 12.493,45.491,0 12.516,45.492,0 12.521,45.505,0 12.522,45.519,0 12.531,45.525,0 12.549,45.527,0 12.563,45.531,0 12.574,45.54,0 12.582,45.553,0 12.57,45.549,0 12.545,45.536,0 12.538,45.536,0 12.519,45.55,0 12.511,45.559,0 12.507,45.573,0 12.486,45.565,0 12.459,45.548,0 12.443,45.53,0 12.452,45.518,0 12.452,45.512,0 12.435,45.512,0 12.418,45.523,0 12.411,45.518,0 12.404,45.518,0 12.397,45.539,0 12.385,45.523,0 12.391,45.514,0 12.425,45.504,0 12.425,45.498,0 12.412,45.493,0 12.394,45.491,0 12.381,45.494,0 12.384,45.504,0 12.351,45.505,0 12.31,45.489,0 12.273,45.463,0 12.253,45.436,0 12.253,45.43,0 12.259,45.43,0 12.251,45.42,0 12.247,45.411,0 12.249,45.402,0 12.259,45.395,0 12.25,45.385,0 12.248,45.378,0 12.249,45.371,0 12.246,45.361,0 12.238,45.358,0 12.229,45.357,0 12.224,45.354,0 12.233,45.34,0 12.221,45.327,0 12.217,45.316,0 12.209,45.309,0 12.188,45.306,0 12.175,45.31,0 12.164,45.316,0 
12.155,45.313,0 12.15,45.292,0 12.16,45.283,0 12.169,45.262,0 12.181,45.258,0 12.192,45.263,0 12.2,45.274,0 12.203,45.288,0 12.198,45.299,0 12.218,45.294,0 12.222,45.283,0 12.221,45.269,0 12.225,45.251,0 12.214,45.248,0 12.212,45.243,0 12.216,45.237,0 12.225,45.23,0 12.222,45.216,0 12.231,45.204,0 12.248,45.197,0 12.267,45.196,0 12.264,45.2,0 12.263,45.201,0 12.259,45.203,0 12.274,45.211,0 12.296,45.226,0 12.308,45.23,0 12.299,45.215,0 12.305,45.201,0 12.316,45.186,0 12.322,45.172,0 12.322,45.139,0 12.329,45.101,0 12.319,45.103,0 12.308,45.108,0 12.309,45.114,0 12.308,45.124,0 12.308,45.128,0 12.298,45.106,0 12.297,45.088,0 12.307,45.078,0 12.329,45.08,0 12.326,45.083,0 12.324,45.086,0 12.322,45.093,0 12.341,45.081,0 12.354,45.067,0 12.364,45.052,0 12.377,45.039,0 12.377,45.032,0 12.369,45.031,0 12.365,45.029,0 12.361,45.027,0 12.356,45.024,0 12.369,45.011,0 12.384,45.026,0 12.387,45.039,0 12.381,45.051,0 12.369,45.065,0 12.384,45.056,0 12.402,45.05,0 12.414,45.043,0 12.411,45.032,0 12.427,45.02,0 12.435,45.015,0 12.445,45.011,0 12.465,44.992,0 12.487,44.976,0 12.5,44.983,0 12.497,44.984,0 12.49,44.983,0 12.487,44.983,0 12.487,44.991,0 12.503,44.991,0 12.517,44.987,0 12.528,44.98,0 12.535,44.97,0 12.534,44.961,0 12.524,44.95,0 12.528,44.943,0 12.519,44.934,0 12.516,44.928,0 12.513,44.922,0 12.507,44.922,0 12.5,44.921,0 12.495,44.91,0 12.493,44.878,0 12.488,44.862,0 12.475,44.845,0 12.445,44.82,0 12.444,44.825,0 12.439,44.835,0 12.433,44.846,0 12.425,44.854,0 12.44,44.877,0 12.444,44.89,0 12.439,44.901,0 12.427,44.905,0 12.416,44.9,0 12.407,44.891,0 12.404,44.884,0 12.393,44.868,0 12.392,44.859,0 12.417,44.851,0 12.416,44.843,0 12.409,44.836,0 12.397,44.833,0 12.397,44.826,0 12.404,44.825,0 12.417,44.821,0 12.425,44.82,0 12.417,44.803,0 12.398,44.794,0 12.376,44.792,0 12.358,44.804,0 12.347,44.815,0 12.322,44.833,0 12.304,44.843,0 12.293,44.843,0 12.267,44.826,0 12.267,44.82,0 12.281,44.82,0 12.254,44.751,0 12.247,44.711,0 12.253,44.668,0 12.266,44.636,0 12.276,44.62,0 12.284,44.614,0 12.286,44.602,0 12.281,44.532,0 12.284,44.487,0 12.315,44.387,0 12.319,44.361,0 12.322,44.353,0 12.326,44.348,0 12.34,44.334,0 12.343,44.329,0 12.345,44.308,0 12.351,44.288,0 12.369,44.25,0 12.391,44.222,0 12.418,44.195,0 12.459,44.166,0 12.479,44.139,0 12.511,44.114,0 12.548,44.093,0 12.575,44.085,0 12.632,44.03,0 12.662,44.008,0 12.692,43.99,0 12.711,43.983,0 12.757,43.972,0 12.804,43.967,0 12.823,43.958,0 12.863,43.935,0 12.929,43.916,0 12.939,43.904,0 12.948,43.897,0 13.254,43.703,0 13.371,43.65,0 13.39,43.644,0 13.4,43.635,0 13.447,43.623,0 13.474,43.612,0 13.484,43.616,0 13.491,43.623,0 13.497,43.627,0 13.5,43.628,0 13.502,43.63,0 13.505,43.633,0 13.511,43.633,0 13.517,43.631,0 13.52,43.627,0 13.522,43.622,0 13.525,43.62,0 13.544,43.613,0 13.558,43.596,0 13.57,43.58,0 13.579,43.573,0 13.599,43.569,0 13.616,43.56,0 13.625,43.547,0 13.618,43.531,0 13.761,43.264,0 13.777,43.243,0 13.781,43.236,0 13.787,43.2,0 13.791,43.192,0 13.803,43.178,0 13.835,43.127,0 13.849,43.092,0 13.866,43.007,0 13.945,42.798,0 13.981,42.73,0 14.002,42.698,0 14.064,42.625,0 14.069,42.609,0 14.076,42.599,0 14.221,42.47,0 14.285,42.428,0 14.357,42.393,0 14.388,42.373,0 14.43,42.321,0 14.561,42.225,0 14.596,42.208,0 14.654,42.191,0 14.694,42.185,0 14.71,42.175,0 14.718,42.16,0 14.723,42.119,0 14.73,42.099,0 14.741,42.084,0 14.758,42.079,0 14.781,42.075,0 14.8,42.066,0 14.836,42.044,0 14.871,42.032,0 14.953,42.021,0 14.994,42.01,0 15.008,42.001,0 15.035,41.974,0 15.046,41.969,0 15.064,41.964,0 15.105,41.942,0 15.124,41.934,0 
15.166,41.927,0 15.282,41.928,0 15.401,41.908,0 15.447,41.907,0 15.612,41.928,0 15.775,41.921,0 16.028,41.944,0 16.112,41.928,0 16.112,41.926,0 16.141,41.92,0 16.161,41.892,0 16.18,41.893,0 16.177,41.877,0 16.184,41.858,0 16.193,41.821,0 16.194,41.808,0 16.193,41.791,0 16.185,41.779,0 16.167,41.763,0 16.146,41.749,0 16.128,41.742,0 16.108,41.737,0 16.09,41.726,0 16.064,41.701,0 16.028,41.68,0 15.926,41.64,0 15.901,41.614,0 15.892,41.577,0 15.897,41.536,0 15.912,41.503,0 15.934,41.479,0 15.962,41.459,0 16.022,41.428,0 16.086,41.412,0 16.101,41.403,0 16.115,41.393,0 16.302,41.328,0 16.461,41.262,0 16.521,41.25,0 16.539,41.239,0 16.555,41.227,0 16.594,41.207,0 16.831,41.146,0 16.852,41.133,0 16.859,41.133,0 16.859,41.14,0 16.865,41.14,0 16.886,41.124,0 17.058,41.082,0 17.204,41.021,0 17.277,40.98,0 17.311,40.955,0 17.348,40.912,0 17.362,40.906,0 17.378,40.902,0 17.414,40.881,0 17.476,40.83,0 17.493,40.824,0 17.513,40.82,0 17.549,40.802,0 17.635,40.785,0 17.646,40.78,0 17.749,40.747,0 17.844,40.694,0 17.922,40.683,0 17.956,40.67,0 17.956,40.647,0 17.967,40.647,0 17.993,40.653,0 18.008,40.65,0 18.012,40.644,0 18.012,40.635,0 18.016,40.625,0 18.04,40.608,0 18.044,40.602,0 18.038,40.557,0 18.12,40.504,0 18.212,40.464,0 18.232,40.461,0 18.239,40.457,0 18.259,40.43,0 18.271,40.421,0 18.304,40.4,0 18.33,40.366,0 18.344,40.351,0 18.362,40.345,0 18.371,40.338,0 18.438,40.268,0 18.501,40.152,0 18.505,40.146,0 18.51,40.142,0 18.517,40.139,0 18.512,40.127,0 18.514,40.12,0 18.518,40.114,0 18.517,40.104,0 18.509,40.094,0 18.492,40.084,0 18.484,40.055,0 18.471,40.043,0 18.435,40.022,0 18.412,39.979,0 18.408,39.968,0 18.405,39.947,0 18.395,39.925,0 18.393,39.916,0 18.4,39.89,0 18.401,39.878,0 18.387,39.825,0 18.39,39.817,0 18.384,39.814,0 18.374,39.8,0 18.369,39.796,0 18.347,39.798,0 18.339,39.8,0 18.331,39.803,0 18.283,39.833,0 18.266,39.837,0 18.225,39.837,0 18.212,39.839,0 18.187,39.852,0 18.162,39.86,0 18.131,39.883,0 18.095,39.903,0 18.082,39.906,0 18.072,39.911,0 18.008,39.986,0 17.996,39.995,0 17.996,40.002,0 18.012,40.003,0 18.021,40.01,0 18.023,40.021,0 18.016,40.036,0 18.006,40.045,0 17.979,40.051,0 17.968,40.057,0 18.003,40.074,0 18.012,40.096,0 17.998,40.12,0 17.968,40.146,0 17.941,40.163,0 17.927,40.176,0 17.92,40.191,0 17.92,40.21,0 17.917,40.227,0 17.912,40.24,0 17.9,40.249,0 17.913,40.249,0 17.913,40.255,0 17.864,40.285,0 17.848,40.29,0 17.513,40.303,0 17.494,40.307,0 17.441,40.331,0 17.431,40.331,0 17.41,40.33,0 17.4,40.331,0 17.393,40.335,0 17.375,40.348,0 17.369,40.351,0 17.352,40.355,0 17.297,40.379,0 17.241,40.395,0 17.213,40.406,0 17.201,40.42,0 17.224,40.428,0 17.244,40.441,0 17.248,40.457,0 17.228,40.474,0 17.248,40.48,0 17.296,40.473,0 17.317,40.482,0 17.324,40.498,0 17.305,40.499,0 17.262,40.488,0 17.264,40.491,0 17.269,40.496,0 17.248,40.503,0 17.23,40.497,0 17.211,40.487,0 17.191,40.482,0 17.182,40.485,0 17.177,40.493,0 17.172,40.502,0 17.167,40.509,0 17.157,40.512,0 17.134,40.512,0 17.125,40.515,0 17.05,40.519,0 16.977,40.492,0 16.913,40.445,0 16.783,40.301,0 16.762,40.269,0 16.738,40.211,0 16.731,40.2,0 16.716,40.193,0 16.68,40.146,0 16.625,40.108,0 16.605,40.084,0 16.597,40.046,0 16.6,40.034,0 16.614,39.996,0 16.632,39.966,0 16.622,39.953,0 16.606,39.943,0 16.59,39.92,0 16.543,39.885,0 16.509,39.837,0 16.492,39.805,0 16.49,39.775,0 16.503,39.747,0 16.529,39.721,0 16.529,39.714,0 16.516,39.689,0 16.546,39.661,0 16.592,39.636,0 16.625,39.625,0 16.75,39.62,0 16.783,39.611,0 16.799,39.603,0 16.817,39.591,0 16.831,39.576,0 16.838,39.56,0 16.847,39.552,0 16.906,39.529,0 
16.954,39.499,0 16.971,39.495,0 16.996,39.492,0 17.012,39.486,0 17.024,39.475,0 17.036,39.461,0 17.058,39.441,0 17.089,39.422,0 17.125,39.409,0 17.159,39.406,0 17.123,39.338,0 17.115,39.283,0 17.115,39.269,0 17.118,39.256,0 17.125,39.244,0 17.143,39.222,0 17.146,39.21,0 17.141,39.179,0 17.123,39.121,0 17.125,39.091,0 17.148,39.054,0 17.152,39.046,0 17.159,39.04,0 17.193,39.031,0 17.207,39.029,0 17.187,39.019,0 17.177,39.012,0 17.173,39.005,0 17.172,38.966,0 17.173,38.96,0 17.139,38.936,0 17.136,38.932,0 17.128,38.929,0 17.119,38.919,0 17.105,38.899,0 17.096,38.919,0 17.071,38.923,0 17.043,38.916,0 17.023,38.906,0 16.997,38.929,0 16.982,38.937,0 16.958,38.94,0 16.936,38.938,0 16.839,38.918,0 16.728,38.879,0 16.688,38.856,0 16.68,38.847,0 16.671,38.84,0 16.611,38.816,0 16.586,38.798,0 16.575,38.785,0 16.564,38.756,0 16.551,38.741,0 16.539,38.723,0 16.535,38.7,0 16.547,38.693,0 16.55,38.69,0 16.549,38.672,0 16.559,38.596,0 16.578,38.528,0 16.578,38.503,0 16.57,38.429,0 16.562,38.416,0 16.523,38.387,0 16.509,38.371,0 16.498,38.369,0 16.468,38.348,0 16.436,38.34,0 16.34,38.301,0 16.307,38.277,0 16.17,38.143,0 16.152,38.111,0 16.126,38.005,0 16.112,37.973,0 16.102,37.96,0 16.091,37.949,0 16.078,37.94,0 16.064,37.932,0 16.016,37.924,0 16.002,37.919,0 15.943,37.933,0 15.762,37.925,0 15.736,37.931,0 15.709,37.941,0 15.685,37.953,0 15.666,37.967,0 15.646,37.988,0 15.636,38.009,0 15.639,38.027,0 15.659,38.042,0 15.633,38.074,0 15.625,38.092,0 15.628,38.107,0 15.642,38.126,0 15.648,38.143,0 15.647,38.162,0 15.639,38.186,0 15.633,38.22,0 15.651,38.241,0 15.685,38.253,0 15.787,38.278,0 15.796,38.285,0 15.799,38.291,0 15.813,38.3,0 15.817,38.306,0 15.83,38.351,0 15.905,38.474,0 15.918,38.517,0 15.916,38.55,0 15.901,38.578,0 15.871,38.604,0 15.864,38.608,0 15.851,38.613,0 15.845,38.618,0 15.836,38.628,0 15.834,38.634,0 15.836,38.639,0 15.837,38.649,0 15.845,38.66,0 15.864,38.668,0 15.905,38.679,0 15.969,38.712,0 16.003,38.725,0 16.049,38.728,0 16.121,38.721,0 16.137,38.724,0 16.153,38.731,0 16.18,38.748,0 16.201,38.776,0 16.216,38.814,0 16.222,38.856,0 16.221,38.899,0 16.215,38.919,0 16.205,38.934,0 16.19,38.943,0 16.169,38.947,0 16.155,38.955,0 16.14,38.974,0 16.084,39.075,0 16.043,39.31,0 16.032,39.345,0 15.955,39.489,0 15.934,39.513,0 15.905,39.536,0 15.877,39.551,0 15.868,39.564,0 15.865,39.588,0 15.851,39.615,0 15.837,39.652,0 15.816,39.679,0 15.807,39.695,0 15.789,39.796,0 15.789,39.79,0 15.784,39.81,0 15.779,39.82,0 15.772,39.824,0 15.77,39.83,0 15.783,39.868,0 15.775,39.891,0 15.742,39.929,0 15.735,39.943,0 15.729,39.964,0 15.714,39.981,0 15.679,40.009,0 15.652,40.043,0 15.631,40.057,0 15.625,40.065,0 15.625,40.078,0 15.611,40.073,0 15.536,40.078,0 15.51,40.07,0 15.493,40.059,0 15.46,40.029,0 15.425,40.004,0 15.405,39.999,0 15.377,40.002,0 15.354,40.012,0 15.315,40.034,0 15.303,40.036,0 15.294,40.032,0 15.284,40.03,0 15.273,40.028,0 15.262,40.029,0 15.262,40.036,0 15.28,40.047,0 15.264,40.074,0 15.234,40.1,0 15.21,40.112,0 15.191,40.119,0 15.128,40.169,0 15.113,40.175,0 15.096,40.173,0 15.066,40.166,0 15.048,40.169,0 15.035,40.175,0 15.015,40.194,0 14.974,40.223,0 14.967,40.224,0 14.959,40.231,0 14.923,40.238,0 14.912,40.241,0 14.907,40.258,0 14.932,40.285,0 14.94,40.307,0 14.933,40.324,0 14.933,40.334,0 14.943,40.338,0 14.954,40.34,0 14.965,40.345,0 14.973,40.352,0 14.98,40.359,0 14.99,40.394,0 14.976,40.431,0 14.889,40.573,0 14.862,40.607,0 14.836,40.632,0 14.81,40.653,0 14.783,40.67,0 14.753,40.676,0 14.72,40.667,0 14.691,40.649,0 14.679,40.646,0 14.626,40.649,0 14.614,40.646,0 14.572,40.617,0 
14.545,40.613,0 14.517,40.62,0 14.487,40.632,0 14.472,40.624,0 14.423,40.615,0 14.402,40.602,0 14.356,40.583,0 14.343,40.57,0 14.331,40.584,0 14.329,40.605,0 14.338,40.624,0 14.36,40.632,0 14.38,40.634,0 14.388,40.637,0 14.395,40.65,0 14.403,40.657,0 14.471,40.699,0 14.48,40.711,0 14.475,40.729,0 14.461,40.744,0 14.443,40.755,0 14.426,40.762,0 14.415,40.765,0 14.399,40.767,0 14.391,40.77,0 14.385,40.774,0 14.372,40.787,0 14.367,40.79,0 14.349,40.797,0 14.313,40.828,0 14.295,40.839,0 14.276,40.84,0 14.249,40.837,0 14.224,40.831,0 14.213,40.821,0 14.204,40.801,0 14.182,40.8,0 14.112,40.829,0 14.096,40.834,0 14.083,40.831,0 14.077,40.822,0 14.078,40.81,0 14.082,40.797,0 14.083,40.783,0 14.075,40.788,0 14.041,40.798,0 14.053,40.837,0 14.044,40.875,0 13.966,40.996,0 13.931,41.014,0 13.918,41.023,0 13.915,41.033,0 13.913,41.054,0 13.911,41.064,0 13.885,41.104,0 13.786,41.203,0 13.722,41.252,0 13.709,41.256,0 13.679,41.25,0 13.664,41.25,0 13.657,41.259,0 13.595,41.253,0 13.564,41.238,0 13.576,41.208,0 13.544,41.206,0 13.535,41.208,0 13.526,41.215,0 13.52,41.225,0 13.515,41.229,0 13.508,41.221,0 13.5,41.221,0 13.481,41.239,0 13.325,41.295,0 13.286,41.295,0 13.205,41.284,0 13.187,41.278,0 13.152,41.26,0 13.115,41.251,0 13.091,41.226,0 13.069,41.221,0 13.045,41.227,0 13.037,41.24,0 13.034,41.257,0 13.024,41.273,0 13.013,41.286,0 12.993,41.315,0 12.98,41.331,0 12.924,41.379,0 12.894,41.399,0 12.863,41.413,0 12.842,41.418,0 12.764,41.421,0 12.749,41.423,0 12.679,41.458,0 12.655,41.465,0 12.643,41.458,0 12.636,41.447,0 12.62,41.459,0 12.546,41.544,0 12.449,41.63,0 12.343,41.702,0 12.328,41.711,0 12.301,41.717,0 12.286,41.727,0 12.277,41.729,0 12.247,41.733,0 12.24,41.736,0 12.224,41.75,0 12.216,41.768,0 12.212,41.787,0 12.212,41.808,0 12.207,41.827,0 12.195,41.847,0 12.171,41.879,0 12.148,41.903,0 12.05,41.96,0 12.039,41.965,0 12.03,41.973,0 12.027,41.986,0 12.021,41.993,0 11.993,41.996,0 11.983,42,0 11.97,42.011,0 11.953,42.022,0 11.935,42.031,0 11.917,42.038,0 11.84,42.036,0 11.828,42.034,0 11.823,42.047,0 11.81,42.066,0 11.794,42.084,0 11.78,42.092,0 11.772,42.106,0 11.751,42.128,0 11.746,42.136,0 11.744,42.152,0 11.737,42.169,0 11.683,42.252,0 11.659,42.279,0 11.54,42.349,0 11.49,42.359,0 11.421,42.386,0 11.397,42.393,0 11.397,42.4,0 11.387,42.404,0 11.377,42.407,0 11.366,42.408,0 11.355,42.407,0 11.363,42.4,0 11.334,42.4,0 11.26,42.421,0 11.246,42.422,0 11.228,42.422,0 11.212,42.419,0 11.205,42.411,0 11.201,42.395,0 11.187,42.379,0 11.185,42.366,0 11.175,42.369,0 11.165,42.369,0 11.158,42.368,0 11.157,42.366,0 11.148,42.371,0 11.135,42.384,0 11.107,42.391,0 11.095,42.402,0 11.087,42.418,0 11.081,42.435,0 11.1,42.443,0 11.123,42.446,0 11.167,42.448,0 11.175,42.458,0 11.184,42.48,0 11.19,42.504,0 11.188,42.521,0 11.167,42.546,0 11.159,42.564,0 11.149,42.563,0 11.138,42.559,0 11.129,42.558,0 11.117,42.572,0 11.108,42.591,0 11.098,42.607,0 11.081,42.612,0 11.078,42.632,0 11.054,42.647,0 11.006,42.668,0 11.001,42.68,0 10.996,42.696,0 10.99,42.71,0 10.982,42.716,0 10.973,42.72,0 10.944,42.743,0 10.891,42.764,0 10.732,42.804,0 10.756,42.819,0 10.766,42.835,0 10.767,42.854,0 10.766,42.877,0 10.769,42.884,0 10.775,42.888,0 10.778,42.894,0 10.774,42.908,0 10.764,42.918,0 10.751,42.925,0 10.682,42.949,0 10.633,42.958,0 10.584,42.959,0 10.54,42.949,0 10.544,42.939,0 10.547,42.935,0 10.519,42.925,0 10.5,42.94,0 10.478,42.99,0 10.503,43.005,0 10.518,43.024,0 10.54,43.079,0 10.536,43.091,0 10.536,43.112,0 10.54,43.134,0 10.547,43.147,0 10.539,43.164,0 10.535,43.185,0 10.533,43.226,0 10.529,43.246,0 
10.517,43.267,0 10.438,43.388,0 10.374,43.453,0 10.36,43.465,0 10.327,43.477,0 10.318,43.492,0 10.295,43.568,0 10.265,43.809,0 10.252,43.846,0 10.211,43.92,0 10.181,43.955,0 10.137,43.978,0 10.106,44.016,0 10.091,44.025,0 10.073,44.029,0 10.036,44.048,0 10.015,44.052,0 9.999,44.058,0 9.989,44.06,0 9.985,44.055,0 9.981,44.05,0 9.973,44.045,0 9.963,44.044,0 9.954,44.048,0 9.938,44.06,0 9.905,44.08,0 9.888,44.093,0 9.877,44.088,0 9.845,44.108,0 9.827,44.107,0 9.834,44.1,0 9.829,44.098,0 9.825,44.095,0 9.82,44.093,0 9.825,44.085,0 9.831,44.079,0 9.839,44.075,0 9.848,44.072,0 9.848,44.066,0 9.842,44.063,0 9.839,44.06,0 9.834,44.052,0 9.847,44.046,0 9.843,44.041,0 9.833,44.042,0 9.827,44.055,0 9.82,44.063,0 9.772,44.079,0 9.722,44.113,0 9.71,44.118,0 9.683,44.136,0 9.673,44.141,0 9.644,44.142,0 9.632,44.144,0 9.622,44.148,0 9.587,44.178,0 9.581,44.179,0 9.573,44.191,0 9.557,44.2,0 9.512,44.215,0 9.5,44.222,0 9.49,44.231,0 9.485,44.244,0 9.473,44.24,0 9.454,44.237,0 9.437,44.239,0 9.43,44.247,0 9.423,44.257,0 9.375,44.272,0 9.368,44.294,0 9.263,44.336,0 9.231,44.353,0 9.222,44.344,0 9.214,44.333,0 9.21,44.321,0 9.211,44.305,0 9.166,44.318,0 9.147,44.328,0 9.149,44.34,0 9.131,44.363,0 9.103,44.374,0 9.002,44.387,0 8.953,44.4,0 8.924,44.411,0 8.915,44.409,0 8.869,44.409,0 8.846,44.413,0 8.838,44.417,0 8.828,44.428,0 8.763,44.432,0 8.738,44.429,0 8.725,44.424,0 8.696,44.406,0 8.686,44.398,0 8.679,44.394,0 8.671,44.394,0 8.663,44.395,0 8.656,44.394,0 8.594,44.363,0 8.577,44.36,0 8.565,44.357,0 8.541,44.34,0 8.467,44.304,0 8.445,44.284,0 8.45,44.264,0 8.44,44.253,0 8.437,44.247,0 8.436,44.24,0 8.433,44.238,0 8.418,44.23,0 8.412,44.227,0 8.407,44.215,0 8.409,44.204,0 8.409,44.193,0 8.395,44.182,0 8.37,44.173,0 8.314,44.16,0 8.285,44.148,0 8.27,44.138,0 8.257,44.128,0 8.234,44.103,0 8.231,44.096,0 8.232,44.08,0 8.231,44.072,0 8.224,44.057,0 8.217,44.045,0 8.17,44.006,0 8.153,43.983,0 8.168,43.962,0 8.168,43.956,0 8.145,43.952,0 8.116,43.927,0 8.09,43.92,0 8.082,43.915,0 8.076,43.909,0 8.073,43.904,0 8.068,43.896,0 8.056,43.892,0 8.032,43.887,0 7.96,43.853,0 7.786,43.822,0 7.737,43.798,0 7.695,43.791,0 7.573,43.791,0 7.545,43.784,0 7.532,43.784,0 7.524,43.789,0 7.513,43.792,0 7.503,43.792,0 7.483,43.84,0 7.478,43.866,0 7.493,43.886,0 7.537,43.921,0 7.557,43.944,0 7.609,43.976,0 7.631,43.994,0 7.639,44.005,0 7.647,44.027,0 7.653,44.04,0 7.664,44.049,0 7.679,44.057,0 7.69,44.067,0 7.692,44.085,0 7.676,44.109,0 7.654,44.125,0 7.642,44.144,0 7.656,44.176,0 7.625,44.18,0 7.584,44.161,0 7.555,44.159,0 7.381,44.123,0 7.341,44.124,0 7.331,44.125,0 7.322,44.132,0 7.316,44.14,0 7.309,44.147,0 7.296,44.151,0 7.27,44.154,0 7.251,44.16,0 7.145,44.207,0 7.105,44.218,0 7.046,44.24,0 7.033,44.243,0 7.02,44.242,0 7.008,44.239,0 6.996,44.238,0 6.983,44.242,0 6.973,44.249,0 6.969,44.258,0 6.966,44.268,0 6.959,44.277,0 6.95,44.285,0 6.93,44.295,0 6.921,44.302,0 6.916,44.31,0 6.904,44.33,0 6.896,44.34,0 6.874,44.358,0 6.87,44.363,0 6.866,44.372,0 6.866,44.377,0 6.869,44.383,0 6.877,44.414,0 6.884,44.423,0 6.918,44.436,0 6.892,44.452,0 6.861,44.475,0 6.839,44.503,0 6.836,44.534,0 6.846,44.547,0 6.897,44.575,0 6.932,44.618,0 6.946,44.625,0 6.934,44.647,0 6.941,44.667,0 6.96,44.683,0 6.983,44.692,0 7.001,44.692,0 7.037,44.685,0 7.055,44.685,0 7.049,44.698,0 7.019,44.739,0 7.015,44.747,0 7.01,44.772,0 6.998,44.794,0 6.999,44.795,0 7.004,44.811,0 7.006,44.812,0 7.006,44.816,0 7.007,44.819,0 7.007,44.822,0 7.005,44.828,0 7.001,44.833,0 6.983,44.847,0 6.933,44.862,0 6.915,44.863,0 6.866,44.856,0 6.847,44.859,0 6.778,44.888,0 
6.745,44.908,0 6.728,44.929,0 6.73,44.985,0 6.723,45.013,0 6.697,45.027,0 6.662,45.029,0 6.652,45.036,0 6.64,45.05,0 6.637,45.059,0 6.638,45.067,0 6.637,45.074,0 6.62,45.084,0 6.603,45.103,0 6.615,45.115,0 6.633,45.126,0 6.667,45.14,0 6.676,45.141,0 6.694,45.14,0 6.702,45.141,0 6.711,45.145,0 6.729,45.155,0 6.736,45.157,0 6.771,45.153,0 6.808,45.139,0 6.844,45.13,0 6.877,45.141,0 6.879,45.147,0 6.873,45.152,0 6.868,45.157,0 6.873,45.166,0 6.881,45.168,0 6.905,45.169,0 6.914,45.17,0 6.928,45.18,0 6.946,45.201,0 6.959,45.21,0 6.994,45.221,0 7.03,45.228,0 7.038,45.226,0 7.05,45.215,0 7.055,45.214,0 7.062,45.219,0 7.081,45.243,0 7.108,45.259,0 7.108,45.275,0 7.098,45.295,0 7.093,45.324,0 7.098,45.33,0 7.13,45.357,0 7.151,45.383,0 7.16,45.398,0 7.161,45.411,0 7.153,45.415,0 7.11,45.428,0 7.097,45.435,0 7.089,45.447,0 7.082,45.459,0 7.072,45.47,0 7.028,45.493,0 6.983,45.511,0 6.975,45.526,0 6.97,45.567,0 6.966,45.574,0 6.955,45.586,0 6.953,45.594,0 6.956,45.603,0 6.967,45.62,0 6.969,45.626,0 6.963,45.641,0 6.951,45.647,0 6.919,45.653,0 6.905,45.66,0 6.883,45.676,0 6.869,45.679,0 6.843,45.683,0 6.816,45.697,0 6.796,45.718,0 6.785,45.76,0 6.782,45.777,0 6.783,45.795,0 6.788,45.812,0 6.801,45.826,0 6.816,45.833,0 6.846,45.836,0 6.846,45.838,0 6.849,45.842,0 6.853,45.847,0 6.858,45.849,0 6.862,45.849,0 6.87,45.845,0 6.873,45.845,0 6.88,45.846,0 6.905,45.845,0 6.926,45.85,0 6.949,45.858,0 6.969,45.87,0 6.983,45.886,0 6.989,45.899,0 6.997,45.911,0 7.008,45.921,0 7.022,45.925,0 7.067,45.89,0 7.09,45.881,0 7.121,45.876,0 7.154,45.877,0 7.184,45.88,0 7.245,45.898,0 7.274,45.91,0 7.287,45.913,0 7.362,45.908,0 7.394,45.916,0 7.453,45.946,0 7.483,45.955,0 7.504,45.957,0 7.515,45.967,0 7.524,45.978,0 7.541,45.984,0 7.643,45.966,0 7.659,45.96,0 7.674,45.95,0 7.693,45.931,0 7.694,45.929,0 7.706,45.926,0 7.715,45.927,0 7.722,45.93,0 7.732,45.93,0 7.78,45.918,0 7.808,45.918,0 7.825,45.915,0 7.831,45.914,0 7.844,45.919,0 7.846,45.923,0 7.845,45.928,0 7.848,45.938,0 7.872,45.969,0 7.898,45.982,0 7.969,45.993,0 7.979,45.995,0 7.986,45.999,0 7.998,46.011,0 7.999,46.013,0 8.009,46.028,0 8.011,46.03,0 8.016,46.058,0 8.016,46.069,0 8.018,46.081,0 8.025,46.091,0 8.035,46.097,0 8.056,46.098,0 8.067,46.101,0 8.111,46.127,0 8.132,46.159,0 8.13,46.196,0 8.1,46.236,0 8.077,46.25,0 8.073,46.254,0 8.077,46.262,0 8.087,46.272,0 8.107,46.286,0 8.128,46.292,0 8.172,46.299,0 8.193,46.309,0 8.242,46.354,0 8.27,46.364,0 8.282,46.37,0 8.291,46.378,0 8.297,46.388,0 8.297,46.398,0 8.29,46.401,0 8.287,46.405,0 8.295,46.418,0 8.316,46.434,0 8.343,46.444,0 8.399,46.452,0 8.428,46.449,0 8.442,46.435,0 8.446,46.412,0 8.446,46.382,0 8.443,46.353,0 8.427,46.302,0 8.423,46.276,0 8.427,46.251,0 8.438,46.235,0 8.457,46.225,0 8.483,46.218,0 8.51,46.208,0 8.539,46.188,0 8.602,46.123,0 8.612,46.119,0 8.631,46.115,0 8.677,46.096,0 8.695,46.095,0 8.702,46.098,0 8.718,46.108,0 8.724,46.11,0 8.732,46.107,0 8.739,46.098,0 8.747,46.094,0 8.763,46.093,0 8.794,46.093,0 8.809,46.09,0 8.834,46.066,0 8.82,46.043,0 8.791,46.019,0 8.773,45.991,0 8.77,45.986,0 8.768,45.983,0 8.785,45.982,0 8.8,45.979,0 8.858,45.957,0 8.864,45.953,0 8.871,45.947,0 8.881,45.931,0 8.898,45.91,0 8.907,45.896,0 8.912,45.883,0 8.914,45.866,0 8.91,45.854,0 8.904,45.842,0 8.9,45.826,0 8.94,45.835,0 8.972,45.825,0 9.002,45.821,0 9.034,45.848,0 9.059,45.882,0 9.063,45.899,0 9.052,45.916,0 9.042,45.92,0 9.021,45.923,0 9.011,45.927,0 9.002,45.936,0 8.993,45.954,0 8.983,45.962,0 8.981,45.964,0 8.98,45.967,0 8.981,45.969,0 8.983,45.972,0 9.016,45.993,0 8.998,46.028,0 9.002,46.039,0 
9.028,46.053,0 9.05,46.058,0 9.059,46.062,0 9.067,46.071,0 9.07,46.083,0 9.068,46.106,0 9.072,46.119,0 9.091,46.138,0 9.163,46.172,0 9.171,46.183,0 9.176,46.194,0 9.181,46.204,0 9.192,46.21,0 9.204,46.214,0 9.216,46.221,0 9.225,46.231,0 9.24,46.267,0 9.269,46.309,0 9.275,46.331,0 9.274,46.344,0 9.26,46.38,0 9.26,46.394,0 9.263,46.407,0 9.261,46.417,0 9.248,46.423,0 9.238,46.437,0 9.246,46.461,0 9.263,46.485,0 9.282,46.497,0 9.331,46.502,0 9.351,46.498,0 9.352,46.485,0 9.377,46.469,0 9.385,46.466,0 9.395,46.469,0 9.4,46.475,0 9.404,46.483,0 9.411,46.489,0 9.427,46.497,0 9.435,46.498,0 9.438,46.492,0 9.444,46.396,0 9.442,46.381,0 9.444,46.375,0 9.452,46.37,0 9.474,46.362,0 9.483,46.357,0 9.503,46.321,0 9.515,46.309,0 9.536,46.299,0 9.56,46.293,0 9.674,46.292,0 9.693,46.297,0 9.708,46.312,0 9.709,46.32,0 9.707,46.331,0 9.709,46.342,0 9.72,46.351,0 9.731,46.351,0 9.755,46.341,0 9.768,46.339,0 9.789,46.343,0 9.855,46.367,0 9.899,46.372,0 9.918,46.371,0 9.939,46.367,0 9.964,46.356,0 9.971,46.34,0 9.971,46.32,0 9.978,46.298,0 9.992,46.284,0 10.032,46.26,0 10.042,46.243,0 10.043,46.22,0 10.076,46.22,0 10.118,46.231,0 10.146,46.243,0 10.159,46.262,0 10.146,46.28,0 10.105,46.309,0 10.096,46.321,0 10.092,46.329,0 10.092,46.338,0 10.097,46.352,0 10.105,46.361,0 10.126,46.374,0 10.133,46.381,0 10.141,46.403,0 10.133,46.414,0 10.116,46.419,0 10.071,46.425,0 10.042,46.433,0 10.026,46.446,0 10.044,46.467,0 10.035,46.471,0 10.03,46.477,0 10.028,46.484,0 10.027,46.493,0 10.031,46.504,0 10.031,46.526,0 10.033,46.533,0 10.041,46.542,0 10.063,46.557,0 10.071,46.564,0 10.083,46.597,0 10.088,46.604,0 10.097,46.608,0 10.192,46.627,0 10.218,46.627,0 10.234,46.618,0 10.236,46.607,0 10.23,46.586,0 10.235,46.575,0 10.276,46.566,0 10.284,46.561,0 10.289,46.556,0 10.295,46.551,0 10.307,46.547,0 10.319,46.546,0 10.354,46.548,0 10.426,46.535,0 10.444,46.538,0 10.458,46.554,0 10.466,46.578,0 10.467,46.604,0 10.459,46.624,0 10.438,46.636,0 10.396,46.639,0 10.378,46.653,0 10.369,46.672,0 10.374,46.682,0 10.385,46.689,0 10.394,46.701,0 10.397,46.715,0 10.396,46.726,0 10.4,46.736,0 10.417,46.743,0 10.429,46.756,0 10.426,46.769,0 10.419,46.784,0 10.417,46.799,0 10.439,46.817,0 10.445,46.823,0 10.449,46.832,0 10.454,46.864,0 10.486,46.846,0 10.528,46.843,0 10.629,46.862,0 10.647,46.864,0 10.662,46.861,0 10.739,46.83,0 10.749,46.819,0 10.744,46.813,0 10.722,46.8,0 10.717,46.795,0 10.723,46.786,0 10.734,46.786,0 10.755,46.791,0 10.766,46.788,0 10.795,46.777,0 10.805,46.777,0 10.824,46.78,0 10.834,46.78,0 10.843,46.777,0 10.86,46.767,0 10.87,46.764,0 10.88,46.765,0 10.914,46.772,0 10.931,46.774,0 10.966,46.772,0 10.983,46.768,0 10.997,46.769,0 11.011,46.779,0 11.033,46.806,0 11.037,46.808,0 11.049,46.812,0 11.053,46.815,0 11.055,46.82,0 11.053,46.83,0 11.054,46.834,0 11.073,46.865,0 11.084,46.9,0 11.092,46.912,0 11.157,46.957,0 11.174,46.964,0 11.244,46.979,0 11.314,46.987,0 11.349,46.982,0 11.381,46.972,0 11.411,46.97,0 11.445,46.993,0 11.445,46.993,0 11.453,47.001,0 11.462,47.006,0 11.472,47.007,0 11.489,47.004,0 11.496,47.002,0 11.502,46.998,0 11.507,46.993,0 11.515,46.989,0 11.524,46.988,0 11.534,46.99,0 11.543,46.993,0 11.543,46.993,0 11.544,46.993,0 11.544,46.993,0 11.573,46.999,0 11.596,47,0 11.648,46.993,0 11.648,46.993,0 11.65,46.993,0 11.657,46.993,0 11.665,46.993,0 11.684,46.992,0 11.716,46.975,0 11.735,46.971,0 11.746,46.972,0 11.766,46.983,0 11.777,46.988,0 11.823,46.993,0 11.857,47.012,0 11.9,47.028,0 11.944,47.038,0 12.015,47.04,0 12.116,47.077,0 12.181,47.085,0 12.204,47.08,0 12.204,47.053,0 12.182,47.034,0 
12.122,47.011,0 12.111,46.993,0 12.118,46.983,0 12.122,46.972,0 </coordinates></LinearRing></outerBoundaryIs><innerBoundaryIs><LinearRing><coordinates>12.4,43.903,0 12.429,43.892,0 12.461,43.895,0 12.479,43.917,0 12.478,43.92,0 12.478,43.923,0 12.48,43.926,0 12.483,43.929,0 12.49,43.939,0 12.492,43.956,0 12.489,43.973,0 12.482,43.983,0 12.453,43.979,0 12.421,43.967,0 12.396,43.948,0 12.386,43.925,0 12.4,43.903,0 </coordinates></LinearRing></innerBoundaryIs><innerBoundaryIs><LinearRing><coordinates>12.444,41.902,0 12.449,41.9,0 12.455,41.9,0 12.458,41.902,0 12.455,41.908,0 12.447,41.907,0 12.444,41.902,0 </coordinates></LinearRing></innerBoundaryIs></Polygon></MultiGeometry>
</Placemark> </kml>"""
k = kml.KML()
k.from_string(doc)
self.assertEqual(len(list(k.features())), 1)
self.assertTrue(
isinstance(list(k.features())[0].geometry, MultiPolygon))
k2 = kml.KML()
k2.from_string(k.to_string())
self.assertEqual(k.to_string(), k2.to_string())
    def test_atom(self):
        # placeholder; atom elements are exercised in AtomTestCase below
        pass
def test_schema(self):
doc = """<Schema name="TrailHeadType" id="TrailHeadTypeId">
<SimpleField type="string" name="TrailHeadName">
<displayName><![CDATA[<b>Trail Head Name</b>]]></displayName>
</SimpleField>
<SimpleField type="double" name="TrailLength">
<displayName><![CDATA[<i>The length in miles</i>]]></displayName>
</SimpleField>
<SimpleField type="int" name="ElevationGain">
<displayName><![CDATA[<i>change in altitude</i>]]></displayName>
</SimpleField>
</Schema> """
s = kml.Schema(ns='', id='default')
s.from_string(doc)
self.assertEqual(len(list(s.simple_fields)), 3)
self.assertEqual(list(s.simple_fields)[0]['type'], 'string')
self.assertEqual(list(s.simple_fields)[1]['type'], 'double')
self.assertEqual(list(s.simple_fields)[2]['type'], 'int')
self.assertEqual(list(s.simple_fields)[0]['name'], 'TrailHeadName')
self.assertEqual(list(s.simple_fields)[1]['name'], 'TrailLength')
self.assertEqual(list(s.simple_fields)[2]['name'], 'ElevationGain')
self.assertEqual(list(s.simple_fields)[0][
'displayName'
], '<b>Trail Head Name</b>')
self.assertEqual(list(s.simple_fields)[1][
'displayName'
], '<i>The length in miles</i>')
self.assertEqual(list(s.simple_fields)[2][
'displayName'
], '<i>change in altitude</i>')
s1 = kml.Schema(ns='', id='default')
s1.from_string(s.to_string())
self.assertEqual(len(list(s1.simple_fields)), 3)
self.assertEqual(list(s1.simple_fields)[0]['type'], 'string')
self.assertEqual(list(s1.simple_fields)[1]['name'], 'TrailLength')
self.assertEqual(list(s1.simple_fields)[2][
'displayName'
], '<i>change in altitude</i>')
self.assertEqual(s.to_string(), s1.to_string())
doc1 = """<kml xmlns="http://www.opengis.net/kml/2.2">
<Document>
%s
</Document>
</kml>""" % doc
k = kml.KML()
k.from_string(doc1)
d = list(k.features())[0]
s2 = list(d.schemata())[0]
s.ns = config.NS
self.assertEqual(s.to_string(), s2.to_string())
k1 = kml.KML()
k1.from_string(k.to_string())
self.assertTrue('Schema' in k1.to_string())
self.assertTrue('SimpleField' in k1.to_string())
self.assertEqual(k1.to_string(), k.to_string())
def test_schema_data(self):
doc = """<SchemaData schemaUrl="#TrailHeadTypeId">
<SimpleData name="TrailHeadName">Pi in the sky</SimpleData>
<SimpleData name="TrailLength">3.14159</SimpleData>
<SimpleData name="ElevationGain">10</SimpleData>
</SchemaData>"""
sd = kml.SchemaData(ns='', schema_url='#default')
sd.from_string(doc)
self.assertEqual(sd.schema_url, '#TrailHeadTypeId')
self.assertEqual(
sd.data[0], {'name': 'TrailHeadName',
'value': 'Pi in the sky'})
self.assertEqual(
sd.data[1], {'name': 'TrailLength',
'value': '3.14159'})
self.assertEqual(sd.data[2], {'name': 'ElevationGain', 'value': '10'})
sd1 = kml.SchemaData(ns='', schema_url='#default')
sd1.from_string(sd.to_string())
self.assertEqual(sd1.schema_url, '#TrailHeadTypeId')
self.assertEqual(sd.to_string(), sd1.to_string())
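    # A feature's snippet accepts either a dict with 'text' and an optional
    # 'maxLines', or a plain string, which clears any maxLines setting.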
def test_snippet(self):
doc = """<kml xmlns="http://www.opengis.net/kml/2.2">
<Placemark>
<Snippet maxLines="2" >Short Desc</Snippet>
</Placemark> </kml>"""
k = kml.KML()
k.from_string(doc)
self.assertEqual(list(k.features())[0].snippet['text'], 'Short Desc')
self.assertEqual(list(k.features())[0].snippet['maxLines'], 2)
list(k.features())[0]._snippet['maxLines'] = 3
self.assertEqual(list(k.features())[0].snippet['maxLines'], 3)
self.assertTrue('maxLines="3"' in k.to_string())
        list(k.features())[0].snippet = {'text': 'Another Snippet'}
        self.assertFalse('maxLines' in k.to_string())
        self.assertTrue('Another Snippet' in k.to_string())
        list(k.features())[0].snippet = 'Different Snippet'
        self.assertFalse('maxLines' in k.to_string())
        self.assertTrue('Different Snippet' in k.to_string())
def test_from_wrong_string(self):
doc = kml.KML()
self.assertRaises(TypeError, doc.from_string, '<xml></xml>')
def test_address(self):
doc = kml.Document()
doc.from_string("""
<kml:Document xmlns:kml="http://www.opengis.net/kml/2.2" id="pm-id">
<kml:name>pm-name</kml:name>
<kml:description>pm-description</kml:description>
<kml:visibility>1</kml:visibility>
<kml:address>1600 Amphitheatre Parkway, Mountain View, CA 94043, USA</kml:address>
</kml:Document>
""")
doc2 = kml.Document()
doc2.from_string(doc.to_string())
self.assertEqual(doc.to_string(), doc2.to_string())
def test_phone_number(self):
doc = kml.Document()
doc.from_string("""
<kml:Document xmlns:kml="http://www.opengis.net/kml/2.2" id="pm-id">
<kml:name>pm-name</kml:name>
<kml:description>pm-description</kml:description>
<kml:visibility>1</kml:visibility>
<kml:phoneNumber>+1 234 567 8901</kml:phoneNumber>
</kml:Document>
""")
doc2 = kml.Document()
doc2.from_string(doc.to_string())
self.assertEqual(doc.to_string(), doc2.to_string())
def test_groundoverlay(self):
doc = kml.KML()
doc.from_string(
"""
<kml xmlns="http://www.opengis.net/kml/2.2">
<Folder>
<name>Ground Overlays</name>
<description>Examples of ground overlays</description>
<GroundOverlay>
<name>Large-scale overlay on terrain</name>
<description>Overlay shows Mount Etna erupting
on July 13th, 2001.</description>
<Icon>
<href>http://developers.google.com/kml/documentation/images/etna.jpg</href>
</Icon>
<LatLonBox>
<north>37.91904192681665</north>
<south>37.46543388598137</south>
<east>15.35832653742206</east>
<west>14.60128369746704</west>
<rotation>-0.1556640799496235</rotation>
</LatLonBox>
</GroundOverlay>
</Folder>
</kml>
""")
doc2 = kml.KML()
doc2.from_string(doc.to_string())
self.assertEqual(doc.to_string(), doc2.to_string())
    def test_linearring_placemark(self):
        doc = kml.KML()
        doc.from_string("""<kml xmlns="http://www.opengis.net/kml/2.2">
<Placemark>
<LinearRing>
<coordinates>0.0,0.0 1.0,0.0 1.0,1.0 0.0,0.0</coordinates>
</LinearRing>
</Placemark> </kml>""")
doc2 = kml.KML()
doc2.from_string(doc.to_string())
self.assertTrue(
isinstance(list(doc.features())[0].geometry, LinearRing))
self.assertEqual(doc.to_string(), doc2.to_string())
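# StyleTestCase: programmatic style handling; assigning a string to
# Feature.styleUrl transparently wraps it in a styles.StyleUrl object.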
class StyleTestCase(unittest.TestCase):
def test_styleurl(self):
f = kml.Document()
f.styleUrl = '#somestyle'
self.assertEqual(f.styleUrl, '#somestyle')
self.assertTrue(isinstance(f._styleUrl, styles.StyleUrl))
s = styles.StyleUrl(config.NS, url='#otherstyle')
f.styleUrl = s
self.assertTrue(isinstance(f._styleUrl, styles.StyleUrl))
self.assertEqual(f.styleUrl, '#otherstyle')
f2 = kml.Document()
f2.from_string(f.to_string())
self.assertEqual(f.to_string(), f2.to_string())
def test_style(self):
lstyle = styles.LineStyle(color='red', width=2.0)
style = styles.Style(styles=[lstyle])
f = kml.Document(styles=[style])
f2 = kml.Document()
f2.from_string(f.to_string(prettyprint=True))
self.assertEqual(f.to_string(), f2.to_string())
    def test_polystyle_fill(self):
        style = styles.PolyStyle()
        style.fill = 0
        self.assertEqual(style.fill, 0)
    def test_polystyle_outline(self):
        style = styles.PolyStyle()
        style.outline = 0
        self.assertEqual(style.outline, 0)
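# StyleUsageTestCase: styles passed to the constructor and styles added via
# append_style() must produce identical serializations.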
class StyleUsageTestCase(unittest.TestCase):
def test_create_document_style(self):
style = styles.Style(styles=[styles.PolyStyle(color='7f000000')])
doc = kml.Document(styles=[style])
doc2 = kml.Document()
doc2.append_style(style)
expected = """
<kml:Document xmlns:kml="http://www.opengis.net/kml/2.2">
<kml:visibility>1</kml:visibility>
<kml:Style>
<kml:PolyStyle>
<kml:color>7f000000</kml:color>
<kml:fill>1</kml:fill>
<kml:outline>1</kml:outline>
</kml:PolyStyle>
</kml:Style>
</kml:Document>
"""
doc3 = kml.Document()
doc3.from_string(expected)
self.assertEqual(doc.to_string(), doc2.to_string())
self.assertEqual(doc2.to_string(), doc3.to_string())
self.assertEqual(doc.to_string(), doc3.to_string())
def test_create_placemark_style(self):
style = styles.Style(styles=[styles.PolyStyle(color='7f000000')])
place = kml.Placemark(styles=[style])
place2 = kml.Placemark()
place2.append_style(style)
expected = """
<kml:Placemark xmlns:kml="http://www.opengis.net/kml/2.2">
<kml:visibility>1</kml:visibility>
<kml:Style>
<kml:PolyStyle>
<kml:color>7f000000</kml:color>
<kml:fill>1</kml:fill>
<kml:outline>1</kml:outline>
</kml:PolyStyle>
</kml:Style>
</kml:Placemark>
"""
place3 = kml.Placemark()
place3.from_string(expected)
self.assertEqual(place.to_string(), place2.to_string())
self.assertEqual(place2.to_string(), place3.to_string())
self.assertEqual(place.to_string(), place3.to_string())
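# StyleFromStringTestCase: parsing of the individual style elements
# (BalloonStyle, LabelStyle, IconStyle, LineStyle, PolyStyle, StyleMap)
# from KML documents.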
class StyleFromStringTestCase(unittest.TestCase):
def test_styleurl(self):
doc = """<kml xmlns="http://www.opengis.net/kml/2.2">
<Document>
<name>Document.kml</name>
<open>1</open>
<styleUrl>#default</styleUrl>
</Document>
</kml>"""
k = kml.KML()
k.from_string(doc)
self.assertEqual(len(list(k.features())), 1)
self.assertEqual(list(k.features())[0].styleUrl, '#default')
k2 = kml.KML()
k2.from_string(k.to_string())
self.assertEqual(k.to_string(), k2.to_string())
def test_balloonstyle(self):
doc = """<kml xmlns="http://www.opengis.net/kml/2.2">
<Document>
<name>Document.kml</name>
<Style id="exampleBalloonStyle">
<BalloonStyle>
<!-- a background color for the balloon -->
<bgColor>ffffffbb</bgColor>
<!-- styling of the balloon text -->
<textColor>ff000000</textColor>
<text><![CDATA[
<b><font color="#CC0000" size="+3">$[name]</font></b>
<br/><br/>
<font face="Courier">$[description]</font>
<br/><br/>
Extra text that will appear in the description balloon
<br/><br/>
<!-- insert the to/from hyperlinks -->
$[geDirections]
]]></text>
<!-- kml:displayModeEnum -->
<displayMode>default</displayMode>
</BalloonStyle>
</Style>
</Document>
</kml>"""
k = kml.KML()
k.from_string(doc)
self.assertEqual(len(list(k.features())), 1)
self.assertTrue(
isinstance(list(list(k.features())[0].styles())[0], styles.Style))
style = list(list(list(k.features())[0].styles())[0].styles())[0]
self.assertTrue(isinstance(style, styles.BalloonStyle))
self.assertEqual(style.bgColor, 'ffffffbb')
self.assertEqual(style.textColor, 'ff000000')
self.assertEqual(style.displayMode, 'default')
self.assertTrue('$[geDirections]' in style.text)
self.assertTrue('$[description]' in style.text)
k2 = kml.KML()
k2.from_string(k.to_string())
self.assertEqual(k2.to_string(), k.to_string())
def test_balloonstyle_old_color(self):
doc = """<kml xmlns="http://www.opengis.net/kml/2.2">
<Document>
<name>Document.kml</name>
<Style id="exampleBalloonStyle">
<BalloonStyle>
<!-- a background color for the balloon -->
<color>ffffffbb</color>
</BalloonStyle>
</Style>
</Document>
</kml>"""
k = kml.KML()
k.from_string(doc)
self.assertEqual(len(list(k.features())), 1)
self.assertTrue(
isinstance(list(list(k.features())[0].styles())[0], styles.Style))
style = list(list(list(k.features())[0].styles())[0].styles())[0]
self.assertTrue(isinstance(style, styles.BalloonStyle))
self.assertEqual(style.bgColor, 'ffffffbb')
k2 = kml.KML()
k2.from_string(k.to_string())
self.assertEqual(k2.to_string(), k.to_string())
def test_labelstyle(self):
doc = """<kml xmlns="http://www.opengis.net/kml/2.2">
<Document>
<name>Document.kml</name>
<open>1</open>
<Style id="exampleStyleDocument">
<LabelStyle>
<color>ff0000cc</color>
</LabelStyle>
</Style>
</Document>
</kml>"""
k = kml.KML()
k.from_string(doc)
self.assertEqual(len(list(k.features())), 1)
self.assertTrue(
isinstance(list(list(k.features())[0].styles())[0], styles.Style))
style = list(list(list(k.features())[0].styles())[0].styles())[0]
self.assertTrue(isinstance(style, styles.LabelStyle))
self.assertEqual(style.color, 'ff0000cc')
self.assertEqual(style.colorMode, None)
k2 = kml.KML()
k2.from_string(k.to_string())
self.assertEqual(k.to_string(), k2.to_string())
def test_iconstyle(self):
doc = """<kml xmlns="http://www.opengis.net/kml/2.2">
<Document>
<Style id="randomColorIcon">
<IconStyle>
<color>ff00ff00</color>
<colorMode>random</colorMode>
<scale>1.1</scale>
<heading>0</heading>
<Icon>
<href>http://maps.google.com/icon21.png</href>
</Icon>
</IconStyle>
</Style>
</Document>
</kml>"""
k = kml.KML()
k.from_string(doc)
        self.assertEqual(len(list(k.features())), 1)
self.assertTrue(
isinstance(list(list(k.features())[0].styles())[0], styles.Style))
style = list(list(list(k.features())[0].styles())[0].styles())[0]
self.assertTrue(isinstance(style, styles.IconStyle))
self.assertEqual(style.color, 'ff00ff00')
self.assertEqual(style.scale, 1.1)
self.assertEqual(style.colorMode, 'random')
self.assertEqual(style.heading, 0.0)
self.assertEqual(style.icon_href, 'http://maps.google.com/icon21.png')
k2 = kml.KML()
k2.from_string(k.to_string())
self.assertEqual(k.to_string(), k2.to_string())
def test_linestyle(self):
doc = """<kml xmlns="http://www.opengis.net/kml/2.2">
<Document>
<name>LineStyle.kml</name>
<open>1</open>
<Style id="linestyleExample">
<LineStyle>
<color>7f0000ff</color>
<width>4</width>
</LineStyle>
</Style>
</Document>
</kml>"""
k = kml.KML()
k.from_string(doc)
self.assertEqual(len(list(k.features())), 1)
self.assertTrue(
isinstance(list(list(k.features())[0].styles())[0], styles.Style))
style = list(list(list(k.features())[0].styles())[0].styles())[0]
self.assertTrue(isinstance(style, styles.LineStyle))
self.assertEqual(style.color, '7f0000ff')
self.assertEqual(style.width, 4)
k2 = kml.KML()
k2.from_string(k.to_string())
self.assertEqual(k.to_string(), k2.to_string())
def test_polystyle(self):
doc = """<kml xmlns="http://www.opengis.net/kml/2.2">
<Document>
<name>PolygonStyle.kml</name>
<open>1</open>
<Style id="examplePolyStyle">
<PolyStyle>
<color>ff0000cc</color>
<colorMode>random</colorMode>
</PolyStyle>
</Style>
</Document>
</kml>"""
# XXX fill and outline
k = kml.KML()
k.from_string(doc)
self.assertEqual(len(list(k.features())), 1)
self.assertTrue(
isinstance(list(list(k.features())[0].styles())[0], styles.Style))
style = list(list(list(k.features())[0].styles())[0].styles())[0]
self.assertTrue(isinstance(style, styles.PolyStyle))
self.assertEqual(style.color, 'ff0000cc')
self.assertEqual(style.colorMode, 'random')
k2 = kml.KML()
k2.from_string(k.to_string())
self.assertEqual(k.to_string(), k2.to_string())
def test_polystyle_float_fill(self):
doc = """<kml xmlns="http://www.opengis.net/kml/2.2">
<Document>
<name>PolygonStyle.kml</name>
<open>1</open>
<Style id="examplePolyStyle">
<PolyStyle>
<fill>0.0</fill>
</PolyStyle>
</Style>
</Document>
</kml>"""
k = kml.KML()
k.from_string(doc)
style = list(list(list(k.features())[0].styles())[0].styles())[0]
self.assertTrue(isinstance(style, styles.PolyStyle))
self.assertEqual(style.fill, 0)
k2 = kml.KML()
k2.from_string(k.to_string())
self.assertEqual(k.to_string(), k2.to_string())
def test_polystyle_float_outline(self):
doc = """<kml xmlns="http://www.opengis.net/kml/2.2">
<Document>
<name>PolygonStyle.kml</name>
<open>1</open>
<Style id="examplePolyStyle">
<PolyStyle>
<outline>0.0</outline>
</PolyStyle>
</Style>
</Document>
</kml>"""
k = kml.KML()
k.from_string(doc)
style = list(list(list(k.features())[0].styles())[0].styles())[0]
self.assertTrue(isinstance(style, styles.PolyStyle))
self.assertEqual(style.outline, 0)
k2 = kml.KML()
k2.from_string(k.to_string())
self.assertEqual(k.to_string(), k2.to_string())
def test_styles(self):
doc = """<kml xmlns="http://www.opengis.net/kml/2.2">
<Document>
<!-- Begin Style Definitions -->
<Style id="myDefaultStyles">
<IconStyle>
<color>a1ff00ff</color>
<scale>1.399999976158142</scale>
<Icon>
<href>http://myserver.com/icon.jpg</href>
</Icon>
</IconStyle>
<LabelStyle>
<color>7fffaaff</color>
<scale>1.5</scale>
</LabelStyle>
<LineStyle>
<color>ff0000ff</color>
<width>15</width>
</LineStyle>
<PolyStyle>
<color>7f7faaaa</color>
<colorMode>random</colorMode>
</PolyStyle>
</Style>
<!-- End Style Definitions -->
</Document>
</kml>"""
k = kml.KML()
k.from_string(doc)
self.assertEqual(len(list(k.features())), 1)
self.assertTrue(
isinstance(list(list(k.features())[0].styles())[0], styles.Style))
style = list(list(list(k.features())[0].styles())[0].styles())
self.assertEqual(len(style), 4)
k2 = kml.KML()
k2.from_string(k.to_string())
self.assertEqual(k.to_string(), k2.to_string())
def test_stylemapurl(self):
doc = """<kml xmlns="http://www.opengis.net/kml/2.2">
<Document>
<StyleMap id="styleMapExample">
<Pair>
<key>normal</key>
<styleUrl>#normalState</styleUrl>
</Pair>
<Pair>
<key>highlight</key>
<styleUrl>#highlightState</styleUrl>
</Pair>
</StyleMap>
</Document>
</kml>"""
k = kml.KML()
k.from_string(doc)
self.assertEqual(len(list(k.features())), 1)
self.assertTrue(
isinstance(
list(list(k.features())[0].styles())[0], styles.StyleMap))
        sm = list(list(k.features())[0].styles())[0]
self.assertTrue(isinstance(sm.normal, styles.StyleUrl))
self.assertEqual(sm.normal.url, '#normalState')
self.assertTrue(isinstance(sm.highlight, styles.StyleUrl))
self.assertEqual(sm.highlight.url, '#highlightState')
k2 = kml.KML()
k2.from_string(k.to_string())
self.assertEqual(k.to_string(), k2.to_string())
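    # A StyleMap Pair may hold an inline Style instead of a styleUrl; the
    # normal and highlight variants are exposed as attributes.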
def test_stylemapstyles(self):
doc = """<kml xmlns="http://www.opengis.net/kml/2.2">
<Document>
<StyleMap id="styleMapExample">
<Pair>
<key>normal</key>
<Style id="exampleStyleDocument">
<LabelStyle>
<color>ff0000cc</color>
</LabelStyle>
</Style>
</Pair>
<Pair>
<key>highlight</key>
<Style id="examplePolyStyle">
<PolyStyle>
<color>ff0000cc</color>
<colorMode>random</colorMode>
</PolyStyle>
<LineStyle>
<color>ff0000ff</color>
<width>15</width>
</LineStyle>
</Style>
</Pair>
</StyleMap>
</Document>
</kml>"""
k = kml.KML()
k.from_string(doc)
self.assertEqual(len(list(k.features())), 1)
self.assertTrue(
isinstance(
list(list(k.features())[0].styles())[0], styles.StyleMap))
        sm = list(list(k.features())[0].styles())[0]
self.assertTrue(isinstance(sm.normal, styles.Style))
self.assertEqual(len(list(sm.normal.styles())), 1)
self.assertTrue(
isinstance(list(sm.normal.styles())[0], styles.LabelStyle))
self.assertTrue(isinstance(sm.highlight, styles.Style))
self.assertEqual(len(list(sm.highlight.styles())), 2)
self.assertTrue(
isinstance(list(sm.highlight.styles())[0], styles.LineStyle))
self.assertTrue(
isinstance(list(sm.highlight.styles())[1], styles.PolyStyle))
k2 = kml.KML()
k2.from_string(k.to_string())
self.assertEqual(k.to_string(), k2.to_string())
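    # get_style_by_url resolves the fragment part of a URL against the
    # styles and style maps defined on the document itself.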
def test_get_style_by_url(self):
doc = """<kml xmlns="http://www.opengis.net/kml/2.2">
<Document>
<name>Document.kml</name>
<open>1</open>
<Style id="exampleStyleDocument">
<LabelStyle>
<color>ff0000cc</color>
</LabelStyle>
</Style>
<StyleMap id="styleMapExample">
<Pair>
<key>normal</key>
<styleUrl>#normalState</styleUrl>
</Pair>
<Pair>
<key>highlight</key>
<styleUrl>#highlightState</styleUrl>
</Pair>
</StyleMap>
<Style id="linestyleExample">
<LineStyle>
<color>7f0000ff</color>
<width>4</width>
</LineStyle>
</Style>
</Document>
</kml>"""
k = kml.KML()
k.from_string(doc)
self.assertEqual(len(list(k.features())), 1)
document = list(k.features())[0]
style = document.get_style_by_url(
'http://localhost:8080/somepath#exampleStyleDocument')
self.assertTrue(isinstance(list(style.styles())[0], styles.LabelStyle))
style = document.get_style_by_url('somepath#linestyleExample')
self.assertTrue(isinstance(list(style.styles())[0], styles.LineStyle))
style = document.get_style_by_url('#styleMapExample')
self.assertTrue(isinstance(style, styles.StyleMap))
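# DateTimeTestCase: TimeStamp and TimeSpan handling; KML dates may be given
# as gYear, gYearMonth, date or dateTime, and a feature may carry either a
# TimeStamp or a TimeSpan, but not both.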
class DateTimeTestCase(unittest.TestCase):
def test_timestamp(self):
now = datetime.datetime.now()
ts = kml.TimeStamp(timestamp=now)
self.assertEqual(ts.timestamp, [now, 'dateTime'])
self.assertTrue('TimeStamp>' in str(ts.to_string()))
self.assertTrue('when>' in str(ts.to_string()))
self.assertTrue(now.isoformat() in str(ts.to_string()))
y2k = datetime.date(2000, 1, 1)
ts = kml.TimeStamp(timestamp=y2k)
self.assertEqual(ts.timestamp, [y2k, 'date'])
self.assertTrue('2000-01-01' in str(ts.to_string()))
def test_timestamp_resolution(self):
now = datetime.datetime.now()
ts = kml.TimeStamp(timestamp=now)
self.assertTrue(now.isoformat() in str(ts.to_string()))
ts.timestamp[1] = 'date'
self.assertTrue(now.date().isoformat() in str(ts.to_string()))
self.assertFalse(now.isoformat() in str(ts.to_string()))
year = str(now.year)
ym = now.strftime('%Y-%m')
ts.timestamp[1] = 'gYearMonth'
self.assertTrue(ym in str(ts.to_string()))
self.assertFalse(now.date().isoformat() in str(ts.to_string()))
ts.timestamp[1] = 'gYear'
self.assertTrue(year in str(ts.to_string()))
self.assertFalse(ym in str(ts.to_string()))
ts.timestamp = None
self.assertRaises(TypeError, ts.to_string)
def test_timespan(self):
now = datetime.datetime.now()
y2k = datetime.datetime(2000, 1, 1)
ts = kml.TimeSpan(end=now, begin=y2k)
self.assertEqual(ts.end, [now, 'dateTime'])
self.assertEqual(ts.begin, [y2k, 'dateTime'])
self.assertTrue('TimeSpan>' in str(ts.to_string()))
self.assertTrue('begin>' in str(ts.to_string()))
self.assertTrue('end>' in str(ts.to_string()))
self.assertTrue(now.isoformat() in str(ts.to_string()))
self.assertTrue(y2k.isoformat() in str(ts.to_string()))
ts.end = None
self.assertFalse(now.isoformat() in str(ts.to_string()))
self.assertTrue(y2k.isoformat() in str(ts.to_string()))
ts.begin = None
self.assertRaises(ValueError, ts.to_string)
def test_feature_timestamp(self):
now = datetime.datetime.now()
f = kml.Document()
f.timeStamp = now
self.assertEqual(f.timeStamp, now)
self.assertTrue(now.isoformat() in str(f.to_string()))
self.assertTrue('TimeStamp>' in str(f.to_string()))
self.assertTrue('when>' in str(f.to_string()))
f.timeStamp = now.date()
self.assertTrue(now.date().isoformat() in str(f.to_string()))
self.assertFalse(now.isoformat() in str(f.to_string()))
f.timeStamp = None
self.assertFalse('TimeStamp>' in str(f.to_string()))
def test_feature_timespan(self):
now = datetime.datetime.now()
y2k = datetime.date(2000, 1, 1)
f = kml.Document()
f.begin = y2k
f.end = now
self.assertEqual(f.begin, y2k)
self.assertEqual(f.end, now)
self.assertTrue(now.isoformat() in str(f.to_string()))
self.assertTrue('2000-01-01' in str(f.to_string()))
self.assertTrue('TimeSpan>' in str(f.to_string()))
self.assertTrue('begin>' in str(f.to_string()))
self.assertTrue('end>' in str(f.to_string()))
f.end = None
self.assertFalse(now.isoformat() in str(f.to_string()))
self.assertTrue('2000-01-01' in str(f.to_string()))
self.assertTrue('TimeSpan>' in str(f.to_string()))
self.assertTrue('begin>' in str(f.to_string()))
self.assertFalse('end>' in str(f.to_string()))
f.begin = None
self.assertFalse('TimeSpan>' in str(f.to_string()))
def test_feature_timespan_stamp(self):
now = datetime.datetime.now()
y2k = datetime.date(2000, 1, 1)
f = kml.Document()
f.begin = y2k
f.end = now
self.assertTrue(now.isoformat() in str(f.to_string()))
self.assertTrue('2000-01-01' in str(f.to_string()))
self.assertTrue('TimeSpan>' in str(f.to_string()))
self.assertTrue('begin>' in str(f.to_string()))
self.assertTrue('end>' in str(f.to_string()))
self.assertFalse('TimeStamp>' in str(f.to_string()))
self.assertFalse('when>' in str(f.to_string()))
# when we set a timestamp an existing timespan will be deleted
f.timeStamp = now
self.assertTrue(now.isoformat() in str(f.to_string()))
self.assertTrue('TimeStamp>' in str(f.to_string()))
self.assertTrue('when>' in str(f.to_string()))
self.assertFalse('2000-01-01' in str(f.to_string()))
self.assertFalse('TimeSpan>' in str(f.to_string()))
self.assertFalse('begin>' in str(f.to_string()))
self.assertFalse('end>' in str(f.to_string()))
# when we set a timespan an existing timestamp will be deleted
f.end = y2k
self.assertFalse(now.isoformat() in str(f.to_string()))
self.assertTrue('2000-01-01' in str(f.to_string()))
self.assertTrue('TimeSpan>' in str(f.to_string()))
self.assertFalse('begin>' in str(f.to_string()))
self.assertTrue('end>' in str(f.to_string()))
self.assertFalse('TimeStamp>' in str(f.to_string()))
self.assertFalse('when>' in str(f.to_string()))
        # force an invalid state where the feature has both a timespan
        # and a timestamp
ts = kml.TimeStamp(timestamp=now)
f._time_stamp = ts
        # serializing raises, since only a timespan or a timestamp is
        # allowed, not both
self.assertRaises(ValueError, f.to_string)
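    # Parsing <when> infers the timestamp resolution from the string format.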
def test_read_timestamp(self):
ts = kml.TimeStamp(ns='')
doc = """
<TimeStamp>
<when>1997</when>
</TimeStamp>
"""
ts.from_string(doc)
self.assertEqual(ts.timestamp[1], 'gYear')
self.assertEqual(ts.timestamp[0], datetime.datetime(1997, 1, 1, 0, 0))
doc = """
<TimeStamp>
<when>1997-07</when>
</TimeStamp>
"""
ts.from_string(doc)
self.assertEqual(ts.timestamp[1], 'gYearMonth')
self.assertEqual(ts.timestamp[0], datetime.datetime(1997, 7, 1, 0, 0))
doc = """
<TimeStamp>
<when>199808</when>
</TimeStamp>
"""
ts.from_string(doc)
self.assertEqual(ts.timestamp[1], 'gYearMonth')
self.assertEqual(ts.timestamp[0], datetime.datetime(1998, 8, 1, 0, 0))
doc = """
<TimeStamp>
<when>1997-07-16</when>
</TimeStamp>
"""
ts.from_string(doc)
self.assertEqual(ts.timestamp[1], 'date')
self.assertEqual(ts.timestamp[0], datetime.datetime(1997, 7, 16, 0, 0))
# dateTime (YYYY-MM-DDThh:mm:ssZ)
# Here, T is the separator between the calendar and the hourly notation
# of time, and Z indicates UTC. (Seconds are required.)
doc = """
<TimeStamp>
<when>1997-07-16T07:30:15Z</when>
</TimeStamp>
"""
ts.from_string(doc)
self.assertEqual(ts.timestamp[1], 'dateTime')
self.assertEqual(ts.timestamp[0], datetime.datetime(
1997, 7, 16, 7, 30, 15,
tzinfo=tzutc()))
doc = """
<TimeStamp>
<when>1997-07-16T10:30:15+03:00</when>
</TimeStamp>
"""
ts.from_string(doc)
self.assertEqual(ts.timestamp[1], 'dateTime')
self.assertEqual(ts.timestamp[0], datetime.datetime(
1997, 7, 16, 10, 30, 15,
tzinfo=tzoffset(None, 10800)))
def test_read_timespan(self):
ts = kml.TimeSpan(ns='')
doc = """
<TimeSpan>
<begin>1876-08-01</begin>
<end>1997-07-16T07:30:15Z</end>
</TimeSpan>
"""
ts.from_string(doc)
self.assertEqual(ts.begin[1], 'date')
self.assertEqual(ts.begin[0], datetime.datetime(1876, 8, 1, 0, 0))
self.assertEqual(ts.end[1], 'dateTime')
self.assertEqual(ts.end[0], datetime.datetime(
1997, 7, 16, 7, 30, 15,
tzinfo=tzutc()))
def test_featurefromstring(self):
d = kml.Document(ns='')
doc = """<Document>
<name>Document.kml</name>
<open>1</open>
<TimeStamp>
<when>1997-07-16T10:30:15+03:00</when>
</TimeStamp>
<TimeSpan>
<begin>1876-08-01</begin>
<end>1997-07-16T07:30:15Z</end>
</TimeSpan>
</Document>"""
d.from_string(doc)
class AtomTestCase(unittest.TestCase):
def test_author(self):
a = atom.Author(name="Christian Ledermann")
self.assertEqual(a.name, "Christian Ledermann")
a.uri = 'http://iwlearn.net'
a.email = '[email protected]'
self.assertTrue("Christian Ledermann" in str(a.to_string()))
self.assertTrue('http://iwlearn.net' in str(a.to_string()))
self.assertTrue('[email protected]' in str(a.to_string()))
self.assertTrue('name>' in str(a.to_string()))
self.assertTrue('uri>' in str(a.to_string()))
self.assertTrue('email>' in str(a.to_string()))
# print (a.to_string())
a.email = 'christian'
self.assertFalse('email>' in str(a.to_string()))
a2 = atom.Author()
a2.from_string(a.to_string())
self.assertEqual(a.to_string(), a2.to_string())
def test_link(self):
l = atom.Link(href="http://localhost/", rel="alternate")
self.assertEqual(l.href, "http://localhost/")
self.assertEqual(l.rel, "alternate")
l.title = "Title"
l.type = "text/html"
l.hreflang = 'en'
l.length = "4096"
self.assertTrue('href="http://localhost/"' in str(l.to_string()))
self.assertTrue('rel="alternate"' in str(l.to_string()))
self.assertTrue('title="Title"' in str(l.to_string()))
self.assertTrue('hreflang="en"' in str(l.to_string()))
self.assertTrue('type="text/html"' in str(l.to_string()))
self.assertTrue('length="4096"' in str(l.to_string()))
self.assertTrue('link' in str(l.to_string()))
self.assertTrue('="http://www.w3.org/2005/Atom"' in str(l.to_string()))
l2 = atom.Link()
l2.from_string(l.to_string())
self.assertEqual(l.to_string(), l2.to_string())
l.href = None
self.assertRaises(ValueError, l.to_string)
class SetGeometryTestCase(unittest.TestCase):
def test_altitude_mode(self):
geom = Geometry()
geom.geometry = Point(0, 1)
self.assertEqual(geom.altitude_mode, None)
self.assertFalse('altitudeMode' in str(geom.to_string()))
geom.altitude_mode = 'unknown'
self.assertRaises(AssertionError, geom.to_string)
geom.altitude_mode = 'clampToSeaFloor'
self.assertRaises(AssertionError, geom.to_string)
geom.altitude_mode = 'relativeToSeaFloor'
self.assertRaises(AssertionError, geom.to_string)
geom.altitude_mode = 'clampToGround'
self.assertFalse('altitudeMode' in str(geom.to_string()))
geom.altitude_mode = 'relativeToGround'
self.assertTrue(
'altitudeMode>relativeToGround</' in str(geom.to_string()))
geom.altitude_mode = 'absolute'
self.assertTrue('altitudeMode>absolute</' in str(geom.to_string()))
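# extrude is only serialized for the relativeToGround and absolute
# altitude modes; with clampToGround it is suppressed.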
def test_extrude(self):
geom = Geometry()
self.assertEqual(geom.extrude, False)
geom.geometry = Point(0, 1)
geom.extrude = False
self.assertFalse('extrude' in str(geom.to_string()))
geom.extrude = True
geom.altitude_mode = 'clampToGround'
self.assertFalse('extrude' in str(geom.to_string()))
geom.altitude_mode = 'relativeToGround'
self.assertTrue('extrude>1</' in str(geom.to_string()))
geom.altitude_mode = 'absolute'
self.assertTrue('extrude>1</' in str(geom.to_string()))
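# tessellate is only honoured for a LineString clamped to the ground;
# any other altitude mode or geometry type drops the element.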
def test_tesselate(self):
geom = Geometry()
self.assertEqual(geom.tessellate, False)
geom.geometry = LineString([(0, 0), (1, 1)])
self.assertFalse('tessellate' in str(geom.to_string()))
geom.altitude_mode = 'clampToGround'
self.assertFalse('tessellate' in str(geom.to_string()))
geom.altitude_mode = 'relativeToGround'
self.assertFalse('tessellate' in str(geom.to_string()))
geom.altitude_mode = 'absolute'
self.assertFalse('tessellate' in str(geom.to_string()))
geom.tessellate = True
geom.altitude_mode = None
self.assertFalse('tessellate' in str(geom.to_string()))
geom.altitude_mode = 'relativeToGround'
self.assertFalse('tessellate' in str(geom.to_string()))
geom.altitude_mode = 'absolute'
self.assertFalse('tessellate' in str(geom.to_string()))
geom.altitude_mode = 'clampToGround'
self.assertTrue('tessellate>1</' in str(geom.to_string()))
# for geometries other than LineString, tessellate is ignored
geom.geometry = Point(0, 1)
self.assertFalse('tessellate' in str(geom.to_string()))
geom.geometry = Polygon([(0, 0), (1, 0), (1, 1), (0, 0)])
self.assertFalse('tessellate' in str(geom.to_string()))
def test_point(self):
p = Point(0, 1)
g = Geometry(geometry=p)
self.assertEqual(g.geometry, p)
g = Geometry(geometry=p.__geo_interface__)
self.assertEqual(g.geometry.__geo_interface__, p.__geo_interface__)
self.assertTrue('Point' in str(g.to_string()))
self.assertTrue(
'coordinates>0.000000,1.000000</' in str(g.to_string()))
def test_linestring(self):
l = LineString([(0, 0), (1, 1)])
g = Geometry(geometry=l)
self.assertEqual(g.geometry, l)
self.assertTrue('LineString' in str(g.to_string()))
self.assertTrue(
'coordinates>0.000000,0.000000 1.000000,1.000000</' in
str(g.to_string()))
g2 = Geometry()
g2.from_string(g.to_string())
self.assertEqual(g.to_string(), g2.to_string())
def test_linearring(self):
l = LinearRing([(0, 0), (1, 0), (1, 1), (0, 0)])
g = Geometry(geometry=l)
self.assertEqual(g.geometry, l)
self.assertTrue('LinearRing' in str(g.to_string()))
self.assertTrue(
'coordinates>0.000000,0.000000 1.000000,0.000000 1.000000,1.000000 0.000000,0.000000</'
in str(g.to_string()))
def test_polygon(self):
# without holes
l = Polygon([(0, 0), (1, 0), (1, 1), (0, 0)])
g = Geometry(geometry=l)
self.assertEqual(g.geometry, l)
self.assertTrue('Polygon' in str(g.to_string()))
self.assertTrue('outerBoundaryIs' in str(g.to_string()))
self.assertFalse('innerBoundaryIs' in str(g.to_string()))
self.assertTrue('LinearRing' in str(g.to_string()))
self.assertTrue(
'coordinates>0.000000,0.000000 1.000000,0.000000 1.000000,1.000000 0.000000,0.000000</'
in str(g.to_string()))
# with holes
p = Polygon(
[(-1, -1), (2, -1), (2, 2), (-1, -1)],
[[(0, 0), (1, 0), (1, 1), (0, 0)]])
g = Geometry(geometry=p)
self.assertEqual(g.geometry, p)
self.assertTrue('Polygon' in str(g.to_string()))
self.assertTrue('outerBoundaryIs' in str(g.to_string()))
self.assertTrue('innerBoundaryIs' in str(g.to_string()))
self.assertTrue('LinearRing' in str(g.to_string()))
self.assertTrue(
'coordinates>0.000000,0.000000 1.000000,0.000000 1.000000,1.000000 0.000000,0.000000</'
in str(g.to_string()))
self.assertTrue(
'coordinates>-1.000000,-1.000000 2.000000,-1.000000 2.000000,2.000000 -1.000000,-1.000000</'
in str(g.to_string()))
def test_multipoint(self):
p0 = Point(0, 1)
p1 = Point(1, 1)
g = Geometry(geometry=MultiPoint([p0, p1]))
self.assertTrue('MultiGeometry' in str(g.to_string()))
self.assertTrue('Point' in str(g.to_string()))
self.assertTrue(
'coordinates>0.000000,1.000000</' in str(g.to_string()))
self.assertTrue(
'coordinates>1.000000,1.000000</' in str(g.to_string()))
def test_multilinestring(self):
l0 = LineString([(0, 0), (1, 0)])
l1 = LineString([(0, 1), (1, 1)])
g = Geometry(geometry=MultiLineString([l0, l1]))
self.assertTrue('MultiGeometry' in str(g.to_string()))
self.assertTrue('LineString' in str(g.to_string()))
self.assertTrue(
'coordinates>0.000000,0.000000 1.000000,0.000000</' in
str(g.to_string()))
self.assertTrue(
'coordinates>0.000000,1.000000 1.000000,1.000000</' in
str(g.to_string()))
def test_multipolygon(self):
# with holes
p0 = Polygon(
[(-1, -1), (2, -1), (2, 2), (-1, -1)],
[[(0, 0), (1, 0), (1, 1), (0, 0)]])
# without holes
p1 = Polygon([(3, 0), (4, 0), (4, 1), (3, 0)])
g = Geometry(geometry=MultiPolygon([p0, p1]))
self.assertTrue('MultiGeometry' in str(g.to_string()))
self.assertTrue('Polygon' in str(g.to_string()))
self.assertTrue('outerBoundaryIs' in str(g.to_string()))
self.assertTrue('innerBoundaryIs' in str(g.to_string()))
self.assertTrue('LinearRing' in str(g.to_string()))
self.assertTrue(
'coordinates>0.000000,0.000000 1.000000,0.000000 1.000000,1.000000 0.000000,0.000000</'
in str(g.to_string()))
self.assertTrue(
'coordinates>-1.000000,-1.000000 2.000000,-1.000000 2.000000,2.000000 -1.000000,-1.000000</'
in str(g.to_string()))
self.assertTrue(
'coordinates>3.000000,0.000000 4.000000,0.000000 4.000000,1.000000 3.000000,0.000000</'
in str(g.to_string()))
def test_geometrycollection(self):
po = Polygon([(3, 0), (4, 0), (4, 1), (3, 0)])
lr = LinearRing([(0, -1), (1, -1), (1, 1), (0, -1)])
ls = LineString([(0, 0), (1, 1)])
p = Point(0, 1)
# geo_if = {'type': 'GeometryCollection', 'geometries': [
# po.__geo_interface__, p.__geo_interface__,
# ls.__geo_interface__, lr.__geo_interface__]}
g = Geometry(geometry=GeometryCollection([po, p, ls, lr]))
# g1 = Geometry(geometry=as_shape(geo_if))
# self.assertEqual(g1.__geo_interface__, g.__geo_interface__)
self.assertTrue('MultiGeometry' in str(g.to_string()))
self.assertTrue('Polygon' in str(g.to_string()))
self.assertTrue('outerBoundaryIs' in str(g.to_string()))
self.assertFalse('innerBoundaryIs' in str(g.to_string()))
self.assertTrue('LinearRing' in str(g.to_string()))
self.assertTrue(
'coordinates>3.000000,0.000000 4.000000,0.000000 4.000000,1.000000 3.000000,0.000000</'
in str(g.to_string()))
self.assertTrue('LineString' in str(g.to_string()))
self.assertTrue(
'coordinates>0.000000,0.000000 1.000000,1.000000</' in
str(g.to_string()))
self.assertTrue('Point' in str(g.to_string()))
self.assertTrue(
'coordinates>0.000000,1.000000</' in str(g.to_string()))
class GetGeometryTestCase(unittest.TestCase):
def test_altitude_mode(self):
doc = """<kml:Point xmlns:kml="http://www.opengis.net/kml/2.2">
<kml:coordinates>0.000000,1.000000</kml:coordinates>
<kml:altitudeMode>clampToGround</kml:altitudeMode>
</kml:Point>"""
g = Geometry()
self.assertEqual(g.altitude_mode, None)
g.from_string(doc)
self.assertEqual(g.altitude_mode, 'clampToGround')
def test_extrude(self):
doc = """<kml:Point xmlns:kml="http://www.opengis.net/kml/2.2">
<kml:coordinates>0.000000,1.000000</kml:coordinates>
<kml:extrude>1</kml:extrude>
</kml:Point>"""
g = Geometry()
self.assertEqual(g.extrude, False)
g.from_string(doc)
self.assertEqual(g.extrude, True)
def test_tesselate(self):
doc = """<kml:Point xmlns:kml="http://www.opengis.net/kml/2.2">
<kml:coordinates>0.000000,1.000000</kml:coordinates>
<kml:tessellate>1</kml:tessellate>
</kml:Point>"""
g = Geometry()
self.assertEqual(g.tessellate, False)
g.from_string(doc)
self.assertEqual(g.tessellate, True)
def test_point(self):
doc = """<kml:Point xmlns:kml="http://www.opengis.net/kml/2.2">
<kml:coordinates>0.000000,1.000000</kml:coordinates>
</kml:Point>"""
g = Geometry()
g.from_string(doc)
self.assertEqual(
g.geometry.__geo_interface__,
{'type': 'Point',
'coordinates': (0.0, 1.0)})
def test_linestring(self):
doc = """<kml:LineString xmlns:kml="http://www.opengis.net/kml/2.2">
<kml:coordinates>0.000000,0.000000 1.000000,1.000000</kml:coordinates>
</kml:LineString>"""
g = Geometry()
g.from_string(doc)
self.assertEqual(
g.geometry.__geo_interface__,
{'type': 'LineString',
'coordinates': ((0.0, 0.0), (1.0, 1.0))})
def test_linearring(self):
doc = """<kml:LinearRing xmlns:kml="http://www.opengis.net/kml/2.2">
<kml:coordinates>0.000000,0.000000 1.000000,0.000000 1.000000,1.000000 0.000000,0.000000</kml:coordinates>
</kml:LinearRing>
"""
g = Geometry()
g.from_string(doc)
self.assertEqual(
g.geometry.__geo_interface__, {
'type': 'LinearRing',
'coordinates': ((0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 0.0))
})
def test_polygon(self):
doc = """<kml:Polygon xmlns:kml="http://www.opengis.net/kml/2.2">
<kml:outerBoundaryIs>
<kml:LinearRing>
<kml:coordinates>0.000000,0.000000 1.000000,0.000000 1.000000,1.000000 0.000000,0.000000</kml:coordinates>
</kml:LinearRing>
</kml:outerBoundaryIs>
</kml:Polygon>
"""
g = Geometry()
g.from_string(doc)
self.assertEqual(
g.geometry.__geo_interface__, {
'type': 'Polygon',
'coordinates': ((
(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 0.0)
), )
})
doc = """<kml:Polygon xmlns:kml="http://www.opengis.net/kml/2.2">
<kml:outerBoundaryIs>
<kml:LinearRing>
<kml:coordinates>-1.000000,-1.000000 2.000000,-1.000000 2.000000,2.000000 -1.000000,-1.000000</kml:coordinates>
</kml:LinearRing>
</kml:outerBoundaryIs>
<kml:innerBoundaryIs>
<kml:LinearRing>
<kml:coordinates>0.000000,0.000000 1.000000,0.000000 1.000000,1.000000 0.000000,0.000000</kml:coordinates>
</kml:LinearRing>
</kml:innerBoundaryIs>
</kml:Polygon>
"""
g.from_string(doc)
self.assertEqual(
g.geometry.__geo_interface__, {
'type': 'Polygon',
'coordinates': (
((-1.0, -1.0), (2.0, -1.0), (2.0, 2.0),
(-1.0, -1.0)), ((0.0, 0.0), (1.0, 0.0), (1.0, 1.0),
(0.0, 0.0)),
)
})
def test_multipoint(self):
doc = """
<kml:MultiGeometry xmlns:kml="http://www.opengis.net/kml/2.2">
<kml:Point>
<kml:coordinates>0.000000,1.000000</kml:coordinates>
</kml:Point>
<kml:Point>
<kml:coordinates>1.000000,1.000000</kml:coordinates>
</kml:Point>
</kml:MultiGeometry>
"""
g = Geometry()
g.from_string(doc)
self.assertEqual(len(g.geometry), 2)
def test_multilinestring(self):
doc = """
<kml:MultiGeometry xmlns:kml="http://www.opengis.net/kml/2.2">
<kml:LineString>
<kml:coordinates>0.000000,0.000000 1.000000,0.000000</kml:coordinates>
</kml:LineString>
<kml:LineString>
<kml:coordinates>0.000000,1.000000 1.000000,1.000000</kml:coordinates>
</kml:LineString>
</kml:MultiGeometry>
"""
g = Geometry()
g.from_string(doc)
self.assertEqual(len(g.geometry), 2)
def test_multipolygon(self):
doc = """
<kml:MultiGeometry xmlns:kml="http://www.opengis.net/kml/2.2">
<kml:Polygon>
<kml:outerBoundaryIs>
<kml:LinearRing>
<kml:coordinates>-1.000000,-1.000000 2.000000,-1.000000 2.000000,2.000000 -1.000000,-1.000000</kml:coordinates>
</kml:LinearRing>
</kml:outerBoundaryIs>
<kml:innerBoundaryIs>
<kml:LinearRing>
<kml:coordinates>0.000000,0.000000 1.000000,0.000000 1.000000,1.000000 0.000000,0.000000</kml:coordinates>
</kml:LinearRing>
</kml:innerBoundaryIs>
</kml:Polygon>
<kml:Polygon>
<kml:outerBoundaryIs>
<kml:LinearRing>
<kml:coordinates>3.000000,0.000000 4.000000,0.000000 4.000000,1.000000 3.000000,0.000000</kml:coordinates>
</kml:LinearRing>
</kml:outerBoundaryIs>
</kml:Polygon>
</kml:MultiGeometry>
"""
g = Geometry()
g.from_string(doc)
self.assertEqual(len(g.geometry), 2)
def test_geometrycollection(self):
doc = """
<kml:MultiGeometry xmlns:kml="http://www.opengis.net/kml/2.2">
<kml:Polygon>
<kml:outerBoundaryIs>
<kml:LinearRing>
<kml:coordinates>3,0 4,0 4,1 3,0</kml:coordinates>
</kml:LinearRing>
</kml:outerBoundaryIs>
</kml:Polygon>
<kml:Point>
<kml:coordinates>0.000000,1.000000</kml:coordinates>
</kml:Point>
<kml:LineString>
<kml:coordinates>0.000000,0.000000 1.000000,1.000000</kml:coordinates>
</kml:LineString>
<kml:LinearRing>
<kml:coordinates>0.0,0.0 1.0,0.0 1.0,1.0 0.0,1.0 0.0,0.0</kml:coordinates>
</kml:LinearRing>
</kml:MultiGeometry>
"""
g = Geometry()
g.from_string(doc)
self.assertEqual(len(g.geometry), 4)
doc = """
<kml:MultiGeometry xmlns:kml="http://www.opengis.net/kml/2.2">
<kml:LinearRing>
<kml:coordinates>3.0,0.0 4.0,0.0 4.0,1.0 3.0,0.0</kml:coordinates>
</kml:LinearRing>
<kml:LinearRing>
<kml:coordinates>0.0,0.0 1.0,0.0 1.0,1.0 0.0,0.0</kml:coordinates>
</kml:LinearRing>
</kml:MultiGeometry>
"""
g = Geometry()
g.from_string(doc)
self.assertEqual(len(g.geometry), 2)
self.assertEqual(g.geometry.geom_type, 'GeometryCollection')
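# With config.FORCE3D enabled, 2D coordinates are padded with a zero
# z value, so 2D and 3D placemarks serialize identically.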
class Force3DTestCase(unittest.TestCase):
def setUp(self):
config.FORCE3D = False
def tearDown(self):
# Important: Set FORCE3D back to False!
config.FORCE3D = False
def test3d(self):
config.FORCE3D = True
ns = ''
p2 = kml.Placemark(ns, 'id', 'name', 'description')
p2.geometry = Polygon([(0, 0), (1, 1), (1, 0)])
p3 = kml.Placemark(ns, 'id', 'name', 'description')
p3.geometry = Polygon([(0, 0, 0), (1, 1, 0), (1, 0, 0)])
self.assertEqual(p2.to_string(), p3.to_string())
def testno3d(self):
config.FORCE3D = False
ns = ''
p2 = kml.Placemark(ns, 'id', 'name', 'description')
p2.geometry = Polygon([(0, 0), (1, 1), (1, 0)])
p3 = kml.Placemark(ns, 'id', 'name', 'description')
p3.geometry = Polygon([(0, 0, 0), (1, 1, 0), (1, 0, 0)])
self.assertNotEqual(p2.to_string(), p3.to_string())
class BaseFeatureTestCase(unittest.TestCase):
def test_address_string(self):
f = kml._Feature()
address = '1600 Amphitheatre Parkway, Mountain View, CA 94043, USA'
f.address = address
self.assertEqual(f.address, address)
def test_address_none(self):
f = kml._Feature()
f.address = None
self.assertEqual(f.address, None)
def test_address_value_error(self):
f = kml._Feature()
with self.assertRaises(ValueError):
f.address = 123
def test_phone_number_string(self):
f = kml._Feature()
f.phoneNumber = '+1-234-567-8901'
self.assertEqual(f.phoneNumber, '+1-234-567-8901')
def test_phone_number_none(self):
f = kml._Feature()
f.phoneNumber = None
self.assertEqual(f.phoneNumber, None)
def test_phone_number_value_error(self):
f = kml._Feature()
with self.assertRaises(ValueError):
f.phoneNumber = 123
class BaseOverlayTestCase(unittest.TestCase):
def test_color_string(self):
o = kml._Overlay(name='An Overlay')
o.color = '00010203'
self.assertEqual(o.color, '00010203')
def test_color_none(self):
o = kml._Overlay(name='An Overlay')
o.color = '00010203'
self.assertEqual(o.color, '00010203')
o.color = None
self.assertEqual(o.color, None)
def test_color_value_error(self):
o = kml._Overlay(name='An Overlay')
with self.assertRaises(ValueError):
o.color = object()
def test_draw_order_string(self):
o = kml._Overlay(name='An Overlay')
o.drawOrder = '1'
self.assertEqual(o.drawOrder, '1')
def test_draw_order_int(self):
o = kml._Overlay(name='An Overlay')
o.drawOrder = 1
self.assertEqual(o.drawOrder, '1')
def test_draw_order_none(self):
o = kml._Overlay(name='An Overlay')
o.drawOrder = '1'
self.assertEqual(o.drawOrder, '1')
o.drawOrder = None
self.assertEqual(o.drawOrder, None)
def test_draw_order_value_error(self):
o = kml._Overlay(name='An Overlay')
with self.assertRaises(ValueError):
o.drawOrder = object()
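# The icon setter normalizes bare URLs and partial tags into a
# complete '<href>...</href>' string, as the tests below show.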
def test_icon_without_tag(self):
o = kml._Overlay(name='An Overlay')
o.icon = 'http://example.com/'
self.assertEqual(o.icon, '<href>http://example.com/</href>')
def test_icon_with_open_tag(self):
o = kml._Overlay(name='An Overlay')
o.icon = '<href>http://example.com/'
self.assertEqual(o.icon, '<href>http://example.com/</href>')
def test_icon_with_close_tag(self):
o = kml._Overlay(name='An Overlay')
o.icon = 'http://example.com/</href>'
self.assertEqual(o.icon, '<href>http://example.com/</href>')
def test_icon_with_tag(self):
o = kml._Overlay(name='An Overlay')
o.icon = '<href>http://example.com/</href>'
self.assertEqual(o.icon, '<href>http://example.com/</href>')
def test_icon_to_none(self):
o = kml._Overlay(name='An Overlay')
o.icon = '<href>http://example.com/</href>'
self.assertEqual(o.icon, '<href>http://example.com/</href>')
o.icon = None
self.assertEqual(o.icon, None)
def test_icon_raise_exception(self):
o = kml._Overlay(name='An Overlay')
with self.assertRaises(ValueError):
o.icon = 12345
class GroundOverlayTestCase(unittest.TestCase):
def setUp(self):
self.g = kml.GroundOverlay()
def test_altitude_int(self):
self.g.altitude = 123
self.assertEqual(self.g.altitude, '123')
def test_altitude_float(self):
self.g.altitude = 123.4
self.assertEqual(self.g.altitude, '123.4')
def test_altitude_string(self):
self.g.altitude = '123'
self.assertEqual(self.g.altitude, '123')
def test_altitude_value_error(self):
with self.assertRaises(ValueError):
self.g.altitude = object()
def test_altitude_none(self):
self.g.altitude = '123'
self.assertEqual(self.g.altitude, '123')
self.g.altitude = None
self.assertEqual(self.g.altitude, None)
def test_altitude_mode_default(self):
self.assertEqual(self.g.altitudeMode, 'clampToGround')
def test_altitude_mode_error(self):
self.g.altitudeMode = ''
self.assertEqual(self.g.altitudeMode, 'clampToGround')
def test_altitude_mode_clamp(self):
self.g.altitudeMode = 'clampToGround'
self.assertEqual(self.g.altitudeMode, 'clampToGround')
def test_altitude_mode_absolute(self):
self.g.altitudeMode = 'absolute'
self.assertEqual(self.g.altitudeMode, 'absolute')
def test_latlonbox_function(self):
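# latLonBox takes its arguments in the order
# (north, south, east, west, rotation)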
self.g.latLonBox(10, 20, 30, 40, 50)
self.assertEqual(self.g.north, '10')
self.assertEqual(self.g.south, '20')
self.assertEqual(self.g.east, '30')
self.assertEqual(self.g.west, '40')
self.assertEqual(self.g.rotation, '50')
def test_latlonbox_string(self):
self.g.north = '10'
self.g.south = '20'
self.g.east = '30'
self.g.west = '40'
self.g.rotation = '50'
self.assertEqual(self.g.north, '10')
self.assertEqual(self.g.south, '20')
self.assertEqual(self.g.east, '30')
self.assertEqual(self.g.west, '40')
self.assertEqual(self.g.rotation, '50')
def test_latlonbox_int(self):
self.g.north = 10
self.g.south = 20
self.g.east = 30
self.g.west = 40
self.g.rotation = 50
self.assertEqual(self.g.north, '10')
self.assertEqual(self.g.south, '20')
self.assertEqual(self.g.east, '30')
self.assertEqual(self.g.west, '40')
self.assertEqual(self.g.rotation, '50')
def test_latlonbox_float(self):
self.g.north = 10.0
self.g.south = 20.0
self.g.east = 30.0
self.g.west = 40.0
self.g.rotation = 50.0
self.assertEqual(self.g.north, '10.0')
self.assertEqual(self.g.south, '20.0')
self.assertEqual(self.g.east, '30.0')
self.assertEqual(self.g.west, '40.0')
self.assertEqual(self.g.rotation, '50.0')
def test_latlonbox_value_error(self):
with self.assertRaises(ValueError):
self.g.north = object()
with self.assertRaises(ValueError):
self.g.south = object()
with self.assertRaises(ValueError):
self.g.east = object()
with self.assertRaises(ValueError):
self.g.west = object()
with self.assertRaises(ValueError):
self.g.rotation = object()
self.assertEqual(self.g.north, None)
self.assertEqual(self.g.south, None)
self.assertEqual(self.g.east, None)
self.assertEqual(self.g.west, None)
self.assertEqual(self.g.rotation, None)
def test_latlonbox_empty_string(self):
self.g.north = ''
self.g.south = ''
self.g.east = ''
self.g.west = ''
self.g.rotation = ''
self.assertEqual(self.g.north, '')
self.assertEqual(self.g.south, '')
self.assertEqual(self.g.east, '')
self.assertEqual(self.g.west, '')
self.assertEqual(self.g.rotation, '')
def test_latlonbox_none(self):
self.g.north = None
self.g.south = None
self.g.east = None
self.g.west = None
self.g.rotation = None
self.assertEqual(self.g.north, None)
self.assertEqual(self.g.south, None)
self.assertEqual(self.g.east, None)
self.assertEqual(self.g.west, None)
self.assertEqual(self.g.rotation, None)
class GroundOverlayStringTestCase(unittest.TestCase):
def test_default_to_string(self):
g = kml.GroundOverlay()
expected = kml.GroundOverlay()
expected.from_string(
'<kml:GroundOverlay xmlns:kml="http://www.opengis.net/kml/2.2">'
'<kml:visibility>1</kml:visibility>'
'</kml:GroundOverlay>')
self.assertEqual(g.to_string(), expected.to_string())
def test_to_string(self):
g = kml.GroundOverlay()
g.icon = 'http://example.com'
g.drawOrder = 1
g.color = '00010203'
expected = kml.GroundOverlay()
expected.from_string(
'<kml:GroundOverlay xmlns:kml="http://www.opengis.net/kml/2.2">'
'<kml:visibility>1</kml:visibility>'
'<kml:color>00010203</kml:color>'
'<kml:drawOrder>1</kml:drawOrder>'
'<kml:icon><href>http://example.com</href></kml:icon>'
'</kml:GroundOverlay>')
self.assertEqual(g.to_string(), expected.to_string())
def test_altitude_from_int(self):
g = kml.GroundOverlay()
g.altitude = 123
expected = kml.GroundOverlay()
expected.from_string(
'<kml:GroundOverlay xmlns:kml="http://www.opengis.net/kml/2.2">'
'<kml:visibility>1</kml:visibility>'
'<kml:altitude>123</kml:altitude>'
'<kml:altitudeMode>clampToGround</kml:altitudeMode>'
'</kml:GroundOverlay>')
self.assertEqual(g.to_string(), expected.to_string())
def test_altitude_from_float(self):
g = kml.GroundOverlay()
g.altitude = 123.4
expected = kml.GroundOverlay()
expected.from_string(
'<kml:GroundOverlay xmlns:kml="http://www.opengis.net/kml/2.2">'
'<kml:visibility>1</kml:visibility>'
'<kml:altitude>123.4</kml:altitude>'
'<kml:altitudeMode>clampToGround</kml:altitudeMode>'
'</kml:GroundOverlay>')
self.assertEqual(g.to_string(), expected.to_string())
def test_altitude_from_string(self):
g = kml.GroundOverlay()
g.altitude = '123.4'
expected = kml.GroundOverlay()
expected.from_string(
'<kml:GroundOverlay xmlns:kml="http://www.opengis.net/kml/2.2">'
'<kml:visibility>1</kml:visibility>'
'<kml:altitude>123.4</kml:altitude>'
'<kml:altitudeMode>clampToGround</kml:altitudeMode>'
'</kml:GroundOverlay>')
self.assertEqual(g.to_string(), expected.to_string())
def test_altitude_mode_absolute(self):
g = kml.GroundOverlay()
g.altitude = '123.4'
g.altitudeMode = 'absolute'
expected = kml.GroundOverlay()
expected.from_string(
'<kml:GroundOverlay xmlns:kml="http://www.opengis.net/kml/2.2">'
'<kml:visibility>1</kml:visibility>'
'<kml:altitude>123.4</kml:altitude>'
'<kml:altitudeMode>absolute</kml:altitudeMode>'
'</kml:GroundOverlay>')
self.assertEqual(g.to_string(), expected.to_string())
def test_altitude_mode_unknown_string(self):
g = kml.GroundOverlay()
g.altitude = '123.4'
g.altitudeMode = 'unknown string'
expected = kml.GroundOverlay()
expected.from_string(
'<kml:GroundOverlay xmlns:kml="http://www.opengis.net/kml/2.2">'
'<kml:visibility>1</kml:visibility>'
'<kml:altitude>123.4</kml:altitude>'
'<kml:altitudeMode>clampToGround</kml:altitudeMode>'
'</kml:GroundOverlay>')
self.assertEqual(g.to_string(), expected.to_string())
def test_altitude_mode_value(self):
g = kml.GroundOverlay()
g.altitude = '123.4'
g.altitudeMode = 1234
expected = kml.GroundOverlay()
expected.from_string(
'<kml:GroundOverlay xmlns:kml="http://www.opengis.net/kml/2.2">'
'<kml:visibility>1</kml:visibility>'
'<kml:altitude>123.4</kml:altitude>'
'<kml:altitudeMode>clampToGround</kml:altitudeMode>'
'</kml:GroundOverlay>')
self.assertEqual(g.to_string(), expected.to_string())
def test_latlonbox_no_rotation(self):
g = kml.GroundOverlay()
g.latLonBox(10, 20, 30, 40)
expected = kml.GroundOverlay()
expected.from_string(
'<kml:GroundOverlay xmlns:kml="http://www.opengis.net/kml/2.2">'
'<kml:visibility>1</kml:visibility>'
'<kml:latLonBox>'
'<kml:north>10</kml:north>'
'<kml:south>20</kml:south>'
'<kml:east>30</kml:east>'
'<kml:west>40</kml:west>'
'<kml:rotation>0</kml:rotation>'
'</kml:latLonBox>'
'</kml:GroundOverlay>')
self.assertEqual(g.to_string(), expected.to_string())
def test_latlonbox_rotation(self):
g = kml.GroundOverlay()
g.latLonBox(10, 20, 30, 40, 50)
expected = kml.GroundOverlay()
expected.from_string(
'<kml:GroundOverlay xmlns:kml="http://www.opengis.net/kml/2.2">'
'<kml:visibility>1</kml:visibility>'
'<kml:latLonBox>'
'<kml:north>10</kml:north>'
'<kml:south>20</kml:south>'
'<kml:east>30</kml:east>'
'<kml:west>40</kml:west>'
'<kml:rotation>50</kml:rotation>'
'</kml:latLonBox>'
'</kml:GroundOverlay>')
self.assertEqual(g.to_string(), expected.to_string())
def test_latlonbox_nswer(self):
g = kml.GroundOverlay()
g.north = 10
g.south = 20
g.east = 30
g.west = 40
g.rotation = 50
expected = kml.GroundOverlay()
expected.from_string(
'<kml:GroundOverlay xmlns:kml="http://www.opengis.net/kml/2.2">'
'<kml:visibility>1</kml:visibility>'
'<kml:latLonBox>'
'<kml:north>10</kml:north>'
'<kml:south>20</kml:south>'
'<kml:east>30</kml:east>'
'<kml:west>40</kml:west>'
'<kml:rotation>50</kml:rotation>'
'</kml:latLonBox>'
'</kml:GroundOverlay>')
self.assertEqual(g.to_string(), expected.to_string())
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(BaseClassesTestCase))
suite.addTest(unittest.makeSuite(BuildKmlTestCase))
suite.addTest(unittest.makeSuite(KmlFromStringTestCase))
suite.addTest(unittest.makeSuite(StyleTestCase))
suite.addTest(unittest.makeSuite(StyleFromStringTestCase))
suite.addTest(unittest.makeSuite(DateTimeTestCase))
suite.addTest(unittest.makeSuite(AtomTestCase))
suite.addTest(unittest.makeSuite(SetGeometryTestCase))
suite.addTest(unittest.makeSuite(GetGeometryTestCase))
suite.addTest(unittest.makeSuite(Force3DTestCase))
suite.addTest(unittest.makeSuite(BaseFeatureTestCase))
suite.addTest(unittest.makeSuite(BaseOverlayTestCase))
suite.addTest(unittest.makeSuite(GroundOverlayTestCase))
suite.addTest(unittest.makeSuite(GroundOverlayStringTestCase))
return suite
if __name__ == '__main__':
unittest.main()
script.min.js | +function(a){"use strict";function b(){var a=document.createElement("bootstrap"),b={WebkitTransition:"webkitTransitionEnd",MozTransition:"transitionend",OTransition:"oTransitionEnd otransitionend",transition:"transitionend"};for(var c in b)if(void 0!==a.style[c])return{end:b[c]};return!1}a.fn.emulateTransitionEnd=function(b){var c=!1,d=this;a(this).one("bsTransitionEnd",function(){c=!0});var e=function(){c||a(d).trigger(a.support.transition.end)};return setTimeout(e,b),this},a(function(){a.support.transition=b(),a.support.transition&&(a.event.special.bsTransitionEnd={bindType:a.support.transition.end,delegateType:a.support.transition.end,handle:function(b){return a(b.target).is(this)?b.handleObj.handler.apply(this,arguments):void 0}})})}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var c=a(this),e=c.data("bs.alert");e||c.data("bs.alert",e=new d(this)),"string"==typeof b&&e[b].call(c)})}var c='[data-dismiss="alert"]',d=function(b){a(b).on("click",c,this.close)};d.VERSION="3.2.0",d.prototype.close=function(b){function c(){f.detach().trigger("closed.bs.alert").remove()}var d=a(this),e=d.attr("data-target");e||(e=d.attr("href"),e=e&&e.replace(/.*(?=#[^\s]*$)/,""));var f=a(e);b&&b.preventDefault(),f.length||(f=d.hasClass("alert")?d:d.parent()),f.trigger(b=a.Event("close.bs.alert")),b.isDefaultPrevented()||(f.removeClass("in"),a.support.transition&&f.hasClass("fade")?f.one("bsTransitionEnd",c).emulateTransitionEnd(150):c())};var e=a.fn.alert;a.fn.alert=b,a.fn.alert.Constructor=d,a.fn.alert.noConflict=function(){return a.fn.alert=e,this},a(document).on("click.bs.alert.data-api",c,d.prototype.close)}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.button"),f="object"==typeof b&&b;e||d.data("bs.button",e=new c(this,f)),"toggle"==b?e.toggle():b&&e.setState(b)})}var c=function(b,d){this.$element=a(b),this.options=a.extend({},c.DEFAULTS,d),this.isLoading=!1};c.VERSION="3.2.0",c.DEFAULTS={loadingText:"loading..."},c.prototype.setState=function(b){var c="disabled",d=this.$element,e=d.is("input")?"val":"html",f=d.data();b+="Text",null==f.resetText&&d.data("resetText",d[e]()),d[e](null==f[b]?this.options[b]:f[b]),setTimeout(a.proxy(function(){"loadingText"==b?(this.isLoading=!0,d.addClass(c).attr(c,c)):this.isLoading&&(this.isLoading=!1,d.removeClass(c).removeAttr(c))},this),0)},c.prototype.toggle=function(){var a=!0,b=this.$element.closest('[data-toggle="buttons"]');if(b.length){var c=this.$element.find("input");"radio"==c.prop("type")&&(c.prop("checked")&&this.$element.hasClass("active")?a=!1:b.find(".active").removeClass("active")),a&&c.prop("checked",!this.$element.hasClass("active")).trigger("change")}a&&this.$element.toggleClass("active")};var d=a.fn.button;a.fn.button=b,a.fn.button.Constructor=c,a.fn.button.noConflict=function(){return a.fn.button=d,this},a(document).on("click.bs.button.data-api",'[data-toggle^="button"]',function(c){var d=a(c.target);d.hasClass("btn")||(d=d.closest(".btn")),b.call(d,"toggle"),c.preventDefault()})}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.carousel"),f=a.extend({},c.DEFAULTS,d.data(),"object"==typeof b&&b),g="string"==typeof b?b:f.slide;e||d.data("bs.carousel",e=new c(this,f)),"number"==typeof b?e.to(b):g?e[g]():f.interval&&e.pause().cycle()})}var 
c=function(b,c){this.$element=a(b).on("keydown.bs.carousel",a.proxy(this.keydown,this)),this.$indicators=this.$element.find(".carousel-indicators"),this.options=c,this.paused=this.sliding=this.interval=this.$active=this.$items=null,"hover"==this.options.pause&&this.$element.on("mouseenter.bs.carousel",a.proxy(this.pause,this)).on("mouseleave.bs.carousel",a.proxy(this.cycle,this))};c.VERSION="3.2.0",c.DEFAULTS={interval:5e3,pause:"hover",wrap:!0},c.prototype.keydown=function(a){switch(a.which){case 37:this.prev();break;case 39:this.next();break;default:return}a.preventDefault()},c.prototype.cycle=function(b){return b||(this.paused=!1),this.interval&&clearInterval(this.interval),this.options.interval&&!this.paused&&(this.interval=setInterval(a.proxy(this.next,this),this.options.interval)),this},c.prototype.getItemIndex=function(a){return this.$items=a.parent().children(".item"),this.$items.index(a||this.$active)},c.prototype.to=function(b){var c=this,d=this.getItemIndex(this.$active=this.$element.find(".item.active"));return b>this.$items.length-1||0>b?void 0:this.sliding?this.$element.one("slid.bs.carousel",function(){c.to(b)}):d==b?this.pause().cycle():this.slide(b>d?"next":"prev",a(this.$items[b]))},c.prototype.pause=function(b){return b||(this.paused=!0),this.$element.find(".next, .prev").length&&a.support.transition&&(this.$element.trigger(a.support.transition.end),this.cycle(!0)),this.interval=clearInterval(this.interval),this},c.prototype.next=function(){return this.sliding?void 0:this.slide("next")},c.prototype.prev=function(){return this.sliding?void 0:this.slide("prev")},c.prototype.slide=function(b,c){var d=this.$element.find(".item.active"),e=c||d[b](),f=this.interval,g="next"==b?"left":"right",h="next"==b?"first":"last",i=this;if(!e.length){if(!this.options.wrap)return;e=this.$element.find(".item")[h]()}if(e.hasClass("active"))return this.sliding=!1;var j=e[0],k=a.Event("slide.bs.carousel",{relatedTarget:j,direction:g});if(this.$element.trigger(k),!k.isDefaultPrevented()){if(this.sliding=!0,f&&this.pause(),this.$indicators.length){this.$indicators.find(".active").removeClass("active");var l=a(this.$indicators.children()[this.getItemIndex(e)]);l&&l.addClass("active")}var m=a.Event("slid.bs.carousel",{relatedTarget:j,direction:g});return a.support.transition&&this.$element.hasClass("slide")?(e.addClass(b),e[0].offsetWidth,d.addClass(g),e.addClass(g),d.one("bsTransitionEnd",function(){e.removeClass([b,g].join(" ")).addClass("active"),d.removeClass(["active",g].join(" ")),i.sliding=!1,setTimeout(function(){i.$element.trigger(m)},0)}).emulateTransitionEnd(1e3*d.css("transition-duration").slice(0,-1))):(d.removeClass("active"),e.addClass("active"),this.sliding=!1,this.$element.trigger(m)),f&&this.cycle(),this}};var d=a.fn.carousel;a.fn.carousel=b,a.fn.carousel.Constructor=c,a.fn.carousel.noConflict=function(){return a.fn.carousel=d,this},a(document).on("click.bs.carousel.data-api","[data-slide], [data-slide-to]",function(c){var d,e=a(this),f=a(e.attr("data-target")||(d=e.attr("href"))&&d.replace(/.*(?=#[^\s]+$)/,""));if(f.hasClass("carousel")){var g=a.extend({},f.data(),e.data()),h=e.attr("data-slide-to");h&&(g.interval=!1),b.call(f,g),h&&f.data("bs.carousel").to(h),c.preventDefault()}}),a(window).on("load",function(){a('[data-ride="carousel"]').each(function(){var c=a(this);b.call(c,c.data())})})}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.collapse"),f=a.extend({},c.DEFAULTS,d.data(),"object"==typeof 
b&&b);!e&&f.toggle&&"show"==b&&(b=!b),e||d.data("bs.collapse",e=new c(this,f)),"string"==typeof b&&e[b]()})}var c=function(b,d){this.$element=a(b),this.options=a.extend({},c.DEFAULTS,d),this.transitioning=null,this.options.parent&&(this.$parent=a(this.options.parent)),this.options.toggle&&this.toggle()};c.VERSION="3.2.0",c.DEFAULTS={toggle:!0},c.prototype.dimension=function(){var a=this.$element.hasClass("width");return a?"width":"height"},c.prototype.show=function(){if(!this.transitioning&&!this.$element.hasClass("in")){var c=a.Event("show.bs.collapse");if(this.$element.trigger(c),!c.isDefaultPrevented()){var d=this.$parent&&this.$parent.find("> .panel > .in");if(d&&d.length){var e=d.data("bs.collapse");if(e&&e.transitioning)return;b.call(d,"hide"),e||d.data("bs.collapse",null)}var f=this.dimension();this.$element.removeClass("collapse").addClass("collapsing")[f](0),this.transitioning=1;var g=function(){this.$element.removeClass("collapsing").addClass("collapse in")[f](""),this.transitioning=0,this.$element.trigger("shown.bs.collapse")};if(!a.support.transition)return g.call(this);var h=a.camelCase(["scroll",f].join("-"));this.$element.one("bsTransitionEnd",a.proxy(g,this)).emulateTransitionEnd(350)[f](this.$element[0][h])}}},c.prototype.hide=function(){if(!this.transitioning&&this.$element.hasClass("in")){var b=a.Event("hide.bs.collapse");if(this.$element.trigger(b),!b.isDefaultPrevented()){var c=this.dimension();this.$element[c](this.$element[c]())[0].offsetHeight,this.$element.addClass("collapsing").removeClass("collapse").removeClass("in"),this.transitioning=1;var d=function(){this.transitioning=0,this.$element.trigger("hidden.bs.collapse").removeClass("collapsing").addClass("collapse")};return a.support.transition?void this.$element[c](0).one("bsTransitionEnd",a.proxy(d,this)).emulateTransitionEnd(350):d.call(this)}}},c.prototype.toggle=function(){this[this.$element.hasClass("in")?"hide":"show"]()};var d=a.fn.collapse;a.fn.collapse=b,a.fn.collapse.Constructor=c,a.fn.collapse.noConflict=function(){return a.fn.collapse=d,this},a(document).on("click.bs.collapse.data-api",'[data-toggle="collapse"]',function(c){var d,e=a(this),f=e.attr("data-target")||c.preventDefault()||(d=e.attr("href"))&&d.replace(/.*(?=#[^\s]+$)/,""),g=a(f),h=g.data("bs.collapse"),i=h?"toggle":e.data(),j=e.attr("data-parent"),k=j&&a(j);h&&h.transitioning||(k&&k.find('[data-toggle="collapse"][data-parent="'+j+'"]').not(e).addClass("collapsed"),e[g.hasClass("in")?"addClass":"removeClass"]("collapsed")),b.call(g,i)})}(jQuery),+function(a){"use strict";function b(b){b&&3===b.which||(a(e).remove(),a(f).each(function(){var d=c(a(this)),e={relatedTarget:this};d.hasClass("open")&&(d.trigger(b=a.Event("hide.bs.dropdown",e)),b.isDefaultPrevented()||d.removeClass("open").trigger("hidden.bs.dropdown",e))}))}function c(b){var c=b.attr("data-target");c||(c=b.attr("href"),c=c&&/#[A-Za-z]/.test(c)&&c.replace(/.*(?=#[^\s]*$)/,""));var d=c&&a(c);return d&&d.length?d:b.parent()}function d(b){return this.each(function(){var c=a(this),d=c.data("bs.dropdown");d||c.data("bs.dropdown",d=new g(this)),"string"==typeof b&&d[b].call(c)})}var e=".dropdown-backdrop",f='[data-toggle="dropdown"]',g=function(b){a(b).on("click.bs.dropdown",this.toggle)};g.VERSION="3.2.0",g.prototype.toggle=function(d){var e=a(this);if(!e.is(".disabled, :disabled")){var f=c(e),g=f.hasClass("open");if(b(),!g){"ontouchstart"in document.documentElement&&!f.closest(".navbar-nav").length&&a('<div class="dropdown-backdrop"/>').insertAfter(a(this)).on("click",b);var 
h={relatedTarget:this};if(f.trigger(d=a.Event("show.bs.dropdown",h)),d.isDefaultPrevented())return;e.trigger("focus"),f.toggleClass("open").trigger("shown.bs.dropdown",h)}return!1}},g.prototype.keydown=function(b){if(/(38|40|27)/.test(b.keyCode)){var d=a(this);if(b.preventDefault(),b.stopPropagation(),!d.is(".disabled, :disabled")){var e=c(d),g=e.hasClass("open");if(!g||g&&27==b.keyCode)return 27==b.which&&e.find(f).trigger("focus"),d.trigger("click");var h=" li:not(.divider):visible a",i=e.find('[role="menu"]'+h+', [role="listbox"]'+h);if(i.length){var j=i.index(i.filter(":focus"));38==b.keyCode&&j>0&&j--,40==b.keyCode&&j<i.length-1&&j++,~j||(j=0),i.eq(j).trigger("focus")}}}};var h=a.fn.dropdown;a.fn.dropdown=d,a.fn.dropdown.Constructor=g,a.fn.dropdown.noConflict=function(){return a.fn.dropdown=h,this},a(document).on("click.bs.dropdown.data-api",b).on("click.bs.dropdown.data-api",".dropdown form",function(a){a.stopPropagation()}).on("click.bs.dropdown.data-api",f,g.prototype.toggle).on("keydown.bs.dropdown.data-api",f+', [role="menu"], [role="listbox"]',g.prototype.keydown)}(jQuery),+function(a){"use strict";function b(b,d){return this.each(function(){var e=a(this),f=e.data("bs.modal"),g=a.extend({},c.DEFAULTS,e.data(),"object"==typeof b&&b);f||e.data("bs.modal",f=new c(this,g)),"string"==typeof b?f[b](d):g.show&&f.show(d)})}var c=function(b,c){this.options=c,this.$body=a(document.body),this.$element=a(b),this.$backdrop=this.isShown=null,this.scrollbarWidth=0,this.options.remote&&this.$element.find(".modal-content").load(this.options.remote,a.proxy(function(){this.$element.trigger("loaded.bs.modal")},this))};c.VERSION="3.2.0",c.DEFAULTS={backdrop:!0,keyboard:!0,show:!0},c.prototype.toggle=function(a){return this.isShown?this.hide():this.show(a)},c.prototype.show=function(b){var c=this,d=a.Event("show.bs.modal",{relatedTarget:b});this.$element.trigger(d),this.isShown||d.isDefaultPrevented()||(this.isShown=!0,this.checkScrollbar(),this.$body.addClass("modal-open"),this.setScrollbar(),this.escape(),this.$element.on("click.dismiss.bs.modal",'[data-dismiss="modal"]',a.proxy(this.hide,this)),this.backdrop(function(){var d=a.support.transition&&c.$element.hasClass("fade");c.$element.parent().length||c.$element.appendTo(c.$body),c.$element.show().scrollTop(0),d&&c.$element[0].offsetWidth,c.$element.addClass("in").attr("aria-hidden",!1),c.enforceFocus();var 
e=a.Event("shown.bs.modal",{relatedTarget:b});d?c.$element.find(".modal-dialog").one("bsTransitionEnd",function(){c.$element.trigger("focus").trigger(e)}).emulateTransitionEnd(300):c.$element.trigger("focus").trigger(e)}))},c.prototype.hide=function(b){b&&b.preventDefault(),b=a.Event("hide.bs.modal"),this.$element.trigger(b),this.isShown&&!b.isDefaultPrevented()&&(this.isShown=!1,this.$body.removeClass("modal-open"),this.resetScrollbar(),this.escape(),a(document).off("focusin.bs.modal"),this.$element.removeClass("in").attr("aria-hidden",!0).off("click.dismiss.bs.modal"),a.support.transition&&this.$element.hasClass("fade")?this.$element.one("bsTransitionEnd",a.proxy(this.hideModal,this)).emulateTransitionEnd(300):this.hideModal())},c.prototype.enforceFocus=function(){a(document).off("focusin.bs.modal").on("focusin.bs.modal",a.proxy(function(a){this.$element[0]===a.target||this.$element.has(a.target).length||this.$element.trigger("focus")},this))},c.prototype.escape=function(){this.isShown&&this.options.keyboard?this.$element.on("keyup.dismiss.bs.modal",a.proxy(function(a){27==a.which&&this.hide()},this)):this.isShown||this.$element.off("keyup.dismiss.bs.modal")},c.prototype.hideModal=function(){var a=this;this.$element.hide(),this.backdrop(function(){a.$element.trigger("hidden.bs.modal")})},c.prototype.removeBackdrop=function(){this.$backdrop&&this.$backdrop.remove(),this.$backdrop=null},c.prototype.backdrop=function(b){var c=this,d=this.$element.hasClass("fade")?"fade":"";if(this.isShown&&this.options.backdrop){var e=a.support.transition&&d;if(this.$backdrop=a('<div class="modal-backdrop '+d+'" />').appendTo(this.$body),this.$element.on("click.dismiss.bs.modal",a.proxy(function(a){a.target===a.currentTarget&&("static"==this.options.backdrop?this.$element[0].focus.call(this.$element[0]):this.hide.call(this))},this)),e&&this.$backdrop[0].offsetWidth,this.$backdrop.addClass("in"),!b)return;e?this.$backdrop.one("bsTransitionEnd",b).emulateTransitionEnd(150):b()}else if(!this.isShown&&this.$backdrop){this.$backdrop.removeClass("in");var f=function(){c.removeBackdrop(),b&&b()};a.support.transition&&this.$element.hasClass("fade")?this.$backdrop.one("bsTransitionEnd",f).emulateTransitionEnd(150):f()}else b&&b()},c.prototype.checkScrollbar=function(){document.body.clientWidth>=window.innerWidth||(this.scrollbarWidth=this.scrollbarWidth||this.measureScrollbar())},c.prototype.setScrollbar=function(){var a=parseInt(this.$body.css("padding-right")||0,10);this.scrollbarWidth&&this.$body.css("padding-right",a+this.scrollbarWidth)},c.prototype.resetScrollbar=function(){this.$body.css("padding-right","")},c.prototype.measureScrollbar=function(){var a=document.createElement("div");a.className="modal-scrollbar-measure",this.$body.append(a);var b=a.offsetWidth-a.clientWidth;return this.$body[0].removeChild(a),b};var d=a.fn.modal;a.fn.modal=b,a.fn.modal.Constructor=c,a.fn.modal.noConflict=function(){return a.fn.modal=d,this},a(document).on("click.bs.modal.data-api",'[data-toggle="modal"]',function(c){var d=a(this),e=d.attr("href"),f=a(d.attr("data-target")||e&&e.replace(/.*(?=#[^\s]+$)/,"")),g=f.data("bs.modal")?"toggle":a.extend({remote:!/#/.test(e)&&e},f.data(),d.data());d.is("a")&&c.preventDefault(),f.one("show.bs.modal",function(a){a.isDefaultPrevented()||f.one("hidden.bs.modal",function(){d.is(":visible")&&d.trigger("focus")})}),b.call(f,g,this)})}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.tooltip"),f="object"==typeof 
b&&b;(e||"destroy"!=b)&&(e||d.data("bs.tooltip",e=new c(this,f)),"string"==typeof b&&e[b]())})}var c=function(a,b){this.type=this.options=this.enabled=this.timeout=this.hoverState=this.$element=null,this.init("tooltip",a,b)};c.VERSION="3.2.0",c.DEFAULTS={animation:!0,placement:"top",selector:!1,template:'<div class="tooltip" role="tooltip"><div class="tooltip-arrow"></div><div class="tooltip-inner"></div></div>',trigger:"hover focus",title:"",delay:0,html:!1,container:!1,viewport:{selector:"body",padding:0}},c.prototype.init=function(b,c,d){this.enabled=!0,this.type=b,this.$element=a(c),this.options=this.getOptions(d),this.$viewport=this.options.viewport&&a(this.options.viewport.selector||this.options.viewport);for(var e=this.options.trigger.split(" "),f=e.length;f--;){var g=e[f];if("click"==g)this.$element.on("click."+this.type,this.options.selector,a.proxy(this.toggle,this));else if("manual"!=g){var h="hover"==g?"mouseenter":"focusin",i="hover"==g?"mouseleave":"focusout";this.$element.on(h+"."+this.type,this.options.selector,a.proxy(this.enter,this)),this.$element.on(i+"."+this.type,this.options.selector,a.proxy(this.leave,this))}}this.options.selector?this._options=a.extend({},this.options,{trigger:"manual",selector:""}):this.fixTitle()},c.prototype.getDefaults=function(){return c.DEFAULTS},c.prototype.getOptions=function(b){return b=a.extend({},this.getDefaults(),this.$element.data(),b),b.delay&&"number"==typeof b.delay&&(b.delay={show:b.delay,hide:b.delay}),b},c.prototype.getDelegateOptions=function(){var b={},c=this.getDefaults();return this._options&&a.each(this._options,function(a,d){c[a]!=d&&(b[a]=d)}),b},c.prototype.enter=function(b){var c=b instanceof this.constructor?b:a(b.currentTarget).data("bs."+this.type);return c||(c=new this.constructor(b.currentTarget,this.getDelegateOptions()),a(b.currentTarget).data("bs."+this.type,c)),clearTimeout(c.timeout),c.hoverState="in",c.options.delay&&c.options.delay.show?void(c.timeout=setTimeout(function(){"in"==c.hoverState&&c.show()},c.options.delay.show)):c.show()},c.prototype.leave=function(b){var c=b instanceof this.constructor?b:a(b.currentTarget).data("bs."+this.type);return c||(c=new this.constructor(b.currentTarget,this.getDelegateOptions()),a(b.currentTarget).data("bs."+this.type,c)),clearTimeout(c.timeout),c.hoverState="out",c.options.delay&&c.options.delay.hide?void(c.timeout=setTimeout(function(){"out"==c.hoverState&&c.hide()},c.options.delay.hide)):c.hide()},c.prototype.show=function(){var b=a.Event("show.bs."+this.type);if(this.hasContent()&&this.enabled){this.$element.trigger(b);var c=a.contains(document.documentElement,this.$element[0]);if(b.isDefaultPrevented()||!c)return;var d=this,e=this.tip(),f=this.getUID(this.type);this.setContent(),e.attr("id",f),this.$element.attr("aria-describedby",f),this.options.animation&&e.addClass("fade");var g="function"==typeof this.options.placement?this.options.placement.call(this,e[0],this.$element[0]):this.options.placement,h=/\s?auto?\s?/i,i=h.test(g);i&&(g=g.replace(h,"")||"top"),e.detach().css({top:0,left:0,display:"block"}).addClass(g).data("bs."+this.type,this),this.options.container?e.appendTo(this.options.container):e.insertAfter(this.$element);var j=this.getPosition(),k=e[0].offsetWidth,l=e[0].offsetHeight;if(i){var m=g,n=this.$element.parent(),o=this.getPosition(n);g="bottom"==g&&j.top+j.height+l-o.scroll>o.height?"top":"top"==g&&j.top-o.scroll-l<0?"bottom":"right"==g&&j.right+k>o.width?"left":"left"==g&&j.left-k<o.left?"right":g,e.removeClass(m).addClass(g)}var 
p=this.getCalculatedOffset(g,j,k,l);this.applyPlacement(p,g);var q=function(){d.$element.trigger("shown.bs."+d.type),d.hoverState=null};a.support.transition&&this.$tip.hasClass("fade")?e.one("bsTransitionEnd",q).emulateTransitionEnd(150):q()}},c.prototype.applyPlacement=function(b,c){var d=this.tip(),e=d[0].offsetWidth,f=d[0].offsetHeight,g=parseInt(d.css("margin-top"),10),h=parseInt(d.css("margin-left"),10);isNaN(g)&&(g=0),isNaN(h)&&(h=0),b.top=b.top+g,b.left=b.left+h,a.offset.setOffset(d[0],a.extend({using:function(a){d.css({top:Math.round(a.top),left:Math.round(a.left)})}},b),0),d.addClass("in");var i=d[0].offsetWidth,j=d[0].offsetHeight;"top"==c&&j!=f&&(b.top=b.top+f-j);var k=this.getViewportAdjustedDelta(c,b,i,j);k.left?b.left+=k.left:b.top+=k.top;var l=k.left?2*k.left-e+i:2*k.top-f+j,m=k.left?"left":"top",n=k.left?"offsetWidth":"offsetHeight";d.offset(b),this.replaceArrow(l,d[0][n],m)},c.prototype.replaceArrow=function(a,b,c){this.arrow().css(c,a?50*(1-a/b)+"%":"")},c.prototype.setContent=function(){var a=this.tip(),b=this.getTitle();a.find(".tooltip-inner")[this.options.html?"html":"text"](b),a.removeClass("fade in top bottom left right")},c.prototype.hide=function(){function b(){"in"!=c.hoverState&&d.detach(),c.$element.trigger("hidden.bs."+c.type)}var c=this,d=this.tip(),e=a.Event("hide.bs."+this.type);return this.$element.removeAttr("aria-describedby"),this.$element.trigger(e),e.isDefaultPrevented()?void 0:(d.removeClass("in"),a.support.transition&&this.$tip.hasClass("fade")?d.one("bsTransitionEnd",b).emulateTransitionEnd(150):b(),this.hoverState=null,this)},c.prototype.fixTitle=function(){var a=this.$element;(a.attr("title")||"string"!=typeof a.attr("data-original-title"))&&a.attr("data-original-title",a.attr("title")||"").attr("title","")},c.prototype.hasContent=function(){return this.getTitle()},c.prototype.getPosition=function(b){b=b||this.$element;var c=b[0],d="BODY"==c.tagName;return a.extend({},"function"==typeof c.getBoundingClientRect?c.getBoundingClientRect():null,{scroll:d?document.documentElement.scrollTop||document.body.scrollTop:b.scrollTop(),width:d?a(window).width():b.outerWidth(),height:d?a(window).height():b.outerHeight()},d?{top:0,left:0}:b.offset())},c.prototype.getCalculatedOffset=function(a,b,c,d){return"bottom"==a?{top:b.top+b.height,left:b.left+b.width/2-c/2}:"top"==a?{top:b.top-d,left:b.left+b.width/2-c/2}:"left"==a?{top:b.top+b.height/2-d/2,left:b.left-c}:{top:b.top+b.height/2-d/2,left:b.left+b.width}},c.prototype.getViewportAdjustedDelta=function(a,b,c,d){var e={top:0,left:0};if(!this.$viewport)return e;var f=this.options.viewport&&this.options.viewport.padding||0,g=this.getPosition(this.$viewport);if(/right|left/.test(a)){var h=b.top-f-g.scroll,i=b.top+f-g.scroll+d;h<g.top?e.top=g.top-h:i>g.top+g.height&&(e.top=g.top+g.height-i)}else{var j=b.left-f,k=b.left+f+c;j<g.left?e.left=g.left-j:k>g.width&&(e.left=g.left+g.width-k)}return e},c.prototype.getTitle=function(){var a,b=this.$element,c=this.options;return a=b.attr("data-original-title")||("function"==typeof c.title?c.title.call(b[0]):c.title)},c.prototype.getUID=function(a){do a+=~~(1e6*Math.random());while(document.getElementById(a));return a},c.prototype.tip=function(){return this.$tip=this.$tip||a(this.options.template)},c.prototype.arrow=function(){return 
this.$arrow=this.$arrow||this.tip().find(".tooltip-arrow")},c.prototype.validate=function(){this.$element[0].parentNode||(this.hide(),this.$element=null,this.options=null)},c.prototype.enable=function(){this.enabled=!0},c.prototype.disable=function(){this.enabled=!1},c.prototype.toggleEnabled=function(){this.enabled=!this.enabled},c.prototype.toggle=function(b){var c=this;b&&(c=a(b.currentTarget).data("bs."+this.type),c||(c=new this.constructor(b.currentTarget,this.getDelegateOptions()),a(b.currentTarget).data("bs."+this.type,c))),c.tip().hasClass("in")?c.leave(c):c.enter(c)},c.prototype.destroy=function(){clearTimeout(this.timeout),this.hide().$element.off("."+this.type).removeData("bs."+this.type)};var d=a.fn.tooltip;a.fn.tooltip=b,a.fn.tooltip.Constructor=c,a.fn.tooltip.noConflict=function(){return a.fn.tooltip=d,this}}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.popover"),f="object"==typeof b&&b;(e||"destroy"!=b)&&(e||d.data("bs.popover",e=new c(this,f)),"string"==typeof b&&e[b]())})}var c=function(a,b){this.init("popover",a,b)};if(!a.fn.tooltip)throw new Error("Popover requires tooltip.js");c.VERSION="3.2.0",c.DEFAULTS=a.extend({},a.fn.tooltip.Constructor.DEFAULTS,{placement:"right",trigger:"click",content:"",template:'<div class="popover" role="tooltip"><div class="arrow"></div><h3 class="popover-title"></h3><div class="popover-content"></div></div>'}),c.prototype=a.extend({},a.fn.tooltip.Constructor.prototype),c.prototype.constructor=c,c.prototype.getDefaults=function(){return c.DEFAULTS},c.prototype.setContent=function(){var a=this.tip(),b=this.getTitle(),c=this.getContent();a.find(".popover-title")[this.options.html?"html":"text"](b),a.find(".popover-content").empty()[this.options.html?"string"==typeof c?"html":"append":"text"](c),a.removeClass("fade top bottom left right in"),a.find(".popover-title").html()||a.find(".popover-title").hide()},c.prototype.hasContent=function(){return this.getTitle()||this.getContent()},c.prototype.getContent=function(){var a=this.$element,b=this.options;return a.attr("data-content")||("function"==typeof b.content?b.content.call(a[0]):b.content)},c.prototype.arrow=function(){return this.$arrow=this.$arrow||this.tip().find(".arrow")},c.prototype.tip=function(){return this.$tip||(this.$tip=a(this.options.template)),this.$tip};var d=a.fn.popover;a.fn.popover=b,a.fn.popover.Constructor=c,a.fn.popover.noConflict=function(){return a.fn.popover=d,this}}(jQuery),+function(a){"use strict";function b(c,d){var e=a.proxy(this.process,this);this.$body=a("body"),this.$scrollElement=a(a(c).is("body")?window:c),this.options=a.extend({},b.DEFAULTS,d),this.selector=(this.options.target||"")+" .nav li > a",this.offsets=[],this.targets=[],this.activeTarget=null,this.scrollHeight=0,this.$scrollElement.on("scroll.bs.scrollspy",e),this.refresh(),this.process()}function c(c){return this.each(function(){var d=a(this),e=d.data("bs.scrollspy"),f="object"==typeof c&&c;e||d.data("bs.scrollspy",e=new b(this,f)),"string"==typeof c&&e[c]()})}b.VERSION="3.2.0",b.DEFAULTS={offset:10},b.prototype.getScrollHeight=function(){return this.$scrollElement[0].scrollHeight||Math.max(this.$body[0].scrollHeight,document.documentElement.scrollHeight)},b.prototype.refresh=function(){var b="offset",c=0;a.isWindow(this.$scrollElement[0])||(b="position",c=this.$scrollElement.scrollTop()),this.offsets=[],this.targets=[],this.scrollHeight=this.getScrollHeight();var d=this;this.$body.find(this.selector).map(function(){var 
d=a(this),e=d.data("target")||d.attr("href"),f=/^#./.test(e)&&a(e);return f&&f.length&&f.is(":visible")&&[[f[b]().top+c,e]]||null}).sort(function(a,b){return a[0]-b[0]}).each(function(){d.offsets.push(this[0]),d.targets.push(this[1])})},b.prototype.process=function(){var a,b=this.$scrollElement.scrollTop()+this.options.offset,c=this.getScrollHeight(),d=this.options.offset+c-this.$scrollElement.height(),e=this.offsets,f=this.targets,g=this.activeTarget;if(this.scrollHeight!=c&&this.refresh(),b>=d)return g!=(a=f[f.length-1])&&this.activate(a);if(g&&b<=e[0])return g!=(a=f[0])&&this.activate(a);for(a=e.length;a--;)g!=f[a]&&b>=e[a]&&(!e[a+1]||b<=e[a+1])&&this.activate(f[a])},b.prototype.activate=function(b){this.activeTarget=b,a(this.selector).parentsUntil(this.options.target,".active").removeClass("active");var c=this.selector+'[data-target="'+b+'"],'+this.selector+'[href="'+b+'"]',d=a(c).parents("li").addClass("active");d.parent(".dropdown-menu").length&&(d=d.closest("li.dropdown").addClass("active")),d.trigger("activate.bs.scrollspy")};var d=a.fn.scrollspy;a.fn.scrollspy=c,a.fn.scrollspy.Constructor=b,a.fn.scrollspy.noConflict=function(){return a.fn.scrollspy=d,this},a(window).on("load.bs.scrollspy.data-api",function(){a('[data-spy="scroll"]').each(function(){var b=a(this);c.call(b,b.data())})})}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.tab");e||d.data("bs.tab",e=new c(this)),"string"==typeof b&&e[b]()})}var c=function(b){this.element=a(b)};c.VERSION="3.2.0",c.prototype.show=function(){var b=this.element,c=b.closest("ul:not(.dropdown-menu)"),d=b.data("target");if(d||(d=b.attr("href"),d=d&&d.replace(/.*(?=#[^\s]*$)/,"")),!b.parent("li").hasClass("active")){var e=c.find(".active:last a")[0],f=a.Event("show.bs.tab",{relatedTarget:e});if(b.trigger(f),!f.isDefaultPrevented()){var g=a(d);this.activate(b.closest("li"),c),this.activate(g,g.parent(),function(){b.trigger({type:"shown.bs.tab",relatedTarget:e})})}}},c.prototype.activate=function(b,c,d){function e(){f.removeClass("active").find("> .dropdown-menu > .active").removeClass("active"),b.addClass("active"),g?(b[0].offsetWidth,b.addClass("in")):b.removeClass("fade"),b.parent(".dropdown-menu")&&b.closest("li.dropdown").addClass("active"),d&&d()}var f=c.find("> .active"),g=d&&a.support.transition&&f.hasClass("fade");g?f.one("bsTransitionEnd",e).emulateTransitionEnd(150):e(),f.removeClass("in")};var d=a.fn.tab;a.fn.tab=b,a.fn.tab.Constructor=c,a.fn.tab.noConflict=function(){return a.fn.tab=d,this},a(document).on("click.bs.tab.data-api",'[data-toggle="tab"], [data-toggle="pill"]',function(c){c.preventDefault(),b.call(a(this),"show")})}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.affix"),f="object"==typeof b&&b;e||d.data("bs.affix",e=new c(this,f)),"string"==typeof b&&e[b]()})}var c=function(b,d){this.options=a.extend({},c.DEFAULTS,d),this.$target=a(this.options.target).on("scroll.bs.affix.data-api",a.proxy(this.checkPosition,this)).on("click.bs.affix.data-api",a.proxy(this.checkPositionWithEventLoop,this)),this.$element=a(b),this.affixed=this.unpin=this.pinnedOffset=null,this.checkPosition()};c.VERSION="3.2.0",c.RESET="affix affix-top affix-bottom",c.DEFAULTS={offset:0,target:window},c.prototype.getPinnedOffset=function(){if(this.pinnedOffset)return this.pinnedOffset;this.$element.removeClass(c.RESET).addClass("affix");var a=this.$target.scrollTop(),b=this.$element.offset();return 
this.pinnedOffset=b.top-a},c.prototype.checkPositionWithEventLoop=function(){setTimeout(a.proxy(this.checkPosition,this),1)},c.prototype.checkPosition=function(){if(this.$element.is(":visible")){var b=a(document).height(),d=this.$target.scrollTop(),e=this.$element.offset(),f=this.options.offset,g=f.top,h=f.bottom;"object"!=typeof f&&(h=g=f),"function"==typeof g&&(g=f.top(this.$element)),"function"==typeof h&&(h=f.bottom(this.$element));var i=null!=this.unpin&&d+this.unpin<=e.top?!1:null!=h&&e.top+this.$element.height()>=b-h?"bottom":null!=g&&g>=d?"top":!1;if(this.affixed!==i){null!=this.unpin&&this.$element.css("top","");var j="affix"+(i?"-"+i:""),k=a.Event(j+".bs.affix");this.$element.trigger(k),k.isDefaultPrevented()||(this.affixed=i,this.unpin="bottom"==i?this.getPinnedOffset():null,this.$element.removeClass(c.RESET).addClass(j).trigger(a.Event(j.replace("affix","affixed"))),"bottom"==i&&this.$element.offset({top:b-this.$element.height()-h}))}}};var d=a.fn.affix;a.fn.affix=b,a.fn.affix.Constructor=c,a.fn.affix.noConflict=function(){return a.fn.affix=d,this},a(window).on("load",function(){a('[data-spy="affix"]').each(function(){var c=a(this),d=c.data();d.offset=d.offset||{},d.offsetBottom&&(d.offset.bottom=d.offsetBottom),d.offsetTop&&(d.offset.top=d.offsetTop),b.call(c,d)})})}(jQuery),window.Modernizr=function(a,b,c){function d(a){s.cssText=a}function e(a,b){return typeof a===b}function f(a,b){return!!~(""+a).indexOf(b)}function g(a,b){for(var d in a){var e=a[d];if(!f(e,"-")&&s[e]!==c)return"pfx"==b?e:!0}return!1}function h(a,b,d){for(var f in a){var g=b[a[f]];if(g!==c)return d===!1?a[f]:e(g,"function")?g.bind(d||b):g}return!1}function i(a,b,c){var d=a.charAt(0).toUpperCase()+a.slice(1),f=(a+" "+u.join(d+" ")+d).split(" ");
| return e(b,"string")||e(b,"undefined")?g(f,b):(f=(a+" "+v.join(d+" ")+d).split(" "),h(f,b,c))}var j,k,l,m="2.6.2",n={},o=!0,p=b.documentElement,q="modernizr",r=b.createElement(q),s=r.style,t=({}.toString,"Webkit Moz O ms"),u=t.split(" "),v=t.toLowerCase().split(" "),w={},x=[],y=x.slice,z={}.hasOwnProperty;l=e(z,"undefined")||e(z.call,"undefined")?function(a,b){return b in a&&e(a.constructor.prototype[b],"undefined")}:function(a,b){return z.call(a,b)},Function.prototype.bind||(Function.prototype.bind=function(a){var b=this;if("function"!=typeof b)throw new TypeError;var c=y.call(arguments,1),d=function(){if(this instanceof d){var e=function(){};e.prototype=b.prototype;var f=new e,g=b.apply(f,c.concat(y.call(arguments)));return Object(g)===g?g:f}return b.apply(a,c.concat(y.call(arguments)))};return d}),w.csstransitions=function(){return i("transition")};for(var A in w)l(w,A)&&(k=A.toLowerCase(),n[k]=w[A](),x.push((n[k]?"":"no-")+k));return n.addTest=function(a,b){if("object"==typeof a)for(var d in a)l(a,d)&&n.addTest(d,a[d]);else{if(a=a.toLowerCase(),n[a]!==c)return n;b="function"==typeof b?b():b,"undefined"!=typeof o&&o&&(p.className+=" "+(b?"":"no-")+a),n[a]=b}return n},d(""),r=j=null,function(a,b){function c(a,b){var c=a.createElement("p"),d=a.getElementsByTagName("head")[0]||a.documentElement;return c.innerHTML="x<style>"+b+"</style>",d.insertBefore(c.lastChild,d.firstChild)}function d(){var a=r.elements;return"string"==typeof a?a.split(" "):a}function e(a){var b=q[a[o]];return b||(b={},p++,a[o]=p,q[p]=b),b}function f(a,c,d){if(c||(c=b),k)return c.createElement(a);d||(d=e(c));var f;return f=d.cache[a]?d.cache[a].cloneNode():n.test(a)?(d.cache[a]=d.createElem(a)).cloneNode():d.createElem(a),f.canHaveChildren&&!m.test(a)?d.frag.appendChild(f):f}function g(a,c){if(a||(a=b),k)return a.createDocumentFragment();c=c||e(a);for(var f=c.frag.cloneNode(),g=0,h=d(),i=h.length;i>g;g++)f.createElement(h[g]);return f}function h(a,b){b.cache||(b.cache={},b.createElem=a.createElement,b.createFrag=a.createDocumentFragment,b.frag=b.createFrag()),a.createElement=function(c){return r.shivMethods?f(c,a,b):b.createElem(c)},a.createDocumentFragment=Function("h,f","return function(){var n=f.cloneNode(),c=n.createElement;h.shivMethods&&("+d().join().replace(/\w+/g,function(a){return b.createElem(a),b.frag.createElement(a),'c("'+a+'")'})+");return n}")(r,b.frag)}function i(a){a||(a=b);var d=e(a);return r.shivCSS&&!j&&!d.hasCSS&&(d.hasCSS=!!c(a,"article,aside,figcaption,figure,footer,header,hgroup,nav,section{display:block}mark{background:#FF0;color:#000}")),k||h(a,d),a}var j,k,l=a.html5||{},m=/^<|^(?:button|map|select|textarea|object|iframe|option|optgroup)$/i,n=/^(?:a|b|code|div|fieldset|h1|h2|h3|h4|h5|h6|i|label|li|ol|p|q|span|strong|style|table|tbody|td|th|tr|ul)$/i,o="_html5shiv",p=0,q={};!function(){try{var a=b.createElement("a");a.innerHTML="<xyz></xyz>",j="hidden"in a,k=1==a.childNodes.length||function(){b.createElement("a");var a=b.createDocumentFragment();return"undefined"==typeof a.cloneNode||"undefined"==typeof a.createDocumentFragment||"undefined"==typeof a.createElement}()}catch(c){j=!0,k=!0}}();var r={elements:l.elements||"abbr article aside audio bdi canvas data datalist details figcaption figure footer header hgroup mark meter nav output progress section summary time 
video",shivCSS:l.shivCSS!==!1,supportsUnknownElements:k,shivMethods:l.shivMethods!==!1,type:"default",shivDocument:i,createElement:f,createDocumentFragment:g};a.html5=r,i(b)}(this,b),n._version=m,n._domPrefixes=v,n._cssomPrefixes=u,n.testProp=function(a){return g([a])},n.testAllProps=i,n.prefixed=function(a,b,c){return b?i(a,b,c):i(a,"pfx")},p.className=p.className.replace(/(^|\s)no-js(\s|$)/,"$1$2")+(o?" js "+x.join(" "):""),n}(this,this.document),function(a,b,c){function d(a){return"[object Function]"==q.call(a)}function e(a){return"string"==typeof a}function f(){}function g(a){return!a||"loaded"==a||"complete"==a||"uninitialized"==a}function h(){var a=r.shift();s=1,a?a.t?o(function(){("c"==a.t?m.injectCss:m.injectJs)(a.s,0,a.a,a.x,a.e,1)},0):(a(),h()):s=0}function i(a,c,d,e,f,i,j){function k(b){if(!n&&g(l.readyState)&&(t.r=n=1,!s&&h(),l.onload=l.onreadystatechange=null,b)){"img"!=a&&o(function(){v.removeChild(l)},50);for(var d in A[c])A[c].hasOwnProperty(d)&&A[c][d].onload()}}var j=j||m.errorTimeout,l=b.createElement(a),n=0,q=0,t={t:d,s:c,e:f,a:i,x:j};1===A[c]&&(q=1,A[c]=[]),"object"==a?l.data=c:(l.src=c,l.type=a),l.width=l.height="0",l.onerror=l.onload=l.onreadystatechange=function(){k.call(this,q)},r.splice(e,0,t),"img"!=a&&(q||2===A[c]?(v.insertBefore(l,u?null:p),o(k,j)):A[c].push(l))}function j(a,b,c,d,f){return s=0,b=b||"j",e(a)?i("c"==b?x:w,a,b,this.i++,c,d,f):(r.splice(this.i++,0,a),1==r.length&&h()),this}function k(){var a=m;return a.loader={load:j,i:0},a}var l,m,n=b.documentElement,o=a.setTimeout,p=b.getElementsByTagName("script")[0],q={}.toString,r=[],s=0,t="MozAppearance"in n.style,u=t&&!!b.createRange().compareNode,v=u?n:p.parentNode,n=a.opera&&"[object Opera]"==q.call(a.opera),n=!!b.attachEvent&&!n,w=t?"object":n?"script":"img",x=n?"script":w,y=Array.isArray||function(a){return"[object Array]"==q.call(a)},z=[],A={},B={timeout:function(a,b){return b.length&&(a.timeout=b[0]),a}};m=function(a){function b(a){var b,c,d,a=a.split("!"),e=z.length,f=a.pop(),g=a.length,f={url:f,origUrl:f,prefixes:a};for(c=0;g>c;c++)d=a[c].split("="),(b=B[d.shift()])&&(f=b(f,d));for(c=0;e>c;c++)f=z[c](f);return f}function g(a,e,f,g,h){var i=b(a),j=i.autoCallback;i.url.split(".").pop().split("?").shift(),i.bypass||(e&&(e=d(e)?e:e[a]||e[g]||e[a.split("/").pop().split("?")[0]]),i.instead?i.instead(a,e,f,g,h):(A[i.url]?i.noexec=!0:A[i.url]=1,f.load(i.url,i.forceCSS||!i.forceJS&&"css"==i.url.split(".").pop().split("?").shift()?"c":c,i.noexec,i.attrs,i.timeout),(d(e)||d(j))&&f.load(function(){k(),e&&e(i.origUrl,h,g),j&&j(i.origUrl,h,g),A[i.url]=2})))}function h(a,b){function c(a,c){if(a){if(e(a))c||(l=function(){var a=[].slice.call(arguments);m.apply(this,a),n()}),g(a,l,b,0,j);else if(Object(a)===a)for(i in h=function(){var b,c=0;for(b in a)a.hasOwnProperty(b)&&c++;return c}(),a)a.hasOwnProperty(i)&&(!c&&!--h&&(d(l)?l=function(){var a=[].slice.call(arguments);m.apply(this,a),n()}:l[i]=function(a){return function(){var b=[].slice.call(arguments);a&&a.apply(this,b),n()}}(m[i])),g(a[i],l,b,i,j))}else!c&&n()}var h,i,j=!!a.test,k=a.load||a.both,l=a.callback||f,m=l,n=a.complete||f;c(j?a.yep:a.nope,!!k),k&&c(k)}var i,j,l=this.yepnope.loader;if(e(a))g(a,0,l,0);else if(y(a))for(i=0;i<a.length;i++)j=a[i],e(j)?g(j,0,l,0):y(j)?m(j):Object(j)===j&&h(j,l);else 
Object(a)===a&&h(a,l)},m.addPrefix=function(a,b){B[a]=b},m.addFilter=function(a){z.push(a)},m.errorTimeout=1e4,null==b.readyState&&b.addEventListener&&(b.readyState="loading",b.addEventListener("DOMContentLoaded",l=function(){b.removeEventListener("DOMContentLoaded",l,0),b.readyState="complete"},0)),a.yepnope=k(),a.yepnope.executeStack=h,a.yepnope.injectJs=function(a,c,d,e,i,j){var k,l,n=b.createElement("script"),e=e||m.errorTimeout;n.src=a;for(l in d)n.setAttribute(l,d[l]);c=j?h:c||f,n.onreadystatechange=n.onload=function(){!k&&g(n.readyState)&&(k=1,c(),n.onload=n.onreadystatechange=null)},o(function(){k||(k=1,c(1))},e),i?n.onload():p.parentNode.insertBefore(n,p)},a.yepnope.injectCss=function(a,c,d,e,g,i){var j,e=b.createElement("link"),c=i?h:c||f;e.href=a,e.rel="stylesheet",e.type="text/css";for(j in d)e.setAttribute(j,d[j]);g||(p.parentNode.insertBefore(e,p),o(c,0))}}(this,document),Modernizr.load=function(){yepnope.apply(window,[].slice.call(arguments,0))},function(a){var b={common:{init:function(){}},home:{init:function(){}},about_us:{init:function(){}}},c={fire:function(a,c,d){var e=b;c=void 0===c?"init":c,""!==a&&e[a]&&"function"==typeof e[a][c]&&e[a][c](d)},loadEvents:function(){c.fire("common"),a.each(document.body.className.replace(/-/g,"_").split(/\s+/),function(a,b){c.fire(b)})}};a(document).ready(c.loadEvents)}(jQuery); |
|
frustum.rs | use crate::algebra::{Matrix4, Vector3};
use crate::{
math::{aabb::AxisAlignedBoundingBox, plane::Plane},
visitor::{Visit, VisitResult, Visitor},
};
use nalgebra::Point3;
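/// A view frustum described by six clipping planes, typically extracted from a combined
/// view-projection matrix via [`Frustum::from`].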
#[derive(Copy, Clone)]
pub struct Frustum {
/// 0 - left, 1 - right, 2 - top, 3 - bottom, 4 - far, 5 - near
planes: [Plane; 6],
}
impl Default for Frustum {
fn default() -> Self {
Self::from(Matrix4::new_perspective(
1.0,
std::f32::consts::FRAC_PI_2,
0.01,
1024.0,
))
.unwrap()
}
}
impl Frustum {
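    /// Extracts the six frustum planes from a combined view-projection matrix using the
    /// Gribb/Hartmann row-combination method; returns `Err(())` if any plane is degenerate
    /// and cannot be constructed.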
pub fn from(m: Matrix4<f32>) -> Result<Self, ()> {
Ok(Self {
planes: [
Plane::from_abcd(m[3] + m[0], m[7] + m[4], m[11] + m[8], m[15] + m[12])?,
Plane::from_abcd(m[3] - m[0], m[7] - m[4], m[11] - m[8], m[15] - m[12])?,
Plane::from_abcd(m[3] - m[1], m[7] - m[5], m[11] - m[9], m[15] - m[13])?,
Plane::from_abcd(m[3] + m[1], m[7] + m[5], m[11] + m[9], m[15] + m[13])?,
Plane::from_abcd(m[3] - m[2], m[7] - m[6], m[11] - m[10], m[15] - m[14])?,
Plane::from_abcd(m[3] + m[2], m[7] + m[6], m[11] + m[10], m[15] + m[14])?,
],
})
}
#[inline]
pub fn left(&self) -> &Plane {
self.planes.get(0).unwrap()
}
#[inline]
pub fn right(&self) -> &Plane {
self.planes.get(1).unwrap()
}
#[inline]
pub fn top(&self) -> &Plane {
self.planes.get(2).unwrap()
}
#[inline]
pub fn bottom(&self) -> &Plane {
self.planes.get(3).unwrap()
}
#[inline]
pub fn far(&self) -> &Plane {
self.planes.get(4).unwrap()
}
#[inline]
pub fn near(&self) -> &Plane {
self.planes.get(5).unwrap()
}
#[inline]
pub fn planes(&self) -> &[Plane] {
&self.planes
}
#[inline]
pub fn left_top_front_corner(&self) -> Vector3<f32> {
self.left().intersection_point(self.top(), self.far())
}
#[inline]
pub fn left_bottom_front_corner(&self) -> Vector3<f32> {
self.left().intersection_point(self.bottom(), self.far())
}
#[inline]
pub fn right_bottom_front_corner(&self) -> Vector3<f32> {
self.right().intersection_point(self.bottom(), self.far())
}
#[inline]
pub fn right_top_front_corner(&self) -> Vector3<f32> {
self.right().intersection_point(self.top(), self.far())
}
#[inline]
pub fn left_top_back_corner(&self) -> Vector3<f32> {
self.left().intersection_point(self.top(), self.near())
}
#[inline]
pub fn left_bottom_back_corner(&self) -> Vector3<f32> {
self.left().intersection_point(self.bottom(), self.near())
}
#[inline]
pub fn right_bottom_back_corner(&self) -> Vector3<f32> {
self.right().intersection_point(self.bottom(), self.near())
}
#[inline]
pub fn right_top_back_corner(&self) -> Vector3<f32> {
self.right().intersection_point(self.top(), self.near())
}
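    /// Conservative separating-plane test: returns `false` only when *all* points lie behind
    /// a single frustum plane, so it may report an intersection for point clouds that are
    /// actually outside (e.g. near frustum corners).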
pub fn is_intersects_point_cloud(&self, points: &[Vector3<f32>]) -> bool {
for plane in self.planes.iter() {
let mut back_points = 0;
for point in points {
if plane.dot(point) <= 0.0 {
back_points += 1;
if back_points >= points.len() {
// All points are behind current plane.
return false;
}
}
}
}
true
}
pub fn is_intersects_aabb(&self, aabb: &AxisAlignedBoundingBox) -> bool {
let corners = [
Vector3::new(aabb.min.x, aabb.min.y, aabb.min.z),
Vector3::new(aabb.min.x, aabb.min.y, aabb.max.z),
Vector3::new(aabb.max.x, aabb.min.y, aabb.max.z),
Vector3::new(aabb.max.x, aabb.min.y, aabb.min.z),
Vector3::new(aabb.min.x, aabb.max.y, aabb.min.z),
Vector3::new(aabb.min.x, aabb.max.y, aabb.max.z),
Vector3::new(aabb.max.x, aabb.max.y, aabb.max.z),
Vector3::new(aabb.max.x, aabb.max.y, aabb.min.z),
];
self.is_intersects_point_cloud(&corners)
}
pub fn is_intersects_aabb_offset(
&self,
aabb: &AxisAlignedBoundingBox,
offset: Vector3<f32>,
) -> bool {
let corners = [
Vector3::new(aabb.min.x, aabb.min.y, aabb.min.z) + offset,
Vector3::new(aabb.min.x, aabb.min.y, aabb.max.z) + offset,
Vector3::new(aabb.max.x, aabb.min.y, aabb.max.z) + offset,
Vector3::new(aabb.max.x, aabb.min.y, aabb.min.z) + offset,
Vector3::new(aabb.min.x, aabb.max.y, aabb.min.z) + offset,
Vector3::new(aabb.min.x, aabb.max.y, aabb.max.z) + offset,
Vector3::new(aabb.max.x, aabb.max.y, aabb.max.z) + offset,
Vector3::new(aabb.max.x, aabb.max.y, aabb.min.z) + offset,
];
self.is_intersects_point_cloud(&corners)
}
pub fn is_intersects_aabb_transform(
&self,
aabb: &AxisAlignedBoundingBox,
transform: &Matrix4<f32>,
) -> bool {
let corners = [
transform
.transform_point(&Point3::new(aabb.min.x, aabb.min.y, aabb.min.z))
.coords,
transform
.transform_point(&Point3::new(aabb.min.x, aabb.min.y, aabb.max.z))
.coords,
transform
.transform_point(&Point3::new(aabb.max.x, aabb.min.y, aabb.max.z))
.coords,
transform
.transform_point(&Point3::new(aabb.max.x, aabb.min.y, aabb.min.z))
.coords,
transform
.transform_point(&Point3::new(aabb.min.x, aabb.max.y, aabb.min.z))
.coords,
transform
.transform_point(&Point3::new(aabb.min.x, aabb.max.y, aabb.max.z))
.coords,
transform
.transform_point(&Point3::new(aabb.max.x, aabb.max.y, aabb.max.z))
.coords,
transform
.transform_point(&Point3::new(aabb.max.x, aabb.max.y, aabb.min.z))
.coords,
];
self.is_intersects_point_cloud(&corners)
}
pub fn is_contains_point(&self, pt: Vector3<f32>) -> bool {
for plane in self.planes.iter() {
if plane.dot(&pt) <= 0.0 {
return false;
}
}
true
}
pub fn is_intersects_sphere(&self, p: Vector3<f32>, r: f32) -> bool |
}
impl Visit for Frustum {
fn visit(&mut self, name: &str, visitor: &mut Visitor) -> VisitResult {
visitor.enter_region(name)?;
self.planes[0].visit("Left", visitor)?;
self.planes[1].visit("Right", visitor)?;
self.planes[2].visit("Top", visitor)?;
self.planes[3].visit("Bottom", visitor)?;
self.planes[4].visit("Far", visitor)?;
self.planes[5].visit("Near", visitor)?;
visitor.leave_region()
}
}
| {
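        // Per-plane sphere test: a sphere entirely behind any plane is outside; a sphere
        // straddling a plane is immediately treated as intersecting.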
for plane in self.planes.iter() {
let d = plane.dot(&p);
if d < -r {
return false;
}
if d.abs() < r {
return true;
}
}
true
} |
dest_prop.rs | //! Propagates assignment destinations backwards in the CFG to eliminate redundant assignments.
//!
//! # Motivation
//!
//! MIR building can insert a lot of redundant copies, and Rust code in general often tends to move
//! values around a lot. The result is a lot of assignments of the form `dest = {move} src;` in MIR.
//! MIR building for constants in particular tends to create additional locals that are only used
//! inside a single block to shuffle a value around unnecessarily.
//!
//! LLVM by itself is not good enough at eliminating these redundant copies (eg. see
//! <https://github.com/rust-lang/rust/issues/32966>), so this leaves some performance on the table
//! that we can regain by implementing an optimization for removing these assign statements in rustc
//! itself. When this optimization runs fast enough, it can also speed up the constant evaluation
//! and code generation phases of rustc due to the reduced number of statements and locals.
//!
//! # The Optimization
//!
//! Conceptually, this optimization is "destination propagation". It is similar to the Named Return
//! Value Optimization, or NRVO, known from the C++ world, except that it isn't limited to return
//! values or the return place `_0`. On a very high level, independent of the actual implementation
//! details, it does the following:
//!
//! 1) Identify `dest = src;` statements that can be soundly eliminated.
//! 2) Replace all mentions of `src` with `dest` ("unifying" them and propagating the destination
//! backwards).
//! 3) Delete the `dest = src;` statement (by making it a `nop`).
//!
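//! As a hypothetical sketch (not MIR taken from any real function), the pass rewrites
//!
//! ```text
//! _2 = produce_value();
//! _3 = move _2;          // the candidate `dest = src;` assignment
//! use_value(_3);
//! ```
//!
//! into
//!
//! ```text
//! _3 = produce_value();  // mentions of `_2` replaced by `_3`
//! nop;                   // the now self-assigning statement is deleted
//! use_value(_3);
//! ```
//!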
//! Step 1) is by far the hardest, so it is explained in more detail below.
//!
//! ## Soundness
//!
//! Given an `Assign` statement `dest = src;`, where `dest` is a `Place` and `src` is an `Rvalue`,
//! there are a few requirements that must hold for the optimization to be sound:
//!
//! * `dest` must not contain any *indirection* through a pointer. It must access part of the base
//! local. Otherwise it might point to arbitrary memory that is hard to track.
//!
//! It must also not contain any indexing projections, since those take an arbitrary `Local` as
//! the index, and that local might only be initialized shortly before `dest` is used.
//!
//!   Subtle case: If `dest` is, or projects through, a union, then we have to make sure that there
//! remains an assignment to it, since that sets the "active field" of the union. But if `src` is
//! a ZST, it might not be initialized, so there might not be any use of it before the assignment,
//! and performing the optimization would simply delete the assignment, leaving `dest`
//! uninitialized.
//!
//! * `src` must be a bare `Local` without any indirections or field projections (FIXME: Is this a
//! fundamental restriction or just current impl state?). It can be copied or moved by the
//! assignment.
//!
//! * The `dest` and `src` locals must never be [*live*][liveness] at the same time. If they are, it
//! means that they both hold a (potentially different) value that is needed by a future use of
//! the locals. Unifying them would overwrite one of the values.
//!
//! Note that computing liveness of locals that have had their address taken is more difficult:
//! Short of doing full escape analysis on the address/pointer/reference, the pass would need to
//! assume that any operation that can potentially involve opaque user code (such as function
//! calls, destructors, and inline assembly) may access any local that had its address taken
//! before that point.
//!
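//! As a hypothetical sketch of a liveness conflict: in
//!
//! ```text
//! _2 = _1;             // candidate assignment
//! _1 = const 5_i32;    // `_1` is overwritten...
//! use_both(_1, _2);    // ...but the old value is still observed through `_2`
//! ```
//!
//! `_1` and `_2` are live at the same time between the second and third statements, so
//! unifying them would make `use_both` observe `5` twice; the candidate must be rejected.
//!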
//! Here, the first two conditions are simple structural requirements on the `Assign` statements
//! that can be trivially checked. The liveness requirement however is more difficult and costly to
//! check.
//!
//! ## Previous Work
//!
//! A [previous attempt] at implementing an optimization like this turned out to be a significant
//! regression in compiler performance. Fixing the regressions introduced a lot of undesirable
//! complexity to the implementation.
//!
//! A [subsequent approach] tried to avoid the costly computation by limiting itself to acyclic
//! CFGs, but still turned out to be far too costly to run due to suboptimal performance within
//! individual basic blocks, requiring a walk across the entire block for every assignment found
//! within the block. For the `tuple-stress` benchmark, which has 458745 statements in a single
//! block, this proved to be far too costly.
//!
//! Since the first attempt at this, the compiler has improved dramatically, and new analysis
//! frameworks have been added that should make this approach viable without requiring a limited
//! approach that only works for some classes of CFGs:
//! - rustc now has a powerful dataflow analysis framework that can handle forwards and backwards
//! analyses efficiently.
//! - Layout optimizations for generators have been added to improve code generation for
//! async/await, which are very similar in spirit to what this optimization does. Both walk the
//! MIR and record conflicting uses of locals in a `BitMatrix`.
//!
//! Also, rustc now has a simple NRVO pass (see `nrvo.rs`), which handles a subset of the cases that
//! this destination propagation pass handles, proving that similar optimizations can be performed
//! on MIR.
//!
//! ## Pre/Post Optimization
//!
//! It is recommended to run `SimplifyCfg` and then `SimplifyLocals` some time after this pass, as
//! it replaces the eliminated assign statements with `nop`s and leaves unused locals behind.
//!
//! [liveness]: https://en.wikipedia.org/wiki/Live_variable_analysis
//! [previous attempt]: https://github.com/rust-lang/rust/pull/47954
//! [subsequent approach]: https://github.com/rust-lang/rust/pull/71003
use crate::MirPass;
use itertools::Itertools;
use rustc_data_structures::unify::{InPlaceUnificationTable, UnifyKey};
use rustc_index::{
bit_set::{BitMatrix, BitSet},
vec::IndexVec,
};
use rustc_middle::mir::tcx::PlaceTy;
use rustc_middle::mir::visit::{MutVisitor, PlaceContext, Visitor};
use rustc_middle::mir::{dump_mir, PassWhere};
use rustc_middle::mir::{
traversal, Body, InlineAsmOperand, Local, LocalKind, Location, Operand, Place, PlaceElem,
Rvalue, Statement, StatementKind, Terminator, TerminatorKind,
};
use rustc_middle::ty::TyCtxt;
use rustc_mir_dataflow::impls::{MaybeInitializedLocals, MaybeLiveLocals};
use rustc_mir_dataflow::Analysis;
// Empirical measurements have resulted in some observations:
// - Running on a body with a single block and 500 locals takes barely any time
// - Running on a body with ~400 blocks and ~300 relevant locals takes "too long"
// ...so we just limit both to somewhat reasonable-ish looking values.
const MAX_LOCALS: usize = 500;
const MAX_BLOCKS: usize = 250;
pub struct DestinationPropagation;
impl<'tcx> MirPass<'tcx> for DestinationPropagation {
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
// FIXME(#79191, #82678)
if !tcx.sess.opts.debugging_opts.unsound_mir_opts {
return;
}
// Only run at mir-opt-level=3 or higher for now (we don't fix up debuginfo and remove
// storage statements at the moment).
if tcx.sess.mir_opt_level() < 3 {
return;
}
let def_id = body.source.def_id();
let candidates = find_candidates(tcx, body);
if candidates.is_empty() {
debug!("{:?}: no dest prop candidates, done", def_id);
return;
}
// Collect all locals we care about. We only compute conflicts for these to save time.
let mut relevant_locals = BitSet::new_empty(body.local_decls.len());
for CandidateAssignment { dest, src, loc: _ } in &candidates {
relevant_locals.insert(dest.local);
relevant_locals.insert(*src);
}
// This pass unfortunately has `O(l² * s)` performance, where `l` is the number of locals
// and `s` is the number of statements and terminators in the function.
// To prevent blowing up compile times too much, we bail out when there are too many locals.
let relevant = relevant_locals.count();
debug!(
"{:?}: {} locals ({} relevant), {} blocks",
def_id,
body.local_decls.len(),
relevant,
body.basic_blocks().len()
);
if relevant > MAX_LOCALS {
warn!(
"too many candidate locals in {:?} ({}, max is {}), not optimizing",
def_id, relevant, MAX_LOCALS
);
return;
}
if body.basic_blocks().len() > MAX_BLOCKS {
warn!(
"too many blocks in {:?} ({}, max is {}), not optimizing",
def_id,
body.basic_blocks().len(),
MAX_BLOCKS
);
return;
}
let mut conflicts = Conflicts::build(tcx, body, &relevant_locals);
let mut replacements = Replacements::new(body.local_decls.len());
for candidate @ CandidateAssignment { dest, src, loc } in candidates {
// Merge locals that don't conflict.
if !conflicts.can_unify(dest.local, src) {
debug!("at assignment {:?}, conflict {:?} vs. {:?}", loc, dest.local, src);
continue;
}
if replacements.for_src(candidate.src).is_some() {
debug!("src {:?} already has replacement", candidate.src);
continue;
}
if !tcx.consider_optimizing(|| {
format!("DestinationPropagation {:?} {:?}", def_id, candidate)
}) {
break;
}
replacements.push(candidate);
conflicts.unify(candidate.src, candidate.dest.local);
}
replacements.flatten(tcx);
debug!("replacements {:?}", replacements.map);
Replacer { tcx, replacements, place_elem_cache: Vec::new() }.visit_body(body);
// FIXME fix debug info
}
}
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
struct UnifyLocal(Local);
impl From<Local> for UnifyLocal {
fn from(l: Local) -> Self {
Self(l)
}
}
impl UnifyKey for UnifyLocal {
type Value = ();
fn index(&self) -> u32 {
self.0.as_u32()
}
fn from_index(u: u32) -> Self {
Self(Local::from_u32(u))
}
fn tag() -> &'static str {
"UnifyLocal"
}
}
struct Replacements<'tcx> {
/// Maps locals to their replacement.
map: IndexVec<Local, Option<Place<'tcx>>>,
    /// Locals whose live ranges should be killed (their storage statements are removed).
kill: BitSet<Local>,
}
impl Replacements<'tcx> {
fn new(locals: usize) -> Self {
Self { map: IndexVec::from_elem_n(None, locals), kill: BitSet::new_empty(locals) }
}
fn push(&mut self, candidate: CandidateAssignment<'tcx>) {
trace!("Replacements::push({:?})", candidate);
let entry = &mut self.map[candidate.src];
assert!(entry.is_none());
*entry = Some(candidate.dest);
self.kill.insert(candidate.src);
self.kill.insert(candidate.dest.local);
}
    /// Repeatedly applies the stored replacements to one another, until no replacement maps to a
    /// local that itself still has a replacement.
fn flatten(&mut self, tcx: TyCtxt<'tcx>) {
        // Note: This assumes that there are no cycles in the replacements, which is enforced
        // via `Conflicts::unified_locals`. Otherwise this can cause an infinite loop.
for local in self.map.indices() {
if let Some(replacement) = self.map[local] {
// Substitute the base local of `replacement` until fixpoint.
let mut base = replacement.local;
let mut reversed_projection_slices = Vec::with_capacity(1);
while let Some(replacement_for_replacement) = self.map[base] {
base = replacement_for_replacement.local;
reversed_projection_slices.push(replacement_for_replacement.projection);
}
let projection: Vec<_> = reversed_projection_slices
.iter()
.rev()
.flat_map(|projs| projs.iter())
.chain(replacement.projection.iter())
.collect();
let projection = tcx.intern_place_elems(&projection);
// Replace with the final `Place`.
self.map[local] = Some(Place { local: base, projection });
}
}
}
fn for_src(&self, src: Local) -> Option<Place<'tcx>> {
self.map[src]
}
}
struct Replacer<'tcx> {
tcx: TyCtxt<'tcx>,
replacements: Replacements<'tcx>,
place_elem_cache: Vec<PlaceElem<'tcx>>,
}
impl<'tcx> MutVisitor<'tcx> for Replacer<'tcx> {
fn tcx<'a>(&'a self) -> TyCtxt<'tcx> {
self.tcx
}
fn visit_local(&mut self, local: &mut Local, context: PlaceContext, location: Location) {
if context.is_use() && self.replacements.for_src(*local).is_some() {
bug!(
"use of local {:?} should have been replaced by visit_place; context={:?}, loc={:?}",
local,
context,
location,
);
}
}
fn process_projection_elem(
&mut self,
elem: PlaceElem<'tcx>,
_: Location,
) -> Option<PlaceElem<'tcx>> {
match elem {
PlaceElem::Index(local) => {
if let Some(replacement) = self.replacements.for_src(local) {
bug!(
"cannot replace {:?} with {:?} in index projection {:?}",
local,
replacement,
elem,
);
} else {
None
}
}
_ => None,
}
}
fn visit_place(&mut self, place: &mut Place<'tcx>, context: PlaceContext, location: Location) {
if let Some(replacement) = self.replacements.for_src(place.local) {
// Rebase `place`s projections onto `replacement`'s.
self.place_elem_cache.clear();
self.place_elem_cache.extend(replacement.projection.iter().chain(place.projection));
let projection = self.tcx.intern_place_elems(&self.place_elem_cache);
let new_place = Place { local: replacement.local, projection };
debug!("Replacer: {:?} -> {:?}", place, new_place);
*place = new_place;
}
self.super_place(place, context, location);
}
fn visit_statement(&mut self, statement: &mut Statement<'tcx>, location: Location) {
self.super_statement(statement, location);
match &statement.kind {
// FIXME: Don't delete storage statements, merge the live ranges instead
StatementKind::StorageDead(local) | StatementKind::StorageLive(local)
if self.replacements.kill.contains(*local) =>
{
statement.make_nop()
}
StatementKind::Assign(box (dest, rvalue)) => {
match rvalue {
Rvalue::Use(Operand::Copy(place) | Operand::Move(place)) => {
// These might've been turned into self-assignments by the replacement
// (this includes the original statement we wanted to eliminate).
if dest == place {
debug!("{:?} turned into self-assignment, deleting", location);
statement.make_nop();
}
}
_ => {}
}
}
_ => {}
}
}
}
struct Conflicts<'a> {
relevant_locals: &'a BitSet<Local>,
/// The conflict matrix. It is always symmetric and the adjacency matrix of the corresponding
/// conflict graph.
matrix: BitMatrix<Local, Local>,
/// Preallocated `BitSet` used by `unify`.
unify_cache: BitSet<Local>,
/// Tracks locals that have been merged together to prevent cycles and propagate conflicts.
unified_locals: InPlaceUnificationTable<UnifyLocal>,
}
| fn build<'tcx>(
tcx: TyCtxt<'tcx>,
body: &'_ Body<'tcx>,
relevant_locals: &'a BitSet<Local>,
) -> Self {
// We don't have to look out for locals that have their address taken, since
// `find_candidates` already takes care of that.
let conflicts = BitMatrix::from_row_n(
&BitSet::new_empty(body.local_decls.len()),
body.local_decls.len(),
);
let mut init = MaybeInitializedLocals
.into_engine(tcx, body)
.iterate_to_fixpoint()
.into_results_cursor(body);
let mut live =
MaybeLiveLocals.into_engine(tcx, body).iterate_to_fixpoint().into_results_cursor(body);
let mut reachable = None;
dump_mir(tcx, None, "DestinationPropagation-dataflow", &"", body, |pass_where, w| {
let reachable = reachable.get_or_insert_with(|| traversal::reachable_as_bitset(body));
match pass_where {
PassWhere::BeforeLocation(loc) if reachable.contains(loc.block) => {
init.seek_before_primary_effect(loc);
live.seek_after_primary_effect(loc);
writeln!(w, " // init: {:?}", init.get())?;
writeln!(w, " // live: {:?}", live.get())?;
}
PassWhere::AfterTerminator(bb) if reachable.contains(bb) => {
let loc = body.terminator_loc(bb);
init.seek_after_primary_effect(loc);
live.seek_before_primary_effect(loc);
writeln!(w, " // init: {:?}", init.get())?;
writeln!(w, " // live: {:?}", live.get())?;
}
PassWhere::BeforeBlock(bb) if reachable.contains(bb) => {
init.seek_to_block_start(bb);
live.seek_to_block_start(bb);
writeln!(w, " // init: {:?}", init.get())?;
writeln!(w, " // live: {:?}", live.get())?;
}
PassWhere::BeforeCFG | PassWhere::AfterCFG | PassWhere::AfterLocation(_) => {}
PassWhere::BeforeLocation(_) | PassWhere::AfterTerminator(_) => {
writeln!(w, " // init: <unreachable>")?;
writeln!(w, " // live: <unreachable>")?;
}
PassWhere::BeforeBlock(_) => {
writeln!(w, " // init: <unreachable>")?;
writeln!(w, " // live: <unreachable>")?;
}
}
Ok(())
});
let mut this = Self {
relevant_locals,
matrix: conflicts,
unify_cache: BitSet::new_empty(body.local_decls.len()),
unified_locals: {
let mut table = InPlaceUnificationTable::new();
// Pre-fill table with all locals (this creates N nodes / "connected" components,
// "graph"-ically speaking).
for local in 0..body.local_decls.len() {
assert_eq!(table.new_key(()), UnifyLocal(Local::from_usize(local)));
}
table
},
};
let mut live_and_init_locals = Vec::new();
// Visit only reachable basic blocks. The exact order is not important.
for (block, data) in traversal::preorder(body) {
// We need to observe the dataflow state *before* all possible locations (statement or
// terminator) in each basic block, and then observe the state *after* the terminator
            // effect is applied. As long as neither `init` nor `live` has a "before" effect,
// we will observe all possible dataflow states.
// Since liveness is a backwards analysis, we need to walk the results backwards. To do
// that, we first collect in the `MaybeInitializedLocals` results in a forwards
// traversal.
live_and_init_locals.resize_with(data.statements.len() + 1, || {
BitSet::new_empty(body.local_decls.len())
});
// First, go forwards for `MaybeInitializedLocals` and apply intra-statement/terminator
// conflicts.
for (i, statement) in data.statements.iter().enumerate() {
this.record_statement_conflicts(statement);
let loc = Location { block, statement_index: i };
init.seek_before_primary_effect(loc);
live_and_init_locals[i].clone_from(init.get());
}
this.record_terminator_conflicts(data.terminator());
let term_loc = Location { block, statement_index: data.statements.len() };
init.seek_before_primary_effect(term_loc);
live_and_init_locals[term_loc.statement_index].clone_from(init.get());
// Now, go backwards and union with the liveness results.
for statement_index in (0..=data.statements.len()).rev() {
let loc = Location { block, statement_index };
live.seek_after_primary_effect(loc);
live_and_init_locals[statement_index].intersect(live.get());
trace!("record conflicts at {:?}", loc);
this.record_dataflow_conflicts(&mut live_and_init_locals[statement_index]);
}
init.seek_to_block_end(block);
live.seek_to_block_end(block);
let mut conflicts = init.get().clone();
conflicts.intersect(live.get());
trace!("record conflicts at end of {:?}", block);
this.record_dataflow_conflicts(&mut conflicts);
}
this
}
fn record_dataflow_conflicts(&mut self, new_conflicts: &mut BitSet<Local>) {
// Remove all locals that are not candidates.
new_conflicts.intersect(self.relevant_locals);
for local in new_conflicts.iter() {
self.matrix.union_row_with(&new_conflicts, local);
}
}
fn record_local_conflict(&mut self, a: Local, b: Local, why: &str) {
trace!("conflict {:?} <-> {:?} due to {}", a, b, why);
self.matrix.insert(a, b);
self.matrix.insert(b, a);
}
/// Records locals that must not overlap during the evaluation of `stmt`. These locals conflict
/// and must not be merged.
fn record_statement_conflicts(&mut self, stmt: &Statement<'_>) {
match &stmt.kind {
// While the left and right sides of an assignment must not overlap, we do not mark
// conflicts here as that would make this optimization useless. When we optimize, we
// eliminate the resulting self-assignments automatically.
StatementKind::Assign(_) => {}
StatementKind::LlvmInlineAsm(asm) => {
// Inputs and outputs must not overlap.
for (_, input) in &*asm.inputs {
if let Some(in_place) = input.place() {
if !in_place.is_indirect() {
for out_place in &*asm.outputs {
if !out_place.is_indirect() && !in_place.is_indirect() {
self.record_local_conflict(
in_place.local,
out_place.local,
"aliasing llvm_asm! operands",
);
}
}
}
}
}
}
StatementKind::SetDiscriminant { .. }
| StatementKind::StorageLive(..)
| StatementKind::StorageDead(..)
| StatementKind::Retag(..)
| StatementKind::FakeRead(..)
| StatementKind::AscribeUserType(..)
| StatementKind::Coverage(..)
| StatementKind::CopyNonOverlapping(..)
| StatementKind::Nop => {}
}
}
fn record_terminator_conflicts(&mut self, term: &Terminator<'_>) {
match &term.kind {
TerminatorKind::DropAndReplace {
place: dropped_place,
value,
target: _,
unwind: _,
} => {
if let Some(place) = value.place() {
if !place.is_indirect() && !dropped_place.is_indirect() {
self.record_local_conflict(
place.local,
dropped_place.local,
"DropAndReplace operand overlap",
);
}
}
}
TerminatorKind::Yield { value, resume: _, resume_arg, drop: _ } => {
if let Some(place) = value.place() {
if !place.is_indirect() && !resume_arg.is_indirect() {
self.record_local_conflict(
place.local,
resume_arg.local,
"Yield operand overlap",
);
}
}
}
TerminatorKind::Call {
func,
args,
destination: Some((dest_place, _)),
cleanup: _,
from_hir_call: _,
fn_span: _,
} => {
// No arguments may overlap with the destination.
for arg in args.iter().chain(Some(func)) {
if let Some(place) = arg.place() {
if !place.is_indirect() && !dest_place.is_indirect() {
self.record_local_conflict(
dest_place.local,
place.local,
"call dest/arg overlap",
);
}
}
}
}
TerminatorKind::InlineAsm {
template: _,
operands,
options: _,
line_spans: _,
destination: _,
} => {
                // The intended semantics here aren't documented; we just assume that nothing that
// could be written to by the assembly may overlap with any other operands.
for op in operands {
match op {
InlineAsmOperand::Out { reg: _, late: _, place: Some(dest_place) }
| InlineAsmOperand::InOut {
reg: _,
late: _,
in_value: _,
out_place: Some(dest_place),
} => {
// For output place `place`, add all places accessed by the inline asm.
for op in operands {
match op {
InlineAsmOperand::In { reg: _, value } => {
if let Some(p) = value.place() {
if !p.is_indirect() && !dest_place.is_indirect() {
self.record_local_conflict(
p.local,
dest_place.local,
"asm! operand overlap",
);
}
}
}
InlineAsmOperand::Out {
reg: _,
late: _,
place: Some(place),
} => {
if !place.is_indirect() && !dest_place.is_indirect() {
self.record_local_conflict(
place.local,
dest_place.local,
"asm! operand overlap",
);
}
}
InlineAsmOperand::InOut {
reg: _,
late: _,
in_value,
out_place,
} => {
if let Some(place) = in_value.place() {
if !place.is_indirect() && !dest_place.is_indirect() {
self.record_local_conflict(
place.local,
dest_place.local,
"asm! operand overlap",
);
}
}
if let Some(place) = out_place {
if !place.is_indirect() && !dest_place.is_indirect() {
self.record_local_conflict(
place.local,
dest_place.local,
"asm! operand overlap",
);
}
}
}
InlineAsmOperand::Out { reg: _, late: _, place: None }
| InlineAsmOperand::Const { value: _ }
| InlineAsmOperand::SymFn { value: _ }
| InlineAsmOperand::SymStatic { def_id: _ } => {}
}
}
}
InlineAsmOperand::InOut {
reg: _,
late: _,
in_value: _,
out_place: None,
}
| InlineAsmOperand::In { reg: _, value: _ }
| InlineAsmOperand::Out { reg: _, late: _, place: None }
| InlineAsmOperand::Const { value: _ }
| InlineAsmOperand::SymFn { value: _ }
| InlineAsmOperand::SymStatic { def_id: _ } => {}
}
}
}
TerminatorKind::Goto { .. }
| TerminatorKind::Call { destination: None, .. }
| TerminatorKind::SwitchInt { .. }
| TerminatorKind::Resume
| TerminatorKind::Abort
| TerminatorKind::Return
| TerminatorKind::Unreachable
| TerminatorKind::Drop { .. }
| TerminatorKind::Assert { .. }
| TerminatorKind::GeneratorDrop
| TerminatorKind::FalseEdge { .. }
| TerminatorKind::FalseUnwind { .. } => {}
}
}
/// Checks whether `a` and `b` may be merged. Returns `false` if there's a conflict.
fn can_unify(&mut self, a: Local, b: Local) -> bool {
// After some locals have been unified, their conflicts are only tracked in the root key,
// so look that up.
let a = self.unified_locals.find(a).0;
let b = self.unified_locals.find(b).0;
if a == b {
// Already merged (part of the same connected component).
return false;
}
if self.matrix.contains(a, b) {
// Conflict (derived via dataflow, intra-statement conflicts, or inherited from another
// local during unification).
return false;
}
true
}
/// Merges the conflicts of `a` and `b`, so that each one inherits all conflicts of the other.
///
/// `can_unify` must have returned `true` for the same locals, or this may panic or lead to
/// miscompiles.
///
/// This is called when the pass makes the decision to unify `a` and `b` (or parts of `a` and
/// `b`) and is needed to ensure that future unification decisions take potentially newly
/// introduced conflicts into account.
///
/// For an example, assume we have locals `_0`, `_1`, `_2`, and `_3`. There are these conflicts:
///
/// * `_0` <-> `_1`
/// * `_1` <-> `_2`
/// * `_3` <-> `_0`
///
/// We then decide to merge `_2` with `_3` since they don't conflict. Then we decide to merge
/// `_2` with `_0`, which also doesn't have a conflict in the above list. However `_2` is now
/// `_3`, which does conflict with `_0`.
fn unify(&mut self, a: Local, b: Local) {
trace!("unify({:?}, {:?})", a, b);
// Get the root local of the connected components. The root local stores the conflicts of
// all locals in the connected component (and *is stored* as the conflicting local of other
// locals).
let a = self.unified_locals.find(a).0;
let b = self.unified_locals.find(b).0;
assert_ne!(a, b);
trace!("roots: a={:?}, b={:?}", a, b);
trace!("{:?} conflicts: {:?}", a, self.matrix.iter(a).format(", "));
trace!("{:?} conflicts: {:?}", b, self.matrix.iter(b).format(", "));
self.unified_locals.union(a, b);
let root = self.unified_locals.find(a).0;
assert!(root == a || root == b);
// Make all locals that conflict with `a` also conflict with `b`, and vice versa.
self.unify_cache.clear();
for conflicts_with_a in self.matrix.iter(a) {
self.unify_cache.insert(conflicts_with_a);
}
for conflicts_with_b in self.matrix.iter(b) {
self.unify_cache.insert(conflicts_with_b);
}
for conflicts_with_a_or_b in self.unify_cache.iter() {
// Set both `a` and `b` for this local's row.
self.matrix.insert(conflicts_with_a_or_b, a);
self.matrix.insert(conflicts_with_a_or_b, b);
}
// Write the locals `a` conflicts with to `b`'s row.
self.matrix.union_rows(a, b);
// Write the locals `b` conflicts with to `a`'s row.
self.matrix.union_rows(b, a);
}
}
/// A `dest = {move} src;` statement at `loc`.
///
/// We want to consider merging `dest` and `src` due to this assignment.
#[derive(Debug, Copy, Clone)]
struct CandidateAssignment<'tcx> {
/// Does not contain indirection or indexing (so the only local it contains is the place base).
dest: Place<'tcx>,
src: Local,
loc: Location,
}
/// Scans the MIR for assignments between locals that we might want to consider merging.
///
/// This will filter out assignments that do not match the right form (as described in the top-level
/// comment) and also throw out assignments that involve a local that has its address taken or is
/// otherwise ineligible (eg. locals used as array indices are ignored because we cannot propagate
/// arbitrary places into array indices).
fn find_candidates<'a, 'tcx>(
tcx: TyCtxt<'tcx>,
body: &'a Body<'tcx>,
) -> Vec<CandidateAssignment<'tcx>> {
let mut visitor = FindAssignments {
tcx,
body,
candidates: Vec::new(),
ever_borrowed_locals: ever_borrowed_locals(body),
locals_used_as_array_index: locals_used_as_array_index(body),
};
visitor.visit_body(body);
visitor.candidates
}
struct FindAssignments<'a, 'tcx> {
tcx: TyCtxt<'tcx>,
body: &'a Body<'tcx>,
candidates: Vec<CandidateAssignment<'tcx>>,
ever_borrowed_locals: BitSet<Local>,
locals_used_as_array_index: BitSet<Local>,
}
impl<'a, 'tcx> Visitor<'tcx> for FindAssignments<'a, 'tcx> {
fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
if let StatementKind::Assign(box (
dest,
Rvalue::Use(Operand::Copy(src) | Operand::Move(src)),
)) = &statement.kind
{
// `dest` must not have pointer indirection.
if dest.is_indirect() {
return;
}
// `src` must be a plain local.
if !src.projection.is_empty() {
return;
}
// Since we want to replace `src` with `dest`, `src` must not be required.
if is_local_required(src.local, self.body) {
return;
}
// Can't optimize if both locals ever have their address taken (can introduce
// aliasing).
// FIXME: This can be smarter and take `StorageDead` into account (which
// invalidates borrows).
if self.ever_borrowed_locals.contains(dest.local)
|| self.ever_borrowed_locals.contains(src.local)
{
return;
}
assert_ne!(dest.local, src.local, "self-assignments are UB");
// We can't replace locals occurring in `PlaceElem::Index` for now.
if self.locals_used_as_array_index.contains(src.local) {
return;
}
// Handle the "subtle case" described above by rejecting any `dest` that is or
// projects through a union.
let mut place_ty = PlaceTy::from_ty(self.body.local_decls[dest.local].ty);
if place_ty.ty.is_union() {
return;
}
for elem in dest.projection {
if let PlaceElem::Index(_) = elem {
// `dest` contains an indexing projection.
return;
}
place_ty = place_ty.projection_ty(self.tcx, elem);
if place_ty.ty.is_union() {
return;
}
}
self.candidates.push(CandidateAssignment {
dest: *dest,
src: src.local,
loc: location,
});
}
}
}
/// Some locals are part of the function's interface and can not be removed.
///
/// Note that these locals *can* still be merged with non-required locals by removing that other
/// local.
fn is_local_required(local: Local, body: &Body<'_>) -> bool {
match body.local_kind(local) {
LocalKind::Arg | LocalKind::ReturnPointer => true,
LocalKind::Var | LocalKind::Temp => false,
}
}
/// Walks MIR to find all locals that have their address taken anywhere.
fn ever_borrowed_locals(body: &Body<'_>) -> BitSet<Local> {
let mut visitor = BorrowCollector { locals: BitSet::new_empty(body.local_decls.len()) };
visitor.visit_body(body);
visitor.locals
}
struct BorrowCollector {
locals: BitSet<Local>,
}
impl<'tcx> Visitor<'tcx> for BorrowCollector {
fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) {
self.super_rvalue(rvalue, location);
match rvalue {
Rvalue::AddressOf(_, borrowed_place) | Rvalue::Ref(_, _, borrowed_place) => {
if !borrowed_place.is_indirect() {
self.locals.insert(borrowed_place.local);
}
}
Rvalue::Cast(..)
| Rvalue::ShallowInitBox(..)
| Rvalue::Use(..)
| Rvalue::Repeat(..)
| Rvalue::Len(..)
| Rvalue::BinaryOp(..)
| Rvalue::CheckedBinaryOp(..)
| Rvalue::NullaryOp(..)
| Rvalue::UnaryOp(..)
| Rvalue::Discriminant(..)
| Rvalue::Aggregate(..)
| Rvalue::ThreadLocalRef(..) => {}
}
}
fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
self.super_terminator(terminator, location);
match terminator.kind {
TerminatorKind::Drop { place: dropped_place, .. }
| TerminatorKind::DropAndReplace { place: dropped_place, .. } => {
self.locals.insert(dropped_place.local);
}
TerminatorKind::Abort
| TerminatorKind::Assert { .. }
| TerminatorKind::Call { .. }
| TerminatorKind::FalseEdge { .. }
| TerminatorKind::FalseUnwind { .. }
| TerminatorKind::GeneratorDrop
| TerminatorKind::Goto { .. }
| TerminatorKind::Resume
| TerminatorKind::Return
| TerminatorKind::SwitchInt { .. }
| TerminatorKind::Unreachable
| TerminatorKind::Yield { .. }
| TerminatorKind::InlineAsm { .. } => {}
}
}
}
/// `PlaceElem::Index` only stores a `Local`, so we can't replace that with a full `Place`.
///
/// Collect locals used as indices so we don't generate candidates that are impossible to apply
/// later.
fn locals_used_as_array_index(body: &Body<'_>) -> BitSet<Local> {
let mut visitor = IndexCollector { locals: BitSet::new_empty(body.local_decls.len()) };
visitor.visit_body(body);
visitor.locals
}
struct IndexCollector {
locals: BitSet<Local>,
}
impl<'tcx> Visitor<'tcx> for IndexCollector {
fn visit_projection_elem(
&mut self,
local: Local,
proj_base: &[PlaceElem<'tcx>],
elem: PlaceElem<'tcx>,
context: PlaceContext,
location: Location,
) {
if let PlaceElem::Index(i) = elem {
self.locals.insert(i);
}
self.super_projection_elem(local, proj_base, elem, context, location);
}
} | impl Conflicts<'a> { |
views.py | # -*- coding: utf-8 -*-
"""
Copyright (C) 2015, Radmon.
Use of this source code is governed by the MIT license that can be
found in the LICENSE file.
"""
from flask import render_template
from ..auth import auth
from ..blog import blog
def index():
return render_template('site/index.html')
def about():
|
def init_app(app):
app.add_url_rule('/', 'site.index', index)
app.add_url_rule('/about', 'site.about', about)
app.register_blueprint(auth)
app.register_blueprint(blog)
| return render_template('site/about.html') |
item.rs | /*
* Onshape REST API
*
* The Onshape REST API consumed by all clients.
*
* The version of the OpenAPI document: 1.104
* Contact: [email protected]
* Generated by: https://openapi-generator.tech
*/
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Item {
#[serde(rename = "id", skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
}
impl Item {
pub fn | () -> Item {
Item {
id: None,
}
}
}
| new |
laco.py | a = int(input("Digite um número: "))
if(a < 10):
p | lif(a == 10):
print("O valor de a é igual a 10")
else:
print("O valor de a é maior que 10")
| rint("O valor de a é menor que 10")
e |
mpi_run.py | # Copyright 2020 Uber Technologies, Inc. All Rights Reserved.
# | #
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import sys
from horovod.run.mpi_run import mpi_run as hr_mpi_run
from horovod.run.common.util import codec, secret
def mpi_run(settings, nics, driver, env, stdout=None, stderr=None):
"""
Runs mpirun.
:param settings: Settings for running MPI.
Note: settings.num_proc and settings.hosts must not be None.
:param nics: Interfaces to include by MPI.
:param driver: The Spark driver service that tasks are connected to.
:param env: Environment dictionary to use for running MPI.
:param stdout: Stdout of the mpi process.
Only used when settings.run_func_mode is True.
:param stderr: Stderr of the mpi process.
Only used when settings.run_func_mode is True.
"""
if env is None:
env = os.environ.copy()
# Pass secret key through the environment variables.
env[secret.HOROVOD_SECRET_KEY] = codec.dumps_base64(settings.key)
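    # Instead of the default ssh launcher, point mpirun at a Python-based rsh agent that
    # routes the remote launch through the Spark driver service (via the
    # `-mca plm_rsh_agent` flag assembled below).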
rsh_agent = (sys.executable,
'-m', 'horovod.spark.driver.mpirun_rsh',
codec.dumps_base64(driver.addresses()),
codec.dumps_base64(settings))
settings.extra_mpi_args = ('{extra_mpi_args} -x NCCL_DEBUG=INFO -mca plm_rsh_agent "{rsh_agent}"'
.format(extra_mpi_args=settings.extra_mpi_args if settings.extra_mpi_args else '',
rsh_agent=' '.join(rsh_agent)))
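    # Each MPI rank runs this wrapper module, which uses the encoded driver addresses and
    # settings to connect back to the driver and execute its task's function.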
command = (sys.executable,
'-m', 'horovod.spark.task.mpirun_exec_fn',
codec.dumps_base64(driver.addresses()),
codec.dumps_base64(settings))
hr_mpi_run(settings, nics, env, command, stdout=stdout, stderr=stderr) | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at |
libafl_cc.rs | use libafl_cc::{ClangWrapper, CompilerWrapper};
use std::env;
pub fn | () {
let args: Vec<String> = env::args().collect();
if args.len() > 1 {
let mut dir = env::current_exe().unwrap();
let wrapper_name = dir.file_name().unwrap().to_str().unwrap();
let is_cpp = match wrapper_name[wrapper_name.len()-2..].to_lowercase().as_str() {
"cc" => false,
"++" | "pp" | "xx" => true,
_ => panic!("Could not figure out if c or c++ warpper was called. Expected {:?} to end with c or cxx", dir),
};
dir.pop();
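        // `dir` now holds the directory containing this wrapper binary; `link_staticlib`
        // below presumably resolves the fuzzer's `tutorial` static library from there.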
let mut cc = ClangWrapper::new();
if let Some(code) = cc
.cpp(is_cpp)
// silence the compiler wrapper output, needed for some configure scripts.
.silence(true)
.parse_args(&args)
.expect("Failed to parse the command line")
.link_staticlib(&dir, "tutorial")
.add_arg("-fsanitize-coverage=trace-pc-guard")
.run()
.expect("Failed to run the wrapped compiler")
{
std::process::exit(code);
}
} else {
panic!("LibAFL CC: No Arguments given");
}
}
| main |
loss_helper.py | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Donny You, RainbowSecret
## Microsoft Research
## [email protected]
## Copyright (c) 2019
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import pdb
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from torch.autograd import Variable
from lib.utils.tools.logger import Logger as Log
class WeightedFSOhemCELoss(nn.Module):
def __init__(self, configer):
super().__init__()
self.configer = configer
self.thresh = self.configer.get('loss', 'params')['ohem_thresh']
self.reduction = 'elementwise_mean'
if self.configer.exists('loss', 'params') and 'ce_reduction' in self.configer.get('loss', 'params'):
self.reduction = self.configer.get('loss', 'params')['ce_reduction']
def forward(self, predict, target, min_kept=1, weight=None, ignore_index=-1, **kwargs):
"""
Args:
predict:(n, c, h, w)
target:(n, h, w)
"""
prob_out = F.softmax(predict, dim=1)
tmp_target = target.clone()
tmp_target[tmp_target == ignore_index] = 0
prob = prob_out.gather(1, tmp_target.unsqueeze(1))
mask = target.contiguous().view(-1,) != ignore_index
sort_prob, sort_indices = prob.contiguous().view(-1,)[mask].contiguous().sort()
min_threshold = sort_prob[min(min_kept, sort_prob.numel() - 1)]
threshold = max(min_threshold, self.thresh)
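        # Online hard example mining: only "hard" pixels, whose predicted probability for
        # the ground-truth class falls below the threshold, contribute to the loss; the
        # threshold is lifted so that roughly the `min_kept` hardest pixels always survive.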
loss_matrix = F.cross_entropy(predict, target, weight=weight, ignore_index=ignore_index, reduction='none').contiguous().view(-1,)
sort_loss_matrix = loss_matrix[mask][sort_indices]
select_loss_matrix = sort_loss_matrix[sort_prob < threshold]
if self.reduction == 'sum':
return select_loss_matrix.sum()
elif self.reduction == 'elementwise_mean':
return select_loss_matrix.mean()
else:
raise NotImplementedError('Reduction Error!')
# Cross-entropy Loss
class FSCELoss(nn.Module):
def __init__(self, configer=None):
super(FSCELoss, self).__init__()
self.configer = configer
weight = None
if self.configer.exists('loss', 'params') and 'ce_weight' in self.configer.get('loss', 'params'):
weight = self.configer.get('loss', 'params')['ce_weight']
weight = torch.FloatTensor(weight).cuda()
| if self.configer.exists('loss', 'params') and 'ce_reduction' in self.configer.get('loss', 'params'):
reduction = self.configer.get('loss', 'params')['ce_reduction']
ignore_index = -1
if self.configer.exists('loss', 'params') and 'ce_ignore_index' in self.configer.get('loss', 'params'):
ignore_index = self.configer.get('loss', 'params')['ce_ignore_index']
self.ce_loss = nn.CrossEntropyLoss(weight=weight, ignore_index=ignore_index, reduction=reduction)
def forward(self, inputs, *targets, weights=None, **kwargs):
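        # `inputs` may be a single logits tensor or a tuple/list of logits from several
        # heads; each head's target is rescaled to that head's spatial size before the
        # (optionally weighted) cross-entropy terms are summed.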
loss = 0.0
if isinstance(inputs, tuple) or isinstance(inputs, list):
if weights is None:
weights = [1.0] * len(inputs)
for i in range(len(inputs)):
if len(targets) > 1:
target = self._scale_target(targets[i], (inputs[i].size(2), inputs[i].size(3)))
loss += weights[i] * self.ce_loss(inputs[i], target)
else:
target = self._scale_target(targets[0], (inputs[i].size(2), inputs[i].size(3)))
loss += weights[i] * self.ce_loss(inputs[i], target)
else:
target = self._scale_target(targets[0], (inputs.size(2), inputs.size(3)))
loss = self.ce_loss(inputs, target)
return loss
@staticmethod
def _scale_target(targets_, scaled_size):
targets = targets_.clone().unsqueeze(1).float()
targets = F.interpolate(targets, size=scaled_size, mode='nearest')
return targets.squeeze(1).long()
class FSOhemCELoss(nn.Module):
def __init__(self, configer):
super(FSOhemCELoss, self).__init__()
self.configer = configer
self.thresh = self.configer.get('loss', 'params')['ohem_thresh']
self.min_kept = max(1, self.configer.get('loss', 'params')['ohem_minkeep'])
weight = None
if self.configer.exists('loss', 'params') and 'ce_weight' in self.configer.get('loss', 'params'):
weight = self.configer.get('loss', 'params')['ce_weight']
weight = torch.FloatTensor(weight).cuda()
self.reduction = 'elementwise_mean'
if self.configer.exists('loss', 'params') and 'ce_reduction' in self.configer.get('loss', 'params'):
self.reduction = self.configer.get('loss', 'params')['ce_reduction']
ignore_index = -1
if self.configer.exists('loss', 'params') and 'ce_ignore_index' in self.configer.get('loss', 'params'):
ignore_index = self.configer.get('loss', 'params')['ce_ignore_index']
self.ignore_label = ignore_index
self.ce_loss = nn.CrossEntropyLoss(weight=weight, ignore_index=ignore_index, reduction='none')
def forward(self, predict, target, **kwargs):
"""
Args:
predict:(n, c, h, w)
target:(n, h, w)
weight (Tensor, optional): a manual rescaling weight given to each class.
If given, has to be a Tensor of size "nclasses"
"""
prob_out = F.softmax(predict, dim=1)
tmp_target = target.clone()
tmp_target[tmp_target == self.ignore_label] = 0
prob = prob_out.gather(1, tmp_target.unsqueeze(1))
mask = target.contiguous().view(-1,) != self.ignore_label
sort_prob, sort_indices = prob.contiguous().view(-1,)[mask].contiguous().sort()
min_threshold = sort_prob[min(self.min_kept, sort_prob.numel() - 1)]
threshold = max(min_threshold, self.thresh)
        loss_matrix = self.ce_loss(predict, target).contiguous().view(-1,)
        sort_loss_matrix = loss_matrix[mask][sort_indices]
        select_loss_matrix = sort_loss_matrix[sort_prob < threshold]
if self.reduction == 'sum':
return select_loss_matrix.sum()
elif self.reduction == 'elementwise_mean':
return select_loss_matrix.mean()
else:
raise NotImplementedError('Reduction Error!')
class FSAuxOhemCELoss(nn.Module):
def __init__(self, configer=None):
super(FSAuxOhemCELoss, self).__init__()
self.configer = configer
self.ce_loss = FSCELoss(self.configer)
if self.configer.get('loss', 'loss_type') == 'fs_auxohemce_loss':
self.ohem_ce_loss = FSOhemCELoss(self.configer)
else:
assert self.configer.get('loss', 'loss_type') == 'fs_auxslowohemce_loss'
self.ohem_ce_loss = FSSlowOhemCELoss(self.configer)
def forward(self, inputs, targets, **kwargs):
aux_out, seg_out = inputs
seg_loss = self.ohem_ce_loss(seg_out, targets)
aux_loss = self.ce_loss(aux_out, targets)
loss = self.configer.get('network', 'loss_weights')['seg_loss'] * seg_loss
loss = loss + self.configer.get('network', 'loss_weights')['aux_loss'] * aux_loss
return loss
class FSAuxCELoss(nn.Module):
def __init__(self, configer=None):
super(FSAuxCELoss, self).__init__()
self.configer = configer
self.ce_loss = FSCELoss(self.configer)
def forward(self, inputs, targets, **kwargs):
aux_out, seg_out = inputs
seg_loss = self.ce_loss(seg_out, targets)
aux_loss = self.ce_loss(aux_out, targets)
loss = self.configer.get('network', 'loss_weights')['seg_loss'] * seg_loss
loss = loss + self.configer.get('network', 'loss_weights')['aux_loss'] * aux_loss
return loss
class SegFixLoss(nn.Module):
"""
    We predict a binary mask that categorizes boundary pixels as class 1 and all other pixels as class 0.
    For the pixels predicted as class 1 within the binary mask, we further predict their direction.
"""
def __init__(self, configer=None):
super().__init__()
self.configer = configer
self.ce_loss = FSCELoss(self.configer)
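    # Inverse-frequency class weights: a class covering more pixels receives a
    # proportionally smaller weight, computed as 1 - count / total.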
def calc_weights(self, label_map, num_classes):
weights = []
for i in range(num_classes):
weights.append((label_map == i).sum().data)
weights = torch.FloatTensor(weights)
weights_sum = weights.sum()
return (1 - weights / weights_sum).cuda()
def forward(self, inputs, targets, **kwargs):
from lib.utils.helpers.offset_helper import DTOffsetHelper
pred_mask, pred_direction = inputs
seg_label_map, distance_map, angle_map = targets[0], targets[1], targets[2]
gt_mask = DTOffsetHelper.distance_to_mask_label(distance_map, seg_label_map, return_tensor=True)
gt_size = gt_mask.shape[1:]
mask_weights = self.calc_weights(gt_mask, 2)
pred_direction = F.interpolate(pred_direction, size=gt_size, mode="bilinear", align_corners=True)
pred_mask = F.interpolate(pred_mask, size=gt_size, mode="bilinear", align_corners=True)
mask_loss = F.cross_entropy(pred_mask, gt_mask, weight=mask_weights, ignore_index=-1)
mask_threshold = float(os.environ.get('mask_threshold', 0.5))
binary_pred_mask = torch.softmax(pred_mask, dim=1)[:, 1, :, :] > mask_threshold
gt_direction = DTOffsetHelper.angle_to_direction_label(
angle_map,
seg_label_map=seg_label_map,
extra_ignore_mask=(binary_pred_mask == 0),
return_tensor=True
)
direction_loss_mask = gt_direction != -1
direction_weights = self.calc_weights(gt_direction[direction_loss_mask], pred_direction.size(1))
direction_loss = F.cross_entropy(pred_direction, gt_direction, weight=direction_weights, ignore_index=-1)
if self.training \
and self.configer.get('iters') % self.configer.get('solver', 'display_iter') == 0 \
and torch.cuda.current_device() == 0:
Log.info('mask loss: {} direction loss: {}.'.format(mask_loss, direction_loss))
mask_weight = float(os.environ.get('mask_weight', 1))
direction_weight = float(os.environ.get('direction_weight', 1))
return mask_weight * mask_loss + direction_weight * direction_loss | reduction = 'elementwise_mean' |
add-edit.component.ts | import { Component, OnInit } from '@angular/core';
import { Router, ActivatedRoute } from '@angular/router';
import { FormBuilder, FormGroup, Validators } from '@angular/forms';
import { AccountService, AlertService } from '@app/_services';
@Component({ templateUrl: 'add-edit.component.html' })
export class AddEditComponent implements OnInit {
form: FormGroup;
id: string;
isAddMode: boolean;
loading = false;
submitted = false;
constructor(
private formBuilder: FormBuilder,
private route: ActivatedRoute,
private router: Router,
private accountService: AccountService,
private alertService: AlertService,
) {}
ngOnInit() {
this.id = this.route.snapshot.params['id'];
this.isAddMode = !this.id;
// password not required in edit mode
const passwordValidators = [Validators.minLength(6)];
if (this.isAddMode) {
passwordValidators.push(Validators.required);
}
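        // e.g. add mode -> [Validators.minLength(6), Validators.required]; edit mode -> [Validators.minLength(6)]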
this.form = this.formBuilder.group({ | lastName: ['', Validators.required],
username: ['', Validators.required],
password: ['', passwordValidators]
});
if (!this.isAddMode) {
this.accountService.getById(this.id)
.subscribe(x => {
this.f.firstName.setValue(x.firstName);
this.f.lastName.setValue(x.lastName);
this.f.username.setValue(x.username);
});
}
}
// convenience getter for easy access to form fields
get f() { return this.form.controls; }
onSubmit() {
this.submitted = true;
// reset alerts on submit
this.alertService.clear();
// stop here if form is invalid
if (this.form.invalid) {
return;
}
this.loading = true;
if (this.isAddMode) {
this.createUser();
} else {
this.updateUser();
}
}
private createUser() {
this.accountService.create(this.form.value)
.subscribe(
data => {
this.alertService.success('User added successfully', { keepAfterRouteChange: true });
                    this.router.navigate(['.'], { relativeTo: this.route });
},
error => {
this.alertService.error(error);
this.loading = false;
});
}
private updateUser() {
this.accountService.update(this.id, this.form.value)
.subscribe(
data => {
                    this.alertService.success('Update successful', { keepAfterRouteChange: true });
                    this.router.navigate(['..'], { relativeTo: this.route });
},
error => {
this.alertService.error(error);
this.loading = false;
});
}
} | firstName: ['', Validators.required], |
summaries.py | # coding: utf-8
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import numpy as np
DEFAULT_N_BINS = 10
def compute_summaries(clf, X, W, n_bins=DEFAULT_N_BINS):
proba = clf.predict_proba(X)
count, _ = np.histogram(proba[:, 1], range=(0., 1.), weights=W, bins=n_bins)
return count
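# The weighted histogram of positive-class scores serves as the summary statistic;
# e.g. counts = compute_summaries(clf, X, W) returns `n_bins` weighted bin counts.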
class ClassifierSummaryComputer:
def __init__(self, clf, n_bins=DEFAULT_N_BINS):
self.clf = clf
self.n_bins = n_bins
def __call__(self, X, W):
|
class HistogramSummaryComputer:
def __init__(self, n_bins=DEFAULT_N_BINS):
self.n_bins = n_bins
def fit(self, X):
self.edges_list = []
for i in range(X.shape[1]):
x = X[:, i]
maximum = np.max(x)
minimum = np.min(x)
diff = maximum - minimum
maximum = maximum + diff / self.n_bins # be a bit more inclusive
minimum = minimum - diff / self.n_bins # be a bit more inclusive
count, bin_edges = np.histogram(x, range=(minimum, maximum), bins=self.n_bins)
self.edges_list.append(bin_edges)
return self
def predict(self, X, W):
counts = []
for i, bin_edges in enumerate(self.edges_list):
x = X[:, i]
count, _ = np.histogram(x, bins=bin_edges, weights=W)
counts.extend(count)
return counts
def __call__(self, X, W):
counts = self.predict(X, W)
return np.array(counts)
| proba = self.clf.predict_proba(X)
count, _ = np.histogram(proba[:, 1], range=(0., 1.), weights=W, bins=self.n_bins)
return count |
train.py | import toml
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score
from logzero import logger
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from torchvision import transforms
from model import Model
from data import load_data, CovidChestxrayDataset
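# Average gradient L2 norm across parameters, used below for training diagnostics.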
def check_grad(parameters):
grad = 0
cnt = 0
for p in parameters:
grad += p.grad.norm()
cnt += 1
return grad / cnt
def train():
with open("config.toml") as f:
config = toml.load(f)
base_dir = config["data"]["base_dir"]
epochs = config["train"]["epochs"]
batch_size = config["train"]["batch_size"]
lr = config["train"]["lr"]
betas = config["train"]["betas"]
in_filters = config["model"]["in_filters"]
image_size = config["model"]["image_size"]
filters = config["model"]["filters"]
num_classes = config["model"]["num_classes"]
kernel_size = config["model"]["kernel_size"]
padding = config["model"]["padding"]
num_resblocks = config["model"]["num_resblocks"]
device = "cuda" if torch.cuda.is_available() else "cpu"
records = load_data(base_dir)
train_records, test_records = train_test_split(records, test_size=0.2)
train_transform = transforms.Compose([
transforms.Resize(image_size),
transforms.RandomAffine(10, translate=[0.1, 0.1], shear=0.1),
transforms.ColorJitter(brightness=0.7, contrast=0.7),
transforms.ToTensor(),
transforms.Normalize(0.5, 0.5)
])
test_transform = transforms.Compose([
transforms.Resize(image_size),
transforms.ToTensor(),
transforms.Normalize(0.5, 0.5)
])
trainset = CovidChestxrayDataset(train_records, base_dir, train_transform)
testset = CovidChestxrayDataset(test_records, base_dir, test_transform)
trainloader = DataLoader(trainset, batch_size=batch_size, shuffle=True)
testloader = DataLoader(testset, batch_size=1, shuffle=False)
net = Model(in_filters, image_size, filters, kernel_size, padding, num_resblocks, num_classes)
net.to(device)
criterion = nn.NLLLoss()
optimizer = optim.AdamW(net.parameters(), lr=lr, betas=betas, weight_decay=1e-2)
for epoch in range(epochs):
net.train()
train_loss = 0
train_targets = []
train_probs = [] | grad = 0
for batch in trainloader:
img, label = batch
train_targets += label.numpy().tolist()
img, label = img.to(device), label.to(device)
optimizer.zero_grad()
pred = net(img)
loss = criterion(pred, label)
loss.backward()
grad += check_grad(net.parameters())
torch.nn.utils.clip_grad_norm_(net.parameters(), 1)
optimizer.step()
train_loss += loss.item()
train_preds += pred.cpu().detach().numpy().argmax(axis=1).tolist()
train_probs += pred.cpu().detach().numpy()[:, 1].tolist()
acc = accuracy_score(train_targets, train_preds)
f1 = f1_score(train_targets, train_preds, average="macro")
auc = roc_auc_score(train_targets, train_probs)
logger.info(f"Epoch {epoch+1} Train loss {train_loss/len(trainloader):.5}, Acc {acc*100:.3}%, F1 {f1*100:.3}%, AUC {auc*100:.4}%, grad {grad/len(trainloader)}")
net.eval()
test_loss = 0
test_targets = []
test_preds = []
test_probs = []
for batch in testloader:
img, label = batch
test_targets += label.numpy().tolist()
img, label = img.to(device), label.to(device)
with torch.no_grad():
pred = net(img)
loss = criterion(pred, label)
test_loss += loss.item()
test_preds += pred.cpu().detach().numpy().argmax(axis=1).tolist()
test_probs += pred.cpu().detach().numpy()[:, 1].tolist()
acc = accuracy_score(test_targets, test_preds)
f1 = f1_score(test_targets, test_preds, average="macro")
auc = roc_auc_score(test_targets, test_probs)
logger.info(f"Epoch {epoch+1} Test loss {test_loss/len(testloader):.5}, Acc {acc*100:.3}%, F1 {f1*100:.3}%, AUC {auc*100:.4}%")
    torch.save(net.state_dict(), "net.pt")
if __name__ == "__main__":
train() | train_preds = [] |
flags_run_local_instance.go | package cmd
import (
"debug/elf"
"fmt"
"net"
"strconv"
"time"
"github.com/nanovms/ops/log"
"github.com/nanovms/ops/types"
"github.com/go-errors/errors"
api "github.com/nanovms/ops/lepton"
"github.com/spf13/pflag"
)
// RunLocalInstanceCommandFlags consolidates all command flags required to run a local instance in one struct
type RunLocalInstanceCommandFlags struct {
Accel bool
Bridged bool
BridgeName string
Debug bool
Force bool
GDBPort int
MissingFiles bool
NoTrace []string
Ports []string
SkipBuild bool
Smp int
SyscallSummary bool
TapName string
Trace bool
Verbose bool
}
// MergeToConfig overrides configuration passed by argument with command flags values
func (flags *RunLocalInstanceCommandFlags) MergeToConfig(c *types.Config) error {
c.Debugflags = []string{}
if flags.Trace {
c.Debugflags = []string{"trace", "debugsyscalls", "futex_trace", "fault"}
}
if flags.Debug {
c.RunConfig.Debug = true
c.Debugflags = append(c.Debugflags, "noaslr")
if len(c.ProgramPath) > 0 {
var elfFile *elf.File
elfFile, err := api.GetElfFileInfo(c.ProgramPath)
if err != nil {
return err
}
if api.IsDynamicLinked(elfFile) {
log.Errorf("Program %s must be linked statically", c.ProgramPath)
}
if !api.HasDebuggingSymbols(elfFile) {
log.Errorf("Program %s must be compiled with debugging symbols", c.ProgramPath)
}
} else {
log.Errorf("Debug executable not found (is this a package?)")
}
}
if flags.SyscallSummary {
c.Debugflags = append(c.Debugflags, "syscall_summary")
}
if flags.MissingFiles {
c.Debugflags = append(c.Debugflags, "missing_files")
}
if flags.Smp > 0 {
c.RunConfig.CPUs = flags.Smp
}
if flags.GDBPort != 0 {
c.RunConfig.GdbPort = flags.GDBPort
}
if flags.TapName != "" {
c.RunConfig.TapName = flags.TapName
}
if flags.BridgeName != "" {
c.RunConfig.BridgeName = flags.BridgeName
}
if len(flags.NoTrace) > 0 {
c.NoTrace = flags.NoTrace
}
c.RunConfig.Verbose = flags.Verbose
c.RunConfig.Bridged = flags.Bridged
c.RunConfig.Accel = flags.Accel
c.Force = flags.Force
ports, err := PrepareNetworkPorts(flags.Ports)
if err != nil {
return err
}
for _, p := range ports {
i, err := strconv.Atoi(p)
if err == nil && i == flags.GDBPort {
errstr := fmt.Sprintf("Port %d is forwarded and cannot be used as gdb port", flags.GDBPort)
return errors.New(errstr)
}
portAlreadyExists := false
for _, rconfigPort := range c.RunConfig.Ports {
if rconfigPort == p {
portAlreadyExists = true
break
}
}
if !portAlreadyExists {
c.RunConfig.Ports = append(c.RunConfig.Ports, p)
}
}
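	// Probe each requested port with a short TCP dial: if something accepts the
	// connection, the port is already in use, so we abort with an informative error.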
for _, port := range flags.Ports {
conn, err := net.DialTimeout("tcp", ":"+port, time.Second)
if err != nil {
continue // assume port is not being used
}
if conn != nil {
conn.Close()
message := fmt.Sprintf("Port %v is being used by other application", port)
pid, err := checkPortUserPID(port)
if err != nil |
if pid != "" {
message += fmt.Sprintf(" (PID %s)", pid)
}
return errors.New(message)
}
}
return nil
}
// NewRunLocalInstanceCommandFlags returns an instance of RunLocalInstanceCommandFlags initialized with command flags values
func NewRunLocalInstanceCommandFlags(cmdFlags *pflag.FlagSet) (flags *RunLocalInstanceCommandFlags) {
var err error
flags = &RunLocalInstanceCommandFlags{}
flags.Bridged, err = cmdFlags.GetBool("bridged")
if err != nil {
exitWithError(err.Error())
}
flags.Debug, err = cmdFlags.GetBool("debug")
if err != nil {
exitWithError(err.Error())
}
if flags.Debug {
flags.Accel = false
} else {
flags.Accel, err = cmdFlags.GetBool("accel")
if err != nil {
exitWithError(err.Error())
}
}
flags.Force, err = cmdFlags.GetBool("force")
if err != nil {
exitWithError(err.Error())
}
flags.GDBPort, err = cmdFlags.GetInt("gdbport")
if err != nil {
exitWithError(err.Error())
}
flags.MissingFiles, err = cmdFlags.GetBool("missing-files")
if err != nil {
exitWithError(err.Error())
}
flags.NoTrace, err = cmdFlags.GetStringArray("no-trace")
if err != nil {
exitWithError(err.Error())
}
flags.Ports, err = cmdFlags.GetStringArray("port")
if err != nil {
exitWithError(err.Error())
}
flags.SkipBuild, err = cmdFlags.GetBool("skipbuild")
if err != nil {
exitWithError(err.Error())
}
flags.Smp, err = cmdFlags.GetInt("smp")
if err != nil {
exitWithError(err.Error())
}
flags.SyscallSummary, err = cmdFlags.GetBool("syscall-summary")
if err != nil {
exitWithError(err.Error())
}
flags.TapName, err = cmdFlags.GetString("tapname")
if err != nil {
exitWithError(err.Error())
}
flags.BridgeName, err = cmdFlags.GetString("bridgename")
if err != nil {
exitWithError(err.Error())
}
flags.Trace, err = cmdFlags.GetBool("trace")
if err != nil {
exitWithError(err.Error())
}
flags.Verbose, err = cmdFlags.GetBool("verbose")
if err != nil {
exitWithError(err.Error())
}
return
}
// PersistRunLocalInstanceCommandFlags appends to a command the flags required to run an image
func PersistRunLocalInstanceCommandFlags(cmdFlags *pflag.FlagSet) {
cmdFlags.StringArrayP("port", "p", nil, "port to forward")
cmdFlags.BoolP("force", "f", false, "update images")
cmdFlags.BoolP("debug", "d", false, "enable interactive debugger")
cmdFlags.BoolP("trace", "", false, "enable required flags to trace")
cmdFlags.IntP("gdbport", "g", 0, "qemu TCP port used for GDB interface")
cmdFlags.StringArrayP("no-trace", "", nil, "do not trace syscall")
cmdFlags.BoolP("verbose", "v", false, "verbose")
cmdFlags.BoolP("bridged", "b", false, "bridge networking")
cmdFlags.StringP("bridgename", "", "", "bridge name")
cmdFlags.StringP("tapname", "t", "", "tap device name")
cmdFlags.BoolP("skipbuild", "s", false, "skip building image")
cmdFlags.Bool("accel", true, "use cpu virtualization extension")
cmdFlags.IntP("smp", "", 1, "number of threads to use")
cmdFlags.Bool("syscall-summary", false, "print syscall summary on exit")
cmdFlags.Bool("missing-files", false, "print list of files not found on image at exit")
}
// isIPAddressValid checks whether IP address is valid
func isIPAddressValid(ip string) bool {
	return net.ParseIP(ip) != nil
}
| {
return err
} |
PipelineMetadataContainer.tsx | /*-
* #%L
* Baleen 3
* %%
* Copyright (C) 2020 Dstl
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. | *
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
import React, { useState } from 'react'
import { PipelineMetadataCard } from '../components/PipelineMetadataCard'
import { PipelineMetadata } from '../types'
export interface PipelineMetadataContainerProps {
/**
* The pipelines to show
*/
readonly pipelineMetadata: PipelineMetadata
/**
* Delete the pipeline
*/
readonly deletePipeline: (pipeline: PipelineMetadata) => Promise<void>
/**
* Start the pipeline
*/
readonly startPipeline: (pipeline: PipelineMetadata) => Promise<void>
/**
* Stop the pipeline
*/
readonly stopPipeline: (pipeline: PipelineMetadata) => Promise<void>
}
export const PipelineMetadataContainer: React.FC<PipelineMetadataContainerProps> =
({ pipelineMetadata, deletePipeline, startPipeline, stopPipeline }) => {
const [error, setError] = useState<Error>()
const [isDeleting, setDeleting] = useState(false)
const onDelete = async (): Promise<void> => {
setDeleting(true)
try {
await deletePipeline(pipelineMetadata)
} catch (error) {
setDeleting(false)
setError(error)
}
}
const onStart = async (): Promise<void> => {
try {
await startPipeline(pipelineMetadata)
} catch (error) {
setError(error)
}
}
const onStop = async (): Promise<void> => {
try {
await stopPipeline(pipelineMetadata)
} catch (error) {
setError(error)
}
}
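    // All three handlers funnel failures into the shared `error` state rendered by the
    // card; `isDeleting` is reset only on failure, since a successful delete presumably
    // removes this card from view.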
return (
<PipelineMetadataCard
pipelineMetadata={pipelineMetadata}
onDelete={onDelete}
onStart={onStart}
onStop={onStop}
isDeleting={isDeleting}
error={error}
/>
)
} | * You may obtain a copy of the License at |
model_host.go | package model
import (
"encoding/json"
"errors"
"github.com/huaweicloud/huaweicloud-sdk-go-v3/core/converter"
"strings"
)
type Host struct {
	// Agent ID
	AgentId *string `json:"agent_id,omitempty"`
	// Cloud server (host) ID
	HostId *string `json:"host_id,omitempty"`
	// Host name
	HostName *string `json:"host_name,omitempty"`
	// Private IP address of the host
	HostIp *string `json:"host_ip,omitempty"`
	// Public IP address of the host
	PublicIp *string `json:"public_ip,omitempty"`
	// Name of the enterprise project the host belongs to
	EnterpriseProjectName *string `json:"enterprise_project_name,omitempty"`
	// Server group name
	GroupName *string `json:"group_name,omitempty"`
	// Service expiration time
	ExpireTime *int64 `json:"expire_time,omitempty"`
	// Policy group name
	PolicyGroupName *string `json:"policy_group_name,omitempty"`
	// Host status: running: ACTIVE; stopped: SHUTOFF; creating: BUILDING; faulty: ERROR
	HostStatus *HostHostStatus `json:"host_status,omitempty"`
	// Agent status: not registered: not_register; online: online; offline: offline; all states: all
	AgentStatus *HostAgentStatus `json:"agent_status,omitempty"`
	// HSS edition enabled on the host: hss.version.null: none; hss.version.basic: basic edition; hss.version.enterprise: enterprise edition; hss.version.premium: premium edition; hss.version.wtp: Web Tamper Protection edition
	Version *HostVersion `json:"version,omitempty"`
	// Protection status: opened: enabled; closed: disabled
	ProtectStatus *HostProtectStatus `json:"protect_status,omitempty"`
	// OS image
	OsImage *string `json:"os_image,omitempty"`
	// OS type
	OsType *string `json:"os_type,omitempty"`
	// OS bit width
	OsBit *string `json:"os_bit,omitempty"`
	// Security detection result for the host: undetect: not detected; clean: no risk; risk: at risk
	DetectResult *HostDetectResult `json:"detect_result,omitempty"`
	// Number of asset (port) risks
	RiskPortNum *int32 `json:"risk_port_num,omitempty"`
	// Number of vulnerability risks
	RiskVulNum *int32 `json:"risk_vul_num,omitempty"`
	// Number of intrusion risks
	RiskIntrusionNum *int32 `json:"risk_intrusion_num,omitempty"`
	// Number of baseline risks
	RiskBaselineNum *int32 `json:"risk_baseline_num,omitempty"`
	// Billing mode: packet_cycle: yearly/monthly subscription; on_demand: pay-per-use
	ChargingMode *HostChargingMode `json:"charging_mode,omitempty"`
	// Cloud service resource instance ID (UUID)
	ResourceId *string `json:"resource_id,omitempty"`
}
func (o Host) String() string {
data, err := json.Marshal(o)
if err != nil {
return "Host struct{}"
}
return strings.Join([]string{"Host", string(data)}, " ")
}
type HostHostStatus struct {
value string
}
type HostHostStatusEnum struct {
ACTIVEHOST_STATUS_ACTIVE HostHostStatus
SHUTOFFHOST_STATUS_SHUTOFF HostHostStatus
BUILDINGHOST_STATUS_BUILDING HostHostStatus
ERRORHOST_STATUS_ERROR HostHostStatus
}
func GetHostHostStatusEnum() HostHostStatusEnum {
return HostHostStatusEnum{
ACTIVEHOST_STATUS_ACTIVE: HostHostStatus{
value: "ACTIVE#HOST_STATUS_ACTIVE",
},
SHUTOFFHOST_STATUS_SHUTOFF: HostHostStatus{
value: "SHUTOFF#HOST_STATUS_SHUTOFF",
},
BUILDINGHOST_STATUS_BUILDING: HostHostStatus{
value: "BUILDING#HOST_STATUS_BUILDING",
},
ERRORHOST_STATUS_ERROR: HostHostStatus{
value: "ERROR#HOST_STATUS_ERROR",
},
}
}
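// Usage sketch (illustrative): json.Marshal(GetHostHostStatusEnum().ACTIVEHOST_STATUS_ACTIVE)
// produces the backing string "ACTIVE#HOST_STATUS_ACTIVE" via MarshalJSON below.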
func (c HostHostStatus) MarshalJSON() ([]byte, error) {
return json.Marshal(c.value)
}
func (c *HostHostStatus) UnmarshalJSON(b []byte) error {
myConverter := converter.StringConverterFactory("string")
if myConverter != nil {
val, err := myConverter.CovertStringToInterface(strings.Trim(string(b[:]), "\""))
if err == nil {
c.value = val.(string)
return nil
}
return err
} else {
return errors.New("convert enum data to string error")
}
}
type HostAgentStatus struct {
value string
}
type HostAgentStatusEnum struct {
UNINSTALLAGENT_STATUS_UNINSTALL HostAgentStatus
ONLINEAGENT_STATUS_ONLINE HostAgentStatus
OFFLINEAGENT_STATUS_OFFLINE HostAgentStatus
}
func GetHostAgentStatusEnum() HostAgentStatusEnum {
return HostAgentStatusEnum{
UNINSTALLAGENT_STATUS_UNINSTALL: HostAgentStatus{
value: "uninstall#AGENT_STATUS_UNINSTALL",
},
ONLINEAGENT_STATUS_ONLINE: HostAgentStatus{
value: "online#AGENT_STATUS_ONLINE",
},
OFFLINEAGENT_STATUS_OFFLINE: HostAgentStatus{
value: "offline#AGENT_STATUS_OFFLINE",
},
}
}
func (c HostAgentStatus) MarshalJSON() ([]byte, error) {
return json.Marshal(c.value)
}
func (c *HostAgentStatus) UnmarshalJSON(b []byte) error {
myConverter := converter.StringConverterFactory("string")
if myConverter != nil {
val, err := myConverter.CovertStringToInterface(strings.Trim(string(b[:]), "\""))
if err == nil {
c.value = val.(string)
return nil
}
return err
} else {
return errors.New("convert enum data to string error")
}
}
type HostVersion struct {
value string
}
type HostVersionEnum struct {
HSS_VERSION_NULLVERSION_NULL HostVersion
HSS_VERSION_BASICVERSION_BASIC HostVersion
HSS_VERSION_ENTERPRISEVERSION_ENTERPRISE HostVersion
HSS_VERSION_PREMIUMVERSION_PREMIUM HostVersion
HSS_VERSION_WTPVERSION_WTP HostVersion
}
func GetHostVersionEnum() HostVersionEnum {
return HostVersionEnum{
HSS_VERSION_NULLVERSION_NULL: HostVersion{
value: "hss.version.null#VERSION_NULL",
},
HSS_VERSION_BASICVERSION_BASIC: HostVersion{
value: "hss.version.basic#VERSION_BASIC",
},
HSS_VERSION_ENTERPRISEVERSION_ENTERPRISE: HostVersion{
value: "hss.version.enterprise#VERSION_ENTERPRISE",
},
HSS_VERSION_PREMIUMVERSION_PREMIUM: HostVersion{
value: "hss.version.premium#VERSION_PREMIUM",
},
HSS_VERSION_WTPVERSION_WTP: HostVersion{
value: "hss.version.wtp#VERSION_WTP",
},
}
}
func (c HostVersion) MarshalJSON() ([]byte, error) {
return json.Marshal(c.value)
}
func (c *HostVersion) UnmarshalJSON(b []byte) error {
myConverter := converter.StringConverterFactory("string")
if myConverter != nil {
val, err := myConverter.CovertStringToInterface(strings.Trim(string(b[:]), "\""))
if err == nil {
c.value = val.(string)
return nil
}
return err
} else {
return errors.New("convert enum data to string error")
}
}
type HostProtectStatus struct {
value string
}
type HostProtectStatusEnum struct {
CLOSEDPROTECT_STATUS_CLOSED HostProtectStatus
OPENEDPROTECT_STATUS_OPENED HostProtectStatus
}
func GetHostProtectStatusEnum() HostProtectStatusEnum {
return HostProtectStatusEnum{
CLOSEDPROTECT_STATUS_CLOSED: HostProtectStatus{
value: "closed#PROTECT_STATUS_CLOSED",
},
OPENEDPROTECT_STATUS_OPENED: HostProtectStatus{
value: "opened#PROTECT_STATUS_OPENED",
},
}
}
func (c HostProtectStatus) MarshalJSON() ([]byte, error) {
return json.Marshal(c.value)
}
func (c *HostProtectStatus) UnmarshalJSON(b []byte) error {
myConverter := converter.StringConverterFactory("string")
if myConverter != nil {
val, err := myConverter.CovertStringToInterface(strings.Trim(string(b[:]), "\""))
if err == nil {
c.value = val.(string)
return nil
}
return err
} else {
return errors.New("convert enum data to string error")
}
}
type HostDetectResult struct {
value string
}
type HostDetectResultEnum struct {
UNDETECTDETECT_RESULT_UNDETECT HostDetectResult
CLEANDETECT_RESULT_CLEAN HostDetectResult
RISKDETECT_RESULT_RISK HostDetectResult
}
func GetHostDetectResultEnum() HostDetectResultEnum {
return HostDetectResultEnum{
UNDETECTDETECT_RESULT_UNDETECT: HostDetectResult{
value: "undetect#DETECT_RESULT_UNDETECT",
},
CLEANDETECT_RESULT_CLEAN: HostDetectResult{
value: "clean#DETECT_RESULT_CLEAN",
},
RISKDETECT_RESULT_RISK: HostDetectResult{
value: "risk#DETECT_RESULT_RISK",
},
}
}
func (c HostDetectResult) MarshalJSON() ([]byte, error) {
return json.Marshal(c.value)
}
func (c *HostDetectResult) UnmarshalJSON(b []byte) error {
myConverter := converter.StringConverterFactory("string")
if myConverter != nil {
val, err := myConverter.CovertStringToInterface(strings.Trim(string(b[:]), "\""))
if err == nil {
c.value = val.(string)
return nil
}
return err
} else {
return errors.New("convert enum data to string error")
}
}
type HostChargingMode struct {
value string
}
type HostChargingModeEnum struct {
PACKET_CYCLECHARGING_MODE_PACKET_CYCLE HostChargingMode
ON_DEMANDCHARGING_MODE_ON_DEMAND HostChargingMode
}
func GetHostChargingModeEnum() HostChargingModeEnum {
return HostChargingModeEnum{
PACKET_CYCLECHARGING_MODE_PACKET_CYCLE: HostChargingMode{
value: "packet_cycle#CHARGING_MODE_PACKET_CYCLE",
},
ON_DEMANDCHARGING_MODE_ON_DEMAND: HostChargingMode{
value: "on_demand#CHARGING_MODE_ON_DEMAND",
},
}
}
func (c HostChargingMode) MarshalJSON() ([]byte, error) {
return json.Marshal(c.value)
}
func (c *HostChargingMode) UnmarshalJSON(b []byte) error {
myConverter := converte | um data to string error")
}
}
| r.StringConverterFactory("string")
if myConverter != nil {
val, err := myConverter.CovertStringToInterface(strings.Trim(string(b[:]), "\""))
if err == nil {
c.value = val.(string)
return nil
}
return err
} else {
return errors.New("convert en |
KRBError.go | // Package messages implements Kerberos 5 message types and methods.
package messages
import (
"fmt"
"time"
"github.com/jcmturner/gofork/encoding/asn1"
"gopkg.in/L11R/gokrb5.v7/iana"
"gopkg.in/L11R/gokrb5.v7/iana/asnAppTag"
"gopkg.in/L11R/gokrb5.v7/iana/errorcode"
"gopkg.in/L11R/gokrb5.v7/iana/msgtype"
"gopkg.in/L11R/gokrb5.v7/krberror"
"gopkg.in/L11R/gokrb5.v7/types"
)
// KRBError implements RFC 4120 KRB_ERROR: https://tools.ietf.org/html/rfc4120#section-5.9.1.
type KRBError struct {
PVNO int `asn1:"explicit,tag:0"`
MsgType int `asn1:"explicit,tag:1"`
CTime time.Time `asn1:"generalized,optional,explicit,tag:2"`
Cusec int `asn1:"optional,explicit,tag:3"`
STime time.Time `asn1:"generalized,explicit,tag:4"`
Susec int `asn1:"explicit,tag:5"`
ErrorCode int32 `asn1:"explicit,tag:6"`
CRealm string `asn1:"generalstring,optional,explicit,tag:7"`
CName types.PrincipalName `asn1:"optional,explicit,tag:8"`
Realm string `asn1:"generalstring,explicit,tag:9"`
SName types.PrincipalName `asn1:"explicit,tag:10"`
EText string `asn1:"generalstring,optional,explicit,tag:11"`
EData []byte `asn1:"optional,explicit,tag:12"`
}
// NewKRBError creates a new KRBError.
func NewKRBError(sname types.PrincipalName, realm string, code int32, etext string) KRBError {
t := time.Now().UTC()
return KRBError{
PVNO: iana.PVNO,
MsgType: msgtype.KRB_ERROR,
STime: t,
Susec: int((t.UnixNano() / int64(time.Microsecond)) - (t.Unix() * 1e6)),
ErrorCode: code,
SName: sname,
Realm: realm,
EText: etext,
}
}
// Unmarshal bytes b into the KRBError struct.
func (k *KRBError) Unmarshal(b []byte) error {
_, err := asn1.UnmarshalWithParams(b, k, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.KRBError))
if err != nil {
return krberror.Errorf(err, krberror.EncodingError, "KRB_ERROR unmarshal error")
}
expectedMsgType := msgtype.KRB_ERROR
if k.MsgType != expectedMsgType {
return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate a KRB_ERROR. Expected: %v; Actual: %v", expectedMsgType, k.MsgType)
}
return nil
}
// Error method implementing error interface on KRBError struct.
func (k KRBError) Error() string {
etxt := fmt.Sprintf("KRB Error: %s", errorcode.Lookup(k.ErrorCode))
if k.EText != "" |
return etxt
}
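// A KDC reply that fails ASN.1 unmarshalling as the expected message type may actually
// be a KRB_ERROR; try decoding it as one before reporting the original failure.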
func processUnmarshalReplyError(b []byte, err error) error {
switch err.(type) {
case asn1.StructuralError:
var krberr KRBError
tmperr := krberr.Unmarshal(b)
if tmperr != nil {
return krberror.Errorf(err, krberror.EncodingError, "failed to unmarshal KDC's reply")
}
return krberr
default:
return krberror.Errorf(err, krberror.EncodingError, "failed to unmarshal KDC's reply")
}
}
| {
etxt = fmt.Sprintf("%s - %s", etxt, k.EText)
} |
loading.tsx | import React from 'react';
import { ActivityIndicator, View } from 'react-native';
export default class | extends React.Component {
render() {
return (
<View>
<ActivityIndicator size="large" animating />
</View>
);
}
}
| Loading |
mod.rs | //---------------------------------------------------------------------------//
// Copyright (c) 2017-2020 Ismael Gutiérrez González. All rights reserved.
//
// This file is part of the Rusted PackFile Manager (RPFM) project,
// which can be found here: https://github.com/Frodo45127/rpfm.
//
// This file is licensed under the MIT license, which can be found here:
// https://github.com/Frodo45127/rpfm/blob/master/LICENSE.
//---------------------------------------------------------------------------//
/*!
Module with all the code to interact with any kind of table data.
This module contains the struct `Table`, used to manage the decoded data of a table. For internal use only.
!*/
use bincode::serialize;
use csv::{QuoteStyle, ReaderBuilder, WriterBuilder};
use serde_derive::{Serialize, Deserialize};
use std::collections::BTreeMap;
use std::{fmt, fmt::Display};
use std::fs::File;
use std::io::{BufReader, BufWriter, Read, Write};
use std::path::PathBuf;
use rpfm_error::{Error, ErrorKind, Result};
use crate::assembly_kit::table_data::RawTable;
use crate::common::{decoder::Decoder, encoder::Encoder, parse_str_as_bool};
use crate::schema::*;
pub mod animtable;
pub mod anim_fragment;
pub mod db;
pub mod loc;
pub mod matched_combat;
//---------------------------------------------------------------------------//
// Enum & Structs
//---------------------------------------------------------------------------//
/// This struct contains the data of a Table-like PackedFile after being decoded.
///
/// This is for internal use. If you need to interact with this in any way, do it through the PackedFile that contains it, not directly.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Table {
/// A copy of the `Definition` this table uses, so we don't have to check the schema everywhere.
definition: Definition,
/// The decoded entries of the table. This list is a Vec(rows) of a Vec(fields of a row) of DecodedData (decoded field).
entries: Vec<Vec<DecodedData>>,
}
/// This enum is used to store different types of data in a unified way. Used, for example, to store the data from each field in a DB Table.
///
/// NOTE: `Sequence` is a recursive type. A Sequence/List means you have a repeated sequence of fields
/// inside a single field. Used, for example, in certain model tables.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum DecodedData {
Boolean(bool),
F32(f32),
I16(i16),
I32(i32),
I64(i64),
StringU8(String),
StringU16(String),
OptionalStringU8(String),
OptionalStringU16(String),
SequenceU16(Table),
SequenceU32(Table)
}
//----------------------------------------------------------------//
// Implementations for `DecodedData`.
//----------------------------------------------------------------//
/// Display implementation of `DecodedData`.
impl Display for DecodedData {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
DecodedData::Boolean(_) => write!(f, "Boolean"),
DecodedData::F32(_) => write!(f, "F32"),
DecodedData::I16(_) => write!(f, "I16"),
DecodedData::I32(_) => write!(f, "I32"),
DecodedData::I64(_) => write!(f, "I64"),
DecodedData::StringU8(_) => write!(f, "StringU8"),
DecodedData::StringU16(_) => write!(f, "StringU16"),
DecodedData::OptionalStringU8(_) => write!(f, "OptionalStringU8"),
DecodedData::OptionalStringU16(_) => write!(f, "OptionalStringU16"),
DecodedData::SequenceU16(_) => write!(f, "SequenceU16"),
DecodedData::SequenceU32(_) => write!(f, "SequenceU32"),
}
}
}
/// PartialEq implementation of `DecodedData`. We need this implementation due to the float comparison being... special.
impl PartialEq for DecodedData {
fn eq(&self, other: &Self) -> bool {
match (self, other) {
(DecodedData::Boolean(x), DecodedData::Boolean(y)) => x == y,
(DecodedData::F32(x), DecodedData::F32(y)) => ((x * 1_000_000f32).round() / 1_000_000f32) == ((y * 1_000_000f32).round() / 1_000_000f32),
(DecodedData::I16(x), DecodedData::I16(y)) => x == y,
(DecodedData::I32(x), DecodedData::I32(y)) => x == y,
(DecodedData::I64(x), DecodedData::I64(y)) => x == y,
(DecodedData::StringU8(x), DecodedData::StringU8(y)) => x == y,
(DecodedData::StringU16(x), DecodedData::StringU16(y)) => x == y,
(DecodedData::OptionalStringU8(x), DecodedData::OptionalStringU8(y)) => x == y,
(DecodedData::OptionalStringU16(x), DecodedData::OptionalStringU16(y)) => x == y,
(DecodedData::SequenceU16(x), DecodedData::SequenceU16(y)) => x == y,
(DecodedData::SequenceU32(x), DecodedData::SequenceU32(y)) => x == y,
_ => false
}
}
}
/// Implementation of `DecodedData`.
impl DecodedData {
/// Default implementation of `DecodedData`.
pub fn default(field_type: &FieldType) -> Self {
match field_type {
FieldType::Boolean => DecodedData::Boolean(false),
FieldType::F32 => DecodedData::F32(0.0),
FieldType::I16 => DecodedData::I16(0),
FieldType::I32 => DecodedData::I32(0),
FieldType::I64 => DecodedData::I64(0),
FieldType::StringU8 => DecodedData::StringU8("".to_owned()),
FieldType::StringU16 => DecodedData::StringU16("".to_owned()),
FieldType::OptionalStringU8 => DecodedData::OptionalStringU8("".to_owned()),
FieldType::OptionalStringU16 => DecodedData::OptionalStringU16("".to_owned()),
FieldType::SequenceU16(definition) => DecodedData::SequenceU16(Table::new(definition)),
FieldType::SequenceU32(definition) => DecodedData::SequenceU32(Table::new(definition)),
}
}
    /// This function checks if the type of a specific `DecodedData` is the one it should have, according to the provided `FieldType`.
pub fn is_field_type_correct(&self, field_type: &FieldType) -> bool {
match self {
DecodedData::Boolean(_) => field_type == &FieldType::Boolean,
DecodedData::F32(_) => field_type == &FieldType::F32,
DecodedData::I16(_) => field_type == &FieldType::I16,
DecodedData::I32(_) => field_type == &FieldType::I32,
DecodedData::I64(_) => field_type == &FieldType::I64,
DecodedData::StringU8(_) => field_type == &FieldType::StringU8,
DecodedData::StringU16(_) => field_type == &FieldType::StringU16,
DecodedData::OptionalStringU8(_) => field_type == &FieldType::OptionalStringU8,
DecodedData::OptionalStringU16(_) => field_type == &FieldType::OptionalStringU16,
            DecodedData::SequenceU16(_) => matches!(field_type, FieldType::SequenceU16(_)),
            DecodedData::SequenceU32(_) => matches!(field_type, FieldType::SequenceU32(_)),
}
}
/// This function tries to convert the provided data to the provided fieldtype. This can fail in so many ways you should always check the result.
///
/// NOTE: If you pass the same type as it already has, this becomes an expensive way of cloning.
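    /// For example, `DecodedData::I32(2).convert_between_types(&FieldType::Boolean)` yields
    /// `Ok(DecodedData::Boolean(true))`, since integer-to-boolean conversion checks for values greater than 1.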
pub fn convert_between_types(&self, new_field_type: &FieldType) -> Result<Self> {
match self {
Self::Boolean(ref data) => match new_field_type {
FieldType::Boolean => Ok(self.clone()),
FieldType::F32 => Ok(Self::F32(if *data { 1.0 } else { 0.0 })),
FieldType::I16 => Ok(Self::I16(if *data { 1 } else { 0 })),
FieldType::I32 => Ok(Self::I32(if *data { 1 } else { 0 })),
FieldType::I64 => Ok(Self::I64(if *data { 1 } else { 0 })),
FieldType::StringU8 => Ok(Self::StringU8(data.to_string())),
FieldType::StringU16 => Ok(Self::StringU16(data.to_string())),
FieldType::OptionalStringU8 => Ok(Self::OptionalStringU8(data.to_string())),
FieldType::OptionalStringU16 => Ok(Self::OptionalStringU16(data.to_string())),
FieldType::SequenceU16(_) => Err(ErrorKind::Generic.into()),
FieldType::SequenceU32(_) => Err(ErrorKind::Generic.into()),
}
Self::F32(ref data) => match new_field_type {
FieldType::Boolean => Ok(Self::Boolean(data > &1.0)),
FieldType::F32 => Ok(self.clone()),
FieldType::I16 => Ok(Self::I16(*data as i16)),
FieldType::I32 => Ok(Self::I32(*data as i32)),
FieldType::I64 => Ok(Self::I64(*data as i64)),
FieldType::StringU8 => Ok(Self::StringU8(data.to_string())),
FieldType::StringU16 => Ok(Self::StringU16(data.to_string())),
FieldType::OptionalStringU8 => Ok(Self::OptionalStringU8(data.to_string())),
FieldType::OptionalStringU16 => Ok(Self::OptionalStringU16(data.to_string())),
FieldType::SequenceU16(_) => Err(ErrorKind::Generic.into()),
FieldType::SequenceU32(_) => Err(ErrorKind::Generic.into()),
}
Self::I16(ref data) => match new_field_type {
FieldType::Boolean => Ok(Self::Boolean(data > &1)),
FieldType::F32 => Ok(Self::F32(*data as f32)),
FieldType::I16 => Ok(self.clone()),
FieldType::I32 => Ok(Self::I32(*data as i32)),
FieldType::I64 => Ok(Self::I64(*data as i64)),
FieldType::StringU8 => Ok(Self::StringU8(data.to_string())),
FieldType::StringU16 => Ok(Self::StringU16(data.to_string())),
FieldType::OptionalStringU8 => Ok(Self::OptionalStringU8(data.to_string())),
FieldType::OptionalStringU16 => Ok(Self::OptionalStringU16(data.to_string())),
FieldType::SequenceU16(_) => Err(ErrorKind::Generic.into()),
FieldType::SequenceU32(_) => Err(ErrorKind::Generic.into()),
}
Self::I32(ref data) => match new_field_type {
FieldType::Boolean => Ok(Self::Boolean(data > &1)),
FieldType::F32 => Ok(Self::F32(*data as f32)),
FieldType::I16 => Ok(Self::I16(*data as i16)),
FieldType::I32 => Ok(self.clone()),
FieldType::I64 => Ok(Self::I64(*data as i64)),
FieldType::StringU8 => Ok(Self::StringU8(data.to_string())),
FieldType::StringU16 => Ok(Self::StringU16(data.to_string())),
FieldType::OptionalStringU8 => Ok(Self::OptionalStringU8(data.to_string())),
FieldType::OptionalStringU16 => Ok(Self::OptionalStringU16(data.to_string())),
FieldType::SequenceU16(_) => Err(ErrorKind::Generic.into()),
FieldType::SequenceU32(_) => Err(ErrorKind::Generic.into()),
}
Self::I64(ref data) => match new_field_type {
FieldType::Boolean => Ok(Self::Boolean(data > &1)),
FieldType::F32 => Ok(Self::F32(*data as f32)),
FieldType::I16 => Ok(Self::I16(*data as i16)),
FieldType::I32 => Ok(Self::I32(*data as i32)),
FieldType::I64 => Ok(self.clone()),
FieldType::StringU8 => Ok(Self::StringU8(data.to_string())),
FieldType::StringU16 => Ok(Self::StringU16(data.to_string())),
FieldType::OptionalStringU8 => Ok(Self::OptionalStringU8(data.to_string())),
FieldType::OptionalStringU16 => Ok(Self::OptionalStringU16(data.to_string())),
FieldType::SequenceU16(_) => Err(ErrorKind::Generic.into()),
FieldType::SequenceU32(_) => Err(ErrorKind::Generic.into()),
}
Self::StringU8(ref data) |
Self::StringU16(ref data) |
Self::OptionalStringU8(ref data) |
Self::OptionalStringU16(ref data) => match new_field_type {
FieldType::Boolean => Ok(Self::Boolean(parse_str_as_bool(data)?)),
FieldType::F32 => Ok(Self::F32(data.parse::<f32>()?)),
FieldType::I16 => Ok(Self::I16(data.parse::<i16>()?)),
FieldType::I32 => Ok(Self::I32(data.parse::<i32>()?)),
FieldType::I64 => Ok(Self::I64(data.parse::<i64>()?)),
FieldType::StringU8 => Ok(Self::StringU8(data.to_string())),
FieldType::StringU16 => Ok(Self::StringU16(data.to_string())),
FieldType::OptionalStringU8 => Ok(Self::OptionalStringU8(data.to_string())),
FieldType::OptionalStringU16 => Ok(Self::OptionalStringU16(data.to_string())),
FieldType::SequenceU16(_) => Err(ErrorKind::Generic.into()),
FieldType::SequenceU32(_) => Err(ErrorKind::Generic.into()),
}
/*
Self::SequenceU16(ref data) => match new_field_type {
FieldType::SequenceU16(ref definition) => Ok(self.clone()),
FieldType::SequenceU32(ref definition) => Err(ErrorKind::Generic.into()),
_ => Err(ErrorKind::Generic.into()),
}
Self::SequenceU32(ref data) => match new_field_type {
FieldType::SequenceU16(ref definition) => Err(ErrorKind::Generic.into()),
FieldType::SequenceU32(ref definition) => Ok(self.clone()),
_ => Err(ErrorKind::Generic.into()),
}*/
_ => Err(ErrorKind::Generic.into()),
}
}
    /// This function prints whatever you have in each variant to a String.
pub fn data_to_string(&self) -> String {
match self {
DecodedData::Boolean(data) => data.to_string(),
DecodedData::F32(data) => data.to_string(),
DecodedData::I16(data) => data.to_string(),
DecodedData::I32(data) => data.to_string(),
DecodedData::I64(data) => data.to_string(),
DecodedData::StringU8(data) |
DecodedData::StringU16(data) |
DecodedData::OptionalStringU8(data) |
DecodedData::OptionalStringU16(data) => data.to_owned(),
DecodedData::SequenceU16(_) => "SequenceU16".to_owned(),
DecodedData::SequenceU32(_) => "SequenceU32".to_owned(),
}
}
}
//----------------------------------------------------------------//
// Implementations for `Table`.
//----------------------------------------------------------------//
/// Implementation of `Table`.
impl Table {
/// This function creates a new Table from an existing definition.
pub fn new(definition: &Definition) -> Self {
Table {
definition: definition.clone(),
entries: vec![],
}
}
/// This function returns a copy of the definition of this Table.
pub fn get_definition(&self) -> Definition {
self.definition.clone()
}
/// This function returns a reference to the definition of this Table.
pub fn get_ref_definition(&self) -> &Definition {
&self.definition
}
| self.entries.to_vec()
}
/// This function returns a reference to the entries of this Table.
pub fn get_ref_table_data(&self) -> &[Vec<DecodedData>] {
&self.entries
}
/// This function returns the amount of entries in this Table.
pub fn get_entry_count(&self) -> usize {
self.entries.len()
}
/// This function replaces the definition of this table with the one provided.
///
/// This updates the table's data to follow the format marked by the new definition, so you can use it to *update* the version of your table.
pub fn set_definition(&mut self, new_definition: &Definition) {
        // It's simple: we compare both schemas, and get the original and final positions of each column.
        // If a column is new, its original position is -1. If it has been removed, its final position is -1.
let mut positions: Vec<(i32, i32)> = vec![];
for (new_pos, new_field) in new_definition.get_fields_processed().iter().enumerate() {
if let Some(old_pos) = self.definition.get_fields_processed().iter().position(|x| x.get_name() == new_field.get_name()) {
positions.push((old_pos as i32, new_pos as i32))
} else { positions.push((-1, new_pos as i32)); }
}
// Then, for each field in the old definition, check if exists in the new one.
for (old_pos, old_field) in self.definition.get_fields_processed().iter().enumerate() {
if !new_definition.get_fields_processed().iter().any(|x| x.get_name() == old_field.get_name()) { positions.push((old_pos as i32, -1)); }
}
// We sort the columns by their destination.
positions.sort_by_key(|x| x.1);
// Then, we create the new data using the old one and the column changes.
let mut new_entries: Vec<Vec<DecodedData>> = vec![];
for row in &mut self.entries {
let mut entry = vec![];
for (old_pos, new_pos) in &positions {
// If the new position is -1, it means the column got removed. We skip it.
if *new_pos == -1 { continue; }
                // If the old position is -1, it means we got a new column. We need to get its type and create a `Default` field with it.
else if *old_pos == -1 {
entry.push(DecodedData::default(&new_definition.get_fields_processed()[*new_pos as usize].get_ref_field_type()));
}
                // Otherwise, we got a moved column. Grab its field from the old data and put it in its new place.
else {
entry.push(row[*old_pos as usize].clone());
}
}
new_entries.push(entry);
}
// Then, we finally replace our definition and our data.
self.definition = new_definition.clone();
self.entries = new_entries;
}
/// This function replaces the data of this table with the one provided.
///
/// This can (and will) fail if the data is not of the format defined by the definition of the table.
pub fn set_table_data(&mut self, data: &[Vec<DecodedData>]) -> Result<()> {
for row in data {
// First, we need to make sure all rows we have are exactly what we expect.
let fields_processed = self.definition.get_fields_processed();
if row.len() != fields_processed.len() { return Err(ErrorKind::TableRowWrongFieldCount(fields_processed.len() as u32, row.len() as u32).into()) }
for (index, cell) in row.iter().enumerate() {
                // Next, we need to ensure each field is of the type we expect.
let field = if let Some(field) = fields_processed.get(index) { field } else { return Err(ErrorKind::Generic.into()) };
if !cell.is_field_type_correct(field.get_ref_field_type()) {
return Err(ErrorKind::TableWrongFieldType(format!("{}", cell), format!("{}", field.get_ref_field_type())).into())
}
}
}
// If we passed all the checks, replace the data.
self.entries = data.to_vec();
Ok(())
}
/// This function decodes all the fields of a table from raw bytes.
///
/// If return_incomplete == true, this function will return an error with the incompletely decoded table when it fails.
fn decode(&mut self,
data: &[u8],
entry_count: u32,
mut index: &mut usize,
return_incomplete: bool,
) -> Result<()> {
// Do not specify size here, because a badly written definition can end up triggering an OOM crash if we do.
self.entries = vec![];
for row in 0..entry_count {
let mut decoded_row = Vec::with_capacity(self.definition.get_ref_fields().len());
for column in 0..self.definition.get_ref_fields().len() {
let field = &self.definition.get_ref_fields()[column];
let decoded_cell = match field.get_ref_field_type() {
FieldType::Boolean => {
if let Ok(data) = data.decode_packedfile_bool(*index, &mut index) { Ok(DecodedData::Boolean(data)) }
else { Err(ErrorKind::HelperDecodingEncodingError(format!("<p>Error trying to decode the <i><b>Row {}, Cell {}</b></i> as a <b><i>Boolean</b></i> value: the value is not a boolean, or there are insufficient bytes left to decode it as a boolean value.</p>", row + 1, column + 1))) }
}
FieldType::F32 => {
if let Ok(data) = data.decode_packedfile_float_f32(*index, &mut index) { Ok(DecodedData::F32(data)) }
else { Err(ErrorKind::HelperDecodingEncodingError(format!("<p>Error trying to decode the <i><b>Row {}, Cell {}</b></i> as a <b><i>F32</b></i> value: the value is not a valid F32, or there are insufficient bytes left to decode it as a F32 value.</p>", row + 1, column + 1))) }
}
FieldType::I16 => {
if let Ok(data) = data.decode_packedfile_integer_i16(*index, &mut index) { Ok(DecodedData::I16(data)) }
else { Err(ErrorKind::HelperDecodingEncodingError(format!("<p>Error trying to decode the <i><b>Row {}, Cell {}</b></i> as a <b><i>I16</b></i> value: the value is not a valid I16, or there are insufficient bytes left to decode it as an I16 value.</p>", row + 1, column + 1))) }
}
FieldType::I32 => {
if let Ok(data) = data.decode_packedfile_integer_i32(*index, &mut index) { Ok(DecodedData::I32(data)) }
else { Err(ErrorKind::HelperDecodingEncodingError(format!("<p>Error trying to decode the <i><b>Row {}, Cell {}</b></i> as a <b><i>I32</b></i> value: the value is not a valid I32, or there are insufficient bytes left to decode it as an I32 value.</p>", row + 1, column + 1))) }
}
FieldType::I64 => {
if let Ok(data) = data.decode_packedfile_integer_i64(*index, &mut index) { Ok(DecodedData::I64(data)) }
else { Err(ErrorKind::HelperDecodingEncodingError(format!("<p>Error trying to decode the <i><b>Row {}, Cell {}</b></i> as a <b><i>I64</b></i> value: either the value is not a valid I64, or there are insufficient bytes left to decode it as an I64 value.</p>", row + 1, column + 1))) }
}
FieldType::StringU8 => {
if let Ok(data) = data.decode_packedfile_string_u8(*index, &mut index) { Ok(DecodedData::StringU8(Self::escape_special_chars(&data))) }
else { Err(ErrorKind::HelperDecodingEncodingError(format!("<p>Error trying to decode the <i><b>Row {}, Cell {}</b></i> as an <b><i>UTF-8 String</b></i> value: the value is not a valid UTF-8 String, or there are insufficient bytes left to decode it as an UTF-8 String.</p>", row + 1, column + 1))) }
}
FieldType::StringU16 => {
if let Ok(data) = data.decode_packedfile_string_u16(*index, &mut index) { Ok(DecodedData::StringU16(Self::escape_special_chars(&data))) }
else { Err(ErrorKind::HelperDecodingEncodingError(format!("<p>Error trying to decode the <i><b>Row {}, Cell {}</b></i> as an <b><i>UTF-16 String</b></i> value: the value is not a valid UTF-16 String, or there are insufficient bytes left to decode it as an UTF-16 String.</p>", row + 1, column + 1))) }
}
FieldType::OptionalStringU8 => {
if let Ok(data) = data.decode_packedfile_optional_string_u8(*index, &mut index) { Ok(DecodedData::OptionalStringU8(Self::escape_special_chars(&data))) }
else { Err(ErrorKind::HelperDecodingEncodingError(format!("<p>Error trying to decode the <i><b>Row {}, Cell {}</b></i> as an <b><i>Optional UTF-8 String</b></i> value: the value is not a valid Optional UTF-8 String, or there are insufficient bytes left to decode it as an Optional UTF-8 String.</p>", row + 1, column + 1))) }
}
FieldType::OptionalStringU16 => {
if let Ok(data) = data.decode_packedfile_optional_string_u16(*index, &mut index) { Ok(DecodedData::OptionalStringU16(Self::escape_special_chars(&data))) }
else { Err(ErrorKind::HelperDecodingEncodingError(format!("<p>Error trying to decode the <i><b>Row {}, Cell {}</b></i> as an <b><i>Optional UTF-16 String</b></i> value: the value is not a valid Optional UTF-16 String, or there are insufficient bytes left to decode it as an Optional UTF-16 String.</p>", row + 1, column + 1))) }
}
// This type is just a recursive type.
FieldType::SequenceU16(definition) => {
if let Ok(entry_count) = data.decode_packedfile_integer_u16(*index, &mut index) {
let mut sub_table = Table::new(definition);
sub_table.decode(&data, entry_count.into(), index, return_incomplete)?;
Ok(DecodedData::SequenceU16(sub_table)) }
else { Err(ErrorKind::HelperDecodingEncodingError(format!("<p>Error trying to get the Entry Count of<i><b>Row {}, Cell {}</b></i>: the value is not a valid U32, or there are insufficient bytes left to decode it as an U32 value.</p>", row + 1, column + 1))) }
}
// This type is just a recursive type.
FieldType::SequenceU32(definition) => {
if let Ok(entry_count) = data.decode_packedfile_integer_u32(*index, &mut index) {
let mut sub_table = Table::new(definition);
sub_table.decode(&data, entry_count, index, return_incomplete)?;
Ok(DecodedData::SequenceU32(sub_table)) }
else { Err(ErrorKind::HelperDecodingEncodingError(format!("<p>Error trying to get the Entry Count of<i><b>Row {}, Cell {}</b></i>: the value is not a valid U32, or there are insufficient bytes left to decode it as an U32 value.</p>", row + 1, column + 1))) }
}
};
match decoded_cell {
Ok(data) => {
// If the field is a bitwise, split it into multiple fields. This is currently limited to integer types.
if field.get_is_bitwise() > 1 {
let data = match data {
DecodedData::I16(ref data) => *data as i64,
DecodedData::I32(ref data) => *data as i64,
DecodedData::I64(ref data) => *data,
_ => return Err(ErrorKind::Generic.into())
};
for bitwise_column in 0..field.get_is_bitwise() {
decoded_row.push(DecodedData::Boolean(data & (1 << bitwise_column) != 0));
}
}
// If the field has enum values, we turn it into a string. Same as before, only for integer types.
else if !field.get_enum_values().is_empty() {
let data = match data {
DecodedData::I16(ref data) => *data as i32,
DecodedData::I32(ref data) => *data,
DecodedData::I64(ref data) => *data as i32,
_ => return Err(ErrorKind::Generic.into())
};
match field.get_enum_values().get(&data) {
Some(data) => decoded_row.push(DecodedData::StringU8(data.to_owned())),
None => decoded_row.push(DecodedData::StringU8(data.to_string()))
}
}
else {
decoded_row.push(data);
}
},
Err(error) => if return_incomplete { return Err(ErrorKind::TableIncompleteError(format!("{}", error), serialize(self)?).into()) }
else { return Err(error.into()) }
}
}
self.entries.push(decoded_row);
}
Ok(())
}
/// This function encodes all the fields of a table to raw bytes.
fn encode(&self, mut packed_file: &mut Vec<u8>) -> Result<()> {
let fields = self.definition.get_ref_fields();
let fields_processed = self.definition.get_fields_processed();
for row in &self.entries {
// First, we need to make sure all rows we're going to encode are exactly what we expect.
if row.len() != fields_processed.len() { return Err(ErrorKind::TableRowWrongFieldCount(fields_processed.len() as u32, row.len() as u32).into()) }
let mut data_column = 0;
for field in fields {
if field.get_is_bitwise() > 1 {
let mut data: i64 = 0;
for bitwise_column in 0..field.get_is_bitwise() {
if let DecodedData::Boolean(boolean) = row[data_column] {
if boolean {
data |= 1 << bitwise_column;
}
}
else {
return Err(ErrorKind::TableWrongFieldType(format!("{}", row[data_column]), format!("{}", field.get_ref_field_type())).into())
}
data_column += 1;
}
// If there are no problems, encode the data.
match field.get_field_type() {
FieldType::I16 => packed_file.encode_integer_i16(data as i16),
FieldType::I32 => packed_file.encode_integer_i32(data as i32),
FieldType::I64 => packed_file.encode_integer_i64(data),
_ => return Err(ErrorKind::TableWrongFieldType(format!("{}", row[data_column]), format!("{}", field.get_ref_field_type())).into())
}
}
else {
match row[data_column] {
DecodedData::Boolean(data) => packed_file.encode_bool(data),
DecodedData::F32(data) => packed_file.encode_float_f32(data),
DecodedData::I16(data) => packed_file.encode_integer_i16(data),
DecodedData::I32(data) => packed_file.encode_integer_i32(data),
DecodedData::I64(data) => packed_file.encode_integer_i64(data),
DecodedData::StringU8(ref data) |
DecodedData::StringU16(ref data) |
DecodedData::OptionalStringU8(ref data) |
DecodedData::OptionalStringU16(ref data) => {
// If the field has enum values, try to match them. If the matching fails, try to just encode them.
// If that fails, put a default value on that cell.
let values = field.get_enum_values();
if !values.is_empty() {
let data = match values.iter().find(|(_, y)| y.to_lowercase() == data.to_lowercase()) {
Some((x, _)) => {
match field.get_field_type() {
FieldType::I16 => DecodedData::I16(*x as i16),
FieldType::I32 => DecodedData::I32(*x),
FieldType::I64 => DecodedData::I64(*x as i64),
_ => return Err(ErrorKind::TableWrongFieldType(format!("{}", row[data_column]), format!("{}", field.get_ref_field_type())).into())
}
}
None => match row[data_column].convert_between_types(field.get_ref_field_type()) {
Ok(data) => data,
Err(_) => DecodedData::default(field.get_ref_field_type())
}
};
// If there are no problems, encode the data.
match data {
DecodedData::I16(data) => packed_file.encode_integer_i16(data),
DecodedData::I32(data) => packed_file.encode_integer_i32(data),
DecodedData::I64(data) => packed_file.encode_integer_i64(data),
_ => return Err(ErrorKind::TableWrongFieldType(format!("{}", row[data_column]), format!("{}", field.get_ref_field_type())).into())
}
}
else {
// If there are no problems, encode the data.
match row[data_column] {
DecodedData::StringU8(ref data) => packed_file.encode_packedfile_string_u8(&Self::unescape_special_chars(&data)),
DecodedData::StringU16(ref data) => packed_file.encode_packedfile_string_u16(&Self::unescape_special_chars(&data)),
DecodedData::OptionalStringU8(ref data) => packed_file.encode_packedfile_optional_string_u8(&Self::unescape_special_chars(&data)),
DecodedData::OptionalStringU16(ref data) => packed_file.encode_packedfile_optional_string_u16(&Self::unescape_special_chars(&data)),
_ => return Err(ErrorKind::TableWrongFieldType(format!("{}", row[data_column]), format!("{}", field.get_ref_field_type())).into())
}
}
}
DecodedData::SequenceU16(ref data) => {
if let FieldType::SequenceU16(_) = fields[data_column].get_ref_field_type() {
packed_file.encode_integer_u16(data.entries.len() as u16);
data.encode(&mut packed_file)?;
}
},
DecodedData::SequenceU32(ref data) => {
if let FieldType::SequenceU32(_) = fields[data_column].get_ref_field_type() {
packed_file.encode_integer_u32(data.entries.len() as u32);
data.encode(&mut packed_file)?;
}
},
}
data_column += 1;
}
}
}
Ok(())
}
/// This function returns a new empty row for the provided definition.
pub fn get_new_row(definition: &Definition) -> Vec<DecodedData> {
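// Note: a bitwise field expands into one Boolean cell per bit, so the resulting
// row matches the *processed* field count rather than the schema field count.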
definition.get_ref_fields().iter()
.map(|field|
match field.get_ref_field_type() {
FieldType::Boolean => {
if let Some(default_value) = field.get_default_value() {
if default_value.to_lowercase() == "true" {
vec![DecodedData::Boolean(true)]
} else {
vec![DecodedData::Boolean(false)]
}
} else {
vec![DecodedData::Boolean(false)]
}
}
FieldType::F32 => {
if let Some(default_value) = field.get_default_value() {
if let Ok(default_value) = default_value.parse::<f32>() {
vec![DecodedData::F32(default_value); 1]
} else {
vec![DecodedData::F32(0.0); 1]
}
} else {
vec![DecodedData::F32(0.0); 1]
}
},
FieldType::I16 => {
if field.get_is_bitwise() > 1 {
vec![DecodedData::Boolean(false); field.get_is_bitwise() as usize]
}
else if let Some(default_value) = field.get_default_value() {
    if let Ok(default_value) = default_value.parse::<i16>() {
        vec![DecodedData::I16(default_value); 1]
    } else {
        vec![DecodedData::I16(0); 1]
    }
} else {
    vec![DecodedData::I16(0); 1]
}
},
FieldType::I32 => {
if field.get_is_bitwise() > 1 {
vec![DecodedData::Boolean(false); field.get_is_bitwise() as usize]
}
else if let Some(default_value) = field.get_default_value() {
    if let Ok(default_value) = default_value.parse::<i32>() {
        vec![DecodedData::I32(default_value); 1]
    } else {
        vec![DecodedData::I32(0); 1]
    }
} else {
    vec![DecodedData::I32(0); 1]
}
},
FieldType::I64 => {
if field.get_is_bitwise() > 1 {
vec![DecodedData::Boolean(false); field.get_is_bitwise() as usize]
}
else if let Some(default_value) = field.get_default_value() {
    if let Ok(default_value) = default_value.parse::<i64>() {
        vec![DecodedData::I64(default_value); 1]
    } else {
        vec![DecodedData::I64(0); 1]
    }
} else {
    vec![DecodedData::I64(0); 1]
}
},
FieldType::StringU8 => {
if let Some(default_value) = field.get_default_value() {
vec![DecodedData::StringU8(default_value.to_owned()); 1]
} else {
vec![DecodedData::StringU8(String::new()); 1]
}
}
FieldType::StringU16 => {
if let Some(default_value) = field.get_default_value() {
vec![DecodedData::StringU16(default_value.to_owned()); 1]
} else {
vec![DecodedData::StringU16(String::new()); 1]
}
}
FieldType::OptionalStringU8 => {
if let Some(default_value) = field.get_default_value() {
vec![DecodedData::OptionalStringU8(default_value.to_owned()); 1]
} else {
vec![DecodedData::OptionalStringU8(String::new()); 1]
}
}
FieldType::OptionalStringU16 => {
if let Some(default_value) = field.get_default_value() {
vec![DecodedData::OptionalStringU16(default_value.to_owned()); 1]
} else {
vec![DecodedData::OptionalStringU16(String::new()); 1]
}
},
FieldType::SequenceU16(ref definition) => vec![DecodedData::SequenceU16(Table::new(&definition)); 1],
FieldType::SequenceU32(ref definition) => vec![DecodedData::SequenceU32(Table::new(&definition)); 1]
}
)
.flatten()
.collect()
}
//----------------------------------------------------------------//
// TSV Functions for PackedFiles.
//----------------------------------------------------------------//
/// This function imports a TSV file into a decoded table.
fn import_tsv(
definition: &Definition,
path: &PathBuf,
name: &str,
) -> Result<Self> {
// We want the reader to have no quotes, tab as delimiter and custom headers, because otherwise
// Excel, LibreOffice and all the other programs that edit these files break them on save.
let mut reader = ReaderBuilder::new()
.delimiter(b'\t')
.quoting(false)
.has_headers(false)
.flexible(true)
.from_path(&path)?;
// If we successfully load the TSV file into a reader, check the first two lines to ensure
// it's a valid TSV for our specific table.
let mut entries = vec![];
for (row, record) in reader.records().enumerate() {
if let Ok(record) = record {
// The first line should contain the "table_folder_name"/"Loc PackedFile/PackFile List", and the version (1 for Locs).
// If it doesn't match with the name we provided, return an error.
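// An illustrative header pair (tab-separated; the table name and version here are hypothetical):
//   land_units_tables	5
//   key	category	cost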
if row == 0 {
if record.get(0).unwrap_or("error") != name { return Err(ErrorKind::ImportTSVWrongTypeTable.into()); }
if record.get(1).unwrap_or("-1").parse::<i32>().map_err(|_| Error::from(ErrorKind::ImportTSVInvalidVersion))? != definition.get_version() {
return Err(ErrorKind::ImportTSVWrongVersion.into());
}
}
// The second line contains the column headers. It's only there to help people using other programs, so we skip it.
else if row == 1 { continue }
// Then read the rest of the rows as a normal TSV.
else if record.len() == definition.get_fields_processed().len() {
let mut entry = vec![];
for (column, field) in record.iter().enumerate() {
match definition.get_fields_processed()[column].get_ref_field_type() {
FieldType::Boolean => {
let value = field.to_lowercase();
if value == "true" || value == "1" { entry.push(DecodedData::Boolean(true)); }
else if value == "false" || value == "0" { entry.push(DecodedData::Boolean(false)); }
else { return Err(ErrorKind::ImportTSVIncorrectRow(row, column).into()); }
}
FieldType::F32 => entry.push(DecodedData::F32(field.parse::<f32>().map_err(|_| Error::from(ErrorKind::ImportTSVIncorrectRow(row, column)))?)),
FieldType::I16 => entry.push(DecodedData::I16(field.parse::<i16>().map_err(|_| Error::from(ErrorKind::ImportTSVIncorrectRow(row, column)))?)),
FieldType::I32 => entry.push(DecodedData::I32(field.parse::<i32>().map_err(|_| Error::from(ErrorKind::ImportTSVIncorrectRow(row, column)))?)),
FieldType::I64 => entry.push(DecodedData::I64(field.parse::<i64>().map_err(|_| Error::from(ErrorKind::ImportTSVIncorrectRow(row, column)))?)),
FieldType::StringU8 => entry.push(DecodedData::StringU8(field.to_owned())),
FieldType::StringU16 => entry.push(DecodedData::StringU16(field.to_owned())),
FieldType::OptionalStringU8 => entry.push(DecodedData::OptionalStringU8(field.to_owned())),
FieldType::OptionalStringU16 => entry.push(DecodedData::OptionalStringU16(field.to_owned())),
// For now, fail on Sequences. These are a bit special and I don't know if they're even possible in TSV.
FieldType::SequenceU16(_) => return Err(ErrorKind::ImportTSVIncorrectRow(row, column).into()),
FieldType::SequenceU32(_) => return Err(ErrorKind::ImportTSVIncorrectRow(row, column).into())
}
}
entries.push(entry);
}
// If it fails here, return an error with the length of the record instead of a field index.
else { return Err(ErrorKind::ImportTSVIncorrectRow(row, record.len()).into()); }
}
else { return Err(ErrorKind::ImportTSVIncorrectRow(row, 0).into()); }
}
// If we reached this point without errors, we replace the old data with the new one and return success.
let mut table = Table::new(definition);
table.entries = entries;
Ok(table)
}
/// This function imports a TSV file into a new Table File.
fn import_tsv_to_binary_file(
schema: &Schema,
source_path: &PathBuf,
destination_path: &PathBuf,
) -> Result<()> {
// We want the reader to have no quotes, tab as delimiter and custom headers, because otherwise
// Excel, LibreOffice and all the other programs that edit these files break them on save.
let mut reader = ReaderBuilder::new()
.delimiter(b'\t')
.quoting(false)
.has_headers(true)
.flexible(true)
.from_path(&source_path)?;
// If we successfully load the TSV file into a reader, check the first line to ensure it's a valid TSV file.
let table_type;
let table_version;
{
let headers = reader.headers()?;
table_type = if let Some(table_type) = headers.get(0) { table_type.to_owned() } else { return Err(ErrorKind::ImportTSVWrongTypeTable.into()) };
table_version = if let Some(table_version) = headers.get(1) { table_version.parse::<i32>().map_err(|_| Error::from(ErrorKind::ImportTSVInvalidVersion))? } else { return Err(ErrorKind::ImportTSVInvalidVersion.into()) };
}
// Get its definition based on the first line's contents.
let definition = if table_type == loc::TSV_NAME_LOC { schema.get_ref_versioned_file_loc()?.get_version(table_version)?.clone() }
else { schema.get_ref_versioned_file_db(&table_type)?.get_version(table_version)?.clone() };
// Try to import the entries of the file.
let mut entries = vec![];
for (row, record) in reader.records().enumerate() {
if let Ok(record) = record {
// The second line contains the column headers. It's only there to help people using other programs, so it doesn't need to be checked.
if row == 0 { continue }
// Then read the rest of the rows as a normal TSV.
else if record.len() == definition.get_fields_processed().len() {
let mut entry = vec![];
for (column, field) in record.iter().enumerate() {
match definition.get_fields_processed()[column].get_ref_field_type() {
FieldType::Boolean => {
let value = field.to_lowercase();
if value == "true" || value == "1" { entry.push(DecodedData::Boolean(true)); }
else if value == "false" || value == "0" { entry.push(DecodedData::Boolean(false)); }
else { return Err(ErrorKind::ImportTSVIncorrectRow(row, column).into()); }
}
FieldType::F32 => entry.push(DecodedData::F32(field.parse::<f32>().map_err(|_| Error::from(ErrorKind::ImportTSVIncorrectRow(row, column)))?)),
FieldType::I16 => entry.push(DecodedData::I16(field.parse::<i16>().map_err(|_| Error::from(ErrorKind::ImportTSVIncorrectRow(row, column)))?)),
FieldType::I32 => entry.push(DecodedData::I32(field.parse::<i32>().map_err(|_| Error::from(ErrorKind::ImportTSVIncorrectRow(row, column)))?)),
FieldType::I64 => entry.push(DecodedData::I64(field.parse::<i64>().map_err(|_| Error::from(ErrorKind::ImportTSVIncorrectRow(row, column)))?)),
FieldType::StringU8 => entry.push(DecodedData::StringU8(field.to_owned())),
FieldType::StringU16 => entry.push(DecodedData::StringU16(field.to_owned())),
FieldType::OptionalStringU8 => entry.push(DecodedData::OptionalStringU8(field.to_owned())),
FieldType::OptionalStringU16 => entry.push(DecodedData::OptionalStringU16(field.to_owned())),
FieldType::SequenceU16(_) |
FieldType::SequenceU32(_) => return Err(ErrorKind::ImportTSVIncorrectRow(row, column).into())
}
}
entries.push(entry);
}
// If it fails here, return an error with the length of the record instead of a field index.
else { return Err(ErrorKind::ImportTSVIncorrectRow(row, record.len()).into()); }
}
else { return Err(ErrorKind::ImportTSVIncorrectRow(row, 0).into()); }
}
// If we reached this point without errors, we create the File in memory and add the entries to it.
let data = if table_type == loc::TSV_NAME_LOC {
let mut file = loc::Loc::new(&definition);
file.set_table_data(&entries)?;
file.save()
}
else {
let mut file = db::DB::new(&table_type, None, &definition);
file.set_table_data(&entries)?;
file.save()
}?;
// Then, we try to write it on disk. If there is an error, report it.
let mut file = BufWriter::new(File::create(&destination_path)?);
file.write_all(&data)?;
// If all worked, return success.
Ok(())
}
/// This function exports the provided data to a TSV file.
fn export_tsv(
&self,
path: &PathBuf,
table_name: &str,
) -> Result<()> {
// We want the writer to have no quotes, tab as delimiter and custom headers, because otherwise
// Excel, LibreOffice and all the other programs that edit these files break them on save.
let mut writer = WriterBuilder::new()
.delimiter(b'\t')
.quote_style(QuoteStyle::Never)
.has_headers(false)
.flexible(true)
.from_writer(vec![]);
// We serialize the info of the table (name and version) in the first line, and the column names in the second one.
writer.serialize((table_name, self.definition.get_version()))?;
writer.serialize(self.definition.get_ref_fields().iter().map(|x| x.get_name().to_owned()).collect::<Vec<String>>())?;
// Then we serialize each entry in the DB Table.
for entry in &self.entries { writer.serialize(&entry)?; }
// Then, we try to write it on disk. If there is an error, report it.
let mut file = File::create(&path)?;
file.write_all(String::from_utf8(writer.into_inner().unwrap())?.as_bytes())?;
Ok(())
}
/// This function exports the provided file to a TSV file.
fn export_tsv_from_binary_file(
schema: &Schema,
source_path: &PathBuf,
destination_path: &PathBuf
) -> Result<()> {
// We want the writer to have no quotes, tab as delimiter and custom headers, because otherwise
// Excel, LibreOffice and all the other programs that edit these files break them on save.
let mut writer = WriterBuilder::new()
.delimiter(b'\t')
.quote_style(QuoteStyle::Never)
.has_headers(false)
.flexible(true)
.from_path(destination_path)?;
// We don't know what type this file is, so we try to decode it as a Loc. If that fails, we try
// to decode it as a DB using the name of its parent folder. If that fails too, run before it explodes!
let mut file = BufReader::new(File::open(source_path)?);
let mut data = vec![];
file.read_to_end(&mut data)?;
let (table_type, version, entries) = if let Ok(data) = loc::Loc::read(&data, schema, false) {
(loc::TSV_NAME_LOC, data.get_definition().get_version(), data.get_table_data())
}
else {
let table_type = source_path.parent().unwrap().file_name().unwrap().to_str().unwrap();
if let Ok(data) = db::DB::read(&data, table_type, schema, false) { (table_type, data.get_definition().get_version(), data.get_table_data()) }
else { return Err(ErrorKind::ImportTSVWrongTypeTable.into()) }
};
let definition = if table_type == loc::TSV_NAME_LOC { schema.get_ref_versioned_file_loc()?.get_version(version)?.clone() }
else { schema.get_ref_versioned_file_db(&table_type)?.get_version(version)?.clone() };
// We serialize the info of the table (name and version) in the first line, and the column names in the second one.
writer.serialize((&table_type, version))?;
writer.serialize(definition.get_ref_fields().iter().map(|x| x.get_name().to_owned()).collect::<Vec<String>>())?;
// Then we serialize each entry in the DB Table.
for entry in entries { writer.serialize(&entry)?; }
writer.flush().map_err(From::from)
}
/// This function escapes certain characters of the provided string.
fn escape_special_chars(data: &str) -> String {
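// Newlines and tabs are stored as the escape sequences `\\n` and `\\t` (a doubled
// backslash plus the letter) so they survive a TSV round-trip.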
let mut output = Vec::with_capacity(data.len() + 10);
for c in data.as_bytes() {
match c {
b'\n' => output.extend_from_slice(b"\\\\n"),
b'\t' => output.extend_from_slice(b"\\\\t"),
_ => output.push(*c),
}
}
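// SAFETY: the input is valid UTF-8 and we only ever copy its bytes or append
// ASCII escape sequences, so the buffer is still valid UTF-8 here.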
unsafe { String::from_utf8_unchecked(output) }
}
/// This function unescapes certain characters of the provided string.
fn unescape_special_chars(data: &str) -> String {
data.replace("\\\\t", "\t").replace("\\\\n", "\n")
}
}
/// Implementation of `From<&RawTable>` for `Table`.
impl From<&RawTable> for Table {
fn from(raw_table: &RawTable) -> Self {
if let Some(ref raw_definition) = raw_table.definition {
let mut table = Self::new(&From::from(raw_definition));
for row in &raw_table.rows {
let mut entry = vec![];
// Some games (Thrones, Attila, Rome 2 and Shogun 2) may drop a field entirely when it's empty.
// To compensate, if a field from the definition is missing in the row, we add it empty.
for field_def in table.definition.get_ref_fields() {
let mut exists = false;
for field in &row.fields {
if field_def.get_name() == field.field_name {
exists = true;
entry.push(match field_def.get_ref_field_type() {
FieldType::Boolean => DecodedData::Boolean(field.field_data == "true" || field.field_data == "1"),
FieldType::F32 => DecodedData::F32(if let Ok(data) = field.field_data.parse::<f32>() { data } else { 0.0 }),
FieldType::I16 => DecodedData::I16(if let Ok(data) = field.field_data.parse::<i16>() { data } else { 0 }),
FieldType::I32 => DecodedData::I32(if let Ok(data) = field.field_data.parse::<i32>() { data } else { 0 }),
FieldType::I64 => DecodedData::I64(if let Ok(data) = field.field_data.parse::<i64>() { data } else { 0 }),
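// "Frodo Best Waifu" is the placeholder the raw data uses for empty cells,
// so we map it back to an empty string here.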
FieldType::StringU8 => DecodedData::StringU8(if field.field_data == "Frodo Best Waifu" { String::new() } else { field.field_data.to_string() }),
FieldType::StringU16 => DecodedData::StringU16(if field.field_data == "Frodo Best Waifu" { String::new() } else { field.field_data.to_string() }),
FieldType::OptionalStringU8 => DecodedData::OptionalStringU8(if field.field_data == "Frodo Best Waifu" { String::new() } else { field.field_data.to_string() }),
FieldType::OptionalStringU16 => DecodedData::OptionalStringU16(if field.field_data == "Frodo Best Waifu" { String::new() } else { field.field_data.to_string() }),
// This type is not used in the raw tables so, if we find it, we skip it.
FieldType::SequenceU16(_) | FieldType::SequenceU32(_) => continue,
});
break;
}
}
// If the field doesn't exist, we create it empty.
if !exists {
entry.push(DecodedData::OptionalStringU8(String::new()));
}
}
table.entries.push(entry);
}
table
}
else {
Self::new(&Definition::new(-1))
}
}
} | /// This function returns a copy of the entries of this Table.
pub fn get_table_data(&self) -> Vec<Vec<DecodedData>> { |
knnclassifier_iris.go | package main
import (
"fmt"
"github.com/sjwhitworth/golearn/base"
"github.com/sjwhitworth/golearn/evaluation"
"github.com/sjwhitworth/golearn/knn"
)
func | () {
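// Load the iris dataset; the second argument marks the CSV as having a header row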
rawData, err := base.ParseCSVToInstances("../datasets/iris_headers.csv", true)
if err != nil {
panic(err)
}
//Initialises a new KNN classifier
cls := knn.NewKnnClassifier("euclidean", 2)
//Do a training-test split
trainData, testData := base.InstancesTrainTestSplit(rawData, 0.50)
cls.Fit(trainData)
//Calculates the Euclidean distance and returns the most popular label
predictions := cls.Predict(testData)
fmt.Println(predictions)
// Prints precision/recall metrics
confusionMat := evaluation.GetConfusionMatrix(testData, predictions)
fmt.Println(evaluation.GetSummary(confusionMat))
}
| main |
metrics.py | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally import consts
from rally.plugins.openstack import scenario
from rally.plugins.openstack.scenarios.monasca import utils as monascautils
from rally.task import validation
"""Scenarios for monasca Metrics API."""
@validation.add("required_services",
services=[consts.Service.MONASCA])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(name="MonascaMetrics.list_metrics")
class | (monascautils.MonascaScenario):
def run(self, **kwargs):
"""Fetch user's metrics.
:param kwargs: optional arguments for list query:
name, dimensions, start_time, etc
"""
self._list_metrics(**kwargs)
| ListMetrics |
lib.rs | #![deny(missing_docs, missing_debug_implementations, warnings)]
#![doc(html_root_url = "https://docs.rs/tokio-io/0.1.12")]
//! Core I/O traits and combinators when working with Tokio.
//!
//! A description of the high-level I/O combinators can be [found online] in
//! addition to a description of the [low level details].
//!
//! [found online]: https://tokio.rs/docs/getting-started/core/
//! [low level details]: https://tokio.rs/docs/going-deeper-tokio/core-low-level/
#[macro_use]
extern crate log;
#[macro_use]
extern crate futures;
extern crate bytes;
use std::io as std_io;
use futures::{Future, Stream};
/// A convenience typedef around a `Future` whose error component is `io::Error`
pub type IoFuture<T> = Box<dyn Future<Item = T, Error = std_io::Error> + Send>;
/// A convenience typedef around a `Stream` whose error component is `io::Error`
pub type IoStream<T> = Box<dyn Stream<Item = T, Error = std_io::Error> + Send>;
/// A convenience macro for working with `io::Result<T>` from the `Read` and
/// `Write` traits.
///
/// This macro takes `io::Result<T>` as input, and returns `T` as the output. If
/// the input is of the `Err` variant, then `Ok(Async::NotReady)` is returned if
/// it indicates `WouldBlock`; otherwise `Err` is returned.
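///
/// A minimal sketch of typical use inside a `poll` function (illustrative only):
///
/// ```ignore
/// let n = try_nb!(stream.read(&mut buf));
/// ```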
#[macro_export]
macro_rules! try_nb {
($e:expr) => {
match $e {
Ok(t) => t,
Err(ref e) if e.kind() == ::std::io::ErrorKind::WouldBlock => {
return Ok(::futures::Async::NotReady);
}
Err(e) => return Err(e.into()),
}
};
}
pub mod codec;
pub mod io;
pub mod _tokio_codec;
mod allow_std;
mod async_read;
mod async_write;
mod framed;
mod framed_read;
mod framed_write;
mod length_delimited;
mod lines;
mod split;
mod window;
pub use self::async_read::AsyncRead;
pub use self::async_write::AsyncWrite;
fn _assert_objects() {
fn | <T>() {}
_assert::<Box<dyn AsyncRead>>();
_assert::<Box<dyn AsyncWrite>>();
}
| _assert |
TableList_20200509173612.js | import React from "react"; | // @material-ui/core components
import { makeStyles } from "@material-ui/core/styles";
// core components
import GridItem from "components/Grid/GridItem.js";
import GridContainer from "components/Grid/GridContainer.js";
import Table from "components/Table/Table.js";
import Card from "components/Card/Card.js";
import CardHeader from "components/Card/CardHeader.js";
import CardBody from "components/Card/CardBody.js";
import Button from "@material-ui/core/Button";
import Modal from '@material-ui/core/Modal';
const styles = {
cardCategoryWhite: {
"&,& a,& a:hover,& a:focus": {
color: "rgba(255,255,255,.62)",
margin: "0",
fontSize: "14px",
marginTop: "0",
marginBottom: "0"
},
"& a,& a:hover,& a:focus": {
color: "#FFFFFF"
}
},
cardTitleWhite: {
color: "#FFFFFF",
marginTop: "0px",
minHeight: "auto",
fontWeight: "300",
fontFamily: "'Roboto', 'Helvetica', 'Arial', sans-serif",
marginBottom: "3px",
textDecoration: "none",
"& small": {
color: "#777",
fontSize: "65%",
fontWeight: "400",
lineHeight: "1"
}
}
};
const useStyles = makeStyles(styles);
export default function TableList() {
const classes = useStyles();
return (
<GridContainer>
<GridItem xs={12} sm={12} md={12}>
<Card>
<CardHeader color="primary">
<h4 className={classes.cardTitleWhite}>Simple Table</h4>
<p className={classes.cardCategoryWhite}>
Here is a subtitle for this table
</p>
</CardHeader>
<CardBody>
<Button variant="contained" color="secondary">
Add Movie
</Button>
<Table
tableHeaderColor="primary"
tableHead={["Name", "Country", "City", "Salary"]}
tableData={[
["Dakota Rice", "Niger", "Oud-Turnhout", "$36,738"],
["Minerva Hooper", "Curaçao", "Sinaai-Waas", "$23,789"],
["Sage Rodriguez", "Netherlands", "Baileux", "$56,142"],
["Philip Chaney", "Korea, South", "Overland Park", "$38,735"],
["Doris Greene", "Malawi", "Feldkirchen in Kärnten", "$63,542"],
["Mason Porter", "Chile", "Gloucester", "$78,615"]
]}
/>
</CardBody>
</Card>
</GridItem>
</GridContainer>
);
} | |
package_policies_to_agent_inputs.test.ts | /*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
import { PackagePolicy, PackagePolicyInput } from '../types';
import { storedPackagePoliciesToAgentInputs } from './package_policies_to_agent_inputs';
describe('Fleet - storedPackagePoliciesToAgentInputs', () => {
const mockPackagePolicy: PackagePolicy = {
id: 'some-uuid',
name: 'mock-package-policy',
description: '',
created_at: '',
created_by: '',
updated_at: '',
updated_by: '',
policy_id: '',
enabled: true,
output_id: '',
namespace: 'default',
inputs: [],
revision: 1,
};
const mockInput: PackagePolicyInput = {
type: 'test-logs',
enabled: true,
vars: {
inputVar: { value: 'input-value' },
inputVar2: { value: undefined },
inputVar3: {
type: 'yaml',
value: 'testField: test',
},
inputVar4: { value: '' },
},
streams: [
{
id: 'test-logs-foo',
enabled: true,
data_stream: { dataset: 'foo', type: 'logs' },
vars: {
fooVar: { value: 'foo-value' },
fooVar2: { value: [1, 2] },
},
compiled_stream: {
fooKey: 'fooValue1',
fooKey2: ['fooValue2'],
},
},
{
id: 'test-logs-bar',
enabled: true,
data_stream: { dataset: 'bar', type: 'logs' },
vars: {
barVar: { value: 'bar-value' },
barVar2: { value: [1, 2] },
barVar3: {
type: 'yaml',
value:
'- namespace: mockNamespace\n #disabledProp: ["test"]\n anotherProp: test\n- namespace: mockNamespace2\n #disabledProp: ["test2"]\n anotherProp: test2',
},
barVar4: {
type: 'yaml',
value: '',
},
barVar5: {
type: 'yaml',
value: 'testField: test\n invalidSpacing: foo',
},
},
},
],
};
it('returns no inputs for package policy with no inputs, or only disabled inputs', () => {
expect(storedPackagePoliciesToAgentInputs([mockPackagePolicy])).toEqual([]);
expect(
storedPackagePoliciesToAgentInputs([
{
...mockPackagePolicy,
package: {
name: 'mock-package',
title: 'Mock package',
version: '0.0.0',
},
},
])
).toEqual([]);
expect(
storedPackagePoliciesToAgentInputs([
{
...mockPackagePolicy,
inputs: [{ ...mockInput, enabled: false }],
},
])
).toEqual([]);
});
it('returns agent inputs', () => {
expect(
storedPackagePoliciesToAgentInputs([
{
...mockPackagePolicy,
package: {
name: 'mock-package',
title: 'Mock package',
version: '0.0.0',
},
inputs: [mockInput],
},
])
).toEqual([
{
id: 'some-uuid',
name: 'mock-package-policy',
revision: 1,
type: 'test-logs',
data_stream: { namespace: 'default' },
use_output: 'default',
meta: {
package: {
name: 'mock-package',
version: '0.0.0',
},
},
streams: [
{
id: 'test-logs-foo',
data_stream: { dataset: 'foo', type: 'logs' },
fooKey: 'fooValue1',
fooKey2: ['fooValue2'],
}, | },
],
},
]);
});
it('returns agent inputs without disabled streams', () => {
expect(
storedPackagePoliciesToAgentInputs([
{
...mockPackagePolicy,
inputs: [
{
...mockInput,
streams: [{ ...mockInput.streams[0] }, { ...mockInput.streams[1], enabled: false }],
},
],
},
])
).toEqual([
{
id: 'some-uuid',
name: 'mock-package-policy',
revision: 1,
type: 'test-logs',
data_stream: { namespace: 'default' },
use_output: 'default',
streams: [
{
id: 'test-logs-foo',
data_stream: { dataset: 'foo', type: 'logs' },
fooKey: 'fooValue1',
fooKey2: ['fooValue2'],
},
],
},
]);
});
}); | {
id: 'test-logs-bar',
data_stream: { dataset: 'bar', type: 'logs' }, |
generateConfig.d.ts | export interface HttpTriggerConfig {
authType: string;
methods: string[];
name?: string;
qualifier?: string;
}
export interface IOssTriggerConfig {
bucketName: string;
events: string[];
filter: {
prefix: string;
suffix: string;
};
}
export declare function instanceOfHttpTriggerConfig(data: any): data is HttpTriggerConfig;
export declare function instanceOfIOssTriggerConfig(data: any): data is IOssTriggerConfig;
export declare function instanceOfISchedulerTriggerConfig(data: any): data is IOssTriggerConfig;
export default class | {
static generateConfig(inputs: any, command?: string): Promise<any>;
static getCustomDomain(inputs: any, region: any, serviceName: any): Promise<{
customDomain: any;
domainName: any;
}>;
static execIndexjs(codeUri: any): Promise<void>;
static getTriggers({ scodeUri, codeUri, http, routerItem }: {
scodeUri: any;
codeUri: any;
http: any;
routerItem: any;
}): Promise<{
name: string;
type: string;
config: any;
qualifier?: undefined;
} | {
name: any;
type: string;
qualifier: any;
config: Pick<any, "authType" | "methods">;
}>;
}
| GenerateConfig |
RoomUserInfosObj.py | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class | (object):
def __init__(self, pageNumber=None, pageSize=None, totalElements=None, totalPages=None, content=None):
"""
:param pageNumber: (Optional) Current page number
:param pageSize: (Optional) Number of items per page
:param totalElements: (Optional) Total number of results
:param totalPages: (Optional) Total number of pages
:param content: (Optional) Page contents
"""
self.pageNumber = pageNumber
self.pageSize = pageSize
self.totalElements = totalElements
self.totalPages = totalPages
self.content = content
| RoomUserInfosObj |
buidler-params.ts | import { BuidlerParamDefinitions } from "../../../types";
import * as types from "./argumentTypes";
export const BUIDLER_PARAM_DEFINITIONS: BuidlerParamDefinitions = {
network: {
name: "network",
defaultValue: undefined,
description: "The network to connect to.",
type: types.string,
isOptional: true,
isFlag: false,
isVariadic: false,
},
showStackTraces: {
name: "showStackTraces",
defaultValue: false,
description: "Show stack traces.",
type: types.boolean,
isFlag: true,
isOptional: true,
isVariadic: false,
},
version: {
name: "version",
defaultValue: false,
description: "Shows buidler's version.",
type: types.boolean,
isFlag: true,
isOptional: true,
isVariadic: false,
},
help: {
name: "help",
defaultValue: false,
description: "Shows this message, or a task's help if its name is provided",
type: types.boolean,
isFlag: true,
isOptional: true,
isVariadic: false,
},
emoji: {
name: "emoji",
defaultValue: process.platform === "darwin",
description: "Use emoji in messages.",
type: types.boolean,
isFlag: true,
isOptional: true,
isVariadic: false,
},
config: {
name: "config",
defaultValue: undefined,
description: "A Buidler config file.",
type: types.inputFile,
isFlag: false,
isOptional: true,
isVariadic: false, | description: "Enables Buidler verbose logging",
type: types.boolean,
isFlag: true,
isOptional: true,
isVariadic: false,
},
maxMemory: {
name: "maxMemory",
defaultValue: undefined,
description: "The maximum amount of memory that Buidler can use.",
type: types.int,
isOptional: true,
isFlag: false,
isVariadic: false,
},
}; | },
verbose: {
name: "verbose",
defaultValue: false, |
document.py | """
Object model representation of a document represented as a collection
of XML files in METS/MODS format.
"""
from defoe.fmp.page import Page
from lxml import etree
import re
class Document(object):
"""
Object model representation of a document represented as a
collection of XML files in METS/MODS format.
"""
def __init__(self, code, archive):
"""
Constructor
:param code: identifier for this document within an archive
:type code: str or unicode
:param archive: archive to which this document belongs
:type archive: defoe.alto.archive.Archive
"""
self.namespaces = {
"mods": "http://www.loc.gov/mods/v3",
"mets": "http://www.loc.gov/METS/",
"xsi": "http://www.w3.org/2001/XMLSchema-instance",
"premis": "info:lc/xmlns/premis-v2",
"dcterms": "http://purl.org/dc/terms/",
"fits": "http://hul.harvard.edu/ois/xml/ns/fits/fits_output",
"xlink": "http://www.w3.org/1999/xlink",
}
self.archive = archive
self.code = code
self.num_pages = 0
self.metadata = self.archive.open_document(self.code)
self.metadata_tree = etree.parse(self.metadata)
self.title = self.single_query("//mods:title/text()")
self.page_codes = sorted(
self.archive.document_codes[self.code], key=Document.sorter
)
self.num_pages = len(self.page_codes)
self.years = Document.parse_year(self.single_query("//mods:dateIssued/text()"))
self.publisher = self.single_query("//mods:publisher/text()")
self.place = self.single_query("//mods:placeTerm/text()")
# The place string often has a year in it.
self.years += Document.parse_year(self.place)
self.years = sorted(self.years)
self.documentId = self.single_query("//mods:identifier/text()")
if self.years:
self.year = self.years[0]
else:
self.year = None
self.date = self.single_query("//mods:dateIssued/text()")
self.document_type = "newspaper"
self.model = "fmp"
#### New ############
# [art0001, art0002, art0003]
self.articlesId = self.parse_structMap_Logical()
# {'#art0001':['#pa0001001', '#pa0001002', '#pa0001003', '#pa0001004', '#pa0001005', '#pa0001006', '#pa0001007'], '#art0002': ['#pa0001008', '#pa0001009' ..]}
# {'pa0001001': 'page1 area1', 'pa0001003': 'page1 area3'}
self.articlesParts, self.partsPage = self.parse_structLink()
# {'pa0001001': ['RECT', '1220,5,2893,221'], 'pa0001003': ['RECT', '2934,14,3709,211'], 'pa0004044': ['RECT', '5334,2088,5584,2121']}
self.partsCoord = self.parse_structMap_Physical()
self.num_articles = len(self.articlesId)
#######################
@staticmethod
def parse_year(text):
"""
Parse text to extract years of form 16xx to 19xx.
Any date of form NN following a year of form CCYY to CCYY
is used to derive a date CCNN.
As an exception to this rule, single years are parsed
from dates precisely matching the format YYYY-MM-DD.
For example:
* "1862, [1861]" returns [1861, 1862]
* "1847 [1846, 47]" returns [1846, 1847]
* "1873-80" returns [1873, 1880]
* "1870-09-01" returns [1870]
:param text: text to parse
:type text: str or unicode
:return: years
:rtype: set(int)
"""
try:
date_pattern = re.compile(
"(1[6-9]\d{2}(-|/)(0[1-9]|1[0-2])(-|/)(0[1-9]|[12]\d|3[01]))"
)
if date_pattern.match(text):
return [int(text[0:4])]
long_pattern = re.compile(r"(1[6-9]\d\d)")
short_pattern = re.compile(r"\d\d")
results = []
chunks = iter(long_pattern.split(text)[1:])
for year, rest in zip(chunks, chunks):
results.append(int(year))
century = year[0:2]
short_years = short_pattern.findall(rest)
for short_year in short_years:
results.append(int(century + short_year))
return sorted(set(results))
except TypeError:
return []
@staticmethod
def sorter(page_code):
"""
Given a page code of form [0-9]*(_[0-9]*), split this
into the sub-codes. For example, given 123_456, return
[123, 456]
:param page_code: page code
:type page_code: str or unicode
:return: list of page codes
:rtype: list(int)
"""
codes = list(map(int, page_code.split("_")))
return codes
def query(self, query):
"""
Run XPath query.
:param query: XPath query
:type query: str or unicode
:return: list of query results or None if none
:rtype: list(lxml.etree.<MODULE>) (depends on query)
"""
return self.metadata_tree.xpath(query, namespaces=self.namespaces)
def single_query(self, query):
"""
Run XPath query and return first result.
:param query: XPath query
:type query: str or unicode
:return: query result or None if none
:rtype: str or unicode
"""
result = self.query(query)
if not result:
return None
return str(result[0])
def page(self, code):
"""
Given a page code, return a new Page object.
:param code: page code
:type code: str or unicode
:return: Page object
:rtype: defoe.alto.page.Page
"""
return Page(self, code)
def get_document_info(self):
"""
Gets information from ZIP file about metadata file
corresponding to this document.
:return: information
:rtype: zipfile.ZipInfo
"""
return self.archive.get_document_info(self.code)
def get_page_info(self, page_code):
"""
Gets information from ZIP file about a page file within
this document.
:param page_code: file code
:type page_code: str or unicode
:return: information
:rtype: zipfile.ZipInfo
"""
return self.archive.get_page_info(self.code, page_code)
def __getitem__(self, index):
"""
Given a page index, return a new Page object.
:param index: page index
:type index: int
:return: Page object
:rtype: defoe.alto.page.Page
"""
return self.page(self.page_codes[index])
def __iter__(self):
"""
Iterate over page codes, returning new Page objects.
:return: Page object
:rtype: defoe.alto.page.Page
"""
for page_code in self.page_codes:
yield self.page(page_code)
def scan_strings(self):
"""
Iterate over strings in pages.
:return: page and string
:rtype: tuple(defoe.alto.page.Page, str or unicode)
"""
for page in self:
for string in page.strings:
yield page, string
def scan_tb(self):
"""
Iterate over textblocks in pages
:return: page and textblock
:rtype: tuple(defoe.alto.page.Page, str or unicode)
"""
for page in self:
for tb in page.tb:
yield page, tb
def scan_words(self):
"""
Iterate over words in pages.
:return: page and word
:rtype: tuple(defoe.alto.page.Page, str or unicode)
"""
for page in self:
for word in page.words:
yield page, word
def scan_wc(self):
"""
Iterate over word qualities in pages.
:return: page and wc
:rtype: tuple(defoe.alto.page.Page, str or unicode)
"""
for page in self:
for wc in page.wc:
yield page, wc
@property
def articles(self):
"""
Calculates the articles in each page.
:return: a dictionary with all the articles. Each article is composed of one or more textblocks
:rtype: dictionary of articles
{'art0001': ['pa0001001': ['RECT', '1220,5,2893,221', 'page1 area1'], 'pa0001003': ['RECT', '2934,14,3709,211', page1 area3], ...]], ...}
"""
self.document_articles = {}
articlesInfo = self.articles_info()
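# Walk every textblock of every page and attach it to the article whose
# parts list mentions the textblock's ID.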
for page in self:
for tb in page.tb:
for articleId in articlesInfo:
for partId in articlesInfo[articleId]:
if partId == tb.textblock_id:
if articleId not in self.document_articles:
self.document_articles[articleId] = []
tb.textblock_shape = articlesInfo[articleId][partId][0]
tb.textblock_coords = articlesInfo[articleId][partId][1]
tb.textblock_page_area = articlesInfo[articleId][partId][2]
self.document_articles[articleId].append(tb)
return self.document_articles
def scan_cc(self):
|
def scan_images(self):
"""
Iterate over images in pages.
:return: page and XML fragment with image
:rtype: tuple(defoe.alto.page.Page, lxml.etree._Element)
"""
for page in self:
for image in page.images:
yield page, image
def strings(self):
"""
Iterate over strings.
:return: string
:rtype: str or unicode
"""
for _, string in self.scan_strings():
yield string
def tb(self):
"""
Iterate over textblocks.
:return: textblock
:rtype: str or unicode
"""
for _, tb in self.scan_tb():
yield tb
def words(self):
"""
Iterate over words.
:return: word
:rtype: str or unicode
"""
for _, word in self.scan_words():
yield word
def images(self):
"""
Iterate over images.
:return: XML fragment with image
:rtype: lxml.etree._Element
"""
for _, image in self.scan_images():
yield image
def wc(self):
"""
Iterate over word qualities.
:return: wc
:rtype: str or unicode
"""
for _, wc in self.scan_wc():
yield wc
def cc(self):
"""
Iterate over character qualities.
:return: cc
:rtype: str or unicode
"""
for _, cc in self.scan_cc():
yield cc
def parse_structMap_Physical(self):
"""
Parse the structMap Physical information
:return: dictionary with the ID of each part as a key. For each part, it stores the shape and coords.
:rtype: dictionary
{'pa0001001': ['RECT', '1220,5,2893,221'], 'pa0001003': ['RECT', '2934,14,3709,211'], 'pa0004044': ['RECT', '5334,2088,5584,2121']}
"""
partsCoord = dict()
elem = self.metadata_tree.find(
'mets:structMap[@TYPE="PHYSICAL"]', self.namespaces
)
for physic in elem:
parts = physic.findall('mets:div[@TYPE="page"]', self.namespaces)
for part in parts:
metadata_parts = part.findall("mets:div", self.namespaces)
for metadata in metadata_parts:
fptr = metadata.find("mets:fptr", self.namespaces)
for fp in fptr:
partsCoord[list(metadata.values())[0]] = [
list(fp.values())[1],
list(fp.values())[2],
]
return partsCoord
def parse_structMap_Logical(self):
"""
Parse the structMap Logical information
:return: list of article IDs that make up each document/issue. Only article IDs are returned, no other type of element.
:rtype: list
[art0001, art0002, art0003]
"""
articlesId = []
elem = self.metadata_tree.find(
'mets:structMap[@TYPE="LOGICAL"]', self.namespaces
)
for logic in elem:
articles = logic.findall('mets:div[@TYPE="ARTICLE"]', self.namespaces)
for article in articles:
articlesId.append(list(article.values())[0])
return articlesId
def parse_structLink(self):
"""
Parse the strucLink information
:return: 1) A dictionary with article IDs as keys; per article ID, a list of the part/textblock IDs that make up that article.
2) A dictionary with part/textblock IDs as keys, and page and area as values.
:rtype: two dictionaries
{'#art0001':['#pa0001001', '#pa0001002', '#pa0001003', '#pa0001004', '#pa0001005', '#pa0001006', '#pa0001007'], '#art0002': ['#pa0001008', '#pa0001009' ..]}
{'pa0001001': 'page1 area1', 'pa0001003': 'page1 area3'}
"""
articlesId = []
articlesParts = dict()
partsPage = dict()
elem = self.metadata_tree.findall("mets:structLink", self.namespaces)
for smlinkgrp in elem:
parts = smlinkgrp.findall("mets:smLinkGrp", self.namespaces)
for linklocator in smlinkgrp:
linkl = linklocator.findall("mets:smLocatorLink", self.namespaces)
article_parts = []
for link in linkl:
idstring = list(link.values())[0]
partId = re.sub("[^A-Za-z0-9]+", "", idstring)
article_parts.append(partId)
partsPage[partId] = list(link.values())[1]
articlesParts[article_parts[0]] = article_parts[1:]
return articlesParts, partsPage
def articles_info(self):
"""
:return: a dictionary with article IDs as keys. Each entry holds a dictionary of parts/textblocks as values, with all the part information (shape, coords and page_area).
:rtype: dictionary
#{'art0001 {'pa0001001': ['RECT', '1220,5,2893,221', 'page1 area1'], 'pa0001003': ['RECT', '2934,14,3709,211', 'page1 area3'], ....}}
"""
articlesId = []
articlesInfo = dict()
for a_id in self.articlesId:
articlesInfo[a_id] = dict()
for p_id in self.articlesParts[a_id]:
if p_id in self.partsCoord:
self.partsCoord[p_id].append(self.partsPage[p_id])
articlesInfo[a_id][p_id] = self.partsCoord[p_id]
return articlesInfo
| """
Iterate over character qualities in pages.
:return: page and cc
:rtype: tuple(defoe.alto.page.Page, str or unicode)
"""
for page in self:
for cc in page.cc:
yield page, cc |
macro_choice.rs | // Test for Macro, exact same as usecase
use mpstthree::binary::struct_trait::{end::End, recv::Recv, send::Send, session::Session};
use mpstthree::functionmpst::close::close_mpst;
use mpstthree::functionmpst::fork::fork_mpst;
use mpstthree::meshedchannels::MeshedChannels;
use mpstthree::role::end::RoleEnd;
use mpstthree::role::Role;
use std::error::Error;
use mpstthree::functionmpst::ChooseMpst;
use mpstthree::functionmpst::OfferMpst;
use rand::{thread_rng, Rng};
use mpstthree::{
create_broadcast_role, create_choose_both_from_3_to_1_and_2, create_multiple_normal_role,
create_offer_mpst_session_2, create_recv_mpst_session_1, create_recv_mpst_session_2,
create_send_mpst_session_1, create_send_mpst_session_2,
};
// Create new roles
// normal
create_multiple_normal_role!(
RoleA, RoleADual |
RoleB, RoleBDual |
RoleC, RoleCDual |
);
// broadcast
create_broadcast_role!(RoleAlltoC, RoleCtoAll);
// Create new send functions
create_send_mpst_session_1!(send_mpst_c_to_a, RoleA, RoleC);
create_send_mpst_session_2!(send_mpst_a_to_c, RoleC, RoleA);
create_send_mpst_session_2!(send_mpst_c_to_b, RoleB, RoleC);
create_send_mpst_session_1!(send_mpst_b_to_a, RoleA, RoleB);
create_send_mpst_session_1!(send_mpst_a_to_b, RoleB, RoleA);
// Create new recv functions and related types
// normal
create_recv_mpst_session_1!(recv_mpst_c_from_a, RoleA, RoleC);
create_recv_mpst_session_2!(recv_mpst_a_from_c, RoleC, RoleA);
create_recv_mpst_session_2!(recv_mpst_b_from_c, RoleC, RoleB);
create_recv_mpst_session_1!(recv_mpst_b_from_a, RoleA, RoleB);
create_recv_mpst_session_1!(recv_mpst_a_from_b, RoleB, RoleA);
// Create the offer functions
create_offer_mpst_session_2!(offer_mpst_session_b_to_c, RoleAlltoC, RoleB);
create_offer_mpst_session_2!(offer_mpst_session_a_to_c, RoleAlltoC, RoleA);
// Create the choose functions
create_choose_both_from_3_to_1_and_2!(
choose_right_mpst_session_c_to_all,
choose_left_mpst_session_c_to_all,
RoleADual,
RoleBDual,
RoleCtoAll,
RoleC
);
// Types
type AtoCClose = End;
type AtoBClose = End;
type AtoCVideo<N> = Recv<N, Send<N, End>>;
type AtoBVideo<N> = Send<N, Recv<N, End>>;
type BtoAClose = <AtoBClose as Session>::Dual;
type BtoCClose = End;
type BtoAVideo<N> = <AtoBVideo<N> as Session>::Dual;
type CtoBClose = <BtoCClose as Session>::Dual;
type CtoAClose = <AtoCClose as Session>::Dual;
type CtoAVideo<N> = <AtoCVideo<N> as Session>::Dual;
// Stacks
type StackAEnd = RoleEnd;
type StackAEndDual = <StackAEnd as Role>::Dual;
type StackAVideo = RoleC<RoleB<RoleB<RoleC<RoleEnd>>>>;
type StackAVideoDual = <StackAVideo as Role>::Dual;
type StackAFull = RoleC<RoleC<RoleAlltoC<RoleEnd, RoleEnd>>>;
type StackBEnd = RoleEnd;
type StackBEndDual = <StackBEnd as Role>::Dual;
type StackBVideo = RoleA<RoleA<RoleEnd>>;
type StackBVideoDual = <StackBVideo as Role>::Dual;
type StackBFull = RoleAlltoC<RoleEnd, RoleEnd>;
type StackCEnd = RoleEnd;
type StackCVideo = RoleA<RoleA<RoleEnd>>;
type StackCChoice = RoleCtoAll<StackCVideo, StackCEnd>;
type StackCFull = RoleA<RoleA<StackCChoice>>;
// Creating the MP sessions
// For C
type ChooseCtoA<N> = ChooseMpst<
BtoAVideo<N>,
CtoAVideo<N>,
BtoAClose,
CtoAClose,
StackAVideoDual,
StackAEnd,
RoleADual<RoleEnd>,
>;
type ChooseCtoB<N> = ChooseMpst<
AtoBVideo<N>,
CtoBClose,
AtoBClose,
CtoBClose,
StackBVideoDual,
StackBEnd,
RoleBDual<RoleEnd>,
>;
type InitC<N> = Send<N, Recv<N, ChooseCtoA<N>>>;
type EndpointCFull<N> = MeshedChannels<InitC<N>, ChooseCtoB<N>, StackCFull, RoleC<RoleEnd>>;
// For A
type EndpointAVideo<N> = MeshedChannels<AtoBVideo<N>, AtoCVideo<N>, StackAVideo, RoleA<RoleEnd>>;
type OfferA<N> = OfferMpst<
AtoBVideo<N>,
AtoCVideo<N>,
AtoBClose,
AtoCClose,
StackAVideo,
StackAEnd,
RoleA<RoleEnd>,
>;
type InitA<N> = Recv<N, Send<N, OfferA<N>>>;
type EndpointAFull<N> = MeshedChannels<End, InitA<N>, StackAFull, RoleA<RoleEnd>>;
// For B
type EndpointBVideo<N> = MeshedChannels<BtoAVideo<N>, BtoCClose, StackBVideo, RoleB<RoleEnd>>;
type OfferB<N> = OfferMpst<
BtoAVideo<N>,
BtoCClose,
BtoAClose,
BtoCClose,
StackBVideo,
StackBEnd,
RoleB<RoleEnd>,
>;
type EndpointBFull<N> = MeshedChannels<End, OfferB<N>, StackBFull, RoleB<RoleEnd>>;
// Functions related to endpoints
fn server(s: EndpointBFull<i32>) -> Result<(), Box<dyn Error>> {
offer_mpst_session_b_to_c(
s,
|s: EndpointBVideo<i32>| {
let (request, s) = recv_mpst_b_from_a(s)?;
let s = send_mpst_b_to_a(request + 1, s);
close_mpst(s)
},
close_mpst,
)
}
fn authenticator(s: EndpointAFull<i32>) -> Result<(), Box<dyn Error>> {
let (id, s) = recv_mpst_a_from_c(s)?;
let s = send_mpst_a_to_c(id + 1, s);
offer_mpst_session_a_to_c(
s,
|s: EndpointAVideo<i32>| {
let (request, s) = recv_mpst_a_from_c(s)?;
let s = send_mpst_a_to_b(request + 1, s);
let (video, s) = recv_mpst_a_from_b(s)?;
let s = send_mpst_a_to_c(video + 1, s);
assert_eq!(request, id + 1);
assert_eq!(video, id + 3);
close_mpst(s)
},
close_mpst,
)
}
fn client_video(s: EndpointCFull<i32>) -> Result<(), Box<dyn Error>> {
let mut rng = thread_rng();
let id: i32 = rng.gen();
let s = send_mpst_c_to_a(id, s);
let (accept, s) = recv_mpst_c_from_a(s)?;
assert_eq!(accept, id + 1);
let s = choose_left_mpst_session_c_to_all::<
BtoAVideo<i32>,
BtoAClose,
CtoAVideo<i32>,
CtoAClose,
BtoCClose,
AtoCClose,
StackAVideoDual,
StackAEndDual,
StackBVideoDual,
StackBEndDual,
StackCVideo,
StackCEnd,
>(s);
let s = send_mpst_c_to_a(accept, s);
let (result, s) = recv_mpst_c_from_a(s)?;
assert_eq!(result, accept + 3);
close_mpst(s)
}
fn client_close(s: EndpointCFull<i32>) -> Result<(), Box<dyn Error>> {
let mut rng = thread_rng();
let id: i32 = rng.gen();
let s = send_mpst_c_to_a(id, s);
let (accept, s) = recv_mpst_c_from_a(s)?;
assert_eq!(accept, id + 1);
let s = choose_right_mpst_session_c_to_all::<
BtoAVideo<i32>,
BtoAClose,
CtoAVideo<i32>,
CtoAClose,
BtoCClose,
AtoCClose,
StackAVideoDual,
StackAEndDual,
StackBVideoDual,
StackBEndDual,
StackCVideo,
StackCEnd,
>(s);
close_mpst(s)
}
/////////////////////////////////////////
pub fn run_usecase_right() {
assert!(|| -> Result<(), Box<dyn Error>> {
// Test end branch.
{
let (thread_a, thread_b, thread_c) = fork_mpst(authenticator, server, client_close);
assert!(thread_a.join().is_ok());
assert!(thread_b.join().is_ok());
assert!(thread_c.join().is_ok());
}
Ok(())
}()
.is_ok());
}
pub fn run_usecase_left() | {
assert!(|| -> Result<(), Box<dyn Error>> {
// Test video branch.
{
let (thread_a, thread_b, thread_c) = fork_mpst(authenticator, server, client_video);
assert!(thread_a.join().is_ok());
assert!(thread_b.join().is_ok());
assert!(thread_c.join().is_ok());
}
Ok(())
}()
.is_ok());
} |
|
index.js | import React from 'react';
import MaterialCommunityIcons from 'react-native-vector-icons/MaterialCommunityIcons';
import {Container, TabsContainer, TabItem, TabText} from './styles';
export default function Tabs({translateY}) {
return (
<Container
style={{
transform: [
{
translateY: translateY.interpolate({
inputRange: [0, 380],
outputRange: [0, 30],
extrapolate: 'clamp',
}),
},
],
opacity: translateY.interpolate({
inputRange: [0, 380],
outputRange: [1, 0.3],
extrapolate: 'clamp',
}),
}}>
<TabsContainer>
<TabItem>
<MaterialCommunityIcons name="account-plus" size={24} color="#fff" />
<TabText>Indicar Amigos</TabText>
</TabItem>
<TabItem>
<MaterialCommunityIcons
name="message-outline"
size={24}
color="#fff"
/>
<TabText>Cobrar</TabText>
</TabItem>
<TabItem> | <MaterialCommunityIcons name="arrow-up" size={24} color="#fff" />
<TabText>Transferir</TabText>
</TabItem>
<TabItem>
<MaterialCommunityIcons name="lock" size={24} color="#fff" />
<TabText>Bloquear Cartão</TabText>
</TabItem>
</TabsContainer>
</Container>
);
} | <MaterialCommunityIcons name="arrow-down" size={24} color="#fff" />
<TabText>Depositar</TabText>
</TabItem>
<TabItem> |
0fd3.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
# @author : [email protected]
from headers.BeaEnginePython import *
from nose.tools import *
class TestSuite:
def test(self):
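# Each case decodes a hand-crafted byte buffer and checks the resulting
# opcode, mnemonic and operand representation.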
# 66 0F d3 /r
# psrlq xmm1, xmm2/m128
| Buffer = bytes.fromhex('660fd39011223344')
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(hex(myDisasm.infos.Instruction.Opcode), '0xfd3')
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'psrlq')
assert_equal(myDisasm.repr(), 'psrlq xmm2, xmmword ptr [rax+44332211h]')
# VEX.NDS.128.66.0F.WIG d3 /r
# vpsrlq xmm1, xmm2, xmm3/m128
Buffer = bytes.fromhex('c40101d30e')
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpsrlq')
assert_equal(myDisasm.repr(), 'vpsrlq xmm9, xmm15, xmmword ptr [r14]')
# VEX.NDS.256.66.0F.WIG d3 /r
# vpsrlq ymm1, ymm2, ymm3/m256
Buffer = bytes.fromhex('c40105d30e')
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpsrlq')
assert_equal(myDisasm.repr(), 'vpsrlq ymm9, ymm15, ymmword ptr [r14]')
# EVEX.NDS.128.66.0F.WIG d3 /r
# vpsrlq xmm1 {k1}{z}, xmm2, xmm3/m128
Buffer = bytes.fromhex('62010506d30e')
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Reserved_.EVEX.P0, 0x1)
assert_equal(myDisasm.infos.Reserved_.EVEX.P1, 0x5)
assert_equal(myDisasm.infos.Reserved_.EVEX.P2, 0x6)
assert_equal(myDisasm.infos.Reserved_.EVEX.pp, 0x1)
assert_equal(myDisasm.infos.Reserved_.EVEX.mm, 0x1)
assert_equal(hex(myDisasm.infos.Instruction.Opcode), '0xd3')
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpsrlq')
assert_equal(myDisasm.repr(), 'vpsrlq xmm25, xmm31, xmmword ptr [r14]')
# EVEX.NDS.256.66.0F.WIG d3 /r
# vpsrlq ymm1 {k1}{z}, ymm2, ymm3/m256
Buffer = bytes.fromhex('62010520d30e')
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Reserved_.EVEX.P0, 0x1)
assert_equal(myDisasm.infos.Reserved_.EVEX.P1, 0x5)
assert_equal(myDisasm.infos.Reserved_.EVEX.P2, 0x20)
assert_equal(myDisasm.infos.Reserved_.EVEX.pp, 0x1)
assert_equal(myDisasm.infos.Reserved_.EVEX.mm, 0x1)
assert_equal(hex(myDisasm.infos.Instruction.Opcode), '0xd3')
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpsrlq')
assert_equal(myDisasm.repr(), 'vpsrlq ymm25, ymm31, ymmword ptr [r14]')
# EVEX.NDS.512.66.0F.WIG d3 /r
# vpsrlq zmm1 {k1}{z}, zmm2, zmm3/m512
Buffer = bytes.fromhex('62010540d30e')
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Reserved_.EVEX.P0, 0x1)
assert_equal(myDisasm.infos.Reserved_.EVEX.P1, 0x5)
assert_equal(myDisasm.infos.Reserved_.EVEX.P2, 0x40)
assert_equal(myDisasm.infos.Reserved_.EVEX.pp, 0x1)
assert_equal(myDisasm.infos.Reserved_.EVEX.mm, 0x1)
assert_equal(hex(myDisasm.infos.Instruction.Opcode), '0xd3')
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpsrlq')
assert_equal(myDisasm.repr(), 'vpsrlq zmm25, zmm31, zmmword ptr [r14]') |
|
results.rs | use agent::api::AgentApi;
use crates::{Crate, GitHubRepo};
use errors::*;
use experiments::Experiment;
use log;
use results::{TestResult, WriteResults};
use std::io::Read;
use std::ops::DerefMut;
use std::sync::{Arc, Mutex};
use toolchain::Toolchain;
#[derive(Clone)]
pub struct ResultsUploader<'a> {
api: &'a AgentApi,
shas: Arc<Mutex<Vec<(GitHubRepo, String)>>>,
}
impl<'a> ResultsUploader<'a> {
pub fn new(api: &'a AgentApi) -> Self {
ResultsUploader {
api,
shas: Arc::new(Mutex::new(Vec::new())),
}
}
}
impl<'a> WriteResults for ResultsUploader<'a> {
fn get_result(
&self,
_ex: &Experiment,
_toolchain: &Toolchain,
_krate: &Crate,
) -> Result<Option<TestResult>> {
// TODO: not yet implemented
Ok(None)
}
fn | (&self, _ex: &Experiment, repo: &GitHubRepo, sha: &str) -> Result<()> {
self.shas
.lock()
.unwrap()
.push((repo.clone(), sha.to_string()));
Ok(())
}
fn record_result<F>(
&self,
_ex: &Experiment,
toolchain: &Toolchain,
krate: &Crate,
f: F,
) -> Result<TestResult>
where
F: FnOnce() -> Result<TestResult>,
{
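// Capture everything the closure logs into a temporary file so the raw log
// can be shipped to the server alongside the test result.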
let mut log_file = ::tempfile::NamedTempFile::new()?;
let result = log::redirect(log_file.path(), f)?;
let mut buffer = Vec::new();
log_file.read_to_end(&mut buffer)?;
let shas = ::std::mem::replace(self.shas.lock().unwrap().deref_mut(), Vec::new());
info!("sending results to the crater server...");
self.api
.record_progress(krate, toolchain, &buffer, result, &shas)?;
Ok(result)
}
}
| record_sha |
main.js | /*
* Beer O'Clock
* @jamesrwhite
* @benhodgson
*/
// Set up the store
var preferences = {};
// Is local storage supported?
preferences.supported = function() {
try {
return window.localStorage !== undefined;
} catch (e) {
return false;
}
};
preferences.getInstance = function() {
if (this.supported()) {
// If one doesn't already exist then create it
if (window.localStorage['beeroclock'] === undefined) {
window.localStorage['beeroclock'] = '{}';
}
return JSON.parse(window.localStorage['beeroclock']);
}
};
// Get a key
preferences.get = function(key) {
// Does the browser support local storage?
if (this.supported()) {
var preferences_object = preferences.getInstance();
return preferences_object[key] != undefined ? JSON.parse(preferences_object[key]) : undefined;
}
};
// Set a key
preferences.set = function(key, value) {
// Does the browser support local storage?
if (this.supported()) {
// Get the preferences object
var preferences_object = preferences.getInstance();
// Add the new key to it
preferences_object[key] = value;
// Save the whole object back
window.localStorage['beeroclock'] = JSON.stringify(preferences_object);
return value;
}
};
// Unset a key
preferences.unset = function(key) {
// Does the browser support local storage?
if (this.supported()) {
// Get the preferences object
var preferences_object = preferences.getInstance();
// Delete the key
delete preferences_object[key];
// Overwrite the whole preferences object with the new instance
window.localStorage['beeroclock'] = JSON.stringify(preferences_object);
}
};
// Set up the user object
var user = {};
user.getDrink = function() {
var drink = preferences.get('drink');
return drink !== undefined ? drink : user.setDrink(1);
};
user.setDrink = function(drink) {
preferences.set('drink', drink);
return drink;
};
// Does the user want bubbles displayed?
user.getBubblez = function() {
var bubblez = preferences.get('bubblez');
return bubblez !== undefined ? bubblez : user.setBubblez(true);
};
user.setBubblez = function(bubble_status) {
preferences.set('bubblez', bubble_status);
return bubble_status;
};
user.getTime = function() {
var time = preferences.get('time');
return time !== undefined ? time : user.setTime(17); // Default to 5pm
};
user.setTime = function(time) {
preferences.set('time', time);
return time;
};
$(document).ready(function() {
const GLASS_FULL = 100;
var drink = {},
index,
full_query_string = window.location.href.split('?')[1],
$hours = $('.countdown-hours > .digit'),
$minutes = $('.countdown-minutes > .digit'),
$seconds = $('.countdown-seconds > .digit');
// Split it up into key value pairs
full_query_string = typeof full_query_string == 'undefined' ? [] : full_query_string.split('&');
// Loop through the query string and set up an object based on it
    var query_string = {};
    for (index in full_query_string) {
        // Split up the key value pair
        var item = full_query_string[index].split('=');
// Add it to our object
query_string[item[0]] = item[1];
}
// Allow overriding of beer o'clock for debug
if (typeof query_string.t != 'undefined') {
user.setTime(query_string.t);
}
// Define our different drinks
var drinks = {
// Pint of Lager
1: {
glass: "uk-pint",
liquid: "drink-lager",
bubbles: true,
bubbles_min: 20,
bubbles_max: 40,
bubbles_small: 2,
bubbles_big: 6
},
// Pint of Ale
2: {
glass: "uk-pint",
liquid: "drink-ale",
bubbles: false,
bubbles_min: "0",
bubbles_max: "0",
bubbles_small: "0",
bubbles_big: "0"
},
// Pint of Stout
3: {
glass: "uk-pint",
liquid: "drink-stout",
bubbles: false,
bubbles_min: 0,
bubbles_max: 0,
bubbles_small: 0,
bubbles_big: 0
},
// Bottle of Lager
4: {
glass: "beer-bottle",
liquid: "drink-lager-light",
bubbles: true,
bubbles_min: 10,
bubbles_max: 30,
bubbles_small: 2,
bubbles_big: 4
},
// Gin & Tonic
5: {
glass: "hiball",
liquid: "drink-gintonic",
bubbles: true,
bubbles_min: 10,
bubbles_max: 30,
bubbles_small: 2,
bubbles_big: 4
},
// Coke Mixer
6: {
glass: "hiball",
liquid: "drink-cola",
bubbles: true,
bubbles_min: 10,
bubbles_max: 30,
bubbles_small: 2,
bubbles_big: 4
},
// Bottle of Alcopop
7: {
glass: "beer-bottle alcopop-bottle",
liquid: "drink-blue-alcopop",
bubbles: true,
bubbles_min: 10,
bubbles_max: 30,
bubbles_small: 1,
bubbles_big: 2
}
};
    // Is it beer o'clock? i.e. the weekend, or after beer o'clock on the current day
drink.canHaz = function() {
var now = new Date;
return now.getHours() >= user.getTime() || now.getDay() === 6 || now.getDay() === 0;
};
// Add leading zeros to numbers
drink.leadingZero = function(number) {
return number < 10 ? '0' + number : number;
};
// Format a Date object a bit more nicely
drink.formatTime = function(date) {
var hours = this.leadingZero(date.getHours()),
minutes = this.leadingZero(date.getMinutes()),
seconds = this.leadingZero(date.getSeconds());
return {
hours: hours.toString().split(''),
minutes: minutes.toString().split(''),
seconds: seconds.toString().split('')
};
};
// A Date object for when beer o'clock is
drink.getDate = function() {
var drink_date = new Date;
// If it's already past beer o'clock then we need to wait till tomorrow
if (drink_date.getHours() >= user.getTime()) {
drink_date.setDate(drink_date.getDate() + 1);
}
drink_date.setHours(user.getTime());
drink_date.setMinutes(0);
drink_date.setSeconds(0);
return drink_date;
};
// How long till you can haz beer!
drink.howLong = function() {
        var beeroclock_date = this.getDate(),
            now = this.canHaz() ? beeroclock_date : new Date,
            total_seconds = Math.max(0, Math.floor((beeroclock_date - now) / 1000)),
            hours_diff = Math.floor(total_seconds / 3600),
            minutes_diff = Math.floor((total_seconds % 3600) / 60),
            seconds_diff = total_seconds % 60;
var diff = new Date;
diff.setHours(hours_diff);
diff.setMinutes(minutes_diff);
diff.setSeconds(seconds_diff);
return diff;
};
// How full should the beer glass be?
drink.howLongPercentage = function() {
if (this.canHaz()) {
return GLASS_FULL;
}
var now = new Date / 1000,
beer = this.getDate() / 1000;
        return Math.floor(GLASS_FULL - (((beer - now) / 86400) * 100));
};
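    // Worked example: six hours (21,600 s) before beer o'clock the glass shows
    // 100 - (21600 / 86400) * 100 = 75% full.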
drink.pour = function() {
// Update the page content | $.each($hours, function(index, value) {
$hours[index].innerHTML = pretty_time.hours[index];
});
// Update the minutes display
$.each($minutes, function(index, value) {
$minutes[index].innerHTML = pretty_time.minutes[index];
});
// Update the seconds display
$.each($seconds, function(index, value) {
$seconds[index].innerHTML = pretty_time.seconds[index];
});
// Update the title
if (drink.canHaz()) {
document.title = "It's Beer o'clock now!";
$('.beer-oclock-notification').show();
$('.countdown-container').hide();
} else {
var time_string = '';
if (time.getHours() > 0) {
time_string += time.getHours() + 'h ';
}
if (time.getMinutes() > 0) {
time_string += time.getMinutes() + 'm ';
}
time_string += time.getSeconds() + 's ';
document.title = time_string;
// We need to keep doing this in case somebody changed the time
$('.beer-oclock-notification').hide();
$('.countdown-container').show();
}
// Fill up that beer glass!
$(".tasty-beverage").css({
height: drink.howLongPercentage() + '%'
});
    };
drink.releaseTheBubblez = function() {
// Get variables
var $bubbles = $('.bubbles');
// Get bubble settings
var drink = drinks[user.getDrink()];
var min_bubble_count = drink.bubbles_min, // Minimum number of bubbles
max_bubble_count = drink.bubbles_max, // Maximum number of bubbles
min_bubble_size = drink.bubbles_small, // Smallest possible bubble diameter (px)
max_bubble_size = drink.bubbles_big; // Largest possible bubble diameter (px)
// If drink has bubbles, generate our bubbles from the above options
$bubbles.empty();
if (drink.bubbles) {
            var bubbleCount = min_bubble_count + Math.floor(Math.random() * (max_bubble_count - min_bubble_count + 1));
for (var i = 0; i < bubbleCount; i++) {
$bubbles.append('<div class="bubble-container"><div class="bubble"></div></div>');
}
}
// Make each bubble random
$bubbles.find('> .bubble-container').each(function() {
// Randomise their size
            var size_rand = min_bubble_size + Math.floor(Math.random() * (max_bubble_size - min_bubble_size + 1));
// Randomly position the bubbles
var pos_rand = Math.floor(Math.random() * 101);
// Randomise the time they start rising
var delay_rand = Math.floor(Math.random() * 16);
// Randomise their speed
var speed_rand = 3 + Math.floor(Math.random() * 9);
// Cache the this selector
var $this = $(this);
// Stick the above to the bubble container
$this.css({
'left' : pos_rand + '%',
'-webkit-animation-duration' : speed_rand + 's',
'-moz-animation-duration' : speed_rand + 's',
'-ms-animation-duration' : speed_rand + 's',
'animation-duration' : speed_rand + 's',
'-webkit-animation-delay' : delay_rand + 's',
'-moz-animation-delay' : delay_rand + 's',
'-ms-animation-delay' : delay_rand + 's',
'animation-delay' : delay_rand + 's'
});
// And set the bubble size
$this.children('.bubble').css({
'width' : size_rand + 'px',
'height' : size_rand + 'px'
});
});
};
drink.bubblesOn = function() {
// Create the bubbles
drink.releaseTheBubblez();
// Show the bubbles
        $('.bubbles').fadeIn(1000, function(){
// Start the animation
$('.bubble-container').css({
'animation-play-state' : 'running',
'-webkit-animation-play-state' : 'running'
});
// Change the toggle text
$('.bubble-toggle').text('Bubbles Off');
});
};
drink.bubblesOff = function() {
// Hide the bubbles
        $('.bubbles').fadeOut(2500, function() {
// Stop the animation
$('.bubble-container').css({
'animation-play-state' : 'paused',
'-webkit-animation-play-state' : 'paused'
});
// Remove the bubble divs
$('.bubbles').empty();
// Change the toggle text
$('.bubble-toggle').text('Bubbles On');
});
};
drink.toggleBubblez = function() {
if (!user.getBubblez()) {
user.setBubblez(true);
drink.bubblesOn();
} else {
user.setBubblez(false);
drink.bubblesOff();
}
};
drink.render = function() {
// Get the drink settings
var user_drink = drinks[user.getDrink()];
// Clear existing and set the drink type
$('#drink-type').removeClass().fadeOut('100', function() {
$(this).addClass(user_drink.glass + ' ' + user_drink.liquid).fadeIn('250');
});
// Get rid of existing bubbles
$('.bubbles').empty();
// If drink has bubbles, and user hasn't disabled them, run the bubble cannon
if (user_drink.bubbles && user.getBubblez()) {
this.releaseTheBubblez(user.getDrink());
}
// Toggle button
var $bubble_toggle = $('.bubble-toggle');
// Show/Hide toggle button based on drink settings
if (!user_drink.bubbles) {
$bubble_toggle.hide();
} else {
$bubble_toggle.show();
}
// Set toggle button text based on user prefs
if (user.getBubblez()) {
$bubble_toggle.text('Bubbles Off');
} else {
$bubble_toggle.text('Bubbles On');
}
};
// Handle the user changing their drink
$('select[name="drink_type"]').on('change', function() {
user.setDrink($(this).find(':selected').val());
drink.render();
});
// Handle the user changing their drink time
$('select[name="start_hour"]').on('change', function() {
user.setTime($(this).find(':selected').val());
});
// Toggle the Bubble Cannon
$('.bubble-toggle').on('click', drink.toggleBubblez);
// Settings Panel Toggle
$('.settings-toggle').on('click', function() {
// Make sure the correct drink is selected in the list
$('select[name="drink_type"]').find('> option[value="' + user.getDrink() + '"]')[0].selected = true;
// Make sure the correct drink time is selected in the list
$('select[name="start_hour"]').find('> option[value="' + user.getTime() + '"]')[0].selected = true;
$('.settings-panel, .site-footer').slideToggle();
});
// Was a drink set in the query string?
if (typeof query_string.drink != 'undefined') {
user.setDrink(query_string.drink);
}
drink.render();
// And go..
setTimeout(function() {
drink.pour();
setInterval(drink.pour, 1000);
}, 1000);
window.drink = drink;
});
// Custom Select Styling
/*!
* jquery.customSelect() - v0.3.6
* http://adam.co/lab/jquery/customselect/
* 2013-04-16
*
* Copyright 2013 Adam Coulombe
* @license http://www.opensource.org/licenses/mit-license.html MIT License
* @license http://www.gnu.org/licenses/gpl.html GPL2 License
*/
(function(a){a.fn.extend({customSelect:function(b){var c={customClass:null,mapClass:true,mapStyle:true},d=function(f,i){var e=f.find(":selected"),h=i.children(":first"),g=e.html()||" ";h.html(g);setTimeout(function(){i.removeClass("customSelectOpen");a(document).off("mouseup.customSelectOpen")},60)};if(typeof document.body.style.maxHeight==="undefined"){return this}b=a.extend(c,b);return this.each(function(){var e=a(this),g=a('<span class="customSelectInner" />'),f=a('<span class="customSelect" />');f.append(g);e.after(f);if(b.customClass){f.addClass(b.customClass)}if(b.mapClass){f.addClass(e.attr("class"))}if(b.mapStyle){f.attr("style",e.attr("style"))}e.addClass("hasCustomSelect").on("update",function(){d(e,f);var i=parseInt(e.outerWidth(),10)-(parseInt(f.outerWidth(),10)-parseInt(f.width(),10));f.css({display:"inline-block"});var h=f.outerHeight();if(e.attr("disabled")){f.addClass("customSelectDisabled")}else{f.removeClass("customSelectDisabled")}g.css({width:i,display:"inline-block"});e.css({"-webkit-appearance":"menulist-button",width:f.outerWidth(),position:"absolute",opacity:0,height:h,fontSize:f.css("font-size")})}).on("change",function(){f.addClass("customSelectChanged");d(e,f)}).on("keyup",function(){if(!f.hasClass("customSelectOpen")){e.blur();e.focus()}}).on("mouseup",function(h){f.removeClass("customSelectChanged");if(!f.hasClass("customSelectOpen")){f.addClass("customSelectOpen");h.stopPropagation();a(document).one("mouseup.customSelectOpen",function(i){if(i.target!=e.get(0)&&a.inArray(i.target,e.find("*").get())<0){e.blur()}else{d(e,f)}})}}).focus(function(){f.removeClass("customSelectChanged").addClass("customSelectFocus")}).blur(function(){f.removeClass("customSelectFocus customSelectOpen")}).hover(function(){f.addClass("customSelectHover")},function(){f.removeClass("customSelectHover")}).trigger("update")})}})})(jQuery); | var time = drink.howLong();
var pretty_time = drink.formatTime(time);
// Update the hours display |
commits.js | import {spawnSync} from 'child_process' | .stdout
.toString()
.split('\n')
.filter(str => str !== '') |
export default () =>
    spawnSync('git', ['rev-list', 'HEAD']) |
connect.go | // Copyright 2019 James Cote
// All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Code generated by gen-esign; DO NOT EDIT.
// Package connect implements the DocuSign SDK
// category Connect.
//
// The Connect category enables your application to be called via HTTPS when an event of interest occurs.
//
// Use the Connect service to "end the polling madness." With Connect, there is no need for your application to poll DocuSign every 15 minutes to learn the latest about your envelopes.
//
// Instead, you register your interest in one or more types of envelope or recipient events. Then, when an interesting event occurs, the DocuSign platform will contact your application with the event's details and data. You can register interest in envelopes sent by particular users in your account, or for envelopes sent by any user.
//
// ## Incoming Connect Calls
// To use the Connect service, your application needs to provide an https url that can be called from the public internet. If your application runs on a server behind your organization's firewall, then you will need to create a "pinhole" in the firewall to allow the incoming Connect calls from DocuSign to reach your application. Other techniques for receiving the incoming calls, including proxy servers and DMZ networking, can also be used.
//
// ## Per-envelope Connect Configuration
// Instead of registering a general Connect configuration and listener, an individual envelope can have its own Connect configuration. See the `eventNotification` field for envelopes.
//
// ## Categories
// Use the Connect category for:
//
// * Programmatically creating Connect configurations. Connect configurations can be created manually by using the DocuSign web service, or programmatically via the API. Configurations created via the API can be seen and updated from the web service.
// * Retrieving and managing the event log for your Connect configurations.
// * Requesting that an event be re-published to the listener.
//
// Service API documentation may be found at:
// https://developers.docusign.com/esign-rest-api/v2/reference/Connect
// Usage example:
//
// import (
// "github.com/ICGGroup/esign"
// "github.com/ICGGroup/esign/v2/connect"
// "github.com/ICGGroup/esign/v2/model"
// )
// ...
// connectService := connect.New(esignCredential)
package connect // import "github.com/ICGGroup/esign/v2/connect"
import (
"context"
"fmt"
"io"
"net/url"
"strings"
"time"
"github.com/ICGGroup/esign"
"github.com/ICGGroup/esign/v2/model"
)
// Service implements DocuSign Connect Category API operations
type Service struct {
credential esign.Credential
}
// New initializes a connect service using cred to authorize ops.
func New(cred esign.Credential) *Service {
return &Service{credential: cred}
}
// ConfigurationsCreate creates a connect configuration for the specified account.
//
// https://developers.docusign.com/esign-rest-api/v2/reference/connect/connectconfigurations/create
//
// SDK Method Connect::createConfiguration
func (s *Service) ConfigurationsCreate(connectConfigurations *model.ConnectCustomConfiguration) *ConfigurationsCreateOp {
return &ConfigurationsCreateOp{
Credential: s.credential,
Method: "POST",
Path: "connect",
Payload: connectConfigurations,
QueryOpts: make(url.Values),
}
}
// ConfigurationsCreateOp implements DocuSign API SDK Connect::createConfiguration
type ConfigurationsCreateOp esign.Op
// Do executes the op. A nil context will return error.
func (op *ConfigurationsCreateOp) Do(ctx context.Context) (*model.ConnectCustomConfiguration, error) {
var res *model.ConnectCustomConfiguration
return res, ((*esign.Op)(op)).Do(ctx, &res)
}
// ConfigurationsDelete deletes the specified connect configuration.
//
// https://developers.docusign.com/esign-rest-api/v2/reference/connect/connectconfigurations/delete
//
// SDK Method Connect::deleteConfiguration
func (s *Service) ConfigurationsDelete(connectID string) *ConfigurationsDeleteOp {
return &ConfigurationsDeleteOp{
Credential: s.credential,
Method: "DELETE",
Path: strings.Join([]string{"connect", connectID}, "/"),
QueryOpts: make(url.Values),
}
}
// ConfigurationsDeleteOp implements DocuSign API SDK Connect::deleteConfiguration
type ConfigurationsDeleteOp esign.Op
// Do executes the op. A nil context will return error.
func (op *ConfigurationsDeleteOp) Do(ctx context.Context) error {
return ((*esign.Op)(op)).Do(ctx, nil)
}
// ConfigurationsGet get information on a Connect Configuration
//
// https://developers.docusign.com/esign-rest-api/v2/reference/connect/connectconfigurations/get
//
// SDK Method Connect::getConfiguration
func (s *Service) ConfigurationsGet(connectID string) *ConfigurationsGetOp {
return &ConfigurationsGetOp{
Credential: s.credential,
Method: "GET",
Path: strings.Join([]string{"connect", connectID}, "/"),
QueryOpts: make(url.Values),
}
}
// ConfigurationsGetOp implements DocuSign API SDK Connect::getConfiguration
type ConfigurationsGetOp esign.Op
// Do executes the op. A nil context will return error.
func (op *ConfigurationsGetOp) Do(ctx context.Context) (*model.ConnectConfigResults, error) {
var res *model.ConnectConfigResults
return res, ((*esign.Op)(op)).Do(ctx, &res)
}
// ConfigurationsList get Connect Configuration Information
//
// https://developers.docusign.com/esign-rest-api/v2/reference/connect/connectconfigurations/list
//
// SDK Method Connect::listConfigurations
func (s *Service) ConfigurationsList() *ConfigurationsListOp {
return &ConfigurationsListOp{
Credential: s.credential,
Method: "GET",
Path: "connect",
QueryOpts: make(url.Values),
}
}
// ConfigurationsListOp implements DocuSign API SDK Connect::listConfigurations
type ConfigurationsListOp esign.Op
// Do executes the op. A nil context will return error.
func (op *ConfigurationsListOp) Do(ctx context.Context) (*model.ConnectConfigResults, error) {
var res *model.ConnectConfigResults
return res, ((*esign.Op)(op)).Do(ctx, &res)
}
// ConfigurationsListUsers returns users from the configured Connect service.
//
// https://developers.docusign.com/esign-rest-api/v2/reference/connect/connectconfigurations/listusers
//
// SDK Method Connect::connectUsers
func (s *Service) ConfigurationsListUsers(connectID string) *ConfigurationsListUsersOp {
return &ConfigurationsListUsersOp{
Credential: s.credential,
Method: "GET",
Path: strings.Join([]string{"connect", connectID, "users"}, "/"),
QueryOpts: make(url.Values),
}
}
// ConfigurationsListUsersOp implements DocuSign API SDK Connect::connectUsers
type ConfigurationsListUsersOp esign.Op
// Do executes the op. A nil context will return error.
func (op *ConfigurationsListUsersOp) Do(ctx context.Context) (*model.IntegratedUserInfoList, error) {
var res *model.IntegratedUserInfoList
return res, ((*esign.Op)(op)).Do(ctx, &res)
}
// Count optional. Number of items to return.
func (op *ConfigurationsListUsersOp) Count(val int) *ConfigurationsListUsersOp {
if op != nil {
op.QueryOpts.Set("count", fmt.Sprintf("%d", val))
}
return op
}
// EmailSubstring filters the returned user records by the email address or a sub-string of email address.
func (op *ConfigurationsListUsersOp) EmailSubstring(val string) *ConfigurationsListUsersOp {
if op != nil |
return op
}
// ListIncludedUsers set the call query parameter list_included_users
func (op *ConfigurationsListUsersOp) ListIncludedUsers() *ConfigurationsListUsersOp {
if op != nil {
op.QueryOpts.Set("list_included_users", "true")
}
return op
}
// StartPosition is the position within the total result set from which to start returning values.
func (op *ConfigurationsListUsersOp) StartPosition(val int) *ConfigurationsListUsersOp {
if op != nil {
op.QueryOpts.Set("start_position", fmt.Sprintf("%d", val))
}
return op
}
// Status filters the results by user status.
// You can specify a comma-separated
// list of the following statuses:
//
// * ActivationRequired
// * ActivationSent
// * Active
// * Closed
// * Disabled
func (op *ConfigurationsListUsersOp) Status(val ...string) *ConfigurationsListUsersOp {
if op != nil {
op.QueryOpts.Set("status", strings.Join(val, ","))
}
return op
}
// UserNameSubstring filters the user records returned by the user name or a sub-string of user name.
func (op *ConfigurationsListUsersOp) UserNameSubstring(val string) *ConfigurationsListUsersOp {
if op != nil {
op.QueryOpts.Set("user_name_substring", val)
}
return op
}
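// A hedged sketch of chaining the option setters above onto a single call;
// the connect ID, filter values, and ctx are placeholders, and connectService
// is a *Service created via New as in the package example:
//
//	users, err := connectService.ConfigurationsListUsers("42").
//		Count(25).
//		Status("Active").
//		Do(ctx)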
// ConfigurationsUpdate updates a specified Connect configuration.
//
// https://developers.docusign.com/esign-rest-api/v2/reference/connect/connectconfigurations/update
//
// SDK Method Connect::updateConfiguration
func (s *Service) ConfigurationsUpdate(connectConfigurations *model.ConnectCustomConfiguration) *ConfigurationsUpdateOp {
return &ConfigurationsUpdateOp{
Credential: s.credential,
Method: "PUT",
Path: "connect",
Payload: connectConfigurations,
QueryOpts: make(url.Values),
}
}
// ConfigurationsUpdateOp implements DocuSign API SDK Connect::updateConfiguration
type ConfigurationsUpdateOp esign.Op
// Do executes the op. A nil context will return error.
func (op *ConfigurationsUpdateOp) Do(ctx context.Context) (*model.ConnectCustomConfiguration, error) {
var res *model.ConnectCustomConfiguration
return res, ((*esign.Op)(op)).Do(ctx, &res)
}
// EventsDelete deletes a specified Connect log entry.
//
// https://developers.docusign.com/esign-rest-api/v2/reference/connect/connectevents/delete
//
// SDK Method Connect::deleteEventLog
func (s *Service) EventsDelete(logID string) *EventsDeleteOp {
return &EventsDeleteOp{
Credential: s.credential,
Method: "DELETE",
Path: strings.Join([]string{"connect", "logs", logID}, "/"),
QueryOpts: make(url.Values),
}
}
// EventsDeleteOp implements DocuSign API SDK Connect::deleteEventLog
type EventsDeleteOp esign.Op
// Do executes the op. A nil context will return error.
func (op *EventsDeleteOp) Do(ctx context.Context) error {
return ((*esign.Op)(op)).Do(ctx, nil)
}
// EventsDeleteFailure deletes a Connect failure log entry.
//
// https://developers.docusign.com/esign-rest-api/v2/reference/connect/connectevents/deletefailure
//
// SDK Method Connect::deleteEventFailureLog
func (s *Service) EventsDeleteFailure(failureID string) *EventsDeleteFailureOp {
return &EventsDeleteFailureOp{
Credential: s.credential,
Method: "DELETE",
Path: strings.Join([]string{"connect", "failures", failureID}, "/"),
QueryOpts: make(url.Values),
}
}
// EventsDeleteFailureOp implements DocuSign API SDK Connect::deleteEventFailureLog
type EventsDeleteFailureOp esign.Op
// Do executes the op. A nil context will return error.
func (op *EventsDeleteFailureOp) Do(ctx context.Context) error {
return ((*esign.Op)(op)).Do(ctx, nil)
}
// EventsDeleteList gets a list of Connect log entries.
//
// https://developers.docusign.com/esign-rest-api/v2/reference/connect/connectevents/deletelist
//
// SDK Method Connect::deleteEventLogs
func (s *Service) EventsDeleteList() *EventsDeleteListOp {
return &EventsDeleteListOp{
Credential: s.credential,
Method: "DELETE",
Path: "connect/logs",
QueryOpts: make(url.Values),
}
}
// EventsDeleteListOp implements DocuSign API SDK Connect::deleteEventLogs
type EventsDeleteListOp esign.Op
// Do executes the op. A nil context will return error.
func (op *EventsDeleteListOp) Do(ctx context.Context) error {
return ((*esign.Op)(op)).Do(ctx, nil)
}
// EventsGet get the specified Connect log entry.
//
// https://developers.docusign.com/esign-rest-api/v2/reference/connect/connectevents/get
//
// SDK Method Connect::getEventLog
func (s *Service) EventsGet(logID string) *EventsGetOp {
return &EventsGetOp{
Credential: s.credential,
Method: "GET",
Path: strings.Join([]string{"connect", "logs", logID}, "/"),
QueryOpts: make(url.Values),
}
}
// EventsGetOp implements DocuSign API SDK Connect::getEventLog
type EventsGetOp esign.Op
// Do executes the op. A nil context will return error.
func (op *EventsGetOp) Do(ctx context.Context) (*model.ConnectLog, error) {
var res *model.ConnectLog
return res, ((*esign.Op)(op)).Do(ctx, &res)
}
// AdditionalInfo when true, the connectDebugLog information is included in the response.
func (op *EventsGetOp) AdditionalInfo() *EventsGetOp {
if op != nil {
op.QueryOpts.Set("additional_info", "true")
}
return op
}
// EventsList gets the Connect log.
//
// https://developers.docusign.com/esign-rest-api/v2/reference/connect/connectevents/list
//
// SDK Method Connect::listEventLogs
func (s *Service) EventsList() *EventsListOp {
return &EventsListOp{
Credential: s.credential,
Method: "GET",
Path: "connect/logs",
QueryOpts: make(url.Values),
}
}
// EventsListOp implements DocuSign API SDK Connect::listEventLogs
type EventsListOp esign.Op
// Do executes the op. A nil context will return error.
func (op *EventsListOp) Do(ctx context.Context) (*model.ConnectLogs, error) {
var res *model.ConnectLogs
return res, ((*esign.Op)(op)).Do(ctx, &res)
}
// FromDate start of the search date range. Only returns log entries created on or after this date/time. If no value is specified, there is no limit on the earliest date created.
func (op *EventsListOp) FromDate(val time.Time) *EventsListOp {
if op != nil {
op.QueryOpts.Set("from_date", val.Format(time.RFC3339))
}
return op
}
// ToDate end of the search date range. Only returns log entries created up to this date/time. If no value is provided, this defaults to the current date.
func (op *EventsListOp) ToDate(val time.Time) *EventsListOp {
if op != nil {
op.QueryOpts.Set("to_date", val.Format(time.RFC3339))
}
return op
}
// EventsListFailures gets the Connect failure log information.
//
// https://developers.docusign.com/esign-rest-api/v2/reference/connect/connectevents/listfailures
//
// SDK Method Connect::listEventFailureLogs
func (s *Service) EventsListFailures() *EventsListFailuresOp {
return &EventsListFailuresOp{
Credential: s.credential,
Method: "GET",
Path: "connect/failures",
QueryOpts: make(url.Values),
}
}
// EventsListFailuresOp implements DocuSign API SDK Connect::listEventFailureLogs
type EventsListFailuresOp esign.Op
// Do executes the op. A nil context will return error.
func (op *EventsListFailuresOp) Do(ctx context.Context) (*model.ConnectLogs, error) {
var res *model.ConnectLogs
return res, ((*esign.Op)(op)).Do(ctx, &res)
}
// FromDate start of the search date range. Only returns log entries created on or after this date/time. If no value is specified, there is no limit on the earliest date created.
func (op *EventsListFailuresOp) FromDate(val time.Time) *EventsListFailuresOp {
if op != nil {
op.QueryOpts.Set("from_date", val.Format(time.RFC3339))
}
return op
}
// ToDate end of the search date range. Only returns log entries created up to this date/time. If no value is provided, this defaults to the current date.
func (op *EventsListFailuresOp) ToDate(val time.Time) *EventsListFailuresOp {
if op != nil {
op.QueryOpts.Set("to_date", val.Format(time.RFC3339))
}
return op
}
// EventsRetryForEnvelope republishes Connect information for the specified envelope.
// If media is an io.ReadCloser, Do() will close media.
//
// https://developers.docusign.com/esign-rest-api/v2/reference/connect/connectevents/retryforenvelope
//
// SDK Method Connect::retryEventForEnvelope
func (s *Service) EventsRetryForEnvelope(envelopeID string, media io.Reader, mimeType string) *EventsRetryForEnvelopeOp {
return &EventsRetryForEnvelopeOp{
Credential: s.credential,
Method: "PUT",
Path: strings.Join([]string{"connect", "envelopes", envelopeID, "retry_queue"}, "/"),
Payload: &esign.UploadFile{Reader: media, ContentType: mimeType},
QueryOpts: make(url.Values),
}
}
// EventsRetryForEnvelopeOp implements DocuSign API SDK Connect::retryEventForEnvelope
type EventsRetryForEnvelopeOp esign.Op
// Do executes the op. A nil context will return error.
func (op *EventsRetryForEnvelopeOp) Do(ctx context.Context) (*model.ConnectFailureResults, error) {
var res *model.ConnectFailureResults
return res, ((*esign.Op)(op)).Do(ctx, &res)
}
// EventsRetryForEnvelopes republishes Connect information for multiple envelopes.
//
// https://developers.docusign.com/esign-rest-api/v2/reference/connect/connectevents/retryforenvelopes
//
// SDK Method Connect::retryEventForEnvelopes
func (s *Service) EventsRetryForEnvelopes(connectFailureFilter *model.ConnectFailureFilter) *EventsRetryForEnvelopesOp {
return &EventsRetryForEnvelopesOp{
Credential: s.credential,
Method: "PUT",
Path: "connect/envelopes/retry_queue",
Payload: connectFailureFilter,
QueryOpts: make(url.Values),
}
}
// EventsRetryForEnvelopesOp implements DocuSign API SDK Connect::retryEventForEnvelopes
type EventsRetryForEnvelopesOp esign.Op
// Do executes the op. A nil context will return error.
func (op *EventsRetryForEnvelopesOp) Do(ctx context.Context) (*model.ConnectFailureResults, error) {
var res *model.ConnectFailureResults
return res, ((*esign.Op)(op)).Do(ctx, &res)
}
| {
op.QueryOpts.Set("email_substring", val)
} |
main.py | # coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""CLI for Tensorflow Datasets.
TFDS CLI to help creates and build datasets (e.g. `tfds new my_dataset`,
`tfds build`,...)
"""
import argparse
from typing import List
from absl import app
from absl.flags import argparse_flags
import tensorflow_datasets.public_api as tfds
def _parse_flags(argv: List[str]) -> argparse.Namespace:
"""Command lines flag parsing."""
parser = argparse_flags.ArgumentParser(
      description='TensorFlow Datasets CLI tool',
)
parser.add_argument(
'--version',
action='version',
version='TensorFlow Datasets: ' + tfds.__version__
)
return parser.parse_args(argv[1:])
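# Illustrative call (argv[0] is the program name and is skipped; the
# `--version` action prints the version string and exits via SystemExit):
#   _parse_flags(['tfds', '--version'])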
def main(args: argparse.Namespace) -> None:
|
def launch_cli() -> None:
"""Parse arguments and launch the CLI main function."""
app.run(main, flags_parser=_parse_flags)
if __name__ == '__main__':
# Entry-points in `setup.py` launch the `launch_cli()` function directly, so
# the code in `if __name__ == '__main__'` is never executed.
launch_cli()
| del args # Unused for now |
fact.go | // Code generated by informer-gen. DO NOT EDIT.
package v1
import (
time "time"
jenkins_io_v1 "github.com/jenkins-x/jx/pkg/apis/jenkins.io/v1"
versioned "github.com/jenkins-x/jx/pkg/client/clientset/versioned"
internalinterfaces "github.com/jenkins-x/jx/pkg/client/informers/externalversions/internalinterfaces"
v1 "github.com/jenkins-x/jx/pkg/client/listers/jenkins.io/v1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
cache "k8s.io/client-go/tools/cache"
)
// FactInformer provides access to a shared informer and lister for
// Facts.
type FactInformer interface {
Informer() cache.SharedIndexInformer
Lister() v1.FactLister
}
type factInformer struct {
factory internalinterfaces.SharedInformerFactory
tweakListOptions internalinterfaces.TweakListOptionsFunc
namespace string
}
// NewFactInformer constructs a new informer for Fact type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func | (client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
return NewFilteredFactInformer(client, namespace, resyncPeriod, indexers, nil)
}
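// A hedged usage sketch; the clientset, namespace, resync period, and stop
// channel below are placeholders:
//
//	informer := NewFactInformer(client, "jx", 30*time.Second, cache.Indexers{})
//	go informer.Run(stopCh)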
// NewFilteredFactInformer constructs a new informer for Fact type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredFactInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.JenkinsV1().Facts(namespace).List(options)
},
WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.JenkinsV1().Facts(namespace).Watch(options)
},
},
&jenkins_io_v1.Fact{},
resyncPeriod,
indexers,
)
}
func (f *factInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
return NewFilteredFactInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}
func (f *factInformer) Informer() cache.SharedIndexInformer {
return f.factory.InformerFor(&jenkins_io_v1.Fact{}, f.defaultInformer)
}
func (f *factInformer) Lister() v1.FactLister {
return v1.NewFactLister(f.Informer().GetIndexer())
}
| NewFactInformer |
index.js | import satchelUpgrade from './other/satchel-upgrade';
//import DiamondRing from './rings/diamond-ring';
//import AmethystRing from './rings/amethyst-ring';
//import OldRing from './rings/old-ring';
import HpPotion from './other/hp-potion';
import GreatHpPotion from './other/great-hp-potion';
import witchGloves from './armor/witch-gloves';
import witchHat from './armor/witch-hat';
import santaHat from './armor/santa-hat';
import witchBoots from './armor/witch-boots';
import witchCape from './armor/witch-cape';
import staff from './weapons/staff';
import snowballGun from './weapons/snowballgun';
import bell from './ingredients/bell';
// import bow from './ingredients/bow';
import candle from './ingredients/candle';
import candyCane from './ingredients/candycane';
import gingerBread from './ingredients/gingerbread';
import button from './ingredients/button';
const items = {
weapons: {
staff,
snowballGun
},
armor: { | witchHat,
santaHat
},
rings: {
// AmethystRing,
// DiamondRing,
// OldRing
},
other: {
candyCane,
HpPotion,
gingerBread,
GreatHpPotion,
satchelUpgrade,
bell,
button,
candle
}
};
export const randomItemsT1 = [
bell,
button,
candle,
candyCane,
gingerBread,
HpPotion,
witchBoots,
witchCape,
witchGloves,
witchHat,
candyCane,
candyCane,
candyCane
];
export const randomItemsT2 = [
...randomItemsT1,
snowballGun,
santaHat,
GreatHpPotion
// DiamondRing
];
export default items; | witchCape,
witchBoots,
witchGloves, |
pasta_fp.rs | use crate::arkworks::bigint_256::{self, WasmBigInteger256};
use ark_ff::{
fields::{Field, FpParameters, PrimeField, SquareRootField},
FftField, One, UniformRand, Zero,
};
use ark_ff::{FromBytes, ToBytes};
use ark_poly::{EvaluationDomain, Radix2EvaluationDomain as Domain};
use mina_curves::pasta::fp::{Fp, FpParameters as Fp_params};
use num_bigint::BigUint;
use rand::rngs::StdRng;
use std::cmp::Ordering::{Equal, Greater, Less};
use wasm_bindgen::convert::{FromWasmAbi, IntoWasmAbi, OptionFromWasmAbi, OptionIntoWasmAbi};
use wasm_bindgen::prelude::*;
#[repr(C)]
#[derive(Clone, Copy, Debug)]
pub struct WasmPastaFp(pub Fp);
impl crate::wasm_flat_vector::FlatVectorElem for WasmPastaFp {
const FLATTENED_SIZE: usize = std::mem::size_of::<Fp>();
fn flatten(self) -> Vec<u8> {
let mut bytes: Vec<u8> = Vec::with_capacity(Self::FLATTENED_SIZE);
self.0.write(&mut bytes).unwrap();
bytes
}
fn unflatten(flat: Vec<u8>) -> Self {
WasmPastaFp(FromBytes::read(flat.as_slice()).unwrap())
}
}
impl From<Fp> for WasmPastaFp {
fn from(x: Fp) -> Self {
WasmPastaFp(x)
}
}
impl From<WasmPastaFp> for Fp {
fn from(x: WasmPastaFp) -> Self {
x.0
}
}
impl<'a> From<&'a WasmPastaFp> for &'a Fp {
fn from(x: &'a WasmPastaFp) -> Self {
&x.0
}
}
impl wasm_bindgen::describe::WasmDescribe for WasmPastaFp {
fn describe() {
<Vec<u8> as wasm_bindgen::describe::WasmDescribe>::describe()
}
}
impl FromWasmAbi for WasmPastaFp {
type Abi = <Vec<u8> as FromWasmAbi>::Abi;
unsafe fn from_abi(js: Self::Abi) -> Self {
let bytes: Vec<u8> = FromWasmAbi::from_abi(js);
WasmPastaFp(FromBytes::read(bytes.as_slice()).unwrap())
}
}
impl IntoWasmAbi for WasmPastaFp {
type Abi = <Vec<u8> as FromWasmAbi>::Abi;
fn into_abi(self) -> Self::Abi {
let mut bytes: Vec<u8> = vec![];
self.0.write(&mut bytes).unwrap();
bytes.into_abi()
}
}
impl OptionIntoWasmAbi for WasmPastaFp {
fn none() -> Self::Abi {
<Vec<u8> as OptionIntoWasmAbi>::none()
}
}
impl OptionFromWasmAbi for WasmPastaFp {
fn is_none(abi: &Self::Abi) -> bool {
<Vec<u8> as OptionFromWasmAbi>::is_none(abi)
}
}
#[wasm_bindgen]
pub fn caml_pasta_fp_size_in_bits() -> isize {
Fp_params::MODULUS_BITS as isize
}
#[wasm_bindgen]
pub fn caml_pasta_fp_size() -> WasmBigInteger256 {
WasmBigInteger256(Fp_params::MODULUS)
}
#[wasm_bindgen]
pub fn caml_pasta_fp_add(x: WasmPastaFp, y: WasmPastaFp) -> WasmPastaFp {
WasmPastaFp(x.0 + y.0)
}
#[wasm_bindgen]
pub fn caml_pasta_fp_sub(x: WasmPastaFp, y: WasmPastaFp) -> WasmPastaFp {
WasmPastaFp(x.0 - y.0)
}
#[wasm_bindgen]
pub fn caml_pasta_fp_negate(x: WasmPastaFp) -> WasmPastaFp {
WasmPastaFp(-x.0)
}
#[wasm_bindgen]
pub fn caml_pasta_fp_mul(x: WasmPastaFp, y: WasmPastaFp) -> WasmPastaFp {
WasmPastaFp(x.0 * y.0)
}
#[wasm_bindgen]
pub fn caml_pasta_fp_div(x: WasmPastaFp, y: WasmPastaFp) -> WasmPastaFp {
WasmPastaFp(x.0 / y.0)
}
#[wasm_bindgen]
pub fn caml_pasta_fp_inv(x: WasmPastaFp) -> Option<WasmPastaFp> {
x.0.inverse().map(|x| WasmPastaFp(x))
}
#[wasm_bindgen]
pub fn caml_pasta_fp_square(x: WasmPastaFp) -> WasmPastaFp {
WasmPastaFp(x.0.square())
}
#[wasm_bindgen]
pub fn caml_pasta_fp_is_square(x: WasmPastaFp) -> bool {
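    // Euler's criterion: x is a square in Fp iff x^((p-1)/2) is 0 or 1.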
let s = x.0.pow(Fp_params::MODULUS_MINUS_ONE_DIV_TWO);
s.is_zero() || s.is_one()
}
#[wasm_bindgen]
pub fn caml_pasta_fp_sqrt(x: WasmPastaFp) -> Option<WasmPastaFp> {
x.0.sqrt().map(|x| WasmPastaFp(x))
}
#[wasm_bindgen]
pub fn caml_pasta_fp_of_int(i: i32) -> WasmPastaFp {
WasmPastaFp(Fp::from(i as u64))
}
#[wasm_bindgen]
pub fn caml_pasta_fp_to_string(x: WasmPastaFp) -> String {
bigint_256::to_biguint(&x.0.into_repr()).to_string()
}
#[wasm_bindgen]
pub fn caml_pasta_fp_of_string(s: String) -> Result<WasmPastaFp, JsValue> {
let biguint = BigUint::parse_bytes(s.as_bytes(), 10)
.ok_or(JsValue::from_str("caml_pasta_fp_of_string"))?;
match Fp::from_repr(bigint_256::of_biguint(&biguint)) {
Some(x) => Ok(x.into()),
None => Err(JsValue::from_str("caml_pasta_fp_of_string")),
}
}
#[wasm_bindgen]
pub fn caml_pasta_fp_print(x: WasmPastaFp) {
println!("{}", bigint_256::to_biguint(&(x.0.into_repr())));
}
#[wasm_bindgen]
pub fn caml_pasta_fp_compare(x: WasmPastaFp, y: WasmPastaFp) -> i32 {
match x.0.cmp(&y.0) {
Less => -1,
Equal => 0,
Greater => 1,
}
}
#[wasm_bindgen]
pub fn caml_pasta_fp_equal(x: WasmPastaFp, y: WasmPastaFp) -> bool {
x.0 == y.0
}
#[wasm_bindgen]
pub fn caml_pasta_fp_random() -> WasmPastaFp {
WasmPastaFp(UniformRand::rand(&mut rand::thread_rng()))
}
#[wasm_bindgen]
pub fn caml_pasta_fp_rng(i: i32) -> WasmPastaFp {
// We only care about entropy here, so we force a conversion i32 -> u32.
let i: u64 = (i as u32).into();
let mut rng: StdRng = rand::SeedableRng::seed_from_u64(i);
WasmPastaFp(UniformRand::rand(&mut rng))
}
#[wasm_bindgen]
pub fn caml_pasta_fp_to_bigint(x: WasmPastaFp) -> WasmBigInteger256 {
WasmBigInteger256(x.0.into_repr())
}
#[wasm_bindgen]
pub fn caml_pasta_fp_of_bigint(x: WasmBigInteger256) -> Result<WasmPastaFp, JsValue> {
match Fp::from_repr(x.0) {
Some(x) => Ok(x.into()),
None => Err(JsValue::from_str("caml_pasta_fp_of_bigint")),
}
}
#[wasm_bindgen]
pub fn caml_pasta_fp_two_adic_root_of_unity() -> WasmPastaFp {
WasmPastaFp(FftField::two_adic_root_of_unity())
}
#[wasm_bindgen]
pub fn caml_pasta_fp_domain_generator(log2_size: i32) -> WasmPastaFp {
match Domain::new(1 << log2_size) {
Some(x) => WasmPastaFp(x.group_gen),
None => panic!("caml_pasta_fp_domain_generator"),
}
}
#[wasm_bindgen]
pub fn caml_pasta_fp_to_bytes(x: WasmPastaFp) -> Vec<u8> {
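    // Copies the field element's raw in-memory representation into a byte
    // vector; this relies on `Fp` having a fixed, plain-data layout.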
let len = std::mem::size_of::<Fp>();
let mut str: Vec<u8> = Vec::with_capacity(len);
str.resize(len, 0);
let str_as_fp: *mut Fp = str.as_mut_ptr().cast::<Fp>();
unsafe {
*str_as_fp = x.0;
}
str
}
#[wasm_bindgen]
pub fn caml_pasta_fp_of_bytes(x: &[u8]) -> WasmPastaFp {
let len = std::mem::size_of::<Fp>();
if x.len() != len {
panic!("caml_pasta_fp_of_bytes");
};
let x = unsafe { *(x.as_ptr() as *const Fp) };
WasmPastaFp(x)
}
#[wasm_bindgen]
pub fn caml_pasta_fp_deep_copy(x: WasmPastaFp) -> WasmPastaFp | {
x
} |
|
DescribeConformancePacksCommand.ts | import { ConfigServiceClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../ConfigServiceClient";
import { DescribeConformancePacksRequest, DescribeConformancePacksResponse } from "../models/models_0";
import {
deserializeAws_json1_1DescribeConformancePacksCommand,
serializeAws_json1_1DescribeConformancePacksCommand,
} from "../protocols/Aws_json1_1";
import { getSerdePlugin } from "@aws-sdk/middleware-serde";
import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http";
import { Command as $Command } from "@aws-sdk/smithy-client";
import {
FinalizeHandlerArguments,
Handler,
HandlerExecutionContext,
MiddlewareStack,
HttpHandlerOptions as __HttpHandlerOptions,
MetadataBearer as __MetadataBearer,
SerdeContext as __SerdeContext,
} from "@aws-sdk/types";
export type DescribeConformancePacksCommandInput = DescribeConformancePacksRequest;
export type DescribeConformancePacksCommandOutput = DescribeConformancePacksResponse & __MetadataBearer;
/**
* <p>Returns a list of one or more conformance packs.</p> | DescribeConformancePacksCommandOutput,
ConfigServiceClientResolvedConfig
> {
// Start section: command_properties
// End section: command_properties
constructor(readonly input: DescribeConformancePacksCommandInput) {
// Start section: command_constructor
super();
// End section: command_constructor
}
/**
* @internal
*/
resolveMiddleware(
clientStack: MiddlewareStack<ServiceInputTypes, ServiceOutputTypes>,
configuration: ConfigServiceClientResolvedConfig,
options?: __HttpHandlerOptions
): Handler<DescribeConformancePacksCommandInput, DescribeConformancePacksCommandOutput> {
this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize));
const stack = clientStack.concat(this.middlewareStack);
const { logger } = configuration;
const clientName = "ConfigServiceClient";
const commandName = "DescribeConformancePacksCommand";
const handlerExecutionContext: HandlerExecutionContext = {
logger,
clientName,
commandName,
inputFilterSensitiveLog: DescribeConformancePacksRequest.filterSensitiveLog,
outputFilterSensitiveLog: DescribeConformancePacksResponse.filterSensitiveLog,
};
const { requestHandler } = configuration;
return stack.resolve(
(request: FinalizeHandlerArguments<any>) =>
requestHandler.handle(request.request as __HttpRequest, options || {}),
handlerExecutionContext
);
}
private serialize(input: DescribeConformancePacksCommandInput, context: __SerdeContext): Promise<__HttpRequest> {
return serializeAws_json1_1DescribeConformancePacksCommand(input, context);
}
private deserialize(output: __HttpResponse, context: __SerdeContext): Promise<DescribeConformancePacksCommandOutput> {
return deserializeAws_json1_1DescribeConformancePacksCommand(output, context);
}
// Start section: command_body_extra
// End section: command_body_extra
} | */
export class DescribeConformancePacksCommand extends $Command<
DescribeConformancePacksCommandInput, |
TypeProfessionalController.js | /* eslint-disable no-unused-vars */
require('dotenv').config();
const bcrypt = require('bcrypt');
const TypeProfessionalRepository = require('../repositories/TypeProfessionalRepository');
class TypeProfessionalController {
async index(req, res) {
const typeprofessional = await TypeProfessionalRepository.findAll();
res.json(typeprofessional);
}
async store(req, res) {
const { | situacao,
} = req.body;
    if (descricao === undefined || situacao === undefined) return res.status(400).json({ error: 'Please check all fields' });
const type = await TypeProfessionalRepository.create({
descricao,
situacao,
});
res.json(type);
}
async delete(req, res) {
const {
id,
} = req.body;
const type = await TypeProfessionalRepository.delete(
id,
);
res.json(type);
}
async update(req, res) {
const {
id,
descricao,
situacao,
} = req.body;
const type = await TypeProfessionalRepository.update(id, {
descricao,
situacao,
});
res.json(type);
}
}
module.exports = new TypeProfessionalController(); | descricao, |
ipam_rirs_bulk_update_parameters.go | // Code generated by go-swagger; DO NOT EDIT.
// Copyright (c) 2020 Samuel Mutel <[email protected]>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
//
package ipam
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"
"github.com/smutel/go-netbox/netbox/models"
)
// NewIpamRirsBulkUpdateParams creates a new IpamRirsBulkUpdateParams object,
// with the default timeout for this client.
//
// Default values are not hydrated, since defaults are normally applied by the API server side.
//
// To enforce default values in parameter, use SetDefaults or WithDefaults.
func NewIpamRirsBulkUpdateParams() *IpamRirsBulkUpdateParams {
return &IpamRirsBulkUpdateParams{
timeout: cr.DefaultTimeout,
}
}
// NewIpamRirsBulkUpdateParamsWithTimeout creates a new IpamRirsBulkUpdateParams object
// with the ability to set a timeout on a request.
func NewIpamRirsBulkUpdateParamsWithTimeout(timeout time.Duration) *IpamRirsBulkUpdateParams |
// NewIpamRirsBulkUpdateParamsWithContext creates a new IpamRirsBulkUpdateParams object
// with the ability to set a context for a request.
func NewIpamRirsBulkUpdateParamsWithContext(ctx context.Context) *IpamRirsBulkUpdateParams {
return &IpamRirsBulkUpdateParams{
Context: ctx,
}
}
// NewIpamRirsBulkUpdateParamsWithHTTPClient creates a new IpamRirsBulkUpdateParams object
// with the ability to set a custom HTTPClient for a request.
func NewIpamRirsBulkUpdateParamsWithHTTPClient(client *http.Client) *IpamRirsBulkUpdateParams {
return &IpamRirsBulkUpdateParams{
HTTPClient: client,
}
}
/* IpamRirsBulkUpdateParams contains all the parameters to send to the API endpoint
for the ipam rirs bulk update operation.
Typically these are written to a http.Request.
*/
type IpamRirsBulkUpdateParams struct {
// Data.
Data *models.RIR
timeout time.Duration
Context context.Context
HTTPClient *http.Client
}
// WithDefaults hydrates default values in the ipam rirs bulk update params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *IpamRirsBulkUpdateParams) WithDefaults() *IpamRirsBulkUpdateParams {
o.SetDefaults()
return o
}
// SetDefaults hydrates default values in the ipam rirs bulk update params (not the query body).
//
// All values with no default are reset to their zero value.
func (o *IpamRirsBulkUpdateParams) SetDefaults() {
// no default values defined for this parameter
}
// WithTimeout adds the timeout to the ipam rirs bulk update params
func (o *IpamRirsBulkUpdateParams) WithTimeout(timeout time.Duration) *IpamRirsBulkUpdateParams {
o.SetTimeout(timeout)
return o
}
// SetTimeout adds the timeout to the ipam rirs bulk update params
func (o *IpamRirsBulkUpdateParams) SetTimeout(timeout time.Duration) {
o.timeout = timeout
}
// WithContext adds the context to the ipam rirs bulk update params
func (o *IpamRirsBulkUpdateParams) WithContext(ctx context.Context) *IpamRirsBulkUpdateParams {
o.SetContext(ctx)
return o
}
// SetContext adds the context to the ipam rirs bulk update params
func (o *IpamRirsBulkUpdateParams) SetContext(ctx context.Context) {
o.Context = ctx
}
// WithHTTPClient adds the HTTPClient to the ipam rirs bulk update params
func (o *IpamRirsBulkUpdateParams) WithHTTPClient(client *http.Client) *IpamRirsBulkUpdateParams {
o.SetHTTPClient(client)
return o
}
// SetHTTPClient adds the HTTPClient to the ipam rirs bulk update params
func (o *IpamRirsBulkUpdateParams) SetHTTPClient(client *http.Client) {
o.HTTPClient = client
}
// WithData adds the data to the ipam rirs bulk update params
func (o *IpamRirsBulkUpdateParams) WithData(data *models.RIR) *IpamRirsBulkUpdateParams {
o.SetData(data)
return o
}
// SetData adds the data to the ipam rirs bulk update params
func (o *IpamRirsBulkUpdateParams) SetData(data *models.RIR) {
o.Data = data
}
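// A hedged usage sketch of the fluent setters above; the timeout and RIR
// payload are placeholders:
//
//	params := NewIpamRirsBulkUpdateParams().
//		WithTimeout(30 * time.Second).
//		WithData(&models.RIR{})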
// WriteToRequest writes these params to a swagger request
func (o *IpamRirsBulkUpdateParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
if err := r.SetTimeout(o.timeout); err != nil {
return err
}
var res []error
if o.Data != nil {
if err := r.SetBodyParam(o.Data); err != nil {
return err
}
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
| {
return &IpamRirsBulkUpdateParams{
timeout: timeout,
}
} |
post.go | package fetchqueue
import (
"encoding/json"
"os"
"errors"
"../../lib/inforer"
"../../lib/schedler"
"github.com/sirupsen/logrus"
tarantool "github.com/tarantool/go-tarantool"
)
func init() {
logrus.SetFormatter(&logrus.JSONFormatter{FieldMap: logrus.FieldMap{logrus.FieldKeyTime: "date", logrus.FieldKeyLevel: "type"}})
logrus.SetOutput(os.Stdout)
}
type TeacherSchedule struct {
Info inforer.TeacherInfo `json:"info"`
Schedule schedler.TeacherSchedule `json:"schedule"`
}
type GroupSchedule struct {
Info inforer.GroupInfo `json:"info"`
Schedule schedler.GroupSchedule `json:"schedule"`
}
type FetchTask struct {
Data struct {
Try int `json:"try"`
UserID string `json:"user_id"`
} `json:"data"`
ID int `json:"id"`
Destination string `json:"destination"`
Action string `json:"action"`
Type string `json:"type"`
Date string `json:"date"`
}
func (task *FetchTask) Unmarshal(data []byte, point interface{}) (err error) {
return json.Unmarshal(data, point)
}
func (task *FetchTask) Marshal() (string, error) {
result, err := json.Marshal(task)
if err != nil {
logrus.WithFields(logrus.Fields{"module": "Fetch"}).Error("Ошибка при формировании JSON ", err)
return "", err
}
return string(result), nil
}
func (schedule *TeacherSchedule) Post(connection *tarantool.Connection, task FetchTask) error {
jsonSchedule, err := json.Marshal(schedule)
if err != nil {
logrus.WithFields(logrus.Fields{"module": "Fetch", "section": "Post"}).Error("Невозможно сформировать JSON ", err)
return err
}
response, err := connection.Call("formatSchedule", []interface{}{task.Data.UserID, task.Date, task.Action, task.Destination, task.Type, jsonSchedule})
if err != nil {
logrus.WithFields(logrus.Fields{"module": "Fetch", "section": "Post"}).Error("Невозможно отправить расписание по преподавателю ", err)
return err
}
if response.Data[0].([]interface{})[0].(bool) == false {
logrus.WithFields(logrus.Fields{"module": "Fetch", "section": "Post"}).Error("Невозможно отправить расписание по преподавателю ", err)
return errors.New("Невозможно добавить расписание")
}
return nil
}
func (schedule *GroupSchedule) Post(connection *tarantool.Connection, task FetchTask) error {
jsonSchedule, err := json.Marshal(schedule)
if err != nil {
logrus.WithFields(logrus.Fields{"module": "Fetch", "section": "Post"}).Error("Невозможно сформировать JSON ", err) | response, err := connection.Call("formatSchedule", []interface{}{task.Data.UserID, task.Date, task.Action, task.Destination, task.Type, jsonSchedule})
if err != nil {
logrus.WithFields(logrus.Fields{"module": "Fetch", "section": "Post"}).Error("Невозможно отправить расписание по группе ", err)
return err
}
if response.Data[0].([]interface{})[0].(bool) == false {
logrus.WithFields(logrus.Fields{"module": "Fetch", "section": "Post"}).Error("Невозможно отправить расписание по группе ", err)
return errors.New("Невозможно добавить расписание")
}
return nil
} | return err
} |
task5.py | '''
# TASK 5 - Write a function that takes a string and a shift integer and returns the string with each letter shifted
- you can iterate over the letters in a string
- for letter in str:
'''
def get_shifted_string(string, shift):
| ''' return the input string with each letter shifted shift steps '''
raise NotImplementedError |
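# A minimal sketch of one possible solution, assuming a Caesar-style shift
# that wraps within each alphabet and leaves non-letter characters untouched:
#
#   def get_shifted_string(string, shift):
#       result = []
#       for letter in string:
#           if letter.isalpha():
#               base = ord('a') if letter.islower() else ord('A')
#               result.append(chr((ord(letter) - base + shift) % 26 + base))
#           else:
#               result.append(letter)
#       return ''.join(result)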
|
ajaxappender.js | //-- copyright
// OpenProject is a project management system.
// Copyright (C) 2012-2013 the OpenProject Foundation (OPF)
//
// This program is free software; you can redistribute it and/or
// modify it under the terms of the GNU General Public License version 3.
//
// OpenProject is a fork of ChiliProject, which is a fork of Redmine. The copyright follows:
// Copyright (C) 2006-2013 Jean-Philippe Lang
// Copyright (C) 2010-2013 the ChiliProject Team
//
// This program is free software; you can redistribute it and/or
// modify it under the terms of the GNU General Public License
// as published by the Free Software Foundation; either version 2
// of the License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
//
// See doc/COPYRIGHT.rdoc for more details.
//++
(function ($) {
var AjaxAppender = function (options) {
var append_href,
close,
target_container,
is_inplace,
is_loaded,
state_loading,
state_loaded,
replace_with_close,
replace_with_open,
slideIn,
init;
options = $.extend(true,
{},
{ loading_class: 'loading',
loading: null,
loaded: null,
load_target: null,
trigger: '.ajax_append',
container_class: 'ajax_appended_information',
indicator_class: 'ajax_indicator',
hide_text: 'Hide',
loading_text: null
},
options);
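    // A hedged usage sketch of these options via $.ajaxAppend (defined below);
    // the selector and text are illustrative, not plugin defaults:
    //   $.ajaxAppend({ load_target: '#details', loading_text: 'Loading...' });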
close = function () {
var close_link = $(this),
information_window = close_link.siblings('.' + options.container_class);
replace_with_open(close_link);
information_window.slideUp();
};
append_href = function (link) {
var target = target_container(link),
loading_div,
url = link.attr('href');
if (is_loaded(link)) {
state_loaded(target, link);
}
else {
state_loading(target);
$.ajax({ url: url,
headers: { Accept: 'text/javascript' },
complete: function (jqXHR) {
target.html(jqXHR.responseText);
state_loaded(target, link);
}
});
}
};
is_inplace = function() {
return options.load_target === null;
};
is_loaded = function(link) {
var container = target_container(link);
      return container.children().not('.' + options.indicator_class).length > 0;
};
target_container = function(link) {
var target,
container_string = '<div class="' + options.container_class + '"></div>',
container;
if (is_inplace()) {
target = link.parent();
}
else {
target = $(options.load_target);
}
container = target.find('.' + options.container_class);
      if (container.length === 0) {
container = $(container_string);
target.append(container);
}
return container;
};
state_loading = function (target) {
var loading = $('<span class="' + options.indicator_class + '"></span>'); | }
target.addClass(options.loading_class);
target.append(loading);
if (options.loading !== null) {
options.loading.call(this, target);
}
};
state_loaded = function (target, link) {
target.removeClass(options.loading_class);
if (is_inplace()) {
replace_with_close(link, true);
}
if (options.loaded !== null) {
target.slideDown(function() {
options.loaded.call(this, target, link);
});
}
else{
target.slideDown();
}
};
replace_with_close = function (to_replace, hide) {
var close_link = $('<a href="javascript:void(0)">' + options.hide_text + '</a>');
to_replace.after(close_link);
if (hide) {
to_replace.hide();
}
else {
to_replace.remove();
}
close_link.click(close);
};
replace_with_open = function(to_replace) {
var load_link = to_replace.siblings(options.trigger);
to_replace.remove();
/* this link is never removed, only hidden */
load_link.show();
};
    $(options.trigger).click(function() {
append_href($(this));
return false;
});
return this;
};
if ($.ajaxAppend) {
return;
}
$.ajaxAppend = function (options) {
AjaxAppender(options);
return this;
};
}(jQuery)); |
if (options.loading_text !== null) {
loading.html(options.loading_text); |
reverse.pipe.spec.ts | import {ReversePipe} from './reverse.pipe';
describe('ReversePipe', () => {
it('should reverse string', () => {
const reversePipe = new ReversePipe();
expect(reversePipe.transform('hello')).toEqual('olleh');
}); | }); |
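// A minimal ReversePipe sketch consistent with this spec (an assumption; the
// real implementation lives in ./reverse.pipe and is not shown in this row):
//
//   import { Pipe, PipeTransform } from '@angular/core';
//
//   @Pipe({ name: 'reverse' })
//   export class ReversePipe implements PipeTransform {
//     transform(value: string): string {
//       return value.split('').reverse().join('');
//     }
//   }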
|
s0053_maximum_subarray.rs | /**
* [53] Maximum Subarray
*
* Given an integer array nums, find the contiguous subarray (containing at least one number) which has the largest sum and return its sum.
*
* Example:
*
*
* Input: [-2,1,-3,4,-1,2,1,-5,4],
* Output: 6
* Explanation: [4,-1,2,1] has the largest sum = 6.
*
*
* Follow up:
*
* If you have figured out the O(n) solution, try coding another solution using the divide and conquer approach, which is more subtle.
*
*/
pub struct Solution {}
// problem: https://leetcode.com/problems/maximum-subarray/
// discuss: https://leetcode.com/problems/maximum-subarray/discuss/?currentPage=1&orderBy=most_votes&query=
// submission codes start here
impl Solution {
pub fn | (nums: Vec<i32>) -> i32 {
        let mut max = i32::min_value();
        let mut curr = 0;
        // Kadane's algorithm: extend the running sum, track the best total
        // seen so far, and reset the run once it drops to zero or below.
        for j in 0..nums.len() {
curr += nums[j];
max = i32::max(max, curr);
if curr <= 0 {
curr = 0;
}
}
max
}
}
// submission codes end
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_53() {
assert_eq!(
Solution::max_sub_array(vec![-2, 1, -3, 4, -1, 2, 1, -5, 4]),
6
);
assert_eq!(Solution::max_sub_array(vec![-8]), -8);
assert_eq!(Solution::max_sub_array(vec![-8, -2]), -2);
}
}
| max_sub_array |
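// For the divide-and-conquer follow-up mentioned in the problem statement, a
// hedged O(n log n) sketch (not part of the submitted solution above):
//
//   fn max_sub_dc(nums: &[i32]) -> i32 {
//       if nums.len() == 1 {
//           return nums[0];
//       }
//       let mid = nums.len() / 2;
//       let best_side = i32::max(max_sub_dc(&nums[..mid]), max_sub_dc(&nums[mid..]));
//       // Best sum crossing the midpoint = best non-empty suffix of the left
//       // half + best non-empty prefix of the right half.
//       let (mut best_l, mut sum) = (i32::min_value(), 0);
//       for &n in nums[..mid].iter().rev() {
//           sum += n;
//           best_l = i32::max(best_l, sum);
//       }
//       let (mut best_r, mut sum) = (i32::min_value(), 0);
//       for &n in &nums[mid..] {
//           sum += n;
//           best_r = i32::max(best_r, sum);
//       }
//       i32::max(best_side, best_l + best_r)
//   }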
info.rs | use crate::{api_client::{Client,
PortableText,
TabularText},
common::ui::{Status,
UIWriter,
UI},
error::{Error,
Result},
PRODUCT,
VERSION};
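/// Fetch metadata for `origin` from the Builder API at `bldr_url` and print it
/// to the UI, either as raw JSON (`to_json == true`) or as a formatted table.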
pub async fn start(ui: &mut UI,
bldr_url: &str,
token: &str,
origin: &str,
to_json: bool)
-> Result<()> | {
let api_client = Client::new(bldr_url, PRODUCT, VERSION, None).map_err(Error::APIClient)?;
match api_client.origin_info(token, origin).await {
Ok(resp) => {
if to_json {
match resp.as_json() {
Ok(body) => {
println!("{}", body);
Ok(())
}
Err(e) => {
ui.fatal(format!("Failed to deserialize into json! {:?}.", e))?;
Err(Error::from(e))
}
}
} else {
ui.status(Status::Discovering, "origin metadata".to_string())?;
println!("Origin [{}]:", origin);
match resp.as_tabbed() {
Ok(body) => {
println!("{}", body);
Ok(())
}
Err(e) => {
ui.fatal(format!("Failed to format origin metadata! {:?}.", e))?;
Err(Error::from(e))
}
}
}
}
Err(e) => {
ui.fatal(format!("Failed to retrieve origin metadata! {:?}.", e))?;
Err(Error::from(e))
}
}
} |
|
google.cloud.recaptchaenterprise.v1beta1.rs | /// The create assessment request message.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateAssessmentRequest {
/// Required. The name of the project in which the assessment will be created,
/// in the format "projects/{project_number}".
#[prost(string, tag = "1")]
pub parent: ::prost::alloc::string::String,
/// Required. The assessment details.
#[prost(message, optional, tag = "2")]
pub assessment: ::core::option::Option<Assessment>,
}
/// The request message to annotate an Assessment.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AnnotateAssessmentRequest {
/// Required. The resource name of the Assessment, in the format
/// "projects/{project_number}/assessments/{assessment_id}".
#[prost(string, tag = "1")]
pub name: ::prost::alloc::string::String,
/// Required. The annotation that will be assigned to the Event.
#[prost(enumeration = "annotate_assessment_request::Annotation", tag = "2")]
pub annotation: i32,
}
/// Nested message and enum types in `AnnotateAssessmentRequest`.
pub mod annotate_assessment_request {
    /// Enum that represents the types of annotations.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum Annotation {
/// Default unspecified type.
Unspecified = 0,
/// Provides information that the event turned out to be legitimate.
Legitimate = 1,
/// Provides information that the event turned out to be fraudulent.
Fraudulent = 2,
}
}
/// Empty response for AnnotateAssessment.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AnnotateAssessmentResponse {}
/// A recaptcha assessment resource.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Assessment {
/// Output only. The resource name for the Assessment in the format
/// "projects/{project_number}/assessments/{assessment_id}".
#[prost(string, tag = "1")]
pub name: ::prost::alloc::string::String,
/// The event being assessed.
#[prost(message, optional, tag = "2")]
pub event: ::core::option::Option<Event>,
/// Output only. Legitimate event score from 0.0 to 1.0.
/// (1.0 means very likely legitimate traffic while 0.0 means very likely
/// non-legitimate traffic).
#[prost(float, tag = "3")]
pub score: f32,
/// Output only. Properties of the provided event token.
#[prost(message, optional, tag = "4")]
pub token_properties: ::core::option::Option<TokenProperties>,
/// Output only. Reasons contributing to the risk analysis verdict.
#[prost(
enumeration = "assessment::ClassificationReason",
repeated,
packed = "false",
tag = "5"
)]
pub reasons: ::prost::alloc::vec::Vec<i32>,
}
/// Nested message and enum types in `Assessment`.
pub mod assessment {
/// LINT.IfChange(classification_reason)
/// Reasons contributing to the risk analysis verdict.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum ClassificationReason {
/// Default unspecified type.
Unspecified = 0,
/// Interactions matched the behavior of an automated agent.
Automation = 1,
/// The event originated from an illegitimate environment.
UnexpectedEnvironment = 2,
/// Traffic volume from the event source is higher than normal.
TooMuchTraffic = 3,
/// Interactions with the site were significantly different than expected
/// patterns.
UnexpectedUsagePatterns = 4,
/// Too little traffic has been received from this site thus far to generate
/// quality risk analysis.
LowConfidenceScore = 5,
}
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Event {
/// Optional. The user response token provided by the reCAPTCHA client-side integration
/// on your site.
#[prost(string, tag = "1")]
pub token: ::prost::alloc::string::String,
/// Optional. The site key that was used to invoke reCAPTCHA on your site and generate
/// the token.
#[prost(string, tag = "2")]
pub site_key: ::prost::alloc::string::String,
/// Optional. The user agent present in the request from the user's device related to
/// this event.
#[prost(string, tag = "3")]
pub user_agent: ::prost::alloc::string::String,
/// Optional. The IP address in the request from the user's device related to this event.
#[prost(string, tag = "4")]
pub user_ip_address: ::prost::alloc::string::String,
/// Optional. The expected action for this type of event. This should be the same action
/// provided at token generation time on client-side platforms already
/// integrated with recaptcha enterprise.
#[prost(string, tag = "5")]
pub expected_action: ::prost::alloc::string::String,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TokenProperties {
/// Whether the provided user response token is valid.
#[prost(bool, tag = "1")]
pub valid: bool,
/// Reason associated with the response when valid = false.
#[prost(enumeration = "token_properties::InvalidReason", tag = "2")]
pub invalid_reason: i32,
/// The timestamp corresponding to the generation of the token.
#[prost(message, optional, tag = "3")]
pub create_time: ::core::option::Option<::prost_types::Timestamp>,
/// The hostname of the page on which the token was generated.
#[prost(string, tag = "4")]
pub hostname: ::prost::alloc::string::String,
/// Action name provided at token generation.
#[prost(string, tag = "5")]
pub action: ::prost::alloc::string::String,
}
/// Nested message and enum types in `TokenProperties`.
pub mod token_properties {
/// LINT.IfChange
/// Enum that represents the types of invalid token reasons.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum InvalidReason {
/// Default unspecified type.
Unspecified = 0,
/// If the failure reason was not accounted for.
UnknownInvalidReason = 1,
/// The provided user verification token was malformed.
Malformed = 2,
/// The user verification token had expired.
Expired = 3,
/// The user verification had already been seen.
Dupe = 4,
/// The user verification token did not match the provided site key.
/// This may be a configuration error (e.g. development keys used in
/// production) or end users trying to use verification tokens from other
/// sites.
SiteMismatch = 5,
/// The user verification token was not present. It is a required input.
Missing = 6,
}
}
/// The create key request message.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateKeyRequest {
/// Required. The name of the project in which the key will be created, in the
/// format "projects/{project_number}".
#[prost(string, tag = "1")]
pub parent: ::prost::alloc::string::String,
/// Required. Information to create a reCAPTCHA Enterprise key.
#[prost(message, optional, tag = "2")]
pub key: ::core::option::Option<Key>,
}
/// The list keys request message.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListKeysRequest {
/// Required. The name of the project that contains the keys that will be
/// listed, in the format "projects/{project_number}".
#[prost(string, tag = "1")]
pub parent: ::prost::alloc::string::String,
/// Optional. The maximum number of keys to return. Default is 10. Max limit is
/// 1000.
#[prost(int32, tag = "2")]
pub page_size: i32,
    /// Optional. The next_page_token value returned from a previous
    /// ListKeysRequest, if any.
#[prost(string, tag = "3")]
pub page_token: ::prost::alloc::string::String,
}
/// Response to request to list keys in a project.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListKeysResponse {
/// Key details.
#[prost(message, repeated, tag = "1")]
pub keys: ::prost::alloc::vec::Vec<Key>,
/// Token to retrieve the next page of results. It is set to empty if no keys
/// remain in results.
#[prost(string, tag = "2")]
pub next_page_token: ::prost::alloc::string::String,
}
/// The get key request message.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetKeyRequest {
/// Required. The name of the requested key, in the format
/// "projects/{project_number}/keys/{key_id}".
#[prost(string, tag = "1")]
pub name: ::prost::alloc::string::String,
}
/// The update key request message.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UpdateKeyRequest {
/// Required. The key to update.
#[prost(message, optional, tag = "1")]
pub key: ::core::option::Option<Key>,
    /// Optional. The mask to control which fields of the key get updated. If the mask is not
/// present, all fields will be updated.
#[prost(message, optional, tag = "2")]
pub update_mask: ::core::option::Option<::prost_types::FieldMask>,
}
/// The delete key request message.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteKeyRequest {
/// Required. The name of the key to be deleted, in the format
/// "projects/{project_number}/keys/{key_id}".
#[prost(string, tag = "1")]
pub name: ::prost::alloc::string::String,
}
/// A key used to identify and configure applications (web and/or mobile) that
/// use reCAPTCHA Enterprise.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Key {
/// The resource name for the Key in the format
/// "projects/{project_number}/keys/{key_id}".
#[prost(string, tag = "1")]
pub name: ::prost::alloc::string::String,
/// Human-readable display name of this key. Modifiable by user.
#[prost(string, tag = "2")]
pub display_name: ::prost::alloc::string::String,
/// Platform specific settings for this key. The key can only be used on one
/// platform, the one it has settings for.
#[prost(oneof = "key::PlatformSettings", tags = "3, 4, 5")]
pub platform_settings: ::core::option::Option<key::PlatformSettings>,
}
/// Nested message and enum types in `Key`.
pub mod key {
/// Platform specific settings for this key. The key can only be used on one
/// platform, the one it has settings for.
#[derive(Clone, PartialEq, ::prost::Oneof)]
pub enum PlatformSettings {
/// Settings for keys that can be used by websites.
#[prost(message, tag = "3")]
WebSettings(super::WebKeySettings),
/// Settings for keys that can be used by Android apps.
#[prost(message, tag = "4")]
AndroidSettings(super::AndroidKeySettings),
/// Settings for keys that can be used by iOS apps.
#[prost(message, tag = "5")]
IosSettings(super::IosKeySettings),
}
}
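// Illustrative construction of a score-only web key (every value below is a
// placeholder assumption, not prescribed by the generated types):
//
//   let key = Key {
//       name: String::new(), // assigned by the service on creation
//       display_name: "my-site-key".to_string(),
//       platform_settings: Some(key::PlatformSettings::WebSettings(WebKeySettings {
//           enforce_allowed_domains: true,
//           allowed_domains: vec!["example.com".to_string()],
//           allow_amp_traffic: false,
//           integration_type: web_key_settings::IntegrationType::ScoreOnly as i32,
//           challenge_security_preference: 0, // unspecified
//       })),
//   };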
/// Settings specific to keys that can be used by websites.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct WebKeySettings {
/// Whether allowed_domains is enforced or not.
#[prost(bool, tag = "3")]
pub enforce_allowed_domains: bool,
/// Domains or subdomains of websites allowed to use the key. All subdomains
/// of an allowed domain are automatically allowed. A valid domain requires a
/// host and must not include any path, port, query or fragment.
/// Examples: 'example.com' or 'subdomain.example.com'
#[prost(string, repeated, tag = "1")]
pub allowed_domains: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
/// Whether this key can be used on AMP (Accelerated Mobile Pages) websites.
#[prost(bool, tag = "2")]
pub allow_amp_traffic: bool,
/// Required. Describes how this key is integrated with the website.
#[prost(enumeration = "web_key_settings::IntegrationType", tag = "4")]
pub integration_type: i32,
/// Settings for the frequency and difficulty at which this key triggers
/// captcha challenges. This should only be specified for IntegrationTypes
/// CHECKBOX_CHALLENGE and INVISIBLE_CHALLENGE.
#[prost(
enumeration = "web_key_settings::ChallengeSecurityPreference",
tag = "5"
)]
pub challenge_security_preference: i32,
}
/// Nested message and enum types in `WebKeySettings`.
pub mod web_key_settings {
/// Enum that represents the integration types for web keys.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum IntegrationType {
/// Default type that indicates this enum hasn't been specified. This is not
/// a valid IntegrationType, one of the other types must be specified
/// instead.
Unspecified = 0,
/// Only used to produce scores. It doesn't display the "I'm not a robot"
/// checkbox and never shows captcha challenges.
ScoreOnly = 1,
/// Displays the "I'm not a robot" checkbox and may show captcha challenges
/// after it is checked.
CheckboxChallenge = 2,
/// Doesn't display the "I'm not a robot" checkbox, but may show captcha
/// challenges after risk analysis.
InvisibleChallenge = 3,
}
/// Enum that represents the possible challenge frequency and difficulty
/// configurations for a web key.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum ChallengeSecurityPreference {
/// Default type that indicates this enum hasn't been specified.
Unspecified = 0,
/// Key tends to show fewer and easier challenges.
Usability = 1,
/// Key tends to show balanced (in amount and difficulty) challenges.
Balanced = 2,
/// Key tends to show more and harder challenges.
Security = 3,
}
}
/// Settings specific to keys that can be used by Android apps.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AndroidKeySettings {
/// Android package names of apps allowed to use the key.
/// Example: 'com.companyname.appname'
#[prost(string, repeated, tag = "1")]
pub allowed_package_names: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
/// Settings specific to keys that can be used by iOS apps.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct IosKeySettings {
/// iOS bundle ids of apps allowed to use the key.
/// Example: 'com.companyname.productname.appname'
#[prost(string, repeated, tag = "1")]
pub allowed_bundle_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
#[doc = r" Generated client implementations."]
pub mod recaptcha_enterprise_service_v1_beta1_client {
#![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)]
use tonic::codegen::*;
#[doc = " Service to determine the likelihood an event is legitimate."]
#[derive(Debug, Clone)]
pub struct RecaptchaEnterpriseServiceV1Beta1Client<T> {
inner: tonic::client::Grpc<T>,
}
impl<T> RecaptchaEnterpriseServiceV1Beta1Client<T>
where
T: tonic::client::GrpcService<tonic::body::BoxBody>,
T::ResponseBody: Body + Send + 'static,
T::Error: Into<StdError>,
<T::ResponseBody as Body>::Error: Into<StdError> + Send,
{
pub fn new(inner: T) -> Self {
let inner = tonic::client::Grpc::new(inner);
Self { inner }
}
pub fn with_interceptor<F>(
inner: T,
interceptor: F,
) -> RecaptchaEnterpriseServiceV1Beta1Client<InterceptedService<T, F>>
where
F: tonic::service::Interceptor,
T: tonic::codegen::Service<
http::Request<tonic::body::BoxBody>,
Response = http::Response<
<T as tonic::client::GrpcService<tonic::body::BoxBody>>::ResponseBody,
>,
>,
<T as tonic::codegen::Service<http::Request<tonic::body::BoxBody>>>::Error:
Into<StdError> + Send + Sync,
{
RecaptchaEnterpriseServiceV1Beta1Client::new(InterceptedService::new(
inner,
interceptor,
))
}
#[doc = r" Compress requests with `gzip`."]
#[doc = r""]
#[doc = r" This requires the server to support it otherwise it might respond with an"]
#[doc = r" error."]
pub fn send_gzip(mut self) -> Self {
self.inner = self.inner.send_gzip();
self
}
#[doc = r" Enable decompressing responses with `gzip`."]
pub fn accept_gzip(mut self) -> Self {
self.inner = self.inner.accept_gzip();
self
}
#[doc = " Creates an Assessment of the likelihood an event is legitimate."]
pub async fn create_assessment(
&mut self,
request: impl tonic::IntoRequest<super::CreateAssessmentRequest>,
) -> Result<tonic::Response<super::Assessment>, tonic::Status> {
self.inner.ready().await.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static("/google.cloud.recaptchaenterprise.v1beta1.RecaptchaEnterpriseServiceV1Beta1/CreateAssessment");
self.inner.unary(request.into_request(), path, codec).await
}
#[doc = " Annotates a previously created Assessment to provide additional information"]
#[doc = " on whether the event turned out to be authentic or fradulent."]
pub async fn annotate_assessment(
&mut self,
request: impl tonic::IntoRequest<super::AnnotateAssessmentRequest>,
) -> Result<tonic::Response<super::AnnotateAssessmentResponse>, tonic::Status> {
self.inner.ready().await.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static("/google.cloud.recaptchaenterprise.v1beta1.RecaptchaEnterpriseServiceV1Beta1/AnnotateAssessment");
self.inner.unary(request.into_request(), path, codec).await
}
#[doc = " Creates a new reCAPTCHA Enterprise key."]
pub async fn create_key(
&mut self,
request: impl tonic::IntoRequest<super::CreateKeyRequest>,
) -> Result<tonic::Response<super::Key>, tonic::Status> {
self.inner.ready().await.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static("/google.cloud.recaptchaenterprise.v1beta1.RecaptchaEnterpriseServiceV1Beta1/CreateKey");
self.inner.unary(request.into_request(), path, codec).await
}
#[doc = " Returns the list of all keys that belong to a project."]
pub async fn list_keys(
&mut self,
request: impl tonic::IntoRequest<super::ListKeysRequest>,
) -> Result<tonic::Response<super::ListKeysResponse>, tonic::Status> {
self.inner.ready().await.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static("/google.cloud.recaptchaenterprise.v1beta1.RecaptchaEnterpriseServiceV1Beta1/ListKeys");
self.inner.unary(request.into_request(), path, codec).await
}
#[doc = " Returns the specified key."]
pub async fn | (
&mut self,
request: impl tonic::IntoRequest<super::GetKeyRequest>,
) -> Result<tonic::Response<super::Key>, tonic::Status> {
self.inner.ready().await.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static("/google.cloud.recaptchaenterprise.v1beta1.RecaptchaEnterpriseServiceV1Beta1/GetKey");
self.inner.unary(request.into_request(), path, codec).await
}
#[doc = " Updates the specified key."]
pub async fn update_key(
&mut self,
request: impl tonic::IntoRequest<super::UpdateKeyRequest>,
) -> Result<tonic::Response<super::Key>, tonic::Status> {
self.inner.ready().await.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static("/google.cloud.recaptchaenterprise.v1beta1.RecaptchaEnterpriseServiceV1Beta1/UpdateKey");
self.inner.unary(request.into_request(), path, codec).await
}
#[doc = " Deletes the specified key."]
pub async fn delete_key(
&mut self,
request: impl tonic::IntoRequest<super::DeleteKeyRequest>,
) -> Result<tonic::Response<()>, tonic::Status> {
self.inner.ready().await.map_err(|e| {
tonic::Status::new(
tonic::Code::Unknown,
format!("Service was not ready: {}", e.into()),
)
})?;
let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static("/google.cloud.recaptchaenterprise.v1beta1.RecaptchaEnterpriseServiceV1Beta1/DeleteKey");
self.inner.unary(request.into_request(), path, codec).await
}
}
}
| get_key |
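// Hedged usage sketch for the generated client above (the endpoint URL and
// project number are assumptions; kept entirely as comments):
//
//   let channel = tonic::transport::Channel::from_static(
//       "https://recaptchaenterprise.googleapis.com")
//       .connect()
//       .await?;
//   let mut client = RecaptchaEnterpriseServiceV1Beta1Client::new(channel);
//   let assessment = client
//       .create_assessment(CreateAssessmentRequest {
//           parent: "projects/123456".to_string(),
//           assessment: Some(Assessment::default()),
//       })
//       .await?
//       .into_inner();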
config.go | // Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package aggregate implements a read-only aggregator for config stores.
package aggregate
import (
"errors"
"istio.io/pkg/ledger"
"github.com/hashicorp/go-multierror"
"istio.io/pkg/log"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pkg/config/schema/collection"
"istio.io/istio/pkg/config/schema/resource"
)
var errorUnsupported = errors.New("unsupported operation: the config aggregator is read-only")
// Make creates an aggregate config store from several config stores and
// unifies their descriptors
func Make(stores []model.ConfigStore) (model.ConfigStore, error) {
union := collection.NewSchemasBuilder()
storeTypes := make(map[resource.GroupVersionKind][]model.ConfigStore)
for _, store := range stores {
for _, s := range store.Schemas().All() {
if len(storeTypes[s.Resource().GroupVersionKind()]) == 0 {
if err := union.Add(s); err != nil {
return nil, err
}
}
storeTypes[s.Resource().GroupVersionKind()] = append(storeTypes[s.Resource().GroupVersionKind()], store)
}
}
schemas := union.Build()
if err := schemas.Validate(); err != nil {
return nil, err
}
result := &store{
schemas: schemas,
stores: storeTypes,
}
var l ledger.Ledger
for _, store := range stores {
if l == nil {
l = store.GetLedger()
result.getVersion = store.Version
result.getResourceAtVersion = store.GetResourceAtVersion
} else {
err := store.SetLedger(l)
if err != nil {
log.Warnf("Config Store %v cannot track distribution in aggregate: %v", store, err)
}
}
}
return result, nil
}
// MakeCache creates an aggregate config store cache from several config store
// caches.
func MakeCache(caches []model.ConfigStoreCache) (model.ConfigStoreCache, error) {
stores := make([]model.ConfigStore, 0, len(caches))
for _, cache := range caches {
stores = append(stores, cache)
}
store, err := Make(stores)
if err != nil {
return nil, err
}
return &storeCache{
ConfigStore: store,
caches: caches,
}, nil
}
type store struct {
	// schemas is the unified set of schemas across all aggregated stores
schemas collection.Schemas
// stores is a mapping from config type to a store
stores map[resource.GroupVersionKind][]model.ConfigStore
getVersion func() string
getResourceAtVersion func(version, key string) (resourceVersion string, err error)
ledger ledger.Ledger
}
func (cr *store) GetLedger() ledger.Ledger {
return cr.ledger
}
| cr.ledger = l
return nil
}
func (cr *store) GetResourceAtVersion(version string, key string) (resourceVersion string, err error) {
return cr.getResourceAtVersion(version, key)
}
func (cr *store) Schemas() collection.Schemas {
return cr.schemas
}
func (cr *store) Version() string {
return cr.getVersion()
}
// Get the first config found in the stores.
func (cr *store) Get(typ resource.GroupVersionKind, name, namespace string) *model.Config {
for _, store := range cr.stores[typ] {
config := store.Get(typ, name, namespace)
if config != nil {
return config
}
}
return nil
}
// List all configs in the stores.
func (cr *store) List(typ resource.GroupVersionKind, namespace string) ([]model.Config, error) {
if len(cr.stores[typ]) == 0 {
return nil, nil
}
var errs *multierror.Error
var configs []model.Config
// Used to remove duplicated config
configMap := make(map[string]struct{})
for _, store := range cr.stores[typ] {
storeConfigs, err := store.List(typ, namespace)
if err != nil {
errs = multierror.Append(errs, err)
}
		for _, config := range storeConfigs {
			// Separators keep e.g. ("ns", "aname") and ("nsa", "name") distinct.
			key := config.GroupVersionKind.Kind + "/" + config.Namespace + "/" + config.Name
if _, exist := configMap[key]; exist {
continue
}
configs = append(configs, config)
configMap[key] = struct{}{}
}
}
return configs, errs.ErrorOrNil()
}
func (cr *store) Delete(_ resource.GroupVersionKind, _, _ string) error {
return errorUnsupported
}
func (cr *store) Create(model.Config) (string, error) {
return "", errorUnsupported
}
func (cr *store) Update(model.Config) (string, error) {
return "", errorUnsupported
}
type storeCache struct {
model.ConfigStore
caches []model.ConfigStoreCache
}
func (cr *storeCache) HasSynced() bool {
for _, cache := range cr.caches {
if !cache.HasSynced() {
return false
}
}
return true
}
func (cr *storeCache) RegisterEventHandler(kind resource.GroupVersionKind, handler func(model.Config, model.Config, model.Event)) {
for _, cache := range cr.caches {
if _, exists := cache.Schemas().FindByGroupVersionKind(kind); exists {
cache.RegisterEventHandler(kind, handler)
}
}
}
func (cr *storeCache) Run(stop <-chan struct{}) {
for _, cache := range cr.caches {
go cache.Run(stop)
}
<-stop
} | func (cr *store) SetLedger(l ledger.Ledger) error { |
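// Hedged usage sketch for the aggregator (store construction elided; the
// concrete stores and the GroupVersionKind below are assumptions):
//
//   agg, err := aggregate.Make([]model.ConfigStore{kubeStore, fileStore})
//   if err != nil {
//       log.Fatala(err)
//   }
//   cfgs, _ := agg.List(someGVK, "default") // de-duplicated across stores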
ops.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes and functions used to construct graphs."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
import sys
import threading
import types
import numpy as np
import six
from six.moves import map # pylint: disable=redefined-builtin
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import function_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.core.framework import op_def_pb2
from tensorflow.core.framework import versions_pb2
from tensorflow.core.protobuf import config_pb2
# pywrap_tensorflow must be imported first to avoid protobuf issues.
# (b/143110113)
# pylint: disable=invalid-import-order,g-bad-import-order,unused-import
from tensorflow.python import pywrap_tensorflow
from tensorflow.python import pywrap_tfe
# pylint: enable=invalid-import-order,g-bad-import-order,unused-import
from tensorflow.python import tf2
from tensorflow.python.client import pywrap_tf_session
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.eager import monitoring
from tensorflow.python.eager import tape
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import registry
from tensorflow.python.framework import tensor_conversion_registry
from tensorflow.python.framework import tensor_like
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import traceable_stack
from tensorflow.python.framework import versions
from tensorflow.python.ops import control_flow_util
from tensorflow.python.platform import app
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import decorator_utils
from tensorflow.python.util import deprecation
from tensorflow.python.util import function_utils
from tensorflow.python.util import lock_util
from tensorflow.python.util import memory
from tensorflow.python.util import object_identity
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_stack
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.deprecation import deprecated_args
from tensorflow.python.util.lazy_loader import LazyLoader
from tensorflow.python.util.tf_export import kwarg_only
from tensorflow.python.util.tf_export import tf_export
ag_ctx = LazyLoader(
"ag_ctx", globals(),
"tensorflow.python.autograph.core.ag_ctx")
# Temporary global switches determining if we should enable the work-in-progress
# calls to the C API. These will be removed once all functionality is supported.
_USE_C_API = True
_USE_C_SHAPES = True
_api_usage_gauge = monitoring.BoolGauge(
"/tensorflow/api/ops_eager_execution",
"Whether ops.enable_eager_execution() is called.")
# pylint: disable=protected-access
_TensorLike = tensor_like._TensorLike
_DTYPES_INTERN_TABLE = dtypes._INTERN_TABLE
# pylint: enable=protected-access
def tensor_id(tensor):
"""Returns a unique identifier for this Tensor."""
return tensor._id # pylint: disable=protected-access
class _UserDeviceSpec(object):
"""Store user-specified device and provide computation of merged device."""
def __init__(self, device_name_or_function):
self._device_name_or_function = device_name_or_function
self.display_name = str(self._device_name_or_function)
self.function = device_name_or_function
self.raw_string = None
if isinstance(device_name_or_function, pydev.MergeDevice):
self.is_null_merge = device_name_or_function.is_null_merge
elif callable(device_name_or_function):
self.is_null_merge = False
dev_func = self._device_name_or_function
func_name = function_utils.get_func_name(dev_func)
func_code = function_utils.get_func_code(dev_func)
if func_code:
fname = func_code.co_filename
lineno = func_code.co_firstlineno
else:
fname = "unknown"
lineno = -1
self.display_name = "%s<%s, %d>" % (func_name, fname, lineno)
elif device_name_or_function is None:
# NOTE(taylorrobie): This MUST be False. None signals a break in the
# device stack, so `is_null_merge` must be False for such a case to
# allow callers to safely skip over null merges without missing a None.
self.is_null_merge = False
else:
self.raw_string = device_name_or_function
self.function = pydev.merge_device(device_name_or_function)
self.is_null_merge = self.function.is_null_merge
# We perform this check in __init__ because it is of non-trivial cost,
# and self.string_merge is typically called many times.
self.fast_string_merge = isinstance(self.function, pydev.MergeDevice)
def string_merge(self, node_def):
if self.fast_string_merge:
return self.function.shortcut_string_merge(node_def)
return compat.as_str(_device_string(self.function(node_def)))
class NullContextmanager(object):
def __init__(self, *args, **kwargs):
pass
def __enter__(self):
pass
def __exit__(self, type_arg, value_arg, traceback_arg):
return False # False values do not suppress exceptions
def _override_helper(clazz_object, operator, func):
"""Overrides (string) operator on Tensors to call func.
Args:
clazz_object: the class to override for; either Tensor or SparseTensor.
operator: the string name of the operator to override.
func: the function that replaces the overridden operator.
Raises:
ValueError: If operator has already been overwritten,
or if operator is not allowed to be overwritten.
"""
existing = getattr(clazz_object, operator, None)
if existing is not None:
# Check to see if this is a default method-wrapper or slot wrapper which
# will be true for the comparison operators.
if not isinstance(existing, type(object.__lt__)):
raise ValueError("operator %s cannot be overwritten again on class %s." %
(operator, clazz_object))
if operator not in Tensor.OVERLOADABLE_OPERATORS:
raise ValueError("Overriding %s is disallowed" % operator)
setattr(clazz_object, operator, func)
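# Illustrative call (an assumption for exposition only; the real registrations
# are performed elsewhere, e.g. when math_ops installs arithmetic overloads):
#   _override_helper(Tensor, "__add__", math_ops.add)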
def _as_graph_element(obj):
"""Convert `obj` to a graph element if possible, otherwise return `None`.
Args:
obj: Object to convert.
Returns:
The result of `obj._as_graph_element()` if that method is available;
otherwise `None`.
"""
conv_fn = getattr(obj, "_as_graph_element", None)
if conv_fn and callable(conv_fn):
return conv_fn()
return None
_TENSOR_LIKE_TYPES = tuple()
def is_dense_tensor_like(t):
"""EXPERIMENTAL: Returns true if `t` implements the tensor interface.
See `register_dense_tensor_like_type()` for the current definition of a
"tensor-like type".
Args:
t: An object.
Returns:
True iff `t` is an instance of one of the registered "tensor-like" types.
"""
return isinstance(t, _TENSOR_LIKE_TYPES)
def register_dense_tensor_like_type(tensor_type):
"""EXPERIMENTAL: Registers `tensor_type` as implementing the tensor interface.
A "tensor-like type" can represent a single dense tensor, and implements
the `name`, `dtype` and `shape` properties.
Args:
tensor_type: A type implementing the tensor interface.
Raises:
TypeError: If `tensor_type` does not implement the tensor interface.
"""
if not (hasattr(tensor_type, "name") and
isinstance(tensor_type.name, property)):
raise TypeError("Type %s does not define a `name` property" %
tensor_type.__name__)
if not (hasattr(tensor_type, "dtype") and
isinstance(tensor_type.dtype, property)):
raise TypeError("Type %s does not define a `dtype` property" %
tensor_type.__name__)
if not (hasattr(tensor_type, "shape") and
isinstance(tensor_type.shape, property)):
raise TypeError("Type %s does not define a `shape` property" %
tensor_type.__name__)
# We expect this list to be small, so choose quadratic complexity
# for registration, so that we have a tuple that can be used for
# more efficient `isinstance` checks later.
global _TENSOR_LIKE_TYPES
_TENSOR_LIKE_TYPES = tuple(list(_TENSOR_LIKE_TYPES) + [tensor_type])
def uid():
"""A unique (within this program execution) integer."""
return pywrap_tfe.TFE_Py_UID()
def numpy_text(tensor, is_repr=False):
"""Human readable representation of a tensor's numpy value."""
if tensor.dtype.is_numpy_compatible:
# pylint: disable=protected-access
text = repr(tensor._numpy()) if is_repr else str(tensor._numpy())
# pylint: enable=protected-access
else:
text = "<unprintable>"
if "\n" in text:
text = "\n" + text
return text
@tf_export(v1=["enable_tensor_equality"])
def enable_tensor_equality():
"""Compare Tensors with element-wise comparison and thus be unhashable.
Comparing tensors with element-wise allows comparisons such as
tf.Variable(1.0) == 1.0. Element-wise equality implies that tensors are
unhashable. Thus tensors can no longer be directly used in sets or as a key in
a dictionary.
"""
Tensor._USE_EQUALITY = True # pylint: disable=protected-access
@tf_export(v1=["disable_tensor_equality"])
def disable_tensor_equality():
"""Compare Tensors by their id and be hashable.
This is a legacy behaviour of TensorFlow and is highly discouraged.
"""
Tensor._USE_EQUALITY = False # pylint: disable=protected-access
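# Illustrative behaviour of the two switches above (a sketch, not a doctest;
# exact reprs vary by version):
#   enable_tensor_equality()   ->  tf.constant(1.0) == 1.0 evaluates
#                                  element-wise to a boolean Tensor, and
#                                  hash(tf.constant(1.0)) raises TypeError.
#   disable_tensor_equality()  ->  `==` falls back to object identity, and
#                                  tensors are hashable by id() again.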
@tf_export("Tensor")
class Tensor(_TensorLike):
"""A tensor represents a rectangular array of data.
When writing a TensorFlow program, the main object you manipulate and pass
around is the `tf.Tensor`. A `tf.Tensor` object represents a rectangular array
of arbitrary dimension, filled with data of a specific data type.
A `tf.Tensor` has the following properties:
* a data type (float32, int32, or string, for example)
* a shape
Each element in the Tensor has the same data type, and the data type is always
known.
In eager execution, which is the default mode in TensorFlow, results are
calculated immediately.
>>> # Compute some values using a Tensor
>>> c = tf.constant([[1.0, 2.0], [3.0, 4.0]])
>>> d = tf.constant([[1.0, 1.0], [0.0, 1.0]])
>>> e = tf.matmul(c, d)
>>> print(e)
tf.Tensor(
[[1. 3.]
[3. 7.]], shape=(2, 2), dtype=float32)
Note that during eager execution, you may discover your `Tensors` are actually
of type `EagerTensor`. This is an internal detail, but it does give you
access to a useful function, `numpy`:
>>> type(e)
<class '...ops.EagerTensor'>
>>> print(e.numpy())
[[1. 3.]
[3. 7.]]
TensorFlow can define computations without immediately executing them, most
commonly inside `tf.function`s, as well as in (legacy) Graph mode. In those
cases, the shape (that is, the rank of the Tensor and the size of
each dimension) might be only partially known.
Most operations produce tensors of fully-known shapes if the shapes of their
inputs are also fully known, but in some cases it's only possible to find the
shape of a tensor at execution time.
There are specialized tensors; for these, see `tf.Variable`, `tf.constant`,
`tf.placeholder`, `tf.SparseTensor`, and `tf.RaggedTensor`.
  For more on Tensors, see the [guide](https://tensorflow.org/guide/tensor).
"""
# List of Python operators that we allow to override.
OVERLOADABLE_OPERATORS = {
# Binary.
"__add__",
"__radd__",
"__sub__",
"__rsub__",
"__mul__",
"__rmul__",
"__div__",
"__rdiv__",
"__truediv__",
"__rtruediv__",
"__floordiv__",
"__rfloordiv__",
"__mod__",
"__rmod__",
"__lt__",
"__le__",
"__gt__",
"__ge__",
"__ne__",
"__eq__",
"__and__",
"__rand__",
"__or__",
"__ror__",
"__xor__",
"__rxor__",
"__getitem__",
"__pow__",
"__rpow__",
# Unary.
"__invert__",
"__neg__",
"__abs__",
"__matmul__",
"__rmatmul__"
}
# Whether to allow hashing or numpy-style equality
_USE_EQUALITY = tf2.enabled()
def __init__(self, op, value_index, dtype):
"""Creates a new `Tensor`.
Args:
op: An `Operation`. `Operation` that computes this tensor.
value_index: An `int`. Index of the operation's endpoint that produces
this tensor.
dtype: A `DType`. Type of elements stored in this tensor.
Raises:
TypeError: If the op is not an `Operation`.
"""
if not isinstance(op, Operation):
raise TypeError("op needs to be an Operation: %s" % op)
self._op = op
self._value_index = value_index
self._dtype = dtypes.as_dtype(dtype)
# This will be set by self._as_tf_output().
self._tf_output = None
# This will be set by self.shape().
self._shape_val = None
# List of operations that use this Tensor as input. We maintain this list
# to easily navigate a computation graph.
self._consumers = []
self._id = uid()
self._name = None
@staticmethod
def _create_with_tf_output(op, value_index, dtype, tf_output):
ret = Tensor(op, value_index, dtype)
ret._tf_output = tf_output
return ret
@property
def op(self):
"""The `Operation` that produces this tensor as an output."""
return self._op
@property
def dtype(self):
"""The `DType` of elements in this tensor."""
return self._dtype
@property
def graph(self):
"""The `Graph` that contains this tensor."""
return self._op.graph
@property
def name(self):
"""The string name of this tensor."""
if self._name is None:
if not self._op.name:
raise ValueError("Operation was not named: %s" % self._op)
self._name = "%s:%d" % (self._op.name, self._value_index)
return self._name
@property
def device(self):
"""The name of the device on which this tensor will be produced, or None."""
return self._op.device
@property
def shape(self):
"""Returns the `TensorShape` that represents the shape of this tensor.
The shape is computed using shape inference functions that are
registered in the Op for each `Operation`. See
`tf.TensorShape`
for more details of what a shape represents.
The inferred shape of a tensor is used to provide shape
information without having to execute the underlying kernel. This
can be used for debugging and providing early error messages. For
example:
```python
>>> c = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
>>> print(c.shape) # will be TensorShape([2, 3])
(2, 3)
>>> d = tf.constant([[1.0, 0.0], [0.0, 1.0], [1.0, 0.0], [0.0, 1.0]])
>>> print(d.shape)
(4, 2)
# Raises a ValueError, because `c` and `d` do not have compatible
# inner dimensions.
>>> e = tf.matmul(c, d)
Traceback (most recent call last):
...
tensorflow.python.framework.errors_impl.InvalidArgumentError: Matrix
size-incompatible: In[0]: [2,3], In[1]: [4,2] [Op:MatMul] name: MatMul/
# This works because we have compatible shapes.
>>> f = tf.matmul(c, d, transpose_a=True, transpose_b=True)
>>> print(f.shape)
(3, 4)
```
In some cases, the inferred shape may have unknown dimensions. If
the caller has additional information about the values of these
dimensions, `Tensor.set_shape()` can be used to augment the
inferred shape.
Returns:
A `tf.TensorShape` representing the shape of this tensor.
"""
if self._shape_val is None:
self._shape_val = self._c_api_shape()
return self._shape_val
def _c_api_shape(self):
"""Returns the TensorShape of this tensor according to the C API."""
c_graph = self._op._graph._c_graph # pylint: disable=protected-access
shape_vec, unknown_shape = pywrap_tf_session.TF_GraphGetTensorShapeHelper(
c_graph, self._as_tf_output())
if unknown_shape:
return tensor_shape.unknown_shape()
else:
shape_vec = [None if d == -1 else d for d in shape_vec]
return tensor_shape.TensorShape(shape_vec)
@property
def _shape(self):
logging.warning("Tensor._shape is private, use Tensor.shape "
"instead. Tensor._shape will eventually be removed.")
return self.shape
@_shape.setter
def _shape(self, value):
raise ValueError(
"Tensor._shape cannot be assigned, use Tensor.set_shape instead.")
def _disallow_when_autograph_disabled(self, task):
raise errors.OperatorNotAllowedInGraphError(
"{} is not allowed: AutoGraph is disabled in this function."
" Try decorating it directly with @tf.function.".format(task))
def _disallow_when_autograph_enabled(self, task):
raise errors.OperatorNotAllowedInGraphError(
"{} is not allowed: AutoGraph did not convert this function. Try"
" decorating it directly with @tf.function.".format(task))
def _disallow_in_graph_mode(self, task):
raise errors.OperatorNotAllowedInGraphError(
"{} is not allowed in Graph execution. Use Eager execution or decorate"
" this function with @tf.function.".format(task))
def _disallow_bool_casting(self):
if ag_ctx.control_status_ctx().status == ag_ctx.Status.DISABLED:
self._disallow_when_autograph_disabled(
"using a `tf.Tensor` as a Python `bool`")
elif ag_ctx.control_status_ctx().status == ag_ctx.Status.ENABLED:
self._disallow_when_autograph_enabled(
"using a `tf.Tensor` as a Python `bool`")
else:
# Default: V1-style Graph execution.
self._disallow_in_graph_mode("using a `tf.Tensor` as a Python `bool`")
def _disallow_iteration(self):
if ag_ctx.control_status_ctx().status == ag_ctx.Status.DISABLED:
self._disallow_when_autograph_disabled("iterating over `tf.Tensor`")
elif ag_ctx.control_status_ctx().status == ag_ctx.Status.ENABLED:
self._disallow_when_autograph_enabled("iterating over `tf.Tensor`")
else:
# Default: V1-style Graph execution.
self._disallow_in_graph_mode("iterating over `tf.Tensor`")
def __iter__(self):
if not context.executing_eagerly():
self._disallow_iteration()
shape = self._shape_tuple()
if shape is None:
raise TypeError("Cannot iterate over a tensor with unknown shape.")
if not shape:
raise TypeError("Cannot iterate over a scalar tensor.")
if shape[0] is None:
raise TypeError(
"Cannot iterate over a tensor with unknown first dimension.")
return _TensorIterator(self, shape[0])
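  # Iteration example (eager only; in graph mode the _disallow_* guards above
  # raise instead):
  #   for row in tf.constant([[1, 2], [3, 4]]):  # yields two rank-1 Tensors
  #     ...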
def _shape_as_list(self):
if self.shape.ndims is not None:
return [dim.value for dim in self.shape.dims]
else:
return None
def _shape_tuple(self):
shape = self._shape_as_list()
if shape is None:
return None
return tuple(shape)
def _rank(self):
"""Integer rank of this Tensor, if known, else None.
Returns:
Integer rank or None
"""
return self.shape.ndims
def get_shape(self):
"""Alias of `tf.Tensor.shape`."""
return self.shape
def set_shape(self, shape):
"""Updates the shape of this tensor.
This method can be called multiple times, and will merge the given
`shape` with the current shape of this tensor. It can be used to
provide additional information about the shape of this tensor that
cannot be inferred from the graph alone. For example, this can be used
to provide additional information about the shapes of images:
```python
_, image_data = tf.compat.v1.TFRecordReader(...).read(...)
image = tf.image.decode_png(image_data, channels=3)
# The height and width dimensions of `image` are data dependent, and
# cannot be computed without executing the op.
print(image.shape)
==> TensorShape([Dimension(None), Dimension(None), Dimension(3)])
# We know that each image in this dataset is 28 x 28 pixels.
image.set_shape([28, 28, 3])
print(image.shape)
==> TensorShape([Dimension(28), Dimension(28), Dimension(3)])
```
NOTE: This shape is not enforced at runtime. Setting incorrect shapes can
result in inconsistencies between the statically-known graph and the runtime
value of tensors. For runtime validation of the shape, use `tf.ensure_shape`
instead.
Args:
shape: A `TensorShape` representing the shape of this tensor, a
`TensorShapeProto`, a list, a tuple, or None.
Raises:
ValueError: If `shape` is not compatible with the current shape of
this tensor.
"""
# Reset cached shape.
self._shape_val = None
# We want set_shape to be reflected in the C API graph for when we run it.
if not isinstance(shape, tensor_shape.TensorShape):
shape = tensor_shape.TensorShape(shape)
dim_list = []
if shape.dims is None:
unknown_shape = True
else:
unknown_shape = False
for dim in shape.dims:
if dim.value is None:
dim_list.append(-1)
else:
dim_list.append(dim.value)
try:
pywrap_tf_session.TF_GraphSetTensorShape_wrapper(
self._op._graph._c_graph, # pylint: disable=protected-access
self._as_tf_output(),
dim_list,
unknown_shape)
except errors.InvalidArgumentError as e:
# Convert to ValueError for backwards compatibility.
raise ValueError(str(e))
@property
def value_index(self):
"""The index of this tensor in the outputs of its `Operation`."""
return self._value_index
def consumers(self):
"""Returns a list of `Operation`s that consume this tensor.
Returns:
A list of `Operation`s.
"""
consumer_names = pywrap_tf_session.TF_OperationOutputConsumers_wrapper(
self._as_tf_output())
# pylint: disable=protected-access
return [
self.graph._get_operation_by_name_unsafe(name)
for name in consumer_names
]
# pylint: enable=protected-access
def _as_node_def_input(self):
"""Return a value to use for the NodeDef "input" attribute.
The returned string can be used in a NodeDef "input" attribute
to indicate that the NodeDef uses this Tensor as input.
Raises:
ValueError: if this Tensor's Operation does not have a name.
Returns:
a string.
"""
if not self._op.name:
raise ValueError("Operation was not named: %s" % self._op)
if self._value_index == 0:
return self._op.name
else:
return "%s:%d" % (self._op.name, self._value_index)
def _as_tf_output(self):
# pylint: disable=protected-access
# NOTE: Beyond preventing unnecessary (re-)allocation, the cached object
# also guarantees that a dictionary of tf_output objects will retain a
# deterministic (yet unsorted) order which prevents memory blowup in the
# cache of executor(s) stored for every session.
if self._tf_output is None:
self._tf_output = c_api_util.tf_output(self.op._c_op, self.value_index)
return self._tf_output
# pylint: enable=protected-access
def __str__(self):
return "Tensor(\"%s\"%s%s%s)" % (
self.name,
(", shape=%s" %
self.get_shape()) if self.get_shape().ndims is not None else "",
(", dtype=%s" % self._dtype.name) if self._dtype else "",
(", device=%s" % self.device) if self.device else "")
def __repr__(self):
return "<tf.Tensor '%s' shape=%s dtype=%s>" % (self.name, self.get_shape(),
self._dtype.name)
def __hash__(self):
g = getattr(self, "graph", None)
if (Tensor._USE_EQUALITY and executing_eagerly_outside_functions() and
(g is None or g.building_function)):
raise TypeError("Tensor is unhashable. "
"Instead, use tensor.ref() as the key.")
else:
return id(self)
def __copy__(self):
# TODO(b/77597810): get rid of Tensor copies.
cls = self.__class__
result = cls.__new__(cls)
result.__dict__.update(self.__dict__)
return result
# NOTE(mrry): This enables the Tensor's overloaded "right" binary
# operators to run when the left operand is an ndarray, because it
# accords the Tensor class higher priority than an ndarray, or a
# numpy matrix.
# TODO(mrry): Convert this to using numpy's __numpy_ufunc__
# mechanism, which allows more control over how Tensors interact
# with ndarrays.
__array_priority__ = 100
def __array__(self):
raise NotImplementedError("Cannot convert a symbolic Tensor ({}) to a numpy"
" array.".format(self.name))
def __len__(self):
raise TypeError("len is not well defined for symbolic Tensors. ({}) "
"Please call `x.shape` rather than `len(x)` for "
"shape information.".format(self.name))
@staticmethod
def _override_operator(operator, func):
_override_helper(Tensor, operator, func)
def __bool__(self):
"""Dummy method to prevent a tensor from being used as a Python `bool`.
This overload raises a `TypeError` when the user inadvertently
treats a `Tensor` as a boolean (most commonly in an `if` or `while`
statement), in code that was not converted by AutoGraph. For example:
```python
if tf.constant(True): # Will raise.
# ...
if tf.constant(5) < tf.constant(7): # Will raise.
# ...
```
Raises:
`TypeError`.
"""
self._disallow_bool_casting()
def __nonzero__(self):
"""Dummy method to prevent a tensor from being used as a Python `bool`.
This is the Python 2.x counterpart to `__bool__()` above.
Raises:
`TypeError`.
"""
self._disallow_bool_casting()
def eval(self, feed_dict=None, session=None):
"""Evaluates this tensor in a `Session`.
Note: If you are not using `compat.v1` libraries, you should not need this,
(or `feed_dict` or `Session`). In eager execution (or within `tf.function`)
you do not need to call `eval`.
Calling this method will execute all preceding operations that
produce the inputs needed for the operation that produces this
tensor.
*N.B.* Before invoking `Tensor.eval()`, its graph must have been
launched in a session, and either a default session must be
available, or `session` must be specified explicitly.
Args:
feed_dict: A dictionary that maps `Tensor` objects to feed values. See
`tf.Session.run` for a description of the valid feed values.
session: (Optional.) The `Session` to be used to evaluate this tensor. If
none, the default session will be used.
Returns:
A numpy array corresponding to the value of this tensor.
"""
return _eval_using_default_session(self, feed_dict, self.graph, session)
@deprecation.deprecated(None, "Use ref() instead.")
def experimental_ref(self):
return self.ref()
def ref(self):
# tf.Variable also has the same ref() API. If you update the
# documentation here, please update tf.Variable.ref() as well.
"""Returns a hashable reference object to this Tensor.
The primary use case for this API is to put tensors in a set/dictionary.
We can't put tensors in a set/dictionary as `tensor.__hash__()` is no longer
available starting Tensorflow 2.0.
The following will raise an exception starting 2.0
>>> x = tf.constant(5)
>>> y = tf.constant(10)
>>> z = tf.constant(10)
>>> tensor_set = {x, y, z}
Traceback (most recent call last):
...
TypeError: Tensor is unhashable. Instead, use tensor.ref() as the key.
>>> tensor_dict = {x: 'five', y: 'ten'}
Traceback (most recent call last):
...
TypeError: Tensor is unhashable. Instead, use tensor.ref() as the key.
Instead, we can use `tensor.ref()`.
>>> tensor_set = {x.ref(), y.ref(), z.ref()}
>>> x.ref() in tensor_set
True
>>> tensor_dict = {x.ref(): 'five', y.ref(): 'ten', z.ref(): 'ten'}
>>> tensor_dict[y.ref()]
'ten'
Also, the reference object provides `.deref()` function that returns the
original Tensor.
>>> x = tf.constant(5)
>>> x.ref().deref()
<tf.Tensor: shape=(), dtype=int32, numpy=5>
"""
return object_identity.Reference(self)
# TODO(agarwal): consider getting rid of this.
class _EagerTensorBase(Tensor):
"""Base class for EagerTensor."""
# __complex__, __int__, __float__ and __index__ may copy the tensor to CPU and
# only work for scalars; values are cast as per numpy.
def __complex__(self):
return complex(self._numpy())
def __int__(self):
return int(self._numpy())
def __long__(self):
return long(self._numpy())
def __float__(self):
return float(self._numpy())
def __index__(self):
return self._numpy().__index__()
def __bool__(self):
return bool(self._numpy())
__nonzero__ = __bool__
def __format__(self, format_spec):
return self._numpy().__format__(format_spec)
def __reduce__(self):
return convert_to_tensor, (self._numpy(),)
def __copy__(self):
# Eager Tensors are immutable so it's safe to return themselves as a copy.
return self
def __deepcopy__(self, memo):
# Eager Tensors are immutable so it's safe to return themselves as a copy.
del memo
return self
def __str__(self):
return "tf.Tensor(%s, shape=%s, dtype=%s)" % (numpy_text(self), self.shape,
self.dtype.name)
def __repr__(self):
return "<tf.Tensor: shape=%s, dtype=%s, numpy=%s>" % (
self.shape, self.dtype.name, numpy_text(self, is_repr=True))
def __len__(self):
"""Returns the length of the first dimension in the Tensor."""
if not self.shape.ndims:
raise TypeError("Scalar tensor has no `len()`")
# pylint: disable=protected-access
try:
return self._shape_tuple()[0]
except core._NotOkStatusException as e:
six.raise_from(core._status_to_exception(e.code, e.message), None)
def _numpy_internal(self):
raise NotImplementedError()
def _numpy(self):
# pylint: disable=protected-access
try:
return self._numpy_internal()
except core._NotOkStatusException as e:
six.raise_from(core._status_to_exception(e.code, e.message), None)
@property
def dtype(self):
# Note: using the intern table directly here as this is
# performance-sensitive in some models.
return dtypes._INTERN_TABLE[self._datatype_enum()] # pylint: disable=protected-access
def numpy(self):
"""Copy of the contents of this Tensor into a NumPy array or scalar.
Unlike NumPy arrays, Tensors are immutable, so this method has to copy
the contents to ensure safety. Use `memoryview` to get a readonly
view of the contents without doing a copy:
>>> t = tf.constant([42])
>>> np.array(memoryview(t))
array([42], dtype=int32)
Note that `memoryview` is only zero-copy for Tensors on CPU. If a Tensor
is on GPU, it will have to be transferred to CPU first in order for
`memoryview` to work.
Returns:
A NumPy array of the same shape and dtype or a NumPy scalar, if this
Tensor has rank 0.
Raises:
ValueError: If the dtype of this Tensor does not have a compatible
NumPy dtype.
"""
# TODO(slebedev): Consider avoiding a copy for non-CPU or remote tensors.
maybe_arr = self._numpy() # pylint: disable=protected-access
return maybe_arr.copy() if isinstance(maybe_arr, np.ndarray) else maybe_arr
@property
def backing_device(self):
"""Returns the name of the device holding this tensor's memory.
`.backing_device` is usually the same as `.device`, which returns
the device on which the kernel of the operation that produced this tensor
ran. However, some operations can produce tensors on a different device
(e.g., an operation that executes on the GPU but produces output tensors
in host memory).
"""
raise NotImplementedError()
def _datatype_enum(self):
raise NotImplementedError()
def _shape_tuple(self):
"""The shape of this Tensor, as a tuple.
    This is more performant than tuple(shape().as_list()) as it avoids
    two list creations and one object creation. Marked private for now as from an API
perspective, it would be better to have a single performant way of
getting a shape rather than exposing shape() and shape_tuple()
(and heaven forbid, shape_list() etc. as well!). Punting on that for now,
but ideally one would work things out and remove the need for this method.
Returns:
tuple with the shape.
"""
raise NotImplementedError()
def _rank(self):
"""Integer rank of this Tensor.
Unlike regular Tensors, the rank is always known for EagerTensors.
    This is more performant than len(self._shape_tuple()).
Returns:
Integer rank
"""
raise NotImplementedError()
def _num_elements(self):
"""Number of elements of this Tensor.
Unlike regular Tensors, the number of elements is always known for
EagerTensors.
    This is more performant than tensor.shape.num_elements.
    Returns:
      Integer number of elements in the tensor.
"""
raise NotImplementedError()
def _copy_to_device(self, device_name): # pylint: disable=redefined-outer-name
raise NotImplementedError()
@staticmethod
def _override_operator(name, func):
setattr(_EagerTensorBase, name, func)
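  # Sketch (illustrative): this hook is how other framework modules attach
  # operator overloads to eager tensors; math_ops effectively does
  #   _EagerTensorBase._override_operator("__neg__", gen_math_ops.neg)
  # so that `-t` dispatches to the registered op.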
def _copy_nograd(self, ctx=None, device_name=None):
"""Copies tensor to dest device, but doesn't record the operation."""
# Creates a new tensor on the dest device.
if ctx is None:
ctx = context.context()
if device_name is None:
device_name = ctx.device_name
# pylint: disable=protected-access
try:
ctx.ensure_initialized()
new_tensor = self._copy_to_device(device_name)
except core._NotOkStatusException as e:
six.raise_from(core._status_to_exception(e.code, e.message), None)
return new_tensor
def _copy(self, ctx=None, device_name=None):
"""Copies tensor to dest device."""
new_tensor = self._copy_nograd(ctx, device_name)
# Record the copy on tape and define backprop copy as well.
if context.executing_eagerly():
self_device = self.device
def grad_fun(dresult):
return [
dresult._copy(device_name=self_device)
if hasattr(dresult, "_copy") else dresult
]
tape.record_operation("_copy", [new_tensor], [self], grad_fun)
return new_tensor
# pylint: enable=protected-access
@property
def shape(self):
if self._tensor_shape is None: # pylint: disable=access-member-before-definition
# pylint: disable=protected-access
try:
# `_tensor_shape` is declared and defined in the definition of
# `EagerTensor`, in C.
self._tensor_shape = tensor_shape.TensorShape(self._shape_tuple())
except core._NotOkStatusException as e:
six.raise_from(core._status_to_exception(e.code, e.message), None)
return self._tensor_shape
def get_shape(self):
"""Alias of Tensor.shape."""
return self.shape
def _shape_as_list(self):
"""The shape of the tensor as a list."""
return list(self._shape_tuple())
@property
def ndim(self):
"""Returns the number of Tensor dimensions."""
return self.shape.ndims
@deprecation.deprecated(None, "Use tf.identity instead.")
def cpu(self):
"""A copy of this Tensor with contents backed by host memory."""
return self._copy(context.context(), "CPU:0")
@deprecation.deprecated(None, "Use tf.identity instead.")
def gpu(self, gpu_index=0):
"""A copy of this Tensor with contents backed by memory on the GPU.
    Args:
      gpu_index: Identifies which GPU the contents of the returned Tensor
        should be placed on.
Returns:
A GPU-memory backed Tensor object initialized with the same contents
as this Tensor.
"""
return self._copy(context.context(), "GPU:" + str(gpu_index))
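  # Sketch (illustrative): per the deprecation notes above, the non-deprecated
  # equivalent of `.cpu()` / `.gpu()` is `tf.identity` under an explicit
  # device scope:
  #   with tf.device("CPU:0"):
  #     host_copy = tf.identity(t)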
def set_shape(self, shape):
if not self.shape.is_compatible_with(shape):
raise ValueError(
"Tensor's shape %s is not compatible with supplied shape %s" %
(self.shape, shape))
# Methods not supported / implemented for Eager Tensors.
@property
def op(self):
raise AttributeError(
"Tensor.op is meaningless when eager execution is enabled.")
@property
def graph(self):
raise AttributeError(
"Tensor.graph is meaningless when eager execution is enabled.")
@property
def name(self):
raise AttributeError(
"Tensor.name is meaningless when eager execution is enabled.")
@property
def value_index(self):
raise AttributeError(
"Tensor.value_index is meaningless when eager execution is enabled.")
def consumers(self):
raise NotImplementedError(
"Tensor.consumers is meaningless when eager execution is enabled.")
def _add_consumer(self, consumer):
raise NotImplementedError(
"_add_consumer not supported when eager execution is enabled.")
def _as_node_def_input(self):
raise NotImplementedError(
"_as_node_def_input not supported when eager execution is enabled.")
def _as_tf_output(self):
raise NotImplementedError(
"_as_tf_output not supported when eager execution is enabled.")
def eval(self, feed_dict=None, session=None):
raise NotImplementedError(
"eval is not supported when eager execution is enabled, "
"is .numpy() what you're looking for?")
# This call creates an EagerTensor class, as a subclass of _EagerTensorBase, and
# registers it with the current module.
EagerTensor = pywrap_tfe.TFE_Py_InitEagerTensor(_EagerTensorBase)
register_dense_tensor_like_type(Tensor)
@tf_export(v1=["convert_to_tensor"])
def convert_to_tensor_v1(value,
dtype=None,
name=None,
preferred_dtype=None,
dtype_hint=None):
"""Converts the given `value` to a `Tensor`.
This function converts Python objects of various types to `Tensor`
objects. It accepts `Tensor` objects, numpy arrays, Python lists,
and Python scalars. For example:
```python
import numpy as np
def my_func(arg):
arg = tf.convert_to_tensor(arg, dtype=tf.float32)
return tf.matmul(arg, arg) + arg
# The following calls are equivalent.
value_1 = my_func(tf.constant([[1.0, 2.0], [3.0, 4.0]]))
value_2 = my_func([[1.0, 2.0], [3.0, 4.0]])
value_3 = my_func(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32))
```
This function can be useful when composing a new operation in Python
(such as `my_func` in the example above). All standard Python op
constructors apply this function to each of their Tensor-valued
inputs, which allows those ops to accept numpy arrays, Python lists,
and scalars in addition to `Tensor` objects.
Note: This function diverges from default Numpy behavior for `float` and
`string` types when `None` is present in a Python list or scalar. Rather
than silently converting `None` values, an error will be thrown.
Args:
value: An object whose type has a registered `Tensor` conversion function.
dtype: Optional element type for the returned tensor. If missing, the type
is inferred from the type of `value`.
name: Optional name to use if a new `Tensor` is created.
preferred_dtype: Optional element type for the returned tensor, used when
dtype is None. In some cases, a caller may not have a dtype in mind when
converting to a tensor, so preferred_dtype can be used as a soft
preference. If the conversion to `preferred_dtype` is not possible, this
argument has no effect.
    dtype_hint: Same meaning as preferred_dtype; takes precedence over it
      when both are supplied.
Returns:
A `Tensor` based on `value`.
Raises:
TypeError: If no conversion function is registered for `value` to `dtype`.
RuntimeError: If a registered conversion function returns an invalid value.
ValueError: If the `value` is a tensor not of given `dtype` in graph mode.
"""
preferred_dtype = deprecation.deprecated_argument_lookup(
"dtype_hint", dtype_hint, "preferred_dtype", preferred_dtype)
return convert_to_tensor_v2(value, dtype, preferred_dtype, name)
@tf_export("convert_to_tensor", v1=[])
def convert_to_tensor_v2(value, dtype=None, dtype_hint=None, name=None):
"""Converts the given `value` to a `Tensor`.
This function converts Python objects of various types to `Tensor`
objects. It accepts `Tensor` objects, numpy arrays, Python lists,
and Python scalars. For example:
>>> def my_func(arg):
... arg = tf.convert_to_tensor(arg, dtype=tf.float32)
... return arg
>>> # The following calls are equivalent.
>>> value_1 = my_func(tf.constant([[1.0, 2.0], [3.0, 4.0]]))
>>> print(value_1)
tf.Tensor(
[[1. 2.]
[3. 4.]], shape=(2, 2), dtype=float32)
>>> value_2 = my_func([[1.0, 2.0], [3.0, 4.0]])
>>> print(value_2)
tf.Tensor(
[[1. 2.]
[3. 4.]], shape=(2, 2), dtype=float32)
>>> value_3 = my_func(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32))
>>> print(value_3)
tf.Tensor(
[[1. 2.]
[3. 4.]], shape=(2, 2), dtype=float32)
This function can be useful when composing a new operation in Python
(such as `my_func` in the example above). All standard Python op
constructors apply this function to each of their Tensor-valued
inputs, which allows those ops to accept numpy arrays, Python lists,
and scalars in addition to `Tensor` objects.
Note: This function diverges from default Numpy behavior for `float` and
`string` types when `None` is present in a Python list or scalar. Rather
than silently converting `None` values, an error will be thrown.
Args:
value: An object whose type has a registered `Tensor` conversion function.
dtype: Optional element type for the returned tensor. If missing, the type
is inferred from the type of `value`.
dtype_hint: Optional element type for the returned tensor, used when dtype
is None. In some cases, a caller may not have a dtype in mind when
converting to a tensor, so dtype_hint can be used as a soft preference.
If the conversion to `dtype_hint` is not possible, this argument has no
effect.
name: Optional name to use if a new `Tensor` is created.
Returns:
A `Tensor` based on `value`.
Raises:
TypeError: If no conversion function is registered for `value` to `dtype`.
RuntimeError: If a registered conversion function returns an invalid value.
ValueError: If the `value` is a tensor not of given `dtype` in graph mode.
"""
return convert_to_tensor(
value=value,
dtype=dtype,
name=name,
preferred_dtype=dtype_hint,
as_ref=False)
def _error_prefix(name):
return "" if name is None else "%s: " % name
def convert_to_tensor(value,
dtype=None,
name=None,
as_ref=False,
preferred_dtype=None,
dtype_hint=None,
ctx=None,
accepted_result_types=(Tensor,)):
"""Implementation of the public convert_to_tensor."""
# TODO(b/142518781): Fix all call-sites and remove redundant arg
preferred_dtype = preferred_dtype or dtype_hint
if isinstance(value, EagerTensor):
if ctx is None:
ctx = context.context()
if not ctx.executing_eagerly():
graph = get_default_graph()
if not graph.building_function:
raise RuntimeError("Attempting to capture an EagerTensor without "
"building a function.")
return graph.capture(value, name=name)
if dtype is not None:
dtype = dtypes.as_dtype(dtype)
if isinstance(value, Tensor):
if dtype is not None and not dtype.is_compatible_with(value.dtype):
raise ValueError(
"Tensor conversion requested dtype %s for Tensor with dtype %s: %r" %
(dtype.name, value.dtype.name, value))
return value
if preferred_dtype is not None:
preferred_dtype = dtypes.as_dtype(preferred_dtype)
for base_type, conversion_func in tensor_conversion_registry.get(type(value)):
# If dtype is None but preferred_dtype is not None, we try to
# cast to preferred_dtype first.
ret = None
if dtype is None and preferred_dtype is not None:
try:
ret = conversion_func(
value, dtype=preferred_dtype, name=name, as_ref=as_ref)
except (TypeError, ValueError):
# Could not coerce the conversion to use the preferred dtype.
pass
else:
if (ret is not NotImplemented and
ret.dtype.base_dtype != preferred_dtype.base_dtype):
raise TypeError("convert_to_tensor did not convert to "
"the preferred dtype: %s vs %s " %
(ret.dtype.base_dtype, preferred_dtype.base_dtype))
if ret is None:
ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
if ret is NotImplemented:
continue
if not isinstance(ret, accepted_result_types):
raise RuntimeError(
"%sConversion function %r for type %s returned non-Tensor: %r" %
(_error_prefix(name), conversion_func, base_type, ret))
if dtype and not dtype.is_compatible_with(ret.dtype):
raise RuntimeError(
"%sConversion function %r for type %s returned incompatible "
"dtype: requested = %s, actual = %s" %
(_error_prefix(name), conversion_func, base_type, dtype.name,
ret.dtype.name))
return ret
raise TypeError("%sCannot convert %r with type %s to Tensor: "
"no conversion function registered." %
(_error_prefix(name), value, type(value)))
internal_convert_to_tensor = convert_to_tensor
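# Sketch (illustrative; assumes the public `tf.register_tensor_conversion_function`
# API, which populates the registry consulted by the loop above): teaching
# `convert_to_tensor` about a hypothetical wrapper class might look like
#
#   class MyWrapper(object):
#
#     def __init__(self, data):
#       self.data = data
#
#   def _my_wrapper_to_tensor(value, dtype=None, name=None, as_ref=False):
#     del as_ref  # Not supported by this hypothetical wrapper.
#     return convert_to_tensor(value.data, dtype=dtype, name=name)
#
#   tf.register_tensor_conversion_function(MyWrapper, _my_wrapper_to_tensor)
#
# after which `convert_to_tensor(MyWrapper([1, 2]))` succeeds.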
def internal_convert_n_to_tensor(values,
dtype=None,
name=None,
as_ref=False,
preferred_dtype=None,
ctx=None):
"""Converts `values` to a list of `Tensor` objects.
Args:
values: A list of objects that can be consumed by `tf.convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` objects.
    name: (Optional.) A name prefix to use when a new `Tensor` is created, in
which case element `i` will be given the name `name + '_' + i`.
as_ref: True if the caller wants the results as ref tensors.
preferred_dtype: Optional element type for the returned tensors, used when
dtype is None. In some cases, a caller may not have a dtype in mind when
converting to a tensor, so preferred_dtype can be used as a soft
preference. If the conversion to `preferred_dtype` is not possible, this
argument has no effect.
ctx: The value of context.context().
Returns:
A list of `Tensor` and/or `IndexedSlices` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
if not isinstance(values, collections_abc.Sequence):
raise TypeError("values must be a sequence.")
ret = []
if ctx is None:
ctx = context.context()
for i, value in enumerate(values):
n = None if name is None else "%s_%d" % (name, i)
ret.append(
convert_to_tensor(
value,
dtype=dtype,
name=n,
as_ref=as_ref,
preferred_dtype=preferred_dtype,
ctx=ctx))
return ret
def convert_n_to_tensor(values, dtype=None, name=None, preferred_dtype=None):
"""Converts `values` to a list of `Tensor` objects.
Args:
values: A list of objects that can be consumed by `tf.convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` objects.
    name: (Optional.) A name prefix to use when a new `Tensor` is created, in
which case element `i` will be given the name `name + '_' + i`.
preferred_dtype: Optional element type for the returned tensors, used when
dtype is None. In some cases, a caller may not have a dtype in mind when
converting to a tensor, so preferred_dtype can be used as a soft
preference. If the conversion to `preferred_dtype` is not possible, this
argument has no effect.
Returns:
A list of `Tensor` and/or `IndexedSlices` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
return internal_convert_n_to_tensor(
values=values,
dtype=dtype,
name=name,
preferred_dtype=preferred_dtype,
as_ref=False)
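# Sketch (illustrative): each element is converted independently, e.g.
#   ts = convert_n_to_tensor([1, 2.5], name="x")
# produces two tensors; in graph mode the created constants are named "x_0"
# and "x_1", per the `name + '_' + i` convention documented above.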
def convert_to_tensor_or_composite(value, dtype=None, name=None):
"""Converts the given object to a `Tensor` or `CompositeTensor`.
If `value` is a `CompositeTensor` it is returned unmodified. Otherwise, it
is converted to a `Tensor` using `convert_to_tensor()`.
Args:
value: A `CompositeTensor` or an object that can be consumed by
`convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` or
`CompositeTensor`.
name: (Optional.) A name to use if a new `Tensor` is created.
Returns:
A `Tensor` or `CompositeTensor`, based on `value`.
Raises:
ValueError: If `dtype` does not match the element type of `value`.
"""
return internal_convert_to_tensor_or_composite(
value=value, dtype=dtype, name=name, as_ref=False)
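# Sketch (illustrative): `tf.sparse.SparseTensor` is a `CompositeTensor`, so
# it passes through unchanged while plain Python values are converted:
#   sp = tf.sparse.SparseTensor(indices=[[0, 0]], values=[1.0],
#                               dense_shape=[2, 2])
#   convert_to_tensor_or_composite(sp) is sp   # -> True
#   convert_to_tensor_or_composite([1, 2])     # -> a dense Tensor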
def internal_convert_to_tensor_or_composite(value,
dtype=None,
name=None,
as_ref=False):
"""Converts the given object to a `Tensor` or `CompositeTensor`.
If `value` is a `CompositeTensor` it is returned unmodified. Otherwise, it
is converted to a `Tensor` using `convert_to_tensor()`.
Args:
value: A `CompositeTensor`, or an object that can be consumed by
`convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` or
`CompositeTensor`.
name: (Optional.) A name to use if a new `Tensor` is created.
as_ref: True if the caller wants the results as ref tensors.
Returns:
A `Tensor` or `CompositeTensor`, based on `value`.
Raises:
ValueError: If `dtype` does not match the element type of `value`.
"""
if isinstance(value, composite_tensor.CompositeTensor):
value_dtype = getattr(value, "dtype", None)
if dtype and not dtypes.as_dtype(dtype).is_compatible_with(value_dtype):
raise ValueError(
"Tensor conversion requested dtype %s for Tensor with dtype %s: %r" %
(dtypes.as_dtype(dtype).name, value.dtype.name, str(value)))
return value
else:
return convert_to_tensor(
value,
dtype=dtype,
name=name,
as_ref=as_ref,
accepted_result_types=(Tensor, composite_tensor.CompositeTensor))
def internal_convert_n_to_tensor_or_composite(values,
dtype=None,
name=None,
as_ref=False):
"""Converts `values` to a list of `Tensor` or `CompositeTensor` objects.
Any `CompositeTensor` objects in `values` are returned unmodified.
Args:
values: A list of `None`, `CompositeTensor`, or objects that can be consumed
by `convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor`s or
`CompositeTensor`s.
    name: (Optional.) A name prefix to use when a new `Tensor` is created, in
which case element `i` will be given the name `name + '_' + i`.
as_ref: True if the caller wants the results as ref tensors.
Returns:
A list of `Tensor`, `CompositeTensor`, and/or `None` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
if not isinstance(values, collections_abc.Sequence):
raise TypeError("values must be a sequence.")
ret = []
for i, value in enumerate(values):
if value is None:
ret.append(value)
else:
n = None if name is None else "%s_%d" % (name, i)
ret.append(
internal_convert_to_tensor_or_composite(
value, dtype=dtype, name=n, as_ref=as_ref))
return ret
def convert_n_to_tensor_or_composite(values, dtype=None, name=None):
"""Converts `values` to a list of `Output` or `CompositeTensor` objects.
Any `CompositeTensor` objects in `values` are returned unmodified.
Args:
    values: A list of `None`, `CompositeTensor`, or objects that can be
consumed by `convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor`s or
`CompositeTensor`s.
    name: (Optional.) A name prefix to use when a new `Tensor` is created, in
which case element `i` will be given the name `name + '_' + i`.
Returns:
A list of `Tensor` and/or `CompositeTensor` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
return internal_convert_n_to_tensor_or_composite(
values=values, dtype=dtype, name=name, as_ref=False)
def _device_string(dev_spec):
if pydev.is_device_spec(dev_spec):
return dev_spec.to_string()
else:
return dev_spec
def _NodeDef(op_type, name, attrs=None):
"""Create a NodeDef proto.
Args:
op_type: Value for the "op" attribute of the NodeDef proto.
name: Value for the "name" attribute of the NodeDef proto.
attrs: Dictionary where the key is the attribute name (a string)
and the value is the respective "attr" attribute of the NodeDef proto (an
AttrValue).
Returns:
A node_def_pb2.NodeDef protocol buffer.
"""
node_def = node_def_pb2.NodeDef(op=compat.as_bytes(op_type),
name=compat.as_bytes(name))
if attrs:
for k, v in six.iteritems(attrs):
node_def.attr[k].CopyFrom(v)
return node_def
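# Sketch (illustrative; assumes `types_pb2` is importable for the dtype enum):
#   node_def = _NodeDef(
#       "Identity", "my_identity",
#       attrs={"T": attr_value_pb2.AttrValue(type=types_pb2.DT_FLOAT)})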
# Copied from core/framework/node_def_util.cc
# TODO(mrry,josh11b): Consolidate this validation in C++ code.
_VALID_OP_NAME_REGEX = re.compile("^[A-Za-z0-9.][A-Za-z0-9_.\\-/>]*$")
_VALID_SCOPE_NAME_REGEX = re.compile("^[A-Za-z0-9_.\\-/>]*$")
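# For example, "foo/bar_1" and "A.b-c" match _VALID_OP_NAME_REGEX, while names
# beginning with "_" or "/" do not (the first character must be alphanumeric
# or ".").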
def _create_c_op(graph, node_def, inputs, control_inputs, op_def=None):
"""Creates a TF_Operation.
Args:
graph: a `Graph`.
node_def: `node_def_pb2.NodeDef` for the operation to create.
inputs: A flattened list of `Tensor`s. This function handles grouping
tensors into lists as per attributes in the `node_def`.
control_inputs: A list of `Operation`s to set as control dependencies.
op_def: Optional. `op_def_pb2.OpDef` for the operation to create. If not
specified, is looked up from the `graph` using `node_def.op`.
Returns:
A wrapped TF_Operation*.
"""
if op_def is None:
op_def = graph._get_op_def(node_def.op) # pylint: disable=protected-access
# TODO(skyewm): op_def_library.apply_op() flattens the incoming inputs.
# Refactor so we don't have to do this here.
inputs = _reconstruct_sequence_inputs(op_def, inputs, node_def.attr)
# pylint: disable=protected-access
op_desc = pywrap_tf_session.TF_NewOperation(graph._c_graph,
compat.as_str(node_def.op),
compat.as_str(node_def.name))
if node_def.device:
pywrap_tf_session.TF_SetDevice(op_desc, compat.as_str(node_def.device))
# Add inputs
for op_input in inputs:
if isinstance(op_input, (list, tuple)):
pywrap_tf_session.TF_AddInputList(op_desc,
[t._as_tf_output() for t in op_input])
else:
pywrap_tf_session.TF_AddInput(op_desc, op_input._as_tf_output())
# Add control inputs
for control_input in control_inputs:
pywrap_tf_session.TF_AddControlInput(op_desc, control_input._c_op)
# pylint: enable=protected-access
# Add attrs
for name, attr_value in node_def.attr.items():
serialized = attr_value.SerializeToString()
# TODO(skyewm): this creates and deletes a new TF_Status for every attr.
# It might be worth creating a convenient way to re-use the same status.
pywrap_tf_session.TF_SetAttrValueProto(op_desc, compat.as_str(name),
serialized)
try:
c_op = pywrap_tf_session.TF_FinishOperation(op_desc)
except errors.InvalidArgumentError as e:
# Convert to ValueError for backwards compatibility.
raise ValueError(str(e))
return c_op
@tf_export("Operation")
class Operation(object):
"""Represents a graph node that performs computation on tensors.
An `Operation` is a node in a `tf.Graph` that takes zero or more `Tensor`
objects as input, and produces zero or more `Tensor` objects as output.
Objects of type `Operation` are created by calling a Python op constructor
(such as `tf.matmul`) within a `tf.function` or under a `tf.Graph.as_default`
context manager.
For example, within a `tf.function`, `c = tf.matmul(a, b)` creates an
`Operation` of type "MatMul" that takes tensors `a` and `b` as input, and
produces `c` as output.
If a `tf.compat.v1.Session` is used, an `Operation` of a `tf.Graph` can be
executed by passing it to `tf.Session.run`. `op.run()` is a shortcut for
calling `tf.compat.v1.get_default_session().run(op)`.
"""
def __init__(self,
node_def,
g,
inputs=None,
output_types=None,
control_inputs=None,
input_types=None,
original_op=None,
op_def=None):
r"""Creates an `Operation`.
NOTE: This constructor validates the name of the `Operation` (passed
as `node_def.name`). Valid `Operation` names match the following
regular expression:
      [A-Za-z0-9.][A-Za-z0-9_.\\-/>]*
Args:
node_def: `node_def_pb2.NodeDef`. `NodeDef` for the `Operation`. Used for
attributes of `node_def_pb2.NodeDef`, typically `name`, `op`, and
`device`. The `input` attribute is irrelevant here as it will be
computed when generating the model.
g: `Graph`. The parent graph.
inputs: list of `Tensor` objects. The inputs to this `Operation`.
output_types: list of `DType` objects. List of the types of the `Tensors`
computed by this operation. The length of this list indicates the
number of output endpoints of the `Operation`.
control_inputs: list of operations or tensors from which to have a control
dependency.
input_types: List of `DType` objects representing the types of the tensors
accepted by the `Operation`. By default uses `[x.dtype.base_dtype for x
in inputs]`. Operations that expect reference-typed inputs must specify
these explicitly.
original_op: Optional. Used to associate the new `Operation` with an
existing `Operation` (for example, a replica with the op that was
replicated).
op_def: Optional. The `op_def_pb2.OpDef` proto that describes the op type
that this `Operation` represents.
Raises:
TypeError: if control inputs are not Operations or Tensors,
or if `node_def` is not a `NodeDef`,
or if `g` is not a `Graph`,
or if `inputs` are not tensors,
or if `inputs` and `input_types` are incompatible.
ValueError: if the `node_def` name is not valid.
"""
# For internal use only: `node_def` can be set to a TF_Operation to create
# an Operation for that op. This is useful for creating Operations for ops
# indirectly created by C API methods, e.g. the ops created by
# TF_ImportGraphDef. When `node_def` is a TF_Operation, all optional fields
# should be None.
if isinstance(node_def, node_def_pb2.NodeDef):
if node_def.ByteSize() >= (1 << 31) or node_def.ByteSize() < 0:
raise ValueError(
"Cannot create a tensor proto whose content is larger than 2GB.")
if not _VALID_OP_NAME_REGEX.match(node_def.name):
raise ValueError("'%s' is not a valid node name" % node_def.name)
c_op = None
elif type(node_def).__name__ == "TF_Operation":
assert inputs is None
assert output_types is None
assert control_inputs is None
assert input_types is None
assert original_op is None
assert op_def is None
c_op = node_def
else:
raise TypeError("node_def needs to be a NodeDef: %s" % node_def)
if not isinstance(g, Graph):
raise TypeError("g needs to be a Graph: %s" % g)
self._graph = g
if inputs is None:
inputs = []
elif not isinstance(inputs, list):
raise TypeError("inputs needs to be a list of Tensors: %s" % inputs)
for a in inputs:
if not isinstance(a, Tensor):
raise TypeError("input needs to be a Tensor: %s" % a)
if input_types is None:
input_types = [i.dtype.base_dtype for i in inputs]
else:
if not all(
x.is_compatible_with(i.dtype) for i, x in zip(inputs, input_types)):
raise TypeError("In op '%s', input types (%s) are not compatible "
"with expected types (%s)" %
(node_def.name, [i.dtype for i in inputs], input_types))
# Build the list of control inputs.
control_input_ops = []
if control_inputs:
for c in control_inputs:
control_op = None
if isinstance(c, Operation):
control_op = c
elif isinstance(c, (Tensor, IndexedSlices)):
control_op = c.op
else:
raise TypeError("Control input must be an Operation, "
"a Tensor, or IndexedSlices: %s" % c)
control_input_ops.append(control_op)
# This will be set by self.inputs.
self._inputs_val = None
# pylint: disable=protected-access
self._original_op = original_op
self._traceback = tf_stack.extract_stack()
# List of _UserDevSpecs holding code location of device context manager
# invocations and the users original argument to them.
self._device_code_locations = None
# Dict mapping op name to file and line information for op colocation
# context managers.
self._colocation_code_locations = None
self._control_flow_context = self.graph._get_control_flow_context()
# Gradient function for this op. There are three ways to specify gradient
# function, and first available gradient gets used, in the following order.
# 1. self._gradient_function
# 2. Gradient name registered by "_gradient_op_type" attribute.
# 3. Gradient name registered by op.type.
self._gradient_function = None
# Initialize self._c_op.
if c_op:
self._c_op = c_op
op_def = g._get_op_def(pywrap_tf_session.TF_OperationOpType(c_op))
name = self.name
else:
if op_def is None:
op_def = self._graph._get_op_def(node_def.op)
self._c_op = _create_c_op(self._graph, node_def, inputs,
control_input_ops, op_def)
name = compat.as_str(node_def.name)
# pylint: enable=protected-access
self._is_stateful = op_def.is_stateful
# Initialize self._outputs.
num_outputs = pywrap_tf_session.TF_OperationNumOutputs(self._c_op)
self._outputs = []
for i in range(num_outputs):
tf_output = c_api_util.tf_output(self._c_op, i)
output_type = pywrap_tf_session.TF_OperationOutputType(tf_output)
tensor = Tensor._create_with_tf_output(self, i, output_type, tf_output) # pylint: disable=protected-access
self._outputs.append(tensor)
self._id_value = self._graph._add_op(self, name) # pylint: disable=protected-access
if not c_op:
self._control_flow_post_processing(input_tensors=inputs)
def _control_flow_post_processing(self, input_tensors=None):
"""Add this op to its control flow context.
This may add new ops and change this op's inputs. self.inputs must be
available before calling this method.
Args:
input_tensors: (Optional.) A list of `Tensors` corresponding to the inputs
of this op, which should be equivalent to `self.inputs`. Pass this
argument to avoid evaluating `self.inputs` unnecessarily.
"""
if input_tensors is None:
input_tensors = self.inputs
for input_tensor in input_tensors:
control_flow_util.CheckInputFromValidContext(self, input_tensor.op)
if self._control_flow_context is not None:
self._control_flow_context.AddOp(self)
def colocation_groups(self):
"""Returns the list of colocation groups of the op."""
default_colocation_group = [compat.as_bytes("loc:@%s" % self.name)]
try:
class_attr = self.get_attr("_class")
except ValueError:
# This op has no explicit colocation group, so it is itself its
# own root of a colocation group.
return default_colocation_group
attr_groups = [
class_name for class_name in class_attr
if class_name.startswith(b"loc:@")
]
# If there are no colocation groups in the explicit _class field,
# return the default colocation group.
return attr_groups if attr_groups else default_colocation_group
def values(self):
"""DEPRECATED: Use outputs."""
return tuple(self.outputs)
def _get_control_flow_context(self):
"""Returns the control flow context of this op.
Returns:
A context object.
"""
return self._control_flow_context
def _set_control_flow_context(self, ctx):
"""Sets the current control flow context of this op.
Args:
ctx: a context object.
"""
self._control_flow_context = ctx
@property
def name(self):
"""The full name of this operation."""
return pywrap_tf_session.TF_OperationName(self._c_op)
@property
def _id(self):
"""The unique integer id of this operation."""
return self._id_value
@property
def device(self):
"""The name of the device to which this op has been assigned, if any.
Returns:
The string name of the device to which this op has been
assigned, or an empty string if it has not been assigned to a
device.
"""
return pywrap_tf_session.TF_OperationDevice(self._c_op)
@property
def _device_assignments(self):
"""Code locations for device context managers active at op creation.
This property will return a list of traceable_stack.TraceableObject
instances where .obj is a string representing the assigned device
(or information about the function that would be applied to this op
to compute the desired device) and the filename and lineno members
record the location of the relevant device context manager.
For example, suppose file_a contained these lines:
file_a.py:
15: with tf.device('/gpu:0'):
16: node_b = tf.constant(4, name='NODE_B')
Then a TraceableObject t_obj representing the device context manager
would have these member values:
t_obj.obj -> '/gpu:0'
t_obj.filename = 'file_a.py'
t_obj.lineno = 15
and node_b.op._device_assignments would return the list [t_obj].
Returns:
[str: traceable_stack.TraceableObject, ...] as per this method's
description, above.
"""
return self._device_code_locations or []
@property
def _colocation_dict(self):
"""Code locations for colocation context managers active at op creation.
This property will return a dictionary for which the keys are nodes with
which this Operation is colocated, and for which the values are
traceable_stack.TraceableObject instances. The TraceableObject instances
record the location of the relevant colocation context manager but have the
"obj" field set to None to prevent leaking private data.
For example, suppose file_a contained these lines:
file_a.py:
14: node_a = tf.constant(3, name='NODE_A')
15: with tf.compat.v1.colocate_with(node_a):
16: node_b = tf.constant(4, name='NODE_B')
Then a TraceableObject t_obj representing the colocation context manager
would have these member values:
t_obj.obj -> None
t_obj.filename = 'file_a.py'
t_obj.lineno = 15
and node_b.op._colocation_dict would return the dictionary
{ 'NODE_A': t_obj }
Returns:
{str: traceable_stack.TraceableObject} as per this method's description,
above.
"""
locations_dict = self._colocation_code_locations or {}
return locations_dict.copy()
@property
def _output_types(self):
"""List this operation's output types.
Returns:
List of the types of the Tensors computed by this operation.
Each element in the list is an integer whose value is one of
the TF_DataType enums defined in pywrap_tf_session.h
The length of this list indicates the number of output endpoints
of the operation.
"""
num_outputs = pywrap_tf_session.TF_OperationNumOutputs(self._c_op)
output_types = [
int(pywrap_tf_session.TF_OperationOutputType(self._tf_output(i)))
for i in xrange(num_outputs)
]
return output_types
def _tf_output(self, output_idx):
"""Create and return a new TF_Output for output_idx'th output of this op."""
tf_output = pywrap_tf_session.TF_Output()
tf_output.oper = self._c_op
tf_output.index = output_idx
return tf_output
def _tf_input(self, input_idx):
"""Create and return a new TF_Input for input_idx'th input of this op."""
tf_input = pywrap_tf_session.TF_Input()
tf_input.oper = self._c_op
tf_input.index = input_idx
return tf_input
def _set_device(self, device): # pylint: disable=redefined-outer-name
"""Set the device of this operation.
Args:
      device: string or device. The device to set.
"""
self._set_device_from_string(compat.as_str(_device_string(device)))
def _set_device_from_string(self, device_str):
"""Fast path to set device if the type is known to be a string.
This function is called frequently enough during graph construction that
there are non-trivial performance gains if the caller can guarantee that
the specified device is already a string.
Args:
device_str: A string specifying where to place this op.
"""
pywrap_tf_session.SetRequestedDevice(
self._graph._c_graph, # pylint: disable=protected-access
self._c_op, # pylint: disable=protected-access
device_str)
def _update_input(self, index, tensor):
"""Update the input to this operation at the given index.
NOTE: This is for TF internal use only. Please don't use it.
Args:
index: the index of the input to update.
tensor: the Tensor to be used as the input at the given index.
Raises:
TypeError: if tensor is not a Tensor,
or if input tensor type is not convertible to dtype.
ValueError: if the Tensor is from a different graph.
"""
if not isinstance(tensor, Tensor):
raise TypeError("tensor must be a Tensor: %s" % tensor)
_assert_same_graph(self, tensor)
# Reset cached inputs.
self._inputs_val = None
pywrap_tf_session.UpdateEdge(
self._graph._c_graph, # pylint: disable=protected-access
tensor._as_tf_output(), # pylint: disable=protected-access
self._tf_input(index))
def _add_while_inputs(self, tensors):
"""See AddWhileInputHack in python_api.h.
NOTE: This is for TF internal use only. Please don't use it.
Args:
tensors: list of Tensors
Raises:
TypeError: if tensor is not a Tensor,
or if input tensor type is not convertible to dtype.
ValueError: if the Tensor is from a different graph.
"""
for tensor in tensors:
if not isinstance(tensor, Tensor):
raise TypeError("tensor must be a Tensor: %s" % tensor)
_assert_same_graph(self, tensor)
# Reset cached inputs.
self._inputs_val = None
pywrap_tf_session.AddWhileInputHack(
self._graph._c_graph, # pylint: disable=protected-access
tensor._as_tf_output(), # pylint: disable=protected-access
self._c_op)
def _add_control_inputs(self, ops):
"""Add a list of new control inputs to this operation.
Args:
ops: the list of Operations to add as control input.
Raises:
TypeError: if ops is not a list of Operations.
ValueError: if any op in ops is from a different graph.
"""
for op in ops:
if not isinstance(op, Operation):
raise TypeError("op must be an Operation: %s" % op)
pywrap_tf_session.AddControlInput(
self._graph._c_graph, # pylint: disable=protected-access
self._c_op, # pylint: disable=protected-access
op._c_op) # pylint: disable=protected-access
def _add_control_input(self, op):
"""Add a new control input to this operation.
Args:
op: the Operation to add as control input.
Raises:
TypeError: if op is not an Operation.
ValueError: if op is from a different graph.
"""
if not isinstance(op, Operation):
raise TypeError("op must be an Operation: %s" % op)
pywrap_tf_session.AddControlInput(
self._graph._c_graph, # pylint: disable=protected-access
self._c_op, # pylint: disable=protected-access
op._c_op) # pylint: disable=protected-access
def _remove_all_control_inputs(self):
"""Removes any control inputs to this operation."""
pywrap_tf_session.RemoveAllControlInputs(self._graph._c_graph, self._c_op) # pylint: disable=protected-access
def _add_outputs(self, types, shapes):
"""Adds new Tensors to self.outputs.
Note: this is generally unsafe to use. This is used in certain situations in
conjunction with _set_type_list_attr.
    Args:
types: list of DTypes
shapes: list of TensorShapes
"""
assert len(types) == len(shapes)
orig_num_outputs = len(self.outputs)
for i in range(len(types)):
t = Tensor(self, orig_num_outputs + i, types[i])
self._outputs.append(t)
t.set_shape(shapes[i])
def __str__(self):
return str(self.node_def)
def __repr__(self):
return "<tf.Operation '%s' type=%s>" % (self.name, self.type)
@property
def outputs(self):
"""The list of `Tensor` objects representing the outputs of this op."""
return self._outputs
@property
def inputs(self):
"""The sequence of `Tensor` objects representing the data inputs of this op."""
if self._inputs_val is None:
# pylint: disable=protected-access
self._inputs_val = tuple(
map(self.graph._get_tensor_by_tf_output,
pywrap_tf_session.GetOperationInputs(self._c_op)))
# pylint: enable=protected-access
return self._inputs_val
@property
def _input_types(self):
num_inputs = pywrap_tf_session.TF_OperationNumInputs(self._c_op)
input_types = [
dtypes.as_dtype(
pywrap_tf_session.TF_OperationInputType(self._tf_input(i)))
for i in xrange(num_inputs)
]
return input_types
@property
def control_inputs(self):
"""The `Operation` objects on which this op has a control dependency.
Before this op is executed, TensorFlow will ensure that the
operations in `self.control_inputs` have finished executing. This
mechanism can be used to run ops sequentially for performance
reasons, or to ensure that the side effects of an op are observed
in the correct order.
Returns:
A list of `Operation` objects.
"""
control_c_ops = pywrap_tf_session.TF_OperationGetControlInputs_wrapper(
self._c_op)
# pylint: disable=protected-access
return [
self.graph._get_operation_by_name_unsafe(
pywrap_tf_session.TF_OperationName(c_op)) for c_op in control_c_ops
]
# pylint: enable=protected-access
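  # Sketch (illustrative): control inputs are usually established with the
  # graph-level `control_dependencies` context manager rather than set
  # directly:
  #   with g.control_dependencies([init_op]):
  #     train_op = ...  # `init_op` appears in `train_op.control_inputs`.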
@property
def _control_outputs(self):
"""The `Operation` objects which have a control dependency on this op.
    Before any of the ops in `self._control_outputs` can execute, TensorFlow
    will ensure `self` has finished executing.
Returns:
A list of `Operation` objects.
"""
control_c_ops = pywrap_tf_session.TF_OperationGetControlOutputs_wrapper(
self._c_op)
# pylint: disable=protected-access
return [
self.graph._get_operation_by_name_unsafe(
pywrap_tf_session.TF_OperationName(c_op)) for c_op in control_c_ops
]
# pylint: enable=protected-access
@property
def type(self):
"""The type of the op (e.g. `"MatMul"`)."""
return pywrap_tf_session.TF_OperationOpType(self._c_op)
@property
def graph(self):
"""The `Graph` that contains this operation."""
return self._graph
@property
def node_def(self):
# pylint: disable=line-too-long
"""Returns the `NodeDef` representation of this operation.
Returns:
A
[`NodeDef`](https://www.tensorflow.org/code/tensorflow/core/framework/node_def.proto)
protocol buffer.
"""
# pylint: enable=line-too-long
with c_api_util.tf_buffer() as buf:
pywrap_tf_session.TF_OperationToNodeDef(self._c_op, buf)
data = pywrap_tf_session.TF_GetBuffer(buf)
node_def = node_def_pb2.NodeDef()
node_def.ParseFromString(compat.as_bytes(data))
return node_def
@property
def op_def(self):
# pylint: disable=line-too-long
"""Returns the `OpDef` proto that represents the type of this op.
Returns:
An
[`OpDef`](https://www.tensorflow.org/code/tensorflow/core/framework/op_def.proto)
protocol buffer.
"""
# pylint: enable=line-too-long
return self._graph._get_op_def(self.type)
@property
def traceback(self):
"""Returns the call stack from when this operation was constructed."""
return self._traceback
def _set_attr(self, attr_name, attr_value):
"""Private method used to set an attribute in the node_def."""
buf = pywrap_tf_session.TF_NewBufferFromString(
compat.as_bytes(attr_value.SerializeToString()))
try:
self._set_attr_with_buf(attr_name, buf)
finally:
pywrap_tf_session.TF_DeleteBuffer(buf)
def _set_attr_with_buf(self, attr_name, attr_buf):
"""Set an attr in the node_def with a pre-allocated buffer."""
# pylint: disable=protected-access
pywrap_tf_session.SetAttr(self._graph._c_graph, self._c_op, attr_name,
attr_buf)
# pylint: enable=protected-access
def _set_func_attr(self, attr_name, func_name):
"""Private method used to set a function attribute in the node_def."""
func = attr_value_pb2.NameAttrList(name=func_name)
self._set_attr(attr_name, attr_value_pb2.AttrValue(func=func))
def _set_func_list_attr(self, attr_name, func_names):
"""Private method used to set a list(function) attribute in the node_def."""
funcs = [attr_value_pb2.NameAttrList(name=func_name)
for func_name in func_names]
funcs_list = attr_value_pb2.AttrValue.ListValue(func=funcs)
self._set_attr(attr_name, attr_value_pb2.AttrValue(list=funcs_list))
def _set_type_list_attr(self, attr_name, types):
"""Private method used to set a list(type) attribute in the node_def."""
if not types:
return
if isinstance(types[0], dtypes.DType):
types = [dt.as_datatype_enum for dt in types]
types_list = attr_value_pb2.AttrValue.ListValue(type=types)
self._set_attr(attr_name, attr_value_pb2.AttrValue(list=types_list))
def _set_shape_list_attr(self, attr_name, shapes):
"""Private method used to set a list(shape) attribute in the node_def."""
shapes = [s.as_proto() for s in shapes]
shapes_list = attr_value_pb2.AttrValue.ListValue(shape=shapes)
self._set_attr(attr_name, attr_value_pb2.AttrValue(list=shapes_list))
def _clear_attr(self, attr_name):
"""Private method used to clear an attribute in the node_def."""
# pylint: disable=protected-access
pywrap_tf_session.ClearAttr(self._graph._c_graph, self._c_op, attr_name)
# pylint: enable=protected-access
def get_attr(self, name):
"""Returns the value of the attr of this op with the given `name`.
Args:
name: The name of the attr to fetch.
Returns:
The value of the attr, as a Python object.
Raises:
ValueError: If this op does not have an attr with the given `name`.
"""
fields = ("s", "i", "f", "b", "type", "shape", "tensor", "func")
try:
with c_api_util.tf_buffer() as buf:
pywrap_tf_session.TF_OperationGetAttrValueProto(self._c_op, name, buf)
data = pywrap_tf_session.TF_GetBuffer(buf)
except errors.InvalidArgumentError as e:
# Convert to ValueError for backwards compatibility.
raise ValueError(str(e))
x = attr_value_pb2.AttrValue()
x.ParseFromString(data)
oneof_value = x.WhichOneof("value")
if oneof_value is None:
return []
if oneof_value == "list":
for f in fields:
if getattr(x.list, f):
if f == "type":
return [dtypes.as_dtype(t) for t in x.list.type]
else:
return list(getattr(x.list, f))
return []
if oneof_value == "type":
return dtypes.as_dtype(x.type)
assert oneof_value in fields, "Unsupported field type in " + str(x)
return getattr(x, oneof_value)
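  # Sketch (illustrative): for a "MatMul" op, `op.get_attr("transpose_a")`
  # returns a Python bool and `op.get_attr("T")` returns a `tf.DType`, per
  # the oneof dispatch above.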
def _get_attr_type(self, name):
"""Returns the `DType` value of the attr of this op with the given `name`."""
try:
dtype_enum = pywrap_tf_session.TF_OperationGetAttrType(self._c_op, name)
return _DTYPES_INTERN_TABLE[dtype_enum]
except errors.InvalidArgumentError as e:
# Convert to ValueError for backwards compatibility.
raise ValueError(str(e))
def _get_attr_bool(self, name):
"""Returns the `bool` value of the attr of this op with the given `name`."""
try:
return pywrap_tf_session.TF_OperationGetAttrBool(self._c_op, name)
except errors.InvalidArgumentError as e:
# Convert to ValueError for backwards compatibility.
raise ValueError(str(e))
def _get_attr_int(self, name):
"""Returns the `int` value of the attr of this op with the given `name`."""
try:
return pywrap_tf_session.TF_OperationGetAttrInt(self._c_op, name)
except errors.InvalidArgumentError as e:
# Convert to ValueError for backwards compatibility.
raise ValueError(str(e))
def run(self, feed_dict=None, session=None):
"""Runs this operation in a `Session`.
Calling this method will execute all preceding operations that
produce the inputs needed for this operation.
*N.B.* Before invoking `Operation.run()`, its graph must have been
launched in a session, and either a default session must be
available, or `session` must be specified explicitly.
Args:
feed_dict: A dictionary that maps `Tensor` objects to feed values. See
`tf.Session.run` for a description of the valid feed values.
      session: (Optional.) The `Session` to be used to run this operation. If
none, the default session will be used.
"""
_run_using_default_session(self, feed_dict, self.graph, session)
_gradient_registry = registry.Registry("gradient")
@tf_export("RegisterGradient")
class RegisterGradient(object):
"""A decorator for registering the gradient function for an op type.
This decorator is only used when defining a new op type. For an op
with `m` inputs and `n` outputs, the gradient function is a function
that takes the original `Operation` and `n` `Tensor` objects
(representing the gradients with respect to each output of the op),
and returns `m` `Tensor` objects (representing the partial gradients
with respect to each input of the op).
For example, assuming that operations of type `"Sub"` take two
inputs `x` and `y`, and return a single output `x - y`, the
following gradient function would be registered:
```python
@tf.RegisterGradient("Sub")
def _sub_grad(unused_op, grad):
return grad, tf.negative(grad)
```
The decorator argument `op_type` is the string type of an
operation. This corresponds to the `OpDef.name` field for the proto
that defines the operation.
"""
def __init__(self, op_type):
"""Creates a new decorator with `op_type` as the Operation type.
Args:
op_type: The string type of an operation. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
Raises:
TypeError: If `op_type` is not string.
"""
if not isinstance(op_type, six.string_types):
raise TypeError("op_type must be a string")
self._op_type = op_type
def __call__(self, f):
"""Registers the function `f` as gradient function for `op_type`."""
_gradient_registry.register(f, self._op_type)
return f
@deprecation.deprecated_endpoints("NotDifferentiable", "NoGradient")
@tf_export("no_gradient", v1=["no_gradient", "NotDifferentiable", "NoGradient"])
def no_gradient(op_type):
"""Specifies that ops of type `op_type` is not differentiable.
This function should *not* be used for operations that have a
well-defined gradient that is not yet implemented.
This function is only used when defining a new op type. It may be
used for ops such as `tf.size()` that are not differentiable. For
example:
```python
tf.no_gradient("Size")
```
The gradient computed for 'op_type' will then propagate zeros.
For ops that have a well-defined gradient but are not yet implemented,
no declaration should be made, and an error *must* be thrown if
an attempt to request its gradient is made.
Args:
op_type: The string type of an operation. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
Raises:
TypeError: If `op_type` is not a string.
"""
if not isinstance(op_type, six.string_types):
raise TypeError("op_type must be a string")
_gradient_registry.register(None, op_type)
# Aliases for the old names, will be eventually removed.
NoGradient = no_gradient
NotDifferentiable = no_gradient
def get_gradient_function(op):
"""Returns the function that computes gradients for "op"."""
if not op.inputs:
return None
gradient_function = op._gradient_function # pylint: disable=protected-access
if gradient_function:
return gradient_function
try:
op_type = op.get_attr("_gradient_op_type")
except ValueError:
op_type = op.type
return _gradient_registry.lookup(op_type)
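# Sketch (illustrative): the "_gradient_op_type" attribute consulted above is
# what `tf.compat.v1.Graph.gradient_override_map` sets:
#   with g.gradient_override_map({"Square": "CustomSquareGrad"}):
#     y = tf.square(x)
# Gradient lookup for `y.op` then uses the function registered under
# "CustomSquareGrad" instead of the one registered for "Square".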
def set_shape_and_handle_data_for_outputs(_):
"""No op. TODO(b/74620627): Remove this."""
pass
class OpStats(object):
"""A holder for statistics about an operator.
This class holds information about the resource requirements for an op,
including the size of its weight parameters on-disk and how many FLOPS it
requires to execute forward inference.
If you define a new operation, you can create a function that will return a
set of information about its usage of the CPU and disk space when serialized.
The function itself takes a Graph object that's been set up so you can call
methods like get_tensor_by_name to help calculate the results, and a NodeDef
argument.
"""
def __init__(self, statistic_type, value=None):
"""Sets up the initial placeholders for the statistics."""
self.statistic_type = statistic_type
self.value = value
@property
def statistic_type(self):
return self._statistic_type
@statistic_type.setter
def statistic_type(self, statistic_type):
self._statistic_type = statistic_type
@property
def value(self):
return self._value
@value.setter
def value(self, value):
self._value = value
def __iadd__(self, other):
if other.statistic_type != self.statistic_type:
raise ValueError("Can't add an OpStat of type %s to one of %s." %
(self.statistic_type, other.statistic_type))
if self.value is None:
self.value = other.value
elif other.value is not None:
self._value += other.value
return self
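  # Sketch (illustrative): accumulating same-typed statistics.
  #   total = OpStats("flops", 0)
  #   total += OpStats("flops", 128)   # total.value == 128
  #   total += OpStats("flops", None)  # None contributions leave value as-is.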
_stats_registry = registry.Registry("statistical functions")
class RegisterStatistics(object):
"""A decorator for registering the statistics function for an op type.
This decorator can be defined for an op type so that it gives a
report on the resources used by an instance of an operator, in the
form of an OpStats object.
Well-known types of statistics include these so far:
- flops: When running a graph, the bulk of the computation happens doing
numerical calculations like matrix multiplications. This type allows a node
to return how many floating-point operations it takes to complete. The
total number of FLOPs for a graph is a good guide to its expected latency.
You can add your own statistics just by picking a new type string, registering
functions for the ops you care about, and then calling get_stats_for_node_def.
If a statistic for an op is registered multiple times, a KeyError will be
raised.
  Since statistics are counted on a per-op basis, this mechanism is not
  suitable for model parameters (capacity), which are expected to be counted
  only once even when shared by multiple ops (e.g. in an RNN).
For example, you can define a new metric called doohickey for a Foo operation
by placing this in your code:
```python
@ops.RegisterStatistics("Foo", "doohickey")
def _calc_foo_bojangles(unused_graph, unused_node_def):
return ops.OpStats("doohickey", 20)
```
Then in client code you can retrieve the value by making this call:
```python
doohickey = ops.get_stats_for_node_def(graph, node_def, "doohickey")
```
If the NodeDef is for an op with a registered doohickey function, you'll get
back the calculated amount in doohickey.value, or None if it's not defined.
"""
def __init__(self, op_type, statistic_type):
"""Saves the `op_type` as the `Operation` type."""
if not isinstance(op_type, six.string_types):
raise TypeError("op_type must be a string.")
if "," in op_type:
raise TypeError("op_type must not contain a comma.")
self._op_type = op_type
if not isinstance(statistic_type, six.string_types):
raise TypeError("statistic_type must be a string.")
if "," in statistic_type:
raise TypeError("statistic_type must not contain a comma.")
self._statistic_type = statistic_type
def __call__(self, f):
"""Registers "f" as the statistics function for "op_type"."""
_stats_registry.register(f, self._op_type + "," + self._statistic_type)
return f
def get_stats_for_node_def(graph, node, statistic_type):
"""Looks up the node's statistics function in the registry and calls it.
This function takes a Graph object and a NodeDef from a GraphDef, and if
there's an associated statistics method, calls it and returns a result. If no
function has been registered for the particular node type, it returns an empty
statistics object.
Args:
graph: A Graph object that's been set up with the node's graph.
node: A NodeDef describing the operator.
statistic_type: A string identifying the statistic we're interested in.
Returns:
An OpStats object containing information about resource usage.
"""
try:
stats_func = _stats_registry.lookup(node.op + "," + statistic_type)
result = stats_func(graph, node)
except LookupError:
result = OpStats(statistic_type)
return result
def name_from_scope_name(name):
"""Returns the name of an op given the name of its scope.
Args:
name: the name of the scope.
Returns:
the name of the op (equal to scope name minus any trailing slash).
"""
return name[:-1] if (name and name[-1] == "/") else name
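# For example, name_from_scope_name("foo/bar/") returns "foo/bar" and
# name_from_scope_name("foo") returns "foo".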
_MUTATION_LOCK_GROUP = 0
_SESSION_RUN_LOCK_GROUP = 1
@tf_export("Graph")
class Graph(object):
"""A TensorFlow computation, represented as a dataflow graph.
Graphs are used by `tf.function`s to represent the function's computations.
Each graph contains a set of `tf.Operation` objects, which represent units of
computation; and `tf.Tensor` objects, which represent the units of data that
flow between operations.
### Using graphs directly (deprecated)
A `tf.Graph` can be constructed and used directly without a `tf.function`, as
was required in TensorFlow 1, but this is deprecated and it is recommended to
use a `tf.function` instead. If a graph is directly used, other deprecated
TensorFlow 1 classes are also required to execute the graph, such as a
`tf.compat.v1.Session`.
A default graph can be registered with the `tf.Graph.as_default` context
manager. Then, operations will be added to the graph instead of being executed
eagerly. For example:
```python
g = tf.Graph()
with g.as_default():
# Define operations and tensors in `g`.
c = tf.constant(30.0)
assert c.graph is g
```
`tf.compat.v1.get_default_graph()` can be used to obtain the default graph.
Important note: This class *is not* thread-safe for graph construction. All
operations should be created from a single thread, or external
synchronization must be provided. Unless otherwise specified, all methods
are not thread-safe.
A `Graph` instance supports an arbitrary number of "collections"
that are identified by name. For convenience when building a large
  graph, collections can store groups of related objects: for
  example, all variables created during the construction of a graph are added
  to the collection named `tf.GraphKeys.GLOBAL_VARIABLES`. The caller
  may define additional collections by specifying a new name.
"""
def __init__(self):
"""Creates a new, empty Graph."""
# Protects core state that can be returned via public accessors.
# Thread-safety is provided on a best-effort basis to support buggy
# programs, and is not guaranteed by the public `tf.Graph` API.
#
# NOTE(mrry): This does not protect the various stacks. A warning will
    # be reported if these are used from multiple threads.
self._lock = threading.RLock()
# The group lock synchronizes Session.run calls with methods that create
# and mutate ops (e.g. Graph.create_op()). This synchronization is
# necessary because it's illegal to modify an operation after it's been run.
# The group lock allows any number of threads to mutate ops at the same time
# but if any modification is going on, all Session.run calls have to wait.
# Similarly, if one or more Session.run calls are going on, all mutate ops
# have to wait until all Session.run calls have finished.
self._group_lock = lock_util.GroupLock(num_groups=2)
self._nodes_by_id = {} # GUARDED_BY(self._lock)
self._next_id_counter = 0 # GUARDED_BY(self._lock)
self._nodes_by_name = {} # GUARDED_BY(self._lock)
self._version = 0 # GUARDED_BY(self._lock)
# Maps a name used in the graph to the next id to use for that name.
self._names_in_use = {}
self._stack_state_is_thread_local = False
self._thread_local = threading.local()
# Functions that will be applied to choose a device if none is specified.
# In TF2.x or after switch_to_thread_local(),
# self._thread_local._device_function_stack is used instead.
self._graph_device_function_stack = traceable_stack.TraceableStack()
# Default original_op applied to new ops.
self._default_original_op = None
# Current control flow context. It could be either CondContext or
# WhileContext defined in ops/control_flow_ops.py
self._control_flow_context = None
    # A new node will depend on the union of all of the nodes in the stack.
# In TF2.x or after switch_to_thread_local(),
# self._thread_local._control_dependencies_stack is used instead.
self._graph_control_dependencies_stack = []
# Arbitrary collections of objects.
self._collections = {}
# The graph-level random seed
self._seed = None
# A dictionary of attributes that should be applied to all ops.
self._attr_scope_map = {}
# A map from op type to the kernel label that should be used.
self._op_to_kernel_label_map = {}
# A map from op type to an alternative op type that should be used when
# computing gradients.
self._gradient_override_map = {}
# A map from op type to a gradient function that should be used instead.
self._gradient_function_map = {}
# True if the graph is considered "finalized". In that case no
# new operations can be added.
self._finalized = False
# Functions defined in the graph
self._functions = collections.OrderedDict()
# Default GraphDef versions
self._graph_def_versions = versions_pb2.VersionDef(
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER)
self._building_function = False
# Stack of colocate_with ops. In TF2.x or after switch_to_thread_local(),
# self._thread_local._colocation_stack is used instead.
self._graph_colocation_stack = traceable_stack.TraceableStack()
# Set of tensors that are dangerous to feed!
self._unfeedable_tensors = object_identity.ObjectIdentitySet()
# Set of operations that are dangerous to fetch!
self._unfetchable_ops = set()
# A map of tensor handle placeholder to tensor dtype.
self._handle_feeders = {}
# A map from tensor handle to its read op.
self._handle_readers = {}
# A map from tensor handle to its move op.
self._handle_movers = {}
# A map from tensor handle to its delete op.
self._handle_deleters = {}
# Allow optimizers and other objects to pseudo-uniquely key graphs (this key
# will be shared when defining function graphs, for example, so optimizers
# being called inside function definitions behave as if they were seeing the
# actual outside graph).
self._graph_key = "graph-key-%d/" % (uid(),)
# A string with the last reduction method passed to
# losses.compute_weighted_loss(), or None. This is required only for
# backward compatibility with Estimator and optimizer V1 use cases.
self._last_loss_reduction = None
# Flag that is used to indicate whether loss has been scaled by optimizer.
# If this flag has been set, then estimator uses it to scale loss back
# before reporting. This is required only for backward compatibility with
# Estimator and optimizer V1 use cases.
self._is_loss_scaled_by_optimizer = False
self._container = ""
# Set to True if this graph is being built in an
# AutomaticControlDependencies context.
self._add_control_dependencies = False
# Cache for OpDef protobufs retrieved via the C API.
self._op_def_cache = {}
# Cache for constant results of `broadcast_gradient_args()`. The keys are
# tuples of fully-defined shapes: (x_shape_tuple, y_shape_tuple), and the
# values are tuples of reduction indices: (rx, ry).
self._bcast_grad_args_cache = {}
# Cache for constant results of `reduced_shape()`. The keys are pairs of
# tuples: (input_shape_tuple, reduction_indices_tuple), and the values
# are pairs of tuples: (output_shape_kept_dims, tile_scaling).
self._reduced_shape_cache = {}
# TODO(skyewm): fold as much of the above as possible into the C
# implementation
self._scoped_c_graph = c_api_util.ScopedTFGraph()
# The C API requires all ops to have shape functions. Disable this
# requirement (many custom ops do not have shape functions, and we don't
# want to break these existing cases).
pywrap_tf_session.SetRequireShapeInferenceFns(self._c_graph, False)
if tf2.enabled():
self.switch_to_thread_local()
# Note: this method is private because the API of tf.Graph() is public and
# frozen, and this functionality is still not ready for public visibility.
@tf_contextlib.contextmanager
def _variable_creator_scope(self, creator, priority=100):
"""Scope which defines a variable creation function.
Args:
creator: A callable taking `next_creator` and `kwargs`. See the
`tf.variable_creator_scope` docstring.
priority: Creators with a higher `priority` are called first. Within the
same priority, creators are called inner-to-outer.
Yields:
`_variable_creator_scope` is a context manager with a side effect, but
doesn't return a value.
Raises:
RuntimeError: If variable creator scopes are not properly nested.
"""
# This step keeps a reference to the existing stack, and it also initializes
# self._thread_local._variable_creator_stack if it doesn't exist yet.
old = self._variable_creator_stack
new = list(old)
new.append((priority, creator))
# Sorting is stable, so we'll put higher-priority creators later in the list
# but otherwise maintain registration order.
new.sort(key=lambda item: item[0])
self._thread_local._variable_creator_stack = new # pylint: disable=protected-access
try:
yield
finally:
if self._thread_local._variable_creator_stack is not new: # pylint: disable=protected-access
raise RuntimeError(
"Exiting variable_creator_scope without proper nesting.")
self._thread_local._variable_creator_stack = old # pylint: disable=protected-access
# Note: this method is private because the API of tf.Graph() is public and
# frozen, and this functionality is still not ready for public visibility.
@property
def _variable_creator_stack(self):
if not hasattr(self._thread_local, "_variable_creator_stack"):
self._thread_local._variable_creator_stack = [] # pylint: disable=protected-access
# This previously returned a copy of the stack instead of the stack itself,
# to guard against accidental mutation. Consider, however, code that wants
# to save and restore the variable creator stack:
# def f():
# original_stack = graph._variable_creator_stack
# graph._variable_creator_stack = new_stack
# ... # Some code
# graph._variable_creator_stack = original_stack
#
# And let's say you have some code that calls this function with some
# variable_creator:
# def g():
# with variable_scope.variable_creator_scope(creator):
# f()
# When exiting the variable creator scope, it would see a different stack
# object than it expected, leading to an "Exiting variable_creator_scope
# without proper nesting" error.
return self._thread_local._variable_creator_stack # pylint: disable=protected-access
@_variable_creator_stack.setter
def _variable_creator_stack(self, variable_creator_stack):
self._thread_local._variable_creator_stack = variable_creator_stack # pylint: disable=protected-access
def _check_not_finalized(self):
"""Check if the graph is finalized.
Raises:
RuntimeError: If the graph is finalized.
"""
if self._finalized:
raise RuntimeError("Graph is finalized and cannot be modified.")
def _add_op(self, op, op_name):
"""Adds 'op' to the graph and returns the unique ID for the added Operation.
Args:
op: the Operation to add.
op_name: the name of the Operation.
Returns:
An integer that is a unique ID for the added Operation.
"""
self._check_not_finalized()
with self._lock:
self._next_id_counter += 1
op_id = self._next_id_counter
self._nodes_by_id[op_id] = op
self._nodes_by_name[op_name] = op
self._version = max(self._version, op_id)
return op_id
@property
def _c_graph(self):
if self._scoped_c_graph:
return self._scoped_c_graph.graph
return None
@property
def version(self):
"""Returns a version number that increases as ops are added to the graph.
Note that this is unrelated to the
`tf.Graph.graph_def_versions`.
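For example:
```python
g = tf.Graph()
v0 = g.version
with g.as_default():
  tf.constant(1.0)  # Adding an op increases the version.
assert g.version > v0
```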
Returns:
An integer version that increases as ops are added to the graph.
"""
if self._finalized:
return self._version
with self._lock:
return self._version
@property
def graph_def_versions(self):
# pylint: disable=line-too-long
"""The GraphDef version information of this graph.
For details on the meaning of each version, see
[`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto).
Returns:
A `VersionDef`.
"""
# pylint: enable=line-too-long
with c_api_util.tf_buffer() as buf:
pywrap_tf_session.TF_GraphVersions(self._c_graph, buf)
data = pywrap_tf_session.TF_GetBuffer(buf)
version_def = versions_pb2.VersionDef()
version_def.ParseFromString(compat.as_bytes(data))
return version_def
@property
def seed(self):
"""The graph-level random seed of this graph."""
return self._seed
@seed.setter
def seed(self, seed):
self._seed = seed
@property
def finalized(self):
"""True if this graph has been finalized."""
return self._finalized
def finalize(self):
"""Finalizes this graph, making it read-only.
After calling `g.finalize()`, no new operations can be added to
`g`. This method is used to ensure that no operations are added
to a graph when it is shared between multiple threads, for example
when using a `tf.compat.v1.train.QueueRunner`.
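For example:
```python
g = tf.Graph()
g.finalize()
with g.as_default():
  tf.constant(1.0)  # Raises RuntimeError: the graph is finalized.
```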
"""
self._finalized = True
def _unsafe_unfinalize(self):
"""Opposite of `finalize`.
Internal interface.
NOTE: Unfinalizing a graph could have negative impact on performance,
especially in a multi-threaded environment. Unfinalizing a graph
when it is in use by a Session may lead to undefined behavior. Ensure
that all sessions using a graph are closed before calling this method.
"""
self._finalized = False
def _get_control_flow_context(self):
"""Returns the current control flow context.
Returns:
A context object.
"""
return self._control_flow_context
def _set_control_flow_context(self, ctx):
"""Sets the current control flow context.
Args:
ctx: a context object.
"""
self._control_flow_context = ctx
def _copy_functions_to_graph_def(self, graph_def, starting_bytesize):
"""If this graph contains functions, copy them to `graph_def`."""
bytesize = starting_bytesize
for f in self._functions.values():
bytesize += f.definition.ByteSize()
if bytesize >= (1 << 31) or bytesize < 0:
raise ValueError("GraphDef cannot be larger than 2GB.")
graph_def.library.function.extend([f.definition])
if f.grad_func_name:
grad_def = function_pb2.GradientDef()
grad_def.function_name = f.name
grad_def.gradient_func = f.grad_func_name
graph_def.library.gradient.extend([grad_def])
def _as_graph_def(self, from_version=None, add_shapes=False):
# pylint: disable=line-too-long
"""Returns a serialized `GraphDef` representation of this graph.
The serialized `GraphDef` can be imported into another `Graph`
(using `tf.import_graph_def`) or used with the
[C++ Session API](../../../../api_docs/cc/index.md).
This method is thread-safe.
Args:
from_version: Optional. If this is set, returns a `GraphDef` containing
only the nodes that were added to this graph since its `version`
property had the given value.
add_shapes: If true, adds an "_output_shapes" list attr to each node with
the inferred shapes of each of its outputs.
Returns:
A tuple containing a
[`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)
protocol buffer, and the version of the graph to which that
`GraphDef` corresponds.
Raises:
ValueError: If the `graph_def` would be too large.
"""
# pylint: enable=line-too-long
with self._lock:
with c_api_util.tf_buffer() as buf:
pywrap_tf_session.TF_GraphToGraphDef(self._c_graph, buf)
data = pywrap_tf_session.TF_GetBuffer(buf)
graph = graph_pb2.GraphDef()
graph.ParseFromString(compat.as_bytes(data))
# Strip the experimental library field iff it's empty.
if not graph.library.function:
graph.ClearField("library")
if add_shapes:
for node in graph.node:
op = self._nodes_by_name[node.name]
if op.outputs:
node.attr["_output_shapes"].list.shape.extend(
[output.get_shape().as_proto() for output in op.outputs])
for function_def in graph.library.function:
defined_function = self._functions[function_def.signature.name]
try:
func_graph = defined_function.graph
except AttributeError:
# _DefinedFunction doesn't have a graph, _EagerDefinedFunction
# does. Both rely on ops.py, so we can't really isinstance check
# them.
continue
input_shapes = function_def.attr["_input_shapes"]
try:
func_graph_inputs = func_graph.inputs
except AttributeError:
continue
# TODO(b/141471245): Fix the inconsistency when inputs of func graph
# are appended during gradient computation of while/cond.
for input_tensor, _ in zip(func_graph_inputs,
function_def.signature.input_arg):
if input_tensor.dtype == dtypes.resource:
# TODO(allenl): Save and restore handle data, then save the
# resource placeholder's shape. Right now some shape functions get
# confused if we set the shape of the resource placeholder (to a
# scalar of course) and there isn't any handle data.
input_shapes.list.shape.add().CopyFrom(
tensor_shape.TensorShape(None).as_proto())
else:
input_shapes.list.shape.add().CopyFrom(
input_tensor.get_shape().as_proto())
for node in function_def.node_def:
try:
op = func_graph.get_operation_by_name(node.name)
except KeyError:
continue
outputs = op.outputs
if op.type == "StatefulPartitionedCall":
# Filter out any extra outputs (possibly added by function
# backpropagation rewriting).
num_outputs = len(node.attr["Tout"].list.type)
outputs = outputs[:num_outputs]
node.attr["_output_shapes"].list.shape.extend(
[output.get_shape().as_proto() for output in outputs])
return graph, self._version
def as_graph_def(self, from_version=None, add_shapes=False):
# pylint: disable=line-too-long
"""Returns a serialized `GraphDef` representation of this graph.
The serialized `GraphDef` can be imported into another `Graph`
(using `tf.import_graph_def`) or used with the
[C++ Session API](../../api_docs/cc/index.md).
This method is thread-safe.
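For example:
```python
with tf.Graph().as_default() as g:
  tf.constant(1.0, name="c")
  graph_def = g.as_graph_def()
  assert any(node.name == "c" for node in graph_def.node)
```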
Args:
from_version: Optional. If this is set, returns a `GraphDef` containing
only the nodes that were added to this graph since its `version`
property had the given value.
add_shapes: If true, adds an "_output_shapes" list attr to each node with
the inferred shapes of each of its outputs.
Returns:
A
[`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)
protocol buffer.
Raises:
ValueError: If the `graph_def` would be too large.
"""
# pylint: enable=line-too-long
result, _ = self._as_graph_def(from_version, add_shapes)
return result
def _is_function(self, name):
"""Tests whether 'name' is registered in this graph's function library.
Args:
name: string op name.
Returns:
bool indicating whether or not 'name' is registered in function library.
"""
return compat.as_str(name) in self._functions
def _get_function(self, name):
"""Returns the function definition for 'name'.
Args:
name: string function name.
Returns:
The function def proto.
"""
return self._functions.get(compat.as_str(name), None)
def _add_function(self, function):
"""Adds a function to the graph.
After the function has been added, you can call the function by
passing the function name in place of an op name to
`Graph.create_op()`.
Args:
function: A `_DefinedFunction` object.
Raises:
ValueError: if another function is defined with the same name.
"""
name = function.name
# Sanity checks on gradient definition.
if (function.grad_func_name is not None) and (function.python_grad_func is
not None):
raise ValueError("Gradient defined twice for function %s" % name)
# Add function to graph
# pylint: disable=protected-access
gradient = (
function._grad_func._c_func.func if function._grad_func else None)
pywrap_tf_session.TF_GraphCopyFunction(self._c_graph, function._c_func.func,
gradient)
# pylint: enable=protected-access
self._functions[compat.as_str(name)] = function
# Need a new-enough consumer to support the functions we add to the graph.
if self._graph_def_versions.min_consumer < 12:
self._graph_def_versions.min_consumer = 12
@property
def building_function(self):
"""Returns True iff this graph represents a function."""
return self._building_function
# Helper functions to create operations.
@deprecated_args(None,
"Shapes are always computed; don't use the compute_shapes "
"argument as it has no effect.", "compute_shapes")
def create_op(
self,
op_type,
inputs,
dtypes=None, # pylint: disable=redefined-outer-name
input_types=None,
name=None,
attrs=None,
op_def=None,
compute_shapes=True,
compute_device=True):
"""Creates an `Operation` in this graph.
This is a low-level interface for creating an `Operation`. Most
programs will not call this method directly, and instead use the
Python op constructors, such as `tf.constant()`, which add ops to
the default graph.
Args:
op_type: The `Operation` type to create. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
inputs: A list of `Tensor` objects that will be inputs to the `Operation`.
dtypes: (Optional) A list of `DType` objects that will be the types of the
tensors that the operation produces.
input_types: (Optional.) A list of `DType`s that will be the types of the
tensors that the operation consumes. By default, uses the base `DType`
of each input in `inputs`. Operations that expect reference-typed inputs
must specify `input_types` explicitly.
name: (Optional.) A string name for the operation. If not specified, a
name is generated based on `op_type`.
attrs: (Optional.) A dictionary where the key is the attribute name (a
string) and the value is the respective `attr` attribute of the
`NodeDef` proto that will represent the operation (an `AttrValue`
proto).
op_def: (Optional.) The `OpDef` proto that describes the `op_type` that
the operation will have.
compute_shapes: (Optional.) Deprecated. Has no effect (shapes are always
computed).
compute_device: (Optional.) If True, device functions will be executed to
compute the device property of the Operation.
Raises:
TypeError: if any of the inputs is not a `Tensor`.
ValueError: if colocation conflicts with existing device assignment.
Returns:
An `Operation` object.
"""
del compute_shapes
for idx, a in enumerate(inputs):
if not isinstance(a, Tensor):
raise TypeError("Input #%d is not a tensor: %s" % (idx, a))
return self._create_op_internal(op_type, inputs, dtypes, input_types, name,
attrs, op_def, compute_device)
def _create_op_internal(
self,
op_type,
inputs,
dtypes=None, # pylint: disable=redefined-outer-name
input_types=None,
name=None,
attrs=None,
op_def=None,
compute_device=True):
"""Creates an `Operation` in this graph.
Implements `Graph.create_op()` without the overhead of the deprecation
wrapper.
Args:
op_type: The `Operation` type to create. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
inputs: A list of `Tensor` objects that will be inputs to the `Operation`.
dtypes: (Optional) A list of `DType` objects that will be the types of the
tensors that the operation produces.
input_types: (Optional.) A list of `DType`s that will be the types of the
tensors that the operation consumes. By default, uses the base `DType`
of each input in `inputs`. Operations that expect reference-typed inputs
must specify `input_types` explicitly.
name: (Optional.) A string name for the operation. If not specified, a
name is generated based on `op_type`.
attrs: (Optional.) A dictionary where the key is the attribute name (a
string) and the value is the respective `attr` attribute of the
`NodeDef` proto that will represent the operation (an `AttrValue`
proto).
op_def: (Optional.) The `OpDef` proto that describes the `op_type` that
the operation will have.
compute_device: (Optional.) If True, device functions will be executed to
compute the device property of the Operation.
Raises:
ValueError: if colocation conflicts with existing device assignment.
Returns:
An `Operation` object.
"""
self._check_not_finalized()
if name is None:
name = op_type
# If a name ends with a '/', it is a "name scope" and we use it as-is,
# after removing the trailing '/'.
if name and name[-1] == "/":
name = name_from_scope_name(name)
else:
name = self.unique_name(name)
node_def = _NodeDef(op_type, name, attrs)
input_ops = set(t.op for t in inputs)
control_inputs = self._control_dependencies_for_inputs(input_ops)
# _create_op_helper mutates the new Operation. `_mutation_lock` ensures a
# Session.run call cannot occur between creating and mutating the op.
with self._mutation_lock():
ret = Operation(
node_def,
self,
inputs=inputs,
output_types=dtypes,
control_inputs=control_inputs,
input_types=input_types,
original_op=self._default_original_op,
op_def=op_def)
self._create_op_helper(ret, compute_device=compute_device)
return ret
def _create_op_from_tf_operation(self, c_op, compute_device=True):
"""Creates an `Operation` in this graph from the supplied TF_Operation.
This method is like create_op() except the new Operation is constructed
using `c_op`. The returned Operation will have `c_op` as its _c_op
field. This is used to create Operation objects around TF_Operations created
indirectly by the C API (e.g. by TF_ImportGraphDef, TF_FinishWhile).
This function does not call Operation._control_flow_post_processing or
Graph._control_dependencies_for_inputs (since the inputs may not be
available yet). The caller is responsible for calling these methods.
Args:
c_op: a wrapped TF_Operation
compute_device: (Optional.) If True, device functions will be executed to
compute the device property of the Operation.
Returns:
An `Operation` object.
"""
self._check_not_finalized()
ret = Operation(c_op, self)
# If a name_scope was created with ret.name but no nodes were created in it,
# the name will still appear in _names_in_use even though the name hasn't
# been used. This is ok, just leave _names_in_use as-is in this case.
# TODO(skyewm): make the C API guarantee no name conflicts.
name_key = ret.name.lower()
if name_key not in self._names_in_use:
self._names_in_use[name_key] = 1
self._create_op_helper(ret, compute_device=compute_device)
return ret
def _create_op_helper(self, op, compute_device=True):
"""Common logic for creating an op in this graph."""
# Apply any additional attributes requested. Do not overwrite any existing
# attributes.
for key, value in self._attr_scope_map.items():
try:
op.get_attr(key)
except ValueError:
if callable(value):
value = value(op.node_def)
if not isinstance(value, (type(None), attr_value_pb2.AttrValue)):
raise TypeError(
"Callable for scope map key '%s' must return either None or "
"an AttrValue protocol buffer; but it returned: %s" %
(key, value))
if value:
op._set_attr(key, value) # pylint: disable=protected-access
# Apply a kernel label if one has been specified for this op type.
try:
kernel_label = self._op_to_kernel_label_map[op.type]
op._set_attr("_kernel", # pylint: disable=protected-access
attr_value_pb2.AttrValue(s=compat.as_bytes(kernel_label)))
except KeyError:
pass
op._gradient_function = self._gradient_function_map.get(op.type) # pylint: disable=protected-access
# Apply the overriding op type for gradients if one has been specified for
# this op type.
try:
mapped_op_type = self._gradient_override_map[op.type]
op._set_attr("_gradient_op_type", # pylint: disable=protected-access
attr_value_pb2.AttrValue(s=compat.as_bytes(mapped_op_type)))
except KeyError:
pass
self._record_op_seen_by_control_dependencies(op)
if compute_device:
self._apply_device_functions(op)
# Snapshot the colocation stack metadata before we might generate error
# messages using it. Note that this snapshot depends on the actual stack
# and is independent of the op's _class attribute.
# pylint: disable=protected-access
op._colocation_code_locations = self._snapshot_colocation_stack_metadata()
# pylint: enable=protected-access
if self._colocation_stack:
all_colocation_groups = []
for colocation_op in self._colocation_stack.peek_objs():
all_colocation_groups.extend(colocation_op.colocation_groups())
if colocation_op.device:
# pylint: disable=protected-access
op._set_device(colocation_op.device)
# pylint: enable=protected-access
all_colocation_groups = sorted(set(all_colocation_groups))
# pylint: disable=protected-access
op._set_attr(
"_class",
attr_value_pb2.AttrValue(
list=attr_value_pb2.AttrValue.ListValue(s=all_colocation_groups)))
# pylint: enable=protected-access
# Sets the "container" attribute if:
# (1) self._container is not empty
# (2) "is_stateful" is set in OpDef
# (3) "container" attribute is in OpDef
# (4) "container" attribute is empty
if self._container and op._is_stateful: # pylint: disable=protected-access
try:
container_attr = op.get_attr("container")
except ValueError:
# "container" attribute is not in OpDef
pass
else:
if not container_attr:
op._set_attr("container", attr_value_pb2.AttrValue( # pylint: disable=protected-access
s=compat.as_bytes(self._container)))
def _add_new_tf_operations(self, compute_devices=True):
"""Creates `Operations` in this graph for any new TF_Operations.
This is useful for when TF_Operations are indirectly created by the C API
outside of the Operation constructor (e.g. by TF_ImportGraphDef,
TF_FinishWhile). This ensures there are corresponding Operations for all
TF_Operations in the underlying TF_Graph.
Args:
compute_devices: (Optional.) If True, device functions will be executed to
compute the device properties of each new Operation.
Returns:
A list of the new `Operation` objects.
"""
# Create all Operation objects before accessing their inputs since an op may
# be created before its inputs.
new_ops = [
self._create_op_from_tf_operation(c_op, compute_device=compute_devices)
for c_op in c_api_util.new_tf_operations(self)
]
# pylint: disable=protected-access
for op in new_ops:
new_control_inputs = self._control_dependencies_for_inputs(op.inputs)
op._add_control_inputs(new_control_inputs)
op._control_flow_post_processing()
# pylint: enable=protected-access
return new_ops
def as_graph_element(self, obj, allow_tensor=True, allow_operation=True):
"""Returns the object referred to by `obj`, as an `Operation` or `Tensor`.
This function validates that `obj` represents an element of this
graph, and gives an informative error message if it is not.
This function is the canonical way to get/validate an object of
one of the allowed types from an external argument reference in the
Session API.
This method may be called concurrently from multiple threads.
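For example:
```python
with tf.Graph().as_default() as g:
  c = tf.constant(1.0, name="c")
  assert g.as_graph_element("c:0") is c
  assert g.as_graph_element("c") is c.op
```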
Args:
obj: A `Tensor`, an `Operation`, or the name of a tensor or operation. Can
also be any object with an `_as_graph_element()` method that returns a
value of one of these types. Note: `_as_graph_element` will be called
inside the graph's lock and so may not modify the graph.
allow_tensor: If true, `obj` may refer to a `Tensor`.
allow_operation: If true, `obj` may refer to an `Operation`.
Returns:
The `Tensor` or `Operation` in the Graph corresponding to `obj`.
Raises:
TypeError: If `obj` is not a type we support attempting to convert
to types.
ValueError: If `obj` is of an appropriate type but invalid. For
example, an invalid string.
KeyError: If `obj` is not an object in the graph.
"""
if self._finalized:
return self._as_graph_element_locked(obj, allow_tensor, allow_operation)
with self._lock:
return self._as_graph_element_locked(obj, allow_tensor, allow_operation)
def _as_graph_element_locked(self, obj, allow_tensor, allow_operation):
"""See `Graph.as_graph_element()` for details."""
# The vast majority of this function is figuring
# out what an API user might be doing wrong, so
# that we can give helpful error messages.
#
# Ideally, it would be nice to split it up, but we
# need context to generate nice error messages.
if allow_tensor and allow_operation:
types_str = "Tensor or Operation"
elif allow_tensor:
types_str = "Tensor"
elif allow_operation:
types_str = "Operation"
else:
raise ValueError("allow_tensor and allow_operation can't both be False.")
temp_obj = _as_graph_element(obj)
if temp_obj is not None:
obj = temp_obj
# If obj appears to be a name...
if isinstance(obj, compat.bytes_or_text_types):
name = compat.as_str(obj)
if ":" in name and allow_tensor:
# Looks like a Tensor name and can be a Tensor.
try:
op_name, out_n = name.split(":")
out_n = int(out_n)
except ValueError:
raise ValueError("The name %s looks like a Tensor name, but is "
"not a valid one. Tensor names must be of the "
"form \"<op_name>:<output_index>\"." % repr(name))
if op_name in self._nodes_by_name:
op = self._nodes_by_name[op_name]
else:
raise KeyError("The name %s refers to a Tensor which does not "
"exist. The operation, %s, does not exist in the "
"graph." % (repr(name), repr(op_name)))
try:
return op.outputs[out_n]
except IndexError:
raise KeyError("The name %s refers to a Tensor which does not "
"exist. The operation, %s, exists but only has "
"%s outputs." %
(repr(name), repr(op_name), len(op.outputs)))
elif ":" in name and not allow_tensor:
# Looks like a Tensor name but can't be a Tensor.
raise ValueError("Name %s appears to refer to a Tensor, not a %s." %
(repr(name), types_str))
elif ":" not in name and allow_operation:
# Looks like an Operation name and can be an Operation.
if name not in self._nodes_by_name:
raise KeyError("The name %s refers to an Operation not in the "
"graph." % repr(name))
return self._nodes_by_name[name]
elif ":" not in name and not allow_operation:
# Looks like an Operation name but can't be an Operation.
if name in self._nodes_by_name:
# Yep, it's an Operation name
err_msg = ("The name %s refers to an Operation, not a %s." %
(repr(name), types_str))
else:
err_msg = ("The name %s looks like an (invalid) Operation name, "
"not a %s." % (repr(name), types_str))
err_msg += (" Tensor names must be of the form "
"\"<op_name>:<output_index>\".")
raise ValueError(err_msg)
elif isinstance(obj, Tensor) and allow_tensor:
# Actually obj is just the object it's referring to.
if obj.graph is not self:
raise ValueError("Tensor %s is not an element of this graph." % obj)
return obj
elif isinstance(obj, Operation) and allow_operation:
# Actually obj is just the object it's referring to.
if obj.graph is not self:
raise ValueError("Operation %s is not an element of this graph." % obj)
return obj
else:
# We give up!
raise TypeError("Can not convert a %s into a %s." %
(type(obj).__name__, types_str))
def get_operations(self):
"""Return the list of operations in the graph.
You can modify the operations in place, but modifications
to the returned list, such as inserts or deletes, have no effect
on the list of operations known to the graph.
This method may be called concurrently from multiple threads.
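For example:
```python
with tf.Graph().as_default() as g:
  tf.constant(1.0, name="c")
  assert [op.name for op in g.get_operations()] == ["c"]
```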
Returns:
A list of Operations.
"""
if self._finalized:
return list(self._nodes_by_id.values())
with self._lock:
return list(self._nodes_by_id.values())
def get_operation_by_name(self, name):
"""Returns the `Operation` with the given `name`.
This method may be called concurrently from multiple threads.
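For example:
```python
with tf.Graph().as_default() as g:
  c = tf.constant(1.0, name="c")
  assert g.get_operation_by_name("c") is c.op
```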
Args:
name: The name of the `Operation` to return.
Returns:
The `Operation` with the given `name`.
Raises:
TypeError: If `name` is not a string.
KeyError: If `name` does not correspond to an operation in this graph.
"""
if not isinstance(name, six.string_types):
raise TypeError("Operation names are strings (or similar), not %s." %
type(name).__name__)
return self.as_graph_element(name, allow_tensor=False, allow_operation=True)
def _get_operation_by_name_unsafe(self, name):
"""Returns the `Operation` with the given `name`.
This is an internal, unsafe version of `get_operation_by_name`. It skips many
checks and does not have user-friendly error messages, but runs considerably
faster. This method may be called concurrently from multiple threads.
Args:
name: The name of the `Operation` to return.
Returns:
The `Operation` with the given `name`.
Raises:
KeyError: If `name` does not correspond to an operation in this graph.
"""
if self._finalized:
return self._nodes_by_name[name]
with self._lock:
return self._nodes_by_name[name]
def _get_operation_by_tf_operation(self, tf_oper):
op_name = pywrap_tf_session.TF_OperationName(tf_oper)
return self._get_operation_by_name_unsafe(op_name)
def get_tensor_by_name(self, name):
"""Returns the `Tensor` with the given `name`.
This method may be called concurrently from multiple threads.
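For example:
```python
with tf.Graph().as_default() as g:
  c = tf.constant(1.0, name="c")
  assert g.get_tensor_by_name("c:0") is c
```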
Args:
name: The name of the `Tensor` to return.
Returns:
The `Tensor` with the given `name`.
Raises:
TypeError: If `name` is not a string.
KeyError: If `name` does not correspond to a tensor in this graph.
"""
# Names should be strings.
if not isinstance(name, six.string_types):
raise TypeError("Tensor names are strings (or similar), not %s." %
type(name).__name__)
return self.as_graph_element(name, allow_tensor=True, allow_operation=False)
def _get_tensor_by_tf_output(self, tf_output):
"""Returns the `Tensor` representing `tf_output`.
Note that there is only one such `Tensor`, i.e. multiple calls to this
function with the same TF_Output value will always return the same `Tensor`
object.
Args:
tf_output: A wrapped `TF_Output` (the C API equivalent of `Tensor`).
Returns:
The `Tensor` that represents `tf_output`.
"""
op = self._get_operation_by_tf_operation(tf_output.oper)
return op.outputs[tf_output.index]
@property
def _last_id(self):
return self._next_id_counter
def _get_op_def(self, type): # pylint: disable=redefined-builtin
"""Returns the `OpDef` proto for `type`. `type` is a string."""
# NOTE: No locking is required because the lookup and insertion operations
# on Python dictionaries are atomic.
try:
return self._op_def_cache[type]
except KeyError:
with c_api_util.tf_buffer() as buf:
# pylint: disable=protected-access
pywrap_tf_session.TF_GraphGetOpDef(self._c_graph, compat.as_bytes(type),
buf)
# pylint: enable=protected-access
data = pywrap_tf_session.TF_GetBuffer(buf)
op_def = op_def_pb2.OpDef()
op_def.ParseFromString(compat.as_bytes(data))
self._op_def_cache[type] = op_def
return op_def
def as_default(self):
"""Returns a context manager that makes this `Graph` the default graph.
This method should be used if you want to create multiple graphs
in the same process. For convenience, a global default graph is
provided, and all ops will be added to this graph if you do not
create a new graph explicitly.
Use this method with the `with` keyword to specify that ops created within
the scope of a block should be added to this graph. In this case, once
the scope of the `with` is exited, the previous default graph is set again
as default. There is a stack, so it's ok to have multiple nested levels
of `as_default` calls.
The default graph is a property of the current thread. If you
create a new thread, and wish to use the default graph in that
thread, you must explicitly add a `with g.as_default():` in that
thread's function.
The following code examples are equivalent:
```python
# 1. Using Graph.as_default():
g = tf.Graph()
with g.as_default():
c = tf.constant(5.0)
assert c.graph is g
# 2. Constructing and making default:
with tf.Graph().as_default() as g:
c = tf.constant(5.0)
assert c.graph is g
```
If eager execution is enabled, ops created under this context manager will be
added to the graph instead of being executed eagerly.
Returns:
A context manager for using this graph as the default graph.
"""
return _default_graph_stack.get_controller(self)
@property
def collections(self):
"""Returns the names of the collections known to this graph."""
return list(self._collections)
def add_to_collection(self, name, value):
"""Stores `value` in the collection with the given `name`.
Note that collections are not sets, so it is possible to add a value to
a collection several times.
Args:
name: The key for the collection. The `GraphKeys` class contains many
standard names for collections.
value: The value to add to the collection.
""" # pylint: disable=g-doc-exception
self._check_not_finalized()
with self._lock:
if name not in self._collections:
self._collections[name] = [value]
else:
self._collections[name].append(value)
def add_to_collections(self, names, value):
"""Stores `value` in the collections given by `names`.
Note that collections are not sets, so it is possible to add a value to
a collection several times. This function makes sure that duplicates in
`names` are ignored, but it will not check for pre-existing membership of
`value` in any of the collections in `names`.
`names` can be any iterable, but if `names` is a string, it is treated as a
single collection name.
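For example, if `g` is a `Graph` and `v` is any value:
```python
g.add_to_collections(["train", "test"], v)  # Adds `v` to both collections.
g.add_to_collections("train", v)            # A string is one collection name.
```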
Args:
names: The keys for the collections to add to. The `GraphKeys` class
contains many standard names for collections.
value: The value to add to the collections.
"""
# Make sure names are unique, but treat strings as a single collection name
names = (names,) if isinstance(names, six.string_types) else set(names)
for name in names:
self.add_to_collection(name, value)
def get_collection_ref(self, name):
"""Returns a list of values in the collection with the given `name`.
If the collection exists, this returns the list itself, which can
be modified in place to change the collection. If the collection does
not exist, it is created as an empty list and the list is returned.
This is different from `get_collection()` which always returns a copy of
the collection list if it exists and never creates an empty collection.
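For example:
```python
g = tf.Graph()
ref = g.get_collection_ref("my_collection")  # Creates an empty list.
ref.append("value")                          # Mutates the collection in place.
assert g.get_collection("my_collection") == ["value"]
```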
Args:
name: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
Returns:
The list of values in the collection with the given `name`, or an empty
list if no value has been added to that collection.
""" # pylint: disable=g-doc-exception
with self._lock:
coll_list = self._collections.get(name, None)
if coll_list is None:
coll_list = []
self._collections[name] = coll_list
return coll_list
def get_collection(self, name, scope=None):
"""Returns a list of values in the collection with the given `name`.
This is different from `get_collection_ref()`, which always returns the
actual collection list if it exists: this method returns a new list each
time it is called.
Args:
name: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
scope: (Optional.) A string. If supplied, the resulting list is filtered
to include only items whose `name` attribute matches `scope` using
`re.match`. Items without a `name` attribute are never returned if a
scope is supplied. The choice of `re.match` means that a `scope` without
special tokens filters by prefix.
Returns:
The list of values in the collection with the given `name`, or
an empty list if no value has been added to that collection. The
list contains the values in the order under which they were
collected.
""" # pylint: disable=g-doc-exception
with self._lock:
collection = self._collections.get(name, None)
if collection is None:
return []
if scope is None:
return list(collection)
else:
c = []
regex = re.compile(scope)
for item in collection:
try:
if regex.match(item.name):
c.append(item)
except AttributeError:
# Collection items with no name are ignored.
pass
return c
def get_all_collection_keys(self):
"""Returns a list of collections used in this graph."""
with self._lock:
return [x for x in self._collections if isinstance(x, six.string_types)]
def clear_collection(self, name):
"""Clears all values in a collection.
Args:
name: The key for the collection. The `GraphKeys` class contains many
standard names for collections.
"""
self._check_not_finalized()
with self._lock:
if name in self._collections:
del self._collections[name]
@tf_contextlib.contextmanager
def _original_op(self, op):
"""Python 'with' handler to help annotate ops with their originator.
An op may have an 'original_op' property that indicates the op on which
it was based. For example a replica op is based on the op that was
replicated and a gradient op is based on the op that was differentiated.
All ops created in the scope of this 'with' handler will have
the given 'op' as their original op.
Args:
op: The Operation that all ops created in this scope will have as their
original op.
Yields:
Nothing.
"""
old_original_op = self._default_original_op
self._default_original_op = op
try:
yield
finally:
self._default_original_op = old_original_op
@property
def _name_stack(self):
# This may be called from a thread where name_stack doesn't yet exist.
if not hasattr(self._thread_local, "_name_stack"):
self._thread_local._name_stack = ""
return self._thread_local._name_stack
@_name_stack.setter
def _name_stack(self, name_stack):
self._thread_local._name_stack = name_stack
# pylint: disable=g-doc-return-or-yield,line-too-long
@tf_contextlib.contextmanager
def name_scope(self, name):
"""Returns a context manager that creates hierarchical names for operations.
A graph maintains a stack of name scopes. A `with name_scope(...):`
statement pushes a new name onto the stack for the lifetime of the context.
The `name` argument will be interpreted as follows:
* A string (not ending with '/') will create a new name scope, in which
`name` is appended to the prefix of all operations created in the
context. If `name` has been used before, it will be made unique by
calling `self.unique_name(name)`.
* A scope previously captured from a `with g.name_scope(...) as
scope:` statement will be treated as an "absolute" name scope, which
makes it possible to re-enter existing scopes.
* A value of `None` or the empty string will reset the current name scope
to the top-level (empty) name scope.
For example:
```python
with tf.Graph().as_default() as g:
c = tf.constant(5.0, name="c")
assert c.op.name == "c"
c_1 = tf.constant(6.0, name="c")
assert c_1.op.name == "c_1"
# Creates a scope called "nested"
with g.name_scope("nested") as scope:
nested_c = tf.constant(10.0, name="c")
assert nested_c.op.name == "nested/c"
# Creates a nested scope called "inner".
with g.name_scope("inner"):
nested_inner_c = tf.constant(20.0, name="c")
assert nested_inner_c.op.name == "nested/inner/c"
# Creates a nested scope called "inner_1".
with g.name_scope("inner"):
nested_inner_1_c = tf.constant(30.0, name="c")
assert nested_inner_1_c.op.name == "nested/inner_1/c"
# Treats `scope` as an absolute name scope, and
# switches to the "nested/" scope.
with g.name_scope(scope):
nested_d = tf.constant(40.0, name="d")
assert nested_d.op.name == "nested/d"
with g.name_scope(""):
e = tf.constant(50.0, name="e")
assert e.op.name == "e"
```
The name of the scope itself can be captured by `with
g.name_scope(...) as scope:`, which stores the name of the scope
in the variable `scope`. This value can be used to name an
operation that represents the overall result of executing the ops
in a scope. For example:
```python
inputs = tf.constant(...)
with g.name_scope('my_layer') as scope:
weights = tf.Variable(..., name="weights")
biases = tf.Variable(..., name="biases")
affine = tf.matmul(inputs, weights) + biases
output = tf.nn.relu(affine, name=scope)
```
NOTE: This method validates the given `name`. Valid scope
names match one of the following regular expressions:
[A-Za-z0-9.][A-Za-z0-9_.\\-/]* (for scopes at the root)
[A-Za-z0-9_.\\-/]* (for other scopes)
Args:
name: A name for the scope.
Returns:
A context manager that installs `name` as a new name scope.
Raises:
ValueError: If `name` is not a valid scope name, according to the rules
above.
"""
if name:
if isinstance(name, compat.bytes_or_text_types):
name = compat.as_str(name)
if self._name_stack:
# Scopes created in a nested scope may have initial characters
# that are illegal as the initial character of an op name
# (viz. '-', '\', '/', and '_').
if not _VALID_SCOPE_NAME_REGEX.match(name):
raise ValueError("'%s' is not a valid scope name" % name)
else:
# Scopes created in the root must match the more restrictive
# op name regex, which constrains the initial character.
if not _VALID_OP_NAME_REGEX.match(name):
raise ValueError("'%s' is not a valid scope name" % name)
old_stack = self._name_stack
if not name: # Both for name=None and name="" we re-set to empty scope.
new_stack = None
elif name[-1] == "/":
new_stack = name_from_scope_name(name)
else:
new_stack = self.unique_name(name)
self._name_stack = new_stack
try:
yield "" if new_stack is None else new_stack + "/"
finally:
self._name_stack = old_stack
# pylint: enable=g-doc-return-or-yield,line-too-long
def unique_name(self, name, mark_as_used=True):
"""Return a unique operation name for `name`.
Note: You rarely need to call `unique_name()` directly. Most of
the time you just need to create `with g.name_scope()` blocks to
generate structured names.
`unique_name` is used to generate structured names, separated by
`"/"`, to help identify operations when debugging a graph.
Operation names are displayed in error messages reported by the
TensorFlow runtime, and in various visualization tools such as
TensorBoard.
If `mark_as_used` is set to `True`, which is the default, a new
unique name is created and marked as in use. If it's set to `False`,
the unique name is returned without actually being marked as used.
This is useful when the caller simply wants to know what the created
name would be.
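For example:
```python
g = tf.Graph()
with g.name_scope("foo"):
  assert g.unique_name("op") == "foo/op"
  assert g.unique_name("op") == "foo/op_1"
```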
Args:
name: The name for an operation.
mark_as_used: Whether to mark this name as being used.
Returns:
A string to be passed to `create_op()` that will be used
to name the operation being created.
"""
if self._name_stack:
name = self._name_stack + "/" + name
# For the sake of checking for names in use, we treat names as case
# insensitive (e.g. foo = Foo).
name_key = name.lower()
i = self._names_in_use.get(name_key, 0)
# Increment the number for "name_key".
if mark_as_used:
self._names_in_use[name_key] = i + 1
if i > 0:
base_name_key = name_key
# Make sure the composed name key is not already used.
while name_key in self._names_in_use:
name_key = "%s_%d" % (base_name_key, i)
i += 1
# Mark the composed name_key as used in case someone wants
# to call unique_name("name_1").
if mark_as_used:
self._names_in_use[name_key] = 1
# Return the new name with the original capitalization of the given name.
name = "%s_%d" % (name, i - 1)
return name
def get_name_scope(self):
"""Returns the current name scope.
For example:
```python
with tf.name_scope('scope1'):
with tf.name_scope('scope2'):
print(tf.compat.v1.get_default_graph().get_name_scope())
```
would print the string `scope1/scope2`.
Returns:
A string representing the current name scope.
"""
return self._name_stack
@tf_contextlib.contextmanager
def _colocate_with_for_gradient(self, op, gradient_uid,
ignore_existing=False):
with self.colocate_with(op, ignore_existing):
if gradient_uid is not None and self._control_flow_context is not None:
self._control_flow_context.EnterGradientColocation(op, gradient_uid)
try:
yield
finally:
self._control_flow_context.ExitGradientColocation(op, gradient_uid)
else:
yield
@tf_contextlib.contextmanager
def colocate_with(self, op, ignore_existing=False):
"""Returns a context manager that specifies an op to colocate with.
Note: this function is not for public use, only for internal libraries.
For example:
```python
a = tf.Variable([1.0])
with g.colocate_with(a):
b = tf.constant(1.0)
c = tf.add(a, b)
```
`b` and `c` will always be colocated with `a`, no matter where `a`
is eventually placed.
**NOTE** Using a colocation scope resets any existing device constraints.
If `op` is `None` then `ignore_existing` must be `True` and the new
scope resets all colocation and device constraints.
Args:
op: The op to colocate all created ops with, or `None`.
ignore_existing: If true, only applies colocation of this op within the
context, rather than applying all colocation properties on the stack.
If `op` is `None`, this value must be `True`.
Raises:
ValueError: if op is None but ignore_existing is False.
Yields:
A context manager that specifies the op with which to colocate
newly created ops.
"""
if op is None and not ignore_existing:
raise ValueError("Trying to reset colocation (op is None) but "
"ignore_existing is not True")
op = _op_to_colocate_with(op, self)
# By default, colocate_with resets the device function stack,
# since colocate_with is typically used in specific internal
# library functions where colocation is intended to be "stronger"
# than device functions.
#
# In the future, a caller may specify that device_functions win
# over colocation, in which case we can add support.
device_fn_tmp = self._device_function_stack
self._device_function_stack = traceable_stack.TraceableStack()
if ignore_existing:
current_stack = self._colocation_stack
self._colocation_stack = traceable_stack.TraceableStack()
if op is not None:
# offset refers to the stack frame used for storing code location.
# We use 4, the sum of 1 to use our caller's stack frame and 3
# to jump over layers of context managers above us.
self._colocation_stack.push_obj(op, offset=4)
try:
yield
finally:
# Restore device function stack
self._device_function_stack = device_fn_tmp
if op is not None:
self._colocation_stack.pop_obj()
# Reset the colocation stack if requested.
if ignore_existing:
self._colocation_stack = current_stack
def _add_device_to_stack(self, device_name_or_function, offset=0):
"""Add device to stack manually, separate from a context manager."""
total_offset = 1 + offset
spec = _UserDeviceSpec(device_name_or_function)
self._device_function_stack.push_obj(spec, offset=total_offset)
return spec
@tf_contextlib.contextmanager
def device(self, device_name_or_function):
# pylint: disable=line-too-long
"""Returns a context manager that specifies the default device to use.
The `device_name_or_function` argument may either be a device name
string, a device function, or None:
* If it is a device name string, all operations constructed in
this context will be assigned to the device with that name, unless
overridden by a nested `device()` context.
* If it is a function, it will be treated as a function from
Operation objects to device name strings, and invoked each time
a new Operation is created. The Operation will be assigned to
the device with the returned name.
* If it is None, all `device()` invocations from the enclosing context
will be ignored.
For information about the valid syntax of device name strings, see
the documentation in
[`DeviceNameUtils`](https://www.tensorflow.org/code/tensorflow/core/util/device_name_utils.h).
For example:
```python
with g.device('/device:GPU:0'):
# All operations constructed in this context will be placed
# on GPU 0.
with g.device(None):
# All operations constructed in this context will have no
# assigned device.
# Defines a function from `Operation` to device string.
def matmul_on_gpu(n):
if n.type == "MatMul":
return "/device:GPU:0"
else:
return "/cpu:0"
with g.device(matmul_on_gpu):
# All operations of type "MatMul" constructed in this context
# will be placed on GPU 0; all other operations will be placed
# on CPU 0.
```
**N.B.** The device scope may be overridden by op wrappers or
other library code. For example, a variable assignment op
`v.assign()` must be colocated with the `tf.Variable` `v`, and
incompatible device scopes will be ignored.
Args:
device_name_or_function: The device name or function to use in the
context.
Yields:
A context manager that specifies the default device to use for newly
created ops.
Raises:
RuntimeError: If device scopes are not properly nested.
"""
self._add_device_to_stack(device_name_or_function, offset=2)
old_top_of_stack = self._device_function_stack.peek_top_obj()
try:
yield
finally:
new_top_of_stack = self._device_function_stack.peek_top_obj()
if old_top_of_stack is not new_top_of_stack:
raise RuntimeError("Exiting device scope without proper scope nesting.")
self._device_function_stack.pop_obj()
def _apply_device_functions(self, op):
"""Applies the current device function stack to the given operation."""
# Apply any device functions in LIFO order, so that the most recently
# pushed function has the first chance to apply a device to the op.
# We apply here because the result can depend on the Operation's
# signature, which is computed in the Operation constructor.
# pylint: disable=protected-access
prior_device_string = None
for device_spec in self._device_function_stack.peek_objs():
if device_spec.is_null_merge:
continue
if device_spec.function is None:
break
device_string = device_spec.string_merge(op)
# Take advantage of the fact that None is a singleton and Python interns
# strings, since identity checks are faster than equality checks.
if device_string is not prior_device_string:
op._set_device_from_string(device_string)
prior_device_string = device_string
op._device_code_locations = self._snapshot_device_function_stack_metadata()
# pylint: enable=protected-access
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def container(self, container_name):
"""Returns a context manager that specifies the resource container to use.
Stateful operations, such as variables and queues, can maintain their
states on devices so that they can be shared by multiple processes.
A resource container is a string name under which these stateful
operations are tracked. These resources can be released or cleared
with `tf.Session.reset()`.
For example:
```python
with g.container('experiment0'):
# All stateful Operations constructed in this context will be placed
# in resource container "experiment0".
v1 = tf.Variable([1.0])
v2 = tf.Variable([2.0])
with g.container("experiment1"):
# All stateful Operations constructed in this context will be
# placed in resource container "experiment1".
v3 = tf.Variable([3.0])
q1 = tf.queue.FIFOQueue(10, tf.float32)
# All stateful Operations constructed in this context will be
# placed in resource container "experiment0".
v4 = tf.Variable([4.0])
q1 = tf.queue.FIFOQueue(20, tf.float32)
with g.container(""):
# All stateful Operations constructed in this context will be
# placed in the default resource container.
v5 = tf.Variable([5.0])
q3 = tf.queue.FIFOQueue(30, tf.float32)
# Resets container "experiment0", after which the state of v1, v2, v4, q1
# will become undefined (such as uninitialized).
tf.Session.reset(target, ["experiment0"])
```
Args:
container_name: container name string.
Returns:
A context manager for defining resource containers for stateful ops,
yields the container name.
"""
original_container = self._container
self._container = container_name
try:
yield self._container
finally:
self._container = original_container
# pylint: enable=g-doc-return-or-yield
class _ControlDependenciesController(object):
"""Context manager for `control_dependencies()`."""
def __init__(self, graph, control_inputs):
"""Create a new `_ControlDependenciesController`.
A `_ControlDependenciesController` is the context manager for
`with tf.control_dependencies()` blocks. These normally nest,
as described in the documentation for `control_dependencies()`.
The `control_inputs` argument lists control dependencies that must be
added to the current set of control dependencies. Because of
uniquification the set can be empty even if the caller passed a list of
ops. The special value `None` indicates that we want to start a new
empty set of control dependencies instead of extending the current set.
In that case we also clear the current control flow context, which is an
additional mechanism to add control dependencies.
Args:
graph: The graph that this controller is managing.
control_inputs: List of ops to use as control inputs in addition to the
current control dependencies. None to indicate that the dependencies
should be cleared.
"""
self._graph = graph
if control_inputs is None:
self._control_inputs_val = []
self._new_stack = True
else:
self._control_inputs_val = control_inputs
self._new_stack = False
self._seen_nodes = set()
self._old_stack = None
self._old_control_flow_context = None
# pylint: disable=protected-access
def __enter__(self):
if self._new_stack:
# Clear the control_dependencies graph.
self._old_stack = self._graph._control_dependencies_stack
self._graph._control_dependencies_stack = []
# Clear the control_flow_context too.
self._old_control_flow_context = self._graph._get_control_flow_context()
self._graph._set_control_flow_context(None)
self._graph._push_control_dependencies_controller(self)
def __exit__(self, unused_type, unused_value, unused_traceback):
self._graph._pop_control_dependencies_controller(self)
if self._new_stack:
self._graph._control_dependencies_stack = self._old_stack
self._graph._set_control_flow_context(self._old_control_flow_context)
# pylint: enable=protected-access
@property
def control_inputs(self):
return self._control_inputs_val
def add_op(self, op):
if isinstance(op, Tensor):
op = op.ref()
self._seen_nodes.add(op)
def op_in_group(self, op):
if isinstance(op, Tensor):
op = op.ref()
return op in self._seen_nodes
def _push_control_dependencies_controller(self, controller):
self._control_dependencies_stack.append(controller)
def _pop_control_dependencies_controller(self, controller):
assert self._control_dependencies_stack[-1] is controller
self._control_dependencies_stack.pop()
def _current_control_dependencies(self):
ret = set()
for controller in self._control_dependencies_stack:
for op in controller.control_inputs:
ret.add(op)
return ret
def _control_dependencies_for_inputs(self, input_ops):
"""For an op that takes `input_ops` as inputs, compute control inputs.
The returned control dependencies should yield an execution that
is equivalent to adding all control inputs in
self._control_dependencies_stack to a newly created op. However,
this function attempts to prune the returned control dependencies
by observing that nodes created within the same `with
control_dependencies(...):` block may have data dependencies that make
the explicit approach redundant.
Args:
input_ops: The data input ops for an op to be created.
Returns:
A list of control inputs for the op to be created.
"""
ret = []
for controller in self._control_dependencies_stack:
# If any of the input_ops already depends on the inputs from controller,
# we say that the new op is dominated (by that input), and we therefore
# do not need to add control dependencies for this controller's inputs.
dominated = False
for op in input_ops:
if controller.op_in_group(op):
dominated = True
break
if not dominated:
# Don't add a control input if we already have a data dependency on it.
# NOTE(mrry): We do not currently track transitive data dependencies,
# so we may add redundant control inputs.
ret.extend(c for c in controller.control_inputs if c not in input_ops)
return ret
def _record_op_seen_by_control_dependencies(self, op):
"""Record that the given op depends on all registered control dependencies.
Args:
op: An Operation.
"""
for controller in self._control_dependencies_stack:
controller.add_op(op)
def control_dependencies(self, control_inputs):
"""Returns a context manager that specifies control dependencies.
Use with the `with` keyword to specify that all operations constructed
within the context should have control dependencies on
`control_inputs`. For example:
```python
with g.control_dependencies([a, b, c]):
# `d` and `e` will only run after `a`, `b`, and `c` have executed.
d = ...
e = ...
```
Multiple calls to `control_dependencies()` can be nested, and in
that case a new `Operation` will have control dependencies on the union
of `control_inputs` from all active contexts.
```python
with g.control_dependencies([a, b]):
# Ops constructed here run after `a` and `b`.
with g.control_dependencies([c, d]):
# Ops constructed here run after `a`, `b`, `c`, and `d`.
```
You can pass None to clear the control dependencies:
```python
with g.control_dependencies([a, b]):
# Ops constructed here run after `a` and `b`.
with g.control_dependencies(None):
# Ops constructed here run normally, not waiting for either `a` or `b`.
with g.control_dependencies([c, d]):
# Ops constructed here run after `c` and `d`, also not waiting
# for either `a` or `b`.
```
*N.B.* The control dependencies context applies *only* to ops that
are constructed within the context. Merely using an op or tensor
in the context does not add a control dependency. The following
example illustrates this point:
```python
# WRONG
def my_func(pred, tensor):
t = tf.matmul(tensor, tensor)
with tf.control_dependencies([pred]):
# The matmul op is created outside the context, so no control
# dependency will be added.
return t
# RIGHT
def my_func(pred, tensor):
with tf.control_dependencies([pred]):
# The matmul op is created in the context, so a control dependency
# will be added.
return tf.matmul(tensor, tensor)
```
Also note that though execution of ops created under this scope will trigger
execution of the dependencies, the ops created under this scope might still
    be pruned from a normal TensorFlow graph. For example, in the following
snippet of code the dependencies are never executed:
```python
loss = model.loss()
with tf.control_dependencies(dependencies):
loss = loss + tf.constant(1) # note: dependencies ignored in the
# backward pass
return tf.gradients(loss, model.variables)
```
This is because evaluating the gradient graph does not require evaluating
the constant(1) op created in the forward pass.
Args:
control_inputs: A list of `Operation` or `Tensor` objects which must be
executed or computed before running the operations defined in the
context. Can also be `None` to clear the control dependencies.
Returns:
A context manager that specifies control dependencies for all
operations constructed within the context.
Raises:
TypeError: If `control_inputs` is not a list of `Operation` or
`Tensor` objects.
"""
if control_inputs is None:
return self._ControlDependenciesController(self, None)
# First convert the inputs to ops, and deduplicate them.
# NOTE(mrry): Other than deduplication, we do not currently track direct
# or indirect dependencies between control_inputs, which may result in
# redundant control inputs.
control_ops = []
current = self._current_control_dependencies()
for c in control_inputs:
      # The hasattr(c, "_handle") check is designed to match ResourceVariables.
      # This is so control dependencies on a variable or on an unread variable
      # don't trigger reads.
if (isinstance(c, IndexedSlices) or
(hasattr(c, "_handle") and hasattr(c, "op"))):
c = c.op
c = self.as_graph_element(c)
if isinstance(c, Tensor):
c = c.op
elif not isinstance(c, Operation):
raise TypeError("Control input must be Operation or Tensor: %s" % c)
if c not in current:
control_ops.append(c)
current.add(c)
return self._ControlDependenciesController(self, control_ops)
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def _attr_scope(self, attr_map):
"""EXPERIMENTAL: A context manager for setting attributes on operators.
This context manager can be used to add additional
attributes to operators within the scope of the context.
For example:
with ops.Graph().as_default() as g:
f_1 = Foo() # No extra attributes
with g._attr_scope({"_a": tf.attr_value_pb2.AttrValue(b=False)}):
f_2 = Foo() # Additional attribute _a=False
with g._attr_scope({"_a": tf.attr_value_pb2.AttrValue(b=True)}):
          f_3 = Foo()  # Additional attribute _a=True
with g._attr_scope({"_a": None}):
f_4 = Foo() # No additional attributes.
Args:
attr_map: A dictionary mapping attr name strings to AttrValue protocol
buffers or None.
Returns:
      A context manager that sets the additional attributes for one or more
      ops created in that context.
Raises:
TypeError: If attr_map is not a dictionary mapping
strings to AttrValue protobufs.
"""
if not isinstance(attr_map, dict):
raise TypeError("attr_map must be a dictionary mapping "
"strings to AttrValue protocol buffers")
# The saved_attrs dictionary stores any currently-set labels that
# will be overridden by this context manager.
saved_attrs = {}
# Install the given attribute
for name, attr in attr_map.items():
if not (isinstance(name, six.string_types) and
(isinstance(attr, (type(None), attr_value_pb2.AttrValue)) or
callable(attr))):
raise TypeError("attr_map must be a dictionary mapping "
"strings to AttrValue protocol buffers or "
"callables that emit AttrValue protocol buffers")
try:
saved_attrs[name] = self._attr_scope_map[name]
except KeyError:
pass
if attr is None:
del self._attr_scope_map[name]
else:
self._attr_scope_map[name] = attr
try:
yield # The code within the context runs here.
finally:
# Remove the attributes set for this context, and restore any saved
# attributes.
for name, attr in attr_map.items():
try:
self._attr_scope_map[name] = saved_attrs[name]
except KeyError:
del self._attr_scope_map[name]
# pylint: enable=g-doc-return-or-yield
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def _kernel_label_map(self, op_to_kernel_label_map):
"""EXPERIMENTAL: A context manager for setting kernel labels.
This context manager can be used to select particular
implementations of kernels within the scope of the context.
For example:
with ops.Graph().as_default() as g:
f_1 = Foo() # Uses the default registered kernel for the Foo op.
with g.kernel_label_map({"Foo": "v_2"}):
f_2 = Foo() # Uses the registered kernel with label "v_2"
# for the Foo op.
with g.kernel_label_map({"Foo": "v_3"}):
f_3 = Foo() # Uses the registered kernel with label "v_3"
# for the Foo op.
with g.kernel_label_map({"Foo": ""}):
f_4 = Foo() # Uses the default registered kernel
# for the Foo op.
Args:
op_to_kernel_label_map: A dictionary mapping op type strings to kernel
label strings.
Returns:
A context manager that sets the kernel label to be used for one or more
ops created in that context.
Raises:
TypeError: If op_to_kernel_label_map is not a dictionary mapping
strings to strings.
"""
if not isinstance(op_to_kernel_label_map, dict):
raise TypeError("op_to_kernel_label_map must be a dictionary mapping "
"strings to strings")
# The saved_labels dictionary stores any currently-set labels that
# will be overridden by this context manager.
saved_labels = {}
# Install the given label
for op_type, label in op_to_kernel_label_map.items():
if not (isinstance(op_type, six.string_types) and
isinstance(label, six.string_types)):
raise TypeError("op_to_kernel_label_map must be a dictionary mapping "
"strings to strings")
try:
saved_labels[op_type] = self._op_to_kernel_label_map[op_type]
except KeyError:
pass
self._op_to_kernel_label_map[op_type] = label
try:
yield # The code within the context runs here.
finally:
# Remove the labels set for this context, and restore any saved labels.
for op_type, label in op_to_kernel_label_map.items():
try:
self._op_to_kernel_label_map[op_type] = saved_labels[op_type]
except KeyError:
del self._op_to_kernel_label_map[op_type]
# pylint: enable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def _override_gradient_function(self, gradient_function_map):
"""Specify gradient function for the given op type."""
# This is an internal API and we don't need nested context for this.
assert not self._gradient_function_map
self._gradient_function_map = gradient_function_map
yield
self._gradient_function_map = {}
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def gradient_override_map(self, op_type_map):
"""EXPERIMENTAL: A context manager for overriding gradient functions.
This context manager can be used to override the gradient function
that will be used for ops within the scope of the context.
For example:
```python
@tf.RegisterGradient("CustomSquare")
def _custom_square_grad(op, grad):
# ...
with tf.Graph().as_default() as g:
c = tf.constant(5.0)
s_1 = tf.square(c) # Uses the default gradient for tf.square.
with g.gradient_override_map({"Square": "CustomSquare"}):
        s_2 = tf.square(c)  # Uses _custom_square_grad to compute the
# gradient of s_2.
```
Args:
op_type_map: A dictionary mapping op type strings to alternative op type
strings.
Returns:
A context manager that sets the alternative op type to be used for one
or more ops created in that context.
Raises:
TypeError: If `op_type_map` is not a dictionary mapping strings to
strings.
"""
if not isinstance(op_type_map, dict):
raise TypeError("op_type_map must be a dictionary mapping "
"strings to strings")
# The saved_mappings dictionary stores any currently-set mappings that
# will be overridden by this context manager.
saved_mappings = {}
    # Install the given gradient override mappings.
for op_type, mapped_op_type in op_type_map.items():
if not (isinstance(op_type, six.string_types) and
isinstance(mapped_op_type, six.string_types)):
raise TypeError("op_type_map must be a dictionary mapping "
"strings to strings")
try:
saved_mappings[op_type] = self._gradient_override_map[op_type]
except KeyError:
pass
self._gradient_override_map[op_type] = mapped_op_type
try:
yield # The code within the context runs here.
finally:
      # Remove the mappings set for this context, and restore any saved
      # mappings.
for op_type, mapped_op_type in op_type_map.items():
try:
self._gradient_override_map[op_type] = saved_mappings[op_type]
except KeyError:
del self._gradient_override_map[op_type]
# pylint: enable=g-doc-return-or-yield
def prevent_feeding(self, tensor):
"""Marks the given `tensor` as unfeedable in this graph."""
self._unfeedable_tensors.add(tensor)
def is_feedable(self, tensor):
"""Returns `True` if and only if `tensor` is feedable."""
return tensor not in self._unfeedable_tensors
def prevent_fetching(self, op):
"""Marks the given `op` as unfetchable in this graph."""
self._unfetchable_ops.add(op)
def is_fetchable(self, tensor_or_op):
"""Returns `True` if and only if `tensor_or_op` is fetchable."""
if isinstance(tensor_or_op, Tensor):
return tensor_or_op.op not in self._unfetchable_ops
else:
return tensor_or_op not in self._unfetchable_ops
def switch_to_thread_local(self):
"""Make device, colocation and dependencies stacks thread-local.
    Device, colocation and dependencies stacks are not thread-local by default.
If multiple threads access them, then the state is shared. This means that
one thread may affect the behavior of another thread.
After this method is called, the stacks become thread-local. If multiple
threads access them, then the state is not shared. Each thread uses its own
value; a thread doesn't affect other threads by mutating such a stack.
The initial value for every thread's stack is set to the current value
of the stack when `switch_to_thread_local()` was first called.
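    A minimal sketch of the intended usage (illustrative only):

    ```python
    import threading

    g = tf.Graph()
    g.switch_to_thread_local()

    def worker():
      # This thread sees its own copy of the device stack; entering a
      # device scope here does not affect ops created in other threads.
      with g.as_default(), g.device("/cpu:0"):
        pass

    threading.Thread(target=worker).start()
    ```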
"""
if not self._stack_state_is_thread_local:
self._stack_state_is_thread_local = True
@property
def _device_function_stack(self):
if self._stack_state_is_thread_local:
# This may be called from a thread where device_function_stack doesn't yet
# exist.
# pylint: disable=protected-access
if not hasattr(self._thread_local, "_device_function_stack"):
stack_copy_for_this_thread = self._graph_device_function_stack.copy()
self._thread_local._device_function_stack = stack_copy_for_this_thread
return self._thread_local._device_function_stack
# pylint: enable=protected-access
else:
return self._graph_device_function_stack
@property
def _device_functions_outer_to_inner(self):
user_device_specs = self._device_function_stack.peek_objs()
device_functions = [spec.function for spec in user_device_specs]
device_functions_outer_to_inner = list(reversed(device_functions))
return device_functions_outer_to_inner
def _snapshot_device_function_stack_metadata(self):
"""Return device function stack as a list of TraceableObjects.
Returns:
[traceable_stack.TraceableObject, ...] where each TraceableObject's .obj
member is a displayable name for the user's argument to Graph.device, and
the filename and lineno members point to the code location where
Graph.device was called directly or indirectly by the user.
"""
snapshot = []
for obj in self._device_function_stack.peek_traceable_objs():
obj_copy = obj.copy_metadata()
obj_copy.obj = obj.obj.display_name
snapshot.append(obj_copy)
return snapshot
@_device_function_stack.setter
def _device_function_stack(self, device_function_stack):
if self._stack_state_is_thread_local:
# pylint: disable=protected-access
self._thread_local._device_function_stack = device_function_stack
# pylint: enable=protected-access
else:
self._graph_device_function_stack = device_function_stack
@property
def _colocation_stack(self):
"""Return thread-local copy of colocation stack."""
if self._stack_state_is_thread_local:
# This may be called from a thread where colocation_stack doesn't yet
# exist.
# pylint: disable=protected-access
if not hasattr(self._thread_local, "_colocation_stack"):
stack_copy_for_this_thread = self._graph_colocation_stack.copy()
self._thread_local._colocation_stack = stack_copy_for_this_thread
return self._thread_local._colocation_stack
# pylint: enable=protected-access
else:
return self._graph_colocation_stack
def _snapshot_colocation_stack_metadata(self):
"""Return colocation stack metadata as a dictionary."""
return {
traceable_obj.obj.name: traceable_obj.copy_metadata()
for traceable_obj in self._colocation_stack.peek_traceable_objs()
}
@_colocation_stack.setter
def _colocation_stack(self, colocation_stack):
if self._stack_state_is_thread_local:
# pylint: disable=protected-access
self._thread_local._colocation_stack = colocation_stack
# pylint: enable=protected-access
else:
self._graph_colocation_stack = colocation_stack
@property
def _control_dependencies_stack(self):
if self._stack_state_is_thread_local:
# This may be called from a thread where control_dependencies_stack
# doesn't yet exist.
if not hasattr(self._thread_local, "_control_dependencies_stack"):
self._thread_local._control_dependencies_stack = (
self._graph_control_dependencies_stack[:])
return self._thread_local._control_dependencies_stack
else:
return self._graph_control_dependencies_stack
@_control_dependencies_stack.setter
def _control_dependencies_stack(self, control_dependencies):
if self._stack_state_is_thread_local:
self._thread_local._control_dependencies_stack = control_dependencies
else:
self._graph_control_dependencies_stack = control_dependencies
@property
def _distribution_strategy_stack(self):
"""A stack to maintain distribution strategy context for each thread."""
if not hasattr(self._thread_local, "_distribution_strategy_stack"):
self._thread_local._distribution_strategy_stack = [] # pylint: disable=protected-access
return self._thread_local._distribution_strategy_stack # pylint: disable=protected-access
@_distribution_strategy_stack.setter
def _distribution_strategy_stack(self, _distribution_strategy_stack):
self._thread_local._distribution_strategy_stack = ( # pylint: disable=protected-access
_distribution_strategy_stack)
@property
def _global_distribute_strategy_scope(self):
"""For implementing `tf.distribute.set_strategy()`."""
if not hasattr(self._thread_local, "distribute_strategy_scope"):
self._thread_local.distribute_strategy_scope = None
return self._thread_local.distribute_strategy_scope
@_global_distribute_strategy_scope.setter
def _global_distribute_strategy_scope(self, distribute_strategy_scope):
self._thread_local.distribute_strategy_scope = (distribute_strategy_scope)
@property
def _auto_cast_variable_read_dtype(self):
"""The dtype that instances of `AutoCastVariable` will be casted to.
This is None if `AutoCastVariables` should not be casted.
See `AutoCastVariable` for more information.
Returns:
The dtype that instances of `AutoCastVariable` will be casted to.
"""
if not hasattr(self._thread_local, "_auto_cast_variable_read_dtype"):
self._thread_local._auto_cast_variable_read_dtype = None # pylint: disable=protected-access
return self._thread_local._auto_cast_variable_read_dtype # pylint: disable=protected-access
@_auto_cast_variable_read_dtype.setter
def _auto_cast_variable_read_dtype(self, dtype):
if dtype:
dtype = dtypes.as_dtype(dtype)
self._thread_local._auto_cast_variable_read_dtype = dtype # pylint: disable=protected-access
@tf_contextlib.contextmanager
def _enable_auto_casting_variables(self, dtype):
"""Context manager to automatically cast AutoCastVariables.
    If an AutoCastVariable `var` is used under this context manager, it will be
    cast to `dtype` before being used.
    See `AutoCastVariable` for more information.
    Args:
      dtype: The dtype that AutoCastVariables should be cast to.
Yields:
Nothing.
"""
prev_read_dtype = self._auto_cast_variable_read_dtype
try:
self._auto_cast_variable_read_dtype = dtype
yield
finally:
self._auto_cast_variable_read_dtype = prev_read_dtype
def _mutation_lock(self):
"""Returns a lock to guard code that creates & mutates ops.
See the comment for self._group_lock for more info.
"""
return self._group_lock.group(_MUTATION_LOCK_GROUP)
def _session_run_lock(self):
"""Returns a lock to guard code for Session.run.
See the comment for self._group_lock for more info.
"""
return self._group_lock.group(_SESSION_RUN_LOCK_GROUP)
# TODO(agarwal): currently device directives in an outer eager scope will not
# apply to inner graph mode code. Fix that.
@tf_export(v1=["device"])
def device(device_name_or_function):
"""Wrapper for `Graph.device()` using the default graph.
See `tf.Graph.device` for more details.
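  For example (a minimal sketch; the device string is illustrative):

  ```python
  with tf.compat.v1.device("/device:GPU:0"):
    # Ops created here are pinned to GPU 0 if one is available.
    a = tf.constant([1.0, 2.0])
  ```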
Args:
device_name_or_function: The device name or function to use in the context.
Returns:
A context manager that specifies the default device to use for newly
created ops.
Raises:
RuntimeError: If eager execution is enabled and a function is passed in.
"""
if context.executing_eagerly():
if callable(device_name_or_function):
raise RuntimeError(
"tf.device does not support functions when eager execution "
"is enabled.")
return context.device(device_name_or_function)
elif executing_eagerly_outside_functions():
@tf_contextlib.contextmanager
def combined(device_name_or_function):
with get_default_graph().device(device_name_or_function):
if not callable(device_name_or_function):
with context.device(device_name_or_function):
yield
else:
yield
return combined(device_name_or_function)
else:
return get_default_graph().device(device_name_or_function)
@tf_export("device", v1=[])
def device_v2(device_name):
"""Specifies the device for ops created/executed in this context.
This function specifies the device to be used for ops created/executed in a
particular context. Nested contexts will inherit and also create/execute
their ops on the specified device. If a specific device is not required,
consider not using this function so that a device can be automatically
assigned. In general the use of this function is optional. `device_name` can
be fully specified, as in "/job:worker/task:1/device:cpu:0", or partially
specified, containing only a subset of the "/"-separated fields. Any fields
which are specified will override device annotations from outer scopes.
For example:
```python
with tf.device('/job:foo'):
# ops created here have devices with /job:foo
with tf.device('/job:bar/task:0/device:gpu:2'):
# ops created here have the fully specified device above
with tf.device('/device:gpu:1'):
# ops created here have the device '/job:foo/device:gpu:1'
```
Args:
device_name: The device name to use in the context.
Returns:
A context manager that specifies the default device to use for newly
created ops.
Raises:
RuntimeError: If a function is passed in.
"""
if callable(device_name):
raise RuntimeError("tf.device does not support functions.")
return device(device_name)
@tf_export(v1=["container"])
def container(container_name):
"""Wrapper for `Graph.container()` using the default graph.
Args:
container_name: The container string to use in the context.
Returns:
A context manager that specifies the default container to use for newly
created stateful ops.
"""
return get_default_graph().container(container_name)
def _colocate_with_for_gradient(op, gradient_uid, ignore_existing=False):
if context.executing_eagerly():
if op is not None:
if not hasattr(op, "device"):
op = internal_convert_to_tensor_or_indexed_slices(op)
return device(op.device)
else:
return NullContextmanager()
else:
default_graph = get_default_graph()
if isinstance(op, EagerTensor):
if default_graph.building_function:
return default_graph.device(op.device)
else:
raise ValueError("Encountered an Eager-defined Tensor during graph "
"construction, but a function was not being built.")
return default_graph._colocate_with_for_gradient(
op, gradient_uid=gradient_uid, ignore_existing=ignore_existing)
# Internal interface to colocate_with. colocate_with has been deprecated from
# public API. There are still a few internal uses of colocate_with. Add internal
# only API for those uses to avoid deprecation warning.
def colocate_with(op, ignore_existing=False):
return _colocate_with_for_gradient(op, None, ignore_existing=ignore_existing)
@deprecation.deprecated(
date=None, instructions="Colocations handled automatically by placer.")
@tf_export(v1=["colocate_with"])
def _colocate_with(op, ignore_existing=False):
return colocate_with(op, ignore_existing)
@tf_export("control_dependencies")
def control_dependencies(control_inputs):
"""Wrapper for `Graph.control_dependencies()` using the default graph.
See `tf.Graph.control_dependencies`
for more details.
When eager execution is enabled, any callable object in the `control_inputs`
list will be called.
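  For example, a sketch of the eager behavior (illustrative only):

  ```python
  v = tf.Variable(1.0)
  with tf.control_dependencies([lambda: v.assign_add(1.0)]):
    # In eager mode the callable runs immediately when
    # `control_dependencies` is called; no control edges are recorded.
    x = v + 1.0
  ```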
Args:
control_inputs: A list of `Operation` or `Tensor` objects which must be
executed or computed before running the operations defined in the context.
Can also be `None` to clear the control dependencies. If eager execution
is enabled, any callable object in the `control_inputs` list will be
called.
Returns:
A context manager that specifies control dependencies for all
operations constructed within the context.
"""
if context.executing_eagerly():
if control_inputs:
# Execute any pending callables.
for control in control_inputs:
if callable(control):
control()
return NullContextmanager()
else:
return get_default_graph().control_dependencies(control_inputs)
class _DefaultStack(threading.local):
"""A thread-local stack of objects for providing implicit defaults."""
def __init__(self):
super(_DefaultStack, self).__init__()
self._enforce_nesting = True
self.stack = []
def get_default(self):
return self.stack[-1] if len(self.stack) >= 1 else None
def reset(self):
self.stack = []
def is_cleared(self):
return not self.stack
@property
def enforce_nesting(self):
return self._enforce_nesting
@enforce_nesting.setter
def enforce_nesting(self, value):
self._enforce_nesting = value
@tf_contextlib.contextmanager
def get_controller(self, default):
"""A context manager for manipulating a default stack."""
self.stack.append(default)
try:
yield default
finally:
# stack may be empty if reset() was called
if self.stack:
if self._enforce_nesting:
if self.stack[-1] is not default:
raise AssertionError(
"Nesting violated for default stack of %s objects" %
type(default))
self.stack.pop()
else:
self.stack.remove(default)
_default_session_stack = _DefaultStack() # pylint: disable=protected-access
def default_session(session):
"""Python "with" handler for defining a default session.
This function provides a means of registering a session for handling
Tensor.eval() and Operation.run() calls. It is primarily intended for use
by session.Session, but can be used with any object that implements
the Session.run() interface.
Use with the "with" keyword to specify that Tensor.eval() and Operation.run()
invocations within the scope of a block should be executed by a particular
session.
The default session applies to the current thread only, so it is always
possible to inspect the call stack and determine the scope of a default
session. If you create a new thread, and wish to use the default session
in that thread, you must explicitly add a "with ops.default_session(sess):"
block in that thread's function.
Example:
  The following code examples are equivalent (the third shows how an explicit
  session argument overrides the default):
# 1. Using the Session object directly:
sess = ...
c = tf.constant(5.0)
sess.run(c)
# 2. Using default_session():
sess = ...
with ops.default_session(sess):
c = tf.constant(5.0)
result = c.eval()
# 3. Overriding default_session():
sess = ...
with ops.default_session(sess):
c = tf.constant(5.0)
with ops.default_session(...):
c.eval(session=sess)
Args:
session: The session to be installed as the default session.
Returns:
A context manager for the default session.
"""
return _default_session_stack.get_controller(session)
@tf_export(v1=["get_default_session"])
def get_default_session():
"""Returns the default session for the current thread.
The returned `Session` will be the innermost session on which a
`Session` or `Session.as_default()` context has been entered.
NOTE: The default session is a property of the current thread. If you
create a new thread, and wish to use the default session in that
thread, you must explicitly add a `with sess.as_default():` in that
thread's function.
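  For example (a minimal sketch):

  ```python
  sess = tf.compat.v1.Session()
  with sess.as_default():
    assert tf.compat.v1.get_default_session() is sess
  ```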
Returns:
The default `Session` being used in the current thread.
"""
return _default_session_stack.get_default()
def _eval_using_default_session(tensors, feed_dict, graph, session=None):
"""Uses the default session to evaluate one or more tensors.
Args:
tensors: A single Tensor, or a list of Tensor objects.
feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists,
numpy ndarrays, TensorProtos, or strings.
graph: The graph in which the tensors are defined.
session: (Optional) A different session to use to evaluate "tensors".
Returns:
Either a single numpy ndarray if "tensors" is a single tensor; or a list
of numpy ndarrays that each correspond to the respective element in
"tensors".
Raises:
ValueError: If no default session is available; the default session
does not have "graph" as its graph; or if "session" is specified,
and it does not have "graph" as its graph.
"""
if session is None:
session = get_default_session()
if session is None:
raise ValueError("Cannot evaluate tensor using `eval()`: No default "
"session is registered. Use `with "
"sess.as_default()` or pass an explicit session to "
"`eval(session=sess)`")
if session.graph is not graph:
raise ValueError("Cannot use the default session to evaluate tensor: "
"the tensor's graph is different from the session's "
"graph. Pass an explicit session to "
"`eval(session=sess)`.")
else:
if session.graph is not graph:
raise ValueError("Cannot use the given session to evaluate tensor: "
"the tensor's graph is different from the session's "
"graph.")
return session.run(tensors, feed_dict)
def _run_using_default_session(operation, feed_dict, graph, session=None):
"""Uses the default session to run "operation".
Args:
operation: The Operation to be run.
feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists,
numpy ndarrays, TensorProtos, or strings.
graph: The graph in which "operation" is defined.
session: (Optional) A different session to use to run "operation".
Raises:
ValueError: If no default session is available; the default session
does not have "graph" as its graph; or if "session" is specified,
and it does not have "graph" as its graph.
"""
if session is None:
session = get_default_session()
if session is None:
raise ValueError("Cannot execute operation using `run()`: No default "
"session is registered. Use `with "
"sess.as_default():` or pass an explicit session to "
"`run(session=sess)`")
if session.graph is not graph:
raise ValueError("Cannot use the default session to execute operation: "
"the operation's graph is different from the "
"session's graph. Pass an explicit session to "
"run(session=sess).")
else:
if session.graph is not graph:
raise ValueError("Cannot use the given session to execute operation: "
"the operation's graph is different from the session's "
"graph.")
session.run(operation, feed_dict)
class _DefaultGraphStack(_DefaultStack): # pylint: disable=protected-access
"""A thread-local stack of objects for providing an implicit default graph."""
def __init__(self):
super(_DefaultGraphStack, self).__init__()
self._global_default_graph = None
def get_default(self):
"""Override that returns a global default if the stack is empty."""
ret = super(_DefaultGraphStack, self).get_default()
if ret is None:
ret = self._GetGlobalDefaultGraph()
return ret
def _GetGlobalDefaultGraph(self):
if self._global_default_graph is None:
      # TODO(mrry): Perhaps log that the default graph is being used, or
      # provide some other feedback to prevent confusion when a mixture of
# the global default graph and an explicit graph are combined in the
# same process.
self._global_default_graph = Graph()
return self._global_default_graph
def reset(self):
super(_DefaultGraphStack, self).reset()
self._global_default_graph = None
@tf_contextlib.contextmanager
def get_controller(self, default):
context.context().context_switches.push(default.building_function,
default.as_default,
default._device_function_stack)
try:
with super(_DefaultGraphStack,
self).get_controller(default) as g, context.graph_mode():
yield g
finally:
# If an exception is raised here it may be hiding a related exception in
# the try-block (just above).
context.context().context_switches.pop()
_default_graph_stack = _DefaultGraphStack()
# Shared helper used in init_scope and executing_eagerly_outside_functions
# to obtain the outermost context that is not building a function, and the
# innermost non-empty device stack.
def _get_outer_context_and_inner_device_stack():
"""Get the outermost context not building a function."""
default_graph = get_default_graph()
outer_context = None
innermost_nonempty_device_stack = default_graph._device_function_stack # pylint: disable=protected-access
if not _default_graph_stack.stack:
# If the default graph stack is empty, then we cannot be building a
# function. Install the global graph (which, in this case, is also the
# default graph) as the outer context.
if default_graph.building_function:
raise RuntimeError("The global graph is building a function.")
outer_context = default_graph.as_default
else:
# Find a context that is not building a function.
for stack_entry in reversed(context.context().context_switches.stack):
if not innermost_nonempty_device_stack:
innermost_nonempty_device_stack = stack_entry.device_stack
if not stack_entry.is_building_function:
outer_context = stack_entry.enter_context_fn
break
if outer_context is None:
# As a last resort, obtain the global default graph; this graph doesn't
# necessarily live on the graph stack (and hence it doesn't necessarily
# live on the context stack), but it is stored in the graph stack's
# encapsulating object.
outer_context = _default_graph_stack._GetGlobalDefaultGraph().as_default # pylint: disable=protected-access
if outer_context is None:
# Sanity check; this shouldn't be triggered.
raise RuntimeError("All graphs are building functions, and no "
"eager context was previously active.")
return outer_context, innermost_nonempty_device_stack
# pylint: disable=g-doc-return-or-yield,line-too-long
@tf_export("init_scope")
@tf_contextlib.contextmanager
def init_scope():
"""A context manager that lifts ops out of control-flow scopes and function-building graphs.
There is often a need to lift variable initialization ops out of control-flow
scopes, function-building graphs, and gradient tapes. Entering an
`init_scope` is a mechanism for satisfying these desiderata. In particular,
entering an `init_scope` has three effects:
(1) All control dependencies are cleared the moment the scope is entered;
this is equivalent to entering the context manager returned from
`control_dependencies(None)`, which has the side-effect of exiting
control-flow scopes like `tf.cond` and `tf.while_loop`.
(2) All operations that are created while the scope is active are lifted
into the lowest context on the `context_stack` that is not building a
graph function. Here, a context is defined as either a graph or an eager
context. Every context switch, i.e., every installation of a graph as
the default graph and every switch into eager mode, is logged in a
thread-local stack called `context_switches`; the log entry for a
context switch is popped from the stack when the context is exited.
Entering an `init_scope` is equivalent to crawling up
`context_switches`, finding the first context that is not building a
graph function, and entering it. A caveat is that if graph mode is
enabled but the default graph stack is empty, then entering an
`init_scope` will simply install a fresh graph as the default one.
(3) The gradient tape is paused while the scope is active.
When eager execution is enabled, code inside an init_scope block runs with
eager execution enabled even when tracing a `tf.function`. For example:
```python
tf.compat.v1.enable_eager_execution()
@tf.function
def func():
# A function constructs TensorFlow graphs,
# it does not execute eagerly.
assert not tf.executing_eagerly()
with tf.init_scope():
# Initialization runs with eager execution enabled
assert tf.executing_eagerly()
```
Raises:
RuntimeError: if graph state is incompatible with this initialization.
"""
# pylint: enable=g-doc-return-or-yield,line-too-long
if context.executing_eagerly():
# Fastpath.
with tape.stop_recording():
yield
else:
# Retrieve the active name scope: entering an `init_scope` preserves
# the name scope of the current context.
scope = get_default_graph().get_name_scope()
if scope and scope[-1] != "/":
# Names that end with trailing slashes are treated by `name_scope` as
# absolute.
scope = scope + "/"
outer_context, innermost_nonempty_device_stack = (
_get_outer_context_and_inner_device_stack())
outer_graph = None
outer_device_stack = None
try:
with outer_context(), name_scope(
scope, skip_on_eager=False), control_dependencies(
None), tape.stop_recording():
context_manager = NullContextmanager
context_manager_input = None
if not context.executing_eagerly():
# The device stack is preserved when lifting into a graph. Eager
# execution doesn't implement device stacks and in particular it
# doesn't support device functions, so in general it's not possible
# to do the same when lifting into the eager context.
outer_graph = get_default_graph()
outer_device_stack = outer_graph._device_function_stack # pylint: disable=protected-access
outer_graph._device_function_stack = innermost_nonempty_device_stack # pylint: disable=protected-access
elif innermost_nonempty_device_stack is not None:
for device_spec in innermost_nonempty_device_stack.peek_objs():
if device_spec.function is None:
break
if device_spec.raw_string:
context_manager = context.device
context_manager_input = device_spec.raw_string
break
# It is currently not possible to have a device function in V2,
# but in V1 we are unable to apply device functions in eager mode.
# This means that we will silently skip some of the entries on the
# device stack in V1 + eager mode.
with context_manager(context_manager_input):
yield
finally:
# If an exception is raised here it may be hiding a related exception in
# try-block (just above).
if outer_graph is not None:
outer_graph._device_function_stack = outer_device_stack # pylint: disable=protected-access
@tf_export(v1=["executing_eagerly_outside_functions"])
def executing_eagerly_outside_functions():
"""Returns True if executing eagerly, even if inside a graph function.
This function will check the outermost context for the program and see if
  it is in eager mode. It is useful to compare with `tf.executing_eagerly()`,
  which checks the current context and will return `False` within a
  `tf.function` body. It can be used to build libraries that behave differently
  in eager runtime and v1 session runtime (deprecated).
Example:
>>> tf.compat.v1.enable_eager_execution()
>>> @tf.function
... def func():
... # A function constructs TensorFlow graphs, it does not execute eagerly,
... # but the outer most context is still eager.
... assert not tf.executing_eagerly()
... return tf.compat.v1.executing_eagerly_outside_functions()
>>> func()
<tf.Tensor: shape=(), dtype=bool, numpy=True>
Returns:
boolean, whether the outermost context is in eager mode.
"""
if context.executing_eagerly():
return True
else:
outer_context, _ = _get_outer_context_and_inner_device_stack()
with outer_context():
return context.executing_eagerly()
def inside_function():
  """Returns True if the current default graph is building a function."""
  return get_default_graph().building_function
@tf_export(v1=["enable_eager_execution"])
def enable_eager_execution(config=None, device_policy=None,
execution_mode=None):
"""Enables eager execution for the lifetime of this program.
  Eager execution provides an imperative interface to TensorFlow. With eager
  execution enabled, TensorFlow functions execute operations immediately (as
  opposed to adding to a graph to be executed later in a `tf.compat.v1.Session`)
  and return concrete values (as opposed to symbolic references to a node in a
  computational graph).
For example:
```python
tf.compat.v1.enable_eager_execution()
# After eager execution is enabled, operations are executed as they are
# defined and Tensor objects hold concrete values, which can be accessed as
# numpy.ndarray`s through the numpy() method.
assert tf.multiply(6, 7).numpy() == 42
```
Eager execution cannot be enabled after TensorFlow APIs have been used to
create or execute graphs. It is typically recommended to invoke this function
at program startup and not in a library (as most libraries should be usable
both with and without eager execution).
Args:
config: (Optional.) A `tf.compat.v1.ConfigProto` to use to configure the
environment in which operations are executed. Note that
`tf.compat.v1.ConfigProto` is also used to configure graph execution (via
`tf.compat.v1.Session`) and many options within `tf.compat.v1.ConfigProto`
are not implemented (or are irrelevant) when eager execution is enabled.
device_policy: (Optional.) Policy controlling how operations requiring
inputs on a specific device (e.g., a GPU 0) handle inputs on a different
device (e.g. GPU 1 or CPU). When set to None, an appropriate value will
be picked automatically. The value picked may change between TensorFlow
releases.
Valid values:
- tf.contrib.eager.DEVICE_PLACEMENT_EXPLICIT: raises an error if the
placement is not correct.
- tf.contrib.eager.DEVICE_PLACEMENT_WARN: copies the tensors which are not
on the right device but logs a warning.
- tf.contrib.eager.DEVICE_PLACEMENT_SILENT: silently copies the tensors.
Note that this may hide performance problems as there is no notification
provided when operations are blocked on the tensor being copied between
devices.
- tf.contrib.eager.DEVICE_PLACEMENT_SILENT_FOR_INT32: silently copies
int32 tensors, raising errors on the other ones.
execution_mode: (Optional.) Policy controlling how operations dispatched are
actually executed. When set to None, an appropriate value will be picked
automatically. The value picked may change between TensorFlow releases.
Valid values:
- tf.contrib.eager.SYNC: executes each operation synchronously.
- tf.contrib.eager.ASYNC: executes each operation asynchronously. These
operations may return "non-ready" handles.
Raises:
ValueError: If eager execution is enabled after creating/executing a
TensorFlow graph, or if options provided conflict with a previous call
to this function.
"""
_api_usage_gauge.get_cell().set(True)
if context.default_execution_mode != context.EAGER_MODE:
return enable_eager_execution_internal(
config=config,
device_policy=device_policy,
execution_mode=execution_mode,
server_def=None)
@tf_export(v1=["disable_eager_execution"])
def disable_eager_execution():
"""Disables eager execution.
This function can only be called before any Graphs, Ops, or Tensors have been
created. It can be used at the beginning of the program for complex migration
projects from TensorFlow 1.x to 2.x.
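  For example (a minimal sketch; this must run before any graphs, ops, or
  tensors have been created):

  ```python
  import tensorflow as tf

  tf.compat.v1.disable_eager_execution()
  assert not tf.executing_eagerly()
  ```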
"""
_api_usage_gauge.get_cell().set(False)
context.default_execution_mode = context.GRAPH_MODE
c = context.context_safe()
if c is not None:
c._thread_local_data.is_eager = False # pylint: disable=protected-access
def enable_eager_execution_internal(config=None,
device_policy=None,
execution_mode=None,
server_def=None):
"""Enables eager execution for the lifetime of this program.
Most of the doc string for enable_eager_execution is relevant here as well.
Args:
config: See enable_eager_execution doc string
device_policy: See enable_eager_execution doc string
execution_mode: See enable_eager_execution doc string
server_def: (Optional.) A tensorflow::ServerDef proto. Enables execution on
remote devices. GrpcServers need to be started by creating an identical
server_def to this, and setting the appropriate task_indexes, so that the
servers can communicate. It will then be possible to execute operations on
remote devices.
Raises:
ValueError
"""
if config is not None and not isinstance(config, config_pb2.ConfigProto):
raise TypeError("config must be a tf.ConfigProto, but got %s" %
type(config))
if device_policy not in (None, context.DEVICE_PLACEMENT_EXPLICIT,
context.DEVICE_PLACEMENT_WARN,
context.DEVICE_PLACEMENT_SILENT,
context.DEVICE_PLACEMENT_SILENT_FOR_INT32):
raise ValueError(
"device_policy must be one of None, tf.contrib.eager.DEVICE_PLACEMENT_*"
)
if execution_mode not in (None, context.SYNC, context.ASYNC):
raise ValueError(
"execution_mode must be one of None, tf.contrib.eager.SYNC, "
"tf.contrib.eager.ASYNC")
if context.default_execution_mode == context.GRAPH_MODE:
graph_mode_has_been_used = (
_default_graph_stack._global_default_graph is not None) # pylint: disable=protected-access
if graph_mode_has_been_used:
raise ValueError(
"tf.enable_eager_execution must be called at program startup.")
context.default_execution_mode = context.EAGER_MODE
# pylint: disable=protected-access
with context._context_lock:
if context._context is None:
context._set_context_locked(context.Context(
config=config,
device_policy=device_policy,
execution_mode=execution_mode,
server_def=server_def))
elif ((config is not None and config is not context._context._config) or
(device_policy is not None and
device_policy is not context._context._device_policy) or
(execution_mode is not None and
execution_mode is not context._context._execution_mode)):
raise ValueError(
"Trying to change the options of an active eager"
" execution. Context config: %s, specified config:"
" %s. Context device policy: %s, specified device"
" policy: %s. Context execution mode: %s, "
" specified execution mode %s." %
(context._context._config, config, context._context._device_policy,
device_policy, context._context._execution_mode, execution_mode))
else:
# We already created everything, so update the thread local data.
context._context._thread_local_data.is_eager = True
# Monkey patch to get rid of an unnecessary conditional since the context is
# now initialized.
context.context = context.context_safe
def eager_run(main=None, argv=None):
"""Runs the program with an optional main function and argv list.
The program will run with eager execution enabled.
Example:
```python
import tensorflow as tf
# Import subject to future changes:
from tensorflow.contrib.eager.python import tfe
def main(_):
u = tf.constant(6.0)
v = tf.constant(7.0)
print(u * v)
if __name__ == "__main__":
tfe.run()
```
Args:
main: the main function to run.
argv: the arguments to pass to it.
"""
enable_eager_execution()
app.run(main, argv)
@tf_export(v1=["reset_default_graph"])
def reset_default_graph():
"""Clears the default graph stack and resets the global default graph.
NOTE: The default graph is a property of the current thread. This
  function applies only to the current thread. Calling this function while
  a `tf.compat.v1.Session` or `tf.compat.v1.InteractiveSession` is active will
  result in undefined behavior. Using any previously created `tf.Operation` or
  `tf.Tensor` objects after calling this function will result in undefined
  behavior.
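  For example (a minimal sketch, graph mode only):

  ```python
  c = tf.constant(1.0)
  tf.compat.v1.reset_default_graph()
  d = tf.constant(2.0)
  assert c.graph is not d.graph  # `c` belongs to the discarded graph.
  ```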
Raises:
AssertionError: If this function is called within a nested graph.
"""
if not _default_graph_stack.is_cleared():
raise AssertionError("Do not use tf.reset_default_graph() to clear "
"nested graphs. If you need a cleared graph, "
"exit the nesting and create a new graph.")
_default_graph_stack.reset()
@tf_export(v1=["get_default_graph"])
def get_default_graph():
"""Returns the default graph for the current thread.
The returned graph will be the innermost graph on which a
`Graph.as_default()` context has been entered, or a global default
graph if none has been explicitly created.
NOTE: The default graph is a property of the current thread. If you
create a new thread, and wish to use the default graph in that
thread, you must explicitly add a `with g.as_default():` in that
thread's function.
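  For example (a minimal sketch):

  ```python
  g = tf.Graph()
  with g.as_default():
    assert tf.compat.v1.get_default_graph() is g
  ```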
Returns:
The default `Graph` being used in the current thread.
"""
return _default_graph_stack.get_default()
def has_default_graph():
"""Returns True if there is a default graph."""
return len(_default_graph_stack.stack) >= 1
def get_name_scope():
"""Returns the current name scope in the default_graph.
For example:
```python
with tf.name_scope('scope1'):
with tf.name_scope('scope2'):
print(tf.get_name_scope())
```
would print the string `scope1/scope2`.
Returns:
A string representing the current name scope.
"""
if context.executing_eagerly():
return context.context().scope_name.rstrip("/")
return get_default_graph().get_name_scope()
def _assert_same_graph(original_item, item):
"""Fail if the 2 items are from different graphs.
Args:
original_item: Original item to check against.
item: Item to check.
Raises:
ValueError: if graphs do not match.
"""
if original_item.graph is not item.graph:
raise ValueError("%s must be from the same graph as %s." %
(item, original_item))
def _get_graph_from_inputs(op_input_list, graph=None):
"""Returns the appropriate graph to use for the given inputs.
This library method provides a consistent algorithm for choosing the graph
in which an Operation should be constructed:
1. If the default graph is being used to construct a function, we
use the default graph.
2. If the "graph" is specified explicitly, we validate that all of the inputs
in "op_input_list" are compatible with that graph.
3. Otherwise, we attempt to select a graph from the first Operation-
or Tensor-valued input in "op_input_list", and validate that all other
such inputs are in the same graph.
4. If the graph was not specified and it could not be inferred from
"op_input_list", we attempt to use the default graph.
Args:
op_input_list: A list of inputs to an operation, which may include `Tensor`,
`Operation`, and other objects that may be converted to a graph element.
graph: (Optional) The explicit graph to use.
Raises:
TypeError: If op_input_list is not a list or tuple, or if graph is not a
Graph.
ValueError: If a graph is explicitly passed and not all inputs are from it,
or if the inputs are from multiple graphs, or we could not find a graph
and there was no default graph.
Returns:
The appropriate graph to use for the given inputs.
"""
current_default_graph = get_default_graph()
if current_default_graph.building_function:
return current_default_graph
op_input_list = tuple(op_input_list) # Handle generators correctly
if graph and not isinstance(graph, Graph):
raise TypeError("Input graph needs to be a Graph: %s" % graph)
# 1. We validate that all of the inputs are from the same graph. This is
# either the supplied graph parameter, or the first one selected from one
# the graph-element-valued inputs. In the latter case, we hold onto
# that input in original_graph_element so we can provide a more
# informative error if a mismatch is found.
original_graph_element = None
for op_input in op_input_list:
# Determine if this is a valid graph_element.
# TODO(josh11b): Note that we exclude subclasses of Tensor. Need to clean this
# up.
graph_element = None
if (isinstance(op_input, (Operation, _TensorLike)) and
((not isinstance(op_input, Tensor)) or type(op_input) == Tensor)): # pylint: disable=unidiomatic-typecheck
graph_element = op_input
else:
graph_element = _as_graph_element(op_input)
if graph_element is not None:
if not graph:
original_graph_element = graph_element
graph = graph_element.graph
elif original_graph_element is not None:
_assert_same_graph(original_graph_element, graph_element)
elif graph_element.graph is not graph:
raise ValueError("%s is not from the passed-in graph." % graph_element)
# 2. If all else fails, we use the default graph, which is always there.
return graph or current_default_graph
@tf_export(v1=["GraphKeys"])
class GraphKeys(object):
"""Standard names to use for graph collections.
The standard library uses various well-known names to collect and
retrieve values associated with a graph. For example, the
`tf.Optimizer` subclasses default to optimizing the variables
collected under `tf.GraphKeys.TRAINABLE_VARIABLES` if none is
specified, but it is also possible to pass an explicit list of
variables.
The following standard keys are defined:
* `GLOBAL_VARIABLES`: the default collection of `Variable` objects, shared
across distributed environment (model variables are subset of these). See
`tf.compat.v1.global_variables`
for more details.
Commonly, all `TRAINABLE_VARIABLES` variables will be in `MODEL_VARIABLES`,
and all `MODEL_VARIABLES` variables will be in `GLOBAL_VARIABLES`.
* `LOCAL_VARIABLES`: the subset of `Variable` objects that are local to each
    machine. Usually used for temporary variables, like counters.
Note: use `tf.contrib.framework.local_variable` to add to this collection.
* `MODEL_VARIABLES`: the subset of `Variable` objects that are used in the
model for inference (feed forward). Note: use
`tf.contrib.framework.model_variable` to add to this collection.
* `TRAINABLE_VARIABLES`: the subset of `Variable` objects that will
be trained by an optimizer. See
`tf.compat.v1.trainable_variables`
for more details.
* `SUMMARIES`: the summary `Tensor` objects that have been created in the
graph. See
`tf.compat.v1.summary.merge_all`
for more details.
* `QUEUE_RUNNERS`: the `QueueRunner` objects that are used to
produce input for a computation. See
`tf.compat.v1.train.start_queue_runners`
for more details.
* `MOVING_AVERAGE_VARIABLES`: the subset of `Variable` objects that will also
keep moving averages. See
`tf.compat.v1.moving_average_variables`
for more details.
* `REGULARIZATION_LOSSES`: regularization losses collected during graph
construction.
The following standard keys are _defined_, but their collections are **not**
automatically populated as many of the others are:
* `WEIGHTS`
* `BIASES`
* `ACTIVATIONS`
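  For example, collections are typically read and written through the wrapper
  functions (a minimal sketch):

  ```python
  loss = tf.constant(0.5)
  tf.compat.v1.add_to_collection(tf.compat.v1.GraphKeys.LOSSES, loss)
  losses = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.LOSSES)
  ```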
"""
# Key to collect Variable objects that are global (shared across machines).
# Default collection for all variables, except local ones.
GLOBAL_VARIABLES = "variables"
# Key to collect local variables that are local to the machine and are not
# saved/restored.
LOCAL_VARIABLES = "local_variables"
  # Key to collect local variables which are used to accumulate internal state
# to be used in tf.metrics.*.
METRIC_VARIABLES = "metric_variables"
# Key to collect model variables defined by layers.
MODEL_VARIABLES = "model_variables"
# Key to collect Variable objects that will be trained by the
# optimizers.
TRAINABLE_VARIABLES = "trainable_variables"
# Key to collect summaries.
SUMMARIES = "summaries"
# Key to collect QueueRunners.
QUEUE_RUNNERS = "queue_runners"
# Key to collect table initializers.
TABLE_INITIALIZERS = "table_initializer"
# Key to collect asset filepaths. An asset represents an external resource
# like a vocabulary file.
ASSET_FILEPATHS = "asset_filepaths"
# Key to collect Variable objects that keep moving averages.
MOVING_AVERAGE_VARIABLES = "moving_average_variables"
# Key to collect regularization losses at graph construction.
REGULARIZATION_LOSSES = "regularization_losses"
# Key to collect concatenated sharded variables.
CONCATENATED_VARIABLES = "concatenated_variables"
# Key to collect savers.
SAVERS = "savers"
# Key to collect weights
WEIGHTS = "weights"
# Key to collect biases
BIASES = "biases"
# Key to collect activations
ACTIVATIONS = "activations"
# Key to collect update_ops
UPDATE_OPS = "update_ops"
# Key to collect losses
LOSSES = "losses"
# Key to collect BaseSaverBuilder.SaveableObject instances for checkpointing.
SAVEABLE_OBJECTS = "saveable_objects"
# Key to collect all shared resources used by the graph which need to be
# initialized once per cluster.
RESOURCES = "resources"
# Key to collect all shared resources used in this graph which need to be
# initialized once per session.
LOCAL_RESOURCES = "local_resources"
# Trainable resource-style variables.
TRAINABLE_RESOURCE_VARIABLES = "trainable_resource_variables"
# Key to indicate various ops.
INIT_OP = "init_op"
LOCAL_INIT_OP = "local_init_op"
READY_OP = "ready_op"
READY_FOR_LOCAL_INIT_OP = "ready_for_local_init_op"
SUMMARY_OP = "summary_op"
GLOBAL_STEP = "global_step"
# Used to count the number of evaluations performed during a single evaluation
# run.
EVAL_STEP = "eval_step"
TRAIN_OP = "train_op"
# Key for control flow context.
COND_CONTEXT = "cond_context"
WHILE_CONTEXT = "while_context"
# Used to store v2 summary names.
_SUMMARY_COLLECTION = "_SUMMARY_V2"
# List of all collections that keep track of variables.
_VARIABLE_COLLECTIONS = [
GLOBAL_VARIABLES,
LOCAL_VARIABLES,
METRIC_VARIABLES,
MODEL_VARIABLES,
TRAINABLE_VARIABLES,
MOVING_AVERAGE_VARIABLES,
CONCATENATED_VARIABLES,
TRAINABLE_RESOURCE_VARIABLES,
]
# Key for streaming model ports.
# NOTE(yuanbyu): internal and experimental.
_STREAMING_MODEL_PORTS = "streaming_model_ports"
@decorator_utils.classproperty
@deprecation.deprecated(None, "Use `tf.GraphKeys.GLOBAL_VARIABLES` instead.")
def VARIABLES(cls): # pylint: disable=no-self-argument
return cls.GLOBAL_VARIABLES
def dismantle_graph(graph):
"""Cleans up reference cycles from a `Graph`.
Helpful for making sure the garbage collector doesn't need to run after a
temporary `Graph` is no longer needed.
Args:
graph: A `Graph` object to destroy. Neither it nor any of its ops are usable
after this function runs.
"""
memory.dismantle_ordered_dict(graph._functions) # pylint: disable=protected-access
# Now clean up Operation<->Graph reference cycles by clearing all of the
# attributes for the Graph and its ops.
graph_operations = graph.get_operations()
for op in graph_operations:
op.__dict__ = {}
graph.__dict__ = {}
@tf_export(v1=["add_to_collection"])
def add_to_collection(name, value):
"""Wrapper for `Graph.add_to_collection()` using the default graph.
See `tf.Graph.add_to_collection`
for more details.
Args:
name: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
value: The value to add to the collection. @compatibility(eager)
Collections are only supported in eager when variables are created inside
an EagerVariableStore (e.g. as part of a layer or template).
@end_compatibility
"""
get_default_graph().add_to_collection(name, value)
@tf_export(v1=["add_to_collections"])
def add_to_collections(names, value):
"""Wrapper for `Graph.add_to_collections()` using the default graph.
See `tf.Graph.add_to_collections`
for more details.
Args:
names: The key for the collections. The `GraphKeys` class contains many
standard names for collections.
value: The value to add to the collections. @compatibility(eager)
Collections are only supported in eager when variables are created inside
an EagerVariableStore (e.g. as part of a layer or template).
@end_compatibility
"""
get_default_graph().add_to_collections(names, value)
@tf_export(v1=["get_collection_ref"])
def get_collection_ref(key):
"""Wrapper for `Graph.get_collection_ref()` using the default graph.
See `tf.Graph.get_collection_ref`
for more details.
Args:
key: The key for the collection. For example, the `GraphKeys` class contains
many standard names for collections.
Returns:
The list of values in the collection with the given `name`, or an empty
list if no value has been added to that collection. Note that this returns
the collection list itself, which can be modified in place to change the
collection.
@compatibility(eager)
Collections are not supported when eager execution is enabled.
@end_compatibility
"""
return get_default_graph().get_collection_ref(key)
@tf_export(v1=["get_collection"])
def get_collection(key, scope=None):
"""Wrapper for `Graph.get_collection()` using the default graph.
See `tf.Graph.get_collection`
for more details.
Args:
key: The key for the collection. For example, the `GraphKeys` class contains
many standard names for collections.
scope: (Optional.) If supplied, the resulting list is filtered to include
only items whose `name` attribute matches using `re.match`. Items without
a `name` attribute are never returned if a scope is supplied and the
      choice of `re.match` means that a `scope` without special tokens filters
by prefix.
Returns:
The list of values in the collection with the given `name`, or
an empty list if no value has been added to that collection. The
list contains the values in the order under which they were
collected.
@compatibility(eager)
Collections are not supported when eager execution is enabled.
@end_compatibility
"""
return get_default_graph().get_collection(key, scope)
def get_all_collection_keys():
"""Returns a list of collections used in the default graph."""
return get_default_graph().get_all_collection_keys()
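# A minimal usage sketch of the collection wrappers above (assuming the
# TF1-style API that exports them, e.g. tf.compat.v1):
#   tf.compat.v1.add_to_collection("my_losses", loss_tensor)
#   losses = tf.compat.v1.get_collection("my_losses")   # -> [loss_tensor]
#   ref = tf.compat.v1.get_collection_ref("my_losses")  # list, mutable in place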
def name_scope(name, default_name=None, values=None, skip_on_eager=True):
"""Internal-only entry point for `name_scope*`.
Internal ops do not use the public API and instead rely on
`ops.name_scope` regardless of the execution mode. This function
dispatches to the correct `name_scope*` implementation based on
the arguments provided and the current mode. Specifically,
* if `values` contains a graph tensor `Graph.name_scope` is used;
* `name_scope_v1` is used in graph mode;
* `name_scope_v2` -- in eager mode.
Args:
name: The name argument that is passed to the op function.
default_name: The default name to use if the `name` argument is `None`.
values: The list of `Tensor` arguments that are passed to the op function.
skip_on_eager: Indicates to return NullContextmanager if executing eagerly.
By default this is True since naming tensors and operations in eager mode
      has little use and causes unnecessary performance overhead. However, it is
important to preserve variable names since they are often useful for
debugging and saved models.
Returns:
`name_scope*` context manager.
"""
ctx = context.context()
in_eager_mode = ctx.executing_eagerly()
if not in_eager_mode:
return internal_name_scope_v1(name, default_name, values)
if skip_on_eager:
return NullContextmanager()
name = default_name if name is None else name
if values:
# The presence of a graph tensor in `values` overrides the context.
# TODO(slebedev): this is Keras-specific and should be removed.
# pylint: disable=unidiomatic-typecheck
graph_value = next((value for value in values if type(value) == Tensor),
None)
# pylint: enable=unidiomatic-typecheck
if graph_value is not None:
return graph_value.graph.name_scope(name)
return name_scope_v2(name or "")
class internal_name_scope_v1(object): # pylint: disable=invalid-name
"""Graph-only version of `name_scope_v1`."""
@property
def name(self):
return self._name
def __init__(self, name, default_name=None, values=None):
"""Initialize the context manager.
Args:
name: The name argument that is passed to the op function.
default_name: The default name to use if the `name` argument is `None`.
values: The list of `Tensor` arguments that are passed to the op function.
Raises:
TypeError: if `default_name` is passed in but not a string.
"""
if not (default_name is None or isinstance(default_name, six.string_types)):
raise TypeError(
"`default_name` type (%s) is not a string type. You likely meant to "
"pass this into the `values` kwarg." % type(default_name))
self._name = default_name if name is None else name
self._default_name = default_name
self._values = values
def __enter__(self):
"""Start the scope block.
Returns:
The scope name.
Raises:
ValueError: if neither `name` nor `default_name` is provided
but `values` are.
"""
if self._name is None and self._values is not None:
      # We only raise an error if `values` was provided (i.e. is not None),
      # because tf.name_scope(None) with values=None is currently sometimes
      # used as an idiom to reset to the top scope.
raise ValueError(
"At least one of name (%s) and default_name (%s) must be provided."
% (self._name, self._default_name))
g = get_default_graph()
if self._values and not g.building_function:
# Specialize based on the knowledge that `_get_graph_from_inputs()`
# ignores `inputs` when building a function.
g_from_inputs = _get_graph_from_inputs(self._values)
if g_from_inputs is not g:
g = g_from_inputs
self._g_manager = g.as_default()
self._g_manager.__enter__()
else:
self._g_manager = None
else:
self._g_manager = None
try:
self._name_scope = g.name_scope(self._name)
return self._name_scope.__enter__()
except:
if self._g_manager is not None:
self._g_manager.__exit__(*sys.exc_info())
raise
def __exit__(self, *exc_info):
self._name_scope.__exit__(*exc_info)
if self._g_manager is not None:
self._g_manager.__exit__(*exc_info)
# Named like a function for backwards compatibility with the
# @tf_contextlib.contextmanager version, which was switched to a class to avoid
# some object creation overhead.
@tf_export(v1=["name_scope"])
class name_scope_v1(object): # pylint: disable=invalid-name
"""A context manager for use when defining a Python op.
This context manager validates that the given `values` are from the
same graph, makes that graph the default graph, and pushes a
name scope in that graph (see
`tf.Graph.name_scope`
for more details on that).
For example, to define a new Python op called `my_op`:
```python
def my_op(a, b, c, name=None):
with tf.name_scope(name, "MyOp", [a, b, c]) as scope:
a = tf.convert_to_tensor(a, name="a")
b = tf.convert_to_tensor(b, name="b")
c = tf.convert_to_tensor(c, name="c")
# Define some computation that uses `a`, `b`, and `c`.
return foo_op(..., name=scope)
```
"""
@property
def name(self):
return self._name
def __init__(self, name, default_name=None, values=None):
"""Initialize the context manager.
Args:
name: The name argument that is passed to the op function.
default_name: The default name to use if the `name` argument is `None`.
values: The list of `Tensor` arguments that are passed to the op function.
Raises:
TypeError: if `default_name` is passed in but not a string.
"""
self._name_scope = name_scope(
name, default_name, values, skip_on_eager=False)
self._name = default_name if name is None else name
def __enter__(self):
return self._name_scope.__enter__()
def __exit__(self, *exc_info):
return self._name_scope.__exit__(*exc_info)
def enter_eager_name_scope(ctx, name):
"""Updates the eager context to enter the given name scope."""
old_name = ctx.scope_name
if not name:
scope_name = ""
else:
if name.endswith("/"):
# A trailing slash breaks out of nested name scopes, indicating a
# fully specified scope name, for compatibility with Graph.name_scope.
scope_name = name
else:
scope_name = name + "/"
if old_name:
scope_name = old_name + scope_name
ctx.scope_name = scope_name
return scope_name, old_name
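# Illustrative composition (values assumed): with ctx.scope_name == "outer/",
# enter_eager_name_scope(ctx, "inner") returns ("outer/inner/", "outer/");
# a name that already ends in "/" is treated as a fully specified scope,
# mirroring Graph.name_scope semantics.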
@tf_export("name_scope", v1=[])
class name_scope_v2(object):
"""A context manager for use when defining a Python op.
This context manager pushes a name scope, which will make the name of all
operations added within it have a prefix.
For example, to define a new Python op called `my_op`:
```python
def my_op(a, b, c, name=None):
with tf.name_scope("MyOp") as scope:
a = tf.convert_to_tensor(a, name="a")
b = tf.convert_to_tensor(b, name="b")
c = tf.convert_to_tensor(c, name="c")
# Define some computation that uses `a`, `b`, and `c`.
return foo_op(..., name=scope)
```
When executed, the Tensors `a`, `b`, `c`, will have names `MyOp/a`, `MyOp/b`,
and `MyOp/c`.
If the scope name already exists, the name will be made unique by appending
`_n`. For example, calling `my_op` the second time will generate `MyOp_1/a`,
etc.
"""
def __init__(self, name):
"""Initialize the context manager.
Args:
name: The prefix to use on all names created within the name scope.
Raises:
ValueError: If name is None, or not a string.
"""
if name is None or not isinstance(name, six.string_types):
raise ValueError("name for name_scope must be a string.")
self._name = name
self._exit_fns = []
@property
def name(self):
return self._name
def __enter__(self):
"""Start the scope block.
Returns:
The scope name.
Raises:
ValueError: if neither `name` nor `default_name` is provided
but `values` are.
"""
ctx = context.context()
if ctx.executing_eagerly():
scope_name, old_scope_name = enter_eager_name_scope(ctx, self._name)
self._exit_fns.append(
lambda *a: setattr(ctx, "scope_name", old_scope_name))
else:
scope = get_default_graph().name_scope(self._name)
scope_name = scope.__enter__()
self._exit_fns.append(scope.__exit__)
return scope_name
def __exit__(self, type_arg, value_arg, traceback_arg):
exit_fn = self._exit_fns.pop()
exit_fn(type_arg, value_arg, traceback_arg)
return False # False values do not suppress exceptions
def strip_name_scope(name, export_scope):
"""Removes name scope from a name.
Args:
name: A `string` name.
export_scope: Optional `string`. Name scope to remove.
Returns:
Name with name scope removed, or the original name if export_scope
is None.
"""
if export_scope:
if export_scope[-1] == "/":
export_scope = export_scope[:-1]
try:
# Strips export_scope/, export_scope///,
# ^export_scope/, loc:@export_scope/.
str_to_replace = r"([\^]|loc:@|^)" + export_scope + r"[\/]+(.*)"
return re.sub(str_to_replace, r"\1\2", compat.as_str(name), count=1)
except TypeError as e:
# If the name is not of a type we can process, simply return it.
logging.warning(e)
return name
else:
return name
def prepend_name_scope(name, import_scope):
"""Prepends name scope to a name.
Args:
name: A `string` name.
import_scope: Optional `string`. Name scope to add.
Returns:
Name with name scope added, or the original name if import_scope
is None.
"""
if import_scope:
if import_scope[-1] == "/":
import_scope = import_scope[:-1]
try:
str_to_replace = r"([\^]|loc:@|^)(.*)"
return re.sub(str_to_replace, r"\1" + import_scope + r"/\2",
compat.as_str(name))
except TypeError as e:
# If the name is not of a type we can process, simply return it.
logging.warning(e)
return name
else:
return name
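# Worked examples for the two helpers above (names are illustrative):
#   strip_name_scope("export/foo/bar", "export")   -> "foo/bar"
#   strip_name_scope("^export/foo", "export")      -> "^foo"
#   prepend_name_scope("foo/bar", "import")        -> "import/foo/bar"
#   prepend_name_scope("loc:@foo", "import")       -> "loc:@import/foo"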
# pylint: disable=g-doc-return-or-yield
# pylint: disable=not-context-manager
@tf_export(v1=["op_scope"])
@tf_contextlib.contextmanager
def op_scope(values, name, default_name=None):
"""DEPRECATED. Same as name_scope above, just different argument order."""
logging.warn("tf.op_scope(values, name, default_name) is deprecated,"
" use tf.name_scope(name, default_name, values)")
with name_scope(name, default_name=default_name, values=values) as scope:
yield scope
_proto_function_registry = registry.Registry("proto functions")
def register_proto_function(collection_name,
proto_type=None,
to_proto=None,
from_proto=None):
"""Registers `to_proto` and `from_proto` functions for collection_name.
`to_proto` function converts a Python object to the corresponding protocol
buffer, and returns the protocol buffer.
`from_proto` function converts protocol buffer into a Python object, and
  returns the object.
Args:
collection_name: Name of the collection.
proto_type: Protobuf type, such as `saver_pb2.SaverDef`,
      `variable_pb2.VariableDef`, or `queue_runner_pb2.QueueRunnerDef`.
to_proto: Function that implements Python object to protobuf conversion.
from_proto: Function that implements protobuf to Python object conversion.
"""
if to_proto and not callable(to_proto):
raise TypeError("to_proto must be callable.")
if from_proto and not callable(from_proto):
raise TypeError("from_proto must be callable.")
_proto_function_registry.register((proto_type, to_proto, from_proto),
collection_name)
def get_collection_proto_type(collection_name):
"""Returns the proto_type for collection_name."""
try:
return _proto_function_registry.lookup(collection_name)[0]
except LookupError:
return None
def get_to_proto_function(collection_name):
"""Returns the to_proto function for collection_name."""
try:
return _proto_function_registry.lookup(collection_name)[1]
except LookupError:
return None
def get_from_proto_function(collection_name):
"""Returns the from_proto function for collection_name."""
try:
return _proto_function_registry.lookup(collection_name)[2]
except LookupError:
return None
def _operation_conversion_error(op, dtype=None, name=None, as_ref=False):
"""Produce a nice error if someone converts an Operation to a Tensor."""
raise TypeError(("Can't convert Operation '%s' to Tensor "
"(target dtype=%r, name=%r, as_ref=%r)") %
(op.name, dtype, name, as_ref))
def _op_to_colocate_with(v, graph):
"""Operation object corresponding to v to use for colocation constraints."""
if v is None:
return None
if isinstance(v, Operation):
return v
# We always want to colocate with the reference op.
# When 'v' is a ResourceVariable, the reference op is the handle creating op.
#
# What this should be is:
# if isinstance(v, ResourceVariable):
# return v.handle.op
# However, that would require a circular import dependency.
# As of October 2018, there were attempts underway to remove
# colocation constraints altogether. Assuming that will
# happen soon, perhaps this hack to work around the circular
# import dependency is acceptable.
if hasattr(v, "handle") and isinstance(v.handle, Tensor):
if graph.building_function:
return graph.capture(v.handle).op
else:
return v.handle.op
return internal_convert_to_tensor_or_indexed_slices(v, as_ref=True).op
def _is_keras_symbolic_tensor(x):
return hasattr(x, "graph") and getattr(x.graph, "name", None) == "keras_graph"
tensor_conversion_registry.register_tensor_conversion_function(
Operation, _operation_conversion_error)
# These symbols were originally defined in this module; import them for
# backwards compatibility until all references have been updated to access
# them from the indexed_slices.py module.
IndexedSlices = indexed_slices.IndexedSlices
IndexedSlicesValue = indexed_slices.IndexedSlicesValue
convert_to_tensor_or_indexed_slices = \
indexed_slices.convert_to_tensor_or_indexed_slices
convert_n_to_tensor_or_indexed_slices = \
indexed_slices.convert_n_to_tensor_or_indexed_slices
internal_convert_to_tensor_or_indexed_slices = \
indexed_slices.internal_convert_to_tensor_or_indexed_slices
internal_convert_n_to_tensor_or_indexed_slices = \
indexed_slices.internal_convert_n_to_tensor_or_indexed_slices
register_tensor_conversion_function = \
tensor_conversion_registry.register_tensor_conversion_function
# Helper functions for op wrapper modules generated by `python_op_gen`.
def to_raw_op(f):
"""Make a given op wrapper function `f` raw.
Raw op wrappers can only be called with keyword arguments.
Args:
f: An op wrapper function to make raw.
Returns:
Raw `f`.
"""
# Copy `f` to get a new `__dict__`, otherwise `tf_export` will fail
# due to double-registration.
f = types.FunctionType(f.__code__, f.__globals__, f.__name__, f.__defaults__,
f.__closure__)
return kwarg_only(f)
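# Illustrative (wrapper name assumed): for a generated op wrapper
# `add(x, y, name=None)`, `raw_add = to_raw_op(add)` yields a copy that
# accepts keyword arguments only, e.g. raw_add(x=a, y=b, name="add");
# positional calls raise.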
def raise_from_not_ok_status(e, name):
message = e.message + (" name: " + name if name is not None else "")
# pylint: disable=protected-access
six.raise_from(core._status_to_exception(e.code, message), None)
# pylint: enable=protected-access
def | (fn):
"""Add a callback to run when the default function graph goes out of scope.
Usage:
```python
@tf.function
def fn(x, v):
expensive = expensive_object(v)
add_exit_callback_to_default_func_graph(lambda: expensive.release())
return g(x, expensive)
fn(x=tf.constant(...), v=...)
# `expensive` has been released.
```
Args:
fn: A callable that takes no arguments and whose output is ignored.
To be executed when exiting func graph scope.
Raises:
RuntimeError: If executed when the current default graph is not a FuncGraph,
or not currently executing in function creation mode (e.g., if inside
an init_scope).
"""
default_graph = get_default_graph()
if not default_graph._building_function: # pylint: disable=protected-access
raise RuntimeError(
"Cannot add scope exit callbacks when not building a function. "
"Default graph: {}".format(default_graph))
default_graph._add_scope_exit_callback(fn) # pylint: disable=protected-access
def _reconstruct_sequence_inputs(op_def, inputs, attrs):
"""Regroups a flat list of input tensors into scalar and sequence inputs.
Args:
op_def: The `op_def_pb2.OpDef` (for knowing the input types)
inputs: a list of input `Tensor`s to the op.
attrs: mapping from attr name to `attr_value_pb2.AttrValue` (these define
how long each sequence is)
Returns:
A list of `Tensor`s (corresponding to scalar inputs) and lists of
`Tensor`s (corresponding to sequence inputs).
"""
grouped_inputs = []
i = 0
for input_arg in op_def.input_arg:
if input_arg.number_attr:
input_len = attrs[input_arg.number_attr].i
is_sequence = True
elif input_arg.type_list_attr:
input_len = len(attrs[input_arg.type_list_attr].list.type)
is_sequence = True
else:
input_len = 1
is_sequence = False
if is_sequence:
grouped_inputs.append(inputs[i:i + input_len])
else:
grouped_inputs.append(inputs[i])
i += input_len
assert i == len(inputs)
return grouped_inputs
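# Illustrative (op_def assumed): with input_arg == [x, ys] where `ys` carries
# number_attr "N" and attrs["N"].i == 2, inputs [a, b, c] regroup to
# [a, [b, c]] -- scalar inputs stay bare, sequence inputs become sub-lists.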
class _TensorIterator(object):
"""Iterates over the leading dim of a Tensor. Performs no error checks."""
def __init__(self, tensor, dim0):
self._tensor = tensor
self._index = 0
self._limit = dim0
def __iter__(self):
return self
def __next__(self):
if self._index == self._limit:
raise StopIteration
result = self._tensor[self._index]
self._index += 1
return result
next = __next__ # python2.x compatibility.
| add_exit_callback_to_default_func_graph |
test_capat.py | # -*- coding: utf-8 -*-
import py
try:
from jabberbot import capat
except ImportError:
py.test.skip("Skipping jabber bot tests - pyxmpp is not installed")
def test_ver_simple():
# example values supplied by the XEP
ident = (("client", "pc"), )
feat = ("http://jabber.org/protocol/disco#info",
"http://jabber.org/protocol/disco#items",
"http://jabber.org/protocol/muc",
)
assert capat.generate_ver(ident, feat) == "8RovUdtOmiAjzj+xI7SK5BCw3A8="
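# The string hashed above is presumably, following the ordering demonstrated
# in test_ver_complex below:
#   "client/pc<http://jabber.org/protocol/disco#info<"
#   "http://jabber.org/protocol/disco#items<http://jabber.org/protocol/muc<"
# i.e. SHA-1 of that, base64-encoded, gives the expected value.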
def test_ver_complex():
# this test should verify that ordering works properly
ident = (("client", "animal"),
("client", "bear"), # type ordering after category ordering
("apples", "bar"),
("apple", "foo"), # "apples" starts with "apple"
# thus it's greater
)
feat = ()
expected = capat.hash_new('sha1')
expected.update("apple/foo<apples/bar<client/animal<client/bear<")
expected = capat.base64.b64encode(expected.digest())
assert capat.generate_ver(ident, feat) == expected
def test_xml():
try:
import pyxmpp.iq
except ImportError:
py.test.skip("pyxmpp needs to be installed for this test")
x = pyxmpp.iq.Iq(stanza_type='result', stanza_id='disco1', | to_jid='[email protected]/chamber')
y = x.new_query(ns_uri='http://jabber.org/protocol/disco#info')
z = y.newChild(None, 'identity', None)
z.setProp('category', 'client')
z.setProp('type', 'pc')
y.newChild(None, 'feature', None).setProp(
'var', 'http://jabber.org/protocol/disco#info')
y.newChild(None, 'feature', None).setProp(
'var', 'http://jabber.org/protocol/disco#items')
y.newChild(None, 'feature', None).setProp(
'var', 'http://jabber.org/protocol/muc')
assert capat.hash_iq(x) == "8RovUdtOmiAjzj+xI7SK5BCw3A8="
# hash value taken from `test_ver_simple` | from_jid='[email protected]/orchard', |
motor.js | /**
* motor Schema
*/
const mongoose = require('mongoose')
const Schema = mongoose.Schema | motor: String,
brand: String,
price: Number,
dealer: String,
created_at: {
type:Date,
default:Date.now
},
updated_at: {
type: Date,
default: Date.now
}
})
let Motor = mongoose.model("Motor", motorSchema)
module.exports = Motor |
let motorSchema = new Schema ({ |
multiple_crate_versions.rs | //! lint on multiple versions of a crate being used
use crate::utils::span_lint;
use rustc::lint::{EarlyContext, EarlyLintPass, LintArray, LintPass};
use rustc::{declare_tool_lint, lint_array};
use syntax::{ast::*, source_map::DUMMY_SP};
use cargo_metadata;
use itertools::Itertools;
/// **What it does:** Checks to see if multiple versions of a crate are being
/// used.
///
/// **Why is this bad?** This bloats the size of targets, and can lead to
/// confusing error messages when structs or traits are used interchangeably
/// between different versions of a crate.
///
/// **Known problems:** Because this can be caused purely by the dependencies
/// themselves, it's not always possible to fix this issue.
///
/// **Example:**
/// ```toml
/// # This will pull in both winapi v0.3.4 and v0.2.8, triggering a warning.
/// [dependencies]
/// ctrlc = "3.1.0"
/// ansi_term = "0.11.0"
/// ```
declare_clippy_lint! {
pub MULTIPLE_CRATE_VERSIONS,
cargo,
"multiple versions of the same crate being used"
}
pub struct Pass;
impl LintPass for Pass {
fn get_lints(&self) -> LintArray {
lint_array!(MULTIPLE_CRATE_VERSIONS)
}
fn name(&self) -> &'static str |
}
impl EarlyLintPass for Pass {
fn check_crate(&mut self, cx: &EarlyContext<'_>, _: &Crate) {
let metadata = if let Ok(metadata) = cargo_metadata::MetadataCommand::new().exec() {
metadata
} else {
span_lint(cx, MULTIPLE_CRATE_VERSIONS, DUMMY_SP, "could not read cargo metadata");
return;
};
let mut packages = metadata.packages;
packages.sort_by(|a, b| a.name.cmp(&b.name));
for (name, group) in &packages.into_iter().group_by(|p| p.name.clone()) {
let group: Vec<cargo_metadata::Package> = group.collect();
if group.len() > 1 {
let versions = group.into_iter().map(|p| p.version).join(", ");
span_lint(
cx,
MULTIPLE_CRATE_VERSIONS,
DUMMY_SP,
&format!("multiple versions for dependency `{}`: {}", name, versions),
);
}
}
}
}
| {
"MultipleCrateVersions"
} |
lib.rs | use error::*;
use silk_proto::*;
mod support;
pub use support::*;
mod start;
pub use start::*;
#[macro_use]
extern crate log;
use std::sync::Arc;
use tokio::sync::RwLock;
pub trait IChain {
fn configure(&self, tx: Transaction) -> Result<()>;
fn order(&self, tx: Transaction) -> Result<()>;
fn start(&self);
}
#[async_trait::async_trait]
pub trait IChainSupport: Send + Sync + 'static {
async fn cutter_ordered(&self, vec: Vec<Transaction>) -> Vec<Vec<Transaction>>;
async fn cutter_cut(&self) -> Vec<Transaction>;
async fn create_next_block(&self, _vec: Vec<Transaction>) -> Block;
async fn write_block(&self, _block: Block) -> Result<()>;
}
pub trait IConsensus: Send + Sync + 'static {
type Output: IChain;
fn handler_chain<S: IChainSupport>(&self, support: S) -> Self::Output;
}
#[derive(Clone)]
pub struct ChainSupport {
id: String,
header: Arc<RwLock<BlockHeader>>,
}
impl ChainSupport {
pub fn new(id: String, header: BlockHeader) -> Self {
ChainSupport {
id,
header: Arc::new(RwLock::new(header)),
}
}
pub async fn set(&self, header: BlockHeader) {
let mut lock = self.header.write().await;
if lock.number != header.number {
info!("update chain {:} header {:}", self.id, header.number);
*lock = header
}
}
pub async fn start(&self) -> Result<()> {
let lock = self.header.read().await;
println!("->{:}", lock.number);
Ok(())
}
}
#[async_trait::async_trait]
impl IChainSupport for ChainSupport {
async fn cutter_ordered(&self, _vec: Vec<Transaction>) -> Vec<Vec<Transaction>> {
unimplemented!()
}
async fn cutter_cut(&self) -> Vec<Transaction> {
unimplemented!()
}
async fn create_next_block(&self, _vec: Vec<Transaction>) -> Block {
unimplemented!()
}
async fn write_block(&self, _block: Block) -> Result<()> {
unimplemented!()
}
}
#[cfg(test)]
mod tests {
use crate::ChainSupport;
use futures::executor::block_on;
use silk_proto::BlockHeader;
#[test]
fn | () {
let s = ChainSupport::new(
"default".to_string(),
BlockHeader {
number: 10,
previous_hash: vec![],
data_hash: vec![],
},
);
let sc = s.clone();
let set = sc.set(BlockHeader {
number: 11,
previous_hash: vec![],
data_hash: vec![],
});
block_on(set);
let f = s.start();
block_on(f);
let f2 = sc.start();
block_on(f2);
}
}
| it_works |
cuda_memory_bw_performance.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""Module of the Cuda memory performance benchmarks."""
import os
import re | from superbench.benchmarks.micro_benchmarks import MemBwBenchmark
class CudaMemBwBenchmark(MemBwBenchmark):
"""The Cuda memory performance benchmark class."""
def __init__(self, name, parameters=''):
"""Constructor.
Args:
name (str): benchmark name.
parameters (str): benchmark parameters.
"""
super().__init__(name, parameters)
self._bin_name = 'bandwidthTest'
def add_parser_arguments(self):
"""Add the specified arguments."""
super().add_parser_arguments()
self._parser.add_argument(
'--shmoo_mode',
action='store_true',
default=False,
            help='Enable shmoo mode for bandwidthTest.',
)
def _preprocess(self):
"""Preprocess/preparation operations before the benchmarking.
Return:
True if _preprocess() succeed.
"""
if not super()._preprocess():
return False
# Check the arguments and generate the commands
for mem_type in self._args.mem_type:
command = os.path.join(self._args.bin_dir, self._bin_name)
command += ' --' + mem_type
if self._args.shmoo_mode:
command += ' mode=shmoo'
if self._args.memory == 'pinned':
command += ' memory=pinned'
command += ' --csv'
self._commands.append(command)
return True
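    # Example of a command this builds (paths are illustrative), for mem_type
    # "htod" with shmoo mode and pinned memory:
    #   /path/to/bin/bandwidthTest --htod mode=shmoo memory=pinned --csv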
def _process_raw_result(self, cmd_idx, raw_output):
"""Function to parse raw results and save the summarized results.
self._result.add_raw_data() and self._result.add_result() need to be called to save the results.
Args:
cmd_idx (int): the index of command corresponding with the raw_output.
raw_output (str): raw output string of the micro-benchmark.
Return:
True if the raw output string is valid and result can be extracted.
"""
self._result.add_raw_data('raw_output_' + self._args.mem_type[cmd_idx], raw_output)
mem_bw = -1
valid = True
content = raw_output.splitlines()
try:
metric = self._metrics[self._mem_types.index(self._args.mem_type[cmd_idx])]
parse_logline = self._parse_logline_map[self._args.mem_type[cmd_idx]]
for line in content:
if parse_logline in line:
line = line.split(',')[1]
                    value = re.search(r'(\d+\.\d+)', line)  # escape the dot: match a literal decimal point
if value:
mem_bw = max(mem_bw, float(value.group(0)))
except BaseException:
valid = False
finally:
if valid is False or mem_bw == -1:
logger.error(
'The result format is invalid - round: {}, benchmark: {}, raw output: {}.'.format(
self._curr_run_index, self._name, raw_output
)
)
return False
self._result.add_result(metric, mem_bw)
return True
BenchmarkRegistry.register_benchmark('mem-bw', CudaMemBwBenchmark, platform=Platform.CUDA) |
from superbench.common.utils import logger
from superbench.benchmarks import BenchmarkRegistry, Platform |
cookie.py | #!/usr/bin/python3
"""
Beautiful command line parsing
@author chairs
"""
import inspect, sys
from collections import namedtuple
from collections import defaultdict
from subprocess import DEVNULL
class Cookie (object):
"""
Main decorator object
@param name of application
"""
def __init__ (self, app_name, notes=()):
self.optarg = namedtuple('optarg',
['full', 'abbrev', 'default'])
self.name = str(app_name)
self.notes = notes
def __parse (self, args):
"""
Parse command line arguments from argv, built to be simple
and as fast as possible to avoid application overhead
@param command line arguments
@return necessary destinations and identifiers
"""
        ordered = []
        # use two separate dicts; `full = abbrev = dict()` would alias them
        full, abbrev = {}, {}
args = args + ['']
i = 0
while i < len(args) - 1:
token = args[i]
next_token = args[i + 1]
# the full argument case
if token.startswith('--'):
if next_token.startswith('-'):
raise ValueError('{} incomplete'.format(token))
else:
full[token[2:]] = next_token
i += 2
# the shorthand argument case (more common)
elif token.startswith('-'):
if next_token.startswith('-'):
raise ValueError('{} incomplete'.format(token))
else:
abbrev[token[1:]] = next_token
i += 2
else:
ordered.append(token)
i += 1
return ordered, full, abbrev
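    # Illustrative parse (hypothetical argv tail):
    #   __parse(['build', '--target', 'x86', '-o', 'out'])
    #   -> (['build'], {'target': 'x86'}, {'o': 'out'})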
def __construct_ordered (self, params):
"""
Build the ordered parameters (those without flags, positional)
@param parameters from parse
        @return all exclusively ordered arguments
"""
return [key for key, arg in params.items() if arg.default == inspect._empty]
def __construct_optional (self, params):
|
def __resolve (self, args, signature):
"""
Resolve arguments final destinations
@param args arguments from construction
@param signatures
@return final destinations
"""
ordered, opt_parsed_full, opt_parsed_abbrev = self.__parse(args[1:])
ordered_def = self.__construct_ordered(signature.parameters)
if len(ordered) != len(ordered_def):
            raise Exception('wrong number of ordered arguments')
opt_parsed = dict()
opt_parsed.update(opt_parsed_full)
opt_parsed.update(opt_parsed_abbrev)
opt_def_full, opt_def_abbrev = self.__construct_optional(signature.parameters)
optional = {o.full: o.default for o in opt_def_full.values()}
opt_def = dict()
opt_def.update(opt_def_full)
opt_def.update(opt_def_abbrev)
for key, value in opt_parsed.items():
if key not in opt_def: raise Exception('resolution error')
d = opt_def[key]
optional[d.full] = value
return ordered, optional
def __usage_outline (self, signature):
"""
Nice formatted help message to outline usage
@param signature for arguments
"""
ordered = self.__construct_ordered(signature.parameters)
full, _ = self.__construct_optional(signature.parameters)
ordered_str = ' '.join(name.upper() for name in ordered)
optional_str = ' '.join('\n[-{} | --{} {}],'.format(
opt.abbrev, opt.full, opt.full.upper()) for opt in full.values())
optional_str = ''.join(optional_str.split(',')[::2])
return '{} {}'.format(ordered_str, optional_str)
def get_args (self, function):
"""
The main decorator, the glue
"""
def wrapper ():
sig = inspect.signature(function)
try:
ordered, optional = self.__resolve(sys.argv, sig)
except Exception:
self.outline = ('Usage: ', sys.argv[0], self.__usage_outline(sig,))
print(*self.outline)
if not self.notes == ():
print('\n'.join(self.notes) + '\n'+'\t'*1 + 'respectively')
return
function(*ordered, **optional)
return wrapper
def run (self, function_name, silent=False):
        restore = sys.stdout
        if silent:
            # open() must not be chained with .close(), which returns None
            sys.stdout = open('/dev/null', 'w')
        try:
            function_name()
        finally:
            if silent:
                sys.stdout.close()
            sys.stdout = restore
| """
Build the optional parameters (those with flags, switches)
@param parameters from parse
@return all exclusively optional arguments
"""
args = []
filtered = {
key: arg.default for key, arg in params.items() if arg.default != inspect._empty}
for key, default in filtered.items():
arg = self.optarg(full=key, abbrev=key[0].lower(), default=default)
args.append(arg)
        args_full, args_abbrev = {}, {}  # separate dicts, not aliases of one another
# resolve possible conflicts
known_count = defaultdict(int)
for arg in args:
args_full[arg.full] = arg
if known_count[arg.abbrev] == 0: args_abbrev[arg.abbrev] = arg
elif known_count[arg.abbrev] == 1:
# establish abbreviation
                new_abbrev = arg.abbrev.upper()
args_full[arg.full] = self.optarg(
full=arg.full,
abbrev=new_abbrev,
default=arg.default)
args_abbrev[new_abbrev] = args_full[arg.full]
else:
                new_abbrev = arg.abbrev.upper() + str(known_count[arg.abbrev])
args_full[arg.full] = self.optarg(
full=arg.full,
abbrev=new_abbrev,
default=arg.default)
args_abbrev[new_abbrev] = args_full[arg.full]
known_count[arg.abbrev] += 1
return args_full, args_abbrev |
updateRemoteListeners.ts | import { Callback } from './redis/types'
import { SelvaClient } from '.'
import { ServerSelector } from './types'
import getServer from './getServer'
import { createConnection } from './connection'
export const addRemoteListener = (
selvaClient: SelvaClient,
selector: ServerSelector,
event: string,
cb: Callback,
id?: string
) => {
getServer(
selvaClient,
(server) => { | connection.addRemoteListener(
event,
cb,
id === undefined ? selvaClient.selvaId : id
)
},
selector
)
}
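// Illustrative usage (the selector shape here is an assumption):
//   addRemoteListener(client, { name: 'registry' }, 'update', (payload) => {
//     console.log('registry update', payload)
//   })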
export const removeRemoteListener = (
selvaClient: SelvaClient,
selector: ServerSelector,
event: string,
cb?: Callback,
id?: string
) => {
getServer(
selvaClient,
(server) => {
const connection = createConnection(server)
connection.attachClient(selvaClient)
connection.removeRemoteListener(
event,
cb,
id === undefined ? selvaClient.selvaId : id
)
},
selector
)
} | const connection = createConnection(server)
connection.attachClient(selvaClient) |
destroy_composite_key.spec.ts | import { createStore, fillState } from 'test/Helpers'
import { Model, Attr, Str } from '@/index'
describe('feature/repository/destroy_composite_key', () => {
class | extends Model {
static entity = 'users'
static primaryKey = ['idA', 'idB']
@Attr() idA!: any
@Attr() idB!: any
@Str('') name!: string
}
it('throws if the model has composite key', () => {
const store = createStore()
fillState(store, {
users: {
1: { id: 1, name: 'John Doe' },
2: { id: 2, name: 'Jane Doe' },
3: { id: 3, name: 'Johnny Doe' }
}
})
expect(() => store.$repo(User).destroy(2)).toThrow()
})
})
| User |
Deposit Calculator.py | deposit = float(input())
term_of_deposit = int(input())
interest_percent = float(input())
interest_accumulated = deposit * interest_percent / 100
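# Worked example (inputs assumed): deposit=1000.0, interest_percent=5.0
# -> interest_accumulated = 1000.0 * 5.0 / 100 = 50.0 per year,
#    i.e. 50.0 / 12 ~= 4.1667 per month; a 3-month term then adds ~12.50.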
interest_per_month = interest_accumulated / 12 | total_amount = deposit + term_of_deposit * interest_per_month
print(total_amount) | |
url.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import { URI } from 'vs/base/common/uri';
import { createDecorator } from 'vs/platform/instantiation/common/instantiation';
import { IDisposable } from 'vs/base/common/lifecycle';
export const IURLService = createDecorator<IURLService>('urlService');
export interface IURLHandler {
handleURL(uri: URI): Promise<boolean>;
}
export interface IURLService { |
open(url: URI): Promise<boolean>;
registerHandler(handler: IURLHandler): IDisposable;
} | _serviceBrand: any; |
__init__.py | """
This package holds the Optimal BPM plugin, its libraries and UI.
The Optimal Framework loads this package as a plugin.
"""
import runpy
__author__ = 'Nicklas Borjesson'
def | ():
runpy.run_module(mod_name="optimalbpm.agent.agent", run_name="__main__")
| run_agent |
shape.rs | use crate::{
text::{Fonts, Galley, TextStyle},
Color32, Mesh, Stroke,
};
use emath::*;
/// A paint primitive such as a circle or a piece of text.
/// Coordinates are all screen space points (not physical pixels).
#[must_use = "Add a Shape to a Painter"]
#[derive(Clone, Debug, PartialEq)]
pub enum Shape {
/// Paint nothing. This can be useful as a placeholder.
Noop,
    /// Recursively nest more shapes. Sometimes a convenience, but for
    /// performance reasons it is better to avoid deep nesting.
Vec(Vec<Shape>),
Circle(CircleShape),
/// A line between two points.
LineSegment {
points: [Pos2; 2],
stroke: Stroke,
},
/// A series of lines between points.
/// The path can have a stroke and/or fill (if closed).
Path(PathShape),
Rect(RectShape),
Text(TextShape),
Mesh(Mesh),
}
/// ## Constructors
impl Shape {
/// A line between two points.
/// More efficient than calling [`Self::line`].
#[inline]
pub fn line_segment(points: [Pos2; 2], stroke: impl Into<Stroke>) -> Self {
Self::LineSegment {
points,
stroke: stroke.into(),
}
}
/// A line through many points.
///
/// Use [`Self::line_segment`] instead if your line only connects two points.
#[inline]
pub fn line(points: Vec<Pos2>, stroke: impl Into<Stroke>) -> Self {
Self::Path(PathShape::line(points, stroke))
}
/// A line that closes back to the start point again.
#[inline]
pub fn closed_line(points: Vec<Pos2>, stroke: impl Into<Stroke>) -> Self {
Self::Path(PathShape::closed_line(points, stroke))
}
/// Turn a line into equally spaced dots.
pub fn dotted_line(
points: &[Pos2],
color: impl Into<Color32>,
spacing: f32,
radius: f32,
) -> Vec<Self> {
let mut shapes = Vec::new();
points_from_line(points, spacing, radius, color.into(), &mut shapes);
shapes
}
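    // Illustrative call (coordinates and values are arbitrary):
    //   let dots = Shape::dotted_line(
    //       &[pos2(0.0, 0.0), pos2(24.0, 0.0)], Color32::WHITE, 4.0, 1.0);
    // -> filled circles of radius 1.0 every 4.0 points along the segment.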
/// Turn a line into dashes.
pub fn dashed_line(
points: &[Pos2],
stroke: impl Into<Stroke>,
dash_length: f32,
gap_length: f32,
) -> Vec<Self> {
let mut shapes = Vec::new();
dashes_from_line(points, stroke.into(), dash_length, gap_length, &mut shapes);
shapes
}
/// Turn a line into dashes. If you need to create many dashed lines use this instead of
/// [`Self::dashed_line`]
pub fn dashed_line_many(
points: &[Pos2],
stroke: impl Into<Stroke>,
dash_length: f32,
gap_length: f32,
shapes: &mut Vec<Shape>,
) {
dashes_from_line(points, stroke.into(), dash_length, gap_length, shapes);
}
/// A convex polygon with a fill and optional stroke.
#[inline]
pub fn convex_polygon(
points: Vec<Pos2>,
fill: impl Into<Color32>,
stroke: impl Into<Stroke>,
) -> Self {
Self::Path(PathShape::convex_polygon(points, fill, stroke))
}
#[inline]
pub fn circle_filled(center: Pos2, radius: f32, fill_color: impl Into<Color32>) -> Self {
Self::Circle(CircleShape::filled(center, radius, fill_color))
}
#[inline]
pub fn circle_stroke(center: Pos2, radius: f32, stroke: impl Into<Stroke>) -> Self {
Self::Circle(CircleShape::stroke(center, radius, stroke))
}
#[inline]
pub fn rect_filled(rect: Rect, corner_radius: f32, fill_color: impl Into<Color32>) -> Self {
Self::Rect(RectShape::filled(rect, corner_radius, fill_color))
}
#[inline]
pub fn rect_stroke(rect: Rect, corner_radius: f32, stroke: impl Into<Stroke>) -> Self {
Self::Rect(RectShape::stroke(rect, corner_radius, stroke))
}
#[allow(clippy::needless_pass_by_value)]
pub fn text(
fonts: &Fonts,
pos: Pos2,
anchor: Align2,
text: impl ToString,
text_style: TextStyle,
color: Color32,
) -> Self {
let galley = fonts.layout_no_wrap(text.to_string(), text_style, color);
let rect = anchor.anchor_rect(Rect::from_min_size(pos, galley.size()));
Self::galley(rect.min, galley)
}
#[inline]
pub fn galley(pos: Pos2, galley: crate::mutex::Arc<Galley>) -> Self {
TextShape::new(pos, galley).into()
}
pub fn mesh(mesh: Mesh) -> Self {
crate::epaint_assert!(mesh.is_valid());
Self::Mesh(mesh)
}
}
/// ## Inspection and transforms
impl Shape {
#[inline(always)]
pub fn texture_id(&self) -> super::TextureId {
if let Shape::Mesh(mesh) = self | else {
super::TextureId::default()
}
}
/// Move the shape by this many points, in-place.
pub fn translate(&mut self, delta: Vec2) {
match self {
Shape::Noop => {}
Shape::Vec(shapes) => {
for shape in shapes {
shape.translate(delta);
}
}
Shape::Circle(circle_shape) => {
circle_shape.center += delta;
}
Shape::LineSegment { points, .. } => {
for p in points {
*p += delta;
}
}
Shape::Path(path_shape) => {
for p in &mut path_shape.points {
*p += delta;
}
}
Shape::Rect(rect_shape) => {
rect_shape.rect = rect_shape.rect.translate(delta);
}
Shape::Text(text_shape) => {
text_shape.pos += delta;
}
Shape::Mesh(mesh) => {
mesh.translate(delta);
}
}
}
}
// ----------------------------------------------------------------------------
/// How to paint a circle.
#[derive(Copy, Clone, Debug, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
pub struct CircleShape {
pub center: Pos2,
pub radius: f32,
pub fill: Color32,
pub stroke: Stroke,
}
impl CircleShape {
#[inline]
pub fn filled(center: Pos2, radius: f32, fill_color: impl Into<Color32>) -> Self {
Self {
center,
radius,
fill: fill_color.into(),
stroke: Default::default(),
}
}
#[inline]
pub fn stroke(center: Pos2, radius: f32, stroke: impl Into<Stroke>) -> Self {
Self {
center,
radius,
fill: Default::default(),
stroke: stroke.into(),
}
}
}
impl From<CircleShape> for Shape {
#[inline(always)]
fn from(shape: CircleShape) -> Self {
Self::Circle(shape)
}
}
// ----------------------------------------------------------------------------
/// A path which can be stroked and/or filled (if closed).
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
pub struct PathShape {
pub points: Vec<Pos2>,
/// If true, connect the first and last of the points together.
/// This is required if `fill != TRANSPARENT`.
pub closed: bool,
/// Fill is only supported for convex polygons.
pub fill: Color32,
pub stroke: Stroke,
}
impl PathShape {
/// A line through many points.
///
/// Use [`Shape::line_segment`] instead if your line only connects two points.
#[inline]
pub fn line(points: Vec<Pos2>, stroke: impl Into<Stroke>) -> Self {
PathShape {
points,
closed: false,
fill: Default::default(),
stroke: stroke.into(),
}
}
/// A line that closes back to the start point again.
#[inline]
pub fn closed_line(points: Vec<Pos2>, stroke: impl Into<Stroke>) -> Self {
PathShape {
points,
closed: true,
fill: Default::default(),
stroke: stroke.into(),
}
}
/// A convex polygon with a fill and optional stroke.
#[inline]
pub fn convex_polygon(
points: Vec<Pos2>,
fill: impl Into<Color32>,
stroke: impl Into<Stroke>,
) -> Self {
PathShape {
points,
closed: true,
fill: fill.into(),
stroke: stroke.into(),
}
}
/// Screen-space bounding rectangle.
#[inline]
pub fn bounding_rect(&self) -> Rect {
Rect::from_points(&self.points).expand(self.stroke.width)
}
}
impl From<PathShape> for Shape {
#[inline(always)]
fn from(shape: PathShape) -> Self {
Self::Path(shape)
}
}
// ----------------------------------------------------------------------------
/// How to paint a rectangle.
#[derive(Copy, Clone, Debug, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
pub struct RectShape {
pub rect: Rect,
/// How rounded the corners are. Use `0.0` for no rounding.
pub corner_radius: f32,
pub fill: Color32,
pub stroke: Stroke,
}
impl RectShape {
#[inline]
pub fn filled(rect: Rect, corner_radius: f32, fill_color: impl Into<Color32>) -> Self {
Self {
rect,
corner_radius,
fill: fill_color.into(),
stroke: Default::default(),
}
}
#[inline]
pub fn stroke(rect: Rect, corner_radius: f32, stroke: impl Into<Stroke>) -> Self {
Self {
rect,
corner_radius,
fill: Default::default(),
stroke: stroke.into(),
}
}
/// Screen-space bounding rectangle.
#[inline]
pub fn bounding_rect(&self) -> Rect {
self.rect.expand(self.stroke.width)
}
}
impl From<RectShape> for Shape {
#[inline(always)]
fn from(shape: RectShape) -> Self {
Self::Rect(shape)
}
}
// ----------------------------------------------------------------------------
/// How to paint some text on screen.
#[derive(Clone, Debug, PartialEq)]
pub struct TextShape {
/// Top left corner of the first character.
pub pos: Pos2,
    /// The laid-out text, from [`Fonts::layout_job`].
pub galley: crate::mutex::Arc<Galley>,
/// Add this underline to the whole text.
/// You can also set an underline when creating the galley.
pub underline: Stroke,
/// If set, the text color in the galley will be ignored and replaced
/// with the given color.
/// This will NOT replace background color nor strikethrough/underline color.
pub override_text_color: Option<Color32>,
    /// Rotate text by this many radians clockwise.
/// The pivot is `pos` (the upper left corner of the text).
pub angle: f32,
}
impl TextShape {
#[inline]
pub fn new(pos: Pos2, galley: crate::mutex::Arc<Galley>) -> Self {
Self {
pos,
galley,
underline: Stroke::none(),
override_text_color: None,
angle: 0.0,
}
}
/// Screen-space bounding rectangle.
#[inline]
pub fn bounding_rect(&self) -> Rect {
self.galley.mesh_bounds.translate(self.pos.to_vec2())
}
}
impl From<TextShape> for Shape {
#[inline(always)]
fn from(shape: TextShape) -> Self {
Self::Text(shape)
}
}
// ----------------------------------------------------------------------------
/// Creates equally spaced filled circles from a line.
fn points_from_line(
line: &[Pos2],
spacing: f32,
radius: f32,
color: Color32,
shapes: &mut Vec<Shape>,
) {
let mut position_on_segment = 0.0;
line.windows(2).for_each(|window| {
let start = window[0];
let end = window[1];
let vector = end - start;
let segment_length = vector.length();
while position_on_segment < segment_length {
let new_point = start + vector * (position_on_segment / segment_length);
shapes.push(Shape::circle_filled(new_point, radius, color));
position_on_segment += spacing;
}
position_on_segment -= segment_length;
});
}
/// Creates dashes from a line.
fn dashes_from_line(
line: &[Pos2],
stroke: Stroke,
dash_length: f32,
gap_length: f32,
shapes: &mut Vec<Shape>,
) {
let mut position_on_segment = 0.0;
let mut drawing_dash = false;
line.windows(2).for_each(|window| {
let start = window[0];
let end = window[1];
let vector = end - start;
let segment_length = vector.length();
let mut start_point = start;
while position_on_segment < segment_length {
let new_point = start + vector * (position_on_segment / segment_length);
if drawing_dash {
// This is the end point.
shapes.push(Shape::line_segment([start_point, new_point], stroke));
position_on_segment += gap_length;
} else {
// Start a new dash.
start_point = new_point;
position_on_segment += dash_length;
}
drawing_dash = !drawing_dash;
}
// If the segment ends and the dash is not finished, add the segment's end point.
if drawing_dash {
shapes.push(Shape::line_segment([start_point, end], stroke));
}
position_on_segment -= segment_length;
});
}
| {
mesh.texture_id
} |
solution.go | /**
* @Author: vincent
* @Description:
* @File: solution
* @Version: 1.0.0
* @Date: 2021/9/27 09:12
*/
package _015_3sum
import (
"sort"
)
func | (nums []int, start int, target int) [][]int {
l, h := start, len(nums)-1
var res [][]int
var sum, left, right int
for l < h {
sum = nums[l] + nums[h]
left, right = nums[l], nums[h]
if sum < target {
			// sum too small: move l right, skipping duplicates of left
for l < h && nums[l] == left {
l++
}
} else if sum > target {
// h向前找
for l < h && nums[h] == right {
h--
}
} else {
// sum == target
res = append(res, []int{left, right})
for l < h && nums[l] == left {
				// skip duplicates of left
l++
}
for l < h && nums[h] == right {
h--
}
}
}
return res
}
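// Illustrative run: twoSumTarget([]int{-4, -1, -1, 0, 1, 2}, 1, 1) scans
// from index 1 and returns [[-1 2] [0 1]] -- every unique pair summing to 1.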
func threeSum(nums []int) [][]int {
// sort first
sort.Ints(nums)
n := len(nums)
var res [][]int
target := 0
for i := 0; i < n; i++ {
		// find all two-sum tuples for target-nums[i]
tuples := twoSumTarget(nums, i+1, target-nums[i])
for _, tuple := range tuples {
tuple = append(tuple, nums[i])
res = append(res, tuple)
}
		// skip over duplicate pivot values
for i < n-1 && nums[i] == nums[i+1] {
i++
}
}
return res
}
| twoSumTarget |
ResourceLinks.js | import React from 'react';
import ResourceLinks from 'gatsby-theme-carbon/src/components/LeftNav/ResourceLinks';
const links = [
{
title: 'IBM Cloud',
href: 'https://cloud.ibm.com/', | {
title: 'IBM Cloud Paks',
href: 'https://www.ibm.com/cloud/paks/',
},
{
title: 'IBM Edge Application Manager',
href: 'https://www.ibm.com/cloud/edge-application-manager/',
}
];
// shouldOpenNewTabs: true if outbound links should open in a new tab
const CustomResources = () => <ResourceLinks shouldOpenNewTabs links={links} />;
export default CustomResources; | }, |
Home.js | import * as React from "react";
import LoggedIn from "../Functions/LoggedIn";
import {Redirect} from 'react-router-dom';
export default class Home extends React.Component{
constructor(props){
super(props);
this.state = {
cookie: '',
};
}
render()
{
        // Check whether login info exists in localStorage
if('login_info' in localStorage)
{
let info = JSON.parse(localStorage.getItem('login_info'));
return (
<div>
</div>
)
}
else{
return ( | </div>
)
}
}
} | <div>
<Redirect to='/login'/> |
init.go | /*******************************************************************************
* Copyright 2019 Dell Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*
*******************************************************************************/
package secretstore
import (
"sync"
"time"
"github.com/edgexfoundry/edgex-go/internal"
"github.com/edgexfoundry/edgex-go/internal/pkg/config"
"github.com/edgexfoundry/go-mod-core-contracts/clients/logger"
"github.com/edgexfoundry/go-mod-core-contracts/models"
)
// Global variables
var Configuration *ConfigurationStruct
var LoggingClient logger.LoggingClient
func Retry(useRegistry bool, configDir, profileDir string, timeout int, wait *sync.WaitGroup, ch chan error) {
until := time.Now().Add(time.Millisecond * time.Duration(timeout))
for time.Now().Before(until) {
var err error
// When looping, only handle configuration if it hasn't already been set.
if Configuration == nil {
// Next two lines are workaround for issue #1814 (nil panic while logging)
// where config.LoadFromFile depends on a global LoggingClient that isn't set anywhere
// Remove this workaround once this tool is migrated to common bootstrap.
lc := logger.NewClient(internal.SecuritySecretStoreSetupServiceKey, false, "", models.InfoLog)
config.LoggingClient = lc
Configuration, err = initializeConfiguration(useRegistry, configDir, profileDir)
if err != nil {
ch <- err
if !useRegistry {
// Error occurred when attempting to read from local filesystem. Fail fast.
close(ch)
wait.Done()
return
}
} else {
// Setup Logging
logTarget := setLoggingTarget()
LoggingClient = logger.NewClient(internal.SecuritySecretStoreSetupServiceKey, Configuration.Logging.EnableRemote, logTarget, Configuration.Writable.LogLevel)
}
}
// This seems a bit artificial here due to lack of additional service requirements
// but conforms to the pattern found in other edgex-go services.
if Configuration != nil {
break
}
time.Sleep(time.Second * time.Duration(1))
}
close(ch)
wait.Done()
return
}
func initializeConfiguration(useRegistry bool, configDir, profileDir string) (*ConfigurationStruct, error) {
// We currently have to load configuration from filesystem first in order to obtain Registry Host/Port
configuration := &ConfigurationStruct{}
err := config.LoadFromFile(configDir, profileDir, configuration)
if err != nil {
return nil, err
}
return configuration, nil
}
func | () string {
return Configuration.Logging.File
}
| setLoggingTarget |
docs.go | // GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
// This file was generated by swaggo/swag
package docs
import (
"bytes"
"encoding/json"
"strings"
"github.com/alecthomas/template"
"github.com/swaggo/swag"
)
var doc = `{
"schemes": {{ marshal .Schemes }},
"swagger": "2.0",
"info": {
"description": "{{.Description}}",
"title": "{{.Title}}",
"contact": {
"name": "Source Code",
"url": "https://github.com/yoanyombapro1234/FeelGuuds/src/services/product_catalogue_service"
},
"license": {
"name": "MIT License",
"url": "https://github.com/yoanyombapro1234/FeelGuuds/src/services/product_catalogue_service/blob/master/LICENSE"
},
"version": "{{.Version}}"
},
"host": "{{.Host}}",
"basePath": "{{.BasePath}}",
"paths": {
"/": {
"get": {
"description": "renders service UI",
"produces": [
"text/html"
],
"tags": [
"HTTP API"
],
"summary": "Index",
"responses": {
"200": {
"description": "OK",
"schema": {
"type": "string"
}
}
}
}
},
"/api/echo": {
"post": {
"description": "forwards the call to the backend service and echos the posted content",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"HTTP API"
],
"summary": "Echo",
"responses": {
"202": {
"description": "Accepted",
"schema": {
"$ref": "#/definitions/api.MapResponse"
}
}
}
}
},
"/api/info": {
"get": {
"description": "returns the runtime information",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"HTTP API"
],
"summary": "Runtime information",
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/api.RuntimeResponse"
}
}
}
}
},
"/cache/{key}": {
"get": {
"description": "returns the content from cache if key exists",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"HTTP API"
],
"summary": "Get payload from cache",
"responses": {
"200": {
"description": "OK",
"schema": {
"type": "string"
}
}
}
},
"post": {
"description": "writes the posted content in cache",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"HTTP API"
],
"summary": "Save payload in cache",
"responses": {
"202": {
"description": ""
}
}
},
"delete": {
"description": "deletes the key and its value from cache",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"HTTP API"
],
"summary": "Delete payload from cache",
"responses": {
"202": {
"description": ""
}
}
}
},
"/chunked/{seconds}": {
"get": {
"description": "uses transfer-encoding type chunked to give a partial response and then waits for the specified period",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"HTTP API"
],
"summary": "Chunked transfer encoding",
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/api.MapResponse"
}
}
}
}
},
"/delay/{seconds}": {
"get": {
"description": "waits for the specified period",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"HTTP API"
],
"summary": "Delay",
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/api.MapResponse"
}
}
}
}
},
"/env": {
"get": {
"description": "returns the environment variables as a JSON array",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"HTTP API"
],
"summary": "Environment",
"responses": {
"200": {
"description": "OK",
"schema": {
"type": "array",
"items": {
"type": "string"
}
}
}
}
}
},
"/headers": {
"get": {
"description": "returns a JSON array with the request HTTP headers",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"HTTP API"
],
"summary": "Headers",
"responses": {
"200": {
"description": "OK",
"schema": {
"type": "array",
"items": {
"type": "string"
}
}
}
}
}
},
"/healthz": {
"get": {
"description": "used by Kubernetes liveness probe",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"Kubernetes"
],
"summary": "Liveness check",
"responses": {
"200": {
"description": "OK",
"schema": {
"type": "string"
}
}
} | "get": {
"description": "returns HTTP requests duration and Go runtime metrics",
"produces": [
"text/plain"
],
"tags": [
"Kubernetes"
],
"summary": "Prometheus metrics",
"responses": {
"200": {
"description": "OK",
"schema": {
"type": "string"
}
}
}
}
},
"/panic": {
"get": {
"description": "crashes the process with exit code 255",
"tags": [
"HTTP API"
],
"summary": "Panic"
}
},
"/readyz": {
"get": {
"description": "used by Kubernetes readiness probe",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"Kubernetes"
],
"summary": "Readiness check",
"responses": {
"200": {
"description": "OK",
"schema": {
"type": "string"
}
}
}
}
},
"/readyz/disable": {
"post": {
"description": "signals the Kubernetes LB to stop sending requests to this instance",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"Kubernetes"
],
"summary": "Disable ready state",
"responses": {
"202": {
"description": "OK",
"schema": {
"type": "string"
}
}
}
}
},
"/readyz/enable": {
"post": {
"description": "signals the Kubernetes LB that this instance is ready to receive traffic",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"Kubernetes"
],
"summary": "Enable ready state",
"responses": {
"202": {
"description": "OK",
"schema": {
"type": "string"
}
}
}
}
},
"/status/{code}": {
"get": {
"description": "sets the response status code to the specified code",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"HTTP API"
],
"summary": "Status code",
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/api.MapResponse"
}
}
}
}
},
"/store": {
"post": {
"description": "writes the posted content to disk at /data/hash and returns the SHA1 hash of the content",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"HTTP API"
],
"summary": "Upload file",
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/api.MapResponse"
}
}
}
}
},
"/store/{hash}": {
"get": {
"description": "returns the content of the file /data/hash if exists",
"consumes": [
"application/json"
],
"produces": [
"text/plain"
],
"tags": [
"HTTP API"
],
"summary": "Download file",
"responses": {
"200": {
"description": "file",
"schema": {
"type": "string"
}
}
}
}
},
"/token": {
"post": {
"description": "issues a JWT token valid for one minute",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"HTTP API"
],
"summary": "Generate JWT token",
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/api.TokenResponse"
}
}
}
}
},
"/token/validate": {
"post": {
"description": "validates the JWT token",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"HTTP API"
],
"summary": "Validate JWT token",
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/api.TokenValidationResponse"
}
},
"401": {
"description": "Unauthorized",
"schema": {
"type": "string"
}
}
}
}
},
"/version": {
"get": {
"description": "returns service version and git commit hash",
"produces": [
"application/json"
],
"tags": [
"HTTP API"
],
"summary": "Version",
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/api.MapResponse"
}
}
}
}
},
"/ws/echo": {
"post": {
"description": "echos content via websockets",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"HTTP API"
],
"summary": "Echo over websockets",
"responses": {
"202": {
"description": "Accepted",
"schema": {
"$ref": "#/definitions/api.MapResponse"
}
}
}
}
}
},
"definitions": {
"api.MapResponse": {
"type": "object",
"additionalProperties": {
"type": "string"
}
},
"api.RuntimeResponse": {
"type": "object",
"properties": {
"color": {
"type": "string"
},
"goarch": {
"type": "string"
},
"goos": {
"type": "string"
},
"hostname": {
"type": "string"
},
"logo": {
"type": "string"
},
"message": {
"type": "string"
},
"num_cpu": {
"type": "string"
},
"num_goroutine": {
"type": "string"
},
"revision": {
"type": "string"
},
"runtime": {
"type": "string"
},
"version": {
"type": "string"
}
}
},
"api.TokenResponse": {
"type": "object",
"properties": {
"expires_at": {
"type": "string"
},
"token": {
"type": "string"
}
}
},
"api.TokenValidationResponse": {
"type": "object",
"properties": {
"expires_at": {
"type": "string"
},
"token_name": {
"type": "string"
}
}
}
}
}`
type swaggerInfo struct {
Version string
Host string
BasePath string
Schemes []string
Title string
Description string
}
// SwaggerInfo holds exported Swagger Info so clients can modify it
var SwaggerInfo = swaggerInfo{
Version: "2.0",
Host: "localhost:9898",
BasePath: "/",
Schemes: []string{"http", "https"},
Title: "Service API",
Description: "Go microservice template for Kubernetes.",
}
type s struct{}
func (s *s) ReadDoc() string {
sInfo := SwaggerInfo
sInfo.Description = strings.Replace(sInfo.Description, "\n", "\\n", -1)
t, err := template.New("swagger_info").Funcs(template.FuncMap{
"marshal": func(v interface{}) string {
a, _ := json.Marshal(v)
return string(a)
},
}).Parse(doc)
if err != nil {
return doc
}
var tpl bytes.Buffer
if err := t.Execute(&tpl, sInfo); err != nil {
return doc
}
return tpl.String()
}
func init() {
swag.Register(swag.Name, &s{})
} | }
},
"/metrics": { |
engine_description.rs | /*
* Docker Engine API
*
 * The Engine API is an HTTP API served by Docker Engine. It is the API the Docker client uses to communicate with the Engine, so everything the Docker client can do can be done with the API. Most of the client's commands map directly to API endpoints (e.g. `docker ps` is `GET /containers/json`). The notable exception is running containers, which consists of several API calls. # Errors The API uses standard HTTP status codes to indicate the success or failure of the API call. The body of the response will be JSON in the following format: ``` { \"message\": \"page not found\" } ``` # Versioning The API is usually changed in each release of Docker, so API calls are versioned to ensure that clients don't break. For Docker Engine 17.10, the API version is 1.33. To lock to this version, you prefix the URL with `/v1.33`. For example, calling `/info` is the same as calling `/v1.33/info`. Engine releases in the near future should support this version of the API, so your client will continue to work even if it is talking to a newer Engine. In previous versions of Docker, it was possible to access the API without providing a version. This behaviour is now deprecated and will be removed in a future version of Docker. If the API version specified in the URL is not supported by the daemon, an HTTP `400 Bad Request` error message is returned. The API uses an open schema model, which means the server may add extra properties to responses. Likewise, the server will ignore any extra query parameters and request body properties. When you write clients, you need to ignore additional properties in responses to ensure they do not break when talking to newer Docker daemons. This documentation is for version 1.34 of the API. Use this table to find documentation for previous versions of the API: Docker version | API version | Changes ----------------|-------------|--------- 17.10.x | [1.33](https://docs.docker.com/engine/api/v1.33/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-33-api-changes) 17.09.x | [1.32](https://docs.docker.com/engine/api/v1.32/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-32-api-changes) 17.07.x | [1.31](https://docs.docker.com/engine/api/v1.31/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-31-api-changes) 17.06.x | [1.30](https://docs.docker.com/engine/api/v1.30/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-30-api-changes) 17.05.x | [1.29](https://docs.docker.com/engine/api/v1.29/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-29-api-changes) 17.04.x | [1.28](https://docs.docker.com/engine/api/v1.28/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-28-api-changes) 17.03.1 | [1.27](https://docs.docker.com/engine/api/v1.27/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-27-api-changes) 1.13.1 & 17.03.0 | [1.26](https://docs.docker.com/engine/api/v1.26/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-26-api-changes) 1.13.0 | [1.25](https://docs.docker.com/engine/api/v1.25/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-25-api-changes) 1.12.x | [1.24](https://docs.docker.com/engine/api/v1.24/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-24-api-changes) 1.11.x | [1.23](https://docs.docker.com/engine/api/v1.23/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-23-api-changes) 1.10.x | [1.22](https://docs.docker.com/engine/api/v1.22/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-22-api-changes) 1.9.x | [1.21](https://docs.docker.com/engine/api/v1.21/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-21-api-changes) 1.8.x | [1.20](https://docs.docker.com/engine/api/v1.20/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-20-api-changes) 1.7.x | [1.19](https://docs.docker.com/engine/api/v1.19/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-19-api-changes) 1.6.x | [1.18](https://docs.docker.com/engine/api/v1.18/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-18-api-changes) # Authentication Authentication for registries is handled client side. The client has to send authentication details to various endpoints that need to communicate with registries, such as `POST /images/(name)/push`. These are sent as an `X-Registry-Auth` header as a Base64 encoded (JSON) string with the following structure: ``` { \"username\": \"string\", \"password\": \"string\", \"email\": \"string\", \"serveraddress\": \"string\" } ``` The `serveraddress` is a domain/IP without a protocol. Throughout this structure, double quotes are required. If you have already got an identity token from the [`/auth` endpoint](#operation/SystemAuth), you can just pass this instead of credentials: ``` { \"identitytoken\": \"9cbaf023786cd7...\" } ```
*
* OpenAPI spec version: 1.34
*
* Generated by: https://github.com/swagger-api/swagger-codegen.git
*/
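// A minimal sketch of the `X-Registry-Auth` header described above, assuming
// the `serde_json` and `base64` crates (neither is otherwise used in this file);
// the credential values are illustrative:
//
//     let auth = serde_json::json!({
//         "username": "janedoe",
//         "password": "hunter2",
//         "email": "jane@example.com",
//         "serveraddress": "registry.example.com"
//     });
//     // Base64-encode the JSON and attach it as the `X-Registry-Auth` header
//     // on endpoints such as `POST /images/(name)/push`.
//     let header_value = base64::encode(auth.to_string());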
/// EngineDescription : EngineDescription provides information about an engine.
use serde_derive::{Deserialize, Serialize};
#[allow(unused_imports)]
use serde_json::Value;
#[derive(Debug, Serialize, Deserialize)]
pub struct EngineDescription {
#[serde(rename = "EngineVersion", skip_serializing_if = "Option::is_none")]
engine_version: Option<String>,
#[serde(rename = "Labels", skip_serializing_if = "Option::is_none")]
labels: Option<::std::collections::HashMap<String, String>>,
#[serde(rename = "Plugins", skip_serializing_if = "Option::is_none")]
plugins: Option<Vec<crate::models::EngineDescriptionPlugins>>,
}
impl EngineDescription {
/// EngineDescription provides information about an engine.
pub fn new() -> Self {
EngineDescription {
engine_version: None,
labels: None,
plugins: None,
} | }
pub fn set_engine_version(&mut self, engine_version: String) {
self.engine_version = Some(engine_version);
}
pub fn with_engine_version(mut self, engine_version: String) -> Self {
self.engine_version = Some(engine_version);
self
}
pub fn engine_version(&self) -> Option<&str> {
self.engine_version.as_ref().map(AsRef::as_ref)
}
pub fn reset_engine_version(&mut self) {
self.engine_version = None;
}
pub fn set_labels(&mut self, labels: ::std::collections::HashMap<String, String>) {
self.labels = Some(labels);
}
pub fn with_labels(mut self, labels: ::std::collections::HashMap<String, String>) -> Self {
self.labels = Some(labels);
self
}
pub fn labels(&self) -> Option<&::std::collections::HashMap<String, String>> {
self.labels.as_ref()
}
pub fn reset_labels(&mut self) {
self.labels = None;
}
pub fn set_plugins(&mut self, plugins: Vec<crate::models::EngineDescriptionPlugins>) {
self.plugins = Some(plugins);
}
pub fn with_plugins(mut self, plugins: Vec<crate::models::EngineDescriptionPlugins>) -> Self {
self.plugins = Some(plugins);
self
}
pub fn plugins(&self) -> Option<&[crate::models::EngineDescriptionPlugins]> {
self.plugins.as_ref().map(AsRef::as_ref)
}
pub fn reset_plugins(&mut self) {
self.plugins = None;
}
} | |
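// A minimal usage sketch of the builder-style setters defined above; only APIs
// from this file are used:
//
//     let desc = EngineDescription::new()
//         .with_engine_version("17.10.0-ce".to_string());
//     assert_eq!(desc.engine_version(), Some("17.10.0-ce"));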
apiOperationPolicy.go | // *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
package apimanagement
import (
"context"
"reflect"
"github.com/pkg/errors"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)
// Manages an API Management API Operation Policy
//
// ## Example Usage
//
// ```go
// package main
//
// import (
// "fmt"
//
// "github.com/pulumi/pulumi-azure/sdk/v3/go/azure/apimanagement"
// "github.com/pulumi/pulumi/sdk/v2/go/pulumi"
// )
//
// func main() {
// pulumi.Run(func(ctx *pulumi.Context) error {
// exampleApiOperation, err := apimanagement.NewApiOperation(ctx, "exampleApiOperation", nil)
// if err != nil {
// return err
// }
// _, err = apimanagement.NewApiOperationPolicy(ctx, "exampleApiOperationPolicy", &apimanagement.ApiOperationPolicyArgs{
// ApiName: exampleApiOperation.ApiName,
// ApiManagementName: exampleApiOperation.ApiManagementName,
// ResourceGroupName: exampleApiOperation.ResourceGroupName,
// OperationId: exampleApiOperation.OperationId,
// XmlContent: pulumi.String(fmt.Sprintf("%v%v%v%v%v", "<policies>\n", " <inbound>\n", " <find-and-replace from=\"xyz\" to=\"abc\" />\n", " </inbound>\n", "</policies>\n")),
// })
// if err != nil {
// return err
// }
// return nil
// })
// }
// ```
//
// ## Import
//
// API Management API Operation Policy can be imported using the `resource id`, e.g.
//
// ```sh
// $ pulumi import azure:apimanagement/apiOperationPolicy:ApiOperationPolicy example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.ApiManagement/service/instance1/apis/api1/operations/operation1/policies/policy
// ```
type ApiOperationPolicy struct {
pulumi.CustomResourceState
// The name of the API Management Service. Changing this forces a new resource to be created.
ApiManagementName pulumi.StringOutput `pulumi:"apiManagementName"`
// The ID of the API Management API Operation within the API Management Service. Changing this forces a new resource to be created.
ApiName pulumi.StringOutput `pulumi:"apiName"`
OperationId pulumi.StringOutput `pulumi:"operationId"`
// The name of the Resource Group in which the API Management Service exists. Changing this forces a new resource to be created.
ResourceGroupName pulumi.StringOutput `pulumi:"resourceGroupName"`
// The XML Content for this Policy.
XmlContent pulumi.StringOutput `pulumi:"xmlContent"`
// A link to a Policy XML Document, which must be publicly available.
XmlLink pulumi.StringPtrOutput `pulumi:"xmlLink"`
}
// NewApiOperationPolicy registers a new resource with the given unique name, arguments, and options.
func NewApiOperationPolicy(ctx *pulumi.Context,
name string, args *ApiOperationPolicyArgs, opts ...pulumi.ResourceOption) (*ApiOperationPolicy, error) {
if args == nil || args.ApiManagementName == nil {
return nil, errors.New("missing required argument 'ApiManagementName'")
}
if args == nil || args.ApiName == nil {
return nil, errors.New("missing required argument 'ApiName'")
}
if args == nil || args.OperationId == nil {
return nil, errors.New("missing required argument 'OperationId'")
}
if args == nil || args.ResourceGroupName == nil {
return nil, errors.New("missing required argument 'ResourceGroupName'")
}
if args == nil {
args = &ApiOperationPolicyArgs{}
}
var resource ApiOperationPolicy
err := ctx.RegisterResource("azure:apimanagement/apiOperationPolicy:ApiOperationPolicy", name, args, &resource, opts...)
if err != nil {
return nil, err
}
return &resource, nil
}
// GetApiOperationPolicy gets an existing ApiOperationPolicy resource's state with the given name, ID, and optional
// state properties that are used to uniquely qualify the lookup (nil if not required).
func GetApiOperationPolicy(ctx *pulumi.Context,
name string, id pulumi.IDInput, state *ApiOperationPolicyState, opts ...pulumi.ResourceOption) (*ApiOperationPolicy, error) {
var resource ApiOperationPolicy
err := ctx.ReadResource("azure:apimanagement/apiOperationPolicy:ApiOperationPolicy", name, id, state, &resource, opts...)
if err != nil {
return nil, err
}
return &resource, nil
}
// Input properties used for looking up and filtering ApiOperationPolicy resources.
type apiOperationPolicyState struct {
// The name of the API Management Service. Changing this forces a new resource to be created.
ApiManagementName *string `pulumi:"apiManagementName"`
// The ID of the API Management API Operation within the API Management Service. Changing this forces a new resource to be created.
ApiName *string `pulumi:"apiName"`
OperationId *string `pulumi:"operationId"`
// The name of the Resource Group in which the API Management Service exists. Changing this forces a new resource to be created.
ResourceGroupName *string `pulumi:"resourceGroupName"`
// The XML Content for this Policy.
XmlContent *string `pulumi:"xmlContent"`
// A link to a Policy XML Document, which must be publicly available.
XmlLink *string `pulumi:"xmlLink"`
}
type ApiOperationPolicyState struct {
// The name of the API Management Service. Changing this forces a new resource to be created.
ApiManagementName pulumi.StringPtrInput
// The ID of the API Management API Operation within the API Management Service. Changing this forces a new resource to be created.
ApiName pulumi.StringPtrInput
OperationId pulumi.StringPtrInput
// The name of the Resource Group in which the API Management Service exists. Changing this forces a new resource to be created.
ResourceGroupName pulumi.StringPtrInput
// The XML Content for this Policy.
XmlContent pulumi.StringPtrInput
// A link to a Policy XML Document, which must be publicly available.
XmlLink pulumi.StringPtrInput
}
func (ApiOperationPolicyState) ElementType() reflect.Type {
return reflect.TypeOf((*apiOperationPolicyState)(nil)).Elem()
}
type apiOperationPolicyArgs struct {
// The name of the API Management Service. Changing this forces a new resource to be created.
ApiManagementName string `pulumi:"apiManagementName"`
// The ID of the API Management API Operation within the API Management Service. Changing this forces a new resource to be created.
ApiName string `pulumi:"apiName"`
OperationId string `pulumi:"operationId"`
// The name of the Resource Group in which the API Management Service exists. Changing this forces a new resource to be created.
ResourceGroupName string `pulumi:"resourceGroupName"`
// The XML Content for this Policy.
XmlContent *string `pulumi:"xmlContent"`
// A link to a Policy XML Document, which must be publicly available.
XmlLink *string `pulumi:"xmlLink"`
}
// The set of arguments for constructing a ApiOperationPolicy resource.
type ApiOperationPolicyArgs struct {
// The name of the API Management Service. Changing this forces a new resource to be created.
ApiManagementName pulumi.StringInput
// The ID of the API Management API Operation within the API Management Service. Changing this forces a new resource to be created.
ApiName pulumi.StringInput
OperationId pulumi.StringInput
// The name of the Resource Group in which the API Management Service exists. Changing this forces a new resource to be created.
ResourceGroupName pulumi.StringInput
// The XML Content for this Policy.
XmlContent pulumi.StringPtrInput
// A link to a Policy XML Document, which must be publicly available.
XmlLink pulumi.StringPtrInput
}
func (ApiOperationPolicyArgs) ElementType() reflect.Type {
return reflect.TypeOf((*apiOperationPolicyArgs)(nil)).Elem()
}
type ApiOperationPolicyInput interface {
pulumi.Input
ToApiOperationPolicyOutput() ApiOperationPolicyOutput
ToApiOperationPolicyOutputWithContext(ctx context.Context) ApiOperationPolicyOutput
}
func (ApiOperationPolicy) ElementType() reflect.Type {
return reflect.TypeOf((*ApiOperationPolicy)(nil)).Elem()
}
func (i ApiOperationPolicy) ToApiOperationPolicyOutput() ApiOperationPolicyOutput {
return i.ToApiOperationPolicyOutputWithContext(context.Background())
}
func (i ApiOperationPolicy) ToApiOperationPolicyOutputWithContext(ctx context.Context) ApiOperationPolicyOutput {
return pulumi.ToOutputWithContext(ctx, i).(ApiOperationPolicyOutput)
}
type ApiOperationPolicyOutput struct {
*pulumi.OutputState
}
func (ApiOperationPolicyOutput) ElementType() reflect.Type {
return reflect.TypeOf((*ApiOperationPolicyOutput)(nil)).Elem()
}
func (o ApiOperationPolicyOutput) ToApiOperationPolicyOutput() ApiOperationPolicyOutput {
return o
} |
func (o ApiOperationPolicyOutput) ToApiOperationPolicyOutputWithContext(ctx context.Context) ApiOperationPolicyOutput {
return o
}
func init() {
pulumi.RegisterOutputType(ApiOperationPolicyOutput{})
} | |
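// A minimal sketch of reading back an existing policy with
// GetApiOperationPolicy, complementing the create example above (the resource
// ID mirrors the Import section and is illustrative):
//
//	policy, err := apimanagement.GetApiOperationPolicy(ctx, "existing",
//		pulumi.ID("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.ApiManagement/service/instance1/apis/api1/operations/operation1/policies/policy"),
//		nil)
//	if err != nil {
//		return err
//	}
//	ctx.Export("policyXml", policy.XmlContent)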
mb27_16b_cs.rs | #[doc = "Reader of register MB27_16B_CS"]
pub type R = crate::R<u32, super::MB27_16B_CS>;
#[doc = "Writer for register MB27_16B_CS"]
pub type W = crate::W<u32, super::MB27_16B_CS>;
#[doc = "Register MB27_16B_CS `reset()`'s with value 0"]
impl crate::ResetValue for super::MB27_16B_CS {
type Type = u32;
#[inline(always)]
fn reset_value() -> Self::Type {
0
}
}
#[doc = "Reader of field `TIME_STAMP`"]
pub type TIME_STAMP_R = crate::R<u16, u16>;
#[doc = "Write proxy for field `TIME_STAMP`"]
pub struct TIME_STAMP_W<'a> {
w: &'a mut W,
}
impl<'a> TIME_STAMP_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u16) -> &'a mut W {
self.w.bits = (self.w.bits & !0xffff) | ((value as u32) & 0xffff);
self.w
}
}
#[doc = "Reader of field `DLC`"]
pub type DLC_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `DLC`"]
pub struct DLC_W<'a> {
w: &'a mut W,
}
impl<'a> DLC_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x0f << 16)) | (((value as u32) & 0x0f) << 16);
self.w
}
}
#[doc = "Reader of field `RTR`"]
pub type RTR_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `RTR`"]
pub struct RTR_W<'a> {
w: &'a mut W,
}
impl<'a> RTR_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 20)) | (((value as u32) & 0x01) << 20);
self.w
}
}
#[doc = "Reader of field `IDE`"]
pub type IDE_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `IDE`"]
pub struct IDE_W<'a> {
w: &'a mut W,
}
impl<'a> IDE_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 21)) | (((value as u32) & 0x01) << 21);
self.w
}
}
#[doc = "Reader of field `SRR`"]
pub type SRR_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `SRR`"]
pub struct SRR_W<'a> {
w: &'a mut W,
}
impl<'a> SRR_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W |
}
#[doc = "Reader of field `CODE`"]
pub type CODE_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `CODE`"]
pub struct CODE_W<'a> {
w: &'a mut W,
}
impl<'a> CODE_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x0f << 24)) | (((value as u32) & 0x0f) << 24);
self.w
}
}
#[doc = "Reader of field `ESI`"]
pub type ESI_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `ESI`"]
pub struct ESI_W<'a> {
w: &'a mut W,
}
impl<'a> ESI_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 29)) | (((value as u32) & 0x01) << 29);
self.w
}
}
#[doc = "Reader of field `BRS`"]
pub type BRS_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `BRS`"]
pub struct BRS_W<'a> {
w: &'a mut W,
}
impl<'a> BRS_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 30)) | (((value as u32) & 0x01) << 30);
self.w
}
}
#[doc = "Reader of field `EDL`"]
pub type EDL_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `EDL`"]
pub struct EDL_W<'a> {
w: &'a mut W,
}
impl<'a> EDL_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 31)) | (((value as u32) & 0x01) << 31);
self.w
}
}
impl R {
#[doc = "Bits 0:15 - Free-Running Counter Time stamp. This 16-bit field is a copy of the Free-Running Timer, captured for Tx and Rx frames at the time when the beginning of the Identifier field appears on the CAN bus."]
#[inline(always)]
pub fn time_stamp(&self) -> TIME_STAMP_R {
TIME_STAMP_R::new((self.bits & 0xffff) as u16)
}
#[doc = "Bits 16:19 - Length of the data to be stored/transmitted."]
#[inline(always)]
pub fn dlc(&self) -> DLC_R {
DLC_R::new(((self.bits >> 16) & 0x0f) as u8)
}
#[doc = "Bit 20 - Remote Transmission Request. One/zero for remote/data frame."]
#[inline(always)]
pub fn rtr(&self) -> RTR_R {
RTR_R::new(((self.bits >> 20) & 0x01) != 0)
}
#[doc = "Bit 21 - ID Extended. One/zero for extended/standard format frame."]
#[inline(always)]
pub fn ide(&self) -> IDE_R {
IDE_R::new(((self.bits >> 21) & 0x01) != 0)
}
#[doc = "Bit 22 - Substitute Remote Request. Contains a fixed recessive bit."]
#[inline(always)]
pub fn srr(&self) -> SRR_R {
SRR_R::new(((self.bits >> 22) & 0x01) != 0)
}
#[doc = "Bits 24:27 - Message Buffer Code. This 4-bit field can be accessed (read or write) by the CPU and by the FlexCAN module itself, as part of the message buffer matching and arbitration process."]
#[inline(always)]
pub fn code(&self) -> CODE_R {
CODE_R::new(((self.bits >> 24) & 0x0f) as u8)
}
#[doc = "Bit 29 - Error State Indicator. This bit indicates if the transmitting node is error active or error passive."]
#[inline(always)]
pub fn esi(&self) -> ESI_R {
ESI_R::new(((self.bits >> 29) & 0x01) != 0)
}
#[doc = "Bit 30 - Bit Rate Switch. This bit defines whether the bit rate is switched inside a CAN FD format frame."]
#[inline(always)]
pub fn brs(&self) -> BRS_R {
BRS_R::new(((self.bits >> 30) & 0x01) != 0)
}
#[doc = "Bit 31 - Extended Data Length. This bit distinguishes between CAN format and CAN FD format frames. The EDL bit must not be set for Message Buffers configured to RANSWER with code field 0b1010."]
#[inline(always)]
pub fn edl(&self) -> EDL_R {
EDL_R::new(((self.bits >> 31) & 0x01) != 0)
}
}
impl W {
#[doc = "Bits 0:15 - Free-Running Counter Time stamp. This 16-bit field is a copy of the Free-Running Timer, captured for Tx and Rx frames at the time when the beginning of the Identifier field appears on the CAN bus."]
#[inline(always)]
pub fn time_stamp(&mut self) -> TIME_STAMP_W {
TIME_STAMP_W { w: self }
}
#[doc = "Bits 16:19 - Length of the data to be stored/transmitted."]
#[inline(always)]
pub fn dlc(&mut self) -> DLC_W {
DLC_W { w: self }
}
#[doc = "Bit 20 - Remote Transmission Request. One/zero for remote/data frame."]
#[inline(always)]
pub fn rtr(&mut self) -> RTR_W {
RTR_W { w: self }
}
#[doc = "Bit 21 - ID Extended. One/zero for extended/standard format frame."]
#[inline(always)]
pub fn ide(&mut self) -> IDE_W {
IDE_W { w: self }
}
#[doc = "Bit 22 - Substitute Remote Request. Contains a fixed recessive bit."]
#[inline(always)]
pub fn srr(&mut self) -> SRR_W {
SRR_W { w: self }
}
#[doc = "Bits 24:27 - Message Buffer Code. This 4-bit field can be accessed (read or write) by the CPU and by the FlexCAN module itself, as part of the message buffer matching and arbitration process."]
#[inline(always)]
pub fn code(&mut self) -> CODE_W {
CODE_W { w: self }
}
#[doc = "Bit 29 - Error State Indicator. This bit indicates if the transmitting node is error active or error passive."]
#[inline(always)]
pub fn esi(&mut self) -> ESI_W {
ESI_W { w: self }
}
#[doc = "Bit 30 - Bit Rate Switch. This bit defines whether the bit rate is switched inside a CAN FD format frame."]
#[inline(always)]
pub fn brs(&mut self) -> BRS_W {
BRS_W { w: self }
}
#[doc = "Bit 31 - Extended Data Length. This bit distinguishes between CAN format and CAN FD format frames. The EDL bit must not be set for Message Buffers configured to RANSWER with code field 0b1010."]
#[inline(always)]
pub fn edl(&mut self) -> EDL_W {
EDL_W { w: self }
}
}
| {
self.w.bits = (self.w.bits & !(0x01 << 22)) | (((value as u32) & 0x01) << 22);
self.w
} |
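// A minimal usage sketch of the generated write API above (the `can0`
// peripheral handle is hypothetical; `code().bits` and `dlc().bits` are
// `unsafe` because they accept raw field values):
//
//     can0.mb27_16b_cs.write(|w| unsafe {
//         w.code().bits(0b1100)  // message buffer code: transmit data frame
//          .dlc().bits(8)        // eight data bytes
//          .rtr().clear_bit()    // data frame rather than remote frame
//          .ide().clear_bit()    // standard 11-bit identifier
//     });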
TeamPages.tsx | import { includes } from 'lodash';
import React, { PureComponent } from 'react';
import { connect, ConnectedProps } from 'react-redux';
import { NavModel } from '@grafana/data';
import { featureEnabled } from '@grafana/runtime';
import { Themeable2, withTheme2 } from '@grafana/ui';
import Page from 'app/core/components/Page/Page';
import { UpgradeBox } from 'app/core/components/Upgrade/UpgradeBox';
import config from 'app/core/config';
import { GrafanaRouteComponentProps } from 'app/core/navigation/types';
import { getNavModel } from 'app/core/selectors/navModel';
import { contextSrv } from 'app/core/services/context_srv';
import { AccessControlAction, StoreState } from 'app/types';
import TeamGroupSync, { TeamSyncUpgradeContent } from './TeamGroupSync';
import TeamMembers from './TeamMembers';
import TeamPermissions from './TeamPermissions';
import TeamSettings from './TeamSettings';
import { loadTeam, loadTeamMembers } from './state/actions';
import { getTeamLoadingNav } from './state/navModel';
import { getTeam, getTeamMembers, isSignedInUserTeamAdmin } from './state/selectors';
interface TeamPageRouteParams {
id: string;
page: string | null;
}
export interface OwnProps extends GrafanaRouteComponentProps<TeamPageRouteParams>, Themeable2 {}
interface State {
isSyncEnabled: boolean;
isLoading: boolean;
}
enum PageTypes {
Members = 'members',
Settings = 'settings',
GroupSync = 'groupsync',
}
function mapStateToProps(state: StoreState, props: OwnProps) { | let defaultPage = 'members';
if (contextSrv.accessControlEnabled()) {
// With RBAC the settings page will always be available
if (!team || !contextSrv.hasPermissionInMetadata(AccessControlAction.ActionTeamsPermissionsRead, team)) {
defaultPage = 'settings';
}
}
const pageName = props.match.params.page ?? defaultPage;
const teamLoadingNav = getTeamLoadingNav(pageName as string);
const navModel = getNavModel(state.navIndex, `team-${pageName}-${teamId}`, teamLoadingNav);
const members = getTeamMembers(state.team);
return {
navModel,
teamId: teamId,
pageName: pageName,
team,
members,
editorsCanAdmin: config.editorsCanAdmin, // this makes the feature toggle mockable/controllable from tests,
signedInUser: contextSrv.user, // this makes the feature toggle mockable/controllable from tests,
};
}
const mapDispatchToProps = {
loadTeam,
loadTeamMembers,
};
const connector = connect(mapStateToProps, mapDispatchToProps);
export type Props = OwnProps & ConnectedProps<typeof connector>;
export class TeamPages extends PureComponent<Props, State> {
constructor(props: Props) {
super(props);
this.state = {
isLoading: false,
isSyncEnabled: featureEnabled('teamsync'),
};
}
async componentDidMount() {
await this.fetchTeam();
}
async fetchTeam() {
const { loadTeam, teamId } = this.props;
this.setState({ isLoading: true });
const team = await loadTeam(teamId);
// With accesscontrol, the TeamPermissions will fetch team members
if (!contextSrv.accessControlEnabled()) {
await this.props.loadTeamMembers();
}
this.setState({ isLoading: false });
return team;
}
getCurrentPage() {
const pages = ['members', 'settings', 'groupsync'];
const currentPage = this.props.pageName;
return includes(pages, currentPage) ? currentPage : pages[0];
}
textsAreEqual = (text1: string, text2: string) => {
if (!text1 && !text2) {
return true;
}
if (!text1 || !text2) {
return false;
}
return text1.toLocaleLowerCase() === text2.toLocaleLowerCase();
};
hideTabsFromNonTeamAdmin = (navModel: NavModel, isSignedInUserTeamAdmin: boolean) => {
if (contextSrv.accessControlEnabled()) {
return navModel;
}
if (!isSignedInUserTeamAdmin && navModel.main && navModel.main.children) {
navModel.main.children
.filter((navItem) => !this.textsAreEqual(navItem.text, PageTypes.Members))
.forEach((navItem) => {
navItem.hideFromTabs = true;
});
}
return navModel;
};
renderPage(isSignedInUserTeamAdmin: boolean): React.ReactNode {
const { isSyncEnabled } = this.state;
const { members, team } = this.props;
const currentPage = this.getCurrentPage();
const canReadTeam = contextSrv.hasAccessInMetadata(
AccessControlAction.ActionTeamsRead,
team!,
isSignedInUserTeamAdmin
);
const canReadTeamPermissions = contextSrv.hasAccessInMetadata(
AccessControlAction.ActionTeamsPermissionsRead,
team!,
isSignedInUserTeamAdmin
);
const canWriteTeamPermissions = contextSrv.hasAccessInMetadata(
AccessControlAction.ActionTeamsPermissionsWrite,
team!,
isSignedInUserTeamAdmin
);
switch (currentPage) {
case PageTypes.Members:
if (contextSrv.accessControlEnabled()) {
return <TeamPermissions team={team!} />;
} else {
return <TeamMembers syncEnabled={isSyncEnabled} members={members} />;
}
case PageTypes.Settings:
return canReadTeam && <TeamSettings team={team!} />;
case PageTypes.GroupSync:
if (isSyncEnabled) {
if (canReadTeamPermissions) {
return <TeamGroupSync isReadOnly={!canWriteTeamPermissions} />;
}
} else if (config.featureToggles.featureHighlights) {
return (
<>
<UpgradeBox featureName={'team sync'} featureId={'team-sync'} />
<TeamSyncUpgradeContent />
</>
);
}
}
return null;
}
render() {
const { team, navModel, members, editorsCanAdmin, signedInUser } = this.props;
const isTeamAdmin = isSignedInUserTeamAdmin({ members, editorsCanAdmin, signedInUser });
return (
<Page navModel={this.hideTabsFromNonTeamAdmin(navModel, isTeamAdmin)}>
<Page.Contents isLoading={this.state.isLoading}>
{team && Object.keys(team).length !== 0 && this.renderPage(isTeamAdmin)}
</Page.Contents>
</Page>
);
}
}
export default connector(withTheme2(TeamPages)); | const teamId = parseInt(props.match.params.id, 10);
const team = getTeam(state.team, teamId); |
gallardo_wheel_bin.js | {
"metadata" :
{
"formatVersion" : 3.1,
"sourceFile" : "gallardo_wheel.obj",
"generatedBy" : "OBJConverter",
"vertices" : 4434,
"faces" : 4394,
"normals" : 2654,
"uvs" : 0,
"materials" : 2
},
"materials": [ {
"DbgColor" : 15658734,
"DbgIndex" : 0,
"DbgName" : "wire_255255255",
"colorAmbient" : [0.0, 0.0, 0.0],
"colorDiffuse" : [0.64, 0.64, 0.64],
"colorSpecular" : [0.175, 0.175, 0.175],
"illumination" : 2,
"opticalDensity" : 1.0,
"specularCoef" : 27.45098,
"opacity" : 0.0
},
| {
"DbgColor" : 15597568,
"DbgIndex" : 1,
"DbgName" : "wire_115115115",
"colorAmbient" : [0.0, 0.0, 0.0],
"colorDiffuse" : [0.28864, 0.28864, 0.28864],
"colorSpecular" : [0.175, 0.175, 0.175],
"illumination" : 2,
"opticalDensity" : 1.0,
"specularCoef" : 27.45098,
"opacity" : 0.0
}],
"buffers": "gallardo_wheel_bin.bin"
} | |
facetGetters.ts | import {
FacetSearchResult,
AgnosticCategoryTree,
AgnosticGroupedFacet,
AgnosticPagination,
FacetsGetters,
AgnosticBreadcrumb,
AgnosticFacet
} from '@vue-storefront/core';
import type { Facet, FacetSearchCriteria } from '@vue-storefront/woocommerce-api';
const availableSortingOptions = {
name_ASC: {
order: 'ASC',
orderby: 'name',
label: 'Name from a to z'
},
name_DESC: {
order: 'DESC',
orderby: 'name',
label: 'Name from z to a'
},
// 'id_ASC': {
// order: 'ASC',
// orderby: 'id',
// label: 'ID in ASCENDING order'
// },
// 'id_DESC': {
// order: 'DESC',
// orderby: 'id',
// label: 'ID in DESCENDING order'
// },
date_ASC: {
order: 'ASC',
orderby: 'date',
label: 'Date from old to new'
},
date_DESC: {
order: 'DESC',
orderby: 'date',
label: 'Date from new to old'
},
price_ASC: {
order: 'ASC',
orderby: 'price',
label: 'Price from low to high'
},
price_DESC: {
order: 'DESC',
orderby: 'price',
label: 'Price from high to low'
}
};
// eslint-disable-next-line @typescript-eslint/no-unused-vars
function getAll(params: FacetSearchResult<any>, criteria?: FacetSearchCriteria): AgnosticFacet[] {
const facets = [];
Object.keys(params?.data || {}).forEach(key => {
Object.keys(params?.data[key]?.values || {}).forEach(val => {
facets.push({
type: key,
id: val,
value: params.data[key].values[val].title,
selected: (params.input?.filters[key] || []).includes(val),
count: params.data[key].values[val].count
});
});
});
return facets;
}
function getGrouped(params: FacetSearchResult<any>): AgnosticGroupedFacet[] {
const groupedFacet = Object.keys(params?.data || {}).map(key => { | options: Object.keys(params?.data[key]?.values || {}).map(val => {
return {
type: params.data[key].id,
id: val,
value: params.data[key].values[val].title,
selected: (params.input?.filters[key] || []).includes(val),
count: params.data[key].values[val].count
};
})
};
});
return groupedFacet;
}
function getPreviousFacets(params: FacetSearchResult<any>): AgnosticGroupedFacet[] {
if (!params?.input?.prevFacets) {
return [];
}
const groupedFacet = params?.input?.prevFacets.map(key => {
return {
id: key.id,
label: key.label,
options: key.options.map(val => {
return {
type: val.type,
id: val.id,
value: val.value,
selected: (params.input?.filters[key.id] || []).includes(val.id),
count: val.count
};
})
};
});
return groupedFacet;
}
function getSortOptions(params: FacetSearchResult<any>): any {
return {
options: Object.keys(availableSortingOptions).map(key => {
return { key, label: availableSortingOptions[key].label };
}),
selected: params?.input?.sort || 'name_ASC'
};
}
// eslint-disable-next-line @typescript-eslint/no-unused-vars
function getCategoryTree(params: FacetSearchResult<any>): AgnosticCategoryTree {
return {
label: '',
slug: '',
items: null,
isCurrent: false,
count: 0
};
}
function getProducts(params: FacetSearchResult<any>): any {
return params?.data?.products || [];
}
function getPagination(params: FacetSearchResult<any>): AgnosticPagination {
return {
currentPage: params?.input?.page || 0,
totalPages: params?.data?.pages || 1,
totalItems: params?.data?.total || 20,
itemsPerPage: params?.input?.itemsPerPage || 20,
pageOptions: [20, 60, 100]
};
}
// eslint-disable-next-line @typescript-eslint/no-unused-vars
function getBreadcrumbs(params: FacetSearchResult<any>): AgnosticBreadcrumb[] {
return [];
}
export const facetGetters: FacetsGetters<Facet, FacetSearchCriteria> = {
getSortOptions,
getGrouped,
getAll,
getProducts,
getCategoryTree,
getBreadcrumbs,
getPagination,
getPreviousFacets
}; | return {
id: params.data[key].id,
label: params.data[key].title, |
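// A minimal usage sketch for the getters above (`result` is a hypothetical
// FacetSearchResult, e.g. as produced by a useFacet-style composable):
//
//   const pagination = facetGetters.getPagination(result); // { currentPage, totalPages, ... }
//   const sorting = facetGetters.getSortOptions(result);   // { options, selected }
//   const grouped = facetGetters.getGrouped(result);       // one group per facet key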
build.rs | use winres;
| let mut res = winres::WindowsResource::new();
res.set_manifest(
r#"<?xml version="1.0" encoding="utf-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0" xmlns:asmv3="urn:schemas-microsoft-com:asm.v3">
<application xmlns="urn:schemas-microsoft-com:asm.v3">
<windowsSettings xmlns:ws2="http://schemas.microsoft.com/SMI/2016/WindowsSettings">
<ws2:longPathAware>true</ws2:longPathAware>
</windowsSettings>
</application>
</assembly>"#);
res.compile()?;
}
Ok(())
} | fn main() -> std::io::Result<()> {
if cfg!(target_os = "windows") {
// We need to set the 'longPathAware' manifest key, so that file paths with length >260 chars will work.
// This happens sometimes since we encode IDs for duplicate files. |
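// A minimal sketch of the manifest side this build script assumes: `winres`
// must be declared as a build-dependency in Cargo.toml (the version below is
// illustrative).
//
//     [build-dependencies]
//     winres = "0.1"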
localize.ts |
export default function | (literals: TemplateStringsArray, ...args: any[]) {
let result: string[] = []
for (let i = 0; i < literals.length; i++) {
result.push(literals[i])
if (args[i] !== undefined)
result.push(args[i])
}
return result.join("")
} | localize |
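// A minimal usage sketch: `localize` is a template-literal tag, so the
// interpolated values are re-joined in source order (the greeting is illustrative):
//
//   const name = "Ada";
//   const message = localize`Hello, ${name}!`; // "Hello, Ada!"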
classify_command_descriptor.py | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from .abstract_command_descriptor import AbstractCommandDescriptor
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ClassifyCommandDescriptor(AbstractCommandDescriptor):
"""
Command descriptor for querylanguage CLASSIFY command.
"""
def __init__(self, **kwargs):
"""
Initializes a new ClassifyCommandDescriptor object with values from keyword arguments. The default value of the :py:attr:`~oci.log_analytics.models.ClassifyCommandDescriptor.name` attribute
of this class is ``CLASSIFY`` and it should not be changed.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param name:
The value to assign to the name property of this ClassifyCommandDescriptor.
Allowed values for this property are: "COMMAND", "SEARCH", "STATS", "GEO_STATS", "TIME_STATS", "SORT", "FIELDS", "ADD_FIELDS", "LINK", "LINK_DETAILS", "CLUSTER", "CLUSTER_DETAILS", "CLUSTER_SPLIT", "EVAL", "EXTRACT", "JSON_EXTRACT", "XML_EXTRACT", "EVENT_STATS", "BUCKET", "CLASSIFY", "TOP", "BOTTOM", "HEAD", "TAIL", "FIELD_SUMMARY", "REGEX", "RENAME", "TIME_COMPARE", "WHERE", "CLUSTER_COMPARE", "DELETE", "DELTA", "DISTINCT", "SEARCH_LOOKUP", "LOOKUP", "DEMO_MODE", "MACRO", "MULTI_SEARCH", "HIGHLIGHT", "HIGHLIGHT_ROWS", "HIGHLIGHT_GROUPS", "CREATE_VIEW", "MAP", "NLP", "COMPARE"
:type name: str
:param display_query_string:
The value to assign to the display_query_string property of this ClassifyCommandDescriptor.
:type display_query_string: str
:param internal_query_string:
The value to assign to the internal_query_string property of this ClassifyCommandDescriptor.
:type internal_query_string: str
:param category:
The value to assign to the category property of this ClassifyCommandDescriptor.
:type category: str
:param referenced_fields:
The value to assign to the referenced_fields property of this ClassifyCommandDescriptor.
:type referenced_fields: list[oci.log_analytics.models.AbstractField]
:param declared_fields:
The value to assign to the declared_fields property of this ClassifyCommandDescriptor.
:type declared_fields: list[oci.log_analytics.models.AbstractField]
:param top_count:
The value to assign to the top_count property of this ClassifyCommandDescriptor.
:type top_count: int
:param bottom_count:
The value to assign to the bottom_count property of this ClassifyCommandDescriptor.
:type bottom_count: int
:param correlate:
The value to assign to the correlate property of this ClassifyCommandDescriptor.
:type correlate: list[oci.log_analytics.models.FieldsAddRemoveField]
"""
self.swagger_types = {
'name': 'str',
'display_query_string': 'str',
'internal_query_string': 'str',
'category': 'str',
'referenced_fields': 'list[AbstractField]',
'declared_fields': 'list[AbstractField]',
'top_count': 'int',
'bottom_count': 'int',
'correlate': 'list[FieldsAddRemoveField]'
}
self.attribute_map = {
'name': 'name',
'display_query_string': 'displayQueryString',
'internal_query_string': 'internalQueryString',
'category': 'category',
'referenced_fields': 'referencedFields',
'declared_fields': 'declaredFields',
'top_count': 'topCount',
'bottom_count': 'bottomCount',
'correlate': 'correlate'
}
self._name = None
self._display_query_string = None
self._internal_query_string = None
self._category = None
self._referenced_fields = None
self._declared_fields = None
self._top_count = None
self._bottom_count = None
self._correlate = None
self._name = 'CLASSIFY'
@property
def top_count(self):
"""
Gets the top_count of this ClassifyCommandDescriptor.
Value specified in CLASSIFY command in queryString if set limits the results returned to top N.
:return: The top_count of this ClassifyCommandDescriptor.
:rtype: int
"""
return self._top_count
@top_count.setter
def top_count(self, top_count):
"""
Sets the top_count of this ClassifyCommandDescriptor.
Value specified in CLASSIFY command in queryString if set limits the results returned to top N.
:param top_count: The top_count of this ClassifyCommandDescriptor.
:type: int
"""
self._top_count = top_count
@property
def bottom_count(self):
"""
Gets the bottom_count of this ClassifyCommandDescriptor.
Value specified in CLASSIFY command in queryString if set limits the results returned to bottom N.
:return: The bottom_count of this ClassifyCommandDescriptor.
:rtype: int
"""
return self._bottom_count
@bottom_count.setter
def bottom_count(self, bottom_count):
"""
Sets the bottom_count of this ClassifyCommandDescriptor.
Value specified in CLASSIFY command in queryString if set limits the results returned to bottom N.
:param bottom_count: The bottom_count of this ClassifyCommandDescriptor.
:type: int
"""
self._bottom_count = bottom_count
@property
def correlate(self):
|
@correlate.setter
def correlate(self, correlate):
"""
Sets the correlate of this ClassifyCommandDescriptor.
Fields specified in CLASSIFY command in queryString if set include / exclude fields in correlate results.
:param correlate: The correlate of this ClassifyCommandDescriptor.
:type: list[oci.log_analytics.models.FieldsAddRemoveField]
"""
self._correlate = correlate
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| """
Gets the correlate of this ClassifyCommandDescriptor.
Fields specified in CLASSIFY command in queryString if set include / exclude fields in correlate results.
:return: The correlate of this ClassifyCommandDescriptor.
:rtype: list[oci.log_analytics.models.FieldsAddRemoveField]
"""
return self._correlate |
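# A minimal usage sketch (the query string is illustrative; keyword handling
# comes from the init_model_state_from_kwargs decorator applied above):
#
#     descriptor = ClassifyCommandDescriptor(
#         display_query_string="* | classify 'Host' top 5",
#         top_count=5,
#     )
#     assert descriptor.name == "CLASSIFY"   # fixed by __init__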
pretty.py | import builtins
import os
from rich.repr import RichReprResult
import sys
from array import array
from collections import Counter, defaultdict, deque, UserDict, UserList
import dataclasses
from dataclasses import dataclass, fields, is_dataclass
from inspect import isclass
from itertools import islice
import re
from typing import (
DefaultDict,
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Set,
Union,
Tuple,
)
from types import MappingProxyType
try:
import attr as _attr_module
except ImportError: # pragma: no cover
_attr_module = None # type: ignore
from .highlighter import ReprHighlighter
from . import get_console
from ._loop import loop_last
from ._pick import pick_bool
from .abc import RichRenderable
from .cells import cell_len
from .highlighter import ReprHighlighter
from .jupyter import JupyterMixin, JupyterRenderable
from .measure import Measurement
from .text import Text
if TYPE_CHECKING:
from .console import (
Console,
ConsoleOptions,
HighlighterType,
JustifyMethod,
OverflowMethod,
RenderResult,
)
# Matches Jupyter's special methods
_re_jupyter_repr = re.compile("^_repr_.+_$")
def _is_attr_object(obj: Any) -> bool:
"""Check if an object was created with attrs module."""
return _attr_module is not None and _attr_module.has(type(obj))
def _get_attr_fields(obj: Any) -> Iterable["_attr_module.Attribute[Any]"]:
"""Get fields for an attrs object."""
return _attr_module.fields(type(obj)) if _attr_module is not None else []
def _is_dataclass_repr(obj: object) -> bool:
"""Check if an instance of a dataclass contains the default repr.
Args:
obj (object): A dataclass instance.
Returns:
bool: True if the default repr is used, False if there is a custom repr.
"""
# Digging in to a lot of internals here
# Catching all exceptions in case something is missing on a non-CPython implementation
try:
return obj.__repr__.__code__.co_filename == dataclasses.__file__
except Exception: # pragma: no coverage
return False
def install(
console: Optional["Console"] = None,
overflow: "OverflowMethod" = "ignore",
crop: bool = False,
indent_guides: bool = False,
max_length: Optional[int] = None,
max_string: Optional[int] = None,
expand_all: bool = False,
) -> None:
"""Install automatic pretty printing in the Python REPL.
Args:
console (Console, optional): Console instance or ``None`` to use global console. Defaults to None.
overflow (Optional[OverflowMethod], optional): Overflow method. Defaults to "ignore".
crop (Optional[bool], optional): Enable cropping of long lines. Defaults to False.
indent_guides (bool, optional): Enable indentation guides. Defaults to False.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None.
expand_all (bool, optional): Expand all containers. Defaults to False.
"""
from rich import get_console
from .console import ConsoleRenderable # needed here to prevent circular import
console = console or get_console()
assert console is not None
def display_hook(value: Any) -> None:
"""Replacement sys.displayhook which prettifies objects with Rich."""
if value is not None:
assert console is not None
builtins._ = None # type: ignore
console.print(
value
if isinstance(value, RichRenderable)
else Pretty(
value,
overflow=overflow,
indent_guides=indent_guides,
max_length=max_length,
max_string=max_string,
expand_all=expand_all,
),
crop=crop,
)
builtins._ = value # type: ignore
def ipy_display_hook(value: Any) -> None: # pragma: no cover
assert console is not None
# always skip rich generated jupyter renderables or None values
if isinstance(value, JupyterRenderable) or value is None:
return
# on jupyter rich display, if using one of the special representations don't use rich
if console.is_jupyter and any(
_re_jupyter_repr.match(attr) for attr in dir(value)
):
return
# certain renderables should start on a new line
if isinstance(value, ConsoleRenderable):
console.line()
console.print(
value
if isinstance(value, RichRenderable)
else Pretty(
value,
overflow=overflow,
indent_guides=indent_guides,
max_length=max_length,
max_string=max_string,
expand_all=expand_all,
margin=12,
),
crop=crop,
new_line_start=True,
)
try: # pragma: no cover
ip = get_ipython() # type: ignore
from IPython.core.formatters import BaseFormatter
class RichFormatter(BaseFormatter): # type: ignore
pprint: bool = True
def __call__(self, value: Any) -> Any:
if self.pprint:
return ipy_display_hook(value)
else:
return repr(value)
# replace plain text formatter with rich formatter
rich_formatter = RichFormatter()
ip.display_formatter.formatters["text/plain"] = rich_formatter
except Exception:
sys.displayhook = display_hook
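# A minimal usage sketch for install(), e.g. from a PYTHONSTARTUP file; the
# keyword values are illustrative:
#
#     >>> from rich import pretty
#     >>> pretty.install(indent_guides=True, max_length=10)
#     >>> ["eggs", "ham"]   # the REPL displayhook now pretty-prints values
#     ['eggs', 'ham']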
class Pretty(JupyterMixin):
"""A rich renderable that pretty prints an object.
Args:
_object (Any): An object to pretty print.
highlighter (HighlighterType, optional): Highlighter object to apply to result, or None for ReprHighlighter. Defaults to None.
indent_size (int, optional): Number of spaces in indent. Defaults to 4.
justify (JustifyMethod, optional): Justify method, or None for default. Defaults to None.
overflow (OverflowMethod, optional): Overflow method, or None for default. Defaults to None.
no_wrap (Optional[bool], optional): Disable word wrapping. Defaults to False.
indent_guides (bool, optional): Enable indentation guides. Defaults to False.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None.
expand_all (bool, optional): Expand all containers. Defaults to False.
margin (int, optional): Subtract a margin from the width to force containers to expand earlier. Defaults to 0.
insert_line (bool, optional): Insert a new line if the output has multiple new lines. Defaults to False.
"""
def __init__(
self,
_object: Any,
highlighter: Optional["HighlighterType"] = None,
*,
indent_size: int = 4,
justify: Optional["JustifyMethod"] = None,
overflow: Optional["OverflowMethod"] = None,
no_wrap: Optional[bool] = False,
indent_guides: bool = False,
max_length: Optional[int] = None,
max_string: Optional[int] = None,
expand_all: bool = False,
margin: int = 0,
insert_line: bool = False,
) -> None:
|
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
pretty_str = pretty_repr(
self._object,
max_width=options.max_width - self.margin,
indent_size=self.indent_size,
max_length=self.max_length,
max_string=self.max_string,
expand_all=self.expand_all,
)
pretty_text = Text(
pretty_str,
justify=self.justify or options.justify,
overflow=self.overflow or options.overflow,
no_wrap=pick_bool(self.no_wrap, options.no_wrap),
style="pretty",
)
pretty_text = (
self.highlighter(pretty_text)
if pretty_text
else Text(
f"{type(self._object)}.__repr__ returned empty string",
style="dim italic",
)
)
if self.indent_guides and not options.ascii_only:
pretty_text = pretty_text.with_indent_guides(
self.indent_size, style="repr.indent"
)
if self.insert_line and "\n" in pretty_text:
yield ""
yield pretty_text
def __rich_measure__(
self, console: "Console", options: "ConsoleOptions"
) -> "Measurement":
pretty_str = pretty_repr(
self._object,
max_width=options.max_width,
indent_size=self.indent_size,
max_length=self.max_length,
max_string=self.max_string,
)
text_width = (
max(cell_len(line) for line in pretty_str.splitlines()) if pretty_str else 0
)
return Measurement(text_width, text_width)
def _get_braces_for_defaultdict(_object: DefaultDict[Any, Any]) -> Tuple[str, str, str]:
return (
f"defaultdict({_object.default_factory!r}, {{",
"})",
f"defaultdict({_object.default_factory!r}, {{}})",
)
def _get_braces_for_array(_object: "array[Any]") -> Tuple[str, str, str]:
return (f"array({_object.typecode!r}, [", "])", "array({_object.typecode!r})")
_BRACES: Dict[type, Callable[[Any], Tuple[str, str, str]]] = {
os._Environ: lambda _object: ("environ({", "})", "environ({})"),
array: _get_braces_for_array,
defaultdict: _get_braces_for_defaultdict,
Counter: lambda _object: ("Counter({", "})", "Counter()"),
deque: lambda _object: ("deque([", "])", "deque()"),
dict: lambda _object: ("{", "}", "{}"),
UserDict: lambda _object: ("{", "}", "{}"),
frozenset: lambda _object: ("frozenset({", "})", "frozenset()"),
list: lambda _object: ("[", "]", "[]"),
UserList: lambda _object: ("[", "]", "[]"),
set: lambda _object: ("{", "}", "set()"),
tuple: lambda _object: ("(", ")", "()"),
MappingProxyType: lambda _object: ("mappingproxy({", "})", "mappingproxy({})"),
}
_CONTAINERS = tuple(_BRACES.keys())
_MAPPING_CONTAINERS = (dict, os._Environ, MappingProxyType, UserDict)
def is_expandable(obj: Any) -> bool:
"""Check if an object may be expanded by pretty print."""
return (
isinstance(obj, _CONTAINERS)
or (is_dataclass(obj))
or (hasattr(obj, "__rich_repr__"))
or _is_attr_object(obj)
) and not isclass(obj)
@dataclass
class Node:
"""A node in a repr tree. May be atomic or a container."""
key_repr: str = ""
value_repr: str = ""
open_brace: str = ""
close_brace: str = ""
empty: str = ""
last: bool = False
is_tuple: bool = False
children: Optional[List["Node"]] = None
key_separator = ": "
separator: str = ", "
def iter_tokens(self) -> Iterable[str]:
"""Generate tokens for this node."""
if self.key_repr:
yield self.key_repr
yield self.key_separator
if self.value_repr:
yield self.value_repr
elif self.children is not None:
if self.children:
yield self.open_brace
if self.is_tuple and len(self.children) == 1:
yield from self.children[0].iter_tokens()
yield ","
else:
for child in self.children:
yield from child.iter_tokens()
if not child.last:
yield self.separator
yield self.close_brace
else:
yield self.empty
def check_length(self, start_length: int, max_length: int) -> bool:
"""Check the length fits within a limit.
Args:
start_length (int): Starting length of the line (indent, prefix, suffix).
max_length (int): Maximum length.
Returns:
bool: True if the node can be rendered within max length, otherwise False.
"""
total_length = start_length
for token in self.iter_tokens():
total_length += cell_len(token)
if total_length > max_length:
return False
return True
def __str__(self) -> str:
repr_text = "".join(self.iter_tokens())
return repr_text
def render(
self, max_width: int = 80, indent_size: int = 4, expand_all: bool = False
) -> str:
"""Render the node to a pretty repr.
Args:
max_width (int, optional): Maximum width of the repr. Defaults to 80.
indent_size (int, optional): Size of indents. Defaults to 4.
expand_all (bool, optional): Expand all levels. Defaults to False.
Returns:
str: A repr string of the original object.
"""
lines = [_Line(node=self, is_root=True)]
line_no = 0
while line_no < len(lines):
line = lines[line_no]
if line.expandable and not line.expanded:
if expand_all or not line.check_length(max_width):
lines[line_no : line_no + 1] = line.expand(indent_size)
line_no += 1
repr_str = "\n".join(str(line) for line in lines)
return repr_str
@dataclass
class _Line:
"""A line in repr output."""
parent: Optional["_Line"] = None
is_root: bool = False
node: Optional[Node] = None
text: str = ""
suffix: str = ""
whitespace: str = ""
expanded: bool = False
last: bool = False
@property
def expandable(self) -> bool:
"""Check if the line may be expanded."""
return bool(self.node is not None and self.node.children)
def check_length(self, max_length: int) -> bool:
"""Check this line fits within a given number of cells."""
start_length = (
len(self.whitespace) + cell_len(self.text) + cell_len(self.suffix)
)
assert self.node is not None
return self.node.check_length(start_length, max_length)
def expand(self, indent_size: int) -> Iterable["_Line"]:
"""Expand this line by adding children on their own line."""
node = self.node
assert node is not None
whitespace = self.whitespace
assert node.children
if node.key_repr:
new_line = yield _Line(
text=f"{node.key_repr}{node.key_separator}{node.open_brace}",
whitespace=whitespace,
)
else:
new_line = yield _Line(text=node.open_brace, whitespace=whitespace)
child_whitespace = self.whitespace + " " * indent_size
tuple_of_one = node.is_tuple and len(node.children) == 1
for last, child in loop_last(node.children):
separator = "," if tuple_of_one else node.separator
line = _Line(
parent=new_line,
node=child,
whitespace=child_whitespace,
suffix=separator,
last=last and not tuple_of_one,
)
yield line
yield _Line(
text=node.close_brace,
whitespace=whitespace,
suffix=self.suffix,
last=self.last,
)
def __str__(self) -> str:
if self.last:
return f"{self.whitespace}{self.text}{self.node or ''}"
else:
return (
f"{self.whitespace}{self.text}{self.node or ''}{self.suffix.rstrip()}"
)
def traverse(
_object: Any, max_length: Optional[int] = None, max_string: Optional[int] = None
) -> Node:
"""Traverse object and generate a tree.
Args:
_object (Any): Object to be traversed.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable truncating.
Defaults to None.
Returns:
Node: The root of a tree structure which can be used to render a pretty repr.
"""
def to_repr(obj: Any) -> str:
"""Get repr string for an object, but catch errors."""
if (
max_string is not None
and isinstance(obj, (bytes, str))
and len(obj) > max_string
):
truncated = len(obj) - max_string
obj_repr = f"{obj[:max_string]!r}+{truncated}"
else:
try:
obj_repr = repr(obj)
except Exception as error:
obj_repr = f"<repr-error {str(error)!r}>"
return obj_repr
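    # Example (sketch): with max_string=10, to_repr("a" * 100) yields
    # "'aaaaaaaaaa'+90" -- the truncated repr followed by a count of the
    # characters that were dropped.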
visited_ids: Set[int] = set()
push_visited = visited_ids.add
pop_visited = visited_ids.remove
def _traverse(obj: Any, root: bool = False) -> Node:
"""Walk the object depth first."""
obj_type = type(obj)
py_version = (sys.version_info.major, sys.version_info.minor)
children: List[Node]
def iter_rich_args(rich_args: Any) -> Iterable[Union[Any, Tuple[str, Any]]]:
for arg in rich_args:
if isinstance(arg, tuple):
if len(arg) == 3:
key, child, default = arg
if default == child:
continue
yield key, child
elif len(arg) == 2:
key, child = arg
yield key, child
elif len(arg) == 1:
yield arg[0]
else:
yield arg
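        # Objects such as mocks can claim to have *any* attribute, which would
        # make them appear to define __rich_repr__ below; probing a name that
        # cannot plausibly exist detects them first.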
try:
fake_attributes = hasattr(
obj, "awehoi234_wdfjwljet234_234wdfoijsdfmmnxpi492"
)
except Exception:
fake_attributes = False
rich_repr_result: Optional[RichReprResult] = None
if not fake_attributes:
try:
if hasattr(obj, "__rich_repr__") and not isclass(obj):
rich_repr_result = obj.__rich_repr__()
except Exception:
pass
if rich_repr_result is not None:
angular = getattr(obj.__rich_repr__, "angular", False)
args = list(iter_rich_args(rich_repr_result))
class_name = obj.__class__.__name__
if args:
children = []
append = children.append
if angular:
node = Node(
open_brace=f"<{class_name} ",
close_brace=">",
children=children,
last=root,
separator=" ",
)
else:
node = Node(
open_brace=f"{class_name}(",
close_brace=")",
children=children,
last=root,
)
for last, arg in loop_last(args):
if isinstance(arg, tuple):
key, child = arg
child_node = _traverse(child)
child_node.last = last
child_node.key_repr = key
child_node.key_separator = "="
append(child_node)
else:
child_node = _traverse(arg)
child_node.last = last
append(child_node)
else:
node = Node(
value_repr=f"<{class_name}>" if angular else f"{class_name}()",
children=[],
last=root,
)
elif _is_attr_object(obj) and not fake_attributes:
children = []
append = children.append
attr_fields = _get_attr_fields(obj)
if attr_fields:
node = Node(
open_brace=f"{obj.__class__.__name__}(",
close_brace=")",
children=children,
last=root,
)
def iter_attrs() -> Iterable[
Tuple[str, Any, Optional[Callable[[Any], str]]]
]:
"""Iterate over attr fields and values."""
for attr in attr_fields:
if attr.repr:
try:
value = getattr(obj, attr.name)
except Exception as error:
# Can happen, albeit rarely
yield (attr.name, error, None)
else:
yield (
attr.name,
value,
attr.repr if callable(attr.repr) else None,
)
for last, (name, value, repr_callable) in loop_last(iter_attrs()):
if repr_callable:
child_node = Node(value_repr=str(repr_callable(value)))
else:
child_node = _traverse(value)
child_node.last = last
child_node.key_repr = name
child_node.key_separator = "="
append(child_node)
else:
node = Node(
value_repr=f"{obj.__class__.__name__}()", children=[], last=root
)
elif (
is_dataclass(obj)
and not isinstance(obj, type)
and not fake_attributes
and (_is_dataclass_repr(obj) or py_version == (3, 6))
):
obj_id = id(obj)
if obj_id in visited_ids:
# Recursion detected
return Node(value_repr="...")
push_visited(obj_id)
children = []
append = children.append
node = Node(
open_brace=f"{obj.__class__.__name__}(",
close_brace=")",
children=children,
last=root,
)
for last, field in loop_last(field for field in fields(obj) if field.repr):
child_node = _traverse(getattr(obj, field.name))
child_node.key_repr = field.name
child_node.last = last
child_node.key_separator = "="
append(child_node)
pop_visited(obj_id)
elif isinstance(obj, _CONTAINERS):
for container_type in _CONTAINERS:
if isinstance(obj, container_type):
obj_type = container_type
break
obj_id = id(obj)
if obj_id in visited_ids:
# Recursion detected
return Node(value_repr="...")
push_visited(obj_id)
open_brace, close_brace, empty = _BRACES[obj_type](obj)
if obj_type.__repr__ != type(obj).__repr__:
node = Node(value_repr=to_repr(obj), last=root)
elif obj:
children = []
node = Node(
open_brace=open_brace,
close_brace=close_brace,
children=children,
last=root,
)
append = children.append
num_items = len(obj)
last_item_index = num_items - 1
if isinstance(obj, _MAPPING_CONTAINERS):
iter_items = iter(obj.items())
if max_length is not None:
iter_items = islice(iter_items, max_length)
for index, (key, child) in enumerate(iter_items):
child_node = _traverse(child)
child_node.key_repr = to_repr(key)
child_node.last = index == last_item_index
append(child_node)
else:
iter_values = iter(obj)
if max_length is not None:
iter_values = islice(iter_values, max_length)
for index, child in enumerate(iter_values):
child_node = _traverse(child)
child_node.last = index == last_item_index
append(child_node)
if max_length is not None and num_items > max_length:
append(Node(value_repr=f"... +{num_items-max_length}", last=True))
else:
node = Node(empty=empty, children=[], last=root)
pop_visited(obj_id)
else:
node = Node(value_repr=to_repr(obj), last=root)
node.is_tuple = isinstance(obj, tuple)
return node
node = _traverse(_object, root=True)
return node
def pretty_repr(
_object: Any,
*,
max_width: int = 80,
indent_size: int = 4,
max_length: Optional[int] = None,
max_string: Optional[int] = None,
expand_all: bool = False,
) -> str:
"""Prettify repr string by expanding on to new lines to fit within a given width.
Args:
_object (Any): Object to repr.
max_width (int, optional): Desired maximum width of repr string. Defaults to 80.
indent_size (int, optional): Number of spaces to indent. Defaults to 4.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable truncating.
Defaults to None.
expand_all (bool, optional): Expand all containers regardless of available width. Defaults to False.
Returns:
str: A possibly multi-line representation of the object.
"""
if isinstance(_object, Node):
node = _object
else:
node = traverse(_object, max_length=max_length, max_string=max_string)
repr_str = node.render(
max_width=max_width, indent_size=indent_size, expand_all=expand_all
)
return repr_str
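# Usage sketch for pretty_repr. The first call fits within the default
# max_width of 80, so render() keeps it on one line; the second forces the
# expansion loop in Node.render to break every level onto its own line:
#
#     >>> pretty_repr({"a": [1, 2, 3]})
#     "{'a': [1, 2, 3]}"
#     >>> print(pretty_repr({"a": [1, 2, 3]}, max_width=10))
#     {
#         'a': [
#             1,
#             2,
#             3
#         ]
#     }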
def pprint(
_object: Any,
*,
console: Optional["Console"] = None,
indent_guides: bool = True,
max_length: Optional[int] = None,
max_string: Optional[int] = None,
expand_all: bool = False,
) -> None:
"""A convenience function for pretty printing.
Args:
_object (Any): Object to pretty print.
console (Console, optional): Console instance, or None to use default. Defaults to None.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of strings before truncating, or None to disable. Defaults to None.
indent_guides (bool, optional): Enable indentation guides. Defaults to True.
expand_all (bool, optional): Expand all containers. Defaults to False.
"""
_console = get_console() if console is None else console
_console.print(
Pretty(
_object,
max_length=max_length,
max_string=max_string,
indent_guides=indent_guides,
expand_all=expand_all,
overflow="ignore",
),
soft_wrap=True,
)
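# pprint is the console-aware counterpart: for example, pprint(data, max_length=2)
# abbreviates every container to its first two elements and appends a
# "... +N" placeholder for the rest (see the max_length branch in traverse).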
if __name__ == "__main__": # pragma: no cover
class BrokenRepr:
def __repr__(self) -> str:
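            # Deliberately raises ZeroDivisionError so the demo exercises the
            # <repr-error ...> fallback in to_repr above.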
1 / 0
return "this will fail"
d = defaultdict(int)
d["foo"] = 5
data = {
"foo": [
1,
"Hello World!",
100.123,
323.232,
432324.0,
{5, 6, 7, (1, 2, 3, 4), 8},
],
"bar": frozenset({1, 2, 3}),
"defaultdict": defaultdict(
list, {"crumble": ["apple", "rhubarb", "butter", "sugar", "flour"]}
),
"counter": Counter(
[
"apple",
"orange",
"pear",
"kumquat",
"kumquat",
"durian" * 100,
]
),
"atomic": (False, True, None),
"Broken": BrokenRepr(),
}
data["foo"].append(data) # type: ignore
from rich import print
print(Pretty(data, indent_guides=True, max_string=20))
self._object = _object
self.highlighter = highlighter or ReprHighlighter()
self.indent_size = indent_size
self.justify: Optional["JustifyMethod"] = justify
self.overflow: Optional["OverflowMethod"] = overflow
self.no_wrap = no_wrap
self.indent_guides = indent_guides
self.max_length = max_length
self.max_string = max_string
self.expand_all = expand_all
self.margin = margin
self.insert_line = insert_line
server_test.go | package controlplane
import (
"net/http"
"net/http/httptest"
"strings"
"testing"
"github.com/gorilla/handlers"
"github.com/gorilla/mux"
)
func TestNewServer(t *testing.T) {
testCases := []struct {
cfg *Config
headers map[string]string
method string
expectedCode int
}{
{
cfg: &Config{},
headers: map[string]string{
"Access-Control-Request-Headers": "something",
"Access-Control-Request-Method": "something",
"Authorization": "Bearer token",
"Origin": "localhost",
},
method: http.MethodOptions,
expectedCode: http.StatusMethodNotAllowed,
},
{
cfg: &Config{},
headers: map[string]string{
"Authorization": "Bearer token",
"Origin": "localhost",
},
method: http.MethodOptions,
expectedCode: http.StatusBadRequest,
},
{
cfg: &Config{},
headers: map[string]string{
"Access-Control-Request-Headers": "something",
"Authorization": "Bearer token",
},
method: http.MethodDelete,
expectedCode: http.StatusOK,
},
}
for _, testCase := range testCases {
router := mux.NewRouter()
router.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
})
server := NewServer(router, testCase.cfg)
rec := httptest.NewRecorder()
req, err := http.NewRequest(testCase.method, "/", nil)
if err != nil {
t.Errorf("create request %v", err)
}
for k, v := range testCase.headers {
req.Header.Set(k, v)
}
		// Allow all origins ("*"), which includes the localhost Origin set above
origins := handlers.AllowedOrigins([]string{"*"})
server.server.Handler = handlers.CORS(origins)(server.server.Handler)
server.server.Handler.ServeHTTP(rec, req)
if rec.Code != testCase.expectedCode {
t.Errorf("unexpected response code expected %d actual %d",
testCase.expectedCode, rec.Code)
}
}
}
func TestTrimPrefix(t *testing.T) {
testCases := []struct {
input string
output string
}{
{
input: "/hello",
output: "/",
},
{
input: "/static/vendor.js",
output: "static/vendor.js",
},
}
for _, testCase := range testCases {
called := false
actualURL := ""
h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
called = true
actualURL = r.URL.Path
})
h2 := trimPrefix(h)
rec := httptest.NewRecorder()
req, _ := http.NewRequest(http.MethodGet, testCase.input, nil)
h2.ServeHTTP(rec, req)
if !called {
t.Error("Handler has not been called")
}
if actualURL != testCase.output {
t.Errorf("url must be empty after trimming prefix actual %s", actualURL)
}
}
}
func TestNewVersionHandler(t *testing.T) {
rec := httptest.NewRecorder()
req, _ := http.NewRequest(http.MethodGet, "/version", nil)
version := "2.0.0"
h := NewVersionHandler(version)
h(rec, req)
if rec.Code != http.StatusOK {
t.Errorf("Wrong response code expected %d actual %d",
http.StatusOK, rec.Code)
}
if !strings.Contains(rec.Body.String(), version) {
t.Errorf("Version %s not found in response body %s",
rec.Body.String(), version)
}
}
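// A handler shape consistent with the assertions above (sketch only; the
// real NewVersionHandler lives elsewhere in this package and may differ):
//
//	func NewVersionHandler(version string) http.HandlerFunc {
//		return func(w http.ResponseWriter, r *http.Request) {
//			w.WriteHeader(http.StatusOK)
//			_, _ = w.Write([]byte(version))
//		}
//	}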
classECS_1_1Internal_1_1EntityIterator.js | var classECS_1_1Internal_1_1EntityIterator =
[
[ "EntityIterator", "classECS_1_1Internal_1_1EntityIterator.html#a4688e062cc850bfc38096aa50cd6a615", null ],
[ "get", "classECS_1_1Internal_1_1EntityIterator.html#a3b8c83a88cf662d363ae39aa102dc167", null ],
[ "getIndex", "classECS_1_1Internal_1_1EntityIterator.html#a6e199e3db15e0148f58e480bf9f2329b", null ],
[ "getWorld", "classECS_1_1Internal_1_1EntityIterator.html#a7eb7f85214a060f037f757fade8cbbd2", null ],
[ "includePendingDestroy", "classECS_1_1Internal_1_1EntityIterator.html#a960c8931aa81a0dbd4672b25b5cdcee5", null ],
[ "isEnd", "classECS_1_1Internal_1_1EntityIterator.html#adc0dc47359da6dcc63bb354c4b46dab5", null ], | [ "operator!=", "classECS_1_1Internal_1_1EntityIterator.html#a31e14d5170e2ca0e08e7258d0b04f3bc", null ],
[ "operator*", "classECS_1_1Internal_1_1EntityIterator.html#ad111fbf2f047d5d7be09679185565b3d", null ],
[ "operator++", "classECS_1_1Internal_1_1EntityIterator.html#aa8fbf17d7bc0477b649810a6ef915cf0", null ],
[ "operator==", "classECS_1_1Internal_1_1EntityIterator.html#a034dc369d5303db699691ca7f6c9c1f7", null ],
[ "bIncludePendingDestroy", "classECS_1_1Internal_1_1EntityIterator.html#a9d9fd505b27dc419d96985ac583e6178", null ],
[ "bIsEnd", "classECS_1_1Internal_1_1EntityIterator.html#ab58af7e2232328e9b0addeaf43e43add", null ],
[ "index", "classECS_1_1Internal_1_1EntityIterator.html#a5573953c0cd5103670065a133de4bebc", null ],
[ "world", "classECS_1_1Internal_1_1EntityIterator.html#a5a33404c48644938e426ebf102d405f8", null ]
];
namespace.go | package namespace
import (
"context"
"time"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
k8sretry "k8s.io/client-go/util/retry"
"github.com/kyma-project/kyma/tests/function-controller/pkg/helpers"
"github.com/kyma-project/kyma/tests/function-controller/pkg/retry"
"github.com/kyma-project/kyma/tests/function-controller/pkg/shared"
)
const (
TestNamespaceLabelKey = "created-by"
TestNamespaceLabelValue = "serverless-controller-manager-test"
)
type Namespace struct {
coreCli typedcorev1.CoreV1Interface
name string
log *logrus.Entry
verbose bool
}
func New(coreCli typedcorev1.CoreV1Interface, container shared.Container) *Namespace {
return &Namespace{coreCli: coreCli, name: container.Namespace, log: container.Log, verbose: container.Verbose}
}
func (n Namespace) GetName() string {
return n.name
}
func (n Namespace) LogResource() error {
ns, err := n.get()
if err != nil {
return err
}
	ns.TypeMeta.Kind = "Namespace"
out, err := helpers.PrettyMarshall(ns)
if err != nil {
return err
}
n.log.Infof("%s", out)
return nil
}
func (n Namespace) get() (*corev1.Namespace, error) {
return n.coreCli.Namespaces().Get(context.Background(), n.name, metav1.GetOptions{})
}
func (n *Namespace) Create() (string, error) {
ns := &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: n.name,
Labels: map[string]string{
TestNamespaceLabelKey: TestNamespaceLabelValue, // convenience for cleaning up stale namespaces during development
},
},
}
backoff := wait.Backoff{
Duration: 500 * time.Millisecond,
Factor: 2,
Jitter: 0.1,
Steps: 4,
}
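	// With these settings k8sretry.OnError makes up to 4 attempts (Steps),
	// sleeping roughly 0.5s, 1s and 2s between them (Factor 2, ±10% Jitter);
	// the predicate below treats every error as retriable.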
err := k8sretry.OnError(backoff, func(err error) bool {
return true
}, func() error {
_, err := n.coreCli.Namespaces().Create(context.Background(), ns, metav1.CreateOptions{})
if apierrors.IsAlreadyExists(err) {
return nil
}
return err
})
if err != nil {
return n.name, errors.Wrapf(err, "while creating namespace %s", n.name)
}
n.log.Infof("Create: namespace %s", n.name)
if n.verbose {
n.log.Infof("%+v", ns)
}
return n.name, nil
}
func (n *Namespace) Delete() error {
err := retry.WithIgnoreOnNotFound(retry.DefaultBackoff, func() error {
if n.verbose {
n.log.Infof("DELETE: namespace: %s", n.name)
}
return n.coreCli.Namespaces().Delete(context.Background(), n.name, metav1.DeleteOptions{})
}, n.log)
if err != nil {
return errors.Wrapf(err, "while deleting namespace %s", n.name)
}
return nil
}
index.js | /**
*
* ChallengeCard
*
*/
import React from 'react';
import PropTypes from 'prop-types';
import { withStyles } from '@material-ui/core/styles';
import { withRouter } from 'react-router-dom';
import moment from 'moment';
import {
Card,
Typography,
CardActionArea,
CardContent,
CardMedia,
CardHeader,
Divider,
} from '@material-ui/core';
// 'default', 'error', 'inherit', 'primary', 'secondary', 'textPrimary', 'textSecondary'
const styles = theme => ({
card: {
maxWidth: 340,
margin: 10,
backgroundColor: theme.palette.primary.light,
},
media: {
height: 160,
},
header: {
color: 'black',
},
});
function ChallengeCard(props) {
const handleClick = () => {
props.history.push(`/challenge/${props.id}`);
};
const { title, description, deadline, media } = props; // add link later
const { classes } = props;
return (
<div>
<Card className={classes.card}>
<CardActionArea onClick={handleClick}>
<CardMedia
className={classes.media}
image={`http://localhost:1337/${media.url}`}
title="Contemplative Reptile"
/>
<CardHeader
classes={{
title: classes.header,
subheader: classes.header,
}}
className={classes.header}
title={title}
subheader={moment(deadline).format('MMM Do YY')}
/>
<Divider />
<CardContent>
<Typography component="p" color="inherit">
{description}
</Typography>
</CardContent>
</CardActionArea>
</Card>
</div>
);
}
ChallengeCard.propTypes = {
classes: PropTypes.object.isRequired,
title: PropTypes.string.isRequired,
deadline: PropTypes.string.isRequired,
  media: PropTypes.object.isRequired,
  description: PropTypes.string.isRequired,
history: PropTypes.object.isRequired,
id: PropTypes.number.isRequired,
};
export default withRouter(withStyles(styles)(ChallengeCard));
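// Usage sketch (hypothetical props; `media.url` is resolved against the
// hard-coded local strapi host in CardMedia above):
//
// <ChallengeCard
//   id={1}
//   title="30-Day Fitness"
//   description="Twenty push-ups a day."
//   deadline="2021-01-31T00:00:00.000Z"
//   media={{ url: 'uploads/fitness.png' }}
// />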
view.rs | use std::fmt;
use std::mem;
use std::borrow::Cow;
use utils::range::RangeArgument;
use std::marker::PhantomData;
use texture::{PixelValue, Texture1dDataSink};
use gl;
use backend::Facade;
use BufferExt;
use BufferSliceExt;
use GlObject;
use context::Context;
use context::CommandContext;
use std::rc::Rc;
use ContextExt;
use buffer::BufferType;
use buffer::BufferMode;
use buffer::BufferCreationError;
use buffer::Content;
use buffer::fences::Fences;
use buffer::fences::Inserter;
use buffer::alloc::Alloc;
use buffer::alloc::Mapping;
use buffer::alloc::ReadMapping;
use buffer::alloc::WriteMapping;
use buffer::alloc::ReadError;
use buffer::alloc::CopyError;
/// Represents a view of a buffer.
pub struct Buffer<T: ?Sized> where T: Content {
// TODO: this `Option` is here because we have a destructor and need to be able to move out
alloc: Option<Alloc>,
// TODO: this `Option` is here because we have a destructor and need to be able to move out
fence: Option<Fences>,
marker: PhantomData<T>,
}
impl<T: ?Sized> GlObject for Buffer<T> where T: Content {
type Id = gl::types::GLuint;
#[inline]
fn get_id(&self) -> gl::types::GLuint {
self.alloc.as_ref().unwrap().get_id()
}
}
impl<T: ?Sized> Buffer<T> where T: Content {
/// Builds a new buffer containing the given data. The size of the buffer is equal to the size
/// of the data.
pub fn new<F: ?Sized>(facade: &F, data: &T, ty: BufferType, mode: BufferMode)
-> Result<Buffer<T>, BufferCreationError>
where F: Facade
{
Alloc::new(facade, data, ty, mode)
.map(|buffer| {
Buffer {
alloc: Some(buffer),
fence: Some(Fences::new()),
marker: PhantomData,
}
})
}
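    // Usage sketch (assumes some `facade` implementing `Facade`; the element
    // type and buffer kind here are illustrative, not prescribed by this API):
    //
    //     let buf: Buffer<[u16]> = Buffer::new(&facade, &[1u16, 2, 3][..],
    //         BufferType::ArrayBuffer, BufferMode::Default).unwrap();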
/// Builds a new buffer of the given size.
pub fn empty_unsized<F: ?Sized>(facade: &F, ty: BufferType, size: usize, mode: BufferMode)
-> Result<Buffer<T>, BufferCreationError> where F: Facade
{
assert!(<T as Content>::is_size_suitable(size));
Alloc::empty(facade, ty, size, mode)
.map(|buffer| {
Buffer {
alloc: Some(buffer),
fence: Some(Fences::new()),
marker: PhantomData,
}
})
}
/// Returns the context corresponding to this buffer.
#[inline]
pub fn get_context(&self) -> &Rc<Context> {
self.alloc.as_ref().unwrap().get_context()
}
/// Returns the size in bytes of this buffer.
#[inline]
pub fn get_size(&self) -> usize {
self.alloc.as_ref().unwrap().get_size()
}
/// Returns true if this buffer uses persistent mapping.
#[inline]
pub fn is_persistent(&self) -> bool {
self.alloc.as_ref().unwrap().uses_persistent_mapping()
}
/// Uploads some data in this buffer.
///
/// # Implementation
///
    /// - For persistent-mapped buffers, waits until the data is no longer used by the GPU then
/// memcpies the data to the mapping.
/// - For immutable buffers, creates a temporary buffer that contains the data then calls
/// `glCopyBufferSubData` to copy from the temporary buffer to the real one.
/// - For other types, calls `glBufferSubData`.
///
/// # Panic
///
/// Panics if the length of `data` is different from the length of this buffer.
pub fn write(&self, data: &T) {
assert!(mem::size_of_val(data) == self.get_size());
self.fence.as_ref().unwrap().wait(&mut self.alloc.as_ref().unwrap().get_context().make_current(),
0 .. self.get_size());
unsafe { self.alloc.as_ref().unwrap().upload(0, data); }
}
/// Invalidates the content of the buffer. The data becomes undefined.
///
/// You should call this if you only use parts of a buffer. For example if you want to use
/// the first half of the buffer, you invalidate the whole buffer then write the first half.
///
/// This operation is a no-op if the backend doesn't support it and for persistent-mapped
/// buffers.
///
/// # Implementation
///
/// Calls `glInvalidateBufferData` if supported. Otherwise, calls `glBufferData` with a null
/// pointer for data. If `glBufferStorage` has been used to create the buffer and
/// `glInvalidateBufferData` is not supported, does nothing.
///
#[inline]
pub fn invalidate(&self) {
self.alloc.as_ref().unwrap().invalidate(0, self.get_size());
}
/// Reads the content of the buffer.
pub fn read(&self) -> Result<T::Owned, ReadError> {
self.fence.as_ref().unwrap().wait(&mut self.alloc.as_ref().unwrap().get_context().make_current(),
0 .. self.get_size());
unsafe {
self.alloc.as_ref().unwrap().read::<T>(0 .. self.get_size())
}
}
/// Maps the buffer in memory for both reading and writing.
///
/// # Implementation
///
/// - For persistent-mapped buffers, waits until the data is no longer accessed by the GPU then
/// returns a pointer to the existing mapping.
/// - For immutable buffers, creates a temporary buffer containing the data of the buffer and
/// maps it. When the mapping object is destroyed, copies the content of the temporary buffer
/// to the real buffer.
/// - For other types, calls `glMapBuffer` or `glMapSubBuffer`.
///
pub fn map(&mut self) -> Mapping<T> {
self.fence.as_ref().unwrap().wait(&mut self.alloc.as_ref().unwrap().get_context().make_current(),
0 .. self.get_size());
let size = self.get_size();
unsafe { self.alloc.as_mut().unwrap().map(0 .. size) }
}
/// Maps the buffer in memory for reading.
///
/// # Implementation
///
/// - For persistent-mapped buffers, waits until the data is no longer accessed by the GPU then
/// returns a pointer to the existing mapping.
/// - For immutable buffers, creates a temporary buffer containing the data of the buffer and
/// maps it.
/// - For other types, calls `glMapBuffer` or `glMapSubBuffer`.
///
pub fn map_read(&mut self) -> ReadMapping<T> {
self.fence.as_ref().unwrap().wait(&mut self.alloc.as_ref().unwrap().get_context().make_current(),
0 .. self.get_size());
let size = self.get_size();
unsafe { self.alloc.as_mut().unwrap().map_read(0 .. size) }
}
/// Maps the buffer in memory for writing only.
///
/// # Implementation
///
/// - For persistent-mapped buffers, waits until the data is no longer accessed by the GPU then
/// returns a pointer to the existing mapping.
/// - For immutable buffers, creates a temporary buffer and
/// maps it. When the mapping object is destroyed, copies the content of the temporary buffer
/// to the real buffer.
/// - For other types, calls `glMapBuffer` or `glMapSubBuffer`.
///
pub fn map_write(&mut self) -> WriteMapping<T> {
self.fence.as_ref().unwrap().wait(&mut self.alloc.as_ref().unwrap().get_context().make_current(),
0 .. self.get_size());
let size = self.get_size();
unsafe { self.alloc.as_mut().unwrap().map_write(0 .. size) }
}
/// Copies the content of the buffer to another buffer.
///
/// # Panic
///
/// Panics if `T` is unsized and the other buffer is too small.
///
pub fn copy_to<'a, S>(&self, target: S) -> Result<(), CopyError>
where S: Into<BufferSlice<'a, T>>, T: 'a
{
let target = target.into();
let alloc = self.alloc.as_ref().unwrap();
try!(alloc.copy_to(0 .. self.get_size(), &target.alloc, target.get_offset_bytes()));
if let Some(inserter) = self.as_slice().add_fence() {
let mut ctxt = alloc.get_context().make_current();
inserter.insert(&mut ctxt);
}
if let Some(inserter) = target.add_fence() {
let mut ctxt = alloc.get_context().make_current();
inserter.insert(&mut ctxt);
}
Ok(())
}
/// Builds a slice that contains an element from inside the buffer.
///
    /// This method builds an object that represents a slice of the buffer. No actual OpenGL
    /// operation is performed.
///
/// # Example
///
/// ```no_run
/// #[derive(Copy, Clone)]
/// struct BufferContent {
/// value1: u16,
/// value2: u16,
/// }
/// # let buffer: glium::buffer::BufferSlice<BufferContent> =
/// # unsafe { std::mem::uninitialized() };
/// let slice = unsafe { buffer.slice_custom(|content| &content.value2) };
/// ```
///
/// # Safety
///
/// The object whose reference is passed to the closure is uninitialized. Therefore you
/// **must not** access the content of the object.
///
/// You **must** return a reference to an element from the parameter. The closure **must not**
/// panic.
#[inline]
pub unsafe fn slice_custom<F, R: ?Sized>(&self, f: F) -> BufferSlice<R>
where F: for<'r> FnOnce(&'r T) -> &'r R,
R: Content
{
self.as_slice().slice_custom(f)
}
/// Same as `slice_custom` but returns a mutable slice.
///
    /// This method builds an object that represents a slice of the buffer. No actual OpenGL
    /// operation is performed.
#[inline]
pub unsafe fn slice_custom_mut<F, R: ?Sized>(&mut self, f: F) -> BufferMutSlice<R>
where F: for<'r> FnOnce(&'r T) -> &'r R,
R: Content
{
self.as_mut_slice().slice_custom(f)
}
/// Builds a slice containing the whole subbuffer.
///
    /// This method builds an object that represents a slice of the buffer. No actual OpenGL
    /// operation is performed.
#[inline]
pub fn as_slice(&self) -> BufferSlice<T> {
BufferSlice {
alloc: self.alloc.as_ref().unwrap(),
bytes_start: 0,
bytes_end: self.get_size(),
fence: self.fence.as_ref().unwrap(),
marker: PhantomData,
}
}
/// Builds a slice containing the whole subbuffer.
///
    /// This method builds an object that represents a slice of the buffer. No actual OpenGL
    /// operation is performed.
#[inline]
pub fn as_mut_slice(&mut self) -> BufferMutSlice<T> {
let size = self.get_size();
BufferMutSlice {
alloc: self.alloc.as_mut().unwrap(),
bytes_start: 0,
bytes_end: size,
fence: self.fence.as_ref().unwrap(),
marker: PhantomData,
}
}
/// Builds a slice-any containing the whole subbuffer.
///
    /// This method builds an object that represents a slice of the buffer. No actual OpenGL
    /// operation is performed.
pub fn as_slice_any(&self) -> BufferAnySlice {
let size = self.get_size();
BufferAnySlice {
alloc: self.alloc.as_ref().unwrap(),
bytes_start: 0,
            bytes_end: size,
elements_size: <T as Content>::get_elements_size(),
fence: self.fence.as_ref().unwrap(),
}
}
}
impl<T> Buffer<T> where T: Content + Copy {
/// Builds a new buffer of the given size.
pub fn empty<F: ?Sized>(facade: &F, ty: BufferType, mode: BufferMode)
-> Result<Buffer<T>, BufferCreationError> where F: Facade
{
Alloc::empty(facade, ty, mem::size_of::<T>(), mode)
.map(|buffer| {
Buffer {
alloc: Some(buffer),
fence: Some(Fences::new()),
marker: PhantomData,
}
})
}
}
impl<T> Buffer<[T]> where [T]: Content, T: Copy {
/// Builds a new buffer of the given size.
pub fn empty_array<F: ?Sized>(facade: &F, ty: BufferType, len: usize, mode: BufferMode)
-> Result<Buffer<[T]>, BufferCreationError> where F: Facade
{
Alloc::empty(facade, ty, len * mem::size_of::<T>(), mode)
.map(|buffer| {
Buffer {
alloc: Some(buffer),
fence: Some(Fences::new()),
marker: PhantomData,
}
})
}
/// Returns the number of elements in this buffer.
#[inline]
pub fn len(&self) -> usize {
self.alloc.as_ref().unwrap().get_size() / mem::size_of::<T>()
}
/// Builds a slice of this subbuffer. Returns `None` if out of range.
///
    /// This method builds an object that represents a slice of the buffer. No actual OpenGL
    /// operation is performed.
#[inline]
pub fn slice<R: RangeArgument<usize>>(&self, range: R) -> Option<BufferSlice<[T]>> {
self.as_slice().slice(range)
}
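    // Sketch: slicing is pure bookkeeping -- it narrows the byte range without
    // touching OpenGL. For some `buffer: Buffer<[u16]>` of length >= 2:
    //
    //     if let Some(first_two) = buffer.slice(0 .. 2) {
    //         first_two.write(&[1u16, 2][..]);
    //     }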
/// Builds a slice of this subbuffer. Returns `None` if out of range.
///
    /// This method builds an object that represents a slice of the buffer. No actual OpenGL
    /// operation is performed.
#[inline]
pub fn slice_mut<R: RangeArgument<usize>>(&mut self, range: R) -> Option<BufferMutSlice<[T]>> {
self.as_mut_slice().slice(range)
}
}
impl<T> Buffer<[T]> where T: PixelValue {
/// Reads the content of the buffer.
#[inline]
pub fn read_as_texture_1d<S>(&self) -> Result<S, ReadError> where S: Texture1dDataSink<T> {
let data = try!(self.read());
Ok(S::from_raw(Cow::Owned(data), self.len() as u32))
}
}
impl<T: ?Sized> fmt::Debug for Buffer<T> where T: Content {
#[inline]
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(fmt, "{:?}", self.alloc.as_ref().unwrap())
}
}
impl<T: ?Sized> Drop for Buffer<T> where T: Content {
#[inline]
fn drop(&mut self) {
if let (Some(alloc), Some(mut fence)) = (self.alloc.take(), self.fence.take()) {
fence.clean(&mut alloc.get_context().make_current());
}
}
}
impl<T: ?Sized> BufferExt for Buffer<T> where T: Content {
#[inline]
fn get_offset_bytes(&self) -> usize {
0
}
#[inline]
fn prepare_for_vertex_attrib_array(&self, ctxt: &mut CommandContext) {
let alloc = self.alloc.as_ref().unwrap();
alloc.prepare_for_vertex_attrib_array(ctxt);
}
#[inline]
fn prepare_for_element_array(&self, ctxt: &mut CommandContext) {
let alloc = self.alloc.as_ref().unwrap();
alloc.prepare_for_element_array(ctxt);
}
#[inline]
fn bind_to_element_array(&self, ctxt: &mut CommandContext) {
let alloc = self.alloc.as_ref().unwrap();
alloc.bind_to_element_array(ctxt);
}
#[inline]
fn prepare_and_bind_for_pixel_pack(&self, ctxt: &mut CommandContext) {
let alloc = self.alloc.as_ref().unwrap();
alloc.prepare_and_bind_for_pixel_pack(ctxt);
}
#[inline]
fn unbind_pixel_pack(ctxt: &mut CommandContext) {
Alloc::unbind_pixel_pack(ctxt)
}
#[inline]
fn prepare_and_bind_for_pixel_unpack(&self, ctxt: &mut CommandContext) {
let alloc = self.alloc.as_ref().unwrap();
alloc.prepare_and_bind_for_pixel_unpack(ctxt);
}
#[inline]
fn unbind_pixel_unpack(ctxt: &mut CommandContext) {
Alloc::unbind_pixel_unpack(ctxt)
}
#[inline]
fn prepare_and_bind_for_query(&self, ctxt: &mut CommandContext) {
let alloc = self.alloc.as_ref().unwrap();
alloc.prepare_and_bind_for_query(ctxt);
}
#[inline]
    fn unbind_query(ctxt: &mut CommandContext) {
Alloc::unbind_query(ctxt)
}
#[inline]
fn prepare_and_bind_for_draw_indirect(&self, ctxt: &mut CommandContext) {
let alloc = self.alloc.as_ref().unwrap();
alloc.prepare_and_bind_for_draw_indirect(ctxt);
}
#[inline]
fn prepare_and_bind_for_dispatch_indirect(&self, ctxt: &mut CommandContext) {
let alloc = self.alloc.as_ref().unwrap();
alloc.prepare_and_bind_for_dispatch_indirect(ctxt);
}
#[inline]
fn prepare_and_bind_for_uniform(&self, ctxt: &mut CommandContext, index: gl::types::GLuint) {
let alloc = self.alloc.as_ref().unwrap();
alloc.prepare_and_bind_for_uniform(ctxt, index, 0 .. alloc.get_size());
}
#[inline]
fn prepare_and_bind_for_shared_storage(&self, ctxt: &mut CommandContext, index: gl::types::GLuint) {
let alloc = self.alloc.as_ref().unwrap();
alloc.prepare_and_bind_for_shared_storage(ctxt, index, 0 .. alloc.get_size());
}
#[inline]
fn bind_to_transform_feedback(&self, ctxt: &mut CommandContext, index: gl::types::GLuint) {
let alloc = self.alloc.as_ref().unwrap();
alloc.bind_to_transform_feedback(ctxt, index, 0 .. alloc.get_size());
}
}
/// Represents a sub-part of a buffer.
#[derive(Copy, Clone)]
pub struct BufferSlice<'a, T: ?Sized> where T: Content + 'a {
alloc: &'a Alloc,
bytes_start: usize,
bytes_end: usize,
fence: &'a Fences,
marker: PhantomData<&'a T>,
}
impl<'a, T: ?Sized> BufferSlice<'a, T> where T: Content + 'a {
/// Returns the size in bytes of this slice.
#[inline]
pub fn get_size(&self) -> usize {
self.bytes_end - self.bytes_start
}
/// Returns the context corresponding to this buffer.
#[inline]
pub fn get_context(&self) -> &Rc<Context> {
self.alloc.get_context()
}
/// Uploads some data in this buffer.
///
/// # Implementation
///
    /// - For persistent-mapped buffers, waits until the data is no longer used by the GPU then
/// memcpies the data to the mapping.
/// - For immutable buffers, creates a temporary buffer that contains the data then calls
/// `glCopyBufferSubData` to copy from the temporary buffer to the real one.
/// - For other types, calls `glBufferSubData`.
///
/// # Panic
///
/// Panics if the length of `data` is different from the length of this buffer.
pub fn write(&self, data: &T) {
assert_eq!(mem::size_of_val(data), self.get_size());
self.fence.wait(&mut self.alloc.get_context().make_current(),
self.bytes_start .. self.bytes_end);
unsafe { self.alloc.upload(self.bytes_start, data); }
}
/// Invalidates the content of the slice. The data becomes undefined.
///
/// This operation is a no-op if the backend doesn't support it and for persistent-mapped
/// buffers.
///
/// # Implementation
///
/// Calls `glInvalidateBufferSubData` if supported.
///
#[inline]
pub fn invalidate(&self) {
self.alloc.invalidate(self.bytes_start, self.get_size());
}
/// Reads the content of the buffer.
pub fn read(&self) -> Result<T::Owned, ReadError> {
self.fence.wait(&mut self.alloc.get_context().make_current(),
self.bytes_start .. self.bytes_end);
unsafe {
self.alloc.read::<T>(self.bytes_start .. self.bytes_end)
}
}
/// Copies the content of this slice to another slice.
///
/// # Panic
///
/// Panics if `T` is unsized and the other buffer is too small.
pub fn copy_to<S>(&self, target: S) -> Result<(), CopyError>
where S: Into<BufferSlice<'a, T>>
{
let target = target.into();
try!(self.alloc.copy_to(self.bytes_start .. self.bytes_end, &target.alloc,
target.get_offset_bytes()));
if let Some(inserter) = self.add_fence() {
let mut ctxt = self.alloc.get_context().make_current();
inserter.insert(&mut ctxt);
}
if let Some(inserter) = target.add_fence() {
let mut ctxt = self.alloc.get_context().make_current();
inserter.insert(&mut ctxt);
}
Ok(())
}
/// Builds a slice that contains an element from inside the buffer.
///
    /// This method builds an object that represents a slice of the buffer. No actual OpenGL
    /// operation is performed.
///
/// # Example
///
/// ```no_run
/// #[derive(Copy, Clone)]
/// struct BufferContent {
/// value1: u16,
/// value2: u16,
/// }
/// # let buffer: glium::buffer::BufferSlice<BufferContent> =
/// # unsafe { std::mem::uninitialized() };
/// let slice = unsafe { buffer.slice_custom(|content| &content.value2) };
/// ```
///
/// # Safety
///
/// The object whose reference is passed to the closure is uninitialized. Therefore you
/// **must not** access the content of the object.
///
/// You **must** return a reference to an element from the parameter. The closure **must not**
/// panic.
#[inline]
pub unsafe fn slice_custom<F, R: ?Sized>(&self, f: F) -> BufferSlice<'a, R>
where F: for<'r> FnOnce(&'r T) -> &'r R,
R: Content
{
let data: &T = mem::zeroed();
let result = f(data);
let size = mem::size_of_val(result);
let result = result as *const R as *const () as usize;
assert!(result <= self.get_size());
assert!(result + size <= self.get_size());
BufferSlice {
alloc: self.alloc,
bytes_start: self.bytes_start + result,
bytes_end: self.bytes_start + result + size,
fence: self.fence,
marker: PhantomData,
}
}
/// Builds a slice-any containing the whole subbuffer.
///
    /// This method builds an object that represents a slice of the buffer. No actual OpenGL
    /// operation is performed.
#[inline]
pub fn as_slice_any(&self) -> BufferAnySlice<'a> {
BufferAnySlice {
alloc: self.alloc,
bytes_start: self.bytes_start,
bytes_end: self.bytes_end,
elements_size: <T as Content>::get_elements_size(),
fence: self.fence,
}
}
}
impl<'a, T> BufferSlice<'a, [T]> where [T]: Content + 'a {
/// Returns the number of elements in this slice.
#[inline]
pub fn len(&self) -> usize {
(self.bytes_end - self.bytes_start) / mem::size_of::<T>()
}
/// Builds a subslice of this slice. Returns `None` if out of range.
///
    /// This method builds an object that represents a slice of the buffer. No actual OpenGL
    /// operation is performed.
#[inline]
pub fn slice<R: RangeArgument<usize>>(&self, range: R) -> Option<BufferSlice<'a, [T]>> {
if range.start().map_or(0, |e| *e) > self.len() || range.end().map_or(0, |e| *e) > self.len() {
return None;
}
Some(BufferSlice {
alloc: self.alloc,
bytes_start: self.bytes_start + range.start().map_or(0, |e| *e) * mem::size_of::<T>(),
bytes_end: self.bytes_start + range.end().map_or(self.len(), |e| *e) * mem::size_of::<T>(),
fence: self.fence,
marker: PhantomData,
})
}
}
impl<'a, T> BufferSlice<'a, [T]> where T: PixelValue + 'a {
/// Reads the content of the buffer.
#[inline]
pub fn read_as_texture_1d<S>(&self) -> Result<S, ReadError> where S: Texture1dDataSink<T> {
let data = try!(self.read());
Ok(S::from_raw(Cow::Owned(data), self.len() as u32))
}
}
impl<'a, T: ?Sized> fmt::Debug for BufferSlice<'a, T> where T: Content {
#[inline]
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(fmt, "{:?}", self.alloc)
}
}
impl<'a, T: ?Sized> From<BufferMutSlice<'a, T>> for BufferSlice<'a, T> where T: Content + 'a {
#[inline]
fn from(s: BufferMutSlice<'a, T>) -> BufferSlice<'a, T> {
BufferSlice {
alloc: s.alloc,
bytes_start: s.bytes_start,
bytes_end: s.bytes_end,
fence: s.fence,
marker: PhantomData,
}
}
}
impl<'a, T: ?Sized> From<&'a Buffer<T>> for BufferSlice<'a, T> where T: Content + 'a {
#[inline]
fn from(b: &'a Buffer<T>) -> BufferSlice<'a, T> {
b.as_slice()
}
}
impl<'a, T: ?Sized> From<&'a mut Buffer<T>> for BufferSlice<'a, T> where T: Content + 'a {
#[inline]
fn from(b: &'a mut Buffer<T>) -> BufferSlice<'a, T> {
b.as_slice()
}
}
impl<'a, T: ?Sized> BufferSliceExt<'a> for BufferSlice<'a, T> where T: Content {
#[inline]
fn add_fence(&self) -> Option<Inserter<'a>> {
if !self.alloc.uses_persistent_mapping() {
return None;
}
Some(self.fence.inserter(self.bytes_start .. self.bytes_end))
}
}
impl<'a, T: ?Sized> BufferExt for BufferSlice<'a, T> where T: Content {
#[inline]
fn get_offset_bytes(&self) -> usize {
self.bytes_start
}
#[inline]
fn prepare_for_vertex_attrib_array(&self, ctxt: &mut CommandContext) {
self.alloc.prepare_for_vertex_attrib_array(ctxt);
}
#[inline]
fn prepare_for_element_array(&self, ctxt: &mut CommandContext) {
self.alloc.prepare_for_element_array(ctxt);
}
#[inline]
fn bind_to_element_array(&self, ctxt: &mut CommandContext) {
self.alloc.bind_to_element_array(ctxt);
}
#[inline]
fn prepare_and_bind_for_pixel_pack(&self, ctxt: &mut CommandContext) {
self.alloc.prepare_and_bind_for_pixel_pack(ctxt);
}
#[inline]
fn unbind_pixel_pack(ctxt: &mut CommandContext) {
Alloc::unbind_pixel_pack(ctxt)
}
#[inline]
fn prepare_and_bind_for_pixel_unpack(&self, ctxt: &mut CommandContext) {
self.alloc.prepare_and_bind_for_pixel_unpack(ctxt);
}
#[inline]
fn unbind_pixel_unpack(ctxt: &mut CommandContext) {
Alloc::unbind_pixel_unpack(ctxt)
}
#[inline]
fn prepare_and_bind_for_query(&self, ctxt: &mut CommandContext) {
self.alloc.prepare_and_bind_for_query(ctxt);
}
#[inline]
fn unbind_query(ctxt: &mut CommandContext) {
Alloc::unbind_query(ctxt)
}
#[inline]
fn prepare_and_bind_for_draw_indirect(&self, ctxt: &mut CommandContext) {
self.alloc.prepare_and_bind_for_draw_indirect(ctxt);
}
#[inline]
fn prepare_and_bind_for_dispatch_indirect(&self, ctxt: &mut CommandContext) {
self.alloc.prepare_and_bind_for_dispatch_indirect(ctxt);
}
#[inline]
fn prepare_and_bind_for_uniform(&self, ctxt: &mut CommandContext, index: gl::types::GLuint) {
self.alloc.prepare_and_bind_for_uniform(ctxt, index, 0 .. self.alloc.get_size());
}
#[inline]
fn prepare_and_bind_for_shared_storage(&self, ctxt: &mut CommandContext, index: gl::types::GLuint) {
self.alloc.prepare_and_bind_for_shared_storage(ctxt, index, 0 .. self.alloc.get_size());
}
#[inline]
fn bind_to_transform_feedback(&self, ctxt: &mut CommandContext, index: gl::types::GLuint) {
self.alloc.bind_to_transform_feedback(ctxt, index, 0 .. self.alloc.get_size());
}
}
/// Represents a sub-part of a buffer.
pub struct BufferMutSlice<'a, T: ?Sized> where T: Content {
alloc: &'a mut Alloc,
bytes_start: usize,
bytes_end: usize,
fence: &'a Fences,
marker: PhantomData<T>,
}
impl<'a, T: ?Sized> BufferMutSlice<'a, T> where T: Content + 'a {
/// Returns the size in bytes of this slice.
#[inline]
pub fn get_size(&self) -> usize {
self.bytes_end - self.bytes_start
}
/// Maps the buffer in memory for both reading and writing.
///
/// # Implementation
///
/// - For persistent-mapped buffers, waits until the data is no longer accessed by the GPU then
/// returns a pointer to the existing mapping.
/// - For immutable buffers, creates a temporary buffer containing the data of the buffer and
/// maps it. When the mapping object is destroyed, copies the content of the temporary buffer
/// to the real buffer.
/// - For other types, calls `glMapBuffer` or `glMapSubBuffer`.
///
#[inline]
pub fn map(self) -> Mapping<'a, T> {
self.fence.wait(&mut self.alloc.get_context().make_current(),
self.bytes_start .. self.bytes_end);
unsafe { self.alloc.map(self.bytes_start .. self.bytes_end) }
}
/// Maps the buffer in memory for reading.
///
/// # Implementation
///
/// - For persistent-mapped buffers, waits until the data is no longer accessed by the GPU then
/// returns a pointer to the existing mapping.
/// - For immutable buffers, creates a temporary buffer containing the data of the buffer and
/// maps it.
/// - For other types, calls `glMapBuffer` or `glMapSubBuffer`.
///
#[inline]
pub fn map_read(self) -> ReadMapping<'a, T> {
self.fence.wait(&mut self.alloc.get_context().make_current(),
self.bytes_start .. self.bytes_end);
unsafe { self.alloc.map_read(self.bytes_start .. self.bytes_end) }
}
/// Maps the buffer in memory for writing only.
///
/// # Implementation
///
/// - For persistent-mapped buffers, waits until the data is no longer accessed by the GPU then
/// returns a pointer to the existing mapping.
/// - For immutable buffers, creates a temporary buffer and maps it. When the mapping object
/// is destroyed, copies the content of the temporary buffer to the real buffer.
/// - For other types, calls `glMapBuffer` or `glMapSubBuffer`.
///
#[inline]
pub fn map_write(self) -> WriteMapping<'a, T> {
self.fence.wait(&mut self.alloc.get_context().make_current(),
self.bytes_start .. self.bytes_end);
unsafe { self.alloc.map_write(self.bytes_start .. self.bytes_end) }
}
/// Uploads some data in this buffer.
///
/// # Implementation
///
    /// - For persistent-mapped buffers, waits until the data is no longer used by the GPU then
/// memcpies the data to the mapping.
/// - For immutable buffers, creates a temporary buffer that contains the data then calls
/// `glCopyBufferSubData` to copy from the temporary buffer to the real one.
/// - For other types, calls `glBufferSubData`.
///
/// # Panic
///
/// Panics if the length of `data` is different from the length of this buffer.
#[inline]
    pub fn write(&self, data: &T) {
        assert_eq!(mem::size_of_val(data), self.get_size());
        self.fence.wait(&mut self.alloc.get_context().make_current(),
self.bytes_start .. self.bytes_end);
unsafe { self.alloc.upload(self.bytes_start, data); }
}
/// Invalidates the content of the slice. The data becomes undefined.
///
/// This operation is a no-op if the backend doesn't support it and for persistent-mapped
/// buffers.
///
/// # Implementation
///
/// Calls `glInvalidateBufferSubData` if supported.
///
#[inline]
pub fn invalidate(&self) {
self.alloc.invalidate(self.bytes_start, self.get_size());
}
/// Reads the content of the buffer.
#[inline]
pub fn read(&self) -> Result<T::Owned, ReadError> {
unsafe {
self.alloc.read::<T>(self.bytes_start .. self.bytes_end)
}
}
/// Copies the content of this slice to another slice.
///
/// # Panic
///
/// Panics if `T` is unsized and the other buffer is too small.
pub fn copy_to<S>(&self, target: S) -> Result<(), CopyError>
where S: Into<BufferSlice<'a, T>>
{
let target = target.into();
try!(self.alloc.copy_to(self.bytes_start .. self.bytes_end, &target.alloc,
target.get_offset_bytes()));
if let Some(inserter) = self.add_fence() {
let mut ctxt = self.alloc.get_context().make_current();
inserter.insert(&mut ctxt);
}
        if let Some(inserter) = target.add_fence() {
let mut ctxt = self.alloc.get_context().make_current();
inserter.insert(&mut ctxt);
}
Ok(())
}
/// Builds a slice that contains an element from inside the buffer.
///
    /// This method builds an object that represents a slice of the buffer. No actual OpenGL
    /// operation is performed.
///
/// # Example
///
/// ```no_run
/// #[derive(Copy, Clone)]
/// struct BufferContent {
/// value1: u16,
/// value2: u16,
/// }
/// # let buffer: glium::buffer::BufferSlice<BufferContent> =
/// # unsafe { std::mem::uninitialized() };
/// let slice = unsafe { buffer.slice_custom(|content| &content.value2) };
/// ```
///
/// # Safety
///
/// The object whose reference is passed to the closure is uninitialized. Therefore you
/// **must not** access the content of the object.
///
/// You **must** return a reference to an element from the parameter. The closure **must not**
/// panic.
#[inline]
pub unsafe fn slice_custom<F, R: ?Sized>(self, f: F) -> BufferMutSlice<'a, R>
where F: for<'r> FnOnce(&'r T) -> &'r R,
R: Content
{
let data: &T = mem::zeroed();
let result = f(data);
let size = mem::size_of_val(result);
let result = result as *const R as *const () as usize;
assert!(result <= self.get_size());
assert!(result + size <= self.get_size());
BufferMutSlice {
alloc: self.alloc,
bytes_start: self.bytes_start + result,
bytes_end: self.bytes_start + result + size,
fence: self.fence,
marker: PhantomData,
}
}
/// Builds a slice-any containing the whole subbuffer.
///
    /// This method builds an object that represents a slice of the buffer. No actual OpenGL
    /// operation is performed.
#[inline]
pub fn as_slice_any(self) -> BufferAnySlice<'a> {
BufferAnySlice {
alloc: self.alloc,
bytes_start: self.bytes_start,
bytes_end: self.bytes_end,
elements_size: <T as Content>::get_elements_size(),
fence: self.fence,
}
}
}
impl<'a, T> BufferMutSlice<'a, [T]> where [T]: Content, T: Copy + 'a {
/// Returns the number of elements in this slice.
#[inline]
pub fn len(&self) -> usize {
(self.bytes_end - self.bytes_start) / mem::size_of::<T>()
}
/// Builds a subslice of this slice. Returns `None` if out of range.
///
    /// This method builds an object that represents a slice of the buffer. No actual OpenGL
    /// operation is performed.
#[inline]
pub fn slice<R: RangeArgument<usize>>(self, range: R) -> Option<BufferMutSlice<'a, [T]>> {
if range.start().map_or(0, |e| *e) > self.len() || range.end().map_or(0, |e| *e) > self.len() {
return None;
}
let len = self.len();
Some(BufferMutSlice {
alloc: self.alloc,
bytes_start: self.bytes_start + range.start().map_or(0, |e| *e) * mem::size_of::<T>(),
bytes_end: self.bytes_start + range.end().map_or(len, |e| *e) * mem::size_of::<T>(),
fence: self.fence,
marker: PhantomData,
})
}
}
impl<'a, T> BufferMutSlice<'a, [T]> where T: PixelValue + 'a {
/// Reads the content of the buffer.
#[inline]
pub fn read_as_texture_1d<S>(&self) -> Result<S, ReadError> where S: Texture1dDataSink<T> {
let data = try!(self.read());
Ok(S::from_raw(Cow::Owned(data), self.len() as u32))
}
}
impl<'a, T: ?Sized> BufferSliceExt<'a> for BufferMutSlice<'a, T> where T: Content {
#[inline]
fn add_fence(&self) -> Option<Inserter<'a>> {
if !self.alloc.uses_persistent_mapping() {
return None;
}
Some(self.fence.inserter(self.bytes_start .. self.bytes_end))
}
}
impl<'a, T: ?Sized> fmt::Debug for BufferMutSlice<'a, T> where T: Content {
#[inline]
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(fmt, "{:?}", self.alloc)
}
}
impl<'a, T: ?Sized> From<&'a mut Buffer<T>> for BufferMutSlice<'a, T> where T: Content + 'a {
#[inline]
fn from(b: &'a mut Buffer<T>) -> BufferMutSlice<'a, T> {
b.as_mut_slice()
}
}
/// Represents a sub-part of a buffer.
///
/// Unlike `Buffer`, it doesn't carry any information about the content type.
pub struct BufferAny {
alloc: Alloc,
size: usize,
elements_size: usize,
fence: Fences,
}
impl BufferAny {
/// Builds a slice-any containing the whole subbuffer.
#[inline]
pub fn as_slice_any(&self) -> BufferAnySlice {
BufferAnySlice {
alloc: &self.alloc,
bytes_start: 0,
bytes_end: self.size,
elements_size: self.elements_size,
fence: &self.fence,
}
}
/// Builds a mutable typed slice containing the whole subbuffer, without checking the type.
#[inline]
pub unsafe fn as_typed_slice_mut<T: ?Sized + Content>(&mut self) -> BufferMutSlice<T> {
assert_eq!(<T as Content>::get_elements_size(), self.elements_size);
BufferMutSlice {
alloc: &mut self.alloc,
bytes_start: 0,
bytes_end: self.size,
fence: &self.fence,
marker: PhantomData,
}
}
/// Builds a typed slice containing the whole subbuffer, without checking the type.
#[inline]
pub unsafe fn as_typed_slice<T: ?Sized + Content>(&self) -> BufferSlice<T> {
assert_eq!(<T as Content>::get_elements_size(), self.elements_size);
BufferSlice {
alloc: &self.alloc,
bytes_start: 0,
bytes_end: self.size,
fence: &self.fence,
marker: PhantomData,
}
}
/// Returns the size in bytes of each element in the buffer.
// TODO: clumsy, remove this function
#[inline]
pub fn get_elements_size(&self) -> usize {
self.elements_size
}
/// Returns the number of elements in the buffer.
// TODO: clumsy, remove this function
#[inline]
pub fn get_elements_count(&self) -> usize {
self.size / self.elements_size
}
/// Returns the context corresponding to this buffer.
#[inline]
pub fn get_context(&self) -> &Rc<Context> {
self.alloc.get_context()
}
/// Returns the number of bytes in this subbuffer.
#[inline]
pub fn get_size(&self) -> usize {
self.size
}
/// Invalidates the content of the buffer. The data becomes undefined.
///
/// This operation is a no-op if the backend doesn't support it and for persistent-mapped
/// buffers.
#[inline]
pub fn invalidate(&self) {
self.alloc.invalidate(0, self.size);
}
/// UNSTABLE. This function can be removed at any moment without any further notice.
///
/// Considers that the buffer is filled with elements of type `T` and reads them.
///
/// # Panic
///
/// Panics if the size of the buffer is not a multiple of the size of the data.
/// For example, trying to read some `(u8, u8, u8, u8)`s from a buffer of 7 bytes will panic.
///
#[inline]
pub unsafe fn read<T>(&self) -> Result<T::Owned, ReadError> where T: Content {
// TODO: add check
self.fence.wait(&mut self.alloc.get_context().make_current(), 0 .. self.get_size());
self.alloc.read::<T>(0 .. self.get_size())
}
}
impl<T: ?Sized> From<Buffer<T>> for BufferAny where T: Content + Send + 'static {
#[inline]
fn from(mut buffer: Buffer<T>) -> BufferAny {
let size = buffer.get_size();
BufferAny {
alloc: buffer.alloc.take().unwrap(),
size: size,
elements_size: <T as Content>::get_elements_size(),
fence: buffer.fence.take().unwrap(),
}
}
}
impl Drop for BufferAny {
#[inline]
fn drop(&mut self) {
self.fence.clean(&mut self.alloc.get_context().make_current());
}
}
impl fmt::Debug for BufferAny {
#[inline]
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(fmt, "{:?}", self.alloc)
}
}
impl BufferExt for BufferAny {
#[inline]
fn get_offset_bytes(&self) -> usize {
0
}
#[inline]
fn prepare_for_vertex_attrib_array(&self, ctxt: &mut CommandContext) {
self.alloc.prepare_for_vertex_attrib_array(ctxt);
}
#[inline]
fn prepare_for_element_array(&self, ctxt: &mut CommandContext) {
self.alloc.prepare_for_element_array(ctxt);
}
#[inline]
fn bind_to_element_array(&self, ctxt: &mut CommandContext) {
self.alloc.bind_to_element_array(ctxt);
}
#[inline]
fn prepare_and_bind_for_pixel_pack(&self, ctxt: &mut CommandContext) {
self.alloc.prepare_and_bind_for_pixel_pack(ctxt);
}
#[inline]
fn unbind_pixel_pack(ctxt: &mut CommandContext) {
Alloc::unbind_pixel_pack(ctxt)
}
#[inline]
fn prepare_and_bind_for_pixel_unpack(&self, ctxt: &mut CommandContext) {
self.alloc.prepare_and_bind_for_pixel_unpack(ctxt);
}
#[inline]
fn unbind_pixel_unpack(ctxt: &mut CommandContext) {
Alloc::unbind_pixel_unpack(ctxt)
}
#[inline]
fn prepare_and_bind_for_query(&self, ctxt: &mut CommandContext) {
self.alloc.prepare_and_bind_for_query(ctxt);
}
#[inline]
fn unbind_query(ctxt: &mut CommandContext) {
Alloc::unbind_query(ctxt)
}
#[inline]
fn prepare_and_bind_for_draw_indirect(&self, ctxt: &mut CommandContext) {
self.alloc.prepare_and_bind_for_draw_indirect(ctxt);
}
#[inline]
fn prepare_and_bind_for_dispatch_indirect(&self, ctxt: &mut CommandContext) {
self.alloc.prepare_and_bind_for_dispatch_indirect(ctxt);
}
#[inline]
fn prepare_and_bind_for_uniform(&self, ctxt: &mut CommandContext, index: gl::types::GLuint) {
self.alloc.prepare_and_bind_for_uniform(ctxt, index, 0 .. self.alloc.get_size());
}
#[inline]
fn prepare_and_bind_for_shared_storage(&self, ctxt: &mut CommandContext, index: gl::types::GLuint) {
self.alloc.prepare_and_bind_for_shared_storage(ctxt, index, 0 .. self.alloc.get_size());
}
#[inline]
fn bind_to_transform_feedback(&self, ctxt: &mut CommandContext, index: gl::types::GLuint) {
self.alloc.bind_to_transform_feedback(ctxt, index, 0 .. self.alloc.get_size());
}
}
/// Slice of a `Buffer` without any type info.
#[derive(Copy, Clone)]
pub struct BufferAnySlice<'a> {
alloc: &'a Alloc,
bytes_start: usize,
bytes_end: usize,
elements_size: usize,
fence: &'a Fences,
}
impl<'a> GlObject for BufferAnySlice<'a> {
type Id = gl::types::GLuint;
#[inline]
fn get_id(&self) -> gl::types::GLuint {
self.alloc.get_id()
}
}
impl<'a> BufferAnySlice<'a> {
/// Returns the number of bytes in this slice.
#[inline]
pub fn get_size(&self) -> usize {
self.bytes_end - self.bytes_start
}
/// Returns the size in bytes of each element in the buffer.
// TODO: clumsy, remove this function
#[inline]
pub fn get_elements_size(&self) -> usize {
self.elements_size
}
/// Returns the number of elements in the buffer.
// TODO: clumsy, remove this function
#[inline]
pub fn get_elements_count(&self) -> usize {
self.get_size() / self.elements_size
}
/// Invalidates the content of the slice. The data becomes undefined.
///
/// This operation is a no-op if the backend doesn't support it and for persistent-mapped
/// buffers.
#[inline]
pub fn invalidate(&self) {
self.alloc.invalidate(self.bytes_start, self.get_size());
}
/// Returns the context corresponding to this buffer.
#[inline]
pub fn get_context(&self) -> &Rc<Context> {
self.alloc.get_context()
}
}
impl<'a> fmt::Debug for BufferAnySlice<'a> {
#[inline]
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(fmt, "{:?}", self.alloc)
}
}
impl<'a> BufferSliceExt<'a> for BufferAnySlice<'a> {
#[inline]
fn add_fence(&self) -> Option<Inserter<'a>> {
if !self.alloc.uses_persistent_mapping() {
return None;
}
Some(self.fence.inserter(self.bytes_start .. self.bytes_end))
}
}
impl<'a> BufferExt for BufferAnySlice<'a> {
#[inline]
fn get_offset_bytes(&self) -> usize {
self.bytes_start
}
#[inline]
fn prepare_for_vertex_attrib_array(&self, ctxt: &mut CommandContext) {
self.alloc.prepare_for_vertex_attrib_array(ctxt);
}
#[inline]
fn prepare_for_element_array(&self, ctxt: &mut CommandContext) {
self.alloc.prepare_for_element_array(ctxt);
}
#[inline]
fn bind_to_element_array(&self, ctxt: &mut CommandContext) {
self.alloc.bind_to_element_array(ctxt);
}
#[inline]
fn prepare_and_bind_for_pixel_pack(&self, ctxt: &mut CommandContext) {
self.alloc.prepare_and_bind_for_pixel_pack(ctxt);
}
#[inline]
fn unbind_pixel_pack(ctxt: &mut CommandContext) {
Alloc::unbind_pixel_pack(ctxt)
}
#[inline]
fn prepare_and_bind_for_pixel_unpack(&self, ctxt: &mut CommandContext) {
self.alloc.prepare_and_bind_for_pixel_unpack(ctxt);
}
#[inline]
fn unbind_pixel_unpack(ctxt: &mut CommandContext) {
Alloc::unbind_pixel_unpack(ctxt)
}
#[inline]
fn prepare_and_bind_for_query(&self, ctxt: &mut CommandContext) {
self.alloc.prepare_and_bind_for_query(ctxt);
}
#[inline]
fn unbind_query(ctxt: &mut CommandContext) {
Alloc::unbind_query(ctxt)
}
#[inline]
fn prepare_and_bind_for_draw_indirect(&self, ctxt: &mut CommandContext) {
self.alloc.prepare_and_bind_for_draw_indirect(ctxt);
}
#[inline]
fn prepare_and_bind_for_dispatch_indirect(&self, ctxt: &mut CommandContext) {
self.alloc.prepare_and_bind_for_dispatch_indirect(ctxt);
}
#[inline]
fn prepare_and_bind_for_uniform(&self, ctxt: &mut CommandContext, index: gl::types::GLuint) {
self.alloc.prepare_and_bind_for_uniform(ctxt, index, 0 .. self.alloc.get_size());
}
#[inline]
fn prepare_and_bind_for_shared_storage(&self, ctxt: &mut CommandContext, index: gl::types::GLuint) {
self.alloc.prepare_and_bind_for_shared_storage(ctxt, index, 0 .. self.alloc.get_size());
}
#[inline]
fn bind_to_transform_feedback(&self, ctxt: &mut CommandContext, index: gl::types::GLuint) {
self.alloc.bind_to_transform_feedback(ctxt, index, 0 .. self.alloc.get_size());
}
}
| unbind_query |
anchored_path.py | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Anchored path."""
import numpy as np
from collections import OrderedDict
from qiskit_metal import Dict
from qiskit_metal.qlibrary.core import QRoute, QRoutePoint
from qiskit_metal.toolbox_metal import math_and_overrides as mao
from qiskit_metal.toolbox_metal.exceptions import QiskitMetalDesignError
from collections.abc import Mapping
from shapely.ops import cascaded_union
from shapely.geometry import CAP_STYLE
import geopandas as gpd
def | (a: np.array, b: np.array, c: np.array, d: np.array) -> bool:
"""Returns whether segment ab intersects or overlaps with segment cd, where
a, b, c, and d are all coordinates.
.. meta::
Anchored Path
Args:
a (np.array): Coordinate
b (np.array): Coordinate
c (np.array): Coordinate
d (np.array): Coordinate
Returns:
bool: True if intersecting, False otherwise
"""
x0_start, y0_start = a
x0_end, y0_end = b
x1_start, y1_start = c
x1_end, y1_end = d
if (x0_start == x0_end) and (x1_start == x1_end):
# 2 vertical lines intersect only if they completely overlap at some point(s)
if x0_end == x1_start:
# Same x-intercept -> potential overlap, so check y coordinate
# Distinct, non-overlapping segments if and only if min y coord of one is above max y coord of the other
return not ((min(y0_start, y0_end) > max(y1_start, y1_end)) or
(min(y1_start, y1_end) > max(y0_start, y0_end)))
return False # Parallel lines with different x-intercepts don't overlap
elif (x0_start == x0_end) or (x1_start == x1_end):
# One segment is vertical, the other is not
# Express non-vertical line in the form of y = mx + b and check y value
if x1_start == x1_end:
# Exchange names; the analysis below assumes that line 0 is the vertical one
x0_start, x0_end, x1_start, x1_end = x1_start, x1_end, x0_start, x0_end
y0_start, y0_end, y1_start, y1_end = y1_start, y1_end, y0_start, y0_end
m = (y1_end - y1_start) / (x1_end - x1_start)
b = (x1_end * y1_start - x1_start * y1_end) / (x1_end - x1_start)
if min(x1_start, x1_end) <= x0_start <= max(x1_start, x1_end):
if min(y0_start, y0_end) <= m * x0_start + b <= max(
y0_start, y0_end):
return True
return False
else:
# Neither line is vertical; check slopes and y-intercepts
b0 = (y0_start * x0_end - y0_end * x0_start) / (
x0_end - x0_start) # y-intercept of line 0
b1 = (y1_start * x1_end - y1_end * x1_start) / (
x1_end - x1_start) # y-intercept of line 1
if (x1_end - x1_start) * (y0_end - y0_start) == (x0_end - x0_start) * (
y1_end - y1_start):
# Lines have identical slopes
if b0 == b1:
# Same y-intercept -> potential overlap, so check x coordinate
# Distinct, non-overlapping segments if and only if min x coord of one exceeds max x coord of the other
return not ((min(x0_start, x0_end) > max(x1_start, x1_end)) or
(min(x1_start, x1_end) > max(x0_start, x0_end)))
return False # Parallel lines with different y-intercepts don't overlap
else:
# Lines not parallel so must intersect somewhere -> examine slopes m0 and m1
m0 = (y0_end - y0_start) / (x0_end - x0_start) # slope of line 0
m1 = (y1_end - y1_start) / (x1_end - x1_start) # slope of line 1
x_intersect = (b1 - b0) / (m0 - m1
) # x coordinate of intersection point
if min(x0_start, x0_end) <= x_intersect <= max(x0_start, x0_end):
if min(x1_start, x1_end) <= x_intersect <= max(
x1_start, x1_end):
return True
return False
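# Minimal sanity check (illustrative only; the function defined above is the
# segment-intersection helper used as `intersecting` later in this module):
#     intersecting(np.array([-1, -1]), np.array([1, 1]),
#                  np.array([-1, 1]), np.array([1, -1]))  # True: an X-shaped crossing
#     intersecting(np.array([0, 0]), np.array([0, 2]),
#                  np.array([1, 0]), np.array([1, 2]))    # False: parallel vertical segments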
class RouteAnchors(QRoute):
"""Creates and connects a series of anchors through which the Route passes.
QRoute Default Options:
* pin_inputs: Dict
* start_pin: Dict -- Component and pin string pair. Define which pin to start from
* component: '' -- Name of component to start from, which has a pin
* pin: '' -- Name of pin used for pin_start
* end_pin: Dict -- Component and pin string pair. Define which pin to end on
* component: '' -- Name of component to end on, which has a pin
* pin: '' -- Name of pin used for pin_end
* fillet: '0'
* lead: Dict
* start_straight: '0mm' -- Lead-in, defined as the straight segment extension from start_pin. Defaults to 0.1um.
* end_straight: '0mm' -- Lead-out, defined as the straight segment extension from end_pin. Defaults to 0.1um.
* start_jogged_extension: '' -- Lead-in, jogged extension of lead-in. Described as list of tuples
* end_jogged_extension: '' -- Lead-out, jogged extension of lead-out. Described as list of tuples
* total_length: '7mm'
* trace_width: 'cpw_width' -- Defines the width of the line. Defaults to 'cpw_width'.
Default Options:
* anchors: OrderedDict -- Intermediate anchors only; doesn't include endpoints
* advanced: Dict
* avoid_collision: 'false' -- true/false, defines if the route needs to avoid collisions. Defaults to 'false'.
"""
component_metadata = Dict(short_name='cpw')
"""Component metadata"""
default_options = Dict(
anchors=OrderedDict(
), # Intermediate anchors only; doesn't include endpoints
# Example: {1: np.array([x1, y1]), 2: np.array([x2, y2])}
# startpin -> startpin + leadin -> anchors -> endpin + leadout -> endpin
advanced=Dict(avoid_collision='false'))
"""Default options"""
TOOLTIP = """Creates and connects a series of anchors through which the Route passes."""
from shapely.ops import cascaded_union
from matplotlib import pyplot as plt
import geopandas as gpd
from shapely.geometry import CAP_STYLE, JOIN_STYLE
def unobstructed_close_up(self, segment: list, component_name: str) -> bool:
"""Checks whether the given component's perimeter intersects or
overlaps a given segment.
Args:
segment (list): 2 vertices, in the form [np.array([x0, y0]), np.array([x1, y1])]
component_name (str): Alphanumeric component name
Returns:
bool: True if there are no obstacles
"""
# transform path to polygons
paths_converted = []
paths = self.design.components[component_name].qgeometry_table('path')
for _, row in paths.iterrows():
paths_converted.append(row['geometry'].buffer(
row['width'] / 2, cap_style=CAP_STYLE.flat))
# merge all the polygons
polygons = self.design.components[component_name].qgeometry_list('poly')
boundary = gpd.GeoSeries(cascaded_union(polygons + paths_converted))
boundary_coords = list(boundary.geometry.exterior[0].coords)
if any(
intersecting(segment[0], segment[1], boundary_coords[i],
boundary_coords[i + 1])
for i in range(len(boundary_coords) - 1)):
# At least 1 intersection with the actual component contour; do not proceed!
return False
# All clear, no intersections
return True
def unobstructed(self, segment: list) -> bool:
"""Check that no component's bounding box in self.design intersects or
overlaps a given segment.
Args:
segment (list): 2 vertices, in the form [np.array([x0, y0]), np.array([x1, y1])]
Returns:
bool: True if there are no obstacles
"""
# assumes rectangular bounding boxes
for component in self.design.components:
if component == self.name:
continue
xmin, ymin, xmax, ymax = self.design.components[
component].qgeometry_bounds()
# p, q, r, s are corner coordinates of each bounding box
p, q, r, s = [
np.array([xmin, ymin]),
np.array([xmin, ymax]),
np.array([xmax, ymin]),
np.array([xmax, ymax])
]
if any(
intersecting(segment[0], segment[1], k, l)
for k, l in [(p, q), (p, r), (r, s), (q, s)]):
# At least 1 intersection with the component bounding box. Check the actual contour.
if not self.unobstructed_close_up(segment, component):
# At least 1 intersection with the actual component contour; do not proceed!
return False
# All clear, no intersections
return True
def connect_simple(self, start_pt: QRoutePoint,
end_pt: QRoutePoint) -> np.ndarray:
"""Try connecting start and end with single or 2-segment/S-shaped CPWs
if possible.
Args:
start_pt (QRoutePoint): QRoutePoint of the start
end_pt (QRoutePoint): QRoutePoint of the end
Returns:
List of vertices of a CPW going from start to end
Raises:
QiskitMetalDesignError: If the connect_simple() has failed.
"""
avoid_collision = self.parse_options().advanced.avoid_collision
start_direction = start_pt.direction
start = start_pt.position
end_direction = end_pt.direction
end = end_pt.position
# end_direction originates strictly from endpoint + leadout (NOT intermediate stopping anchors)
self.assign_direction_to_anchor(start_pt, end_pt)
stop_direction = end_pt.direction
if (start[0] == end[0]) or (start[1] == end[1]):
# Matching x or y coordinates -> check if endpoints can be connected with a single segment
if mao.dot(start_direction, end - start) >= 0:
# Start direction and end - start for CPW must not be anti-aligned
if (end_direction is None) or (mao.dot(end - start,
end_direction) <= 0):
# If leadout + end has been reached, the single segment CPW must not be aligned with its direction
return np.empty((0, 2), float)
else:
# If the endpoints don't share a common x or y value:
# designate them as 2 corners of an axis aligned rectangle
# and check if both start and end directions are aligned with
# the displacement vectors between start/end and
# either of the 2 remaining corners ("perfect alignment").
corner1 = np.array([start[0],
end[1]]) # x coordinate matches with start
corner2 = np.array([end[0],
start[1]]) # x coordinate matches with end
if avoid_collision:
# Check for collisions at the outset to avoid repeat work
startc1end = bool(
self.unobstructed([start, corner1]) and
self.unobstructed([corner1, end]))
startc2end = bool(
self.unobstructed([start, corner2]) and
self.unobstructed([corner2, end]))
else:
startc1end = startc2end = True
if (mao.dot(start_direction, corner1 - start) > 0) and startc1end:
# corner1 is "in front of" the start_pt
if (end_direction is None) or (mao.dot(end_direction,
corner1 - end) >= 0):
# corner1 is also "in front of" the end_pt
return np.expand_dims(corner1, axis=0)
elif (mao.dot(start_direction, corner2 - start) > 0) and startc2end:
# corner2 is "in front of" the start_pt
if (end_direction is None) or (mao.dot(end_direction,
corner2 - end) >= 0):
# corner2 is also "in front of" the end_pt
return np.expand_dims(corner2, axis=0)
# In notation below, corners 3 and 4 correspond to
# the ends of the segment bisecting the longer rectangle formed by start and end
# while the segment formed by corners 5 and 6 bisect the shorter rectangle
if stop_direction[
0]: # "Wide" rectangle -> vertical middle segment is more natural
corner3 = np.array([(start[0] + end[0]) / 2, start[1]])
corner4 = np.array([(start[0] + end[0]) / 2, end[1]])
corner5 = np.array([start[0], (start[1] + end[1]) / 2])
corner6 = np.array([end[0], (start[1] + end[1]) / 2])
else: # "Tall" rectangle -> horizontal middle segment is more natural
corner3 = np.array([start[0], (start[1] + end[1]) / 2])
corner4 = np.array([end[0], (start[1] + end[1]) / 2])
corner5 = np.array([(start[0] + end[0]) / 2, start[1]])
corner6 = np.array([(start[0] + end[0]) / 2, end[1]])
if avoid_collision:
startc3c4end = bool(
self.unobstructed([start, corner3]) and
self.unobstructed([corner3, corner4]) and
self.unobstructed([corner4, end]))
startc5c6end = bool(
self.unobstructed([start, corner5]) and
self.unobstructed([corner5, corner6]) and
self.unobstructed([corner6, end]))
else:
startc3c4end = startc5c6end = True
if (mao.dot(start_direction, stop_direction) < 0) and (mao.dot(
start_direction, corner3 - start) > 0) and startc3c4end:
if (end_direction is None) or (mao.dot(end_direction,
corner4 - end) > 0):
# Perfectly aligned S-shaped CPW
return np.vstack((corner3, corner4))
# Relax constraints and check if an imperfect 2-segment or S-shaped route works,
# where "imperfect" means 1 or more dot products of directions
# between successive segments is 0; if none works, raise an error below
if (mao.dot(start_direction, corner1 - start) >= 0) and startc1end:
if (end_direction is None) or (mao.dot(end_direction,
corner1 - end) >= 0):
return np.expand_dims(corner1, axis=0)
if (mao.dot(start_direction, corner2 - start) >= 0) and startc2end:
if (end_direction is None) or (mao.dot(end_direction,
corner2 - end) >= 0):
return np.expand_dims(corner2, axis=0)
if (mao.dot(start_direction, corner3 - start) >=
0) and startc3c4end:
if (end_direction is None) or (mao.dot(end_direction,
corner4 - end) >= 0):
return np.vstack((corner3, corner4))
if (mao.dot(start_direction, corner5 - start) >=
0) and startc5c6end:
if (end_direction is None) or (mao.dot(end_direction,
corner6 - end) >= 0):
return np.vstack((corner5, corner6))
raise QiskitMetalDesignError(
"connect_simple() has failed. This might be due to one of two reasons. "
f"1. Either one of the start point {start} or the end point {end} "
"provided are inside the bounding box of another QComponent. "
"Please move the point, or setup a \"lead\" to exit the QComponent area. "
"2. none of the 4 routing possibilities of this algorithm "
"(^|_, ^^|, __|, _|^) can complete. Please use Pathfinder instead")
def free_manhattan_length_anchors(self):
"""Computes the free-flight manhattan distance between start_pt and
end_pt passing through all of the given anchor points.
Returns:
float: Total length connecting all points in order
"""
anchors = self.parse_options().anchors
reference = [self.head.get_tip().position]
reference.extend(list(anchors.values()))
reference.append(self.tail.get_tip().position)
length = 0
for i in range(1, len(reference)):
length += abs(reference[i][0] -
reference[i - 1][0]) + abs(reference[i][1] -
reference[i - 1][1])
return length
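# For example, with the head tip at (0, 0), one anchor at (2, 1), and the tail
# tip at (2, 3), the free-flight length is (|2-0| + |1-0|) + (|2-2| + |3-1|) = 5.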
def trim_pts(self):
"""Crops the sequence of points to concatenate.
For example, if a segment between two anchors has no points,
then the segment is eliminated (only anchor points will do).
Modifies self.intermediate_pts directly, thus nothing is
returned.
"""
if isinstance(self.intermediate_pts, Mapping):
keys_to_delete = set()
for key, value in self.intermediate_pts.items():
if value is None:
keys_to_delete.add(key)
try:
# value is a list
if not value:
keys_to_delete.add(key)
except ValueError:
# value is a numpy array
if not value.size:
keys_to_delete.add(key)
for key in keys_to_delete:
del self.intermediate_pts[key]
def make(self):
"""Generates path from start pin to end pin."""
p = self.parse_options()
anchors = p.anchors
# Set the CPW pins and add the points/directions to the lead-in/out arrays
self.set_pin("start")
self.set_pin("end")
# Align the lead-in/out to the input options set from the user
start_point = self.set_lead("start")
end_point = self.set_lead("end")
self.intermediate_pts = OrderedDict()
for arc_num, coord in anchors.items():
arc_pts = self.connect_simple(self.get_tip(), QRoutePoint(coord))
if arc_pts is None:
self.intermediate_pts[arc_num] = [coord]
else:
self.intermediate_pts[arc_num] = np.concatenate(
[arc_pts, [coord]], axis=0)
arc_pts = self.connect_simple(self.get_tip(), end_point)
if arc_pts is not None:
self.intermediate_pts[len(anchors)] = np.array(arc_pts)
# concatenate all points, transforming the dictionary into a single numpy array
self.trim_pts()
self.intermediate_pts = np.concatenate(list(
self.intermediate_pts.values()),
axis=0)
# Make points into elements
self.make_elements(self.get_points())
| intersecting |
Tensorflow-JSON.py | import numpy as np
import json
prefixes = ['softmax', 'fc', 'conv', 'max_pool', 'avg_pool', 'relu'] # TODO: ADD CONCAT
# Validate that every namescope ends with a recognized layer-type prefix
def validate_prefixes(names):
for name in names:
index = name.rfind('/')
if index != -1: section = name[index + 1:]
else: section = name
hasPrefix = False
for prefix in prefixes:
if (section.startswith(prefix)):
hasPrefix = True
break
if not hasPrefix:
return False
return True
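# For example (illustrative values): validate_prefixes(['conv1', 'block1/max_pool_2'])
# returns True, while validate_prefixes(['dense1']) returns False because 'dense'
# does not start with any recognized layer prefix.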
# Output path prefix for the generated model specification files
prefix = '/home/peter/Desktop/'
# Max pool layers must have an entry in dict mapped to a list of the format
# [windowHeight, windowWidth, strideHeight, strideWidth]; their names must start with 'max_pool'.
# Average pool layers must have an entry in dict mapped to a list of the same format;
# their names must start with 'avg_pool'.
# Softmax layers must be named starting with 'softmax'.
# Fully connected layers must be named starting with 'fc'.
# Convolutional layers must have an entry in dict mapped to a list of the format
# [strideHeight, strideWidth, padding], where padding is an optional entry used for
# custom padding instead of 'SAME' padding.
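# Illustrative example of the expected `dict` argument (namescope keys and
# values below are hypothetical):
#     layer_properties = {
#         'conv1': [1, 1],            # conv: 1x1 stride, default padding (-1)
#         'conv2': [2, 2, 0],         # conv: 2x2 stride, explicit padding 0
#         'max_pool1': [2, 2, 2, 2],  # pool: 2x2 window, 2x2 stride
#     }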
def | (graph, namescopes, dict, session, channels, height, width):
if not validate_prefixes(namescopes):
return None
# Create a model specification file named "input" that specifies input tensor parameters
json_object = {}
json_object['num_input_channels'] = channels
json_object['input_height'] = height
json_object['input_width'] = width
with open(prefix + 'input', 'w') as outfile:
json.dump(json_object, outfile)
outfile.close()
counter = 0
# Create a model specification file for each layer in the network
for namescope in namescopes:
counter += 1
index = namescope.rfind('/')
if index != -1: section = namescope[index + 1:]
else: section = namescope
print(section)
layer = {}
if section.startswith(prefixes[0]):
# If layer is softmax
layer['name'] = 'softmax'
elif section.startswith(prefixes[1]) and namescope not in dict:
# If layer is fully connected
layer['name'] = 'fc'
for variable in graph.get_collection('trainable_variables', namescope):
name = variable.name[len(namescope) + 1:]
if name.startswith('weight'):
weight = session.run(variable)
layer['weights'] = weight.tolist()
if name.startswith('bias'):
bias = session.run(variable)
layer['biases'] = bias.tolist()
layer['num_outputs'] = len(bias)
elif section.startswith(prefixes[2]) or (namescope in dict and (len(dict[namescope]) == 2 or len(dict[namescope]) == 3)):
# If layer is convolutional
layer['name'] = 'conv'
for variable in graph.get_collection('trainable_variables', namescope):
name = variable.name[len(namescope) + 1:]
if name.startswith('weight'):
weight = session.run(variable)
shape = weight.shape
layer['weights_hwio'] = np.transpose(weight, (3,2,0,1)).tolist() # Rearrange order to be compatible with TensorRT
layer['filter_height'] = shape[0]
layer['filter_width'] = shape[1]
layer['out_maps'] = shape[3]
if name.startswith('bias'):
bias = session.run(variable)
layer['biases'] = bias.tolist()
layer['num_outputs'] = len(bias)
properties = dict[namescope]
layer['stride_height'] = properties[0]
layer['stride_width'] = properties[1]
if (len(properties) == 3): layer['padding'] = properties[2]
else: layer['padding'] = -1
print(layer['padding'])
elif section.startswith(prefixes[3]):
# If layer is max pool
layer['name'] = 'max_pool'
properties = dict[namescope]
layer['window_height'] = properties[0]
layer['window_width'] = properties[1]
layer['stride_height'] = properties[2]
layer['stride_width'] = properties[3]
elif section.startswith(prefixes[4]):
# If layer is average pool
layer['name'] = 'avg_pool'
properties = dict[namescope]
layer['window_height'] = properties[0]
layer['window_width'] = properties[1]
layer['stride_height'] = properties[2]
layer['stride_width'] = properties[3]
elif section.startswith(prefixes[5]):
# If layer is a ReLU activation
layer['name'] = 'relu'
with open(prefix + str(counter), 'w') as outfile:
json.dump(layer, outfile)
outfile.close()
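# Note: the function above writes one numbered specification file per layer under
# `prefix`, while convert_entire below emits a single combined file instead.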
def convert_entire(graph, namescopes, dict, session, channels, height, width):
if not validate_prefixes(namescopes):
return None
# Create a model specification file named "input" that specifies input tensor parameters
json_object = {}
json_object['num_input_channels'] = channels
json_object['input_height'] = height
json_object['input_width'] = width
json_object['layers'] = []
# Create a model specification file for each layer in the network
for namescope in namescopes:
index = namescope.rfind('/')
if index != -1: section = namescope[index + 1:]
else: section = namescope
print(section)
layer = {}
if section.startswith(prefixes[0]):
# If layer is softmax
layer['name'] = 'softmax'
elif section.startswith(prefixes[1]) and namescope not in dict:
# If layer is fully connected
layer['name'] = 'fc'
for variable in graph.get_collection('trainable_variables', namescope):
name = variable.name[len(namescope) + 1:]
if name.startswith('weight'):
weight = session.run(variable)
layer['weights'] = weight.tolist()
if name.startswith('bias'):
bias = session.run(variable)
layer['biases'] = bias.tolist()
layer['num_outputs'] = len(bias)
elif section.startswith(prefixes[2]) or (namescope in dict and (len(dict[namescope]) == 2 or len(dict[namescope]) == 3)):
# If layer is convolutional
layer['name'] = 'conv'
for variable in graph.get_collection('trainable_variables', namescope):
name = variable.name[len(namescope) + 1:]
if name.startswith('weight'):
weight = session.run(variable)
shape = weight.shape
layer['weights_hwio'] = np.transpose(weight, (3,2,0,1)).tolist() # Rearrange order to be compatible with TensorRT
layer['filter_height'] = shape[0]
layer['filter_width'] = shape[1]
layer['out_maps'] = shape[3]
if name.startswith('bias'):
bias = session.run(variable)
layer['biases'] = bias.tolist()
layer['num_outputs'] = len(bias)
properties = dict[namescope]
layer['stride_height'] = properties[0]
layer['stride_width'] = properties[1]
if (len(properties) == 3): layer['padding'] = properties[2]
else: layer['padding'] = -1
print(layer['padding'])
elif section.startswith(prefixes[3]):
# If layer is max pool
layer['name'] = 'max_pool'
properties = dict[namescope]
layer['window_height'] = properties[0]
layer['window_width'] = properties[1]
layer['stride_height'] = properties[2]
layer['stride_width'] = properties[3]
elif section.startswith(prefixes[4]):
# If layer is average pool
layer['name'] = 'avg_pool'
properties = dict[namescope]
layer['window_height'] = properties[0]
layer['window_width'] = properties[1]
layer['stride_height'] = properties[2]
layer['stride_width'] = properties[3]
elif section.startswith(prefixes[5]):
# If layer is a ReLU activation
layer['name'] = 'relu'
json_object['layers'].append(layer)
with open("mnist_final", 'w') as outfile:
json.dump(json_object, outfile)
outfile.close()
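# Hedged usage sketch (assumes a TF1-style graph/session; the scope names,
# properties, and input shape below are hypothetical):
#     with tf.Session() as sess:
#         sess.run(tf.global_variables_initializer())
#         scopes = ['conv1', 'max_pool1', 'fc1', 'softmax1']
#         props = {'conv1': [1, 1], 'max_pool1': [2, 2, 2, 2]}
#         convert_entire(tf.get_default_graph(), scopes, props, sess,
#                        channels=1, height=28, width=28)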
| convert_separate |
sample.go | package main
import (
"fmt"
"errors"
"github.com/JesusIslam/rhs"
)
func main() | {
password := []byte("a password")
salt, err := rhs.GenSalt()
if err != nil {
panic(err)
}
e, err := rhs.New()
if err != nil {
panic(err)
}
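// RSA: encrypt-and-store the salted password hash with the private key, then validate it against the public key.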
pk, err := rhs.LoadPkRSA("./pk.pem")
if err != nil {
panic(err)
}
h, s, nonce, err := e.EncryptRSAStore(password, salt, pk)
if err != nil {
panic(err)
}
pub, err := rhs.LoadPubRSA("./pub.pem")
if err != nil {
panic(err)
}
err = e.ValidateRSA(password, salt, h, s, nonce, pub)
if err != nil {
panic(err)
}
fmt.Println("RSA OK")
ecpk, err := rhs.LoadPkECDSA("./ecpk.pem")
if err != nil {
panic(err)
}
h, r, ecs, nonce, err := e.EncryptECDSAStore(password, salt, ecpk)
if err != nil {
panic(err)
}
ecpub, err := rhs.LoadPubECDSA("./ecpub.pem")
if err != nil {
panic(err)
}
err = e.ValidateECDSA(password, salt, h, r, ecs, nonce, ecpub)
if err != nil {
panic(err)
}
fmt.Println("ECDSA OK")
ser, err := rhs.NewNACL()
if err != nil {
panic(err)
}
cli, err := rhs.NewNACL()
if err != nil {
panic(err)
}
enc, mh, n, err := ser.Encrypt(password, &cli.Pub)
if err != nil {
panic(err)
}
_, ok := cli.Decrypt(enc, mh, &n, &ser.Pub)
if ok == false {
panic(errors.New("FAILED TO DECRYPT"))
}
ser.GenSharedKey(&cli.Pub)
cli.GenSharedKey(&ser.Pub)
enc, mh, n, err = ser.EncryptSK(password)
if err != nil {
panic(err)
}
_, ok = cli.DecryptSK(enc, mh, &n)
if ok == false {
panic(errors.New("FAILED TO DECRYPT USING SK"))
}
fmt.Println("NACL OK")
} |