file_name (string, 3–137 chars) | prefix (string, 0–918k chars) | suffix (string, 0–962k chars) | middle (string, 0–812k chars)
---|---|---|---|
api_op_DescribeInstanceHealth.go | // Code generated by smithy-go-codegen DO NOT EDIT.
package elasticloadbalancing
import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing/types"
"github.com/aws/smithy-go/middleware"
smithytime "github.com/aws/smithy-go/time"
smithyhttp "github.com/aws/smithy-go/transport/http"
smithywaiter "github.com/aws/smithy-go/waiter"
"github.com/jmespath/go-jmespath"
"time"
)
// Describes the state of the specified instances with respect to the specified
// load balancer. If no instances are specified, the call describes the state of
// all instances that are currently registered with the load balancer. If instances
// are specified, their state is returned even if they are no longer registered
// with the load balancer. The state of terminated instances is not returned.
func (c *Client) DescribeInstanceHealth(ctx context.Context, params *DescribeInstanceHealthInput, optFns ...func(*Options)) (*DescribeInstanceHealthOutput, error) {
if params == nil {
params = &DescribeInstanceHealthInput{}
}
result, metadata, err := c.invokeOperation(ctx, "DescribeInstanceHealth", params, optFns, c.addOperationDescribeInstanceHealthMiddlewares)
if err != nil {
return nil, err
}
out := result.(*DescribeInstanceHealthOutput)
out.ResultMetadata = metadata
return out, nil
}
// Contains the parameters for DescribeInstanceHealth.
type DescribeInstanceHealthInput struct {
// The name of the load balancer.
//
// This member is required.
LoadBalancerName *string
// The IDs of the instances.
Instances []types.Instance
noSmithyDocumentSerde
}
// Contains the output for DescribeInstanceHealth.
type DescribeInstanceHealthOutput struct {
// Information about the health of the instances.
InstanceStates []types.InstanceState
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
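// Example (editor's illustrative sketch, not part of the generated file):
// calling DescribeInstanceHealth for a single load balancer and printing each
// instance's state. Assumes an already configured *Client; the load balancer
// name is a placeholder.
func exampleDescribeInstanceHealth(ctx context.Context, client *Client) error {
    name := "my-load-balancer" // placeholder
    out, err := client.DescribeInstanceHealth(ctx, &DescribeInstanceHealthInput{
        LoadBalancerName: &name,
    })
    if err != nil {
        return err
    }
    for _, state := range out.InstanceStates {
        // InstanceId and State are *string fields on types.InstanceState.
        if state.InstanceId != nil && state.State != nil {
            fmt.Printf("%s: %s\n", *state.InstanceId, *state.State)
        }
    }
    return nil
}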
func (c *Client) addOperationDescribeInstanceHealthMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsquery_serializeOpDescribeInstanceHealth{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsquery_deserializeOpDescribeInstanceHealth{}, middleware.After)
if err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addOpDescribeInstanceHealthValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeInstanceHealth(options.Region), middleware.Before); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
return nil
}
// DescribeInstanceHealthAPIClient is a client that implements the
// DescribeInstanceHealth operation.
type DescribeInstanceHealthAPIClient interface {
DescribeInstanceHealth(context.Context, *DescribeInstanceHealthInput, ...func(*Options)) (*DescribeInstanceHealthOutput, error)
}
var _ DescribeInstanceHealthAPIClient = (*Client)(nil)
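// Example (editor's illustrative sketch): because the waiter below depends on
// the DescribeInstanceHealthAPIClient interface rather than on *Client, a stub
// like this can stand in for the service in tests. All names here are
// hypothetical.
type stubInstanceHealthClient struct {
    out *DescribeInstanceHealthOutput
    err error
}

func (s *stubInstanceHealthClient) DescribeInstanceHealth(ctx context.Context, in *DescribeInstanceHealthInput, optFns ...func(*Options)) (*DescribeInstanceHealthOutput, error) {
    return s.out, s.err
}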
// AnyInstanceInServiceWaiterOptions are waiter options for
// AnyInstanceInServiceWaiter
type AnyInstanceInServiceWaiterOptions struct {
// Set of options to modify how an operation is invoked. These apply to all
// operations invoked for this client. Use functional options on the operation
// call to modify this list for per-operation behavior.
APIOptions []func(*middleware.Stack) error
// MinDelay is the minimum amount of time to delay between retries. If unset,
// AnyInstanceInServiceWaiter will use a default minimum delay of 15 seconds. Note
// that MinDelay must resolve to a value less than or equal to the MaxDelay.
MinDelay time.Duration
// MaxDelay is the maximum amount of time to delay between retries. If unset or set
// to zero, AnyInstanceInServiceWaiter will use a default maximum delay of 120 seconds.
// Note that MaxDelay must resolve to a value greater than or equal to the MinDelay.
MaxDelay time.Duration
// LogWaitAttempts is used to enable logging for waiter retry attempts.
LogWaitAttempts bool
// Retryable is a function that can be used to override the service-defined
// waiter behavior based on the operation output or the returned error. The waiter
// uses this function to decide whether a state is retryable or terminal. By
// default, service-modeled logic populates this option, so it can be used to
// define a custom waiter state with fallback to the service-modeled waiter state
// mutators. The function returns an error for a failure state. For a retry state
// it returns true and a nil error, and for a success state it returns false and a
// nil error.
Retryable func(context.Context, *DescribeInstanceHealthInput, *DescribeInstanceHealthOutput, error) (bool, error)
}
// AnyInstanceInServiceWaiter defines the waiters for AnyInstanceInService
type AnyInstanceInServiceWaiter struct {
client DescribeInstanceHealthAPIClient
options AnyInstanceInServiceWaiterOptions
}
// NewAnyInstanceInServiceWaiter constructs an AnyInstanceInServiceWaiter.
func NewAnyInstanceInServiceWaiter(client DescribeInstanceHealthAPIClient, optFns ...func(*AnyInstanceInServiceWaiterOptions)) *AnyInstanceInServiceWaiter {
options := AnyInstanceInServiceWaiterOptions{}
options.MinDelay = 15 * time.Second
options.MaxDelay = 120 * time.Second
options.Retryable = anyInstanceInServiceStateRetryable
for _, fn := range optFns {
fn(&options)
}
return &AnyInstanceInServiceWaiter{
client: client,
options: options,
}
}
// Wait calls the waiter function for the AnyInstanceInService waiter. maxWaitDur
// is the maximum duration the waiter will wait; it is required and must be
// greater than zero.
func (w *AnyInstanceInServiceWaiter) Wait(ctx context.Context, params *DescribeInstanceHealthInput, maxWaitDur time.Duration, optFns ...func(*AnyInstanceInServiceWaiterOptions)) error {
if maxWaitDur <= 0 {
return fmt.Errorf("maximum wait time for waiter must be greater than zero")
}
options := w.options
for _, fn := range optFns {
fn(&options)
}
if options.MaxDelay <= 0 {
options.MaxDelay = 120 * time.Second
}
if options.MinDelay > options.MaxDelay {
return fmt.Errorf("minimum waiter delay %v must be lesser than or equal to maximum waiter delay of %v.", options.MinDelay, options.MaxDelay)
}
ctx, cancelFn := context.WithTimeout(ctx, maxWaitDur)
defer cancelFn()
logger := smithywaiter.Logger{}
remainingTime := maxWaitDur
var attempt int64
for {
attempt++
apiOptions := options.APIOptions
start := time.Now()
if options.LogWaitAttempts {
logger.Attempt = attempt
apiOptions = append([]func(*middleware.Stack) error{}, options.APIOptions...)
apiOptions = append(apiOptions, logger.AddLogger)
}
out, err := w.client.DescribeInstanceHealth(ctx, params, func(o *Options) {
o.APIOptions = append(o.APIOptions, apiOptions...)
})
retryable, err := options.Retryable(ctx, params, out, err)
if err != nil {
return err
}
if !retryable {
return nil
}
remainingTime -= time.Since(start)
if remainingTime < options.MinDelay || remainingTime <= 0 {
break
}
// compute exponential backoff between waiter retries
delay, err := smithywaiter.ComputeDelay(
attempt, options.MinDelay, options.MaxDelay, remainingTime,
)
if err != nil {
return fmt.Errorf("error computing waiter delay, %w", err)
}
remainingTime -= delay
// sleep for the delay amount before invoking a request
if err := smithytime.SleepWithContext(ctx, delay); err != nil {
return fmt.Errorf("request cancelled while waiting, %w", err)
}
}
return fmt.Errorf("exceeded max wait time for AnyInstanceInService waiter")
}
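// Example (editor's illustrative sketch, not part of the generated file):
// waiting until at least one registered instance reports InService. Assumes a
// configured *Client; the load balancer name and the 5-minute cap are
// placeholders.
func exampleWaitForAnyInstanceInService(ctx context.Context, client *Client) error {
    waiter := NewAnyInstanceInServiceWaiter(client, func(o *AnyInstanceInServiceWaiterOptions) {
        o.LogWaitAttempts = true
    })
    name := "my-load-balancer" // placeholder
    return waiter.Wait(ctx, &DescribeInstanceHealthInput{LoadBalancerName: &name}, 5*time.Minute)
}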
func | (ctx context.Context, input *DescribeInstanceHealthInput, output *DescribeInstanceHealthOutput, err error) (bool, error) {
if err == nil {
pathValue, err := jmespath.Search("InstanceStates[].State", output)
if err != nil {
return false, fmt.Errorf("error evaluating waiter state: %w", err)
}
expectedValue := "InService"
listOfValues, ok := pathValue.([]interface{})
if !ok {
return false, fmt.Errorf("waiter comparator expected list got %T", pathValue)
}
for _, v := range listOfValues {
value, ok := v.(*string)
if !ok {
return false, fmt.Errorf("waiter comparator expected *string value, got %T", pathValue)
}
if string(*value) == expectedValue {
return false, nil
}
}
}
return true, nil
}
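// Example (editor's illustrative sketch): overriding the default retryable
// check through the Retryable option documented above, so the waiter stops on
// any API error instead of retrying. The semantics mirror the doc comment:
// retry -> (true, nil), success -> (false, nil), failure -> error.
func exampleStopOnErrorOption() func(*AnyInstanceInServiceWaiterOptions) {
    return func(o *AnyInstanceInServiceWaiterOptions) {
        o.Retryable = func(ctx context.Context, in *DescribeInstanceHealthInput, out *DescribeInstanceHealthOutput, err error) (bool, error) {
            if err != nil {
                return false, err // terminal: surface the error immediately
            }
            for _, s := range out.InstanceStates {
                if s.State != nil && *s.State == "InService" {
                    return false, nil // success: stop waiting
                }
            }
            return true, nil // keep retrying
        }
    }
}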
func newServiceMetadataMiddleware_opDescribeInstanceHealth(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "elasticloadbalancing",
OperationName: "DescribeInstanceHealth",
}
}
| anyInstanceInServiceStateRetryable |
semesterMapper.js | class semesterMapper {
static toDto (semesterModel) {
return semesterModel != null ? {
id: semesterModel.idsemester
} : null;
}
| }
}
module.exports = semesterMapper; | static toModel (semesterDto) {
return {
idsemester: semesterDto.id
} |
ef_key_slot_0_w2.rs | #[doc = "Register `ef_key_slot_0_w2` reader"]
pub struct R(crate::R<EF_KEY_SLOT_0_W2_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<EF_KEY_SLOT_0_W2_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<crate::R<EF_KEY_SLOT_0_W2_SPEC>> for R {
#[inline(always)]
fn from(reader: crate::R<EF_KEY_SLOT_0_W2_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Register `ef_key_slot_0_w2` writer"]
pub struct W(crate::W<EF_KEY_SLOT_0_W2_SPEC>);
impl core::ops::Deref for W {
type Target = crate::W<EF_KEY_SLOT_0_W2_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::ops::DerefMut for W {
#[inline(always)] | }
}
impl From<crate::W<EF_KEY_SLOT_0_W2_SPEC>> for W {
#[inline(always)]
fn from(writer: crate::W<EF_KEY_SLOT_0_W2_SPEC>) -> Self {
W(writer)
}
}
#[doc = "Field `ef_key_slot_0_w2` reader - "]
pub struct EF_KEY_SLOT_0_W2_R(crate::FieldReader<u32, u32>);
impl EF_KEY_SLOT_0_W2_R {
#[inline(always)]
pub(crate) fn new(bits: u32) -> Self {
EF_KEY_SLOT_0_W2_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for EF_KEY_SLOT_0_W2_R {
type Target = crate::FieldReader<u32, u32>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `ef_key_slot_0_w2` writer - "]
pub struct EF_KEY_SLOT_0_W2_W<'a> {
w: &'a mut W,
}
impl<'a> EF_KEY_SLOT_0_W2_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u32) -> &'a mut W {
self.w.bits = value;
self.w
}
}
impl R {
#[doc = "Bits 0:31"]
#[inline(always)]
pub fn ef_key_slot_0_w2(&self) -> EF_KEY_SLOT_0_W2_R {
EF_KEY_SLOT_0_W2_R::new(self.bits)
}
}
impl W {
#[doc = "Bits 0:31"]
#[inline(always)]
pub fn ef_key_slot_0_w2(&mut self) -> EF_KEY_SLOT_0_W2_W {
EF_KEY_SLOT_0_W2_W { w: self }
}
#[doc = "Writes raw bits to the register."]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.0.bits(bits);
self
}
}
#[doc = "ef_key_slot_0_w2.\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [ef_key_slot_0_w2](index.html) module"]
pub struct EF_KEY_SLOT_0_W2_SPEC;
impl crate::RegisterSpec for EF_KEY_SLOT_0_W2_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [ef_key_slot_0_w2::R](R) reader structure"]
impl crate::Readable for EF_KEY_SLOT_0_W2_SPEC {
type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [ef_key_slot_0_w2::W](W) writer structure"]
impl crate::Writable for EF_KEY_SLOT_0_W2_SPEC {
type Writer = W;
}
#[doc = "`reset()` method sets ef_key_slot_0_w2 to value 0"]
impl crate::Resettable for EF_KEY_SLOT_0_W2_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0
}
} | fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0 |
scraper_test.py | from itunes_app_scraper.scraper import AppStoreScraper
from itunes_app_scraper.util import AppStoreException, AppStoreCollections, AppStoreCategories, AppStoreUtils
import json
import pytest
import os
def test_term_no_exception():
scraper = AppStoreScraper()
results = scraper.get_app_ids_for_query("mindful", country="gb", lang="en")
assert len(results) > 0
def test_no_term_gives_exception():
scraper = AppStoreScraper()
with pytest.raises(AppStoreException, match = "No term was given"):
scraper.get_app_ids_for_query("", country="gb", lang="en")
def test_no_invalid_id_gives_exception():
scraper = AppStoreScraper()
with pytest.raises(AppStoreException, match = "No app found with ID 872"):
scraper.get_app_details('872')
def test_no_invalid_id_in_multiple_is_empty():
scraper = AppStoreScraper()
assert len(list(scraper.get_multiple_app_details(['872']))) == 0
def test_no_invalid_id_in_multiple_writes_log():
|
def test_log_file_write_message():
scraper = AppStoreScraper()
scraper._log_error("gb","test")
assert os.path.exists("gb_log.txt")
fh = open('gb_log.txt')
assert "test" in fh.read()
fh.close()
os.remove('gb_log.txt')
def test_country_code_does_exist():
scraper = AppStoreScraper()
assert scraper.get_store_id_for_country('gb') == 143444
def test_country_code_does_not_exist():
scraper = AppStoreScraper()
with pytest.raises(AppStoreException, match="Country code not found for XZ"):
scraper.get_store_id_for_country('xz') | scraper = AppStoreScraper()
scraper.get_multiple_app_details(['872'])
assert os.path.exists("nl_log.txt")
fh = open('nl_log.txt')
assert "No app found with ID 872" in fh.read()
fh.close()
os.remove('nl_log.txt') |
sql.py | from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from utils.config import DB_URL
# SQLALCHEMY_DATABASE_URL = "postgresql://user:password@postgresserver/db"
engine = create_engine(
DB_URL, connect_args={"check_same_thread": False}
)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Base = declarative_base()
# Dependency
def get_db():
| db = SessionLocal()
try:
yield db
finally:
db.close() |
|
index.js | import {
refreshQuestions,
paginateQuestions,
} from '@codetanzania/emis-api-states';
import { Button, Checkbox, Col, Pagination, Row } from 'antd';
import PropTypes from 'prop-types';
import React from 'react';
import { notifyError, notifySuccess } from '../../../../util';
import './styles.css';
/**
* @function
* @name QuestionsActionBar
* @description Render action bar for actions which are applicable to list
* content
*
* @param {object} props props object
* @param {number} props.page current page
* @param {number} props.total total number of questions | * @returns {object} React Components
*
* @version 0.1.0
* @since 0.1.0
*/
const QuestionsActionBar = ({ page, total, onFilter }) => (
<div className="QuestionsActionBar">
<Row>
<Col span={1} xl={1} className="checkbox">
<Checkbox />
</Col>
<Col span={1} xl={1}>
<Button
shape="circle"
icon="reload"
title="Refresh Questions"
onClick={() =>
refreshQuestions(
() => {
notifySuccess('Questions refreshed successfully');
},
() => {
notifyError(
`An Error occurred while refreshing questions,
please contact system administrator!`
);
}
)
}
className="actionButton"
size="large"
/>
</Col>
<Col span={1} xl={1}>
<Button
type="circle"
icon="cloud-download"
title="Export selected Questions"
className="actionButton"
size="large"
/>
</Col>
<Col span={1} xl={1}>
<Button
type="circle"
icon="share-alt"
title="Share selected Questions"
className="actionButton"
size="large"
/>
</Col>
<Col span={1} xl={1}>
<Button
type="circle"
icon="hdd"
title="Archive selected Questions"
className="actionButton"
size="large"
/>
</Col>
<Col
span={1}
offset={15}
xl={{ span: 1, offset: 14 }}
xxl={{ span: 1, offset: 15 }}
>
<Button
type="circle"
icon="filter"
title="Filter Questions"
className="actionButton"
size="large"
onClick={onFilter}
/>
</Col>
<Col span={3} xl={4} xxl={3}>
<Pagination
simple
defaultCurrent={page}
total={total}
onChange={nextPage => paginateQuestions(nextPage)}
className="pagination"
/>
</Col>
</Row>
</div>
);
/* props validation */
QuestionsActionBar.propTypes = {
page: PropTypes.number.isRequired,
total: PropTypes.number.isRequired,
onFilter: PropTypes.func.isRequired,
};
export default QuestionsActionBar; | * @param {Function} props.onFilter function to filters question
* |
test_djangocache.py | # -*- coding: utf-8 -*-
# Most of this file was copied from:
# https://raw.githubusercontent.com/django/django/1.11.12/tests/cache/tests.py
# Unit tests for cache framework
# Uses whatever cache backend is set in the test settings file.
from __future__ import unicode_literals
import copy
import io
import os
import re
import shutil
import tempfile
import threading
import time
import unittest
import warnings
from django.conf import settings
from django.core import management, signals
from django.core.cache import (
DEFAULT_CACHE_ALIAS, CacheKeyWarning, cache, caches,
)
from django.core.cache.utils import make_template_fragment_key
from django.db import close_old_connections, connection, connections
from django.http import (
HttpRequest, HttpResponse, HttpResponseNotModified, StreamingHttpResponse,
)
from django.middleware.cache import (
CacheMiddleware, FetchFromCacheMiddleware, UpdateCacheMiddleware,
)
from django.middleware.csrf import CsrfViewMiddleware
from django.template import engines
from django.template.context_processors import csrf
from django.template.response import TemplateResponse
from django.test import (
RequestFactory, SimpleTestCase, TestCase, TransactionTestCase,
ignore_warnings, mock, override_settings,
)
from django.test.signals import setting_changed
from django.utils import six, timezone, translation
from django.utils.cache import (
get_cache_key, learn_cache_key, patch_cache_control,
patch_response_headers, patch_vary_headers,
)
from django.utils.deprecation import RemovedInDjango21Warning
from django.utils.encoding import force_text
from django.views.decorators.cache import cache_page
################################################################################
# Setup Django for models import.
################################################################################
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tests.settings')
############################################################################
# GrantJ 2017-03-27 Ignore deprecation warnings. Django's metaclass magic does
# not always play well with Python 3.6. Read
# http://stackoverflow.com/questions/41343263/ for details
############################################################################
import warnings
warnings.filterwarnings('ignore', category=DeprecationWarning)
import django
django.setup()
from .models import Poll, expensive_calculation
try: # Use the same idiom as in cache backends
from django.utils.six.moves import cPickle as pickle
except ImportError:
import pickle
# functions/classes for complex data type tests
def f():
return 42
class C:
def m(n):
return 24
class Unpicklable(object):
def __getstate__(self):
raise pickle.PickleError()
class UnpicklableType(object):
# Unpicklable using the default pickling protocol on Python 2.
__slots__ = 'a',
def custom_key_func(key, key_prefix, version):
"A customized cache key function"
return 'CUSTOM-' + '-'.join([key_prefix, str(version), key])
def custom_key_func2(key, key_prefix, version):
"Another customized cache key function"
return '-'.join(['CUSTOM', key_prefix, str(version), key])
_caches_setting_base = {
'default': {},
'prefix': {'KEY_PREFIX': 'cacheprefix{}'.format(os.getpid())},
'v2': {'VERSION': 2},
'custom_key': {'KEY_FUNCTION': custom_key_func},
'custom_key2': {'KEY_FUNCTION': custom_key_func2},
'cull': {'OPTIONS': {'MAX_ENTRIES': 30}},
'zero_cull': {'OPTIONS': {'CULL_FREQUENCY': 0, 'MAX_ENTRIES': 30}},
}
def caches_setting_for_tests(base=None, exclude=None, **params):
# `base` is used to pull in the memcached config from the original settings,
# `exclude` is a set of cache names denoting which `_caches_setting_base` keys
# should be omitted.
# `params` are test specific overrides and `_caches_settings_base` is the
# base config for the tests.
# This results in the following search order:
# params -> _caches_setting_base -> base
base = base or {}
exclude = exclude or set()
setting = {k: base.copy() for k in _caches_setting_base.keys() if k not in exclude}
for key, cache_params in setting.items():
cache_params.update(_caches_setting_base[key])
cache_params.update(params)
return setting
class BaseCacheTests(object):
# A common set of tests to apply to all cache backends
def setUp(self):
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def test_simple(self):
# Simple cache set/get works
cache.set("key", "value")
self.assertEqual(cache.get("key"), "value")
def test_add(self):
# A key can be added to a cache
cache.add("addkey1", "value")
result = cache.add("addkey1", "newvalue")
self.assertFalse(result)
self.assertEqual(cache.get("addkey1"), "value")
def test_prefix(self):
# Test for same cache key conflicts between shared backend
cache.set('somekey', 'value')
# should not be set in the prefixed cache
self.assertFalse(caches['prefix'].has_key('somekey'))
caches['prefix'].set('somekey', 'value2')
self.assertEqual(cache.get('somekey'), 'value')
self.assertEqual(caches['prefix'].get('somekey'), 'value2')
def test_non_existent(self):
# Non-existent cache keys return as None/default
# get with non-existent keys
self.assertIsNone(cache.get("does_not_exist"))
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
# Multiple cache keys can be returned using get_many
cache.set('a', 'a')
cache.set('b', 'b')
cache.set('c', 'c')
cache.set('d', 'd')
self.assertDictEqual(cache.get_many(['a', 'c', 'd']), {'a': 'a', 'c': 'c', 'd': 'd'})
self.assertDictEqual(cache.get_many(['a', 'b', 'e']), {'a': 'a', 'b': 'b'})
def test_delete(self):
# Cache keys can be deleted
cache.set("key1", "spam")
cache.set("key2", "eggs")
self.assertEqual(cache.get("key1"), "spam")
cache.delete("key1")
self.assertIsNone(cache.get("key1"))
self.assertEqual(cache.get("key2"), "eggs")
def test_has_key(self):
# The cache can be inspected for cache keys
cache.set("hello1", "goodbye1")
self.assertTrue(cache.has_key("hello1"))
self.assertFalse(cache.has_key("goodbye1"))
cache.set("no_expiry", "here", None)
self.assertTrue(cache.has_key("no_expiry"))
def test_in(self):
# The in operator can be used to inspect cache contents
cache.set("hello2", "goodbye2")
self.assertIn("hello2", cache)
self.assertNotIn("goodbye2", cache)
def test_incr(self):
# Cache values can be incremented
cache.set('answer', 41)
self.assertEqual(cache.incr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.incr('answer', 10), 52)
self.assertEqual(cache.get('answer'), 52)
self.assertEqual(cache.incr('answer', -10), 42)
with self.assertRaises(ValueError):
cache.incr('does_not_exist')
def test_decr(self):
# Cache values can be decremented
cache.set('answer', 43)
self.assertEqual(cache.decr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.decr('answer', 10), 32)
self.assertEqual(cache.get('answer'), 32)
self.assertEqual(cache.decr('answer', -10), 42)
with self.assertRaises(ValueError):
cache.decr('does_not_exist')
def test_close(self):
self.assertTrue(hasattr(cache, 'close'))
cache.close()
def test_data_types(self):
# Many different data types can be cached
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
self.assertEqual(cache.get("stuff"), stuff)
def test_cache_read_for_model_instance(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
my_poll = Poll.objects.create(question="Well?")
self.assertEqual(Poll.objects.count(), 1)
pub_date = my_poll.pub_date
cache.set('question', my_poll)
cached_poll = cache.get('question')
self.assertEqual(cached_poll.pub_date, pub_date)
# We only want the default expensive calculation run once
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_write_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache write
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
self.assertEqual(expensive_calculation.num_runs, 1)
cache.set('deferred_queryset', defer_qs)
# cache set should not re-evaluate default functions
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_read_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
cache.set('deferred_queryset', defer_qs)
self.assertEqual(expensive_calculation.num_runs, 1)
runs_before_cache_read = expensive_calculation.num_runs
cache.get('deferred_queryset')
# We only want the default expensive calculation run on creation and set
self.assertEqual(expensive_calculation.num_runs, runs_before_cache_read)
def test_touch(self):
# cache.touch() updates the timeout.
cache.set('expire1', 'very quickly', timeout=1)
self.assertTrue(cache.touch('expire1', timeout=2))
time.sleep(1)
self.assertTrue(cache.has_key('expire1'))
time.sleep(2)
self.assertFalse(cache.has_key('expire1'))
# cache.touch() works without the timeout argument.
cache.set('expire1', 'very quickly', timeout=1)
self.assertTrue(cache.touch('expire1'))
time.sleep(2)
self.assertTrue(cache.has_key('expire1'))
self.assertFalse(cache.touch('nonexistent'))
def test_expiration(self):
# Cache values can be set to expire
cache.set('expire1', 'very quickly', 1)
cache.set('expire2', 'very quickly', 1)
cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertIsNone(cache.get("expire1"))
cache.add("expire2", "newvalue")
self.assertEqual(cache.get("expire2"), "newvalue")
self.assertFalse(cache.has_key("expire3"))
def test_unicode(self):
# Unicode values can be cached
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}
}
# Test `set`
for (key, value) in stuff.items():
cache.set(key, value)
self.assertEqual(cache.get(key), value)
# Test `add`
for (key, value) in stuff.items():
cache.delete(key)
cache.add(key, value)
self.assertEqual(cache.get(key), value)
# Test `set_many`
for (key, value) in stuff.items():
cache.delete(key)
cache.set_many(stuff)
for (key, value) in stuff.items():
self.assertEqual(cache.get(key), value)
def test_binary_string(self):
# Binary strings should be cacheable
from zlib import compress, decompress
value = 'value_to_be_compressed'
compressed_value = compress(value.encode())
# Test set
cache.set('binary1', compressed_value)
compressed_result = cache.get('binary1')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test add
cache.add('binary1-add', compressed_value)
compressed_result = cache.get('binary1-add')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test set_many
cache.set_many({'binary1-set_many': compressed_value})
compressed_result = cache.get('binary1-set_many')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
def test_set_many(self):
# Multiple keys can be set using set_many
cache.set_many({"key1": "spam", "key2": "eggs"})
self.assertEqual(cache.get("key1"), "spam")
self.assertEqual(cache.get("key2"), "eggs")
def test_set_many_expiration(self):
# set_many takes a second ``timeout`` parameter
cache.set_many({"key1": "spam", "key2": "eggs"}, 1)
time.sleep(2)
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_delete_many(self):
# Multiple keys can be deleted using delete_many
cache.set("key1", "spam")
cache.set("key2", "eggs")
cache.set("key3", "ham")
cache.delete_many(["key1", "key2"])
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
self.assertEqual(cache.get("key3"), "ham")
def test_clear(self):
# The cache can be emptied using clear
cache.set("key1", "spam")
cache.set("key2", "eggs")
cache.clear()
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_long_timeout(self):
"""
Follow memcached's convention where a timeout greater than 30 days is
treated as an absolute expiration timestamp instead of a relative
offset (#12399).
"""
cache.set('key1', 'eggs', 60 * 60 * 24 * 30 + 1) # 30 days + 1 second
self.assertEqual(cache.get('key1'), 'eggs')
cache.add('key2', 'ham', 60 * 60 * 24 * 30 + 1)
self.assertEqual(cache.get('key2'), 'ham')
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 60 * 60 * 24 * 30 + 1)
self.assertEqual(cache.get('key3'), 'sausage')
self.assertEqual(cache.get('key4'), 'lobster bisque')
def test_forever_timeout(self):
"""
Passing in None into timeout results in a value that is cached forever
"""
cache.set('key1', 'eggs', None)
self.assertEqual(cache.get('key1'), 'eggs')
cache.add('key2', 'ham', None)
self.assertEqual(cache.get('key2'), 'ham')
added = cache.add('key1', 'new eggs', None)
self.assertIs(added, False)
self.assertEqual(cache.get('key1'), 'eggs')
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, None)
self.assertEqual(cache.get('key3'), 'sausage')
self.assertEqual(cache.get('key4'), 'lobster bisque')
cache.set('key5', 'belgian fries', timeout=1)
cache.touch('key5', timeout=None)
time.sleep(2)
self.assertEqual(cache.get('key5'), 'belgian fries')
def test_zero_timeout(self):
"""
Passing in zero into timeout results in a value that is not cached
"""
cache.set('key1', 'eggs', 0)
self.assertIsNone(cache.get('key1'))
cache.add('key2', 'ham', 0)
self.assertIsNone(cache.get('key2'))
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 0)
self.assertIsNone(cache.get('key3'))
self.assertIsNone(cache.get('key4'))
cache.set('key5', 'belgian fries', timeout=5)
cache.touch('key5', timeout=0)
self.assertIsNone(cache.get('key5'))
def test_float_timeout(self):
# Make sure a timeout given as a float doesn't crash anything.
cache.set("key1", "spam", 100.2)
self.assertEqual(cache.get("key1"), "spam")
def _perform_cull_test(self, cull_cache, initial_count, final_count):
# Create initial cache key entries. This will overflow the cache,
# causing a cull.
for i in range(1, initial_count):
cull_cache.set('cull%d' % i, 'value', 1000)
count = 0
# Count how many keys are left in the cache.
for i in range(1, initial_count):
if cull_cache.has_key('cull%d' % i):
count += 1
self.assertEqual(count, final_count)
def test_cull(self):
self._perform_cull_test(caches['cull'], 50, 29)
def test_zero_cull(self):
self._perform_cull_test(caches['zero_cull'], 50, 19)
def _perform_invalid_key_test(self, key, expected_warning):
"""
All the builtin backends (except memcached, see below) should warn on
keys that would be refused by memcached. This encourages portable
caching code without making it too difficult to use production backends
with more liberal key rules. Refs #6447.
"""
# mimic custom ``make_key`` method being defined since the default will
# never show the below warnings
def func(key, *args):
return key
old_func = cache.key_func
cache.key_func = func
try:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
cache.set(key, 'value')
self.assertEqual(len(w), 1)
self.assertIsInstance(w[0].message, CacheKeyWarning)
self.assertEqual(str(w[0].message.args[0]), expected_warning)
finally:
cache.key_func = old_func
def test_invalid_key_characters(self):
# memcached doesn't allow whitespace or control characters in keys.
key = 'key with spaces and 清'
expected_warning = (
"Cache key contains characters that will cause errors if used "
"with memcached: %r" % key
)
self._perform_invalid_key_test(key, expected_warning)
def test_invalid_key_length(self):
# memcached limits key length to 250.
key = ('a' * 250) + '清'
expected_warning = (
'Cache key will cause errors if used with memcached: '
'%r (longer than %s)' % (key, 250)
)
self._perform_invalid_key_test(key, expected_warning)
def test_cache_versioning_get_set(self):
# set, using default version = 1
cache.set('answer1', 42)
self.assertEqual(cache.get('answer1'), 42)
self.assertEqual(cache.get('answer1', version=1), 42)
self.assertIsNone(cache.get('answer1', version=2))
self.assertIsNone(caches['v2'].get('answer1'))
self.assertEqual(caches['v2'].get('answer1', version=1), 42)
self.assertIsNone(caches['v2'].get('answer1', version=2))
# set, default version = 1, but manually override version = 2
cache.set('answer2', 42, version=2)
self.assertIsNone(cache.get('answer2'))
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
# v2 set, using default version = 2
caches['v2'].set('answer3', 42)
self.assertIsNone(cache.get('answer3'))
self.assertIsNone(cache.get('answer3', version=1))
self.assertEqual(cache.get('answer3', version=2), 42)
self.assertEqual(caches['v2'].get('answer3'), 42)
self.assertIsNone(caches['v2'].get('answer3', version=1))
self.assertEqual(caches['v2'].get('answer3', version=2), 42)
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set('answer4', 42, version=1)
self.assertEqual(cache.get('answer4'), 42)
self.assertEqual(cache.get('answer4', version=1), 42)
self.assertIsNone(cache.get('answer4', version=2))
self.assertIsNone(caches['v2'].get('answer4'))
self.assertEqual(caches['v2'].get('answer4', version=1), 42)
self.assertIsNone(caches['v2'].get('answer4', version=2))
def test_cache_versioning_add(self):
# add, default version = 1, but manually override version = 2
cache.add('answer1', 42, version=2)
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.add('answer1', 37, version=2)
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.add('answer1', 37, version=1)
self.assertEqual(cache.get('answer1', version=1), 37)
self.assertEqual(cache.get('answer1', version=2), 42)
# v2 add, using default version = 2
caches['v2'].add('answer2', 42)
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
caches['v2'].add('answer2', 37)
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
caches['v2'].add('answer2', 37, version=1)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 42)
# v2 add, default version = 2, but manually override version = 1
caches['v2'].add('answer3', 42, version=1)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertIsNone(cache.get('answer3', version=2))
caches['v2'].add('answer3', 37, version=1)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertIsNone(cache.get('answer3', version=2))
caches['v2'].add('answer3', 37)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertEqual(cache.get('answer3', version=2), 37)
def test_cache_versioning_has_key(self):
cache.set('answer1', 42)
# has_key
self.assertTrue(cache.has_key('answer1'))
self.assertTrue(cache.has_key('answer1', version=1))
self.assertFalse(cache.has_key('answer1', version=2))
self.assertFalse(caches['v2'].has_key('answer1'))
self.assertTrue(caches['v2'].has_key('answer1', version=1))
self.assertFalse(caches['v2'].has_key('answer1', version=2))
def test_cache_versioning_delete(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
cache.delete('answer1')
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
cache.delete('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertIsNone(cache.get('answer2', version=2))
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
caches['v2'].delete('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertIsNone(cache.get('answer3', version=2))
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
caches['v2'].delete('answer4', version=1)
self.assertIsNone(cache.get('answer4', version=1))
self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_incr_decr(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
cache.incr('answer1')
self.assertEqual(cache.get('answer1', version=1), 38)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.decr('answer1')
self.assertEqual(cache.get('answer1', version=1), 37)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
cache.incr('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 43)
cache.decr('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 42)
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
caches['v2'].incr('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), 43)
caches['v2'].decr('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), 42)
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
caches['v2'].incr('answer4', version=1)
self.assertEqual(cache.get('answer4', version=1), 38)
self.assertEqual(cache.get('answer4', version=2), 42)
caches['v2'].decr('answer4', version=1)
self.assertEqual(cache.get('answer4', version=1), 37)
self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_get_set_many(self):
# set, using default version = 1
cache.set_many({'ford1': 37, 'arthur1': 42})
self.assertDictEqual(cache.get_many(['ford1', 'arthur1']), {'ford1': 37, 'arthur1': 42})
self.assertDictEqual(cache.get_many(['ford1', 'arthur1'], version=1), {'ford1': 37, 'arthur1': 42})
self.assertDictEqual(cache.get_many(['ford1', 'arthur1'], version=2), {})
self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1']), {})
self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=1), {'ford1': 37, 'arthur1': 42})
self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=2), {})
# set, default version = 1, but manually override version = 2
cache.set_many({'ford2': 37, 'arthur2': 42}, version=2)
self.assertDictEqual(cache.get_many(['ford2', 'arthur2']), {})
self.assertDictEqual(cache.get_many(['ford2', 'arthur2'], version=1), {})
self.assertDictEqual(cache.get_many(['ford2', 'arthur2'], version=2), {'ford2': 37, 'arthur2': 42})
self.assertDictEqual(caches['v2'].get_many(['ford2', 'arthur2']), {'ford2': 37, 'arthur2': 42})
self.assertDictEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=1), {})
self.assertDictEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=2), {'ford2': 37, 'arthur2': 42})
# v2 set, using default version = 2
caches['v2'].set_many({'ford3': 37, 'arthur3': 42})
self.assertDictEqual(cache.get_many(['ford3', 'arthur3']), {})
self.assertDictEqual(cache.get_many(['ford3', 'arthur3'], version=1), {})
self.assertDictEqual(cache.get_many(['ford3', 'arthur3'], version=2), {'ford3': 37, 'arthur3': 42})
self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3']), {'ford3': 37, 'arthur3': 42})
self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=1), {})
self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=2), {'ford3': 37, 'arthur3': 42})
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set_many({'ford4': 37, 'arthur4': 42}, version=1)
self.assertDictEqual(cache.get_many(['ford4', 'arthur4']), {'ford4': 37, 'arthur4': 42})
self.assertDictEqual(cache.get_many(['ford4', 'arthur4'], version=1), {'ford4': 37, 'arthur4': 42})
self.assertDictEqual(cache.get_many(['ford4', 'arthur4'], version=2), {})
self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4']), {})
self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=1), {'ford4': 37, 'arthur4': 42})
self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=2), {})
def test_incr_version(self):
cache.set('answer', 42, version=2)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertEqual(cache.get('answer', version=2), 42)
self.assertIsNone(cache.get('answer', version=3))
self.assertEqual(cache.incr_version('answer', version=2), 3)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertIsNone(cache.get('answer', version=2))
self.assertEqual(cache.get('answer', version=3), 42)
caches['v2'].set('answer2', 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
self.assertIsNone(caches['v2'].get('answer2', version=3))
self.assertEqual(caches['v2'].incr_version('answer2'), 3)
self.assertIsNone(caches['v2'].get('answer2'))
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertIsNone(caches['v2'].get('answer2', version=2))
self.assertEqual(caches['v2'].get('answer2', version=3), 42)
with self.assertRaises(ValueError):
cache.incr_version('does_not_exist')
def test_decr_version(self):
cache.set('answer', 42, version=2)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertEqual(cache.get('answer', version=2), 42)
self.assertEqual(cache.decr_version('answer', version=2), 1)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.get('answer', version=1), 42)
self.assertIsNone(cache.get('answer', version=2))
caches['v2'].set('answer2', 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
self.assertEqual(caches['v2'].decr_version('answer2'), 1)
self.assertIsNone(caches['v2'].get('answer2'))
self.assertEqual(caches['v2'].get('answer2', version=1), 42)
self.assertIsNone(caches['v2'].get('answer2', version=2))
with self.assertRaises(ValueError):
cache.decr_version('does_not_exist', version=2)
def test_custom_key_func(self):
# Two caches with different key functions aren't visible to each other
cache.set('answer1', 42)
self.assertEqual(cache.get('answer1'), 42)
self.assertIsNone(caches['custom_key'].get('answer1'))
self.assertIsNone(caches['custom_key2'].get('answer1'))
caches['custom_key'].set('answer2', 42)
self.assertIsNone(cache.get('answer2'))
self.assertEqual(caches['custom_key'].get('answer2'), 42)
self.assertEqual(caches['custom_key2'].get('answer2'), 42)
def test_cache_write_unpicklable_object(self):
update_middleware = UpdateCacheMiddleware()
update_middleware.cache = cache
fetch_middleware = FetchFromCacheMiddleware()
fetch_middleware.cache = cache
request = self.factory.get('/cache/test')
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
response = HttpResponse()
content = 'Testing cookie serialization.'
response.content = content
response.set_cookie('foo', 'bar')
update_middleware.process_response(request, response)
get_cache_data = fetch_middleware.process_request(request) | self.assertEqual(get_cache_data.content, content.encode('utf-8'))
self.assertEqual(get_cache_data.cookies, response.cookies)
update_middleware.process_response(request, get_cache_data)
get_cache_data = fetch_middleware.process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode('utf-8'))
self.assertEqual(get_cache_data.cookies, response.cookies)
def test_add_fail_on_pickleerror(self):
# Shouldn't fail silently if trying to cache an unpicklable type.
with self.assertRaises(pickle.PickleError):
cache.add('unpicklable', Unpicklable())
def test_set_fail_on_pickleerror(self):
with self.assertRaises(pickle.PickleError):
cache.set('unpicklable', Unpicklable())
def test_get_or_set(self):
self.assertIsNone(cache.get('projector'))
self.assertEqual(cache.get_or_set('projector', 42), 42)
self.assertEqual(cache.get('projector'), 42)
self.assertEqual(cache.get_or_set('null', None), None)
def test_get_or_set_callable(self):
def my_callable():
return 'value'
self.assertEqual(cache.get_or_set('mykey', my_callable), 'value')
self.assertEqual(cache.get_or_set('mykey', my_callable()), 'value')
def test_get_or_set_callable_returning_none(self):
self.assertIsNone(cache.get_or_set('mykey', lambda: None))
# Previous get_or_set() doesn't store None in the cache.
self.assertEqual(cache.get('mykey', 'default'), 'default')
def test_get_or_set_version(self):
cache.get_or_set('brian', 1979, version=2)
with self.assertRaises(TypeError):
cache.get_or_set('brian')
with self.assertRaises(TypeError):
cache.get_or_set('brian', version=1)
self.assertIsNone(cache.get('brian', version=1))
self.assertEqual(cache.get_or_set('brian', 42, version=1), 42)
self.assertEqual(cache.get_or_set('brian', 1979, version=2), 1979)
self.assertIsNone(cache.get('brian', version=3))
def test_get_or_set_racing(self):
with mock.patch('%s.%s' % (settings.CACHES['default']['BACKEND'], 'add')) as cache_add:
# Simulate cache.add() failing to add a value. In that case, the
# default value should be returned.
cache_add.return_value = False
self.assertEqual(cache.get_or_set('key', 'default'), 'default')
class PicklingSideEffect(object):
def __init__(self, cache):
self.cache = cache
self.locked = False
def __getstate__(self):
if self.cache._lock.active_writers:
self.locked = True
return {}
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='diskcache.DjangoCache',
))
class DiskCacheTests(BaseCacheTests, TestCase):
"Specific test cases for diskcache.DjangoCache."
def setUp(self):
super(DiskCacheTests, self).setUp()
self.dirname = tempfile.mkdtemp()
# Cache location cannot be modified through override_settings / modify_settings,
# hence settings are manipulated directly here and the setting_changed signal
# is triggered manually.
for cache_params in settings.CACHES.values():
cache_params.update({'LOCATION': self.dirname})
setting_changed.send(self.__class__, setting='CACHES', enter=False)
def tearDown(self):
super(DiskCacheTests, self).tearDown()
cache.close()
shutil.rmtree(self.dirname, ignore_errors=True)
def test_ignores_non_cache_files(self):
fname = os.path.join(self.dirname, 'not-a-cache-file')
with open(fname, 'w'):
os.utime(fname, None)
cache.clear()
self.assertTrue(os.path.exists(fname),
'Expected cache.clear to ignore non cache files')
os.remove(fname)
def test_clear_does_not_remove_cache_dir(self):
cache.clear()
self.assertTrue(os.path.exists(self.dirname),
'Expected cache.clear to keep the cache dir')
def test_cache_write_unpicklable_type(self):
# This fails if not using the highest pickling protocol on Python 2.
cache.set('unpicklable', UnpicklableType())
def test_cull(self):
cache.cull()
def test_zero_cull(self):
pass # DiskCache has its own cull strategy.
def test_invalid_key_characters(self):
pass # DiskCache supports any Pickle-able value as a cache key.
def test_invalid_key_length(self):
pass # DiskCache supports any Pickle-able value as a cache key.
def test_directory(self):
self.assertTrue('tmp' in cache.directory)
def test_read(self):
value = b'abcd' * 2 ** 20
result = cache.set(b'test-key', value)
self.assertTrue(result)
with cache.read(b'test-key') as reader:
self.assertEqual(reader.read(), value)
try:
with cache.read(b'dne') as reader:
error = False
except KeyError:
error = True
self.assertTrue(error)
def test_expire(self):
cache.clear()
cache.set(b'expire-key', 0, timeout=0.05)
time.sleep(0.1)
self.assertEqual(cache.expire(), 1)
self.assertEqual(cache.get(b'expire-key'), None)
def test_evict(self):
cache.clear()
for num in range(100):
cache.set(num, num, tag=(num % 4))
self.assertEqual(cache.evict(1), 25)
cache.create_tag_index()
self.assertEqual(cache.evict(2), 25)
cache.drop_tag_index()
self.assertEqual(cache.evict(3), 25)
for num in range(0, 100, 4):
self.assertEqual(cache.get(num), num)
def test_pop(self):
cache.clear()
for num in range(5):
cache.set(num, num, timeout=None)
self.assertEqual(cache.pop(0), 0)
self.assertEqual(cache.pop(0), None)
self.assertEqual(cache.pop(0, 1), 1)
self.assertEqual(cache.pop(0, default=1), 1)
self.assertEqual(cache.pop(1, expire_time=True), (1, None))
self.assertEqual(cache.pop(2, tag=True), (2, None))
self.assertEqual(cache.pop(3, expire_time=True, tag=True), (3, None, None))
self.assertEqual(cache.pop(4, retry=False), 4)
def test_pickle(self):
letters = 'abcde'
cache.clear()
for num, val in enumerate(letters):
cache.set(val, num)
data = pickle.dumps(cache)
other = pickle.loads(data)
for key in letters:
self.assertEqual(other.get(key), cache.get(key))
def test_cache(self):
subcache = cache.cache('test')
directory = os.path.join(cache.directory, 'cache', 'test')
self.assertEqual(subcache.directory, directory)
def test_deque(self):
deque = cache.deque('test')
directory = os.path.join(cache.directory, 'deque', 'test')
self.assertEqual(deque.directory, directory)
def test_index(self):
index = cache.index('test')
directory = os.path.join(cache.directory, 'index', 'test')
self.assertEqual(index.directory, directory)
def test_memoize(self):
with self.assertRaises(TypeError):
@cache.memoize # <-- Missing parens!
def test():
pass
count = 1000
def fibiter(num):
alpha, beta = 0, 1
for _ in range(num):
alpha, beta = beta, alpha + beta
return alpha
@cache.memoize()
def fibrec(num):
if num == 0:
return 0
elif num == 1:
return 1
else:
return fibrec(num - 1) + fibrec(num - 2)
cache.stats(enable=True)
for value in range(count):
self.assertEqual(fibrec(value), fibiter(value))
hits1, misses1 = cache.stats()
for value in range(count):
self.assertEqual(fibrec(value), fibiter(value))
hits2, misses2 = cache.stats()
self.assertEqual(hits2, hits1 + count)
self.assertEqual(misses2, misses1) | self.assertIsNotNone(get_cache_data) |
parser.go | package parser
import (
"encoding/json"
"go/ast"
goparser "go/parser"
"go/token"
"log"
"os"
"path/filepath"
"regexp"
"runtime"
"strings"
"fmt"
)
var vendoringPath string
type Parser struct {
Listing *ResourceListing
TopLevelApis map[string]*ApiDeclaration
PackagesCache map[string]map[string]*ast.Package
CurrentPackage string
TypeDefinitions map[string]map[string]*ast.TypeSpec
PackagePathCache map[string]string
PackageImports map[string]map[string][]string
BasePath, ControllerClass, Ignore string
IsController func(*ast.FuncDecl, string) bool
TypesImplementingMarshalInterface map[string]string
}
func NewParser() *Parser {
return &Parser{
Listing: &ResourceListing{
Infos: Infomation{},
Apis: make([]*ApiRef, 0),
},
PackagesCache: make(map[string]map[string]*ast.Package),
TopLevelApis: make(map[string]*ApiDeclaration),
TypeDefinitions: make(map[string]map[string]*ast.TypeSpec),
PackagePathCache: make(map[string]string),
PackageImports: make(map[string]map[string][]string),
TypesImplementingMarshalInterface: make(map[string]string),
}
}
func (parser *Parser) IsImplementMarshalInterface(typeName string) bool {
_, ok := parser.TypesImplementingMarshalInterface[typeName]
return ok
}
// Read web/main.go to get general API info
func (parser *Parser) ParseGeneralApiInfo(mainApiFile string) {
fileSet := token.NewFileSet()
fileTree, err := goparser.ParseFile(fileSet, mainApiFile, nil, goparser.ParseComments)
if err != nil {
log.Fatalf("Can not parse general API information: %v\n", err)
}
parser.Listing.BasePath = "{{.}}"
parser.Listing.SwaggerVersion = SwaggerVersion
if fileTree.Comments != nil {
for _, comment := range fileTree.Comments {
for _, commentLine := range strings.Split(comment.Text(), "\n") {
attribute := strings.ToLower(strings.Split(commentLine, " ")[0])
switch attribute {
case "@apiversion":
parser.Listing.ApiVersion = strings.TrimSpace(commentLine[len(attribute):])
case "@apititle":
parser.Listing.Infos.Title = strings.TrimSpace(commentLine[len(attribute):])
case "@apidescription":
parser.Listing.Infos.Description = strings.TrimSpace(commentLine[len(attribute):])
case "@termsofserviceurl":
parser.Listing.Infos.TermsOfServiceUrl = strings.TrimSpace(commentLine[len(attribute):])
case "@contact":
parser.Listing.Infos.Contact = strings.TrimSpace(commentLine[len(attribute):])
case "@licenseurl":
parser.Listing.Infos.LicenseUrl = strings.TrimSpace(commentLine[len(attribute):])
case "@license":
parser.Listing.Infos.License = strings.TrimSpace(commentLine[len(attribute):])
case "@basepath":
parser.Listing.BasePath = strings.TrimSpace(commentLine[len(attribute):])
}
}
}
}
}
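// Example (editor's illustrative sketch): the kind of comment block
// ParseGeneralApiInfo expects to find in the main API file. Attribute matching
// is case-insensitive (the first token is lower-cased above); the values shown
// are placeholders.
//
// @APIVersion 1.0.0
// @APITitle Example Service API
// @APIDescription A short description of the service
// @Contact dev@example.com
// @License MIT
// @BasePath /api/v1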
func (parser *Parser) GetResourceListingJson() []byte {
json, err := json.MarshalIndent(parser.Listing, "", " ")
if err != nil {
log.Fatalf("Can not serialise ResourceListing to JSON: %v\n", err)
}
return json
}
func (parser *Parser) GetApiDescriptionJson() []byte {
json, err := json.MarshalIndent(parser.TopLevelApis, "", " ")
if err != nil {
log.Fatalf("Can not serialise []ApiDescription to JSON: %v\n", err)
}
return json
}
func (parser *Parser) CheckRealPackagePath(packagePath string) string {
packagePath = strings.Trim(packagePath, "\"")
if cachedResult, ok := parser.PackagePathCache[packagePath]; ok {
return cachedResult
}
// Hack vendoring of 'golang.org/x' by the standard library
if strings.HasPrefix(packagePath, "golang_org/x/") {
packagePath = filepath.Join("vendor", packagePath)
}
pkgRealpath := ""
goVersion := runtime.Version()
// check if vendoring is enabled for Go 1.5 or 1.6
vendorEnable := true
if goVersion == "go1.5" || goVersion == "go1.6" {
if os.Getenv("GO15VENDOREXPERIMENT") == "0" {
vendorEnable = false
}
}
// First check the vendor folder; vendoring in Go 1.7 and greater is officially
// supported. Evaluate whether the user specified a different vendor directory
// rather than using the current working directory to find vendor.
if vendorEnable {
var vendorPath string
if vendoringPath == "" {
vendorPath = filepath.Join("vendor", packagePath)
} else {
vendorPath = fmt.Sprintf("%s/%s", vendoringPath, packagePath)
}
if evalutedPath, err := filepath.EvalSymlinks(vendorPath); err == nil {
if _, err := os.Stat(evalutedPath); err == nil {
pkgRealpath = evalutedPath
}
}
}
// next, check GOPATH
if pkgRealpath == "" {
gopath := os.Getenv("GOPATH")
if gopath == "" {
log.Fatalf("Please, set $GOPATH environment variable\n")
}
gopathsList := filepath.SplitList(gopath)
for _, path := range gopathsList {
if evalutedPath, err := filepath.EvalSymlinks(filepath.Join(path, "src", packagePath)); err == nil {
if _, err := os.Stat(evalutedPath); err == nil {
pkgRealpath = evalutedPath
break
}
}
}
}
// next, check GOROOT (/src)
if pkgRealpath == "" {
goroot := filepath.Clean(runtime.GOROOT())
if goroot == "" {
log.Fatalf("Please, set $GOROOT environment variable\n")
}
if evalutedPath, err := filepath.EvalSymlinks(filepath.Join(goroot, "src", packagePath)); err == nil {
if _, err := os.Stat(evalutedPath); err == nil {
pkgRealpath = evalutedPath
}
}
// next, check GOROOT (/src/pkg) (for Go < 1.4)
if pkgRealpath == "" {
if evalutedPath, err := filepath.EvalSymlinks(filepath.Join(goroot, "src", "pkg", packagePath)); err == nil {
if _, err := os.Stat(evalutedPath); err == nil {
pkgRealpath = evalutedPath
}
}
}
}
parser.PackagePathCache[packagePath] = pkgRealpath
return pkgRealpath
}
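// GetRealPackagePath is like CheckRealPackagePath but exits with a fatal error when the package cannot be resolved.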
func (parser *Parser) GetRealPackagePath(packagePath string) string {
pkgRealpath := parser.CheckRealPackagePath(packagePath)
if pkgRealpath == "" {
log.Fatalf("Can not find package %s \n", packagePath)
}
return pkgRealpath
}
func (parser *Parser) GetPackageAst(packagePath string) map[string]*ast.Package {
//log.Printf("Parse %s package\n", packagePath)
if cache, ok := parser.PackagesCache[packagePath]; ok {
return cache
} else {
fileSet := token.NewFileSet()
astPackages, err := goparser.ParseDir(fileSet, packagePath, ParserFileFilter, goparser.ParseComments)
if err != nil {
log.Fatalf("Parse of %s pkg cause error: %s\n", packagePath, err)
}
parser.PackagesCache[packagePath] = astPackages
return astPackages
}
}
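// AddOperation files the operation under its top-level resource (or ForceResource),
// creating the ApiDeclaration and the resource-listing entry the first time the resource is seen.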
func (parser *Parser) AddOperation(op *Operation) {
path := []string{}
for _, pathPart := range strings.Split(op.Path, "/") {
if pathPart = strings.TrimSpace(pathPart); pathPart != "" {
path = append(path, pathPart)
}
}
resource := path[0]
if op.ForceResource != "" {
resource = op.ForceResource
}
api, ok := parser.TopLevelApis[resource]
if !ok {
api = NewApiDeclaration()
api.ApiVersion = parser.Listing.ApiVersion
api.SwaggerVersion = SwaggerVersion
api.ResourcePath = "/" + resource
api.BasePath = parser.Listing.BasePath
parser.TopLevelApis[resource] = api
}
found := false
for _, apiRef := range parser.Listing.Apis {
if apiRef.Path == api.ResourcePath |
}
if !found {
apiRef := &ApiRef{
Path: api.ResourcePath,
Description: op.Summary,
}
parser.Listing.Apis = append(parser.Listing.Apis, apiRef)
}
api.AddOperation(op)
}
func (parser *Parser) ParseApi(packageNames, vendorPath string) {
vendoringPath = vendorPath
packages := parser.ScanPackages(strings.Split(packageNames, ","))
for _, packageName := range packages {
parser.ParseTypeDefinitions(packageName)
}
for _, packageName := range packages {
parser.ParseApiDescription(packageName)
}
}
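// ScanPackages expands each requested package with every sub-package found under its real
// path, skipping hidden ("_") and vendor directories, and de-duplicates the result.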
func (parser *Parser) ScanPackages(packages []string) []string {
res := make([]string, 0, len(packages))
existsPackages := make(map[string]bool)
for _, packageName := range packages {
if !existsPackages[packageName] {
// Add package
existsPackages[packageName] = true
res = append(res, packageName)
// get its real path
pkgRealPath := parser.GetRealPackagePath(packageName)
// Then walk
var walker filepath.WalkFunc = func(path string, info os.FileInfo, err error) error {
// skip hidden directories (names starting with "_") and the vendor directory
if info.IsDir() && !strings.Contains(path, "/_") && !strings.Contains(path, "/vendor") {
if idx := strings.Index(path, packageName); idx != -1 {
pack := path[idx:]
if !existsPackages[pack] {
existsPackages[pack] = true
res = append(res, pack)
}
}
}
return nil
}
filepath.Walk(pkgRealPath, walker)
}
}
return res
}
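// ParseTypeDefinitions records every type declaration of the package and recurses into the packages it imports.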
func (parser *Parser) ParseTypeDefinitions(packageName string) {
parser.CurrentPackage = packageName
pkgRealPath := parser.GetRealPackagePath(packageName)
// log.Printf("Parse type definition of %#v\n", packageName)
if _, ok := parser.TypeDefinitions[pkgRealPath]; !ok {
parser.TypeDefinitions[pkgRealPath] = make(map[string]*ast.TypeSpec)
}
astPackages := parser.GetPackageAst(pkgRealPath)
for _, astPackage := range astPackages {
for _, astFile := range astPackage.Files {
for _, astDeclaration := range astFile.Decls {
if generalDeclaration, ok := astDeclaration.(*ast.GenDecl); ok && generalDeclaration.Tok == token.TYPE {
for _, astSpec := range generalDeclaration.Specs {
if typeSpec, ok := astSpec.(*ast.TypeSpec); ok {
parser.TypeDefinitions[pkgRealPath][typeSpec.Name.String()] = typeSpec
}
}
}
}
}
}
//log.Fatalf("Type definition parsed %#v\n", parser.ParseImportStatements(packageName))
for importedPackage := range parser.ParseImportStatements(packageName) {
//log.Printf("Import: %v, %v\n", importedPackage, v)
parser.ParseTypeDefinitions(importedPackage)
}
}
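// ParseImportStatements returns the package's imports that have not been parsed yet and
// records an alias-to-import-path map that is later used to resolve model names.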
func (parser *Parser) ParseImportStatements(packageName string) map[string]bool {
parser.CurrentPackage = packageName
pkgRealPath := parser.GetRealPackagePath(packageName)
imports := make(map[string]bool)
astPackages := parser.GetPackageAst(pkgRealPath)
parser.PackageImports[pkgRealPath] = make(map[string][]string)
for _, astPackage := range astPackages {
for _, astFile := range astPackage.Files {
for _, astImport := range astFile.Imports {
importedPackageName := strings.Trim(astImport.Path.Value, "\"")
if !parser.isIgnoredPackage(importedPackageName) {
realPath := parser.GetRealPackagePath(importedPackageName)
//log.Printf("path: %#v, original path: %#v", realPath, astImport.Path.Value)
if _, ok := parser.TypeDefinitions[realPath]; !ok {
imports[importedPackageName] = true
//log.Printf("Parse %s, Add new import definition:%s\n", packageName, astImport.Path.Value)
}
var importedPackageAlias string
if astImport.Name != nil && astImport.Name.Name != "." && astImport.Name.Name != "_" {
importedPackageAlias = astImport.Name.Name
} else {
importPath := strings.Split(importedPackageName, "/")
importedPackageAlias = importPath[len(importPath)-1]
}
isExists := false
for _, v := range parser.PackageImports[pkgRealPath][importedPackageAlias] {
if v == importedPackageName {
isExists = true
}
}
if !isExists {
parser.PackageImports[pkgRealPath][importedPackageAlias] = append(parser.PackageImports[pkgRealPath][importedPackageAlias], importedPackageName)
}
}
}
}
}
return imports
}
func (parser *Parser) GetModelDefinition(model string, packageName string) *ast.TypeSpec {
pkgRealPath := parser.CheckRealPackagePath(packageName)
if pkgRealPath == "" {
return nil
}
packageModels, ok := parser.TypeDefinitions[pkgRealPath]
if !ok {
return nil
}
astTypeSpec := packageModels[model]
return astTypeSpec
}
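// FindModelDefinition resolves a model name either in the current package (no dot in the name),
// as an absolute package path, or through the current package's import aliases, and returns
// the type spec together with the package it was found in.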
func (parser *Parser) FindModelDefinition(modelName string, currentPackage string) (*ast.TypeSpec, string) {
var model *ast.TypeSpec
var modelPackage string
modelNameParts := strings.Split(modelName, ".")
// if there is no dot in the name, it can only be a model from the current package
if len(modelNameParts) == 1 {
modelPackage = currentPackage
if model = parser.GetModelDefinition(modelName, currentPackage); model == nil {
log.Fatalf("Can not find definition of %s model. Current package %s", modelName, currentPackage)
}
} else {
// first assume the name is an absolute package path
absolutePackageName := strings.Join(modelNameParts[:len(modelNameParts)-1], "/")
modelNameFromPath := modelNameParts[len(modelNameParts)-1]
modelPackage = absolutePackageName
if model = parser.GetModelDefinition(modelNameFromPath, absolutePackageName); model == nil {
// the model could not be resolved by absolute name
if len(modelNameParts) > 2 {
log.Fatalf("Can not find definition of %s model. Name looks like absolute, but model not found in %s package", modelNameFromPath, absolutePackageName)
}
// let's try to find it in the imported packages
pkgRealPath := parser.CheckRealPackagePath(currentPackage)
if imports, ok := parser.PackageImports[pkgRealPath]; !ok {
log.Fatalf("Can not find definition of %s model. Package %s dont import anything", modelNameFromPath, pkgRealPath)
} else if relativePackage, ok := imports[modelNameParts[0]]; !ok {
log.Fatalf("Package %s is not imported to %s, Imported: %#v\n", modelNameParts[0], currentPackage, imports)
} else {
var modelFound bool
for _, packageName := range relativePackage {
if model = parser.GetModelDefinition(modelNameFromPath, packageName); model != nil {
modelPackage = packageName
modelFound = true
break
}
}
if !modelFound {
log.Fatalf("Can not find definition of %s model in package %s", modelNameFromPath, relativePackage)
}
}
}
}
return model, modelPackage
}
func (parser *Parser) ParseApiDescription(packageName string) {
parser.CurrentPackage = packageName
pkgRealPath := parser.GetRealPackagePath(packageName)
astPackages := parser.GetPackageAst(pkgRealPath)
for _, astPackage := range astPackages {
for _, astFile := range astPackage.Files {
for _, astDescription := range astFile.Decls {
switch astDeclaration := astDescription.(type) {
case *ast.FuncDecl:
if parser.IsController(astDeclaration, parser.ControllerClass) {
operation := NewOperation(parser, packageName)
if astDeclaration.Doc != nil && astDeclaration.Doc.List != nil {
for _, comment := range astDeclaration.Doc.List {
if err := operation.ParseComment(comment.Text); err != nil {
log.Printf("Can not parse comment for function: %v, package: %v, got error: %v\n", astDeclaration.Name.String(), packageName, err)
}
}
}
if operation.Path != "" {
parser.AddOperation(operation)
}
}
}
}
for _, astComment := range astFile.Comments {
for _, commentLine := range strings.Split(astComment.Text(), "\n") {
parser.ParseSubApiDescription(commentLine)
}
}
}
}
}
// Parse sub api declaration
// @SubApi Very fancy API [/fancy-api]
func (parser *Parser) ParseSubApiDescription(commentLine string) {
if !strings.HasPrefix(commentLine, "@SubApi") {
return
} else {
commentLine = strings.TrimSpace(commentLine[len("@SubApi"):])
}
re := regexp.MustCompile(`([^\[]+)\[{1}([\w\_\-/]+)`)
if matches := re.FindStringSubmatch(commentLine); len(matches) != 3 {
log.Printf("Can not parse sub api description %s, skipped", commentLine)
} else {
found := false
for _, ref := range parser.Listing.Apis {
if ref.Path == matches[2] {
found = true
ref.Description = strings.TrimSpace(matches[1])
}
}
if !found {
subApi := &ApiRef{Path: matches[2],
Description: strings.TrimSpace(matches[1]),
}
parser.Listing.Apis = append(parser.Listing.Apis, subApi)
}
}
}
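// isIgnoredPackage reports whether an import should be skipped: the cgo pseudo-package "C",
// appengine packages, or anything matching the user-supplied ignore pattern.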
func (parser *Parser) isIgnoredPackage(packageName string) bool {
r, _ := regexp.Compile("appengine+")
matched, err := regexp.MatchString(parser.Ignore, packageName)
if err != nil {
log.Fatalf("The -ignore argument is not a valid regular expression: %v\n", err)
}
return packageName == "C" || r.MatchString(packageName) || matched
}
func ParserFileFilter(info os.FileInfo) bool {
name := info.Name()
return !info.IsDir() && !strings.HasPrefix(name, ".") && strings.HasSuffix(name, ".go") && !strings.HasSuffix(name, "_test.go")
}
| {
found = true
} |
main.rs | use cluster_test::effects::RemoveNetworkEffects;
use cluster_test::experiments::{MultiRegionSimulation, PacketLossRandomValidators};
use cluster_test::github::GitHub;
use cluster_test::instance::Instance;
use cluster_test::prometheus::Prometheus;
use cluster_test::thread_pool_executor::ThreadPoolExecutor;
use cluster_test::tx_emitter::{EmitJobRequest, EmitThreadParams};
use cluster_test::util::unix_timestamp_now;
use cluster_test::{
aws::Aws,
cluster::Cluster,
deployment::{DeploymentManager, SOURCE_TAG},
effects::{Action, Effect, Reboot, StopContainer},
experiments::{Experiment, RebootRandomValidators},
health::{DebugPortLogThread, HealthCheckRunner, LogTail},
log_prune::LogPruner,
slack::SlackClient,
suite::ExperimentSuite,
tx_emitter::TxEmitter,
};
use failure::{
self,
prelude::{bail, format_err},
};
use rand::prelude::ThreadRng;
use rand::Rng;
use reqwest::Url;
use slog::{o, Drain};
use slog_scope::{info, warn};
use std::{
collections::HashSet,
env,
sync::mpsc::{self, TryRecvError},
thread,
time::{Duration, Instant},
};
use structopt::{clap::ArgGroup, StructOpt};
use termion::{color, style};
const HEALTH_POLL_INTERVAL: Duration = Duration::from_secs(5);
#[derive(StructOpt, Debug)]
#[structopt(group = ArgGroup::with_name("action").required(true))]
struct Args {
#[structopt(short = "w", long, conflicts_with = "swarm")]
workplace: Option<String>,
#[structopt(short = "p", long, use_delimiter = true, conflicts_with = "prune-logs")]
peers: Vec<String>,
#[structopt(
long,
help = "If set, tries to connect to a libra-swarm instead of aws"
)]
swarm: bool,
#[structopt(long, group = "action")]
wipe_all_db: bool,
#[structopt(long, group = "action")]
run: bool,
#[structopt(long, group = "action")]
run_once: bool,
#[structopt(long, group = "action")]
tail_logs: bool,
#[structopt(long, group = "action")]
health_check: bool,
#[structopt(long, group = "action")]
prune_logs: bool,
#[structopt(long, group = "action")]
reboot: bool,
#[structopt(long, group = "action")]
restart: bool,
#[structopt(long, group = "action")]
stop: bool,
#[structopt(long, group = "action")]
start: bool,
#[structopt(long, group = "action")]
emit_tx: bool,
#[structopt(long, group = "action")]
stop_experiment: bool,
#[structopt(long, group = "action")]
packet_loss_experiment: bool,
#[structopt(long, group = "action")]
perf_run: bool,
#[structopt(long, group = "action")]
cleanup: bool,
#[structopt(long, group = "action")]
multi_region_simulation: bool,
#[structopt(long, group = "action")]
changelog: Option<String>,
// emit_tx options
#[structopt(long, default_value = "10")]
accounts_per_client: usize,
#[structopt(long, default_value = "50")]
wait_millis: u64,
#[structopt(long)]
burst: bool,
#[structopt(long, default_value = "mint.key")]
mint_file: String,
// stop_experiment options
#[structopt(long, default_value = "10")]
max_stopped: usize,
// multi_region_simulation options
#[structopt(
long,
default_value = "10",
help = "Number of instances which should be in region1. The remaining instances are in region 2."
)]
multi_region_split: usize,
#[structopt(
long,
default_value = "50",
help = "Delay in ms between the two regions"
)]
multi_region_delay_ms: u64,
#[structopt(
long,
default_value = "60",
help = "Duration in secs for which multi region experiment happens"
)]
multi_region_exp_duration_secs: u64,
// packet_loss_experiment options
#[structopt(
long,
default_value = "10",
help = "Percent of instances in which packet loss should be introduced"
)]
packet_loss_percent_instances: f32,
#[structopt(
long,
default_value = "10",
help = "Percent of packet loss for each instance"
)]
packet_loss_percent: f32,
#[structopt(
long,
default_value = "60",
help = "Duration in secs for which packet loss happens"
)]
packet_loss_duration_secs: u64,
}
pub fn main() {
setup_log();
let args = Args::from_args();
if args.swarm && !args.emit_tx {
panic!("Can only use --emit-tx option in --swarm mode");
}
if args.prune_logs {
let util = ClusterUtil::setup(&args);
util.prune_logs();
return;
} else if args.emit_tx {
let thread_params = EmitThreadParams {
wait_millis: args.wait_millis,
wait_committed: !args.burst,
};
if args.swarm {
let util = BasicSwarmUtil::setup(&args);
util.emit_tx(args.accounts_per_client, thread_params);
return;
} else {
let util = ClusterUtil::setup(&args);
util.emit_tx(args.accounts_per_client, thread_params);
return;
}
} else if args.stop_experiment {
let util = ClusterUtil::setup(&args);
util.stop_experiment(args.max_stopped);
return;
}
let mut runner = ClusterTestRunner::setup(&args);
if args.run {
runner.run_suite_in_loop();
} else if args.run_once {
let experiment = RebootRandomValidators::new(3, &runner.cluster);
runner.cleanup_and_run(Box::new(experiment)).unwrap();
} else if args.tail_logs {
runner.tail_logs();
} else if args.health_check {
runner.run_health_check();
} else if args.wipe_all_db {
runner.stop();
runner.wipe_all_db(true);
runner.start();
} else if args.reboot {
runner.reboot();
} else if args.restart {
runner.restart();
} else if args.stop {
runner.stop();
} else if args.start {
runner.start();
} else if args.packet_loss_experiment {
let total_instances = runner.cluster.instances().len();
let packet_loss_num_instances: usize = std::cmp::min(
((args.packet_loss_percent_instances / 100.0) * total_instances as f32).ceil() as usize,
total_instances,
);
let experiment = PacketLossRandomValidators::new(
packet_loss_num_instances,
args.packet_loss_percent,
Duration::from_secs(args.packet_loss_duration_secs),
&runner.cluster,
);
runner.cleanup_and_run(Box::new(experiment)).unwrap();
} else if args.multi_region_simulation {
let experiment = MultiRegionSimulation::new(
args.multi_region_split,
Duration::from_millis(args.multi_region_delay_ms),
Duration::from_secs(args.multi_region_exp_duration_secs),
&runner.cluster,
runner.thread_pool_executor.clone(),
);
runner.cleanup_and_run(Box::new(experiment)).unwrap();
} else if args.perf_run {
runner.perf_run();
} else if args.cleanup {
runner.cleanup();
} else if let Some(commit) = args.changelog {
let prev_commit = runner
.deployment_manager
.get_tested_upstream_commit()
.map_err(|e| warn!("Failed to get prev_commit: {:?}", e))
.ok();
println!("Prev commit: {:?}", prev_commit);
println!("{}", runner.get_changelog(prev_commit.as_ref(), &commit));
}
}
fn setup_log() {
if env::var("RUST_LOG").is_err() {
env::set_var("RUST_LOG", "info");
}
let decorator = slog_term::PlainDecorator::new(std::io::stdout());
let drain = slog_term::CompactFormat::new(decorator).build().fuse();
let drain = slog_envlogger::new(drain);
let drain = slog_async::Async::new(drain).build().fuse();
let logger = slog::Logger::root(drain, o!());
let logger_guard = slog_scope::set_global_logger(logger);
std::mem::forget(logger_guard);
}
struct BasicSwarmUtil {
cluster: Cluster,
}
struct ClusterUtil {
cluster: Cluster,
aws: Aws,
prometheus: Prometheus,
}
struct ClusterTestRunner {
logs: LogTail,
cluster: Cluster,
health_check_runner: HealthCheckRunner,
deployment_manager: DeploymentManager,
experiment_interval: Duration,
thread_pool_executor: ThreadPoolExecutor,
slack: SlackClient,
slack_log_url: Option<Url>,
slack_changelog_url: Option<Url>,
tx_emitter: TxEmitter,
prometheus: Prometheus,
github: GitHub,
}
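/// Parses a "host:port" string into its host and port components.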
fn parse_host_port(s: &str) -> failure::Result<(String, u32)> {
let v = s.split(':').collect::<Vec<&str>>();
if v.len() != 2 {
return Err(format_err!("Failed to parse {:?} in host:port format", s));
}
let host = v[0].to_string();
let port = v[1].parse::<u32>()?;
Ok((host, port))
}
impl BasicSwarmUtil {
pub fn setup(args: &Args) -> Self {
if args.peers.is_empty() {
panic!("Peers not set in args");
}
let parsed_peers: Vec<_> = args
.peers
.iter()
.map(|peer| parse_host_port(peer).unwrap())
.collect();
Self {
cluster: Cluster::from_host_port(parsed_peers, &args.mint_file),
}
}
pub fn emit_tx(self, accounts_per_client: usize, thread_params: EmitThreadParams) {
let mut emitter = TxEmitter::new(&self.cluster);
emitter
.start_job(EmitJobRequest {
instances: self.cluster.instances().to_vec(),
accounts_per_client,
thread_params,
})
.expect("Failed to start emit job");
thread::park();
}
}
impl ClusterUtil {
pub fn setup(args: &Args) -> Self |
pub fn prune_logs(&self) {
let log_prune = LogPruner::new(self.aws.clone());
log_prune.prune_logs();
}
pub fn emit_tx(self, accounts_per_client: usize, thread_params: EmitThreadParams) {
let mut emitter = TxEmitter::new(&self.cluster);
emitter
.start_job(EmitJobRequest {
instances: self.cluster.instances().to_vec(),
accounts_per_client,
thread_params,
})
.expect("Failed to start emit job");
self.run_stat_loop();
}
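/// Measures TPS and latency in a loop, stopping one more random validator after each
/// measurement until more than `max_stopped` validators are down, then prints the results
/// as CSV and restarts the stopped containers.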
pub fn stop_experiment(self, max_stopped: usize) {
let mut emitter = TxEmitter::new(&self.cluster);
let mut instances = self.cluster.instances().to_vec();
let mut rng = ThreadRng::default();
let mut stop_effects = vec![];
let mut stopped_instance_ids = vec![];
let mut results = vec![];
let window = Duration::from_secs(60);
loop {
let job = emitter
.start_job(EmitJobRequest {
instances: instances.clone(),
accounts_per_client: 10,
thread_params: EmitThreadParams::default(),
})
.expect("Failed to start emit job");
thread::sleep(Duration::from_secs(30) + window);
match print_stat(&self.prometheus, window) {
Err(e) => info!("Failed to get stats: {:?}", e),
Ok((tps, lat)) => results.push((stop_effects.len(), tps, lat)),
}
emitter.stop_job(job);
if stop_effects.len() > max_stopped {
break;
}
let stop_validator = rng.gen_range(0, instances.len());
let stop_validator = instances.remove(stop_validator);
stopped_instance_ids.push(stop_validator.short_hash().clone());
let stop_effect = StopContainer::new(stop_validator);
info!(
"Stopped {} validators: {}",
stopped_instance_ids.len(),
stopped_instance_ids.join(",")
);
stop_effect.activate().expect("Failed to stop container");
stop_effects.push(stop_effect);
thread::sleep(Duration::from_secs(30));
}
println!("Results in csv format:");
println!("DOWN\tTPS\tLAT");
for (stopped, tps, lat) in results {
println!("{}\t{:.0}\t{:.0}", stopped, tps, lat * 1000.);
}
for stop_effect in stop_effects {
if let Err(e) = stop_effect.deactivate() {
info!("Failed to deactivate {}: {:?}", stop_effect, e);
}
}
}
fn run_stat_loop(&self) {
let window = Duration::from_secs(30);
thread::sleep(Duration::from_secs(30)); // warm up
loop {
thread::sleep(Duration::from_secs(10));
if let Err(err) = print_stat(&self.prometheus, window) {
info!("Stat error: {:?}", err);
}
}
}
}
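/// Queries Prometheus for the average committed-version rate (TPS) and end-to-end latency
/// over the given window, logs them and returns the pair.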
fn print_stat(prometheus: &Prometheus, window: Duration) -> failure::Result<(f64, f64)> {
let step = 10;
let end = unix_timestamp_now();
let start = end - window;
let tps = prometheus.query_range(
"irate(consensus_gauge{op='last_committed_version'}[1m])".to_string(),
&start,
&end,
step,
)?;
let avg_tps = tps.avg().ok_or_else(|| format_err!("No tps data"))?;
let latency = prometheus.query_range(
"irate(mempool_duration_sum{op='e2e.latency'}[1m])/irate(mempool_duration_count{op='e2e.latency'}[1m])"
.to_string(),
&start,
&end,
step,
)?;
let avg_latency = latency
.avg()
.ok_or_else(|| format_err!("No latency data"))?;
info!(
"Tps: {:.0}, latency: {:.0} ms",
avg_tps,
avg_latency * 1000.
);
Ok((avg_tps, avg_latency))
}
impl ClusterTestRunner {
/// Discovers cluster, setup log, etc
pub fn setup(args: &Args) -> Self {
let util = ClusterUtil::setup(args);
let cluster = util.cluster;
let aws = util.aws;
let log_tail_started = Instant::now();
let logs = DebugPortLogThread::spawn_new(&cluster);
let log_tail_startup_time = Instant::now() - log_tail_started;
info!(
"Log tail thread started in {} ms",
log_tail_startup_time.as_millis()
);
let health_check_runner = HealthCheckRunner::new_all(cluster.clone());
let experiment_interval_sec = match env::var("EXPERIMENT_INTERVAL") {
Ok(s) => s.parse().expect("EXPERIMENT_INTERVAL env is not a number"),
Err(..) => 15,
};
let experiment_interval = Duration::from_secs(experiment_interval_sec);
let deployment_manager = DeploymentManager::new(aws.clone(), cluster.clone());
let slack = SlackClient::new();
let slack_log_url = env::var("SLACK_LOG_URL")
.map(|u| u.parse().expect("Failed to parse SLACK_LOG_URL"))
.ok();
let slack_changelog_url = env::var("SLACK_CHANGELOG_URL")
.map(|u| u.parse().expect("Failed to parse SLACK_CHANGELOG_URL"))
.ok();
let thread_pool_executor = ThreadPoolExecutor::new("ssh-pool".into());
let tx_emitter = TxEmitter::new(&cluster);
let prometheus = Prometheus::new(
cluster
.prometheus_ip()
.expect("Failed to discover prometheus ip in aws"),
);
let github = GitHub::new();
Self {
logs,
cluster,
health_check_runner,
deployment_manager,
experiment_interval,
slack,
thread_pool_executor,
slack_log_url,
slack_changelog_url,
tx_emitter,
prometheus,
github,
}
}
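/// Main loop: redeploys whenever a new image with the source tag appears, runs the
/// pre-release suite, and, after a successful run on a fresh deployment, tags the image as
/// tested and posts the changelog and performance report to Slack.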
pub fn run_suite_in_loop(&mut self) {
self.cleanup();
let mut hash_to_tag = None;
loop {
if let Some(hash) = self.deployment_manager.latest_hash_changed() {
info!(
"New version of `{}` tag is available: `{}`",
SOURCE_TAG, hash
);
match self.redeploy(hash.clone()) {
Err(e) => {
self.report_failure(format!("Failed to deploy `{}`: {}", hash, e));
return;
}
Ok(true) => {
info!("Deployed new version `{}`, running test suite", hash);
hash_to_tag = Some(hash);
}
Ok(false) => {}
}
}
let suite = ExperimentSuite::new_pre_release(&self.cluster);
if let Err(e) = self.run_suite(suite) {
self.report_failure(format!("{}", e));
return;
}
if let Some(hash_to_tag) = hash_to_tag.take() {
info!("Test suite succeed first time for `{}`", hash_to_tag);
let prev_commit = self
.deployment_manager
.get_tested_upstream_commit()
.map_err(|e| warn!("Failed to get prev_commit: {:?}", e))
.ok();
let upstream_commit = match self
.deployment_manager
.tag_tested_image(hash_to_tag.clone())
{
Err(e) => {
self.report_failure(format!("Failed to tag tested image: {}", e));
return;
}
Ok(upstream_commit) => upstream_commit,
};
let perf_msg = match self.measure_performance() {
Ok(report) => format!(
"Performance report:\n```\n{}\n```",
report.to_slack_message()
),
Err(err) => {
warn!("No performance data: {}", err);
"No performance data".to_string()
}
};
info!(
"prev_commit: {:?}, upstream_commit: {}",
prev_commit, upstream_commit
);
let changelog = self.get_changelog(prev_commit.as_ref(), &upstream_commit);
self.slack_changelog_message(format!("{}\n\n{}", changelog, perf_msg));
}
thread::sleep(self.experiment_interval);
}
}
fn get_changelog(&self, prev_commit: Option<&String>, upstream_commit: &str) -> String {
let commits = self.github.get_commits("libra/libra", &upstream_commit);
match commits {
Err(e) => {
info!("Failed to get github commits: {:?}", e);
format!("*Revision upstream_{}*", upstream_commit)
}
Ok(commits) => {
let mut msg = format!("*Revision {}*", upstream_commit);
for commit in commits {
if let Some(prev_commit) = prev_commit {
if commit.sha.starts_with(prev_commit) {
break;
}
}
let commit_lines: Vec<_> = commit.commit.message.split('\n').collect();
let commit_head = commit_lines[0];
let short_sha = &commit.sha[..6];
let email_parts: Vec<_> = commit.commit.author.email.split('@').collect();
let author = email_parts[0];
let line = format!("\n>\u{2022} {} _{}_ {}", short_sha, author, commit_head);
msg.push_str(&line);
}
msg
}
}
}
fn report_failure(&self, msg: String) {
self.slack_message(msg);
}
fn redeploy(&mut self, hash: String) -> failure::Result<bool> {
if env::var("ALLOW_DEPLOY") != Ok("yes".to_string()) {
info!("Deploying is disabled. Run with ALLOW_DEPLOY=yes to enable deploy");
return Ok(false);
}
self.stop();
if env::var("WIPE_ON_DEPLOY") != Ok("no".to_string()) {
info!("Wiping validators");
self.wipe_all_db(false);
} else {
info!("WIPE_ON_DEPLOY is set to no, keeping database");
}
self.deployment_manager.redeploy(hash)?;
thread::sleep(Duration::from_secs(60));
self.logs.recv_all();
self.health_check_runner.clear();
self.tx_emitter.clear();
self.start();
info!("Waiting until all validators healthy after deployment");
self.wait_until_all_healthy()?;
Ok(true)
}
fn run_suite(&mut self, suite: ExperimentSuite) -> failure::Result<()> {
info!("Starting suite");
let suite_started = Instant::now();
for experiment in suite.experiments {
let experiment_name = format!("{}", experiment);
self.run_single_experiment(experiment).map_err(move |e| {
format_err!("Experiment `{}` failed: `{}`", experiment_name, e)
})?;
thread::sleep(self.experiment_interval);
}
info!(
"Suite completed in {:?}",
Instant::now().duration_since(suite_started)
);
Ok(())
}
pub fn perf_run(&mut self) {
let results = self.measure_performance().unwrap();
println!("{}", results.to_slack_message())
}
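/// Runs a warm-up job, then measures TPS/latency with all validators up and again with 10 random validators stopped.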
fn measure_performance(&mut self) -> failure::Result<SuiteReport> {
info!("Starting warm up job");
self.emit_txn_for(Duration::from_secs(60), self.cluster.instances().clone())?;
info!("Warm up done, measuring tps");
let window = Duration::from_secs(180);
self.emit_txn_for(
window + Duration::from_secs(30),
self.cluster.instances().clone(),
)?;
let stats_all_up = print_stat(&self.prometheus, window)
.map_err(|e| format_err!("Failed to query stats: {}", e))?;
let (stop, keep) = self.cluster.split_n_random(10);
let mut stop_effects: Vec<_> = stop
.into_instances()
.into_iter()
.map(StopContainer::new)
.collect();
self.activate_all(&mut stop_effects);
self.emit_txn_for(window + Duration::from_secs(30), keep.instances().clone())?;
let stats_10_down = print_stat(&self.prometheus, window)
.map_err(|e| format_err!("Failed to query stats: {}", e))?;
self.deactivate_all(&mut stop_effects);
self.wait_until_all_healthy()?;
Ok(SuiteReport {
stats_all_up,
stats_10_down,
})
}
fn emit_txn_for(
&mut self,
duration: Duration,
instances: Vec<Instance>,
) -> failure::Result<()> {
let job = self.tx_emitter.start_job(EmitJobRequest {
instances,
accounts_per_client: 10,
thread_params: EmitThreadParams::default(),
})?;
thread::sleep(duration);
self.tx_emitter.stop_job(job);
Ok(())
}
pub fn cleanup_and_run(&mut self, experiment: Box<dyn Experiment>) -> failure::Result<()> {
self.cleanup();
self.run_single_experiment(experiment)
}
pub fn run_single_experiment(
&mut self,
experiment: Box<dyn Experiment>,
) -> failure::Result<()> {
let events = self.logs.recv_all();
if let Err(s) = self.health_check_runner.run(&events, &HashSet::new(), true) {
bail!(
"Some validators are unhealthy before experiment started : {}",
s
);
}
info!(
"{}Starting experiment {}{}{}{}",
style::Bold,
color::Fg(color::Blue),
experiment,
color::Fg(color::Reset),
style::Reset
);
let affected_validators = experiment.affected_validators();
let (exp_result_sender, exp_result_recv) = mpsc::channel();
thread::spawn(move || {
let result = experiment.run();
exp_result_sender
.send(result)
.expect("Failed to send experiment result");
});
// We expect the experiment to complete and the cluster to return to a healthy state within the timeout
let experiment_deadline = Instant::now() + Duration::from_secs(10 * 60);
loop {
if Instant::now() > experiment_deadline {
bail!("Experiment did not complete in time");
}
let deadline = Instant::now() + HEALTH_POLL_INTERVAL;
// Receive all events that arrived at the aws log tail before the poll deadline.
// This assumes that event propagation time is << 1s; this needs to be refined
// in the future to account for the actual event propagation delay
let events = self.logs.recv_all_until_deadline(deadline);
if let Err(s) = self
.health_check_runner
.run(&events, &affected_validators, true)
{
bail!("Validators which were not under experiment failed : {}", s);
}
match exp_result_recv.try_recv() {
Ok(result) => {
result.expect("Failed to run experiment");
break;
}
Err(TryRecvError::Empty) => {
// Experiment in progress, continue monitoring health
}
Err(TryRecvError::Disconnected) => {
panic!("Experiment thread exited without returning result");
}
}
}
info!(
"{}Experiment finished, waiting until all affected validators recover{}",
style::Bold,
style::Reset
);
for validator in affected_validators.iter() {
self.health_check_runner.invalidate(validator);
}
loop {
if Instant::now() > experiment_deadline {
bail!("Cluster did not become healthy in time");
}
let deadline = Instant::now() + HEALTH_POLL_INTERVAL;
// Receive all events that arrived at the aws log tail before the poll deadline.
// This assumes that event propagation time is << 1s; this needs to be refined
// in the future to account for the actual event propagation delay
let events = self.logs.recv_all_until_deadline(deadline);
let unhealthy_validators;
match self
.health_check_runner
.run(&events, &affected_validators, true)
{
Err(s) => bail!("Validators which were not under experiment failed : {}", s),
Ok(r) => unhealthy_validators = r,
}
if unhealthy_validators.is_empty() {
break;
}
}
info!("Experiment completed");
Ok(())
}
fn run_health_check(&mut self) {
loop {
let deadline = Instant::now() + Duration::from_secs(1);
// Receive all events that arrived at the aws log tail before the poll deadline.
// This assumes that event propagation time is << 1s; this needs to be refined
// in the future to account for the actual event propagation delay
let events = self.logs.recv_all_until_deadline(deadline);
let _ignore = self
.health_check_runner
.run(&events, &HashSet::new(), false);
}
}
fn wait_until_all_healthy(&mut self) -> failure::Result<()> {
let wait_deadline = Instant::now() + Duration::from_secs(10 * 60);
for instance in self.cluster.instances() {
self.health_check_runner.invalidate(instance.short_hash());
}
loop {
let now = Instant::now();
if now > wait_deadline {
bail!("Validators did not become healthy after deployment");
}
let deadline = now + HEALTH_POLL_INTERVAL;
let events = self.logs.recv_all_until_deadline(deadline);
if let Ok(failed_instances) =
self.health_check_runner.run(&events, &HashSet::new(), true)
{
if failed_instances.is_empty() {
break;
}
}
}
Ok(())
}
fn tail_logs(self) {
for log in self.logs.event_receiver {
info!("{:?}", log);
}
}
fn slack_message(&self, msg: String) {
info!("{}", msg);
if let Some(ref log_url) = self.slack_log_url {
if let Err(e) = self.slack.send_message(log_url, &msg) {
info!("Failed to send slack message: {}", e);
}
}
}
fn slack_changelog_message(&self, msg: String) {
info!("{}", msg);
if let Some(ref changelog_url) = self.slack_changelog_url {
if let Err(e) = self.slack.send_message(changelog_url, &msg) {
info!("Failed to send slack message: {}", e);
}
}
}
fn wipe_all_db(&self, safety_wait: bool) {
info!("Going to wipe db on all validators in cluster!");
if safety_wait {
info!("Waiting 10 seconds before proceed");
thread::sleep(Duration::from_secs(10));
info!("Starting...");
}
let jobs = self
.cluster
.instances()
.iter()
.map(|instance| {
let instance = instance.clone();
move || {
if let Err(e) =
instance.run_cmd_tee_err(vec!["sudo", "rm", "-rf", "/data/libra/"])
{
info!("Failed to wipe {}: {:?}", instance, e);
}
}
})
.collect();
self.thread_pool_executor.execute_jobs(jobs);
info!("Done");
}
fn reboot(self) {
let mut reboots = vec![];
for instance in self.cluster.instances() {
info!("Rebooting {}", instance);
let reboot = Reboot::new(instance.clone());
if let Err(err) = reboot.apply() {
info!("Failed to reboot {}: {:?}", instance, err);
} else {
reboots.push(reboot);
}
}
info!("Waiting to complete");
while reboots.iter().any(|r| !r.is_complete()) {
thread::sleep(Duration::from_secs(5));
}
info!("Completed");
}
fn restart(&self) {
self.stop();
self.start();
info!("Completed");
}
fn cleanup(&self) {
let cleanup_all_instances: Vec<_> = self
.cluster
.instances()
.clone()
.into_iter()
.map(|instance| {
move || {
if let Err(e) = RemoveNetworkEffects::new(instance.clone()).apply() {
info!(
"Failed to remove network effects for {}. Error: {}",
instance, e
);
}
}
})
.collect();
self.thread_pool_executor
.execute_jobs(cleanup_all_instances);
}
pub fn stop(&self) {
self.activate_all(&mut self.make_stop_effects())
}
pub fn start(&self) {
self.deactivate_all(&mut self.make_stop_effects())
}
fn make_stop_effects(&self) -> Vec<StopContainer> {
self.cluster
.instances()
.clone()
.into_iter()
.map(StopContainer::new)
.collect()
}
fn activate_all<T: Effect>(&self, effects: &mut [T]) {
let jobs = effects
.iter_mut()
.map(|effect| {
move || {
if let Err(e) = effect.activate() {
info!("Failed to activate {}: {:?}", effect, e);
}
}
})
.collect();
self.thread_pool_executor.execute_jobs(jobs);
}
fn deactivate_all<T: Effect>(&self, effects: &mut [T]) {
let jobs = effects
.iter_mut()
.map(|effect| {
move || {
if let Err(e) = effect.deactivate() {
info!("Failed to deactivate {}: {:?}", effect, e);
}
}
})
.collect();
self.thread_pool_executor.execute_jobs(jobs);
}
}
struct SuiteReport {
stats_all_up: (f64, f64),
stats_10_down: (f64, f64),
}
impl SuiteReport {
pub fn to_slack_message(&self) -> String {
format!(
"all up: {:.0} TPS, {:.1} s latency\n10% down: {:.0} TPS, {:.1} s latency",
self.stats_all_up.0, self.stats_all_up.1, self.stats_10_down.0, self.stats_10_down.1,
)
}
}
| {
let aws = Aws::new(
args.workplace
.as_ref()
.expect("--workplace not set")
.clone(),
);
let cluster = Cluster::discover(&aws, &args.mint_file).expect("Failed to discover cluster");
let cluster = if args.peers.is_empty() {
cluster
} else {
cluster.sub_cluster(args.peers.clone())
};
let prometheus = Prometheus::new(
cluster
.prometheus_ip()
.expect("Failed to discover prometheus ip in aws"),
);
info!("Discovered {} peers", cluster.instances().len());
Self {
cluster,
aws,
prometheus,
}
} |
hhas_coeffects.rs | // Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
use hhbc_by_ref_hhbc_string_utils::strip_ns;
use naming_special_names_rust::{self as sn, coeffects as c};
use ocamlrep_derive::{FromOcamlRep, ToOcamlRep};
use oxidized::{
aast as a,
aast_defs::{Hint, Hint_},
ast_defs::Id,
};
use std::fmt;
#[derive(Debug, Clone, Copy, PartialEq, ToOcamlRep, FromOcamlRep)]
pub enum Ctx {
Defaults,
// Shared
WriteProps,
// Rx hierarchy
RxLocal,
RxShallow,
Rx,
// Policied hierarchy
PoliciedOfLocal,
PoliciedOfShallow,
PoliciedOf,
PoliciedLocal,
PoliciedShallow,
Policied,
ReadGlobals,
Globals,
// Pure
Pure,
}
impl fmt::Display for Ctx {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
use Ctx::*;
match self {
Defaults => write!(f, "{}", c::DEFAULTS),
RxLocal => write!(f, "{}", c::RX_LOCAL),
RxShallow => write!(f, "{}", c::RX_SHALLOW),
Rx => write!(f, "{}", c::RX),
WriteProps => write!(f, "{}", c::WRITE_PROPS),
PoliciedOfLocal => write!(f, "{}", c::POLICIED_OF_LOCAL),
PoliciedOfShallow => write!(f, "{}", c::POLICIED_OF_SHALLOW),
PoliciedOf => write!(f, "{}", c::POLICIED_OF),
PoliciedLocal => write!(f, "{}", c::POLICIED_LOCAL),
PoliciedShallow => write!(f, "{}", c::POLICIED_SHALLOW),
Policied => write!(f, "{}", c::POLICIED),
Pure => write!(f, "{}", c::PURE),
ReadGlobals => write!(f, "{}", c::READ_GLOBALS),
Globals => write!(f, "{}", c::GLOBALS),
}
}
}
#[derive(Debug)]
pub struct HhasCtxConstant {
pub name: String,
pub coeffects: Vec<Ctx>,
pub is_abstract: bool,
}
#[derive(Clone, Debug, Default, ToOcamlRep, FromOcamlRep)]
pub struct HhasCoeffects {
static_coeffects: Vec<Ctx>,
unenforced_static_coeffects: Vec<String>,
fun_param: Vec<usize>,
cc_param: Vec<(usize, String)>,
cc_this: Vec<Vec<String>>,
is_any_rx: bool,
is_pure: bool,
closure_parent_scope: bool,
generator_this: bool,
caller: bool,
}
impl HhasCoeffects {
pub fn vec_to_string<T, F: Fn(&T) -> String>(v: &[T], f: F) -> Option<String> {
if v.is_empty() {
return None;
}
Some(v.iter().map(|x| f(x)).collect::<Vec<String>>().join(" "))
}
pub fn coeffects_to_hhas(coeffects: &Self) -> Vec<String> {
let mut results = vec![];
let static_coeffect =
HhasCoeffects::vec_to_string(coeffects.get_static_coeffects(), |c| c.to_string());
let unenforced_static_coeffects =
HhasCoeffects::vec_to_string(coeffects.get_unenforced_static_coeffects(), |c| {
c.to_string()
});
match (static_coeffect, unenforced_static_coeffects) {
(None, None) => {}
(Some(s), None) | (None, Some(s)) => results.push(format!(".coeffects_static {};", s)),
(Some(s1), Some(s2)) => results.push(format!(".coeffects_static {} {};", s1, s2)),
};
if let Some(str) =
HhasCoeffects::vec_to_string(coeffects.get_fun_param(), |c| c.to_string())
{
results.push(format!(".coeffects_fun_param {};", str));
}
if let Some(str) =
HhasCoeffects::vec_to_string(coeffects.get_cc_param(), |c| format!("{} {}", c.0, c.1))
{
results.push(format!(".coeffects_cc_param {};", str));
}
for v in coeffects.get_cc_this() {
match HhasCoeffects::vec_to_string(v.as_slice(), |c| c.to_string()) {
Some(str) => results.push(format!(".coeffects_cc_this {};", str)),
None => panic!("Not possible"),
}
}
if coeffects.is_closure_parent_scope() {
results.push(".coeffects_closure_parent_scope;".to_string());
}
if coeffects.generator_this() {
results.push(".coeffects_generator_this;".to_string());
}
if coeffects.caller() {
results.push(".coeffects_caller;".to_string());
}
results
}
fn from_type_static(hint: &Hint) -> Option<Ctx> {
let Hint(_, h) = hint;
match &**h {
Hint_::Happly(Id(_, id), _) => match strip_ns(id.as_str()) {
c::DEFAULTS => Some(Ctx::Defaults),
c::RX_LOCAL => Some(Ctx::RxLocal),
c::RX_SHALLOW => Some(Ctx::RxShallow),
c::RX => Some(Ctx::Rx),
c::WRITE_PROPS => Some(Ctx::WriteProps),
c::POLICIED_OF_LOCAL => Some(Ctx::PoliciedOfLocal),
c::POLICIED_OF_SHALLOW => Some(Ctx::PoliciedOfShallow),
c::POLICIED_OF => Some(Ctx::PoliciedOf),
c::POLICIED_LOCAL => Some(Ctx::PoliciedLocal),
c::POLICIED_SHALLOW => Some(Ctx::PoliciedShallow),
c::POLICIED => Some(Ctx::Policied),
_ => None,
},
_ => None,
}
}
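/// Maps each *Local context to its *Shallow counterpart; all other contexts are returned unchanged.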
pub fn local_to_shallow(coeffects: &[Ctx]) -> Vec<Ctx> {
use Ctx::*;
let mut result = vec![];
for c in coeffects.iter() {
result.push(match c {
RxLocal => RxShallow,
PoliciedOfLocal => PoliciedOfShallow,
PoliciedLocal => PoliciedShallow,
_ => *c,
})
}
result
}
pub fn from_ctx_constant(hint: &Hint) -> Vec<Ctx> {
let Hint(_, h) = hint;
match &**h {
Hint_::Hintersection(hl) if hl.is_empty() => vec![Ctx::Pure],
Hint_::Hintersection(hl) => {
let mut result = vec![];
for h in hl {
if let Some(c) = HhasCoeffects::from_type_static(h) {
result.push(c);
}
}
result
}
_ => vec![],
}
}
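/// Builds the coeffects of a function from its context list: enforced static contexts,
/// unenforced ones, and coeffect rules derived from fun-context parameters, parameter::Ctx
/// accesses and this::Ctx accesses. An empty context list marks the function as pure.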
pub fn from_ast<Ex, Fb, En, Hi>(
ctxs_opt: &Option<a::Contexts>,
params: impl AsRef<[a::FunParam<Ex, Fb, En, Hi>]>,
) -> Self {
let mut static_coeffects = vec![];
let mut unenforced_static_coeffects = vec![];
let mut fun_param = vec![];
let mut cc_param = vec![];
let mut cc_this = vec![];
let mut is_any_rx = false;
let mut is_pure = false;
let get_arg_pos = |name: &String| -> usize {
if let Some(pos) = params.as_ref().iter().position(|x| x.name == *name) {
pos
} else {
panic!("Invalid context");
}
};
// From coeffect syntax
if let Some(ctxs) = ctxs_opt {
if ctxs.1.is_empty() {
is_pure = true;
static_coeffects.push(Ctx::Pure);
}
for ctx in &ctxs.1 {
let Hint(_, h) = ctx;
match &**h {
Hint_::Happly(Id(_, id), _) => {
if let Some(c) = HhasCoeffects::from_type_static(ctx) {
static_coeffects.push(c)
} else {
unenforced_static_coeffects.push(strip_ns(id.as_str()).to_string());
}
if let c::RX_LOCAL | c::RX_SHALLOW | c::RX = strip_ns(id.as_str()) {
is_any_rx = true;
}
}
Hint_::HfunContext(name) => fun_param.push(get_arg_pos(name)),
Hint_::Haccess(Hint(_, hint), sids) => match &**hint {
Hint_::Happly(Id(_, id), _)
if strip_ns(id.as_str()) == sn::typehints::THIS && !sids.is_empty() =>
{
cc_this.push(sids.into_iter().map(|Id(_, id)| id.clone()).collect());
}
Hint_::Hvar(name) if sids.len() == 1 => {
let pos = get_arg_pos(name);
let Id(_, sid_name) = &sids[0];
cc_param.push((pos, sid_name.clone()));
}
_ => {}
},
_ => {}
}
}
}
// If there are no static coeffects but there are coeffect rules, then
// the static coeffects are pure
if static_coeffects.is_empty()
&& (!fun_param.is_empty() || !cc_param.is_empty() || !cc_this.is_empty())
{
static_coeffects.push(Ctx::Pure);
}
Self {
static_coeffects,
unenforced_static_coeffects,
fun_param,
cc_param,
cc_this,
is_any_rx,
is_pure,
..HhasCoeffects::default()
}
}
pub fn inherit_to_child_closure(&self) -> Self {
let static_coeffects = HhasCoeffects::local_to_shallow(self.get_static_coeffects());
if self.has_coeffect_rules() {
Self {
static_coeffects,
closure_parent_scope: true,
..HhasCoeffects::default()
}
} else {
Self {
static_coeffects,
..self.clone()
}
}
}
pub fn with_gen_coeffect(&self) -> Self {
Self {
generator_this: true,
..self.clone()
}
}
pub fn with_caller(&self) -> Self {
Self {
caller: true,
..self.clone()
}
}
pub fn get_static_coeffects(&self) -> &[Ctx] {
self.static_coeffects.as_slice()
}
pub fn get_unenforced_static_coeffects(&self) -> &[String] {
self.unenforced_static_coeffects.as_slice()
}
pub fn get_fun_param(&self) -> &[usize] {
self.fun_param.as_slice()
}
pub fn get_cc_param(&self) -> &[(usize, String)] {
self.cc_param.as_slice()
}
pub fn get_cc_this(&self) -> &[Vec<String>] {
self.cc_this.as_slice()
}
pub fn is_any_rx(&self) -> bool {
self.is_any_rx
}
pub fn is_any_rx_or_pure(&self) -> bool |
pub fn generator_this(&self) -> bool {
self.generator_this
}
pub fn caller(&self) -> bool {
self.caller
}
fn has_coeffect_rules(&self) -> bool {
!self.fun_param.is_empty()
|| !self.cc_param.is_empty()
|| !self.cc_this.is_empty()
|| self.closure_parent_scope
|| self.generator_this
|| self.caller
}
pub fn has_coeffects_local(&self) -> bool {
self.has_coeffect_rules() && !self.generator_this()
}
pub fn is_closure_parent_scope(&self) -> bool {
self.closure_parent_scope
}
}
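/// If the function body is a single `if` on Rx\is_enabled with a non-trivial else branch,
/// returns the enabled and disabled blocks.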
pub fn halves_of_is_enabled_body<Ex, Fb, En, Hi>(
body: &a::FuncBody<Ex, Fb, En, Hi>,
) -> Option<(&a::Block<Ex, Fb, En, Hi>, &a::Block<Ex, Fb, En, Hi>)> {
use a::*;
if let [Stmt(_, Stmt_::If(if_))] = body.ast.as_slice() {
if let (Expr(_, Expr_::Id(sid)), enabled, disabled) = &**if_ {
let Id(_, name) = &**sid;
return if name != sn::rx::IS_ENABLED {
None
} else {
match disabled.as_slice() {
[] | [Stmt(_, Stmt_::Noop)] => None,
_ => Some((enabled, disabled)),
}
};
}
}
None
}
| {
self.is_any_rx() || self.is_pure
} |
server-errors.interceptor.ts | import { Injectable } from '@angular/core'; | HttpRequest,
HttpHandler,
HttpEvent,
HttpInterceptor,
HttpErrorResponse,
} from '@angular/common/http';
import { Observable } from 'rxjs/Observable';
import 'rxjs/add/operator/retry';
import 'rxjs/add/operator/do';
import { ErrorsService } from '../@services/error.service';
@Injectable()
export class ServerErrorsInterceptor implements HttpInterceptor {
constructor(private errorsService: ErrorsService) { }
intercept(request: HttpRequest<any>, next: HttpHandler): Observable<HttpEvent<any>> {
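// Forward the request unchanged and report any error to the shared ErrorsService.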
return next.handle(request).retry(0).do((event: HttpEvent<any>) => {}, (err: any) => {
this.errorsService.handleError(err)
});
}
} |
import { |
test_api.py | # Copyright (C) 2019 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Base TestCase for proposal api."""
import ddt
from ggrc.models import all_models
from integration.ggrc import TestCase, generator
from integration.ggrc.models import factories
from integration.ggrc.api_helper import Api
from integration.ggrc.review import build_reviewer_acl
@ddt.ddt
class TestReviewApi(TestCase):
"""Base TestCase class proposal api tests."""
def setUp(self):
super(TestReviewApi, self).setUp()
self.api = Api()
self.api.client.get("/login")
self.generator = generator.ObjectGenerator()
def test_simple_get(self):
"""Test simple get"""
with factories.single_commit():
program = factories.ProgramFactory()
review = factories.ReviewFactory(
email_message="test email message",
notification_type="email",
reviewable=program,
status=all_models.Review.STATES.UNREVIEWED,
)
resp = self.api.get(all_models.Review, review.id)
self.assert200(resp)
self.assertIn("review", resp.json)
resp_review = resp.json["review"]
self.assertEqual(all_models.Review.STATES.UNREVIEWED,
resp_review["status"])
self.assertEqual(all_models.Review.NotificationTypes.EMAIL_TYPE,
resp_review["notification_type"])
self.assertEqual("test email message",
resp_review["email_message"])
def test_collection_get(self):
"""Test simple collection get"""
with factories.single_commit():
review1 = factories.ReviewFactory(
status=all_models.Review.STATES.UNREVIEWED
)
review2 = factories.ReviewFactory(
status=all_models.Review.STATES.REVIEWED
)
resp = self.api.get_collection(all_models.Review,
[review1.id, review2.id])
self.assert200(resp)
self.assertIn("reviews_collection", resp.json)
self.assertIn("reviews", resp.json["reviews_collection"])
self.assertEquals(2, len(resp.json["reviews_collection"]["reviews"]))
def test_create_review(self):
"""Create review via API, check that single relationship is created"""
program = factories.ProgramFactory()
program_id = program.id
resp = self.api.post(
all_models.Review,
{
"review": {
"reviewable": {
"type": program.type,
"id": program.id,
},
"context": None,
"notification_type": "email",
"status": all_models.Review.STATES.UNREVIEWED, | self.assertEqual(201, resp.status_code)
review_id = resp.json["review"]["id"]
review = all_models.Review.query.get(review_id)
self.assertEqual(all_models.Review.STATES.UNREVIEWED, review.status)
self.assertEqual(program.type, review.reviewable_type)
self.assertEqual(program_id, review.reviewable_id)
control_review_rel_count = all_models.Relationship.query.filter(
all_models.Relationship.source_id == review.id,
all_models.Relationship.source_type == review.type,
all_models.Relationship.destination_id == program_id,
all_models.Relationship.destination_type == program.type,
).union(
all_models.Relationship.query.filter(
all_models.Relationship.destination_id == review.id,
all_models.Relationship.destination_type == review.type,
all_models.Relationship.source_id == program_id,
all_models.Relationship.source_type == program.type,
)
).count()
self.assertEqual(1, control_review_rel_count)
def test_delete_review(self):
"""Test delete review via API"""
with factories.single_commit():
program = factories.ProgramFactory()
program_id = program.id
review = factories.ReviewFactory(reviewable=program)
review_id = review.id
resp = self.api.delete(review)
self.assert200(resp)
review = all_models.Review.query.get(review_id)
program = all_models.Program.query.get(program_id)
self.assertIsNone(review)
self.assertEquals(0, len(program.related_objects(_types=["Review"])))
def test_last_reviewed(self):
"""last_reviewed_by, last_reviewed_by should be set if reviewed"""
program = factories.ProgramFactory()
resp, review = self.generator.generate_object(
all_models.Review,
{
"reviewable": {
"type": program.type,
"id": program.id,
},
"context": None,
"status": all_models.Review.STATES.UNREVIEWED,
"access_control_list": build_reviewer_acl(),
"notification_type": all_models.Review.NotificationTypes.EMAIL_TYPE
},
)
review_id = review.id
resp = self.api.put(
review,
{
"status": all_models.Review.STATES.REVIEWED,
},
)
self.assert200(resp)
self.assertIsNotNone(resp.json["review"]["last_reviewed_by"])
self.assertIsNotNone(resp.json["review"]["last_reviewed_at"])
review = all_models.Review.query.get(review_id)
self.assertIsNotNone(review.last_reviewed_by)
self.assertIsNotNone(review.last_reviewed_at)
def test_reviewable_revisions(self):
"""Check that proper revisions are created"""
program = factories.ProgramFactory()
resp, review = self.generator.generate_object(
all_models.Review,
{
"reviewable": {
"type": program.type,
"id": program.id,
},
"context": None,
"status": all_models.Review.STATES.UNREVIEWED,
"access_control_list": build_reviewer_acl(),
"notification_type": all_models.Review.NotificationTypes.EMAIL_TYPE
},
)
program_id = program.id
reviewable = review.reviewable
program_revisions = all_models.Revision.query.filter_by(
resource_id=program_id,
resource_type=program.type
).order_by(
all_models.Revision.id,
).all()
self.assertEquals(2, len(program_revisions))
self.assertEquals(all_models.Review.STATES.UNREVIEWED,
program_revisions[0].content["review_status"])
self.assertEquals(all_models.Review.STATES.UNREVIEWED,
program_revisions[1].content["review_status"])
resp = self.api.put(
review,
{
"status": all_models.Review.STATES.REVIEWED,
},
)
self.assert200(resp)
program_revisions = all_models.Revision.query.filter_by(
resource_id=program_id,
resource_type=program.type
).order_by(
all_models.Revision.id,
).all()
self.assertEquals(3, len(program_revisions))
self.assertEquals(all_models.Review.STATES.REVIEWED,
program_revisions[2].content["review_status"])
resp = self.api.put(
reviewable,
{
"description": "some new description"
}
)
self.assert200(resp)
program_revisions = all_models.Revision.query.filter_by(
resource_id=program_id,
resource_type=program.type
).order_by(
all_models.Revision.id,
).all()
self.assertEquals(4, len(program_revisions))
self.assertEquals(all_models.Review.STATES.UNREVIEWED,
program_revisions[3].content["review_status"]) | "access_control_list": build_reviewer_acl()
},
},
) |
model_skopes_rules.py | """Skopes rules """
import uuid
import os
import datatable as dt
import numpy as np
from h2oaicore.models import CustomModel
from sklearn.preprocessing import LabelEncoder
from h2oaicore.systemutils import physical_cores_count
from h2oaicore.systemutils import user_dir, remove, config
from h2oaicore.systemutils import make_experiment_logger, loggerinfo, loggerwarning, loggerdebug
class SKOPE_RULES(CustomModel):
_regression = False
_binary = True
_multiclass = False
_display_name = "SKOPE RULES"
_description = "SKOPE RULES"
# using git master because pypi is very out of date (Jan 2020) but need Sept 1-ish master with fix for updated scikit-learn
_modules_needed_by_name = ['git+https://github.com/scikit-learn-contrib/skope-rules.git']
@staticmethod
def do_acceptance_test():
return True
def set_default_params(self, accuracy=None, time_tolerance=None,
interpretability=None, **kwargs):
# Fill up parameters we care about
self.params = dict(random_state=kwargs.get("random_state", 1234),
max_depth_duplication=None, n_estimators=10,
precision_min=0.5, recall_min=0.01, max_samples=0.8,
max_samples_features=1.0, max_depth=3,
max_features="auto", min_samples_split=2,
bootstrap=False, bootstrap_features=False)
def | (self, accuracy=10, **kwargs):
if accuracy > 8:
max_depth_duplication = [None, 2, 3]
n_estimators = [10, 20, 40]
precision_min = [0.1, 0.2, 0.3]
recall_min = [0.01, 0.05]
max_samples = [0.5, 0.8, 1.0]
max_samples_features = [0.5, 0.8, 1.0]
max_depth = [3, 4, 5]
max_features = ["sqrt", "log2", "auto"]
min_samples_split = [2, 11, 21]
bootstrap = [True, False]
bootstrap_features = [True, False]
elif accuracy >= 5:
max_depth_duplication = [None]
n_estimators = [10, 20]
precision_min = [0.1, 0.2, 0.3]
recall_min = [0.01]
max_samples = [0.8, 1.0]
max_samples_features = [1.0]
max_depth = [3, 4]
max_features = ["sqrt", "log2", "auto"]
min_samples_split = [2, 5, 11]
bootstrap = [True, False]
bootstrap_features = [True, False]
else:
max_depth_duplication = [None]
n_estimators = [10]
precision_min = [0.1, 0.2]
recall_min = [0.01]
max_samples = [0.8, 1.0]
max_samples_features = [0.8, 1.0]
max_depth = [3, 4]
max_features = ["auto"]
min_samples_split = [2]
bootstrap = [True, False]
bootstrap_features = [True, False]
self.params["max_depth_duplication"] = np.random.choice(max_depth_duplication)
self.params["n_estimators"] = np.random.choice(n_estimators)
self.params["precision_min"] = np.random.choice(precision_min)
self.params["recall_min"] = np.random.choice(recall_min)
self.params["max_samples"] = np.random.choice(max_samples)
self.params["max_samples_features"] = np.random.choice(max_samples_features)
self.params["max_depth"] = np.random.choice(max_depth)
self.params["max_features"] = np.random.choice(max_features)
self.params["min_samples_split"] = np.random.choice(min_samples_split)
self.params["bootstrap"] = np.random.choice(bootstrap)
self.params["bootstrap_features"] = np.random.choice(bootstrap_features)
def _create_tmp_folder(self, logger):
# Create a temp folder to store files
# Set the default value without context available (required to pass acceptance test)
tmp_folder = os.path.join(user_dir(), "%s_SKOPE_model_folder" % uuid.uuid4())
# Make a real tmp folder when experiment is available
if self.context and self.context.experiment_id:
tmp_folder = os.path.join(self.context.experiment_tmp_dir, "%s_SKOPE_model_folder" % uuid.uuid4())
# Now let's try to create that folder
try:
os.mkdir(tmp_folder)
except PermissionError:
# This should not occur, so log a warning
loggerwarning(logger, "SKOPE was denied temp folder creation rights")
tmp_folder = os.path.join(user_dir(), "%s_SKOPE_model_folder" % uuid.uuid4())
os.mkdir(tmp_folder)
except FileExistsError:
# We should never be here since temp dir name is expected to be unique
loggerwarning(logger, "SKOPE temp folder already exists")
tmp_folder = os.path.join(self.context.experiment_tmp_dir, "%s_SKOPE_model_folder" % uuid.uuid4())
os.mkdir(tmp_folder)
except:
# Revert to temporary file path
tmp_folder = os.path.join(user_dir(), "%s_SKOPE_model_folder" % uuid.uuid4())
os.mkdir(tmp_folder)
loggerinfo(logger, "SKOPE temp folder {}".format(tmp_folder))
return tmp_folder
def fit(self, X, y, sample_weight=None, eval_set=None, sample_weight_eval_set=None, **kwargs):
orig_cols = list(X.names)
import pandas as pd
import numpy as np
from skrules import SkopeRules
from sklearn.preprocessing import OneHotEncoder
from collections import Counter
# Get the logger if it exists
logger = None
if self.context and self.context.experiment_id:
logger = make_experiment_logger(experiment_id=self.context.experiment_id,
tmp_dir=self.context.tmp_dir,
experiment_tmp_dir=self.context.experiment_tmp_dir)
# Set up temp folder
tmp_folder = self._create_tmp_folder(logger)
# Set up model
if self.num_classes >= 2:
lb = LabelEncoder()
lb.fit(self.labels)
y = lb.transform(y)
model = SkopeRules(max_depth_duplication=self.params["max_depth_duplication"],
n_estimators=self.params["n_estimators"],
precision_min=self.params["precision_min"],
recall_min=self.params["recall_min"],
max_samples=self.params["max_samples"],
max_samples_features=self.params["max_samples_features"],
max_depth=self.params["max_depth"],
max_features=self.params["max_features"],
min_samples_split=self.params["min_samples_split"],
bootstrap=self.params["bootstrap"],
bootstrap_features=self.params["bootstrap_features"],
random_state=self.params["random_state"],
feature_names=orig_cols)
else:
# Skopes doesn't work for regression
loggerinfo(logger, "PASS, no skopes model")
pass
# Find the datatypes
X = X.to_pandas()
X.columns = orig_cols
# Inspect the column data types
X_datatypes = [str(item) for item in list(X.dtypes)]
# Change all float32 values to float64
for ii in range(len(X_datatypes)):
if X_datatypes[ii] == 'float32':
X = X.astype({orig_cols[ii]: np.float64})
X_datatypes = [str(item) for item in list(X.dtypes)]
# List the categorical and numerical features
self.X_categorical = [orig_cols[col_count] for col_count in range(len(orig_cols)) if
(X_datatypes[col_count] == 'category') or (X_datatypes[col_count] == 'object')]
self.X_numeric = [item for item in orig_cols if item not in self.X_categorical]
# Find the levels and mode for each categorical feature
# for use in the test set
        self.train_levels = {}
        self.train_mode = {}
for item in self.X_categorical:
self.train_levels[item] = list(set(X[item]))
self.train_mode[item] = Counter(X[item]).most_common(1)[0][0]
# One hot encode the categorical features
# And replace missing values with a Missing category
if len(self.X_categorical) > 0:
loggerinfo(logger, "PCategorical encode")
for colname in self.X_categorical:
X[colname] = list(X[colname].fillna("Missing"))
self.enc = OneHotEncoder(handle_unknown='ignore')
self.enc.fit(X[self.X_categorical])
self.encoded_categories = list(self.enc.get_feature_names(input_features=self.X_categorical))
X_enc = self.enc.transform(X[self.X_categorical]).toarray()
X = pd.concat([X[self.X_numeric], pd.DataFrame(X_enc, columns=self.encoded_categories)], axis=1)
# Replace missing values with a missing value code
if len(self.X_numeric) > 0:
for colname in self.X_numeric:
X[colname] = list(X[colname].fillna(-999))
model.fit(np.array(X), np.array(y))
# Find the rule list
self.rule_list = model.rules_
# Calculate feature importances
var_imp = []
for var in orig_cols:
var_imp.append(sum(int(var in item[0]) for item in self.rule_list))
if max(var_imp) != 0:
importances = list(np.array(var_imp) / max(var_imp))
else:
importances = [1] * len(var_imp)
pd.DataFrame(model.rules_, columns=['Rule', '(Precision, Recall, nb)']).to_csv(
os.path.join(tmp_folder, 'Skope_rules.csv'), index=False)
self.mean_target = np.array(sum(y) / len(y))
# Set model properties
self.set_model_properties(model=model,
features=list(X.columns),
importances=importances,
iterations=self.params['n_estimators'])
def predict(self, X, **kwargs):
orig_cols = list(X.names)
        import numpy as np
        import pandas as pd
X = dt.Frame(X)
# Find datatypes
X = X.to_pandas()
X_datatypes = [str(item) for item in list(X.dtypes)]
# Change float 32 values to float 64
for ii in range(len(X_datatypes)):
if X_datatypes[ii] == 'float32':
X = X.astype({orig_cols[ii]: np.float64})
# Replace missing values with a missing category
# Replace categories that weren't in the training set with the mode
if len(self.X_categorical) > 0:
for colname in self.X_categorical:
X[colname] = list(X[colname].fillna("Missing"))
for label in self.X_categorical:
# Replace anything not in the test set
train_categories = self.train_levels[label]
X_label = np.array(X[label])
mmode = self.train_mode[label]
X_label[~np.isin(X_label, train_categories)] = mmode
X[label] = X_label
# Replace missing values with a missing value code
if len(self.X_numeric) > 0:
for colname in self.X_numeric:
X[colname] = list(X[colname].fillna(-999))
# Get model
model, _, _, _ = self.get_model_properties()
# One hot encode categorical features
if len(self.X_categorical) > 0:
X_enc = self.enc.transform(X[self.X_categorical]).toarray()
X = pd.concat([X[self.X_numeric], pd.DataFrame(X_enc, columns=self.encoded_categories)], axis=1)
# Make predictions on the test set
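        # Note (added comment): score_top_rules gives larger values when a higher-precision
        # rule fires for a row; dividing by the rule count roughly normalizes it into [0, 1].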
preds = model.score_top_rules(X) / len(self.rule_list)
preds = np.array(preds)
epsilon = 10 ** (-3)
preds[np.isnan(preds)] = self.mean_target
preds[preds > 1 - epsilon] = 1.0 - epsilon
preds[preds < 0 + epsilon] = 0.0 + epsilon
return preds
| mutate_params |
util.py | #!/usr/bin/env python3
from pathlib import Path
def get_file(fname): | return Path(__file__).resolve().parent / fname |
|
kerasify.py | import numpy as np
import struct
LAYER_DENSE = 1
LAYER_CONVOLUTION2D = 2
LAYER_FLATTEN = 3
LAYER_ELU = 4
LAYER_ACTIVATION = 5
LAYER_MAXPOOLING2D = 6
LAYER_LSTM = 7
LAYER_EMBEDDING = 8
ACTIVATION_LINEAR = 1
ACTIVATION_RELU = 2
ACTIVATION_SOFTPLUS = 3
ACTIVATION_SIGMOID = 4
ACTIVATION_TANH = 5
ACTIVATION_HARD_SIGMOID = 6
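# Binary layout written by export_model below: a uint32 layer count, then for each layer a
# uint32 type tag followed by its shape fields, its flattened float32 weights and, where
# applicable, a uint32 activation tag.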
def write_floats(file, floats):
    '''
    Writes floats to file in chunks of 1024; prevents memory explosion when
    writing very large arrays to disk with struct.pack().
    '''
step = 1024
written = 0
for i in np.arange(0, len(floats), step):
remaining = min(len(floats) - i, step)
written += remaining
file.write(struct.pack('=%sf' % remaining, *floats[i:i+remaining]))
assert written == len(floats)
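# Illustrative usage sketch (not part of the original file; the filename is made up):
#
#   with open('example_weights.bin', 'wb') as fout:
#       write_floats(fout, np.arange(2048, dtype=np.float32))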
def export_model(model, filename):
with open(filename, 'wb') as f:
def write_activation(activation):
if activation == 'linear':
f.write(struct.pack('I', ACTIVATION_LINEAR))
elif activation == 'relu':
f.write(struct.pack('I', ACTIVATION_RELU))
elif activation == 'softplus':
f.write(struct.pack('I', ACTIVATION_SOFTPLUS))
elif activation == 'tanh':
f.write(struct.pack('I', ACTIVATION_TANH))
elif activation == 'sigmoid':
f.write(struct.pack('I', ACTIVATION_SIGMOID))
elif activation == 'hard_sigmoid':
f.write(struct.pack('I', ACTIVATION_HARD_SIGMOID))
else:
assert False, "Unsupported activation type: %s" % activation
model_layers = [l for l in model.layers if type(l).__name__ not in ['Dropout']]
num_layers = len(model_layers)
f.write(struct.pack('I', num_layers))
for layer in model_layers:
layer_type = type(layer).__name__
if layer_type == 'Dense':
weights = layer.get_weights()[0]
biases = layer.get_weights()[1]
activation = layer.get_config()['activation']
f.write(struct.pack('I', LAYER_DENSE))
f.write(struct.pack('I', weights.shape[0]))
f.write(struct.pack('I', weights.shape[1]))
f.write(struct.pack('I', biases.shape[0]))
weights = weights.flatten()
biases = biases.flatten()
write_floats(f, weights)
write_floats(f, biases)
write_activation(activation)
elif layer_type == 'Convolution2D':
assert layer.border_mode == 'valid', "Only border_mode=valid is implemented"
weights = layer.get_weights()[0]
biases = layer.get_weights()[1]
activation = layer.get_config()['activation']
# The kernel is accessed in reverse order. To simplify the C side we'll
# flip the weight matrix for each kernel.
weights = weights[:,:,::-1,::-1]
f.write(struct.pack('I', LAYER_CONVOLUTION2D))
f.write(struct.pack('I', weights.shape[0]))
f.write(struct.pack('I', weights.shape[1]))
f.write(struct.pack('I', weights.shape[2]))
f.write(struct.pack('I', weights.shape[3]))
f.write(struct.pack('I', biases.shape[0]))
weights = weights.flatten()
biases = biases.flatten()
write_floats(f, weights)
write_floats(f, biases)
write_activation(activation)
elif layer_type == 'Flatten':
f.write(struct.pack('I', LAYER_FLATTEN))
elif layer_type == 'ELU':
f.write(struct.pack('I', LAYER_ELU))
f.write(struct.pack('f', layer.alpha))
elif layer_type == 'Activation':
activation = layer.get_config()['activation']
f.write(struct.pack('I', LAYER_ACTIVATION))
write_activation(activation)
elif layer_type == 'MaxPooling2D':
assert layer.border_mode == 'valid', "Only border_mode=valid is implemented"
pool_size = layer.get_config()['pool_size']
f.write(struct.pack('I', LAYER_MAXPOOLING2D))
f.write(struct.pack('I', pool_size[0]))
f.write(struct.pack('I', pool_size[1]))
elif layer_type == 'LSTM':
inner_activation = layer.get_config()['inner_activation']
activation = layer.get_config()['activation']
return_sequences = int(layer.get_config()['return_sequences'])
weights = layer.get_weights()
W_i = weights[0]
U_i = weights[1]
b_i = weights[2]
W_c = weights[3]
U_c = weights[4]
b_c = weights[5]
W_f = weights[6]
U_f = weights[7]
b_f = weights[8]
W_o = weights[9]
U_o = weights[10]
b_o = weights[11]
f.write(struct.pack('I', LAYER_LSTM))
f.write(struct.pack('I', W_i.shape[0]))
f.write(struct.pack('I', W_i.shape[1]))
f.write(struct.pack('I', U_i.shape[0]))
f.write(struct.pack('I', U_i.shape[1]))
f.write(struct.pack('I', b_i.shape[0]))
f.write(struct.pack('I', W_f.shape[0]))
f.write(struct.pack('I', W_f.shape[1]))
f.write(struct.pack('I', U_f.shape[0]))
f.write(struct.pack('I', U_f.shape[1]))
f.write(struct.pack('I', b_f.shape[0]))
f.write(struct.pack('I', W_c.shape[0]))
f.write(struct.pack('I', W_c.shape[1]))
f.write(struct.pack('I', U_c.shape[0]))
f.write(struct.pack('I', U_c.shape[1]))
f.write(struct.pack('I', b_c.shape[0]))
f.write(struct.pack('I', W_o.shape[0]))
f.write(struct.pack('I', W_o.shape[1]))
f.write(struct.pack('I', U_o.shape[0]))
f.write(struct.pack('I', U_o.shape[1]))
f.write(struct.pack('I', b_o.shape[0]))
W_i = W_i.flatten()
U_i = U_i.flatten()
b_i = b_i.flatten()
W_f = W_f.flatten()
U_f = U_f.flatten()
b_f = b_f.flatten()
W_c = W_c.flatten()
U_c = U_c.flatten()
b_c = b_c.flatten()
W_o = W_o.flatten()
U_o = U_o.flatten()
b_o = b_o.flatten()
write_floats(f, W_i)
write_floats(f, U_i)
write_floats(f, b_i)
write_floats(f, W_f)
write_floats(f, U_f)
write_floats(f, b_f)
write_floats(f, W_c)
write_floats(f, U_c)
write_floats(f, b_c)
write_floats(f, W_o)
write_floats(f, U_o)
write_floats(f, b_o)
write_activation(inner_activation)
write_activation(activation)
f.write(struct.pack('I', return_sequences))
elif layer_type == 'Embedding':
weights = layer.get_weights()[0]
f.write(struct.pack('I', LAYER_EMBEDDING)) | f.write(struct.pack('I', weights.shape[0]))
f.write(struct.pack('I', weights.shape[1]))
weights = weights.flatten()
write_floats(f, weights)
else:
assert False, "Unsupported layer type: %s" % layer_type | |
tool.go | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package history
import (
"github.com/apache/servicecomb-kie/pkg/model"
)
//clearRevisionKV clears attributes which don't need to be returned to the client side
func clearRevisionKV(revision *model.LabelRevisionDoc) | {
for _, v := range revision.KVs {
v.Domain = ""
v.Project = ""
v.LabelID = ""
}
} |
|
api.go | // Copyright 2022 Tigris Data, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1
import (
"context"
"net/http"
"github.com/fullstorydev/grpchan/inprocgrpc"
"github.com/go-chi/chi/v5"
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
jsoniter "github.com/json-iterator/go"
"github.com/rs/zerolog/log"
api "github.com/tigrisdata/tigris/api/server/v1"
"github.com/tigrisdata/tigris/cdc"
"github.com/tigrisdata/tigris/server/metadata"
"github.com/tigrisdata/tigris/server/transaction"
"github.com/tigrisdata/tigris/store/kv"
"github.com/tigrisdata/tigris/util"
ulog "github.com/tigrisdata/tigris/util/log"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
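// The path constants below are the REST prefixes wired up in RegisterHTTP; the "/*"
// patterns let chi hand everything under a prefix to the grpc-gateway mux.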
const (
databasePath = "/databases"
databasePathPattern = databasePath + "/*"
collectionPath = databasePath + "/collections"
collectionPathPattern = collectionPath + "/*"
documentPath = collectionPath + "/documents"
documentPathPattern = documentPath + "/*"
infoPath = "/info"
)
type apiService struct {
api.UnimplementedTigrisServer
kvStore kv.KeyValueStore
txMgr *transaction.Manager
encoder metadata.Encoder
tenantMgr *metadata.TenantManager
cdcMgr *cdc.Manager
queryLifecycleFactory *QueryLifecycleFactory
queryRunnerFactory *QueryRunnerFactory
}
func newApiService(kv kv.KeyValueStore) *apiService |
func (s *apiService) RegisterHTTP(router chi.Router, inproc *inprocgrpc.Channel) error {
mux := runtime.NewServeMux(runtime.WithMarshalerOption(runtime.MIMEWildcard, &api.CustomMarshaler{
JSONBuiltin: &runtime.JSONBuiltin{},
}))
if err := api.RegisterTigrisHandlerClient(context.TODO(), mux, api.NewTigrisClient(inproc)); err != nil {
return err
}
api.RegisterTigrisServer(inproc, s)
router.HandleFunc(apiPathPrefix+databasePathPattern, func(w http.ResponseWriter, r *http.Request) {
mux.ServeHTTP(w, r)
})
router.HandleFunc(apiPathPrefix+collectionPathPattern, func(w http.ResponseWriter, r *http.Request) {
mux.ServeHTTP(w, r)
})
router.HandleFunc(apiPathPrefix+documentPathPattern, func(w http.ResponseWriter, r *http.Request) {
mux.ServeHTTP(w, r)
})
router.HandleFunc(apiPathPrefix+infoPath, func(w http.ResponseWriter, r *http.Request) {
mux.ServeHTTP(w, r)
})
return nil
}
func (s *apiService) RegisterGRPC(grpc *grpc.Server) error {
api.RegisterTigrisServer(grpc, s)
return nil
}
func (s *apiService) BeginTransaction(ctx context.Context, r *api.BeginTransactionRequest) (*api.BeginTransactionResponse, error) {
if err := r.Validate(); err != nil {
return nil, err
}
_, txCtx, err := s.txMgr.StartTx(ctx, true)
if err != nil {
return nil, err
}
return &api.BeginTransactionResponse{
TxCtx: txCtx,
}, nil
}
func (s *apiService) CommitTransaction(ctx context.Context, r *api.CommitTransactionRequest) (*api.CommitTransactionResponse, error) {
if err := r.Validate(); err != nil {
return nil, err
}
tx, err := s.txMgr.GetTx(r.TxCtx)
if err != nil {
return nil, err
}
if err = tx.Commit(ctx); err != nil {
return nil, err
}
_ = tx.Context().ExecuteCB()
return &api.CommitTransactionResponse{}, nil
}
func (s *apiService) RollbackTransaction(ctx context.Context, r *api.RollbackTransactionRequest) (*api.RollbackTransactionResponse, error) {
if err := r.Validate(); err != nil {
return nil, err
}
tx, err := s.txMgr.GetTx(r.TxCtx)
if err != nil {
return nil, err
}
if err = tx.Rollback(ctx); err != nil {
// ToDo: Do we need to return here in this case? Or silently return success?
return nil, err
}
return &api.RollbackTransactionResponse{}, nil
}
// Insert inserts new documents and returns an error if a document already exists.
// Operations are performed individually, not as an actual batch.
func (s *apiService) Insert(ctx context.Context, r *api.InsertRequest) (*api.InsertResponse, error) {
if err := r.Validate(); err != nil {
return nil, err
}
resp, err := s.Run(ctx, &ReqOptions{
txCtx: api.GetTransaction(r),
queryRunner: s.queryRunnerFactory.GetInsertQueryRunner(r),
})
if err != nil {
return nil, err
}
return &api.InsertResponse{
Status: resp.status,
Metadata: &api.ResponseMetadata{
CreatedAt: resp.createdAt.GetProtoTS(),
},
}, nil
}
func (s *apiService) Replace(ctx context.Context, r *api.ReplaceRequest) (*api.ReplaceResponse, error) {
if err := r.Validate(); err != nil {
return nil, err
}
resp, err := s.Run(ctx, &ReqOptions{
txCtx: api.GetTransaction(r),
queryRunner: s.queryRunnerFactory.GetReplaceQueryRunner(r),
})
if err != nil {
return nil, err
}
return &api.ReplaceResponse{
Status: resp.status,
Metadata: &api.ResponseMetadata{
CreatedAt: resp.createdAt.GetProtoTS(),
},
}, nil
}
func (s *apiService) Update(ctx context.Context, r *api.UpdateRequest) (*api.UpdateResponse, error) {
if err := r.Validate(); err != nil {
return nil, err
}
resp, err := s.Run(ctx, &ReqOptions{
txCtx: api.GetTransaction(r),
queryRunner: s.queryRunnerFactory.GetUpdateQueryRunner(r),
})
if err != nil {
return nil, err
}
return &api.UpdateResponse{
Status: resp.status,
ModifiedCount: resp.modifiedCount,
Metadata: &api.ResponseMetadata{
UpdatedAt: resp.updatedAt.GetProtoTS(),
},
}, nil
}
func (s *apiService) Delete(ctx context.Context, r *api.DeleteRequest) (*api.DeleteResponse, error) {
if err := r.Validate(); err != nil {
return nil, err
}
resp, err := s.Run(ctx, &ReqOptions{
txCtx: api.GetTransaction(r),
queryRunner: s.queryRunnerFactory.GetDeleteQueryRunner(r),
})
if err != nil {
return nil, err
}
return &api.DeleteResponse{
Status: resp.status,
Metadata: &api.ResponseMetadata{
UpdatedAt: resp.updatedAt.GetProtoTS(),
},
}, nil
}
func (s *apiService) Read(r *api.ReadRequest, stream api.Tigris_ReadServer) error {
if err := r.Validate(); err != nil {
return err
}
_, err := s.Run(stream.Context(), &ReqOptions{
txCtx: api.GetTransaction(r),
queryRunner: s.queryRunnerFactory.GetStreamingQueryRunner(r, stream),
})
if err != nil {
return err
}
return nil
}
func (s *apiService) CreateOrUpdateCollection(ctx context.Context, r *api.CreateOrUpdateCollectionRequest) (*api.CreateOrUpdateCollectionResponse, error) {
if err := r.Validate(); err != nil {
return nil, err
}
runner := s.queryRunnerFactory.GetCollectionQueryRunner()
runner.SetCreateOrUpdateCollectionReq(r)
resp, err := s.Run(ctx, &ReqOptions{
txCtx: api.GetTransaction(r),
queryRunner: runner,
})
if err != nil {
return nil, err
}
return &api.CreateOrUpdateCollectionResponse{
Status: resp.status,
Message: "collection created successfully",
}, nil
}
func (s *apiService) DropCollection(ctx context.Context, r *api.DropCollectionRequest) (*api.DropCollectionResponse, error) {
if err := r.Validate(); err != nil {
return nil, err
}
runner := s.queryRunnerFactory.GetCollectionQueryRunner()
runner.SetDropCollectionReq(r)
resp, err := s.Run(ctx, &ReqOptions{
txCtx: api.GetTransaction(r),
queryRunner: runner,
})
if err != nil {
return nil, err
}
return &api.DropCollectionResponse{
Status: resp.status,
Message: "collection dropped successfully",
}, nil
}
func (s *apiService) ListCollections(ctx context.Context, r *api.ListCollectionsRequest) (*api.ListCollectionsResponse, error) {
if err := r.Validate(); err != nil {
return nil, err
}
runner := s.queryRunnerFactory.GetCollectionQueryRunner()
runner.SetListCollectionReq(r)
resp, err := s.Run(ctx, &ReqOptions{
txCtx: api.GetTransaction(r),
queryRunner: runner,
})
if err != nil {
return nil, err
}
return resp.Response.(*api.ListCollectionsResponse), nil
}
func (s *apiService) ListDatabases(ctx context.Context, r *api.ListDatabasesRequest) (*api.ListDatabasesResponse, error) {
if err := r.Validate(); err != nil {
return nil, err
}
queryRunner := s.queryRunnerFactory.GetDatabaseQueryRunner()
queryRunner.SetListDatabaseReq(r)
resp, err := s.Run(ctx, &ReqOptions{
queryRunner: queryRunner,
})
if err != nil {
return nil, err
}
return resp.Response.(*api.ListDatabasesResponse), nil
}
func (s *apiService) CreateDatabase(ctx context.Context, r *api.CreateDatabaseRequest) (*api.CreateDatabaseResponse, error) {
if err := r.Validate(); err != nil {
return nil, err
}
queryRunner := s.queryRunnerFactory.GetDatabaseQueryRunner()
queryRunner.SetCreateDatabaseReq(r)
resp, err := s.Run(ctx, &ReqOptions{
queryRunner: queryRunner,
metadataChange: true,
})
if err != nil {
return nil, err
}
return &api.CreateDatabaseResponse{
Status: resp.status,
Message: "database created successfully",
}, nil
}
func (s *apiService) DropDatabase(ctx context.Context, r *api.DropDatabaseRequest) (*api.DropDatabaseResponse, error) {
if err := r.Validate(); err != nil {
return nil, err
}
queryRunner := s.queryRunnerFactory.GetDatabaseQueryRunner()
queryRunner.SetDropDatabaseReq(r)
resp, err := s.Run(ctx, &ReqOptions{
queryRunner: queryRunner,
metadataChange: true,
})
if err != nil {
return nil, err
}
return &api.DropDatabaseResponse{
Status: resp.status,
Message: "database dropped successfully",
}, nil
}
func (s *apiService) DescribeCollection(ctx context.Context, r *api.DescribeCollectionRequest) (*api.DescribeCollectionResponse, error) {
if err := r.Validate(); err != nil {
return nil, err
}
runner := s.queryRunnerFactory.GetCollectionQueryRunner()
runner.SetDescribeCollectionReq(r)
resp, err := s.Run(ctx, &ReqOptions{
queryRunner: runner,
})
if err != nil {
return nil, err
}
return resp.Response.(*api.DescribeCollectionResponse), nil
}
func (s *apiService) DescribeDatabase(ctx context.Context, r *api.DescribeDatabaseRequest) (*api.DescribeDatabaseResponse, error) {
if err := r.Validate(); err != nil {
return nil, err
}
runner := s.queryRunnerFactory.GetDatabaseQueryRunner()
runner.SetDescribeDatabaseReq(r)
resp, err := s.Run(ctx, &ReqOptions{
queryRunner: runner,
})
if err != nil {
return nil, err
}
return resp.Response.(*api.DescribeDatabaseResponse), nil
}
func (s *apiService) GetInfo(_ context.Context, _ *api.GetInfoRequest) (*api.GetInfoResponse, error) {
return &api.GetInfoResponse{
ServerVersion: util.Version,
}, nil
}
func (s *apiService) Run(ctx context.Context, req *ReqOptions) (*Response, error) {
queryLifecycle := s.queryLifecycleFactory.Get()
resp, err := queryLifecycle.run(ctx, req)
if err == kv.ErrConflictingTransaction {
return nil, api.Errorf(codes.Aborted, err.Error())
}
return resp, err
}
func (s *apiService) Stream(r *api.StreamRequest, stream api.Tigris_StreamServer) error {
if err := r.Validate(); err != nil {
return err
}
publisher := s.cdcMgr.GetPublisher(r.GetDb())
streamer, err := publisher.NewStreamer(s.kvStore)
if err != nil {
return err
}
defer streamer.Close()
for {
select {
case tx, ok := <-streamer.Txs:
if !ok {
return api.Error(codes.Canceled, "buffer overflow")
}
changes := make([]*api.StreamChange, 0)
for _, op := range tx.Ops {
data, err := jsoniter.Marshal(op)
if err != nil {
return err
}
changes = append(changes, &api.StreamChange{
CollectionName: "todo", // TODO: CDC extract name from op
Data: data,
})
}
if err := stream.Send(&api.StreamResponse{
Changes: changes,
}); ulog.E(err) {
return err
}
}
}
}
| {
u := &apiService{
kvStore: kv,
txMgr: transaction.NewManager(kv),
}
ctx := context.TODO()
tx, err := u.txMgr.StartTxWithoutTracking(ctx)
if ulog.E(err) {
log.Fatal().Err(err).Msgf("error starting server: starting transaction failed")
}
tenantMgr := metadata.NewTenantManager()
if err := tenantMgr.Reload(ctx, tx); ulog.E(err) {
// ToDo: no need to panic, probably handle through async thread.
log.Err(err).Msgf("error starting server: reloading tenants failed")
}
_ = tx.Commit(ctx)
u.tenantMgr = tenantMgr
u.encoder = metadata.NewEncoder(tenantMgr)
u.cdcMgr = cdc.NewManager()
u.queryLifecycleFactory = NewQueryLifecycleFactory(u.txMgr, u.tenantMgr, u.cdcMgr)
u.queryRunnerFactory = NewQueryRunnerFactory(u.txMgr, u.encoder, u.cdcMgr)
return u
} |
tests.py | #!/usr/bin/env python
# coding: utf-8
"""
tests
~~~~~
Provides the tests for opts.
:copyright: 2010 by Daniel Neuhäuser
:license: BSD, see LICENSE for details
"""
import unittest
import sys
from decimal import Decimal
from StringIO import StringIO
from opts import (Node, Option, BooleanOption, IntOption, FloatOption,
DecimalOption, MultipleOptions, Positional, IntPositional,
FloatPositional, DecimalPositional, Command, Parser)
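# Local xrange replacement: unlike the builtin, it accepts float and Decimal bounds and
# steps, and it includes the stop value in the generated sequence.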
def xrange(*args):
if len(args) == 1:
start, stop, step = 0, args[0], 1
elif len(args) == 2:
start, stop, step = args[0], args[1], 1
else:
start, stop, step = args
i = start
while i <= stop:
yield i
i += step
class TestCase(unittest.TestCase):
def assertContains(self, container, item):
if item not in container:
raise AssertionError('{0!r} not in {1!r}'.format(item, container))
def assertContainsAll(self, container, items):
for item in items:
self.assertContains(container, item)
class TestNode(TestCase):
def test_short_description_fallback(self):
n = Node()
self.assertEqual(n.short_description, u"No short description.")
def test_long_description_fallback(self):
n = Node()
self.assertEqual(n.long_description, u"No long description.")
def test_long_description_fallback_to_short(self):
n = Node(short_description=u"Foobar")
self.assertEqual(n.long_description, u"Foobar")
class TestOption(TestCase):
def test_valueerror_on_init(self):
self.assertRaises(ValueError, Option)
class TestBooleanOption(TestCase):
def test_evaluate(self):
o = BooleanOption(short="b")
p = Parser(options=dict(b=o))
self.assertEqual(p.evaluate([u'-b']), ({'b': True}, []))
o = BooleanOption(short="b", default=True)
p = Parser(options=dict(b=o))
self.assertEqual(p.evaluate(['-b']), ({'b': False}, []))
class TestNumberOptions(TestCase):
def test_intoption_evaluate(self):
self.make_test(xrange(-10, 10), IntOption(short='o'))
def test_floatoption_evaluate(self):
self.make_test(xrange(-10.0, 10.0, 0.5), FloatOption(short='o'))
def test_decimaloption_evaluate(self):
self.make_test(
xrange(Decimal('-10.0'), Decimal('10.0'), Decimal('0.5')),
DecimalOption(short='o')
)
def make_test(self, range, o):
p = Parser(options=dict(o=o))
for i in range:
self.assertEqual(p.evaluate([u'-o', unicode(i)]), ({'o': i}, []))
class TestMultipleOptions(TestCase):
def test_evaluate_no_quotes(self):
o = MultipleOptions(short='o')
p = Parser(options=dict(o=o))
self.assertEqual(
p.evaluate([u'-o', u'foo,bar,baz']),
({'o': [u'foo', u'bar', u'baz']}, [])
)
def test_evaluate_with_quotes(self):
o = MultipleOptions(short='o')
p = Parser(options=dict(o=o))
self.assertEqual(
p.evaluate([u'-o', u'foo,"bar,baz"']),
({'o': [u'foo', u'bar,baz']}, [])
)
self.assertEqual(
p.evaluate([u'-o', u'"foo,bar",baz']),
({'o': [u'foo,bar', u'baz']}, [])
)
class TestPositional(TestCase):
def test_evaluate(self):
p = Parser(positionals=[Positional('foo')])
self.assertEquals(p.evaluate([u'spam']), ({}, [u'spam']))
class TestNumberPositionals(TestCase):
def test_intpositional_evaluate(self):
self.make_test(xrange(10), IntPositional('foo'))
def test_floatpositional_evaluate(self):
        self.make_test(xrange(0, 10, 0.5), FloatPositional('foo'))
def test_decimalpositional_evaluate(self):
self.make_test(
xrange(Decimal('0'), Decimal('10.0'), Decimal('0.5')),
DecimalPositional('foo')
)
def make_test(self, range, p):
parser = Parser(positionals=[p])
for i in range:
self.assertEqual(parser.evaluate([unicode(i)]), ({}, [i]))
class TestCommand(TestCase):
def test_remaining_arguments(self):
c = Command(options={'a': Option('a')})
p = Parser(commands=dict(c=c))
self.assertEqual(
p.evaluate([u'c', u'foo']),
({'c': ({}, [u'foo'])}, [])
)
self.assertEqual(
p.evaluate([u'c', u'-a', u'foo']),
({'c': ({'a': u'foo'}, [])}, [])
)
self.assertEqual(
p.evaluate([u'c', u'-a', u'foo', u'bar']),
({u'c': ({'a': u'foo'}, [u'bar'])}, [])
)
def test_options(self):
class TestDeclarative(Command):
spam = Option('a', 'asomething')
eggs = Option('b', 'bsomething')
a = TestDeclarative()
b = Command(options={
'spam': Option('a', 'asomething'),
'eggs': Option('b', 'bsomething')})
for c in [a, b]:
p = Parser(commands=dict(c=c))
self.assertEqual(
p.evaluate([u'c', u'-a', u'foo']),
({'c': ({'spam': u'foo'}, [])}, [])
)
self.assertEqual(
p.evaluate([u'c', u'--asomething', u'foo']),
({'c': ({'spam': u'foo'}, [])}, [])
)
self.assertEqual(
p.evaluate([u'c', u'-b', u'foo']),
({'c': ({u'eggs': u'foo'}, [])}, [])
)
self.assertEqual(
p.evaluate([u'c', u'--bsomething', u'foo']),
({'c': ({u'eggs': u'foo'}, [])}, [])
)
def test_commands(self):
class TestDeclarative(Command):
spam = Command()
eggs = Command()
a = TestDeclarative()
b = Command(commands={
'spam': Command(),
'eggs': Command()})
cp = [u'script_name']
for c in [a, b]:
p = Parser(commands=dict(c=c))
self.assertEqual(
p.evaluate([u'c', u'spam']),
({'c': ({u'spam': ({}, [])}, [])}, [])
)
self.assertEqual(
p.evaluate([u'c', u'eggs']),
({'c': ({'eggs': ({}, [])}, [])}, [])
)
def test_abbreviations(self):
c = Command(
options={
'stack': Option(long='stack'),
'stash': Option(long='stash')},
commands={
'stack': Command(),
'stash': Command()})
p = Parser(commands=dict(c=c))
cp = [u'script_name']
for s in [u's', u'st', u'sta']:
cmd = [u'c', s]
result = ({'c': ({}, [s])}, [])
self.assertEqual(p.evaluate(cmd), result)
self.assertEqual(p.evaluate(cmd), result)
self.assertEqual(p.evaluate(cmd), result)
self.assertEqual(
p.evaluate([u'c', u'stac']),
({'c': ({u'stack': ({}, [])}, [])}, [])
)
self.assertEqual(
p.evaluate([u'c', u'stas']),
({'c': ({u'stash': ({}, [])}, [])}, [])
)
self.assertEqual(
p.evaluate([u'c', u'--stac', u'foo']),
({'c': ({u'stack': u'foo'}, [])}, [])
)
self.assertEqual(
p.evaluate([u'c', u'--stas', u'foo']),
({'c': ({u'stash': u'foo'}, [])}, [])
)
def test_disallow_abbreviated_commands(self):
class NewCommand(Command):
allow_abbreviated_commands = False
c = NewCommand(commands={
'foo': Command()
})
p = Parser(commands=dict(c=c))
self.assertEqual(p.evaluate([u'c', u'f']), ({'c': ({}, [u'f'])}, []))
def test_apply_defaults(self):
class FooParser(Parser):
activate = BooleanOption('a')
foo = Command(
options={
'spam': Option('a'),
'eggs': Option('b')
}
)
p = FooParser()
p.apply_defaults({
'activate': 'huhu',
'foo': {
'spam': 'bla',
'eggs': 'blubb'
}
})
self.assertEquals(p.options['activate'].default, 'huhu')
self.assertEquals(p.commands['foo'].options['spam'].default, 'bla')
self.assertEquals(p.commands['foo'].options['eggs'].default, 'blubb')
def test_getattr(self):
p = Parser(
options={
'activate': Option('a')
},
commands={
'foo': Command(options={
'spam': Option('b'),
'eggs': Option('c')
})
}
)
p.activate
p.foo
p.foo.spam
p.foo.eggs
def test_dynamically_adding_nodes(self):
p = Parser()
p.commands['foo'] = Command()
p.commands['foo'].options['a'] = BooleanOption('a')
p.options['bar'] = Option('b')
self.assertEquals(p.evaluate([u'-b', u'spam']), ({'bar': u'spam'}, []))
self.assertEquals(
p.evaluate([u'foo']),
({'foo': ({'a': False}, [])}, [])
)
self.assertEquals(
p.evaluate([u'foo', u'-a']),
({'foo': ({'a': True}, [])}, [])
)
class TestParser(TestCase):
def test_default_evaluate_arguments(self):
old_argv = sys.argv
enc = sys.stdin.encoding or sys.getdefaultencoding()
sys.argv = [s.encode(enc) for s in [u'script_name', u'foo', u'bar']]
p = Parser()
self.assertEqual(p.evaluate(), ({}, [u'foo', u'bar']))
sys.argv = old_argv
class OutputTest(TestCase):
def setUp(self):
self.out_file = StringIO()
self._old_argv = sys.argv
sys.argv = ['script']
def tearDown(self):
self.out_file = StringIO()
sys.argv = self._old_argv
class TestParserOutput(OutputTest):
def test_alternative_commands(self):
p = Parser(
commands={
'stack': Command(),
'stash': Command(),
},
out_file=self.out_file,
takes_arguments=False
)
for cmd in [u's', u'st', u'sta']:
self.assertRaises(SystemExit, p.evaluate, [cmd])
output = self.out_file.getvalue()
self.assertContains(output, u'usage: script [commands]')
self.assertContains(
output,
u'command "{0}" does not exist, did you mean?'.format(cmd)
)
self.assertContains(output, u'stack')
self.assertContains(output, u'stash')
def test_alternative_options(self):
p = Parser(
options={
'stack': Option(long='stack'),
'stash': Option(long='stash')
},
out_file=self.out_file
)
for option in [u'--s', u'--st', u'--sta']:
self.assertRaises(SystemExit, p.evaluate, [option])
output = self.out_file.getvalue()
self.assertContains(output, u'usage: script [options]')
self.assertContains(
output,
u'option "{0}" does not exist, did you mean?'.format(option)
)
self.assertContains(output, u'--stack')
self.assertContains(output, u'--stash')
def test_nonexisting_command(self):
p = Parser(
out_file=self.out_file,
takes_arguments=False
)
self.assertRaises(SystemExit, p.evaluate, [u'foo'])
output = self.out_file.getvalue()
self.assertContains(output, u'usage: script')
self.assertContains(output, u'command "foo" does not exist')
def test_nonexisting_long_option(self):
p = Parser(out_file=self.out_file)
self.assertRaises(SystemExit, p.evaluate, [u'--foo'])
output = self.out_file.getvalue()
self.assertContains(output, u'usage: script')
self.assertContains(output, u'option "--foo" does not exist')
def test_nonexisting_short_option(self):
p = Parser(out_file=self.out_file)
self.assertRaises(SystemExit, p.evaluate, [u'-f'])
output = self.out_file.getvalue()
self.assertContains(output, u'usage: script')
self.assertContains(output, u'option "-f" does not exist')
class TestHelp(OutputTest):
def test_commands(self):
p = Parser(
commands={
'foo': Command(short_description=u'foo description'),
'bar': Command(short_description=u'bar description')
},
description=u'The script description',
out_file=self.out_file
)
self.assertRaises(SystemExit, p.evaluate, [u'help'])
output = self.out_file.getvalue()
self.assertContainsAll(output, [
u'usage: script [commands]',
p.long_description,
u'Commands:',
u' foo',
p.commands['foo'].short_description,
u' bar',
p.commands['bar'].short_description
])
def test_options(self):
p = Parser(
options={
'foo': Option('f'),
'bar': Option(long='bar'),
'baz': Option('b', 'baz')
},
out_file=self.out_file
)
self.assertRaises(SystemExit, p.evaluate, [u'help'])
output = self.out_file.getvalue()
self.assertContainsAll(output, [
u'usage: script [options]',
u'Options:',
u' -f',
u' --bar',
u' -b --baz'
])
def test_positional_arguments(self):
p = Parser(
positionals=[
Positional(u'foo'),
Positional(u'bar', short_description=u'something')
],
out_file=self.out_file
)
self.assertRaises(SystemExit, p.evaluate, [u'help'])
output = self.out_file.getvalue()
self.assertContainsAll(output, [
u'usage: script foo bar',
u'Positional arguments:',
u' foo',
u'No short description.',
u' bar',
u'something'
])
def test_commands_and_options(self):
p = Parser(
commands={
'spam': Command(),
'eggs': Command()
},
options={
'foo': Option('f'),
'bar': Option('b')
},
out_file=self.out_file
)
self.assertRaises(SystemExit, p.evaluate, [u'help'])
output = self.out_file.getvalue()
self.assertContainsAll(output, [
u'usage: script [options] [commands]',
u'Commands:',
u' spam',
u' eggs',
u'Options:',
u' -f',
u' -b'
])
class TestUsage(OutputTest):
def test_only_commands(self):
p = Parser(
commands={'foo': Command()},
out_file=self.out_file
)
self.assertRaises(SystemExit, p.evaluate, [u'help'])
output = self.out_file.getvalue()
self.assertContains(output, u'usage: script [commands]')
| p = Parser(
options={'foo': Option('f')},
out_file=self.out_file
)
self.assertRaises(SystemExit, p.evaluate, [u'help'])
output = self.out_file.getvalue()
self.assertContains(output, u'usage: script [options]')
def test_commands_and_options(self):
p = Parser(
options={'foo': Option('f')},
commands={'bar': Command()},
out_file=self.out_file
)
self.assertRaises(SystemExit, p.evaluate, [u'help'])
output = self.out_file.getvalue()
self.assertContains(output, u'usage: script [options] [commands]')
def test_positionals(self):
p = Parser(
positionals=[
Positional('a'),
Positional('b')
],
out_file=self.out_file
)
self.assertRaises(SystemExit, p.evaluate, [u'help'])
output = self.out_file.getvalue()
self.assertContains(output, u'usage: script a b')
def test_all(self):
p = Parser(
options={'foo': Option('f')},
commands={'bar': Command()},
positionals=[Positional('baz')],
out_file=self.out_file
)
self.assertRaises(SystemExit, p.evaluate, [u'help'])
output = self.out_file.getvalue()
self.assertContains(output, u'usage: script [options] [commands] baz')
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestNode))
suite.addTest(unittest.makeSuite(TestOption))
suite.addTest(unittest.makeSuite(TestBooleanOption))
suite.addTest(unittest.makeSuite(TestNumberOptions))
suite.addTest(unittest.makeSuite(TestMultipleOptions))
suite.addTest(unittest.makeSuite(TestPositional))
suite.addTest(unittest.makeSuite(TestNumberPositionals))
suite.addTest(unittest.makeSuite(TestCommand))
suite.addTest(unittest.makeSuite(TestParser))
suite.addTest(unittest.makeSuite(TestParserOutput))
suite.addTest(unittest.makeSuite(TestHelp))
suite.addTest(unittest.makeSuite(TestUsage))
return suite
if __name__ == "__main__":
unittest.main(defaultTest='suite') | def test_only_options(self): |
boot.rs |
const MAX_INITRAMFS_SIZE: usize = 16066 * 4 * 512;
const MAX_KERNEL_SIZE: usize = 14624 * 4 * 512;
const MAX_DTB_SIZE: usize = 100 * 512;
// #[repr(align(2097152))]
pub struct InitRamfsEntry(pub [u8; MAX_INITRAMFS_SIZE]);
#[repr(align(2097152))]
pub struct KernelEntry(pub [u8; MAX_KERNEL_SIZE]);
#[repr(align(2097152))]
pub struct DtbEntry(pub [u8; MAX_DTB_SIZE]);
impl KernelEntry {
    /// Get the kernel's entry point. We assume all AArch64 kernels use a 2MB-aligned base,
    /// i.e. this impl won't work for kernels that aren't 2MB aligned.
///
/// The flags field (introduced in v3.17) is a little-endian 64-bit field.
/// Bit 3 of the flags field specifies `Kernel physical placement`
/// - 0 - 2MB aligned base should be as close as possible to the base of DRAM, since memory
/// below it is not accessible via the linear mapping
/// - 1 - 2MB aligned base may be anywhere in physical memory
pub const fn new() -> Self {
Self([0u8; MAX_KERNEL_SIZE])
}
}
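// Illustrative sketch (not part of the original file): reading that flags field from a
// loaded image. The byte offset (24) and the bit position follow the upstream arm64
// booting documentation and are assumptions here, not something this crate defines.
//
//   fn kernel_placement_is_anywhere(image: &[u8]) -> bool {
//       let flags = u64::from_le_bytes(image[24..32].try_into().unwrap());
//       (flags >> 3) & 1 == 1
//   }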
impl DtbEntry {
/// Get a 2MB aligned entry point to the DTB.
pub const fn new() -> Self {
Self([0u8; MAX_DTB_SIZE])
}
}
impl InitRamfsEntry {
/// Get an entry point to the `initramfs`.
pub const fn new() -> Self {
Self([0u8; MAX_INITRAMFS_SIZE])
}
}
#[link_section = ".initramfs_load_addr._initramfs_start"]
pub static mut INITRAMFS_LOAD_ADDR: InitRamfsEntry = InitRamfsEntry::new();
// #[link_section = ".kernel_load_addr._kernel_start"]
pub static mut KERNEL_LOAD_ADDR: KernelEntry = KernelEntry::new();
// #[link_section = ".dtb_load_addr._dtb_start"]
pub static mut DTB_LOAD_ADDR: DtbEntry = DtbEntry::new();
type EntryPoint = unsafe extern "C" fn(dtb: usize, rsv0: usize, rsv1: usize, rsv2: usize);
#[no_mangle]
#[inline(never)]
/// Jump to kernel. I like this method better as it has a safe abstraction around the `unsafe jump`
pub fn boot_to_kernel(kernel_entry: usize, dtb_addr: usize) -> ! {
unsafe {
let f = core::mem::transmute::<usize, EntryPoint>(kernel_entry);
f(dtb_addr, 0, 0, 0);
}
halt()
}
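// Illustrative call site (a sketch, not from the original file; real code would get the
// addresses from whatever loader filled the load-address statics above):
//
//   let kernel_entry = unsafe { KERNEL_LOAD_ADDR.0.as_ptr() as usize };
//   let dtb_addr = unsafe { DTB_LOAD_ADDR.0.as_ptr() as usize };
//   boot_to_kernel(kernel_entry, dtb_addr);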
pub fn | () -> ! {
loop {
unsafe { core::arch::asm!("wfe") }
}
}
// #[no_mangle]
// #[inline(never)]
// /// Unconditionally jump to the kernel. This method uses `inline assembly`. I'd much rather avoid this.
// pub unsafe extern "C" fn boot_into_kernel(img: usize, dtb: usize) -> ! {
// asm!(
// "mov x4, {img}", // move linux kernel pointer into register x4
// "mov x5, {dtb}", // move dtb pointer into register x5
// img = in(reg) img,
// dtb = in(reg) dtb,
// options(nomem, nostack, preserves_flags)
// );
// asm!(
// "mov x3, xzr", // zero-out registers x1, x2, x3
// "mov x2, xzr",
// "mov x1, xzr",
// "mov x0, x5", // move the dtb pointer to x0 (as first argument)
// "br x4", // unconditionally jump to kernel entry at x4
// options(nomem, nostack, preserves_flags)
// );
// // we dont intend to return, i.e. `boot_into_kernel` diverges.
// halt()
// }
| halt |
DevContent.tsx | import React from 'react';
import { withRouter, RouteComponentProps } from 'react-router-dom';
import LeftMenu from './components/left-menu/LeftMenu';
import { getRouteConfig } from './config/routeConfig';
import Intro from './Intro';
type Props = RouteComponentProps;
const DevContent: React.FunctionComponent<Props> = ({ history }) => {
const {
location: { pathname },
} = history;
const routeConfig = getRouteConfig(pathname);
return (
<>
<nav className="asideContent">
<LeftMenu /> | {routeConfig ? routeConfig.renderContent() : <Intro />}
</article>
</>
);
};
export default withRouter(DevContent); | </nav>
<article style={{ maxWidth: '1000px' }} className="mainContent"> |
local-storage.js | export const getToken = () => {
  return localStorage.getItem('jwt')
}
export const setToken = (token) => {
  localStorage.setItem('jwt', token) | }
export const clearToken = () => {
localStorage.removeItem('jwt')
} | |
rm.rs | use crate::command_registry::CommandRegistry;
use crate::commands::WholeStreamCommand;
use crate::prelude::*;
use nu_errors::ShellError;
use nu_protocol::{Signature, SyntaxShape};
use nu_source::Tagged;
use std::path::PathBuf;
pub struct Remove;
#[derive(Deserialize)]
pub struct RemoveArgs {
pub rest: Vec<Tagged<PathBuf>>,
pub recursive: Tagged<bool>,
#[allow(unused)]
pub trash: Tagged<bool>,
#[allow(unused)]
pub permanent: Tagged<bool>,
pub force: Tagged<bool>,
}
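// RemoveArgs mirrors the switches declared in signature() below; `rest` collects the
// file path patterns passed after the flags.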
#[async_trait]
impl WholeStreamCommand for Remove {
fn name(&self) -> &str {
"rm"
}
fn signature(&self) -> Signature {
Signature::build("rm")
.switch(
"trash",
"use the platform's recycle bin instead of permanently deleting",
Some('t'),
)
.switch(
"permanent",
"don't use recycle bin, delete permanently",
Some('p'),
)
.switch("recursive", "delete subdirectories recursively", Some('r'))
.switch("force", "suppress error when no file", Some('f'))
.rest(SyntaxShape::Pattern, "the file path(s) to remove")
}
fn usage(&self) -> &str {
"Remove file(s)"
}
async fn run(
&self,
args: CommandArgs,
registry: &CommandRegistry,
) -> Result<OutputStream, ShellError> {
rm(args, registry).await
}
fn examples(&self) -> Vec<Example> {
vec![
Example {
description: "Delete or move a file to the system trash (depending on 'rm_always_trash' config option)",
example: "rm file.txt",
result: None,
},
Example {
description: "Move a file to the system trash",
example: "rm --trash file.txt",
result: None,
},
Example {
description: "Delete a file permanently",
example: "rm --permanent file.txt",
result: None,
},
Example {
description: "Delete a file, and suppress errors if no file is found",
example: "rm --force file.txt",
result: None,
}
]
}
}
async fn | (args: CommandArgs, registry: &CommandRegistry) -> Result<OutputStream, ShellError> {
let registry = registry.clone();
let name = args.call_info.name_tag.clone();
let shell_manager = args.shell_manager.clone();
let (args, _): (RemoveArgs, _) = args.process(®istry).await?;
if args.trash.item && args.permanent.item {
return Ok(OutputStream::one(Err(ShellError::labeled_error(
"only one of --permanent and --trash can be used",
"conflicting flags",
name,
))));
}
shell_manager.rm(args, name)
}
#[cfg(test)]
mod tests {
use super::Remove;
#[test]
fn examples_work_as_expected() {
use crate::examples::test as test_examples;
test_examples(Remove {})
}
}
| rm |
work_item_tracking_client.py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest import Serializer, Deserializer
from ...vss_client import VssClient
from . import models
class WorkItemTrackingClient(VssClient):
"""WorkItemTracking
:param str base_url: Service URL
:param Authentication creds: Authenticated credentials.
"""
def __init__(self, base_url=None, creds=None):
super(WorkItemTrackingClient, self).__init__(base_url, creds)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
resource_area_identifier = '5264459e-e5e0-4bd8-b118-0985e68a4ec5'
def get_work_artifact_link_types(self):
"""GetWorkArtifactLinkTypes.
[Preview API] Get the list of work item tracking outbound artifact link types.
:rtype: [WorkArtifactLink]
"""
response = self._send(http_method='GET',
location_id='1a31de40-e318-41cd-a6c6-881077df52e3',
version='4.1-preview.1')
return self._deserialize('[WorkArtifactLink]', self._unwrap_collection(response))
def query_work_items_for_artifact_uris(self, artifact_uri_query, project=None):
"""QueryWorkItemsForArtifactUris.
[Preview API] Queries work items linked to a given list of artifact URI.
:param :class:`<ArtifactUriQuery> <work-item-tracking.v4_1.models.ArtifactUriQuery>` artifact_uri_query: Defines a list of artifact URI for querying work items.
:param str project: Project ID or project name
:rtype: :class:`<ArtifactUriQueryResult> <work-item-tracking.v4_1.models.ArtifactUriQueryResult>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
content = self._serialize.body(artifact_uri_query, 'ArtifactUriQuery')
response = self._send(http_method='POST',
location_id='a9a9aa7a-8c09-44d3-ad1b-46e855c1e3d3',
version='4.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('ArtifactUriQueryResult', response)
def create_attachment(self, upload_stream, project=None, file_name=None, upload_type=None, area_path=None, **kwargs):
"""CreateAttachment.
Uploads an attachment.
:param object upload_stream: Stream to upload
:param str project: Project ID or project name
:param str file_name: The name of the file
:param str upload_type: Attachment upload type: Simple or Chunked
:param str area_path: Target project Area Path
:rtype: :class:`<AttachmentReference> <work-item-tracking.v4_1.models.AttachmentReference>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if file_name is not None:
query_parameters['fileName'] = self._serialize.query('file_name', file_name, 'str')
if upload_type is not None:
query_parameters['uploadType'] = self._serialize.query('upload_type', upload_type, 'str')
if area_path is not None:
query_parameters['areaPath'] = self._serialize.query('area_path', area_path, 'str')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
content = self._client.stream_upload(upload_stream, callback=callback)
response = self._send(http_method='POST',
location_id='e07b5fa4-1499-494d-a496-64b860fd64ff',
version='4.1',
route_values=route_values,
query_parameters=query_parameters,
content=content,
media_type='application/octet-stream')
return self._deserialize('AttachmentReference', response)
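    # Illustrative usage sketch (not part of the generated client; the project and file
    # names are hypothetical):
    #
    #   with open('screenshot.png', 'rb') as upload_stream:
    #       ref = client.create_attachment(upload_stream, project='MyProject',
    #                                      file_name='screenshot.png')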
def get_attachment_content(self, id, project=None, file_name=None, download=None, **kwargs):
"""GetAttachmentContent.
Downloads an attachment.
:param str id: Attachment ID
:param str project: Project ID or project name
:param str file_name: Name of the file
:param bool download: If set to <c>true</c> always download attachment
:rtype: object
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'str')
query_parameters = {}
if file_name is not None:
query_parameters['fileName'] = self._serialize.query('file_name', file_name, 'str')
if download is not None:
query_parameters['download'] = self._serialize.query('download', download, 'bool')
response = self._send(http_method='GET',
location_id='e07b5fa4-1499-494d-a496-64b860fd64ff',
version='4.1',
route_values=route_values,
query_parameters=query_parameters,
accept_media_type='application/octet-stream')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback)
def get_attachment_zip(self, id, project=None, file_name=None, download=None, **kwargs):
"""GetAttachmentZip.
Downloads an attachment.
:param str id: Attachment ID
:param str project: Project ID or project name
:param str file_name: Name of the file
:param bool download: If set to <c>true</c> always download attachment
:rtype: object
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'str')
query_parameters = {}
if file_name is not None:
query_parameters['fileName'] = self._serialize.query('file_name', file_name, 'str')
if download is not None:
query_parameters['download'] = self._serialize.query('download', download, 'bool')
response = self._send(http_method='GET',
location_id='e07b5fa4-1499-494d-a496-64b860fd64ff',
version='4.1',
route_values=route_values,
query_parameters=query_parameters,
accept_media_type='application/zip')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback)
def get_classification_nodes(self, project, ids, depth=None, error_policy=None):
"""GetClassificationNodes.
Gets root classification nodes or list of classification nodes for a given list of nodes ids, for a given project. In case ids parameter is supplied you will get list of classification nodes for those ids. Otherwise you will get root classification nodes for this project.
:param str project: Project ID or project name
        :param [int] ids: Comma separated integer classification node ids. Not required if you want root nodes.
:param int depth: Depth of children to fetch.
:param str error_policy: Flag to handle errors in getting some nodes. Possible options are Fail and Omit.
:rtype: [WorkItemClassificationNode]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if ids is not None:
ids = ",".join(map(str, ids))
query_parameters['ids'] = self._serialize.query('ids', ids, 'str')
if depth is not None:
query_parameters['$depth'] = self._serialize.query('depth', depth, 'int')
if error_policy is not None:
query_parameters['errorPolicy'] = self._serialize.query('error_policy', error_policy, 'str')
response = self._send(http_method='GET',
location_id='a70579d1-f53a-48ee-a5be-7be8659023b9',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[WorkItemClassificationNode]', self._unwrap_collection(response))
def get_root_nodes(self, project, depth=None):
"""GetRootNodes.
Gets root classification nodes under the project.
:param str project: Project ID or project name
:param int depth: Depth of children to fetch.
:rtype: [WorkItemClassificationNode]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if depth is not None:
query_parameters['$depth'] = self._serialize.query('depth', depth, 'int')
response = self._send(http_method='GET',
location_id='a70579d1-f53a-48ee-a5be-7be8659023b9',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[WorkItemClassificationNode]', self._unwrap_collection(response))
def create_or_update_classification_node(self, posted_node, project, structure_group, path=None):
"""CreateOrUpdateClassificationNode.
Create new or update an existing classification node.
:param :class:`<WorkItemClassificationNode> <work-item-tracking.v4_1.models.WorkItemClassificationNode>` posted_node: Node to create or update.
:param str project: Project ID or project name
:param TreeStructureGroup structure_group: Structure group of the classification node, area or iteration.
:param str path: Path of the classification node.
:rtype: :class:`<WorkItemClassificationNode> <work-item-tracking.v4_1.models.WorkItemClassificationNode>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if structure_group is not None:
route_values['structureGroup'] = self._serialize.url('structure_group', structure_group, 'TreeStructureGroup')
if path is not None:
route_values['path'] = self._serialize.url('path', path, 'str')
content = self._serialize.body(posted_node, 'WorkItemClassificationNode')
response = self._send(http_method='POST',
location_id='5a172953-1b41-49d3-840a-33f79c3ce89f',
version='4.1',
route_values=route_values,
content=content)
return self._deserialize('WorkItemClassificationNode', response)
def delete_classification_node(self, project, structure_group, path=None, reclassify_id=None):
"""DeleteClassificationNode.
Delete an existing classification node.
:param str project: Project ID or project name
:param TreeStructureGroup structure_group: Structure group of the classification node, area or iteration.
:param str path: Path of the classification node.
:param int reclassify_id: Id of the target classification node for reclassification.
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if structure_group is not None:
route_values['structureGroup'] = self._serialize.url('structure_group', structure_group, 'TreeStructureGroup')
if path is not None:
route_values['path'] = self._serialize.url('path', path, 'str')
query_parameters = {}
if reclassify_id is not None:
query_parameters['$reclassifyId'] = self._serialize.query('reclassify_id', reclassify_id, 'int')
self._send(http_method='DELETE',
location_id='5a172953-1b41-49d3-840a-33f79c3ce89f',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
def get_classification_node(self, project, structure_group, path=None, depth=None):
"""GetClassificationNode.
Gets the classification node for a given node path.
:param str project: Project ID or project name
:param TreeStructureGroup structure_group: Structure group of the classification node, area or iteration.
:param str path: Path of the classification node.
:param int depth: Depth of children to fetch.
:rtype: :class:`<WorkItemClassificationNode> <work-item-tracking.v4_1.models.WorkItemClassificationNode>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if structure_group is not None:
route_values['structureGroup'] = self._serialize.url('structure_group', structure_group, 'TreeStructureGroup')
if path is not None:
route_values['path'] = self._serialize.url('path', path, 'str')
query_parameters = {}
if depth is not None:
query_parameters['$depth'] = self._serialize.query('depth', depth, 'int')
response = self._send(http_method='GET',
location_id='5a172953-1b41-49d3-840a-33f79c3ce89f',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('WorkItemClassificationNode', response)
def update_classification_node(self, posted_node, project, structure_group, path=None):
"""UpdateClassificationNode.
Update an existing classification node.
:param :class:`<WorkItemClassificationNode> <work-item-tracking.v4_1.models.WorkItemClassificationNode>` posted_node: Node to create or update.
:param str project: Project ID or project name
:param TreeStructureGroup structure_group: Structure group of the classification node, area or iteration.
:param str path: Path of the classification node.
:rtype: :class:`<WorkItemClassificationNode> <work-item-tracking.v4_1.models.WorkItemClassificationNode>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if structure_group is not None:
route_values['structureGroup'] = self._serialize.url('structure_group', structure_group, 'TreeStructureGroup')
if path is not None:
route_values['path'] = self._serialize.url('path', path, 'str')
content = self._serialize.body(posted_node, 'WorkItemClassificationNode')
response = self._send(http_method='PATCH',
location_id='5a172953-1b41-49d3-840a-33f79c3ce89f',
version='4.1',
route_values=route_values,
content=content)
return self._deserialize('WorkItemClassificationNode', response)
def get_comment(self, id, revision, project=None):
"""GetComment.
[Preview API] Gets a comment for a work item at the specified revision.
:param int id: Work item id
:param int revision: Revision for which the comment need to be fetched
:param str project: Project ID or project name
:rtype: :class:`<WorkItemComment> <work-item-tracking.v4_1.models.WorkItemComment>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'int')
if revision is not None:
route_values['revision'] = self._serialize.url('revision', revision, 'int')
response = self._send(http_method='GET',
location_id='19335ae7-22f7-4308-93d8-261f9384b7cf',
version='4.1-preview.2',
route_values=route_values)
return self._deserialize('WorkItemComment', response)
def get_comments(self, id, project=None, from_revision=None, top=None, order=None):
"""GetComments.
[Preview API] Gets the specified number of comments for a work item from the specified revision.
:param int id: Work item id
:param str project: Project ID or project name
:param int from_revision: Revision from which comments are to be fetched (default is 1)
:param int top: The number of comments to return (default is 200)
:param str order: Ascending or descending by revision id (default is ascending)
:rtype: :class:`<WorkItemComments> <work-item-tracking.v4_1.models.WorkItemComments>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'int')
query_parameters = {}
if from_revision is not None:
query_parameters['fromRevision'] = self._serialize.query('from_revision', from_revision, 'int')
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
if order is not None:
query_parameters['order'] = self._serialize.query('order', order, 'str')
response = self._send(http_method='GET',
location_id='19335ae7-22f7-4308-93d8-261f9384b7cf',
version='4.1-preview.2',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('WorkItemComments', response)
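    # Illustrative usage sketch (not part of the generated client; the attribute names on
    # the returned WorkItemComments model are assumed from the 4.1 models package):
    #
    #   comments = client.get_comments(id=42, project='MyProject', top=10, order='desc')
    #   for comment in comments.comments:
    #       print(comment.text)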
def delete_field(self, field_name_or_ref_name, project=None):
"""DeleteField.
Deletes the field.
:param str field_name_or_ref_name: Field simple name or reference name
:param str project: Project ID or project name
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if field_name_or_ref_name is not None:
route_values['fieldNameOrRefName'] = self._serialize.url('field_name_or_ref_name', field_name_or_ref_name, 'str')
self._send(http_method='DELETE',
location_id='b51fd764-e5c2-4b9b-aaf7-3395cf4bdd94',
version='4.1',
route_values=route_values)
def get_field(self, field_name_or_ref_name, project=None):
"""GetField.
Gets information on a specific field.
:param str field_name_or_ref_name: Field simple name or reference name
:param str project: Project ID or project name
:rtype: :class:`<WorkItemField> <work-item-tracking.v4_1.models.WorkItemField>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if field_name_or_ref_name is not None:
route_values['fieldNameOrRefName'] = self._serialize.url('field_name_or_ref_name', field_name_or_ref_name, 'str')
response = self._send(http_method='GET',
location_id='b51fd764-e5c2-4b9b-aaf7-3395cf4bdd94',
version='4.1',
route_values=route_values)
return self._deserialize('WorkItemField', response)
def get_fields(self, project=None, expand=None):
"""GetFields.
Returns information for all fields.
:param str project: Project ID or project name
:param str expand: Use ExtensionFields to include extension fields, otherwise exclude them. Unless the feature flag for this parameter is enabled, extension fields are always included.
:rtype: [WorkItemField]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
response = self._send(http_method='GET',
location_id='b51fd764-e5c2-4b9b-aaf7-3395cf4bdd94',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[WorkItemField]', self._unwrap_collection(response))
def update_field(self, work_item_field, field_name_or_ref_name, project=None):
"""UpdateField.
Updates the field.
:param :class:`<WorkItemField> <work-item-tracking.v4_1.models.WorkItemField>` work_item_field: New field definition
:param str field_name_or_ref_name: Field simple name or reference name
:param str project: Project ID or project name
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if field_name_or_ref_name is not None:
route_values['fieldNameOrRefName'] = self._serialize.url('field_name_or_ref_name', field_name_or_ref_name, 'str')
content = self._serialize.body(work_item_field, 'WorkItemField')
self._send(http_method='PATCH',
location_id='b51fd764-e5c2-4b9b-aaf7-3395cf4bdd94',
version='4.1',
route_values=route_values,
content=content)
def create_query(self, posted_query, project, query):
"""CreateQuery.
Creates a query, or moves a query.
:param :class:`<QueryHierarchyItem> <work-item-tracking.v4_1.models.QueryHierarchyItem>` posted_query: The query to create.
:param str project: Project ID or project name
:param str query: The parent path for the query to create.
:rtype: :class:`<QueryHierarchyItem> <work-item-tracking.v4_1.models.QueryHierarchyItem>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if query is not None:
route_values['query'] = self._serialize.url('query', query, 'str')
content = self._serialize.body(posted_query, 'QueryHierarchyItem')
response = self._send(http_method='POST',
location_id='a67d190c-c41f-424b-814d-0e906f659301',
version='4.1',
route_values=route_values,
content=content)
return self._deserialize('QueryHierarchyItem', response)
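# Usage sketch for creating a WIQL query under an existing folder (assumptions:
# `client` is an authenticated instance of this class and QueryHierarchyItem is
# importable from the accompanying models module):
#
#     from ..models import QueryHierarchyItem   # illustrative import path
#     new_query = QueryHierarchyItem(
#         name='Active Bugs',
#         wiql="SELECT [System.Id] FROM WorkItems WHERE [System.WorkItemType] = 'Bug'")
#     created = client.create_query(new_query, 'MyProject', 'Shared Queries')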
def delete_query(self, project, query):
"""DeleteQuery.
Delete a query or a folder. This deletes any permission change on the deleted query or folder and any of its descendants if it is a folder. It is important to note that the deleted permission changes cannot be recovered upon undeleting the query or folder.
:param str project: Project ID or project name
:param str query: ID or path of the query or folder to delete.
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if query is not None:
route_values['query'] = self._serialize.url('query', query, 'str')
self._send(http_method='DELETE',
location_id='a67d190c-c41f-424b-814d-0e906f659301',
version='4.1',
route_values=route_values)
def get_queries(self, project, expand=None, depth=None, include_deleted=None):
"""GetQueries.
Gets the root queries and their children
:param str project: Project ID or project name
:param str expand: Include the query string (wiql), clauses, query result columns, and sort options in the results.
:param int depth: In the folder of queries, return child queries and folders to this depth.
:param bool include_deleted: Include deleted queries and folders
:rtype: [QueryHierarchyItem]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
if depth is not None:
query_parameters['$depth'] = self._serialize.query('depth', depth, 'int')
if include_deleted is not None:
query_parameters['$includeDeleted'] = self._serialize.query('include_deleted', include_deleted, 'bool')
response = self._send(http_method='GET',
location_id='a67d190c-c41f-424b-814d-0e906f659301',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[QueryHierarchyItem]', self._unwrap_collection(response))
def get_query(self, project, query, expand=None, depth=None, include_deleted=None):
"""GetQuery.
Retrieves an individual query and its children
:param str project: Project ID or project name
:param str query:
:param str expand: Include the query string (wiql), clauses, query result columns, and sort options in the results.
:param int depth: In the folder of queries, return child queries and folders to this depth.
:param bool include_deleted: Include deleted queries and folders
:rtype: :class:`<QueryHierarchyItem> <work-item-tracking.v4_1.models.QueryHierarchyItem>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if query is not None:
route_values['query'] = self._serialize.url('query', query, 'str')
query_parameters = {}
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
if depth is not None:
query_parameters['$depth'] = self._serialize.query('depth', depth, 'int')
if include_deleted is not None:
query_parameters['$includeDeleted'] = self._serialize.query('include_deleted', include_deleted, 'bool')
response = self._send(http_method='GET',
location_id='a67d190c-c41f-424b-814d-0e906f659301',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('QueryHierarchyItem', response)
def search_queries(self, project, filter, top=None, expand=None, include_deleted=None):
"""SearchQueries.
Searches all queries the user has access to in the current project
:param str project: Project ID or project name
:param str filter: The text to filter the queries with.
:param int top: The number of queries to return (Default is 50 and maximum is 200).
:param str expand:
:param bool include_deleted: Include deleted queries and folders
:rtype: :class:`<QueryHierarchyItemsResult> <work-item-tracking.v4_1.models.QueryHierarchyItemsResult>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if filter is not None:
query_parameters['$filter'] = self._serialize.query('filter', filter, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
if include_deleted is not None:
query_parameters['$includeDeleted'] = self._serialize.query('include_deleted', include_deleted, 'bool')
response = self._send(http_method='GET',
location_id='a67d190c-c41f-424b-814d-0e906f659301',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('QueryHierarchyItemsResult', response)
def update_query(self, query_update, project, query, undelete_descendants=None):
"""UpdateQuery.
Update a query or a folder. This allows you to update, rename and move queries and folders.
:param :class:`<QueryHierarchyItem> <work-item-tracking.v4_1.models.QueryHierarchyItem>` query_update: The query to update.
:param str project: Project ID or project name
:param str query: The path for the query to update.
:param bool undelete_descendants: Undelete the children of this folder. It is important to note that this will not bring back the permission changes that were previously applied to the descendants.
:rtype: :class:`<QueryHierarchyItem> <work-item-tracking.v4_1.models.QueryHierarchyItem>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if query is not None:
route_values['query'] = self._serialize.url('query', query, 'str')
query_parameters = {}
if undelete_descendants is not None:
query_parameters['$undeleteDescendants'] = self._serialize.query('undelete_descendants', undelete_descendants, 'bool')
content = self._serialize.body(query_update, 'QueryHierarchyItem')
response = self._send(http_method='PATCH',
location_id='a67d190c-c41f-424b-814d-0e906f659301',
version='4.1',
route_values=route_values,
query_parameters=query_parameters,
content=content)
return self._deserialize('QueryHierarchyItem', response)
def destroy_work_item(self, id, project=None):
"""DestroyWorkItem.
Destroys the specified work item permanently from the Recycle Bin. This action can not be undone.
:param int id: ID of the work item to be destroyed permanently
:param str project: Project ID or project name
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'int')
self._send(http_method='DELETE',
location_id='b70d8d39-926c-465e-b927-b1bf0e5ca0e0',
version='4.1',
route_values=route_values)
def get_deleted_work_item(self, id, project=None):
"""GetDeletedWorkItem.
Gets a deleted work item from Recycle Bin.
:param int id: ID of the work item to be returned
:param str project: Project ID or project name
:rtype: :class:`<WorkItemDelete> <work-item-tracking.v4_1.models.WorkItemDelete>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'int')
response = self._send(http_method='GET',
location_id='b70d8d39-926c-465e-b927-b1bf0e5ca0e0',
version='4.1',
route_values=route_values)
return self._deserialize('WorkItemDelete', response)
def get_deleted_work_items(self, ids, project=None):
"""GetDeletedWorkItems.
Gets the work items from the Recycle Bin whose IDs are specified in the parameters
:param [int] ids: Comma separated list of IDs of the deleted work items to be returned
:param str project: Project ID or project name
:rtype: [WorkItemDeleteReference]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if ids is not None:
ids = ",".join(map(str, ids))
query_parameters['ids'] = self._serialize.query('ids', ids, 'str')
response = self._send(http_method='GET',
location_id='b70d8d39-926c-465e-b927-b1bf0e5ca0e0',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[WorkItemDeleteReference]', self._unwrap_collection(response))
def get_deleted_work_item_shallow_references(self, project=None):
"""GetDeletedWorkItemShallowReferences.
Gets a list of the IDs and the URLs of the deleted work items in the Recycle Bin.
:param str project: Project ID or project name
:rtype: [WorkItemDeleteShallowReference]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
response = self._send(http_method='GET',
location_id='b70d8d39-926c-465e-b927-b1bf0e5ca0e0',
version='4.1',
route_values=route_values)
return self._deserialize('[WorkItemDeleteShallowReference]', self._unwrap_collection(response))
def restore_work_item(self, payload, id, project=None):
"""RestoreWorkItem.
Restores the deleted work item from Recycle Bin.
:param :class:`<WorkItemDeleteUpdate> <work-item-tracking.v4_1.models.WorkItemDeleteUpdate>` payload: Payload with instructions to update the IsDeleted flag to false
:param int id: ID of the work item to be restored
:param str project: Project ID or project name
:rtype: :class:`<WorkItemDelete> <work-item-tracking.v4_1.models.WorkItemDelete>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'int')
content = self._serialize.body(payload, 'WorkItemDeleteUpdate')
response = self._send(http_method='PATCH',
location_id='b70d8d39-926c-465e-b927-b1bf0e5ca0e0',
version='4.1',
route_values=route_values,
content=content)
return self._deserialize('WorkItemDelete', response)
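# Usage sketch for restoring a deleted work item (assumptions: `client` is an
# authenticated instance of this class and WorkItemDeleteUpdate exposes an
# `is_deleted` attribute mirroring the IsDeleted flag mentioned in the docstring):
#
#     from ..models import WorkItemDeleteUpdate   # illustrative import path
#     payload = WorkItemDeleteUpdate(is_deleted=False)
#     restored = client.restore_work_item(payload, id=42, project='MyProject')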
def get_revision(self, id, revision_number, expand=None):
"""GetRevision.
Returns a fully hydrated work item for the requested revision
:param int id:
:param int revision_number:
:param str expand:
:rtype: :class:`<WorkItem> <work-item-tracking.v4_1.models.WorkItem>`
"""
route_values = {}
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'int')
if revision_number is not None:
route_values['revisionNumber'] = self._serialize.url('revision_number', revision_number, 'int')
query_parameters = {}
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
response = self._send(http_method='GET',
location_id='a00c85a5-80fa-4565-99c3-bcd2181434bb',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('WorkItem', response)
def get_revisions(self, id, top=None, skip=None, expand=None):
"""GetRevisions.
Returns the list of fully hydrated work item revisions, paged.
:param int id:
:param int top:
:param int skip:
:param str expand:
:rtype: [WorkItem]
"""
route_values = {}
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'int')
query_parameters = {}
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
if skip is not None:
query_parameters['$skip'] = self._serialize.query('skip', skip, 'int')
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
response = self._send(http_method='GET',
location_id='a00c85a5-80fa-4565-99c3-bcd2181434bb',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[WorkItem]', self._unwrap_collection(response))
def create_template(self, template, team_context):
"""CreateTemplate.
[Preview API] Creates a template
:param :class:`<WorkItemTemplate> <work-item-tracking.v4_1.models.WorkItemTemplate>` template: Template contents
:param :class:`<TeamContext> <work-item-tracking.v4_1.models.TeamContext>` team_context: The team context for the operation
:rtype: :class:`<WorkItemTemplate> <work-item-tracking.v4_1.models.WorkItemTemplate>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
content = self._serialize.body(template, 'WorkItemTemplate')
response = self._send(http_method='POST',
location_id='6a90345f-a676-4969-afce-8e163e1d5642',
version='4.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('WorkItemTemplate', response)
def get_templates(self, team_context, workitemtypename=None):
"""GetTemplates.
[Preview API] Gets the templates for the specified team
:param :class:`<TeamContext> <work-item-tracking.v4_1.models.TeamContext>` team_context: The team context for the operation
:param str workitemtypename: Optional, When specified returns templates for given Work item type.
:rtype: [WorkItemTemplateReference]
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
query_parameters = {}
if workitemtypename is not None:
query_parameters['workitemtypename'] = self._serialize.query('workitemtypename', workitemtypename, 'str')
response = self._send(http_method='GET',
location_id='6a90345f-a676-4969-afce-8e163e1d5642',
version='4.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[WorkItemTemplateReference]', self._unwrap_collection(response))
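# Usage sketch for the team-scoped template methods (assumptions: `client` is an
# authenticated instance of this class and TeamContext is importable from the
# accompanying models module; only its `project`/`team` or id fields are read above):
#
#     from ..models import TeamContext   # illustrative import path
#     ctx = TeamContext(project='MyProject', team='MyTeam')
#     templates = client.get_templates(ctx, workitemtypename='Bug')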
def delete_template(self, team_context, template_id):
"""DeleteTemplate.
[Preview API] Deletes the template with given id
:param :class:`<TeamContext> <work-item-tracking.v4_1.models.TeamContext>` team_context: The team context for the operation
:param str template_id: Template id
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if template_id is not None:
route_values['templateId'] = self._serialize.url('template_id', template_id, 'str')
self._send(http_method='DELETE',
location_id='fb10264a-8836-48a0-8033-1b0ccd2748d5',
version='4.1-preview.1',
route_values=route_values)
def get_template(self, team_context, template_id):
"""GetTemplate.
[Preview API] Gets the template with specified id
:param :class:`<TeamContext> <work-item-tracking.v4_1.models.TeamContext>` team_context: The team context for the operation
:param str template_id: Template Id
:rtype: :class:`<WorkItemTemplate> <work-item-tracking.v4_1.models.WorkItemTemplate>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if template_id is not None:
route_values['templateId'] = self._serialize.url('template_id', template_id, 'str')
response = self._send(http_method='GET',
location_id='fb10264a-8836-48a0-8033-1b0ccd2748d5',
version='4.1-preview.1',
route_values=route_values)
return self._deserialize('WorkItemTemplate', response)
def replace_template(self, template_content, team_context, template_id):
"""ReplaceTemplate.
[Preview API] Replace template contents
:param :class:`<WorkItemTemplate> <work-item-tracking.v4_1.models.WorkItemTemplate>` template_content: Template contents to replace with
:param :class:`<TeamContext> <work-item-tracking.v4_1.models.TeamContext>` team_context: The team context for the operation
:param str template_id: Template id
:rtype: :class:`<WorkItemTemplate> <work-item-tracking.v4_1.models.WorkItemTemplate>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if template_id is not None:
route_values['templateId'] = self._serialize.url('template_id', template_id, 'str')
content = self._serialize.body(template_content, 'WorkItemTemplate')
response = self._send(http_method='PUT',
location_id='fb10264a-8836-48a0-8033-1b0ccd2748d5',
version='4.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('WorkItemTemplate', response)
def get_update(self, id, update_number):
"""GetUpdate.
Returns a single update for a work item
:param int id:
:param int update_number:
:rtype: :class:`<WorkItemUpdate> <work-item-tracking.v4_1.models.WorkItemUpdate>`
"""
route_values = {}
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'int')
if update_number is not None:
route_values['updateNumber'] = self._serialize.url('update_number', update_number, 'int')
response = self._send(http_method='GET',
location_id='6570bf97-d02c-4a91-8d93-3abe9895b1a9',
version='4.1',
route_values=route_values)
return self._deserialize('WorkItemUpdate', response)
def get_updates(self, id, top=None, skip=None):
"""GetUpdates.
Returns the deltas between work item revisions
:param int id:
:param int top:
:param int skip:
:rtype: [WorkItemUpdate]
"""
route_values = {}
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'int')
query_parameters = {}
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
if skip is not None:
query_parameters['$skip'] = self._serialize.query('skip', skip, 'int')
response = self._send(http_method='GET',
location_id='6570bf97-d02c-4a91-8d93-3abe9895b1a9',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[WorkItemUpdate]', self._unwrap_collection(response))
def query_by_wiql(self, wiql, team_context=None, time_precision=None, top=None):
"""QueryByWiql.
Gets the results of the query given its WIQL.
:param :class:`<Wiql> <work-item-tracking.v4_1.models.Wiql>` wiql: The query containing the WIQL.
:param :class:`<TeamContext> <work-item-tracking.v4_1.models.TeamContext>` team_context: The team context for the operation
:param bool time_precision: Whether or not to use time precision.
:param int top: The max number of results to return.
:rtype: :class:`<WorkItemQueryResult> <work-item-tracking.v4_1.models.WorkItemQueryResult>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
query_parameters = {}
if time_precision is not None:
query_parameters['timePrecision'] = self._serialize.query('time_precision', time_precision, 'bool')
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
content = self._serialize.body(wiql, 'Wiql')
response = self._send(http_method='POST',
location_id='1a9c53f7-f243-4447-b110-35ef023636e4',
version='4.1',
route_values=route_values,
query_parameters=query_parameters,
content=content)
return self._deserialize('WorkItemQueryResult', response)
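# Usage sketch for running a WIQL query (assumptions: `client` is an authenticated
# instance of this class, Wiql is importable from the accompanying models module, and
# the result exposes `work_items` references with an `id` attribute):
#
#     from ..models import Wiql   # illustrative import path
#     wiql = Wiql(query="SELECT [System.Id] FROM WorkItems "
#                       "WHERE [System.TeamProject] = 'MyProject'")
#     result = client.query_by_wiql(wiql, top=100)
#     ids = [ref.id for ref in result.work_items]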
def get_query_result_count(self, id, team_context=None, time_precision=None):
"""GetQueryResultCount.
Gets the results of the query given the query ID.
:param str id: The query ID.
:param :class:`<TeamContext> <work-item-tracking.v4_1.models.TeamContext>` team_context: The team context for the operation
:param bool time_precision: Whether or not to use time precision.
:rtype: int
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'str')
query_parameters = {}
if time_precision is not None:
query_parameters['timePrecision'] = self._serialize.query('time_precision', time_precision, 'bool')
response = self._send(http_method='HEAD',
location_id='a02355f5-5f8a-4671-8e32-369d23aac83d',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('int', response)
def query_by_id(self, id, team_context=None, time_precision=None):
"""QueryById.
Gets the results of the query given the query ID.
:param str id: The query ID.
:param :class:`<TeamContext> <work-item-tracking.v4_1.models.TeamContext>` team_context: The team context for the operation
:param bool time_precision: Whether or not to use time precision.
:rtype: :class:`<WorkItemQueryResult> <work-item-tracking.v4_1.models.WorkItemQueryResult>`
"""
project = None
team = None
if team_context is not None:
if team_context.project_id:
project = team_context.project_id
else:
project = team_context.project
if team_context.team_id:
team = team_context.team_id
else:
team = team_context.team
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'string')
if team is not None:
route_values['team'] = self._serialize.url('team', team, 'string')
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'str')
query_parameters = {}
if time_precision is not None:
query_parameters['timePrecision'] = self._serialize.query('time_precision', time_precision, 'bool')
response = self._send(http_method='GET',
location_id='a02355f5-5f8a-4671-8e32-369d23aac83d',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('WorkItemQueryResult', response)
def get_work_item_icon_json(self, icon, color=None, v=None):
"""GetWorkItemIconJson.
[Preview API] Get a work item icon given the friendly name and icon color.
:param str icon: The name of the icon
:param str color: The 6-digit hex color for the icon
:param int v: The version of the icon (used only for cache invalidation)
:rtype: :class:`<WorkItemIcon> <work-item-tracking.v4_1.models.WorkItemIcon>`
"""
route_values = {}
if icon is not None:
route_values['icon'] = self._serialize.url('icon', icon, 'str')
query_parameters = {}
if color is not None:
query_parameters['color'] = self._serialize.query('color', color, 'str')
if v is not None:
query_parameters['v'] = self._serialize.query('v', v, 'int')
response = self._send(http_method='GET',
location_id='4e1eb4a5-1970-4228-a682-ec48eb2dca30',
version='4.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('WorkItemIcon', response)
def get_work_item_icons(self):
"""GetWorkItemIcons.
[Preview API] Get a list of all work item icons.
:rtype: [WorkItemIcon]
"""
response = self._send(http_method='GET',
location_id='4e1eb4a5-1970-4228-a682-ec48eb2dca30',
version='4.1-preview.1')
return self._deserialize('[WorkItemIcon]', self._unwrap_collection(response))
def get_work_item_icon_svg(self, icon, color=None, v=None, **kwargs):
"""GetWorkItemIconSvg.
[Preview API] Get a work item icon given the friendly name and icon color.
:param str icon: The name of the icon
:param str color: The 6-digit hex color for the icon
:param int v: The version of the icon (used only for cache invalidation)
:rtype: object
"""
route_values = {}
if icon is not None:
route_values['icon'] = self._serialize.url('icon', icon, 'str')
query_parameters = {}
if color is not None:
query_parameters['color'] = self._serialize.query('color', color, 'str')
if v is not None:
query_parameters['v'] = self._serialize.query('v', v, 'int')
response = self._send(http_method='GET',
location_id='4e1eb4a5-1970-4228-a682-ec48eb2dca30',
version='4.1-preview.1',
route_values=route_values,
query_parameters=query_parameters,
accept_media_type='image/svg+xml')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback)
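# Usage sketch for streaming an icon to disk (assumptions: `client` is an authenticated
# instance of this class, 'icon_bug' is a valid icon name, and the callback receives
# each downloaded chunk of bytes; the exact callback signature is an assumption):
#
#     with open('bug.svg', 'wb') as svg_file:
#         def write_chunk(chunk, response=None):
#             if chunk:
#                 svg_file.write(chunk)
#         client.get_work_item_icon_svg('icon_bug', color='CC293D', callback=write_chunk)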
def get_reporting_links_by_link_type(self, project=None, link_types=None, types=None, continuation_token=None, start_date_time=None):
"""GetReportingLinksByLinkType.
Get a batch of work item links
:param str project: Project ID or project name
:param [str] link_types: A list of types to filter the results to specific link types. Omit this parameter to get work item links of all link types.
:param [str] types: A list of types to filter the results to specific work item types. Omit this parameter to get work item links of all work item types.
:param str continuation_token: Specifies the continuationToken to start the batch from. Omit this parameter to get the first batch of links.
:param datetime start_date_time: Date/time to use as a starting point for link changes. Only link changes that occurred after that date/time will be returned. Cannot be used in conjunction with 'watermark' parameter.
:rtype: :class:`<ReportingWorkItemLinksBatch> <work-item-tracking.v4_1.models.ReportingWorkItemLinksBatch>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if link_types is not None:
link_types = ",".join(link_types)
query_parameters['linkTypes'] = self._serialize.query('link_types', link_types, 'str')
if types is not None:
types = ",".join(types)
query_parameters['types'] = self._serialize.query('types', types, 'str')
if continuation_token is not None:
query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'str')
if start_date_time is not None:
query_parameters['startDateTime'] = self._serialize.query('start_date_time', start_date_time, 'iso-8601')
response = self._send(http_method='GET',
location_id='b5b5b6d0-0308-40a1-b3f4-b9bb3c66878f',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('ReportingWorkItemLinksBatch', response)
def get_relation_type(self, relation):
"""GetRelationType.
Gets the work item relation type definition.
:param str relation: The relation name
:rtype: :class:`<WorkItemRelationType> <work-item-tracking.v4_1.models.WorkItemRelationType>`
"""
route_values = {}
if relation is not None:
route_values['relation'] = self._serialize.url('relation', relation, 'str')
response = self._send(http_method='GET',
location_id='f5d33bc9-5b49-4a3c-a9bd-f3cd46dd2165',
version='4.1',
route_values=route_values)
return self._deserialize('WorkItemRelationType', response)
def get_relation_types(self):
"""GetRelationTypes.
Gets the work item relation types.
:rtype: [WorkItemRelationType]
"""
response = self._send(http_method='GET',
location_id='f5d33bc9-5b49-4a3c-a9bd-f3cd46dd2165',
version='4.1')
return self._deserialize('[WorkItemRelationType]', self._unwrap_collection(response))
def read_reporting_revisions_get(self, project=None, fields=None, types=None, continuation_token=None, start_date_time=None, include_identity_ref=None, include_deleted=None, include_tag_ref=None, include_latest_only=None, expand=None, include_discussion_changes_only=None, max_page_size=None):
"""ReadReportingRevisionsGet.
Get a batch of work item revisions with the option of including deleted items
:param str project: Project ID or project name
:param [str] fields: A list of fields to return in work item revisions. Omit this parameter to get all reportable fields.
:param [str] types: A list of types to filter the results to specific work item types. Omit this parameter to get work item revisions of all work item types.
:param str continuation_token: Specifies the watermark to start the batch from. Omit this parameter to get the first batch of revisions.
:param datetime start_date_time: Date/time to use as a starting point for revisions; only revisions that occurred after this date/time will be returned. Cannot be used in conjunction with the 'watermark' parameter.
:param bool include_identity_ref: Return an identity reference instead of a string value for identity fields.
:param bool include_deleted: Specify if the deleted item should be returned.
:param bool include_tag_ref: Specify if the tag objects should be returned for System.Tags field.
:param bool include_latest_only: Return only the latest revisions of work items, skipping all historical revisions
:param str expand: Return all the fields in work item revisions, including long text fields which are not returned by default
:param bool include_discussion_changes_only: Return only those revisions of work items where only the history field was changed
:param int max_page_size: The maximum number of results to return in this batch
:rtype: :class:`<ReportingWorkItemRevisionsBatch> <work-item-tracking.v4_1.models.ReportingWorkItemRevisionsBatch>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if fields is not None:
fields = ",".join(fields)
query_parameters['fields'] = self._serialize.query('fields', fields, 'str')
if types is not None:
types = ",".join(types)
query_parameters['types'] = self._serialize.query('types', types, 'str')
if continuation_token is not None:
query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'str')
if start_date_time is not None:
query_parameters['startDateTime'] = self._serialize.query('start_date_time', start_date_time, 'iso-8601')
if include_identity_ref is not None:
query_parameters['includeIdentityRef'] = self._serialize.query('include_identity_ref', include_identity_ref, 'bool')
if include_deleted is not None:
query_parameters['includeDeleted'] = self._serialize.query('include_deleted', include_deleted, 'bool')
if include_tag_ref is not None:
query_parameters['includeTagRef'] = self._serialize.query('include_tag_ref', include_tag_ref, 'bool')
if include_latest_only is not None:
query_parameters['includeLatestOnly'] = self._serialize.query('include_latest_only', include_latest_only, 'bool')
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
if include_discussion_changes_only is not None:
query_parameters['includeDiscussionChangesOnly'] = self._serialize.query('include_discussion_changes_only', include_discussion_changes_only, 'bool')
if max_page_size is not None:
query_parameters['$maxPageSize'] = self._serialize.query('max_page_size', max_page_size, 'int')
response = self._send(http_method='GET',
location_id='f828fe59-dd87-495d-a17c-7a8d6211ca6c',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('ReportingWorkItemRevisionsBatch', response)
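# Usage sketch for paging through revision batches with the continuation token
# (assumptions: `client` is an authenticated instance of this class and the returned
# batch exposes `is_last_batch` and `continuation_token` attributes):
#
#     batch = client.read_reporting_revisions_get(project='MyProject',
#                                                 fields=['System.State', 'System.Title'])
#     while not batch.is_last_batch:
#         batch = client.read_reporting_revisions_get(project='MyProject',
#                                                     continuation_token=batch.continuation_token)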
def read_reporting_revisions_post(self, filter, project=None, continuation_token=None, start_date_time=None, expand=None):
"""ReadReportingRevisionsPost.
Get a batch of work item revisions. This request may be used if your list of fields is large enough that it may run the URL over the length limit.
:param :class:`<ReportingWorkItemRevisionsFilter> <work-item-tracking.v4_1.models.ReportingWorkItemRevisionsFilter>` filter: An object that contains request settings: field filter, type filter, identity format
:param str project: Project ID or project name
:param str continuation_token: Specifies the watermark to start the batch from. Omit this parameter to get the first batch of revisions.
:param datetime start_date_time: Date/time to use as a starting point for revisions; only revisions that occurred after this date/time will be returned. Cannot be used in conjunction with the 'watermark' parameter.
:param str expand:
:rtype: :class:`<ReportingWorkItemRevisionsBatch> <work-item-tracking.v4_1.models.ReportingWorkItemRevisionsBatch>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if continuation_token is not None:
query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'str')
if start_date_time is not None:
query_parameters['startDateTime'] = self._serialize.query('start_date_time', start_date_time, 'iso-8601')
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
content = self._serialize.body(filter, 'ReportingWorkItemRevisionsFilter')
response = self._send(http_method='POST',
location_id='f828fe59-dd87-495d-a17c-7a8d6211ca6c',
version='4.1',
route_values=route_values,
query_parameters=query_parameters,
content=content)
return self._deserialize('ReportingWorkItemRevisionsBatch', response)
def create_work_item(self, document, project, type, validate_only=None, bypass_rules=None, suppress_notifications=None):
"""CreateWorkItem.
Creates a single work item.
:param :class:`<[JsonPatchOperation]> <work-item-tracking.v4_1.models.[JsonPatchOperation]>` document: The JSON Patch document representing the work item
:param str project: Project ID or project name
:param str type: The work item type of the work item to create
:param bool validate_only: Indicate if you only want to validate the changes without saving the work item
:param bool bypass_rules: Do not enforce the work item type rules on this update
:param bool suppress_notifications: Do not fire any notifications for this change
:rtype: :class:`<WorkItem> <work-item-tracking.v4_1.models.WorkItem>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if type is not None:
route_values['type'] = self._serialize.url('type', type, 'str')
query_parameters = {}
if validate_only is not None:
query_parameters['validateOnly'] = self._serialize.query('validate_only', validate_only, 'bool')
if bypass_rules is not None:
query_parameters['bypassRules'] = self._serialize.query('bypass_rules', bypass_rules, 'bool')
if suppress_notifications is not None:
query_parameters['suppressNotifications'] = self._serialize.query('suppress_notifications', suppress_notifications, 'bool')
content = self._serialize.body(document, '[JsonPatchOperation]')
response = self._send(http_method='POST',
location_id='62d3d110-0047-428c-ad3c-4fe872c91c74',
version='4.1',
route_values=route_values,
query_parameters=query_parameters,
content=content,
media_type='application/json-patch+json')
return self._deserialize('WorkItem', response)
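# Usage sketch for creating a work item from a JSON Patch document (assumptions:
# `client` is an authenticated instance of this class and JsonPatchOperation is
# importable from the accompanying models module):
#
#     from ..models import JsonPatchOperation   # illustrative import path
#     patch = [JsonPatchOperation(op='add', path='/fields/System.Title', value='Sample bug'),
#              JsonPatchOperation(op='add', path='/fields/System.Tags', value='triage')]
#     work_item = client.create_work_item(patch, 'MyProject', 'Bug', validate_only=False)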
def get_work_item_template(self, project, type, fields=None, as_of=None, expand=None):
"""GetWorkItemTemplate.
Returns a single work item from a template.
:param str project: Project ID or project name
:param str type: The work item type name
:param str fields: Comma-separated list of requested fields
:param datetime as_of: AsOf UTC date time string
:param str expand: The expand parameters for work item attributes. Possible options are { None, Relations, Fields, Links, All }.
:rtype: :class:`<WorkItem> <work-item-tracking.v4_1.models.WorkItem>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if type is not None:
route_values['type'] = self._serialize.url('type', type, 'str')
query_parameters = {}
if fields is not None:
query_parameters['fields'] = self._serialize.query('fields', fields, 'str')
if as_of is not None:
query_parameters['asOf'] = self._serialize.query('as_of', as_of, 'iso-8601')
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
response = self._send(http_method='GET',
location_id='62d3d110-0047-428c-ad3c-4fe872c91c74',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('WorkItem', response)
def delete_work_item(self, id, project=None, destroy=None):
"""DeleteWorkItem.
Deletes the specified work item and sends it to the Recycle Bin, so that it can be restored back, if required. Optionally, if the destroy parameter has been set to true, it destroys the work item permanently.
:param int id: ID of the work item to be deleted
:param str project: Project ID or project name
:param bool destroy: Optional parameter, if set to true, the work item is deleted permanently
:rtype: :class:`<WorkItemDelete> <work-item-tracking.v4_1.models.WorkItemDelete>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'int')
query_parameters = {}
if destroy is not None:
query_parameters['destroy'] = self._serialize.query('destroy', destroy, 'bool')
response = self._send(http_method='DELETE',
location_id='72c7ddf8-2cdc-4f60-90cd-ab71c14a399b',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('WorkItemDelete', response)
def get_work_item(self, id, project=None, fields=None, as_of=None, expand=None):
"""GetWorkItem.
Returns a single work item.
:param int id: The work item id
:param str project: Project ID or project name
:param [str] fields: Comma-separated list of requested fields
:param datetime as_of: AsOf UTC date time string
:param str expand: The expand parameters for work item attributes. Possible options are { None, Relations, Fields, Links, All }.
:rtype: :class:`<WorkItem> <work-item-tracking.v4_1.models.WorkItem>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'int')
query_parameters = {}
if fields is not None:
fields = ",".join(fields)
query_parameters['fields'] = self._serialize.query('fields', fields, 'str')
if as_of is not None:
query_parameters['asOf'] = self._serialize.query('as_of', as_of, 'iso-8601')
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
response = self._send(http_method='GET',
location_id='72c7ddf8-2cdc-4f60-90cd-ab71c14a399b',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('WorkItem', response)
def get_work_items(self, ids, project=None, fields=None, as_of=None, expand=None, error_policy=None):
"""GetWorkItems.
Returns a list of work items.
:param [int] ids: The comma-separated list of requested work item ids
:param str project: Project ID or project name
:param [str] fields: Comma-separated list of requested fields
:param datetime as_of: AsOf UTC date time string
:param str expand: The expand parameters for work item attributes. Possible options are { None, Relations, Fields, Links, All }.
:param str error_policy: The flag to control error policy in a bulk get work items request. Possible options are {Fail, Omit}.
:rtype: [WorkItem]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if ids is not None:
ids = ",".join(map(str, ids))
query_parameters['ids'] = self._serialize.query('ids', ids, 'str')
if fields is not None:
fields = ",".join(fields)
query_parameters['fields'] = self._serialize.query('fields', fields, 'str')
if as_of is not None:
query_parameters['asOf'] = self._serialize.query('as_of', as_of, 'iso-8601')
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
if error_policy is not None:
query_parameters['errorPolicy'] = self._serialize.query('error_policy', error_policy, 'str')
response = self._send(http_method='GET',
location_id='72c7ddf8-2cdc-4f60-90cd-ab71c14a399b',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[WorkItem]', self._unwrap_collection(response))
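# Usage sketch for a bulk read, grounded in the signature above (`client` being an
# authenticated instance of this class is assumed):
#
#     items = client.get_work_items(ids=[1, 2, 3],
#                                   fields=['System.Id', 'System.Title', 'System.State'],
#                                   error_policy='Omit')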
def update_work_item(self, document, id, project=None, validate_only=None, bypass_rules=None, suppress_notifications=None):
"""UpdateWorkItem.
Updates a single work item.
:param :class:`<[JsonPatchOperation]> <work-item-tracking.v4_1.models.[JsonPatchOperation]>` document: The JSON Patch document representing the update
:param int id: The id of the work item to update
:param str project: Project ID or project name
:param bool validate_only: Indicate if you only want to validate the changes without saving the work item
:param bool bypass_rules: Do not enforce the work item type rules on this update
:param bool suppress_notifications: Do not fire any notifications for this change
:rtype: :class:`<WorkItem> <work-item-tracking.v4_1.models.WorkItem>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if id is not None:
route_values['id'] = self._serialize.url('id', id, 'int')
query_parameters = {}
if validate_only is not None:
query_parameters['validateOnly'] = self._serialize.query('validate_only', validate_only, 'bool')
if bypass_rules is not None:
query_parameters['bypassRules'] = self._serialize.query('bypass_rules', bypass_rules, 'bool')
if suppress_notifications is not None:
query_parameters['suppressNotifications'] = self._serialize.query('suppress_notifications', suppress_notifications, 'bool')
content = self._serialize.body(document, '[JsonPatchOperation]')
response = self._send(http_method='PATCH',
location_id='72c7ddf8-2cdc-4f60-90cd-ab71c14a399b',
version='4.1',
route_values=route_values,
query_parameters=query_parameters,
content=content,
media_type='application/json-patch+json')
return self._deserialize('WorkItem', response)
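# Usage sketch for updating fields on an existing work item (assumptions: `client` is
# an authenticated instance of this class and JsonPatchOperation is importable from
# the accompanying models module):
#
#     from ..models import JsonPatchOperation   # illustrative import path
#     patch = [JsonPatchOperation(op='replace', path='/fields/System.State', value='Active'),
#              JsonPatchOperation(op='add', path='/fields/System.History', value='Picked up for triage')]
#     updated = client.update_work_item(patch, id=42, project='MyProject')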
def get_work_item_next_states_on_checkin_action(self, ids, action=None):
"""GetWorkItemNextStatesOnCheckinAction.
[Preview API] Returns the next state on the given work item IDs.
:param [int] ids: list of work item ids
:param str action: possible actions. Currently only supports checkin
:rtype: [WorkItemNextStateOnTransition]
"""
query_parameters = {}
if ids is not None:
ids = ",".join(map(str, ids))
query_parameters['ids'] = self._serialize.query('ids', ids, 'str')
if action is not None:
query_parameters['action'] = self._serialize.query('action', action, 'str')
response = self._send(http_method='GET',
location_id='afae844b-e2f6-44c2-8053-17b3bb936a40',
version='4.1-preview.1',
query_parameters=query_parameters)
return self._deserialize('[WorkItemNextStateOnTransition]', self._unwrap_collection(response))
def get_work_item_type_categories(self, project):
"""GetWorkItemTypeCategories.
Get all work item type categories.
:param str project: Project ID or project name
:rtype: [WorkItemTypeCategory]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
response = self._send(http_method='GET',
location_id='9b9f5734-36c8-415e-ba67-f83b45c31408',
version='4.1',
route_values=route_values)
return self._deserialize('[WorkItemTypeCategory]', self._unwrap_collection(response))
def get_work_item_type_category(self, project, category):
"""GetWorkItemTypeCategory.
Get specific work item type category by name.
:param str project: Project ID or project name
:param str category: The category name
:rtype: :class:`<WorkItemTypeCategory> <work-item-tracking.v4_1.models.WorkItemTypeCategory>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if category is not None:
route_values['category'] = self._serialize.url('category', category, 'str')
response = self._send(http_method='GET',
location_id='9b9f5734-36c8-415e-ba67-f83b45c31408',
version='4.1',
route_values=route_values)
return self._deserialize('WorkItemTypeCategory', response)
def get_work_item_type(self, project, type):
"""GetWorkItemType.
Returns a work item type definition.
:param str project: Project ID or project name
:param str type: Work item type name
:rtype: :class:`<WorkItemType> <work-item-tracking.v4_1.models.WorkItemType>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if type is not None:
route_values['type'] = self._serialize.url('type', type, 'str')
response = self._send(http_method='GET',
location_id='7c8d7a76-4a09-43e8-b5df-bd792f4ac6aa',
version='4.1',
route_values=route_values)
return self._deserialize('WorkItemType', response)
def get_work_item_types(self, project):
"""GetWorkItemTypes.
Returns the list of work item types
:param str project: Project ID or project name
:rtype: [WorkItemType]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
response = self._send(http_method='GET',
location_id='7c8d7a76-4a09-43e8-b5df-bd792f4ac6aa',
version='4.1',
route_values=route_values)
return self._deserialize('[WorkItemType]', self._unwrap_collection(response))
def get_work_item_type_fields_with_references(self, project, type, expand=None):
"""GetWorkItemTypeFieldsWithReferences.
Get a list of fields for a work item type with detailed references.
:param str project: Project ID or project name
:param str type: Work item type.
:param str expand: Expand level for the API response. Properties: to include allowedvalues, default value, isRequired etc. as a part of response; None: to skip these properties.
:rtype: [WorkItemTypeFieldWithReferences]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if type is not None:
route_values['type'] = self._serialize.url('type', type, 'str')
query_parameters = {}
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
response = self._send(http_method='GET',
location_id='bd293ce5-3d25-4192-8e67-e8092e879efb',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[WorkItemTypeFieldWithReferences]', self._unwrap_collection(response))
def get_work_item_type_field_with_references(self, project, type, field, expand=None):
"""GetWorkItemTypeFieldWithReferences.
Get a field for a work item type with detailed references.
:param str project: Project ID or project name
:param str type: Work item type.
:param str field:
:param str expand: Expand level for the API response. Properties: to include allowedvalues, default value, isRequired etc. as a part of response; None: to skip these properties.
:rtype: :class:`<WorkItemTypeFieldWithReferences> <work-item-tracking.v4_1.models.WorkItemTypeFieldWithReferences>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if type is not None:
route_values['type'] = self._serialize.url('type', type, 'str')
if field is not None:
route_values['field'] = self._serialize.url('field', field, 'str')
query_parameters = {}
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
response = self._send(http_method='GET',
location_id='bd293ce5-3d25-4192-8e67-e8092e879efb',
version='4.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('WorkItemTypeFieldWithReferences', response)
def get_work_item_type_states(self, project, type):
"""GetWorkItemTypeStates.
[Preview API] Returns the state names and colors for a work item type.
:param str project: Project ID or project name
:param str type: The work item type name
:rtype: [WorkItemStateColor]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if type is not None:
route_values['type'] = self._serialize.url('type', type, 'str')
response = self._send(http_method='GET',
location_id='7c9d7a76-4a09-43e8-b5df-bd792f4ac6aa',
version='4.1-preview.1',
route_values=route_values)
return self._deserialize('[WorkItemStateColor]', self._unwrap_collection(response))
models.rs | #![doc = "generated by AutoRust"]
#![allow(non_camel_case_types)]
#![allow(unused_imports)]
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AccessPolicyEntry {
#[serde(rename = "tenantId")]
pub tenant_id: String,
#[serde(rename = "objectId")]
pub object_id: String,
#[serde(rename = "applicationId", default, skip_serializing_if = "Option::is_none")]
pub application_id: Option<String>,
pub permissions: Permissions,
}
impl AccessPolicyEntry {
pub fn new(tenant_id: String, object_id: String, permissions: Permissions) -> Self {
Self {
tenant_id,
object_id,
application_id: None,
permissions,
}
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct CheckNameAvailabilityResult {
#[serde(rename = "nameAvailable", default, skip_serializing_if = "Option::is_none")]
pub name_available: Option<bool>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub reason: Option<check_name_availability_result::Reason>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub message: Option<String>,
}
impl CheckNameAvailabilityResult {
pub fn new() -> Self {
Self::default()
}
}
pub mod check_name_availability_result {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Reason {
AccountNameInvalid,
AlreadyExists,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct CloudError {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub error: Option<CloudErrorBody>,
}
impl CloudError {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct CloudErrorBody {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub code: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub message: Option<String>,
}
impl CloudErrorBody {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct DeletedVault {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<DeletedVaultProperties>,
}
impl DeletedVault {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct DeletedVaultListResult {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<DeletedVault>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
impl DeletedVaultListResult {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct DeletedVaultProperties {
#[serde(rename = "vaultId", default, skip_serializing_if = "Option::is_none")]
pub vault_id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub location: Option<String>,
#[serde(rename = "deletionDate", default, skip_serializing_if = "Option::is_none")]
pub deletion_date: Option<String>,
#[serde(rename = "scheduledPurgeDate", default, skip_serializing_if = "Option::is_none")]
pub scheduled_purge_date: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tags: Option<serde_json::Value>,
}
impl DeletedVaultProperties {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IpRule {
pub value: String,
}
impl IpRule {
pub fn new(value: String) -> Self {
Self { value }
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct LogSpecification {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")]
pub display_name: Option<String>,
#[serde(rename = "blobDuration", default, skip_serializing_if = "Option::is_none")]
pub blob_duration: Option<String>,
}
impl LogSpecification {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct NetworkRuleSet {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub bypass: Option<network_rule_set::Bypass>,
#[serde(rename = "defaultAction", default, skip_serializing_if = "Option::is_none")]
pub default_action: Option<network_rule_set::DefaultAction>,
#[serde(rename = "ipRules", default, skip_serializing_if = "Vec::is_empty")]
pub ip_rules: Vec<IpRule>,
#[serde(rename = "virtualNetworkRules", default, skip_serializing_if = "Vec::is_empty")]
pub virtual_network_rules: Vec<VirtualNetworkRule>,
}
impl NetworkRuleSet {
pub fn new() -> Self {
Self::default()
}
}
pub mod network_rule_set {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Bypass {
AzureServices,
None,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum DefaultAction {
Allow,
Deny,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct Operation {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub display: Option<operation::Display>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub origin: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<OperationProperties>,
}
impl Operation {
pub fn new() -> Self {
Self::default()
}
}
pub mod operation {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct Display {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub provider: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub resource: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub operation: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
}
impl Display {
pub fn new() -> Self {
Self::default()
}
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct OperationListResult {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<Operation>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
impl OperationListResult {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct OperationProperties {
#[serde(rename = "serviceSpecification", default, skip_serializing_if = "Option::is_none")]
pub service_specification: Option<ServiceSpecification>,
}
impl OperationProperties {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct Permissions {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub keys: Vec<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub secrets: Vec<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub certificates: Vec<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub storage: Vec<String>,
}
impl Permissions {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct PrivateEndpoint {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
}
impl PrivateEndpoint {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct PrivateEndpointConnection {
#[serde(flatten)]
pub resource: Resource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<PrivateEndpointConnectionProperties>,
}
impl PrivateEndpointConnection {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct PrivateEndpointConnectionItem {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<PrivateEndpointConnectionProperties>,
}
impl PrivateEndpointConnectionItem {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct PrivateEndpointConnectionProperties {
#[serde(rename = "privateEndpoint", default, skip_serializing_if = "Option::is_none")]
pub private_endpoint: Option<PrivateEndpoint>,
#[serde(rename = "privateLinkServiceConnectionState", default, skip_serializing_if = "Option::is_none")]
pub private_link_service_connection_state: Option<PrivateLinkServiceConnectionState>,
#[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
pub provisioning_state: Option<PrivateEndpointConnectionProvisioningState>,
}
impl PrivateEndpointConnectionProperties {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum PrivateEndpointConnectionProvisioningState {
Succeeded,
Creating,
Updating,
Deleting,
Failed,
Disconnected,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum PrivateEndpointServiceConnectionStatus {
Pending,
Approved,
Rejected,
Disconnected,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct PrivateLinkResource {
#[serde(flatten)]
pub resource: Resource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<PrivateLinkResourceProperties>,
}
impl PrivateLinkResource {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct PrivateLinkResourceListResult {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<PrivateLinkResource>,
}
impl PrivateLinkResourceListResult {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct PrivateLinkResourceProperties {
#[serde(rename = "groupId", default, skip_serializing_if = "Option::is_none")]
pub group_id: Option<String>,
#[serde(rename = "requiredMembers", default, skip_serializing_if = "Vec::is_empty")]
pub required_members: Vec<String>,
#[serde(rename = "requiredZoneNames", default, skip_serializing_if = "Vec::is_empty")]
pub required_zone_names: Vec<String>,
}
impl PrivateLinkResourceProperties {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct PrivateLinkServiceConnectionState {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub status: Option<PrivateEndpointServiceConnectionStatus>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
#[serde(rename = "actionRequired", default, skip_serializing_if = "Option::is_none")]
pub action_required: Option<String>,
}
impl PrivateLinkServiceConnectionState {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct Resource {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub location: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tags: Option<serde_json::Value>,
}
impl Resource {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ResourceListResult {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<Resource>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
impl ResourceListResult {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ServiceSpecification {
#[serde(rename = "logSpecifications", default, skip_serializing_if = "Vec::is_empty")]
pub log_specifications: Vec<LogSpecification>,
}
impl ServiceSpecification {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Sku {
pub family: sku::Family,
pub name: sku::Name,
}
impl Sku {
pub fn new(family: sku::Family, name: sku::Name) -> Self {
Self { family, name }
}
}
pub mod sku {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Family {
A,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Name {
#[serde(rename = "standard")]
Standard,
#[serde(rename = "premium")]
Premium,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Vault {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub location: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tags: Option<serde_json::Value>,
pub properties: VaultProperties,
}
impl Vault {
pub fn new(properties: VaultProperties) -> Self {
Self {
id: None,
name: None,
type_: None,
location: None,
tags: None,
properties,
}
}
}
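// A minimal construction sketch (the tenant id below is a placeholder): required
// fields are passed through `new`, while every optional field starts out as `None`/empty.
//
// let vault = Vault::new(VaultProperties::new(
//     "00000000-0000-0000-0000-000000000000".to_string(),
//     Sku::new(sku::Family::A, sku::Name::Standard),
// ));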
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VaultAccessPolicyParameters {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub location: Option<String>,
pub properties: VaultAccessPolicyProperties,
}
impl VaultAccessPolicyParameters {
pub fn new(properties: VaultAccessPolicyProperties) -> Self {
Self {
id: None,
name: None,
type_: None,
location: None,
properties,
}
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VaultAccessPolicyProperties {
#[serde(rename = "accessPolicies")]
pub access_policies: Vec<AccessPolicyEntry>,
}
impl VaultAccessPolicyProperties {
pub fn new(access_policies: Vec<AccessPolicyEntry>) -> Self {
Self { access_policies }
}
} | pub name: String,
#[serde(rename = "type")]
pub type_: vault_check_name_availability_parameters::Type,
}
impl VaultCheckNameAvailabilityParameters {
pub fn new(name: String, type_: vault_check_name_availability_parameters::Type) -> Self {
Self { name, type_ }
}
}
pub mod vault_check_name_availability_parameters {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Type {
#[serde(rename = "Microsoft.KeyVault/vaults")]
MicrosoftKeyVaultVaults,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VaultCreateOrUpdateParameters {
pub location: String,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tags: Option<serde_json::Value>,
pub properties: VaultProperties,
}
impl VaultCreateOrUpdateParameters {
pub fn new(location: String, properties: VaultProperties) -> Self {
Self {
location,
tags: None,
properties,
}
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct VaultListResult {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<Vault>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
impl VaultListResult {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct VaultPatchParameters {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tags: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<VaultPatchProperties>,
}
impl VaultPatchParameters {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct VaultPatchProperties {
#[serde(rename = "tenantId", default, skip_serializing_if = "Option::is_none")]
pub tenant_id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub sku: Option<Sku>,
#[serde(rename = "accessPolicies", default, skip_serializing_if = "Vec::is_empty")]
pub access_policies: Vec<AccessPolicyEntry>,
#[serde(rename = "enabledForDeployment", default, skip_serializing_if = "Option::is_none")]
pub enabled_for_deployment: Option<bool>,
#[serde(rename = "enabledForDiskEncryption", default, skip_serializing_if = "Option::is_none")]
pub enabled_for_disk_encryption: Option<bool>,
#[serde(rename = "enabledForTemplateDeployment", default, skip_serializing_if = "Option::is_none")]
pub enabled_for_template_deployment: Option<bool>,
#[serde(rename = "enableSoftDelete", default, skip_serializing_if = "Option::is_none")]
pub enable_soft_delete: Option<bool>,
#[serde(rename = "createMode", default, skip_serializing_if = "Option::is_none")]
pub create_mode: Option<vault_patch_properties::CreateMode>,
#[serde(rename = "enablePurgeProtection", default, skip_serializing_if = "Option::is_none")]
pub enable_purge_protection: Option<bool>,
#[serde(rename = "networkAcls", default, skip_serializing_if = "Option::is_none")]
pub network_acls: Option<NetworkRuleSet>,
}
impl VaultPatchProperties {
pub fn new() -> Self {
Self::default()
}
}
pub mod vault_patch_properties {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum CreateMode {
#[serde(rename = "recover")]
Recover,
#[serde(rename = "default")]
Default,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VaultProperties {
#[serde(rename = "tenantId")]
pub tenant_id: String,
pub sku: Sku,
#[serde(rename = "accessPolicies", default, skip_serializing_if = "Vec::is_empty")]
pub access_policies: Vec<AccessPolicyEntry>,
#[serde(rename = "vaultUri", default, skip_serializing_if = "Option::is_none")]
pub vault_uri: Option<String>,
#[serde(rename = "enabledForDeployment", default, skip_serializing_if = "Option::is_none")]
pub enabled_for_deployment: Option<bool>,
#[serde(rename = "enabledForDiskEncryption", default, skip_serializing_if = "Option::is_none")]
pub enabled_for_disk_encryption: Option<bool>,
#[serde(rename = "enabledForTemplateDeployment", default, skip_serializing_if = "Option::is_none")]
pub enabled_for_template_deployment: Option<bool>,
#[serde(rename = "enableSoftDelete", default, skip_serializing_if = "Option::is_none")]
pub enable_soft_delete: Option<bool>,
#[serde(rename = "createMode", default, skip_serializing_if = "Option::is_none")]
pub create_mode: Option<vault_properties::CreateMode>,
#[serde(rename = "enablePurgeProtection", default, skip_serializing_if = "Option::is_none")]
pub enable_purge_protection: Option<bool>,
#[serde(rename = "networkAcls", default, skip_serializing_if = "Option::is_none")]
pub network_acls: Option<NetworkRuleSet>,
#[serde(rename = "privateEndpointConnections", default, skip_serializing_if = "Vec::is_empty")]
pub private_endpoint_connections: Vec<PrivateEndpointConnectionItem>,
}
impl VaultProperties {
pub fn new(tenant_id: String, sku: Sku) -> Self {
Self {
tenant_id,
sku,
access_policies: Vec::new(),
vault_uri: None,
enabled_for_deployment: None,
enabled_for_disk_encryption: None,
enabled_for_template_deployment: None,
enable_soft_delete: None,
create_mode: None,
enable_purge_protection: None,
network_acls: None,
private_endpoint_connections: Vec::new(),
}
}
}
pub mod vault_properties {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum CreateMode {
#[serde(rename = "recover")]
Recover,
#[serde(rename = "default")]
Default,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualNetworkRule {
pub id: String,
}
impl VirtualNetworkRule {
pub fn new(id: String) -> Self {
Self { id }
}
} | #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VaultCheckNameAvailabilityParameters { |
article.service.spec.ts | import { ArticleService } from './article.service';
import { Test, TestingModule } from '@nestjs/testing';
jest.useFakeTimers();
class | {}
describe('Article Service', () => {
let app: TestingModule;
let articlesService: ArticleService;
beforeAll(async () => {
const ApiServiceProvider = {
provide: ArticleService,
useClass: ApiServiceMock,
};
app = await Test.createTestingModule({
providers: [ArticleService, ApiServiceProvider],
}).compile();
articlesService = app.get<ArticleService>(ArticleService);
});
afterEach(async () => {});
describe('Create Article', () => {
it('should create an article', async () => {
const createdService = await articlesService.create({
author: 1,
body: 'test-body-1',
title: 'test-title-1',
});
console.log(createdService);
});
});
afterAll(async () => {
await app?.close();
});
});
| ApiServiceMock |
index.ts | export { MenuBar } from './menu/MenuBar'; | export { HelmetComponent, Copyright } from './common'; |
|
index.js | /** Copyright (c) 2018 Uber Technologies, Inc.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*
* @flow
*/ | require('./cli/test');
require('./cli/build');
require('./compiler/api');
require('./compiler/errors');
require('./hmr');
require('./route-prefix.js');
/*
require('./browser-support');
*/
require('../build/babel-plugins/babel-plugin-pure-create-plugin/test');
require('../build/babel-plugins/babel-plugin-asseturl/test');
require('../build/babel-plugins/babel-plugin-chunkid/test');
require('../build/babel-plugins/babel-plugin-i18n/test');
require('../build/babel-plugins/babel-plugin-sw/test');
require('../build/babel-plugins/babel-plugin-sync-chunk-ids/test');
require('../build/babel-plugins/babel-plugin-sync-chunk-paths/test');
require('../build/babel-plugins/babel-plugin-utils/test');
require('../build/babel-plugins/babel-plugin-transform-tree-shake/test');
process.on('unhandledRejection', e => {
throw e;
}); |
/* eslint-env node */
require('./cli/dev'); |
models.py | # -*- coding: utf-8 -*-
import uuid
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.core.validators import MinValueValidator
from django.db import models
from rest_framework.exceptions import NotAcceptable
from apps.authentication.models import OnlineUser as User
class Order(models.Model):
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey("content_type", "object_id")
order_line = models.ForeignKey(
"OrderLine", related_name="orders", on_delete=models.CASCADE
)
# Price of product when paid
price = models.DecimalField(max_digits=10, decimal_places=2, blank=True)
# Quantity of products ordered
quantity = models.PositiveIntegerField(validators=[MinValueValidator(1)])
def total_price(self):
return self.content_object.price * self.quantity
def reduce_stock(self):
self.content_object.reduce_stock(self.quantity)
def __str__(self):
return str(self.content_object)
class Meta:
default_permissions = ("add", "change", "delete")
class OrderLine(models.Model):
user = models.ForeignKey(User, related_name="u", on_delete=models.CASCADE)
datetime = models.DateTimeField(auto_now_add=True)
paid = models.BooleanField(default=False)
def count_orders(self):
return sum((order.quantity for order in self.orders.all()))
def subtotal(self):
|
def pay(self):
if self.paid:
return
if self.subtotal() > self.user.saldo:
self.delete()
raise NotAcceptable("Insufficient funds")
# Setting price for orders in case product price changes later
for order in self.orders.all():
order.price = order.total_price()
order.save()
order.reduce_stock()
self.user.saldo = self.user.saldo - self.subtotal()
self.user.save()
self.paid = True
self.save()
def __str__(self):
return str(self.pk)
class Meta:
default_permissions = ("add", "change", "delete")
class MagicToken(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
token = models.CharField("token", default=uuid.uuid4, max_length=36)
data = models.TextField("data")
created = models.DateTimeField("created", editable=False, auto_now_add=True)
class Meta:
default_permissions = ("add", "change", "delete")
| return sum((order.total_price() for order in self.orders.all())) |
ws.rs | #![deny(warnings)]
use warp::Filter;
use futures::{FutureExt, StreamExt};
#[tokio::test]
async fn upgrade() {
let _ = pretty_env_logger::try_init();
let route = warp::ws().map(|ws: warp::ws::Ws| ws.on_upgrade(|_| async {}));
// From https://tools.ietf.org/html/rfc6455#section-1.2
let key = "dGhlIHNhbXBsZSBub25jZQ==";
let accept = "s3pPLMBiTxaQ9kYGzzhZRbK+xOo=";
let resp = warp::test::request()
.header("connection", "upgrade")
.header("upgrade", "websocket")
.header("sec-websocket-version", "13")
.header("sec-websocket-key", key)
.reply(&route)
.await;
assert_eq!(resp.status(), 101);
assert_eq!(resp.headers()["connection"], "upgrade");
assert_eq!(resp.headers()["upgrade"], "websocket");
assert_eq!(resp.headers()["sec-websocket-accept"], accept);
let resp = warp::test::request()
.header("connection", "keep-alive, Upgrade")
.header("upgrade", "Websocket")
.header("sec-websocket-version", "13")
.header("sec-websocket-key", key)
.reply(&route)
.await;
assert_eq!(resp.status(), 101);
}
#[tokio::test]
async fn | () {
let _ = pretty_env_logger::try_init();
let route = warp::any().map(warp::reply);
warp::test::ws()
.handshake(route)
.await
.expect_err("handshake non-websocket route should fail");
}
#[tokio::test]
async fn text() {
let _ = pretty_env_logger::try_init();
let mut client = warp::test::ws()
.handshake(ws_echo())
.await
.expect("handshake");
client.send_text("hello warp");
let msg = client.recv().await.expect("recv");
assert_eq!(msg.to_str(), Ok("hello warp"));
}
#[tokio::test]
async fn binary() {
let _ = pretty_env_logger::try_init();
let mut client = warp::test::ws()
.handshake(ws_echo())
.await
.expect("handshake");
client.send(warp::ws::Message::binary(&b"bonk"[..]));
let msg = client.recv().await.expect("recv");
assert!(msg.is_binary());
assert_eq!(msg.as_bytes(), b"bonk");
}
#[tokio::test]
async fn closed() {
let _ = pretty_env_logger::try_init();
let route = warp::ws().map(|ws: warp::ws::Ws| {
ws.on_upgrade(|websocket| {
websocket
.close()
.map(|_| ())
})
});
let mut client = warp::test::ws()
.handshake(route)
.await
.expect("handshake");
client.recv_closed()
.await
.expect("closed");
}
#[tokio::test]
async fn limit_message_size() {
let _ = pretty_env_logger::try_init();
let echo = warp::ws().map(|ws: warp::ws::Ws| {
ws.max_message_size(1024).on_upgrade(|websocket| {
// Just echo all messages back...
let (tx, rx) = websocket.split();
rx
.forward(tx)
.map(|result| {
assert!(result.is_err());
assert_eq!(
format!("{}", result.unwrap_err()).as_str(),
"Space limit exceeded: Message too big: 0 + 1025 > 1024"
);
})
})
});
let mut client = warp::test::ws()
.handshake(echo)
.await
.expect("handshake");
client.send(warp::ws::Message::binary(vec![0; 1025]));
client.send_text("hello warp");
assert!(client.recv().await.is_err());
}
fn ws_echo() -> impl Filter<Extract = impl warp::Reply, Error = warp::Rejection> {
warp::ws().map(|ws: warp::ws::Ws| {
ws.on_upgrade(|websocket| {
// Just echo all messages back...
let (tx, rx) = websocket.split();
rx
.forward(tx)
.map(|_| ())
})
})
}
| fail |
do_full_save_example.py | from hyperparameter_hunter import Environment, CrossValidationExperiment
from hyperparameter_hunter.utils.learning_utils import get_toy_classification_data
from sklearn.model_selection import RepeatedStratifiedKFold
from xgboost import XGBClassifier
def do_full_save(experiment_result):
"""This is a simple check to see if the final OOF ROC-AUC score is above 0.75. If it is, we return True; otherwise, we return
False. As input, your do_full_save functions should expect an Experiment's result dictionary. This is actually the dictionary
that gets saved as the Experiment's "description" file, so for more information on what's in there, look at any description
file or see :attr:`hyperparameter_hunter.recorders.DescriptionRecorder.result` (the object passed to `do_full_save`)"""
return experiment_result['final_evaluations']['oof']['roc_auc_score'] > 0.75
def | ():
env = Environment(
train_dataset=get_toy_classification_data(),
root_results_path='HyperparameterHunterAssets',
metrics_map=['roc_auc_score'],
cross_validation_type=RepeatedStratifiedKFold,
cross_validation_params=dict(n_splits=3, n_repeats=2, random_state=32),
do_full_save=do_full_save,
)
experiment_0 = CrossValidationExperiment(model_initializer=XGBClassifier, model_init_params=dict(subsample=0.01))
# Pro Tip: By setting XGBoost's subsample ridiculously low, we can get bad scores on purpose
# Upon completion of this Experiment, we see a warning that not all result files will be saved
# This is because the final score of the Experiment was below our threshold of 0.75
# Specifically, we skipped saving prediction files (OOF, holdout, test, or in-fold), and the heartbeat file
# What still got saved is the Experiment's: key information, leaderboard position, and description file
# These are saved to allow us to use the information for future hyperparameter optimization, and detect repeated Experiments
# Additionally, the Experiment's script backup is saved, but that's because it's one of the first things that happens
# For even finer control over what gets saved, use `do_full_save` together with `file_blacklist`
# Now, let's perform another Experiment that does a bit better than our intentionally miserable one
experiment_1 = CrossValidationExperiment(model_initializer=XGBClassifier, model_init_params=dict(subsample=0.5))
# Our second Experiment was executed in the same Environment, so it was still subject to the `do_full_save` constraint
# However, because it scored above 0.75 (hopefully), all of the result files were saved
if __name__ == '__main__':
execute()
| execute |
TDRPG.py | import ugame
import stage
import utils
GAME = None
#######################################################
# Game
class Game(stage.Stage):
"""Base class for a game and its display"""
# TODO: add game state machine
# TODO: make each screen a state, and make a transition between them when player overlaps with trigger zones
# TODO: have a combat state
def __init__(self, display=None, fps=12):
# require singleton
global GAME
if GAME:
raise ValueError("Only one Game is allowed at a time")
GAME = self
# NOTE: PyGamer display is 160x128
if display:
super().__init__(display, fps)
else:
super().__init__(ugame.display, fps)
self.midX = int(self.width*0.5)
self.midY = int(self.height*0.5)
self.spriteSize = 16 # static size of sprites in pixels using the stage library
self.bounceX = self.width-self.spriteSize
self.bounceY = self.height-self.spriteSize
self.tilesX = int(self.width/self.spriteSize) # number of tiles that will fit in game
self.tilesY = int(self.height/self.spriteSize)
self.map = None
self.updaters = []
self.sprites = []
self.forceRefresh = False # force a refresh on the next frame
self._pauseObject = None # object that receives updates while game is paused
self.framesToWaitAfterPause = 2
self._curFramesWaiting = 0
def addToUpdates(self, obj):
if isinstance(obj, list):
self.updaters.extend(obj)
else:
self.updaters.append(obj)
def removeFromUpdates(self, obj):
if not isinstance(obj, list):
obj = list(obj)
for o in obj:
self.updaters.remove(o)
def addToSprites(self, obj, updater=True):
if isinstance(obj, list):
self.sprites.extend(obj)
else:
self.sprites.append(obj)
if updater:
self.addToUpdates(obj)
def removeFromSprites(self, obj, updater=True):
if not isinstance(obj, list):
obj = list(obj)
for o in obj:
self.sprites.remove(o)
if updater:
self.removeFromUpdates(obj)
def pause(self, pauseObject):
self._pauseObject = pauseObject
def resume(self):
self._pauseObject = None
self._curFramesWaiting = 0
def gameLoop(self):
while True:
if self._pauseObject:
self._pauseObject.update()
elif self._curFramesWaiting < self.framesToWaitAfterPause:
ugame.buttons.get_pressed() # clear out button press cache
self._curFramesWaiting += 1
else:
for obj in self.updaters:
obj.update()
if not self.forceRefresh:
self.render_sprites(self.sprites)
else:
self.render_block(0, 0)
self.forceRefresh = False
self.tick()
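# A minimal wiring sketch (tile_bank and player are hypothetical objects created
# elsewhere): build the game, attach a map and a sprite, then start the loop.
# game = Game(fps=12)
# game.map = TileMap(bank=tile_bank, width=game.tilesX, height=game.tilesY)
# game.layers = [player, game.map]
# game.addToSprites(player)
# game.gameLoop()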
#######################################################
# Map
class TileMap(stage.Grid):
"""A tile map for the whole screen, utilizing a tile set from the given bank"""
def __init__(self, bank, width=8, height=8, palette=None, buffer=None):
super().__init__(bank, width, height, palette, buffer)
self.shaking = 0
self.framesToShake = 4
self._curShakeFrame = 0
self.solidTypes = [] # tile types that should be treated as solid walls for collision
self.triggerTypes = [] # tile types that should trigger some action when overlapped
def fromHexList(self, tileList):
"""
Given a list of hex codes, update the tile map
Example:
tileList = [
"0123456789ABCDEF", # row 0
"0123456790ABCDEF", # row 1
...
]
"""
# validate input
if len(tileList) != self.height:
raise ValueError("Length of tileList is {} but expected {}".format(len(tileList), self.height))
# iterate through tile list
x = 0
y = 0
for row in tileList:
if len(row) != self.width:
raise ValueError("Length of row {} is {} but expected {}".format(y, len(row), self.width))
for tileValue in row:
self.tile(x, y, int(tileValue, 16))
x += 1
y += 1
x = 0
def shake(self, amount=4):
self.shaking = amount
self._curShakeFrame = 0
def handleTrigger(self, sprite, x, y, tileType):
"""Handle special actions based on the tile type"""
pass
def update(self):
if self.shaking != 0:
GAME.forceRefresh = True
if self._curShakeFrame % 2 == 0:
self.move(self.shaking, 0)
else:
self.move(-self.shaking, 0)
self._curShakeFrame += 1
if self._curShakeFrame >= self.framesToShake:
self._curShakeFrame = 0
self.shaking = 0
#######################################################
# Entities
class Moveable(stage.Sprite):
"""Base class for moveable sprites like a player or enemy"""
def __init__(self, bank, x, y):
super().__init__(bank, 0, x, y)
self.x = x
self.y = y
self.collider = utils.BoundingBox(self, 2, 2, 12, 12)
self.animations = utils.StateMachine()
def getTilesInCollider(self, dx=0, dy=0):
"""Calculate the grid tiles that are underneath each corner of this sprite's bounding box"""
tiles = []
rect = utils.Rectangle(self.collider.x+dx, self.collider.y+dy, self.collider.width, self.collider.height)
# top left
point = rect.getTopLeft()
point[0] >>= 4 # divide by 16
point[1] >>= 4 # divide by 16
if point[0] >= 0 and point[1] >= 0 and point[0] < GAME.tilesX and point[1] < GAME.tilesY:
tiles.append(point)
# top right
point = rect.getTopRight()
point[0] >>= 4
point[1] >>= 4
if (point[0] >= 0 and point[1] >= 0 and point[0] < GAME.tilesX and point[1] < GAME.tilesY) and not point in tiles:
tiles.append(point)
# bottom left
point = rect.getBtmLeft()
point[0] >>= 4
point[1] >>= 4
if (point[0] >= 0 and point[1] >= 0 and point[0] < GAME.tilesX and point[1] < GAME.tilesY) and not point in tiles:
tiles.append(point)
# bottom right
point = rect.getBtmRight()
point[0] >>= 4
point[1] >>= 4
if (point[0] >= 0 and point[1] >= 0 and point[0] < GAME.tilesX and point[1] < GAME.tilesY) and not point in tiles:
tiles.append(point)
# return list of tiles
return tiles
def getMovement(self):
"""
Determine desired movement (whether AI or player controls) and return dx, dy for this frame
NOTE: tile collision currently only supports moving in one direction at a time (no diagonal)
"""
return 0, 0
def applyMovementAndAnims(self, dx, dy):
"""Apply the desired movement and animations to this sprite"""
# handle movement and constrain to the stage
self.x = max(min(self.x + dx, GAME.bounceX), 0)
self.y = max(min(self.y + dy, GAME.bounceY), 0)
# finish movement
self.move(self.x, self.y)
self.collider.update()
self.animations.update()
def checkTileCollision(self, dx, dy):
"""Check the game map for collisions with tiles. Works best by checking one axis at a time"""
if dx != 0:
# check map for impassable OR special handler tiles
tiles = self.getTilesInCollider(dx, 0)
for t in tiles:
tileType = GAME.map.tile(x=t[0], y=t[1])
if tileType in GAME.map.solidTypes:
if dx > 0:
self.x = ((t[0]-1) << 4) + self.collider.dx - 1
else:
self.x = ((t[0]+1) << 4) - self.collider.dx + 1
dx = 0
break
elif tileType in GAME.map.triggerTypes:
GAME.map.handleTrigger(self, x=t[0], y=t[1], tileType=tileType)
if dy != 0:
# check map for impassable OR special handler tiles
tiles = self.getTilesInCollider(0, dy)
for t in tiles:
tileType = GAME.map.tile(x=t[0], y=t[1])
if tileType in GAME.map.solidTypes:
if dy > 0:
self.y = ((t[1]-1) << 4) + self.collider.dy - 1
else:
self.y = ((t[1]+1) << 4) - self.collider.dy + 1
dy = 0
break
elif tileType in GAME.map.triggerTypes:
GAME.map.handleTrigger(self, x=t[0], y=t[1], tileType=tileType)
return dx, dy
def getAnimation(self, dx, dy):
"""Update the animation based on the movement and state"""
pass
def update(self):
super().update()
dx, dy = self.getMovement()
dx, dy = self.checkTileCollision(dx, dy)
self.getAnimation(dx, dy)
self.applyMovementAndAnims(dx, dy)
#######################################################
# Animation Helpers
class AnimState(utils.State):
"""
Base class for animation states in a state machine
Expects all the frames to be consecutive in the sprite sheet
Can delay a number of game frames between each animation frame (ex: delay of 1 with 12 fps means delay 1/12 sec between animation frames)
"""
LOOP_FOREVER = -1
ROTATE_MIRROR = 4
ROTATE_90CW = 1
ROTATE_90CCW = 2
def __init__(self, name, sprite, frameStart, frameEnd, delay=0, numTimes=-1, nextState='idle', rotate=0):
"""
Create the new state. By default, the animation will advance each game frame, and it will loop forever.
"""
super().__init__(name)
self.sprite = sprite
self.frameStart = frameStart
self.frameEnd = frameEnd
self._curFrame = frameStart
self.delay = delay
self._curDelay = 0
self.numTimes = numTimes
self._curTimes = 0
self.nextState = nextState
self.rotate = rotate
def enter(self, machine):
utils.log("Entering {} and setting frame to {}. Will repeat {} times and then go to state {}".format(self.name, self.frameStart, self.numTimes, self.nextState))
self.sprite.set_frame(self.frameStart, self.rotate)
self._curFrame = self.frameStart
self._curDelay = 0
def update(self, machine):
# handle delay in the animation
if self.delay > 0:
if self._curDelay < self.delay:
self._curDelay += 1
return
# advance the frame in the animation
self._curFrame += 1
self._curDelay = 0
# handle looping/exiting animation
if self._curFrame > self.frameEnd:
self._curFrame = self.frameStart
self._curTimes += 1
if self.numTimes != self.LOOP_FOREVER and self._curTimes > self.numTimes:
self.goToNextState(machine)
return
self.sprite.set_frame(self._curFrame, self.rotate)
def goToNextState(self, machine):
machine.goToState(self.nextState)
class AnimLoop(AnimState):
"""
Loop an animation for a sprite. Expects all the frames to be consecutive in the sprite sheet.
"""
def __init__(self, name, sprite, frameStart, frameEnd, delay=0, rotate=0):
super().__init__(name, sprite, frameStart, frameEnd, delay, rotate=rotate)
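# A minimal sketch (frame indices and sprite are hypothetical): loop a 4-frame walk
# cycle, waiting one extra game frame between animation frames, so the animation
# advances every other tick of the 12 fps game loop.
# walk = AnimLoop('walk', player_sprite, frameStart=4, frameEnd=7, delay=1)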
class AnimRepeatN(AnimState):
"""
Repeat an animation N times. Expects all the frames to be consecutive in the sprite sheet.
"""
def __init__(self, name, sprite, frameStart, frameEnd, delay=0, numTimes=-1, nextState='idle', rotate=0):
super().__init__(name, sprite, frameStart, frameEnd, delay, numTimes, nextState, rotate)
#######################################################
# GUI
class Dialog(TileMap):
"""A modal text dialog built using a tile map"""
def __init__(self, bank, width=8, height=2, text1=None, text2=None, sprite1=None, palette=None, buffer=None):
super().__init__(bank, width, height, palette, buffer)
self.showing = False
# first line of text
self.marginX = 4
self.marginY = 4
self.text = None
if text1:
self.text1 = stage.Text(width=len(text1), height=1)
self.text1.text(text1)
# second line of text
self.marginX2 = self.marginX
self.marginY2 = self.marginY + 15
self.text2 = None
if text2:
self.text2 = stage.Text(width=len(text2), height=1)
self.text2.text(text2)
# extra sprite
self.sprite1 = None
if sprite1:
self.sprite1 = sprite1
# frames to wait at start (avoids accidental button presses)
self.framesToWait = 2
self._curFramesWaiting = 0
def move(self, x, y, z=None):
if self.text1:
self.text1.move(x+self.marginX, y+self.marginY, z)
if self.text2:
self.text2.move(x+self.marginX2, y+self.marginY2, z)
super().move(x, y, z)
def show(self):
"""Display this dialog on top of all the other layers and pause the game"""
if self.showing:
return
GAME.layers.insert(0, self)
if self.text1:
GAME.layers.insert(0, self.text1)
if self.text2:
GAME.layers.insert(0, self.text2)
if self.sprite1:
GAME.layers.insert(0, self.sprite1)
GAME.forceRefresh = True
GAME.pause(self)
self.showing = True
self._curFramesWaiting = 0
def | (self):
"""Hide this dialog and unpause the game"""
if not self.showing:
return
GAME.layers.remove(self)
if self.text1:
GAME.layers.remove(self.text1)
if self.text2:
GAME.layers.remove(self.text2)
if self.sprite1:
GAME.layers.remove(self.sprite1)
GAME.forceRefresh = True
GAME.resume()
self.showing = False
def update(self):
"""Update function called while the game is paused"""
if self._curFramesWaiting < self.framesToWait:
self._curFramesWaiting += 1
return
| hide |
from_raw_arc.rs | //! A "Manual Arc" which allows manually frobbing the reference count
//!
//! This module contains a copy of the `Arc` found in the standard library,
//! stripped down to the bare bones of what we actually need. The reason this is
//! done is for the ability to concretely know the memory layout of the `Inner`
//! structure of the arc pointer itself (e.g. `ArcInner` in the standard
//! library).
//!
//! We do some unsafe casting from `*mut OVERLAPPED` to a `FromRawArc<T>` to
//! ensure that data lives for the length of an I/O operation, but this means
//! that we have to know the layouts of the structures involved. This
//! representation primarily guarantees that the data, `T` is at the front of
//! the inner pointer always.
//!
//! Note that we're missing out on some various optimizations implemented in the
//! standard library:
//!
//! * The size of `FromRawArc` is actually two words because of the drop flag
//! * The compiler doesn't understand that the pointer in `FromRawArc` is never
//! null, so Option<FromRawArc<T>> is not a nullable pointer.
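//!
//! A minimal usage sketch (the value is illustrative only):
//!
//! ```ignore
//! let a = FromRawArc::new(42u32);
//! let b = a.clone(); // bumps the reference count
//! assert_eq!(*b, 42); // `Deref` exposes the inner data
//! ```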
use std::ops::Deref;
use std::mem;
use std::sync::atomic::{self, AtomicUsize, Ordering};
pub struct FromRawArc<T> {
_inner: *mut Inner<T>,
}
unsafe impl<T: Sync + Send> Send for FromRawArc<T> { }
unsafe impl<T: Sync + Send> Sync for FromRawArc<T> { }
#[repr(C)]
struct Inner<T> {
data: T,
cnt: AtomicUsize,
}
impl<T> FromRawArc<T> {
pub fn | (data: T) -> FromRawArc<T> {
let x = Box::new(Inner {
data: data,
cnt: AtomicUsize::new(1),
});
FromRawArc { _inner: unsafe { mem::transmute(x) } }
}
pub unsafe fn from_raw(ptr: *mut T) -> FromRawArc<T> {
// Note that if we could use `mem::transmute` here to get a libstd Arc
// (guaranteed) then we could just use std::sync::Arc, but this is the
// crucial reason this currently exists.
FromRawArc { _inner: ptr as *mut Inner<T> }
}
}
impl<T> Clone for FromRawArc<T> {
fn clone(&self) -> FromRawArc<T> {
// Atomic ordering of Relaxed lifted from libstd, but the general idea
// is that you need synchronization to communicate this increment to
// another thread, so this itself doesn't need to be synchronized.
unsafe {
(*self._inner).cnt.fetch_add(1, Ordering::Relaxed);
}
FromRawArc { _inner: self._inner }
}
}
impl<T> Deref for FromRawArc<T> {
type Target = T;
fn deref(&self) -> &T {
unsafe { &(*self._inner).data }
}
}
impl<T> Drop for FromRawArc<T> {
fn drop(&mut self) {
unsafe {
// Atomic orderings lifted from the standard library
if (*self._inner).cnt.fetch_sub(1, Ordering::Release) != 1 {
return
}
atomic::fence(Ordering::Acquire);
drop(mem::transmute::<_, Box<T>>(self._inner));
}
}
}
#[cfg(test)]
mod tests {
use super::FromRawArc;
#[test]
fn smoke() {
let a = FromRawArc::new(1);
assert_eq!(*a, 1);
assert_eq!(*a.clone(), 1);
}
#[test]
fn drops() {
struct A<'a>(&'a mut bool);
impl<'a> Drop for A<'a> {
fn drop(&mut self) {
*self.0 = true;
}
}
let mut a = false;
{
let a = FromRawArc::new(A(&mut a));
a.clone();
assert!(!*a.0);
}
assert!(a);
}
}
| new |
common_test.go | /*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package policy
import (
"encoding/json"
"testing"
"github.com/stretchr/testify/require" | "github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
)
func TestParseGarbageCollectPolicy(t *testing.T) {
r := require.New(t)
app := &v1beta1.Application{Spec: v1beta1.ApplicationSpec{
Policies: []v1beta1.AppPolicy{{Type: "example"}},
}}
spec, err := ParseGarbageCollectPolicy(app)
r.NoError(err)
r.Nil(spec)
app.Spec.Policies = append(app.Spec.Policies, v1beta1.AppPolicy{
Type: "garbage-collect",
Properties: &runtime.RawExtension{Raw: []byte("bad value")},
})
_, err = ParseGarbageCollectPolicy(app)
r.Error(err)
policySpec := &v1alpha1.GarbageCollectPolicySpec{
KeepLegacyResource: false,
Rules: []v1alpha1.GarbageCollectPolicyRule{{
Selector: v1alpha1.ResourcePolicyRuleSelector{TraitTypes: []string{"a"}},
Strategy: v1alpha1.GarbageCollectStrategyOnAppUpdate,
}, {
Selector: v1alpha1.ResourcePolicyRuleSelector{TraitTypes: []string{"b"}},
Strategy: v1alpha1.GarbageCollectStrategyNever,
}},
}
bs, err := json.Marshal(policySpec)
r.NoError(err)
app.Spec.Policies[1].Properties.Raw = bs
spec, err = ParseGarbageCollectPolicy(app)
r.NoError(err)
r.Equal(policySpec, spec)
}
func TestParseApplyOncePolicy(t *testing.T) {
r := require.New(t)
app := &v1beta1.Application{Spec: v1beta1.ApplicationSpec{
Policies: []v1beta1.AppPolicy{{Type: "example"}},
}}
spec, err := ParseApplyOncePolicy(app)
r.NoError(err)
r.Nil(spec)
app.Spec.Policies = append(app.Spec.Policies, v1beta1.AppPolicy{
Type: "apply-once",
Properties: &runtime.RawExtension{Raw: []byte("bad value")},
})
_, err = ParseApplyOncePolicy(app)
r.Error(err)
policySpec := &v1alpha1.ApplyOncePolicySpec{Enable: true}
bs, err := json.Marshal(policySpec)
r.NoError(err)
app.Spec.Policies[1].Properties.Raw = bs
spec, err = ParseApplyOncePolicy(app)
r.NoError(err)
r.Equal(policySpec, spec)
} | "k8s.io/apimachinery/pkg/runtime"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha1" |
user-inputs.js | /* -------------------------------------------------------------------------- */
/* Copyright 2002-2016, OpenNebula Project, OpenNebula Systems */
/* */
/* Licensed under the Apache License, Version 2.0 (the "License"); you may */
/* not use this file except in compliance with the License. You may obtain */
/* a copy of the License at */
/* */
/* http://www.apache.org/licenses/LICENSE-2.0 */
/* */
/* Unless required by applicable law or agreed to in writing, software */
/* distributed under the License is distributed on an "AS IS" BASIS, */
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
/* See the License for the specific language governing permissions and */
/* limitations under the License. */
/* -------------------------------------------------------------------------- */
define(function(require) {
var Locale = require('utils/locale');
var TemplateUtils = require('utils/template-utils');
var VNetsTable = require('tabs/vnets-tab/datatable');
var RangeSlider = require('utils/range-slider');
var TemplateHTML = require('hbs!./user-inputs/table');
var RowTemplateHTML = require('hbs!./user-inputs/row');
//==============================================================================
// VM & Service user inputs
//==============================================================================
return {
// User inputs edition
'html': _html,
'setup': _setup,
'fill': _fill,
'retrieve': _retrieve,
// Instantiate
'vmTemplateInsert': _generateVMTemplateUserInputs,
'serviceTemplateInsert': _generateServiceTemplateUserInputs,
// Utils
'marshall': _marshall,
'unmarshall': _unmarshall,
'parse': _parse,
'generateInputElement': _generateInputElement,
'attributeInput': _attributeInput,
'insertAttributeInputMB': _insertAttributeInputMB
};
function _html(){
return TemplateHTML();
}
function _setup(context){
context.on("click", ".add_user_input_attr", function() {
$(".user_input_attrs tbody", context).append(RowTemplateHTML());
$("select.user_input_type", context).change();
});
context.on("change", "select.user_input_type", function() {
var row = $(this).closest("tr");
$(".user_input_type_right", row).hide();
$(".user_input_type_right."+this.value, row).show();
});
context.on("click", ".user_input_attrs i.remove-tab", function() {
$(this).closest('tr').remove();
});
}
function _retrieve(context){ | var attr = {};
attr.name = $(".user_input_name", $(this)).val();
attr.mandatory = true;
attr.type = $(".user_input_type", $(this)).val();
attr.description = $(".user_input_description", $(this)).val();
switch(attr.type){
case "number":
case "number-float":
case "fixed":
attr.initial = $("."+attr.type+" input.user_input_initial", $(this)).val();
break;
case "range":
case "range-float":
var min = $("."+attr.type+" input.user_input_params_min", $(this)).val();
var max = $("."+attr.type+" input.user_input_params_max", $(this)).val();
attr.params = min + ".." + max;
attr.initial = $("."+attr.type+" input.user_input_initial", $(this)).val();
break;
case "list":
attr.params = $("."+attr.type+" input.user_input_params", $(this)).val();
attr.initial = $("."+attr.type+" input.user_input_initial", $(this)).val();
break;
}
userInputsJSON[attr.name] = _marshall(attr);
}
});
return userInputsJSON;
}
function _fill(context, templateJSON){
var userInputsJSON = templateJSON['USER_INPUTS'];
if (userInputsJSON) {
$.each(userInputsJSON, function(key, value) {
$(".add_user_input_attr", context).trigger("click");
var trcontext = $(".user_input_attrs tbody tr", context).last();
$(".user_input_name", trcontext).val(key);
var attr = _unmarshall(value);
if (templateJSON[key] != undefined){
attr.initial = templateJSON[key];
}
$(".user_input_type", trcontext).val(attr.type).change();
$(".user_input_description", trcontext).val(attr.description);
switch(attr.type){
case "number":
case "number-float":
case "fixed":
$("."+attr.type+" input.user_input_initial", trcontext).val(attr.initial);
break;
case "range":
case "range-float":
var values = attr.params.split(".."); // "2..8"
if (values.length == 2){
$("."+attr.type+" input.user_input_params_min", trcontext).val(values[0]);
$("."+attr.type+" input.user_input_params_max", trcontext).val(values[1]);
} else {
console.error('Wrong user input parameters for "'+key+'". Expected "MIN..MAX", received "'+attr.params+'"');
}
$("."+attr.type+" input.user_input_initial", trcontext).val(attr.initial);
break;
case "list":
$("."+attr.type+" input.user_input_params", trcontext).val(attr.params);
$("."+attr.type+" input.user_input_initial", trcontext).val(attr.initial);
break;
}
});
}
}
// It will replace the div's html with a row for each USER_INPUTS
// opts.text_header: header text for the text & password inputs
// opts.network_header: header text for the network inputs
// returns true if at least one input was inserted
function _generateVMTemplateUserInputs(div, template_json, opts) {
// Delete the special user inputs for the capacity
var inputs = $.extend({}, template_json.VMTEMPLATE.TEMPLATE.USER_INPUTS);
delete inputs["CPU"];
delete inputs["MEMORY"];
delete inputs["VCPU"];
opts.div = div;
opts.user_inputs = inputs;
opts.defaults = $.extend({}, template_json.VMTEMPLATE.TEMPLATE);
return _generateInstantiateUserInputs(opts);
}
// It will replace the div's html with a row for each USER_INPUTS
// opts.text_header: header text for the text & password inputs
// opts.network_header: header text for the network inputs
// returns true if at least one input was inserted
function _generateServiceTemplateUserInputs(div, template_json, opts) {
if(opts == undefined){
opts = {};
}
opts.div = div;
opts.user_inputs = template_json.DOCUMENT.TEMPLATE.BODY.custom_attrs;
return _generateInstantiateUserInputs(opts);
}
// It will replace the div's html with a row for each USER_INPUTS
// opts.div: where to insert the html
// opts.user_inputs: Object with the USER_INPUTS section
// opts.defaults: Object with the first level attributes (TEMPLATE)
// opts.text_header: header text for the text & password inputs
// opts.network_header: header text for the network inputs
// returns true if at least one input was inserted
function _generateInstantiateUserInputs(opts) {
var div = opts.div;
var user_inputs = opts.user_inputs;
var defaults = opts.defaults;
if (defaults == undefined){
defaults = {};
}
div.empty();
var html = '';
if (user_inputs == undefined) {
return false;
}
if (opts == undefined) {
opts = {};
}
if (opts.text_header == undefined) {
opts.text_header = Locale.tr("Custom Attributes");
}
if (opts.network_header == undefined) {
opts.network_header = Locale.tr("Network");
}
var network_attrs = [];
var input_attrs = [];
$.each(user_inputs, function(key, value) {
var attrs = _parse(key, value);
if (defaults[key] != undefined){
attrs.initial = opts.defaults[key];
}
if (attrs.type == "vnet_id"){
network_attrs.push(attrs);
} else {
input_attrs.push(attrs);
}
});
if (network_attrs.length > 0) {
html += '<fieldset>';
if (opts.network_header.length > 0) {
html += '<legend>' +
opts.network_header +
'</legend>' +
'</div>';
}
html += '<div class="instantiate_user_inputs">' +
'</div>' +
'</fieldset>';
div.append(html);
var separator = "";
var vnetsTable;
$.each(network_attrs, function(index, vnet_attr) {
var unique_id = "user_input_" + (vnet_attr.name.replace(/ /g, "_"));
vnetsTable = new VNetsTable(unique_id, {'select': true});
$(".instantiate_user_inputs", div).append(
'<div class="row">' +
'<div class="large-12 large-centered columns">' +
separator +
'<h5>' +
TemplateUtils.htmlEncode(vnet_attr.description) +
'</h5>' +
vnetsTable.dataTableHTML +
'</div>' +
'</div>');
separator = "<hr/>";
vnetsTable.initialize();
$('#refresh_button_' + unique_id).click();
vnetsTable.idInput().attr("wizard_field", vnet_attr.name).attr("required", "");
});
}
if (input_attrs.length > 0) {
html += '<fieldset>';
if (opts.text_header.length > 0) {
html += '<legend>' +
opts.text_header +
'</legend>' +
'</div>';
}
html += '<div class="instantiate_user_inputs">' +
'</div>' +
'</fieldset>';
div.append(html);
$.each(input_attrs, function(index, custom_attr) {
$(".instantiate_user_inputs", div).append(
'<div class="row">' +
'<div class="large-12 large-centered columns">' +
'<label>' +
TemplateUtils.htmlEncode(custom_attr.description) +
_attributeInput(custom_attr) +
'</label>' +
'</div>' +
'</div>');
});
}
return (network_attrs.length > 0 || input_attrs.length > 0);
}
/**
* Transforms a user input object to a string
* @param {object} attr user input object, e.g.
* { "mandatory": true/false
* "type":
* "description":
* ["params":] "2..8" / "2,4,8"
* ["initial":] "3"
* }
* @return {string} String in the form "M|range|Description here|2..8|4"
*/
function _marshall(attr) {
var st = "";
st += (attr.mandatory ? "M" : "O") + "|" +
(attr.type != undefined ? attr.type : "text") + "|" +
(attr.description != undefined ? attr.description : "");
switch (attr.type) {
case "number":
case "number-float":
case "fixed":
st += ("| |" + (attr.initial != undefined ? attr.initial : "") );
break;
case "range":
case "range-float":
case "list":
st += ("|" + (attr.params != undefined ? attr.params : "") +
"|" + (attr.initial != undefined ? attr.initial : "") );
break;
}
return st;
}
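// A minimal sketch (attribute values are hypothetical):
// _marshall({mandatory: true, type: "range", description: "Memory in MB",
// params: "512..4096", initial: "1024"})
// returns "M|range|Memory in MB|512..4096|1024"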
/**
* Transforms a user input string to an object
* @param {string} value String in the form "M|range|Description here|2..8|4"
* @return {object} user input object, e.g.
* { "mandatory": true/false
* "type":
* "description":
* ["params":] "2..8" / "2,4,8"
* ["initial":] "3"
* }
*/
function _unmarshall(value) {
var parts = value.split("|");
var attr = {
"mandatory": (parts[0] == "M"),
"type": parts[1],
"description": parts[2],
"initial": ""
};
if (parts[3] != undefined){
attr.params = parts[3];
}
if (parts[4] != undefined){
attr.initial = parts[4];
}
return attr;
}
/**
* Returns a structure with the user input parameters
* @param {string} name Template Attribute name, e.g. USER_PASSWORD
* @param {string} value Template Attribute value,
* e.g. "M|range|Description here|2..8|4"
* @return {object} { "name":
"mandatory":
"type":
"description":
["params":] "2..8" / "2,4,8"
["initial":]
["min":]
["max":]
["step":]
["options":]
["tick_size":] For range inputs, the tick positions
starting from 0, not min
}
*/
function _parse(name, value) {
var attr = _unmarshall(value);
attr.name = name;
// TODO: error management (params undefined)
switch (attr.type) {
case "number":
attr.step = "1";
break;
case "number-float":
attr.step = "any";
break;
case "range":
var params = attr.params.split(".."); // "2..8"
attr.min = parseInt( params[0] );
attr.max = parseInt( params[1] );
attr.step = "1";
attr.tick_size = 1;
while ((attr.max - attr.min) / attr.tick_size > 10 ){
attr.tick_size *= 10;
}
break;
case "range-float":
var params = attr.params.split(".."); // "2.4..8.75"
attr.min = parseFloat( params[0] );
attr.max = parseFloat( params[1] );
attr.step = "any";
attr.tick_size = 1;
while ((attr.max - attr.min) / attr.tick_size > 10 ){
attr.tick_size *= 10;
}
break;
case "list":
attr.options = attr.params.split(","); // "2,4,16"
break;
}
return attr;
}
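// A minimal sketch (name and values are hypothetical):
// _parse("MEMORY", "M|range|Memory in MB|512..4096|1024") returns an object like
// { name: "MEMORY", mandatory: true, type: "range", description: "Memory in MB",
// params: "512..4096", initial: "1024", min: 512, max: 4096, step: "1", tick_size: 1000 }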
/**
* Inserts an html <input> for the given user input attribute, plus a selector
* to change between MB and GB. The source attr is supposed to be in MB
* @param {object} attr structure as returned by parse
* @param {jQuery} div jQuery selector for the div to attach the html to
*/
function _insertAttributeInputMB(attr, div) {
// Modified input for GB
var attr_gb = $.extend({}, attr);
if (attr.type == "range"){
attr.tick_size = 1024;
}
delete attr_gb.initial;
attr_gb.wizard_field_disabled = true;
if (attr_gb.type == "range"){
attr_gb.type = "range-float";
attr_gb.min = Math.ceil((attr_gb.min / 1024));
attr_gb.max = Math.floor((attr_gb.max / 1024));
attr_gb.step = "1";
attr_gb.tick_size = 1;
} else if (attr_gb.type == "list"){
attr_gb.options = attr_gb.options.map(function(e){
return e / 1024;
});
} else if (attr_gb.type == "number"){
attr_gb.type = "number-float";
attr_gb.step = "1";
}
div.html(
'<div class="input-group mb_input_wrapper">'+
'<div class="mb_input input-group-field">' +
_attributeInput(attr) +
'</div>' +
'<div class="gb_input input-group-field">' +
_attributeInput(attr_gb) +
'</div>' +
'<div class="input-group-button">'+
'<select id="mb_input_unit" style="width: auto;">' +
'<option value="MB">'+Locale.tr("MB")+'</option>' +
'<option value="GB" selected>'+Locale.tr("GB")+'</option>' +
'</select>' +
'</div>'+
'</div>');
_setupAttributeInputMB(div);
// Update attr_gb with the value set in attr
$("input, select", $("div.mb_input", div)).trigger("input");
var input_val = $("input, select", $("div.mb_input", div)).val();
if (input_val == "" || (input_val >= 1024 && (input_val % 1024 == 0))){
$("#mb_input_unit", div).val("GB").change();
} else {
$("#mb_input_unit", div).val("MB").change();
}
}
function _setupAttributeInputMB(context) {
// MB to GB
$("div.mb_input", context).on("input", "input, select", function(){
var val = "";
if (this.value && this.value >= 0) {
val = this.value / 1024;
}
$("input, select", $("div.gb_input", context)).val(val);
});
// GB to MB
$("div.gb_input", context).on("input", "input, select", function(){
var val = "";
if (this.value && this.value >= 0) {
val = Math.floor(this.value * 1024);
}
$("input, select", $("div.mb_input", context)).val(val);
});
var gb_inputs = $("div.gb_input", context).children().detach();
// Unit select
$("#mb_input_unit", context).on('change', function() {
var mb_input_unit_val = $('#mb_input_unit :selected', context).val();
if (mb_input_unit_val == 'GB') {
$("div.mb_input", context).hide();
gb_inputs.appendTo($("div.gb_input", context));
$("div.mb_input input,select",context).trigger("input");
} else {
$("div.mb_input", context).show();
gb_inputs = $("div.gb_input", context).children().detach();
}
});
$("#mb_input_unit", context).change();
}
/**
* Returns an html <input> for the given user input attribute
* @param {object} attr structure as returned by parse
* @return {string} string containing an html <input> element
*/
function _attributeInput(attr) {
var input;
var required = (attr.mandatory ? "required" : "");
var wizard_field = 'wizard_field="' + TemplateUtils.htmlEncode(attr.name) + '"';
if (attr.wizard_field_disabled == true){
wizard_field = "";
}
var value = "";
if (attr.initial != undefined){
value = TemplateUtils.htmlEncode(attr.initial);
}
switch (attr.type) {
case "text":
input = '<textarea type="text" rows="1" '+wizard_field+' '+required+'>'+TemplateUtils.htmlEncode(value)+'</textarea>';
break;
case "text64":
try {
input = '<textarea type="text" rows="1" wizard_field_64="true" '+wizard_field+' '+required+'>'+TemplateUtils.htmlEncode(atob(value))+'</textarea>';
} catch(e){
console.error(e.message);
input = "<p>"+e.message+"</p>";
}
break;
case "password":
input = '<input type="password" value="'+value+'" '+wizard_field+' '+required+'/>';
break;
case "number":
case "number-float":
var min = attr.min != undefined ? 'min="'+attr.min+'"' : "";
var max = attr.max != undefined ? 'max="'+attr.max+'"' : "";
input = '<input type="number" step="'+attr.step+'" '+min+' '+max+' value="'+value+'" '+wizard_field+' '+required+'/>';
break;
case "range":
case "range-float":
input = RangeSlider.html(attr);
break;
case "list":
input = '<select '+wizard_field+' '+required+'>';
$.each(attr.options, function(){
var selected = (attr.initial == this);
input += '<option value="'+this+'" '+
(selected? 'selected' : '')+'>'+
this+
'</option>';
});
input += '</select>';
break;
case "fixed":
input = '<input type="text" value="'+value+'" '+wizard_field+' '+required+' disabled/>';
break;
}
return input;
}
/**
* Returns an html <input> for the given USER_INPUT attribute
* @param {string} name Template Attribute name, e.g. USER_PASSWORD
* @param {string} value Template Attribute value,
* e.g. "M|range|Description here|2..8|4"
* @return {string} string containing an html <input> element
*/
function _generateInputElement(name, value) {
var attrs = _parse(name, value);
return _attributeInput(attrs);
}
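// Example (reusing the sample value from the doc comment above; the attribute
// name is illustrative):
//
//   _generateInputElement("USER_SLOTS", "M|range|Description here|2..8|4");
//
// parses the value as a mandatory range input (min 2, max 8, initial 4) and
// returns the HTML produced by _attributeInput() for it.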
}); | var userInputsJSON = {};
$(".user_input_attrs tbody tr", context).each(function() {
if ($(".user_input_name", $(this)).val()) { |
compiler_worker.rs | // Copyright 2018-2020 the Deno authors. All rights reserved. MIT license.
use crate::ops;
use crate::state::ThreadSafeState;
use crate::worker::Worker;
use deno_core;
use deno_core::StartupData;
use std::ops::Deref;
use std::ops::DerefMut;
/// This worker is used to host TypeScript and WASM compilers.
///
/// It provides a minimal set of ops that are necessary to facilitate
/// compilation.
///
/// NOTE: This worker is considered privileged, because it may
/// access the file system without permission checks.
///
/// At the moment this worker is meant to be single-use - after
/// performing a single compilation or bundling it should be destroyed.
///
/// TODO(bartlomieju): add support to reuse the worker - or in other
/// words support stateful TS compiler
pub struct CompilerWorker(Worker);
impl CompilerWorker {
pub fn new(
name: String,
startup_data: StartupData,
state: ThreadSafeState,
) -> Self {
let state_ = state.clone();
let mut worker = Worker::new(name, startup_data, state_);
{
let isolate = &mut worker.isolate;
ops::runtime::init(isolate, &state);
ops::compiler::init(isolate, &state);
ops::web_worker::init(isolate, &state);
ops::errors::init(isolate, &state);
// for compatibility with Worker scope, though unused at
// the moment
ops::timers::init(isolate, &state);
ops::fetch::init(isolate, &state);
// TODO(bartlomieju): CompilerWorker should not
// depend on those ops
ops::os::init(isolate, &state);
ops::files::init(isolate, &state);
ops::fs::init(isolate, &state);
ops::io::init(isolate, &state);
}
Self(worker)
}
}
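// Minimal usage sketch (the `startup_data` snapshot and `state` values are
// assumed to be produced elsewhere in the CLI; this is not a definitive
// example of how the CLI wires it up):
//
//   let mut compiler = CompilerWorker::new("TS".to_string(), startup_data, state);
//
// Thanks to the Deref/DerefMut impls below, `compiler` can then be used
// anywhere a `&Worker` or `&mut Worker` is expected.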
impl Deref for CompilerWorker {
type Target = Worker;
fn | (&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for CompilerWorker {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
| deref |
lib.rs | println!("Hello!");
}
#[cfg(config2)]
fn config2_hello() {
println!("Hello!");
}
#[test]
#[cfg(config1)]
fn it_works() {
config1_hello();
}
#[test]
#[cfg(config2)]
fn it_works() {
config2_hello();
} | #[cfg(config1)]
fn config1_hello() { |
|
download_apps.py | #!/usr/bin/python3
import os
import sys
import json
import zipfile
import shutil
from urllib.request import urlretrieve
cur_dir = os.path.dirname(os.path.realpath(__file__))
app_dir = os.path.join(cur_dir, 'opt/dist/app')
gluu_app_dir = os.path.join(cur_dir, 'opt/dist/gluu')
target = 'el7'
if '-el8' in sys.argv:
target = 'el8'
elif '-ub' in sys.argv:
target = 'ub'
app_versions = {
"JETTY_VERSION": "9.4.35.v20201120",
"AMAZON_CORRETTO_VERSION": "11-x64",
"OX_GITVERISON": ".Final",
"OX_VERSION": "4.4.0",
"JYTHON_VERSION": "2.7.2",
"NODE_VERSION": "v12.19.0",
"SETUP_BRANCH": "version_4.3.0",
"PASSPORT_NODE_VERSION": "4.3.0",
"TWILIO_VERSION": "7.17.0",
"JSMPP_VERSION": "2.3.7"
}
def download(url, target_fn):
if not target_fn.startswith('/'):
dst = os.path.join(cur_dir, target_fn)
else:
dst = target_fn
pardir, fn = os.path.split(dst)
if not os.path.exists(pardir):
os.makedirs(pardir)
print("Downloading", url, "to", dst)
urlretrieve(url, dst)
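# Example (hypothetical URL and target): a relative target is resolved against
# cur_dir, and any missing parent directories are created before the fetch.
#   download('https://example.com/archive.tgz', 'tmp/archive.tgz')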
def package_oxd():
oxd_app_dir = os.path.join(cur_dir, 'tmp')
oxd_tgz_fn = os.path.join(oxd_app_dir, 'oxd-server.tgz')
oxd_zip_fn = os.path.join(oxd_app_dir, 'oxd-server.zip')
oxd_tmp_dir = os.path.join(oxd_app_dir, os.urandom(5).hex())
download('https://ox.gluu.org/maven/org/gluu/oxd-server/{0}{1}/oxd-server-{0}{1}-distribution.zip'.format(app_versions['OX_VERSION'], app_versions['OX_GITVERISON']), 'tmp/oxd-server.zip')
os.mkdir(oxd_tmp_dir)
cmd = 'unzip -qqo {} -d {}/oxd-server'.format(oxd_zip_fn, oxd_tmp_dir)
print("Excuting", cmd)
os.system(cmd)
cmd = 'mkdir ' + os.path.join(oxd_tmp_dir, 'oxd-server/data')
print("Excuting", cmd)
os.system(cmd)
download('https://raw.githubusercontent.com/GluuFederation/oxd/master/debian/oxd-server', os.path.join(oxd_tmp_dir, 'oxd-server/bin/oxd-server'))
cmd = 'cd {}; tar -zcf {} oxd-server'.format(oxd_tmp_dir, oxd_tgz_fn)
print("Excuting", cmd)
os.system(cmd)
os.remove(oxd_zip_fn)
shutil.rmtree(oxd_tmp_dir)
shutil.copyfile(os.path.join(cur_dir, 'tmp/oxd-server.tgz'), os.path.join(gluu_app_dir, 'oxd-server.tgz'))
unit_files = ['casa.service', 'idp.service', 'oxauth-rp.service', 'oxd-server.service', 'scim.service', 'fido2.service', 'identity.service', 'opendj.service', 'oxauth.service', 'passport.service']
if '-e' not in sys.argv:
for uf in unit_files:
base_url = 'https://raw.githubusercontent.com/GluuFederation/community-edition-package/master/package/systemd/{}'
download(base_url.format(uf), 'etc/systemd/system/'+uf)
download('https://corretto.aws/downloads/latest/amazon-corretto-{0}-linux-jdk.tar.gz'.format(app_versions['AMAZON_CORRETTO_VERSION']), os.path.join(app_dir, 'amazon-corretto-{0}-linux-jdk.tar.gz'.format(app_versions['AMAZON_CORRETTO_VERSION'])))
download('https://repo1.maven.org/maven2/org/eclipse/jetty/jetty-distribution/{0}/jetty-distribution-{0}.tar.gz'.format(app_versions['JETTY_VERSION']), os.path.join(app_dir, 'jetty-distribution-{0}.tar.gz'.format(app_versions['JETTY_VERSION'])))
download('https://repo1.maven.org/maven2/org/python/jython-installer/{0}/jython-installer-{0}.jar'.format(app_versions['JYTHON_VERSION']), os.path.join(app_dir, 'jython-installer-{0}.jar'.format(app_versions['JYTHON_VERSION'])))
download('https://nodejs.org/dist/{0}/node-{0}-linux-x64.tar.xz'.format(app_versions['NODE_VERSION']), os.path.join(app_dir, 'node-{0}-linux-x64.tar.xz'.format(app_versions['NODE_VERSION'])))
download('https://github.com/npcole/npyscreen/archive/master.zip', os.path.join(app_dir, 'npyscreen-master.zip'))
download('https://ox.gluu.org/maven/org/gluufederation/opendj/opendj-server-legacy/4.0.0.gluu/opendj-server-legacy-4.0.0.gluu.zip', os.path.join(app_dir, 'opendj-server-4.0.0.zip'))
| download('https://ox.gluu.org/maven/org/gluu/oxauth-server/{0}{1}/oxauth-server-{0}{1}.war'.format(app_versions['OX_VERSION'], app_versions['OX_GITVERISON']), os.path.join(gluu_app_dir, 'oxauth.war'))
download('https://ox.gluu.org/maven/org/gluu/oxtrust-server/{0}{1}/oxtrust-server-{0}{1}.war'.format(app_versions['OX_VERSION'], app_versions['OX_GITVERISON']), os.path.join(gluu_app_dir,'identity.war'))
download('https://ox.gluu.org/maven/org/gluu/oxauth-client/{0}{1}/oxauth-client-{0}{1}-jar-with-dependencies.jar'.format(app_versions['OX_VERSION'], app_versions['OX_GITVERISON']), os.path.join(gluu_app_dir,'oxauth-client-jar-with-dependencies.jar'))
download('https://ox.gluu.org/maven/org/gluu/oxShibbolethStatic/{0}{1}/oxShibbolethStatic-{0}{1}.jar'.format(app_versions['OX_VERSION'], app_versions['OX_GITVERISON']), os.path.join(gluu_app_dir,'shibboleth-idp.jar'))
download('https://ox.gluu.org/maven/org/gluu/oxshibbolethIdp/{0}{1}/oxshibbolethIdp-{0}{1}.war'.format(app_versions['OX_VERSION'], app_versions['OX_GITVERISON']), os.path.join(gluu_app_dir,'idp.war'))
download('https://ox.gluu.org/npm/passport/passport-{}.tgz'.format(app_versions['OX_VERSION']), os.path.join(gluu_app_dir, 'passport.tgz'))
download('https://ox.gluu.org/npm/passport/passport-version_{}-node_modules.tar.gz'.format(app_versions['PASSPORT_NODE_VERSION']), os.path.join(gluu_app_dir, 'passport-version_{}-node_modules.tar.gz'.format(app_versions['PASSPORT_NODE_VERSION'])))
download('https://ox.gluu.org/maven/org/gluu/super-gluu-radius-server/{0}{1}/super-gluu-radius-server-{0}{1}.jar'.format(app_versions['OX_VERSION'], app_versions['OX_GITVERISON']), os.path.join(gluu_app_dir, 'super-gluu-radius-server.jar'))
download('https://ox.gluu.org/maven/org/gluu/super-gluu-radius-server/{0}{1}/super-gluu-radius-server-{0}{1}-distribution.zip'.format(app_versions['OX_VERSION'], app_versions['OX_GITVERISON']), os.path.join(gluu_app_dir, 'gluu-radius-libs.zip'))
download('https://ox.gluu.org/maven/org/gluu/casa/{0}{1}/casa-{0}{1}.war'.format(app_versions['OX_VERSION'], app_versions['OX_GITVERISON']), os.path.join(gluu_app_dir, 'casa.war'))
download('https://repo1.maven.org/maven2/com/twilio/sdk/twilio/{0}/twilio-{0}.jar'.format(app_versions['TWILIO_VERSION']), os.path.join(gluu_app_dir,'twilio-{0}.jar'.format(app_versions['TWILIO_VERSION'])))
download('https://repo1.maven.org/maven2/org/jsmpp/jsmpp/{0}/jsmpp-{0}.jar'.format(app_versions['JSMPP_VERSION']), os.path.join(gluu_app_dir, 'jsmpp-{0}.jar'.format(app_versions['JSMPP_VERSION'])))
download('https://github.com/GluuFederation/casa/raw/version_{}/extras/casa.pub'.format(app_versions['OX_VERSION']), 'etc/certs/casa.pub')
download('https://raw.githubusercontent.com/GluuFederation/gluu-snap/master/facter/facter', 'usr/bin/facter')
download('https://ox.gluu.org/maven/org/gluu/scim-server/{0}{1}/scim-server-{0}{1}.war'.format(app_versions['OX_VERSION'], app_versions['OX_GITVERISON']), os.path.join(gluu_app_dir, 'scim.war'))
download('https://ox.gluu.org/maven/org/gluu/fido2-server/{0}{1}/fido2-server-{0}{1}.war'.format(app_versions['OX_VERSION'], app_versions['OX_GITVERISON']), os.path.join(gluu_app_dir, 'fido2.war'))
download('https://ox.gluu.org/maven/org/gluu/oxauth-rp/{0}{1}/oxauth-rp-{0}{1}.war'.format(app_versions['OX_VERSION'], app_versions['OX_GITVERISON']), os.path.join(gluu_app_dir, 'oxauth-rp.war'))
download('https://github.com/GluuFederation/community-edition-setup/archive/{}.zip'.format(app_versions['SETUP_BRANCH']), os.path.join(gluu_app_dir, 'community-edition-setup.zip'))
download('https://raw.githubusercontent.com/GluuFederation/community-edition-setup/{}/install.py'.format(app_versions['SETUP_BRANCH']), 'opt/gluu/bin/install.py')
if target in ('el7', 'el8'):
download('https://repo.gluu.org/nochroot/python-libs/py3libs-{}.tgz'.format(target), 'tmp/usr.tgz')
package_oxd()
if '-x' in sys.argv:
download('https://raw.githubusercontent.com/GluuFederation/community-edition-package/master/ce-host/4.3.0/dependencies.sh', 'opt/gluu/bin/dependencies.sh')
download('https://raw.githubusercontent.com/GluuFederation/community-edition-package/master/ce-host/4.3.0/gluu-serverd', 'usr/sbin/gluu-serverd')
download('https://raw.githubusercontent.com/GluuFederation/community-edition-package/master/ce-host/4.3.0/gluu-server.sh', 'etc/profile.d/gluu-server.sh')
for app_bin in ('usr/bin/facter',
'opt/gluu/bin/install.py',
'opt/gluu/bin/dependencies.sh',
'usr/sbin/gluu-serverd',
'etc/profile.d/gluu-server.sh',
):
fn = os.path.join(cur_dir, app_bin)
if os.path.exists(fn):
os.chmod(fn, 33261)
if target in ('el7', 'el8'):
os.system('tar zxf {} -C {}'.format(os.path.join(cur_dir, 'tmp/usr.tgz'), cur_dir))
tmp_dir = os.path.join(cur_dir, 'tmp')
if os.path.exists(tmp_dir):
shutil.rmtree(tmp_dir)
scripts_dir = os.path.join(cur_dir, 'opt/dist/scripts')
if not os.path.exists(scripts_dir):
os.makedirs(scripts_dir)
open(os.path.join(scripts_dir, '.dontremove'), 'w').close()
#./makeself.sh --tar-extra "--exclude=/opt/gluu-server-4.3.0-host/download_apps.py" --target / /opt/gluu-server-4.3.0-host gluu-server-4.3.0-host.sh "Gluu CE Package 4.3.0" /opt/gluu/bin/dependencies.sh | |
application.js | // This is a manifest file that'll be compiled into application.js, which will include all the files
// listed below.
//
// Any JavaScript/Coffee file within this directory, lib/assets/javascripts, vendor/assets/javascripts,
// or any plugin's vendor/assets/javascripts directory can be referenced here using a relative path.
//
// It's not advisable to add code directly here, but if you do, it'll appear at the bottom of the
// compiled file.
//
// Read Sprockets README (https://github.com/rails/sprockets#sprockets-directives) for details
// about supported directives.
//
//= require Chart.min
//= require jquery
//= require bootstrap-sprockets
//= require jquery_ujs | import 'moment/moment' | //= require turbolinks
//= require_tree . |
test_series.py | from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from io import StringIO
import pytest
from jinja2 import Template
from flexget.entry import Entry
from flexget.logger import capture_output
from flexget.manager import get_parser, Session
from flexget.task import TaskAbort
from flexget.components.series import db
def age_series(**kwargs):
import datetime
session = Session()
session.query(db.EpisodeRelease).update({'first_seen': datetime.datetime.now() - datetime.timedelta(**kwargs)})
session.commit()
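# Example: age_series(hours=12) backdates every stored release by 12 hours,
# which is how the timeframe/propers tests below simulate the passage of time.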
@pytest.fixture(scope='class', params=['internal', 'guessit'], ids=['internal', 'guessit'], autouse=True)
def config(request):
"""Override and parametrize default config fixture for all series tests."""
newconfig = Template(request.cls.config).render({'parser': request.param})
# Make sure we remembered to put the section in config
assert request.cls.config != newconfig, 'config parameterization did nothing?'
return newconfig
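# For reference: each test class below embeds a Jinja template such as
#   templates:
#     global:
#       parsing:
#         series: {{parser}}
# and this fixture renders it once per parser ('internal' and 'guessit'),
# so every test in the class is executed against both parsers.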
class TestQuality(object):
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
exact_quality:
mock:
- {title: 'QTest.S01E01.HDTV.XViD-FlexGet'}
- {title: 'QTest.S01E01.PDTV.XViD-FlexGet'}
- {title: 'QTest.S01E01.DSR.XViD-FlexGet'}
- {title: 'QTest.S01E01.1080p.XViD-FlexGet'}
- {title: 'QTest.S01E01.720p.XViD-FlexGet'}
series:
- QTest:
quality: 720p
quality_fail:
mock:
- {title: 'Q2Test.S01E01.HDTV.XViD-FlexGet'}
- {title: 'Q2Test.S01E01.PDTV.XViD-FlexGet'}
- {title: 'Q2Test.S01E01.DSR.XViD-FlexGet'}
series:
- Q2Test:
quality: 720p
min_quality:
mock:
- {title: 'MinQTest.S01E01.HDTV.XViD-FlexGet'}
- {title: 'MinQTest.S01E01.PDTV.XViD-FlexGet'}
- {title: 'MinQTest.S01E01.DSR.XViD-FlexGet'}
- {title: 'MinQTest.S01E01.1080p.XViD-FlexGet'}
- {title: 'MinQTest.S01E01.720p.XViD-FlexGet'}
series:
- MinQTest:
quality: ">720p"
max_quality:
mock:
- {title: 'MaxQTest.S01E01.HDTV.XViD-FlexGet'}
- {title: 'MaxQTest.S01E01.PDTV.XViD-FlexGet'}
- {title: 'MaxQTest.S01E01.DSR.XViD-FlexGet'}
- {title: 'MaxQTest.S01E01.1080p.XViD-FlexGet'}
- {title: 'MaxQTest.S01E01.720p.XViD-FlexGet'}
- {title: 'MaxQTest.S01E01.720p.bluray-FlexGet'}
series:
- MaxQTest:
quality: "<720p <=HDTV"
min_max_quality:
mock:
- {title: 'MinMaxQTest.S01E01.HDTV.XViD-FlexGet'}
- {title: 'MinMaxQTest.S01E01.PDTV.XViD-FlexGet'}
- {title: 'MinMaxQTest.S01E01.DSR.XViD-FlexGet'}
- {title: 'MinMaxQTest.S01E01.720p.XViD-FlexGet'}
- {title: 'MinMaxQTest.S01E01.HR.XViD-FlexGet'}
- {title: 'MinMaxQTest.S01E01.1080p.XViD-FlexGet'}
series:
- MinMaxQTest:
quality: 480p-hr
max_unknown_quality:
mock:
- {title: 'MaxUnknownQTest.S01E01.XViD-FlexGet'}
series:
- MaxUnknownQTest:
quality: "<=hdtv"
quality_from_group:
mock:
- {title: 'GroupQual.S01E01.HDTV.XViD-FlexGet'}
- {title: 'GroupQual.S01E01.PDTV.XViD-FlexGet'}
- {title: 'GroupQual.S01E01.DSR.XViD-FlexGet'}
- {title: 'GroupQual.S01E01.1080p.XViD-FlexGet'}
- {title: 'GroupQual.S01E01.720p.XViD-FlexGet'}
- {title: 'Other.S01E01.hdtv.dd5.1.XViD-FlexGet'}
- {title: 'Other.S01E01.720p.hdtv.XViD-FlexGet'}
series:
720P:
- GroupQual
# Test that an integer group name doesn't cause an exception.
1080:
- Test
hdtv <hr !dd5.1:
- Other
quality_in_series_name:
mock:
- title: my 720p show S01E01
- title: my 720p show S01E02 720p
series:
- my 720p show:
quality: '<720p'
"""
def test_exact_quality(self, execute_task):
"""Series plugin: choose by quality"""
task = execute_task('exact_quality')
assert task.find_entry('accepted', title='QTest.S01E01.720p.XViD-FlexGet'), \
'720p should have been accepted'
assert len(task.accepted) == 1, 'should have accepted only one'
def test_quality_fail(self, execute_task):
task = execute_task('quality_fail')
assert not task.accepted, 'No qualities should have matched'
def test_min_quality(self, execute_task):
"""Series plugin: min_quality"""
task = execute_task('min_quality')
assert task.find_entry('accepted', title='MinQTest.S01E01.1080p.XViD-FlexGet'), \
'MinQTest.S01E01.1080p.XViD-FlexGet should have been accepted'
assert len(task.accepted) == 1, 'should have accepted only one'
def test_max_quality(self, execute_task):
"""Series plugin: max_quality"""
task = execute_task('max_quality')
assert task.find_entry('accepted', title='MaxQTest.S01E01.HDTV.XViD-FlexGet'), \
'MaxQTest.S01E01.HDTV.XViD-FlexGet should have been accepted'
assert len(task.accepted) == 1, 'should have accepted only one'
def test_min_max_quality(self, execute_task):
"""Series plugin: min_quality with max_quality"""
task = execute_task('min_max_quality')
assert task.find_entry('accepted', title='MinMaxQTest.S01E01.HR.XViD-FlexGet'), \
'MinMaxQTest.S01E01.HR.XViD-FlexGet should have been accepted'
assert len(task.accepted) == 1, 'should have accepted only one'
def test_max_unknown_quality(self, execute_task):
"""Series plugin: max quality with unknown quality"""
task = execute_task('max_unknown_quality')
assert len(task.accepted) == 1, 'should have accepted'
def test_group_quality(self, execute_task):
"""Series plugin: quality from group name"""
task = execute_task('quality_from_group')
assert task.find_entry('accepted', title='GroupQual.S01E01.720p.XViD-FlexGet'), \
'GroupQual.S01E01.720p.XViD-FlexGet should have been accepted'
assert len(task.accepted) == 1, 'should have accepted only one (no entries should pass for series `other`)'
def test_quality_in_series_name(self, execute_task):
"""Make sure quality in title does not get parsed as quality"""
task = execute_task('quality_in_series_name')
assert task.find_entry('accepted', title='my 720p show S01E01'), \
'quality in title should not have been parsed'
assert len(task.accepted) == 1, 'should not have accepted 720p entry'
class TestDatabase(object):
config = """
templates:
global:
parsing:
series: {{parser}}
series:
- some series
- progress
tasks:
test_1:
mock:
- {title: 'Some.Series.S01E20.720p.XViD-FlexGet'}
test_2:
mock:
- {title: 'Some.Series.S01E20.720p.XViD-DoppelGanger'}
progress_1:
mock:
- {title: 'Progress.S01E20.720p-FlexGet'}
- {title: 'Progress.S01E20.HDTV-FlexGet'}
progress_2:
mock:
- {title: 'Progress.S01E20.720p.Another-FlexGet'}
- {title: 'Progress.S01E20.HDTV-Another-FlexGet'}
"""
def test_database(self, execute_task):
"""Series plugin: simple database"""
task = execute_task('test_1')
task = execute_task('test_2')
assert task.find_entry('rejected', title='Some.Series.S01E20.720p.XViD-DoppelGanger'), \
'failed basic download remembering'
def test_doppelgangers(self, execute_task):
"""Series plugin: doppelganger releases (dupes)"""
task = execute_task('progress_1')
assert task.find_entry('accepted', title='Progress.S01E20.720p-FlexGet'), \
'best quality not accepted'
# should not accept anything
task = execute_task('progress_1')
assert not task.accepted, 'repeated execution accepted'
# introduce new doppelgangers
task = execute_task('progress_2')
assert not task.accepted, 'doppelgangers accepted'
class TestFilterSeries(object):
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
test:
mock:
- {title: 'Some.Series.S01E20.720p.XViD-FlexGet'}
- {title: 'Another.Series.S01E20.720p.XViD-FlexGet'}
- {title: 'Another.Series.S01E21.1080p.H264-FlexGet'}
- {title: 'Date.Series.10-11-2008.XViD'}
- {title: 'Date.Series.10.12.2008.XViD'}
- {title: 'Date.Series.2008-10-13.XViD'}
- {title: 'Date.Series.10.14.09.XViD'}
- {title: 'Date Series 2010 11 17 XViD'}
- {title: 'Useless title', filename: 'Filename.Series.S01E26.XViD'}
- {title: 'Empty.Description.S01E22.XViD', description: ''}
# test chaining
regexp:
reject:
- 1080p
series:
- another series
- date series
- filename series
- empty description
- (some) series
metainfo_series_override:
metainfo_series: yes
mock:
- {title: 'Test.Series.with.extra.crap.S01E02.PDTV.XViD-FlexGet'}
- {title: 'Other.Show.with.extra.crap.S02E01.PDTV.XViD-FlexGet'}
series:
- Test Series
test_all_series_mode:
mock:
- {title: 'Test.Series.S01E02.PDTV.XViD-FlexGet'}
- {title: 'Test Series - 1x03 - PDTV XViD-FlexGet'}
- {title: 'Other.Show.S02E01.PDTV.XViD-FlexGet'}
- {title: 'other show season 2 episode 2'}
- {title: 'Date.Show.03-29-2012.HDTV.XViD-FlexGet'}
all_series: yes
test_alternate_name:
mock:
- title: The.Show.S01E01
- title: Other.Name.S01E02
- title: many.names.S01E01
- title: name.1.S01E02
- title: name.2.S01E03
- title: paren.title.2013.S01E01
series:
- The Show:
alternate_name: Other Name
- many names:
alternate_name:
- name 1
- name 2
- paren title (US):
alternate_name: paren title 2013
test_input_order_preserved:
series:
- Some Show
"""
def test_smoke(self, execute_task):
"""Series plugin: test several standard features"""
task = execute_task('test')
# normal passing
assert task.find_entry(title='Another.Series.S01E20.720p.XViD-FlexGet'), \
'Another.Series.S01E20.720p.XViD-FlexGet should have passed'
# series with brackets
assert task.find_entry('accepted', title='Some.Series.S01E20.720p.XViD-FlexGet'), \
'Some.Series.S01E20.720p.XViD-FlexGet should have been accepted'
# date formats
df = ['Date.Series.10-11-2008.XViD', 'Date.Series.10.12.2008.XViD', 'Date Series 2010 11 17 XViD',
'Date.Series.2008-10-13.XViD', 'Date.Series.10.14.09.XViD']
for d in df:
entry = task.find_entry(title=d)
assert entry, 'Date format did not match %s' % d
assert 'series_parser' in entry, 'series_parser missing from %s' % d
assert entry['series_parser'].id_type == 'date', '%s did not return three groups for dates' % d
# parse from filename
assert task.find_entry(filename='Filename.Series.S01E26.XViD'), 'Filename parsing failed'
# empty description
assert task.find_entry(title='Empty.Description.S01E22.XViD'), 'Empty Description failed'
# chaining with regexp plugin
assert task.find_entry('rejected', title='Another.Series.S01E21.1080p.H264-FlexGet'), \
'regexp chaining'
def test_metainfo_series_override(self, execute_task):
"""Series plugin: override metainfo_series"""
task = execute_task('metainfo_series_override')
# Make sure the metainfo_series plugin is working first
entry = task.find_entry('entries', title='Other.Show.with.extra.crap.S02E01.PDTV.XViD-FlexGet')
assert entry['series_guessed'], 'series should have been guessed'
assert entry['series_name'] == entry['series_parser'].name == 'Other Show With Extra Crap', \
'metainfo_series is not running'
# Make sure the good series data overrode metainfo data for the listed series
entry = task.find_entry('accepted', title='Test.Series.with.extra.crap.S01E02.PDTV.XViD-FlexGet')
assert not entry.get('series_guessed'), 'series plugin should override series_guessed'
assert entry['series_name'] == entry['series_parser'].name == 'Test Series', \
'Series name should be \'Test Series\', was: entry: %s, parser: %s' % (
entry['series_name'], entry['series_parser'].name)
def test_all_series_mode(self, execute_task):
"""Series plugin: test all option"""
task = execute_task('test_all_series_mode')
assert task.find_entry('accepted', title='Test.Series.S01E02.PDTV.XViD-FlexGet')
assert task.find_entry('accepted', title='Test Series - 1x03 - PDTV XViD-FlexGet')
entry = task.find_entry('accepted', title='Test Series - 1x03 - PDTV XViD-FlexGet')
assert entry
assert entry.get('series_name') == 'Test Series'
entry = task.find_entry('accepted', title='Other.Show.S02E01.PDTV.XViD-FlexGet')
assert entry.get('series_guessed')
entry2 = task.find_entry('accepted', title='other show season 2 episode 2')
# Make sure case is normalized so series are marked with the same name no matter the case in the title
assert entry.get('series_name') == entry2.get(
'series_name') == 'Other Show', 'Series names should be in title case'
entry = task.find_entry('accepted', title='Date.Show.03-29-2012.HDTV.XViD-FlexGet')
assert entry.get('series_guessed')
assert entry.get('series_name') == 'Date Show'
def test_alternate_name(self, execute_task):
task = execute_task('test_alternate_name')
assert all(e.accepted for e in task.all_entries), 'All releases should have matched a show'
@pytest.mark.parametrize('reverse', [False, True])
def test_input_order_preserved(self, manager, execute_task, reverse):
"""If multiple versions of an episode are acceptable, make sure the first one is accepted."""
entries = [
Entry(title='Some Show S01E01 720p proper', url='http://a'),
Entry(title='Some Show S01E01 1080p', url='http://b')
]
if reverse:
entries.reverse()
task = execute_task('test_input_order_preserved', options={'inject': entries})
assert task.accepted[0] == entries[0], 'first entry should have been accepted'
class TestEpisodeAdvancement(object):
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
test_backwards_1:
mock:
- {title: 'backwards s02e12'}
- {title: 'backwards s02e10'}
series:
- backwards
test_backwards_2:
mock:
- {title: 'backwards s02e01'}
series:
- backwards
test_backwards_3:
mock:
- {title: 'backwards s01e01'}
series:
- backwards
test_backwards_okay_1:
mock:
- {title: 'backwards s01e02'}
series:
- backwards:
tracking: backfill
test_backwards_okay_2:
mock:
- {title: 'backwards s01e03'}
series:
- backwards:
tracking: no
test_forwards_1:
mock:
- {title: 'forwards s01e01'}
series:
- forwards
test_forwards_2:
mock:
- {title: 'forwards s02e01'}
series:
- forwards
test_forwards_3:
mock:
- {title: 'forwards s03e01'}
series:
- forwards
test_forwards_4:
mock:
- {title: 'forwards s04e02'}
series:
- forwards
test_forwards_5:
mock:
- {title: 'forwards s05e01'}
series:
- forwards
test_forwards_okay_1:
mock:
- {title: 'forwards s05e01'}
series:
- forwards:
tracking: no
test_unordered:
mock:
- {title: 'zzz s01e05'}
- {title: 'zzz s01e06'}
- {title: 'zzz s01e07'}
- {title: 'zzz s01e08'}
- {title: 'zzz s01e09'}
- {title: 'zzz s01e10'}
- {title: 'zzz s01e15'}
- {title: 'zzz s01e14'}
- {title: 'zzz s01e13'}
- {title: 'zzz s01e12'}
- {title: 'zzz s01e11'}
- {title: 'zzz s01e01'}
series:
- zzz
test_seq1:
mock:
- title: seq 05
series:
- seq
test_seq2:
mock:
- title: seq 06
series:
- seq
test_seq3:
mock:
- title: seq 10
series:
- seq
test_seq4:
mock:
- title: seq 01
series:
- seq
"""
def test_backwards(self, execute_task):
"""Series plugin: episode advancement (backwards)"""
task = execute_task('test_backwards_1')
assert task.find_entry('accepted', title='backwards s02e12'), \
'backwards s02e12 should have been accepted'
assert task.find_entry('accepted', title='backwards s02e10'), \
'backwards s02e10 should have been accepted within grace margin'
task = execute_task('test_backwards_2')
assert task.find_entry('accepted', title='backwards s02e01'), \
'backwards s02e01 should have been accepted, in current season'
task = execute_task('test_backwards_3')
assert task.find_entry('rejected', title='backwards s01e01'), \
'backwards s01e01 should have been rejected, in previous season'
task = execute_task('test_backwards_okay_1')
assert task.find_entry('accepted', title='backwards s01e02'), \
'backwards s01e02 should have been accepted, backfill enabled'
task = execute_task('test_backwards_okay_2')
assert task.find_entry('accepted', title='backwards s01e03'), \
'backwards s01e03 should have been accepted, tracking off'
def test_forwards(self, execute_task):
"""Series plugin: episode advancement (future)"""
task = execute_task('test_forwards_1')
assert task.find_entry('accepted', title='forwards s01e01'), \
'forwards s01e01 should have been accepted'
task = execute_task('test_forwards_2')
assert task.find_entry('accepted', title='forwards s02e01'), \
'forwards s02e01 should have been accepted'
task = execute_task('test_forwards_3')
assert task.find_entry('accepted', title='forwards s03e01'), \
'forwards s03e01 should have been accepted'
task = execute_task('test_forwards_4')
assert task.find_entry('rejected', title='forwards s04e02'), \
'forwards s04e02 should have been rejected'
task = execute_task('test_forwards_5')
assert task.find_entry('rejected', title='forwards s05e01'), \
'forwards s05e01 should have been rejected'
task = execute_task('test_forwards_okay_1')
assert task.find_entry('accepted', title='forwards s05e01'), \
'forwards s05e01 should have been accepted with tracking turned off'
def test_unordered(self, execute_task):
"""Series plugin: unordered episode advancement"""
task = execute_task('test_unordered')
assert len(task.accepted) == 12, \
'not everyone was accepted'
def test_sequence(self, execute_task):
# First should be accepted
task = execute_task('test_seq1')
entry = task.find_entry('accepted', title='seq 05')
assert entry['series_id'] == 5
# Next in sequence should be accepted
task = execute_task('test_seq2')
entry = task.find_entry('accepted', title='seq 06')
assert entry['series_id'] == 6
# Should be too far in the future
task = execute_task('test_seq3')
entry = task.find_entry(title='seq 10')
assert entry not in task.accepted, 'Should have been too far in future'
# Should be too far in the past
task = execute_task('test_seq4')
entry = task.find_entry(title='seq 01')
assert entry not in task.accepted, 'Should have been too far in the past'
class TestFilterSeriesPriority(object):
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
test:
mock:
- {title: 'foobar 720p s01e01'}
- {title: 'foobar hdtv s01e01'}
regexp:
reject:
- 720p
series:
- foobar
"""
def test_priorities(self, execute_task):
"""Series plugin: regexp plugin is able to reject before series plugin"""
task = execute_task('test')
assert task.find_entry('rejected', title='foobar 720p s01e01'), \
'foobar 720p s01e01 should have been rejected'
assert task.find_entry('accepted', title='foobar hdtv s01e01'), \
'foobar hdtv s01e01 is not accepted'
class TestPropers(object):
config = """
templates:
global:
parsing:
series: {{parser}}
# prevents seen from rejecting on second execution,
# we want to see that series is able to reject
disable: builtins
series:
- test
- foobar
- asfd:
quality: HR-1080p
- V
- tftest:
propers: 3 hours
- notest:
propers: no
tasks:
propers_1:
mock:
- {title: 'Test.S01E01.720p-FlexGet'}
# introduce proper, should be accepted
propers_2:
mock:
- {title: 'Test.S01E01.720p.Proper-FlexGet'}
# introduce non-proper, should not be downloaded
propers_3:
mock:
- {title: 'Test.S01E01.FlexGet'}
# introduce proper at the same time, should nuke non-proper and get proper
proper_at_first:
mock:
- {title: 'Foobar.S01E01.720p.FlexGet'}
- {title: 'Foobar.S01E01.720p.proper.FlexGet'}
# test a lot of propers at once
lot_propers:
mock:
- {title: 'V.2009.S01E01.PROPER.HDTV.A'}
- {title: 'V.2009.S01E01.PROPER.HDTV.B'}
- {title: 'V.2009.S01E01.PROPER.HDTV.C'}
diff_quality_1:
mock:
- {title: 'Test.S01E02.720p-FlexGet'}
# low quality proper, should not be accepted
diff_quality_2:
mock:
- {title: 'Test.S01E02.HDTV.Proper-FlexGet'}
# min + max quality with propers
min_max_quality_1:
mock:
- {title: 'asfd.S01E01.720p-FlexGet'}
min_max_quality_2:
mock:
- {title: 'asfd.S01E01.720p.Proper-FlexGet'}
proper_timeframe_1:
mock:
- {title: 'TFTest.S01E01.720p-FlexGet'}
proper_timeframe_2:
mock:
- {title: 'TFTest.S01E01.720p.proper-FlexGet'}
no_propers_1:
mock:
- {title: 'NoTest.S01E01.720p-FlexGet'}
no_propers_2:
mock:
- {title: 'NoTest.S01E01.720p.proper-FlexGet'}
proper_upgrade_1:
mock:
- {title: 'Test.S02E01.hdtv.proper'}
proper_upgrade_2:
mock:
- {title: 'Test.S02E01.hdtv.real.proper'}
anime_proper_1:
mock:
- title: test 04v0 hdtv
anime_proper_2:
mock:
- title: test 04 hdtv
fastsub_proper_1:
mock:
- title: test s01e01 Fastsub hdtv
fastsub_proper_2:
mock:
- title: test s01e01 Fastsub repack hdtv
fastsub_proper_3:
mock:
- title: test s01e01 hdtv
fastsub_proper_4:
mock:
- title: test s01e01 proper hdtv
"""
def test_propers_timeframe(self, execute_task):
"""Series plugin: propers timeframe"""
task = execute_task('proper_timeframe_1')
assert task.find_entry('accepted', title='TFTest.S01E01.720p-FlexGet'), \
'Did not accept before timeframe'
# let 6 hours pass
age_series(hours=6)
task = execute_task('proper_timeframe_2')
assert task.find_entry('rejected', title='TFTest.S01E01.720p.proper-FlexGet'), \
'Did not reject after proper timeframe'
def test_no_propers(self, execute_task):
"""Series plugin: no propers at all"""
task = execute_task('no_propers_1')
assert len(task.accepted) == 1, 'broken badly'
task = execute_task('no_propers_2')
assert len(task.rejected) == 1, 'accepted proper'
def test_min_max_propers(self, execute_task):
"""Series plugin: min max propers"""
task = execute_task('min_max_quality_1')
assert len(task.accepted) == 1, 'uhh, broken badly'
task = execute_task('min_max_quality_2')
assert len(task.accepted) == 1, 'should have accepted proper'
def test_lot_propers(self, execute_task):
"""Series plugin: proper flood"""
task = execute_task('lot_propers')
assert len(task.accepted) == 1, 'should have accepted (only) one of the propers'
def test_diff_quality_propers(self, execute_task):
"""Series plugin: proper in different/wrong quality"""
task = execute_task('diff_quality_1')
assert len(task.accepted) == 1
task = execute_task('diff_quality_2')
assert len(task.accepted) == 0, 'should not have accepted lower quality proper'
def test_propers(self, execute_task):
"""Series plugin: proper accepted after episode is downloaded"""
# start with normal download ...
task = execute_task('propers_1')
assert task.find_entry('accepted', title='Test.S01E01.720p-FlexGet'), \
'Test.S01E01-FlexGet should have been accepted'
# rejects downloaded
task = execute_task('propers_1')
assert task.find_entry('rejected', title='Test.S01E01.720p-FlexGet'), \
'Test.S01E01-FlexGet should have been rejected'
# accepts proper
task = execute_task('propers_2')
assert task.find_entry('accepted', title='Test.S01E01.720p.Proper-FlexGet'), \
'new undownloaded proper should have been accepted'
# reject downloaded proper
task = execute_task('propers_2')
assert task.find_entry('rejected', title='Test.S01E01.720p.Proper-FlexGet'), \
'downloaded proper should have been rejected'
# reject episode that has been downloaded normally and with proper
task = execute_task('propers_3')
assert task.find_entry('rejected', title='Test.S01E01.FlexGet'), \
'Test.S01E01.FlexGet should have been rejected'
def test_proper_available(self, execute_task):
"""Series plugin: proper available immediately"""
task = execute_task('proper_at_first')
assert task.find_entry('accepted', title='Foobar.S01E01.720p.proper.FlexGet'), \
'Foobar.S01E01.720p.proper.FlexGet should have been accepted'
def test_proper_upgrade(self, execute_task):
"""Series plugin: real proper after proper"""
task = execute_task('proper_upgrade_1')
assert task.find_entry('accepted', title='Test.S02E01.hdtv.proper')
task = execute_task('proper_upgrade_2')
assert task.find_entry('accepted', title='Test.S02E01.hdtv.real.proper')
def test_anime_proper(self, execute_task):
task = execute_task('anime_proper_1')
assert task.accepted, 'ep should have accepted'
task = execute_task('anime_proper_2')
assert task.accepted, 'proper ep should have been accepted'
def test_fastsub_proper(self, execute_task):
task = execute_task('fastsub_proper_1')
assert task.accepted, 'ep should have accepted'
task = execute_task('fastsub_proper_2')
assert task.accepted, 'proper ep should have been accepted'
task = execute_task('fastsub_proper_3')
assert task.accepted, 'proper ep should have been accepted'
task = execute_task('fastsub_proper_4')
assert task.accepted, 'proper ep should have been accepted'
class TestSimilarNames(object):
# hmm, not very good way to test this .. seriesparser should be tested alone?
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
test:
mock:
- {title: 'FooBar.S03E01.DSR-FlexGet'}
- {title: 'FooBar: FirstAlt.S02E01.DSR-FlexGet'}
- {title: 'FooBar: SecondAlt.S01E01.DSR-FlexGet'}
series:
- FooBar
- 'FooBar: FirstAlt'
- 'FooBar: SecondAlt'
test_ambiguous:
mock:
- title: Foo.2.2
series:
- Foo:
identified_by: sequence
- Foo 2:
identified_by: sequence
"""
def test_names(self, execute_task):
"""Series plugin: similar namings"""
task = execute_task('test')
assert task.find_entry('accepted', title='FooBar.S03E01.DSR-FlexGet'), 'Standard failed?'
assert task.find_entry('accepted', title='FooBar: FirstAlt.S02E01.DSR-FlexGet'), 'FirstAlt failed'
assert task.find_entry('accepted', title='FooBar: SecondAlt.S01E01.DSR-FlexGet'), 'SecondAlt failed'
def test_ambiguous(self, execute_task):
task = execute_task('test_ambiguous')
# In the event of ambiguous match, more specific one should be chosen
assert task.find_entry('accepted', title='Foo.2.2')['series_name'] == 'Foo 2'
class TestDuplicates(object):
config = """
templates:
global:
parsing:
series: {{parser}}
# just cleans log a bit ..
disable:
- seen
tasks:
test_dupes:
mock:
- {title: 'Foo.2009.S02E04.HDTV.XviD-2HD[FlexGet]'}
- {title: 'Foo.2009.S02E04.HDTV.XviD-2HD[ASDF]'}
series:
- Foo 2009
test_1:
mock:
- {title: 'Foo.Bar.S02E04.HDTV.XviD-2HD[FlexGet]'}
- {title: 'Foo.Bar.S02E04.HDTV.XviD-2HD[ASDF]'}
series:
- foo bar
test_2:
mock:
- {title: 'Foo.Bar.S02E04.XviD-2HD[ASDF]'}
- {title: 'Foo.Bar.S02E04.HDTV.720p.XviD-2HD[FlexGet]'}
- {title: 'Foo.Bar.S02E04.DSRIP.XviD-2HD[ASDF]'}
- {title: 'Foo.Bar.S02E04.HDTV.1080p.XviD-2HD[ASDF]'}
- {title: 'Foo.Bar.S02E03.HDTV.XviD-FlexGet'}
- {title: 'Foo.Bar.S02E05.720p.HDTV.XviD-YYY'}
series:
- foo bar
test_true_dupes:
mock:
- {title: 'Dupe.S02E04.HDTV.XviD-FlexGet'}
- {title: 'Dupe.S02E04.HDTV.XviD-FlexGet'}
- {title: 'Dupe.S02E04.HDTV.XviD-FlexGet'}
series:
- dupe
"""
def test_dupes(self, execute_task):
"""Series plugin: dupes with same quality"""
task = execute_task('test_dupes')
assert len(task.accepted) == 1, 'accepted both'
def test_true_dupes(self, execute_task):
"""Series plugin: true duplicate items"""
task = execute_task('test_true_dupes')
assert len(task.accepted) == 1, 'should have accepted (only) one'
def test_downloaded(self, execute_task):
"""Series plugin: multiple downloaded and new episodes are handled correctly"""
task = execute_task('test_1')
task = execute_task('test_2')
# these should be accepted
accepted = ['Foo.Bar.S02E03.HDTV.XviD-FlexGet', 'Foo.Bar.S02E05.720p.HDTV.XviD-YYY']
for item in accepted:
assert task.find_entry('accepted', title=item), \
'%s should have been accepted' % item
# these should be rejected
rejected = ['Foo.Bar.S02E04.XviD-2HD[ASDF]', 'Foo.Bar.S02E04.HDTV.720p.XviD-2HD[FlexGet]',
'Foo.Bar.S02E04.DSRIP.XviD-2HD[ASDF]', 'Foo.Bar.S02E04.HDTV.1080p.XviD-2HD[ASDF]']
for item in rejected:
assert task.find_entry('rejected', title=item), \
'%s should have been rejected' % item
class TestQualities(object):
config = """
templates:
global:
parsing:
series: {{parser}}
disable: builtins
series:
- FooBar:
qualities:
- SDTV
- 720p
- 1080p
- FooBaz:
upgrade: yes
qualities:
- hdtv
- hr
- 720p
- FooBum:
quality: 720p-1080i
upgrade: yes
- FooD:
target: 720p
timeframe: 0 hours
upgrade: yes
tasks:
test_1:
mock:
- {title: 'FooBar.S01E01.PDTV-FlexGet'}
- {title: 'FooBar.S01E01.1080p-FlexGet'}
- {title: 'FooBar.S01E01.HR-FlexGet'}
test_2:
mock:
- {title: 'FooBar.S01E01.720p-FlexGet'}
propers_1:
mock:
- {title: 'FooBar.S01E02.720p-FlexGet'}
propers_2:
mock:
- {title: 'FooBar.S01E02.720p.Proper-FlexGet'}
upgrade_1:
mock:
- {title: 'FooBaz.S01E02.pdtv-FlexGet'}
- {title: 'FooBaz.S01E02.HR-FlexGet'}
upgrade_2:
mock:
- {title: 'FooBaz.S01E02.720p-FlexGet'}
- {title: 'FooBaz.S01E02.1080p-FlexGet'}
upgrade_3:
mock:
- {title: 'FooBaz.S01E02.hdtv-FlexGet'}
- {title: 'FooBaz.S01E02.720p rc-FlexGet'}
quality_upgrade_1:
mock:
- title: FooBum.S03E01.1080p # too high
- title: FooBum.S03E01.hdtv # too low
- title: FooBum.S03E01.720p # in range
quality_upgrade_2:
mock:
- title: FooBum.S03E01.1080i # should be upgraded to
- title: FooBum.S03E01.720p-ver2 # Duplicate ep
target_1:
mock:
- title: Food.S06E11.hdtv
target_2:
mock:
- title: Food.S06E11.1080p
- title: Food.S06E11.720p
"""
def test_qualities(self, execute_task):
"""Series plugin: qualities"""
task = execute_task('test_1')
assert task.find_entry('accepted', title='FooBar.S01E01.PDTV-FlexGet'), \
'Didn\'t accept FooBar.S01E01.PDTV-FlexGet'
assert task.find_entry('accepted', title='FooBar.S01E01.1080p-FlexGet'), \
'Didn\'t accept FooBar.S01E01.1080p-FlexGet'
assert not task.find_entry('accepted', title='FooBar.S01E01.HR-FlexGet'), \
'Accepted FooBar.S01E01.HR-FlexGet'
task = execute_task('test_2')
assert task.find_entry('accepted', title='FooBar.S01E01.720p-FlexGet'), \
'Didn\'t accept FooBar.S01E01.720p-FlexGet'
# test that it rejects them afterwards
task = execute_task('test_1')
assert task.find_entry('rejected', title='FooBar.S01E01.PDTV-FlexGet'), \
'Didn\'t reject FooBar.S01E01.PDTV-FlexGet'
assert task.find_entry('rejected', title='FooBar.S01E01.1080p-FlexGet'), \
'Didn\'t reject FooBar.S01E01.1080p-FlexGet'
assert not task.find_entry('accepted', title='FooBar.S01E01.HR-FlexGet'), \
'Accepted FooBar.S01E01.HR-FlexGet'
def test_propers(self, execute_task):
"""Series plugin: qualities + propers"""
task = execute_task('propers_1')
assert task.accepted
task = execute_task('propers_2')
assert task.accepted, 'proper not accepted'
task = execute_task('propers_2')
assert not task.accepted, 'proper accepted again'
def test_qualities_upgrade(self, execute_task):
task = execute_task('upgrade_1')
assert task.find_entry('accepted', title='FooBaz.S01E02.HR-FlexGet'), 'HR quality should be accepted'
assert len(task.accepted) == 1, 'Only best quality should be accepted'
task = execute_task('upgrade_2')
assert task.find_entry('accepted', title='FooBaz.S01E02.720p-FlexGet'), '720p quality should be accepted'
assert len(task.accepted) == 1, 'Only best quality should be accepted'
task = execute_task('upgrade_3')
assert not task.accepted, 'Should not have accepted worse qualities'
def test_quality_upgrade(self, execute_task):
task = execute_task('quality_upgrade_1')
assert len(task.accepted) == 1, 'Only one ep should have passed quality filter'
assert task.find_entry('accepted', title='FooBum.S03E01.720p')
task = execute_task('quality_upgrade_2')
assert len(task.accepted) == 1, 'one ep should be valid upgrade'
assert task.find_entry('accepted', title='FooBum.S03E01.1080i')
def test_target_upgrade(self, execute_task):
task = execute_task('target_1')
assert len(task.accepted) == 1, 'Only one ep should have been grabbed'
assert task.find_entry('accepted', title='Food.S06E11.hdtv')
task = execute_task('target_2')
assert len(task.accepted) == 1, 'one ep should be valid upgrade'
assert task.find_entry('accepted', title='Food.S06E11.720p'), 'Should upgrade to `target`'
class TestIdioticNumbering(object):
config = """
templates:
global:
parsing:
series: {{parser}}
series:
- FooBar:
identified_by: ep
tasks:
test_1:
mock:
- {title: 'FooBar.S01E01.PDTV-FlexGet'}
test_2:
mock:
- {title: 'FooBar.102.PDTV-FlexGet'}
"""
def test_idiotic(self, execute_task):
"""Series plugin: idiotic numbering scheme"""
task = execute_task('test_1')
task = execute_task('test_2')
entry = task.find_entry(title='FooBar.102.PDTV-FlexGet')
assert entry, 'entry not found?'
assert entry['series_season'] == 1, 'season not detected'
assert entry['series_episode'] == 2, 'episode not detected'
class TestNormalization(object):
config = """
templates:
global:
parsing:
series: {{parser}}
disable: [seen]
tasks:
test_1:
mock:
- {title: 'FooBar.S01E01.PDTV-FlexGet'}
series:
- FOOBAR
test_2:
mock:
- {title: 'FooBar.S01E01.PDTV-aoeu'}
series:
- foobar
test_3:
mock:
- title: Foo bar & co 2012.s01e01.sdtv.a
series:
- foo bar & co 2012
test_4:
mock:
- title: Foo bar & co 2012.s01e01.sdtv.b
series:
- Foo/Bar and Co. (2012)
"""
def test_capitalization(self, execute_task):
"""Series plugin: configuration capitalization"""
task = execute_task('test_1')
assert task.find_entry('accepted', title='FooBar.S01E01.PDTV-FlexGet')
task = execute_task('test_2')
assert task.find_entry('rejected', title='FooBar.S01E01.PDTV-aoeu')
def test_normalization(self, execute_task):
task = execute_task('test_3')
assert task.find_entry('accepted', title='Foo bar & co 2012.s01e01.sdtv.a')
task = execute_task('test_4')
assert task.find_entry('rejected', title='Foo bar & co 2012.s01e01.sdtv.b')
class TestMixedNumbering(object):
config = """
templates:
global:
parsing:
series: {{parser}}
series:
- FooBar:
identified_by: ep
tasks:
test_1:
mock:
- {title: 'FooBar.S03E07.PDTV-FlexGet'}
test_2:
mock:
- {title: 'FooBar.0307.PDTV-FlexGet'}
"""
def test_mixednumbering(self, execute_task):
"""Series plugin: Mixed series numbering"""
task = execute_task('test_1')
assert task.find_entry('accepted', title='FooBar.S03E07.PDTV-FlexGet')
task = execute_task('test_2')
assert task.find_entry('rejected', title='FooBar.0307.PDTV-FlexGet')
class TestExact(object):
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
auto:
mock:
- {title: 'ABC.MIAMI.S01E01.PDTV-FlexGet'}
- {title: 'ABC.S01E01.PDTV-FlexGet'}
- {title: 'ABC.LA.S01E01.PDTV-FlexGet'}
series:
- ABC
- ABC LA
- ABC Miami
name_regexp:
mock:
- title: show s09e05 hdtv
- title: show a s09e06 hdtv
series:
- show:
name_regexp: ^show
exact: yes
date:
mock:
- title: date show 04.01.2011 hdtv
- title: date show b 04.02.2011 hdtv
series:
- date show:
exact: yes
"""
def test_auto(self, execute_task):
"""Series plugin: auto enable exact"""
task = execute_task('auto')
assert task.find_entry('accepted', title='ABC.S01E01.PDTV-FlexGet')
assert task.find_entry('accepted', title='ABC.LA.S01E01.PDTV-FlexGet')
assert task.find_entry('accepted', title='ABC.MIAMI.S01E01.PDTV-FlexGet')
def test_with_name_regexp(self, execute_task):
task = execute_task('name_regexp')
assert task.find_entry('accepted', title='show s09e05 hdtv')
assert not task.find_entry('accepted', title='show a s09e06 hdtv')
def test_dated_show(self, execute_task):
task = execute_task('date')
assert task.find_entry('accepted', title='date show 04.01.2011 hdtv')
assert not task.find_entry('accepted', title='date show b 04.02.2011 hdtv')
class TestTimeframe(object):
config = """
templates:
global:
parsing:
series: {{parser}}
series:
- test:
timeframe: 5 hours
target: 720p
tasks:
test_no_waiting:
mock:
- {title: 'Test.S01E01.720p-FlexGet'}
test_stop_waiting_1:
mock:
- {title: 'Test.S01E02.HDTV-FlexGet'}
test_stop_waiting_2:
mock:
- {title: 'Test.S01E02.720p-FlexGet'}
test_proper_afterwards:
mock:
- {title: 'Test.S01E02.720p.Proper-FlexGet'}
test_expires:
mock:
- {title: 'Test.S01E03.pdtv-FlexGet'}
test_min_max_fail:
series:
- mm test:
timeframe: 5 hours
target: 720p
quality: hdtv+ <=720p
mock:
- {title: 'MM Test.S01E02.pdtv-FlexGet'}
- {title: 'MM Test.S01E02.1080p-FlexGet'}
test_min_max_pass:
series:
- mm test:
timeframe: 5 hours
target: 720p
quality: hdtv+ <=720p
mock:
- {title: 'MM Test.S01E02.pdtv-FlexGet'}
- {title: 'MM Test.S01E02.hdtv-FlexGet'}
- {title: 'MM Test.S01E02.1080p-FlexGet'}
test_qualities_fail:
series:
- q test:
timeframe: 5 hours
qualities:
- hdtv
- 1080p
mock:
- {title: 'Q Test.S01E02.pdtv-FlexGet'}
- {title: 'Q Test.S01E02.1080p-FlexGet'}
test_qualities_pass:
series:
- q test:
timeframe: 5 hours
qualities:
- sdtv
- 720p
mock:
- {title: 'Q Test.S01E02.1080p-FlexGet'}
test_with_quality_1:
series:
- q test:
timeframe: 5 hours
quality: hdtv+
target: 720p
mock:
- title: q test s01e01 pdtv 720p
test_with_quality_2:
series:
- q test:
timeframe: 5 hours
quality: hdtv+
target: 720p
mock:
- title: q test s01e01 hdtv
"""
def test_no_waiting(self, execute_task):
"""Series plugin: no timeframe waiting needed"""
task = execute_task('test_no_waiting')
assert task.find_entry('accepted', title='Test.S01E01.720p-FlexGet'), \
'720p not accepted immediately'
def test_stop_waiting(self, execute_task):
"""Series plugin: timeframe quality appears, stop waiting, proper appears"""
task = execute_task('test_stop_waiting_1')
assert task.entries and not task.accepted
task = execute_task('test_stop_waiting_2')
assert task.find_entry('accepted', title='Test.S01E02.720p-FlexGet'), \
'720p should have caused stop waiting'
task = execute_task('test_proper_afterwards')
assert task.find_entry('accepted', title='Test.S01E02.720p.Proper-FlexGet'), \
'proper should have been accepted'
def test_expires(self, execute_task):
"""Series plugin: timeframe expires"""
# first execution should not accept anything
task = execute_task('test_expires')
assert not task.accepted
# let 3 hours pass
age_series(hours=3)
task = execute_task('test_expires')
assert not task.accepted, 'expired too soon'
# let another 3 hours pass, should expire now!
age_series(hours=6)
task = execute_task('test_expires')
assert task.accepted, 'timeframe didn\'t expire'
def test_min_max_fail(self, execute_task):
task = execute_task('test_min_max_fail')
assert not task.accepted
# Let 6 hours pass, timeframe should not even have been started, as pdtv doesn't meet min_quality
age_series(hours=6)
task = execute_task('test_min_max_fail')
assert task.entries and not task.accepted
def test_min_max_pass(self, execute_task):
task = execute_task('test_min_max_pass')
assert not task.accepted
# Let 6 hours pass, timeframe should expire and accept hdtv copy
age_series(hours=6)
task = execute_task('test_min_max_pass')
assert task.find_entry('accepted', title='MM Test.S01E02.hdtv-FlexGet')
assert len(task.accepted) == 1
def | (self, execute_task):
task = execute_task('test_qualities_fail')
assert task.find_entry('accepted', title='Q Test.S01E02.1080p-FlexGet'), \
'should have accepted wanted quality'
assert len(task.accepted) == 1
# Let 6 hours pass, timeframe should not even have been started, as we already have one of our qualities
age_series(hours=6)
task = execute_task('test_qualities_fail')
assert task.entries and not task.accepted
def test_qualities_pass(self, execute_task):
task = execute_task('test_qualities_pass')
assert not task.accepted, 'None of the qualities should have matched'
# Let 6 hours pass, timeframe should expire and accept 1080p copy
age_series(hours=6)
task = execute_task('test_qualities_pass')
assert task.find_entry('accepted', title='Q Test.S01E02.1080p-FlexGet')
assert len(task.accepted) == 1
def test_with_quality(self, execute_task):
task = execute_task('test_with_quality_1')
assert not task.accepted, 'Entry does not pass quality'
age_series(hours=6)
# Entry from first test feed should not pass quality
task = execute_task('test_with_quality_1')
assert not task.accepted, 'Entry does not pass quality'
# Timeframe should not yet have started
task = execute_task('test_with_quality_2')
assert not task.accepted, 'Timeframe should not yet have passed'
age_series(hours=6)
task = execute_task('test_with_quality_2')
assert task.accepted, 'Timeframe should have passed'
class TestBacklog(object):
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
backlog:
mock:
- {title: 'Test.S01E01.hdtv-FlexGet'}
series:
- test: {timeframe: 6 hours}
"""
def testBacklog(self, manager, execute_task):
"""Series plugin: backlog"""
task = execute_task('backlog')
assert task.entries and not task.accepted, 'no entries at the start'
# simulate test going away from the task
del (manager.config['tasks']['backlog']['mock'])
age_series(hours=12)
task = execute_task('backlog')
assert task.accepted, 'backlog is not injecting episodes'
class TestManipulate(object):
"""Tests that it's possible to manipulate entries before they're parsed by series plugin"""
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
test_1:
mock:
- {title: 'PREFIX: Test.S01E01.hdtv-FlexGet'}
series:
- test
test_2:
mock:
- {title: 'PREFIX: Test.S01E01.hdtv-FlexGet'}
series:
- test
manipulate:
- title:
extract: '^PREFIX: (.*)'
"""
def testManipulate(self, execute_task):
"""Series plugin: test manipulation priority"""
# should not work with the prefix
task = execute_task('test_1')
assert not task.accepted, 'series accepted even with prefix?'
assert not task.rejected, 'series rejected even with prefix?'
task = execute_task('test_2')
assert task.accepted, 'manipulate failed to pre-clean title'
class TestFromGroup(object):
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
test:
mock:
- {title: '[Ignored] Test 12'}
- {title: '[FlexGet] Test 12'}
- {title: 'Test.13.HDTV-Ignored'}
- {title: 'Test.13.HDTV-FlexGet'}
- {title: 'Test.14.HDTV-Name'}
- {title: 'Test :: h264 10-bit | Softsubs (FlexGet) | Episode 3'}
- {title: 'Test :: h264 10-bit | Softsubs (Ignore) | Episode 3'}
series:
- test: {from_group: [Name, FlexGet]}
"""
def test_from_group(self, execute_task):
"""Series plugin: test from_group"""
task = execute_task('test')
assert task.find_entry('accepted', title='[FlexGet] Test 12')
assert task.find_entry('accepted', title='Test.13.HDTV-FlexGet')
assert task.find_entry('accepted', title='Test.14.HDTV-Name')
assert task.find_entry('accepted', title='Test :: h264 10-bit | Softsubs (FlexGet) | Episode 3')
class TestBegin(object):
config = """
templates:
global:
parsing:
series: {{parser}}
eps:
mock:
- {title: 'WTest.S02E03.HDTV.XViD-FlexGet'}
- {title: 'W2Test.S02E03.HDTV.XViD-FlexGet'}
tasks:
season_id_test:
template: eps
series:
- WTest:
begin: S02
- W2Test:
begin: S03
before_ep_test:
template: eps
series:
- WTest:
begin: S02E05
- W2Test:
begin: S03E02
after_ep_test:
template: eps
series:
- WTest:
begin: S02E03
- W2Test:
begin: S02E01
before_seq_test:
mock:
- title: WTest.1.HDTV.XViD-FlexGet
- title: W2Test.13.HDTV.XViD-FlexGet
series:
- WTest:
begin: 2
- W2Test:
begin: 120
after_seq_test:
mock:
- title: WTest.2.HDTV.XViD-FlexGet
- title: W2Test.123.HDTV.XViD-FlexGet
series:
- WTest:
begin: 2
- W2Test:
begin: 120
before_date_test:
mock:
- title: WTest.2001.6.6.HDTV.XViD-FlexGet
- title: W2Test.12.30.2012.HDTV.XViD-FlexGet
series:
- WTest:
begin: '2009-05-05'
- W2Test:
begin: '2012-12-31'
after_date_test:
mock:
- title: WTest.2009.5.5.HDTV.XViD-FlexGet
- title: W2Test.1.1.2013.HDTV.XViD-FlexGet
series:
- WTest:
begin: '2009-05-05'
- W2Test:
begin: '2012-12-31'
test_advancement1:
mock:
- title: WTest.S01E01
series:
- WTest
test_advancement2:
mock:
- title: WTest.S03E01
series:
- WTest
test_advancement3:
mock:
- title: WTest.S03E01
series:
- WTest:
begin: S03E01
"""
def test_season_id(self, execute_task):
task = execute_task('season_id_test')
assert task.find_entry('accepted', title='WTest.S02E03.HDTV.XViD-FlexGet'), \
'Entry should have been accepted, it\'s after the begin episode'
assert task.find_entry('rejected', title='W2Test.S02E03.HDTV.XViD-FlexGet'), \
'Entry should have been rejected, it\'s before the begin episode'
def test_before_ep(self, execute_task):
task = execute_task('before_ep_test')
assert not task.accepted, 'No entries should have been accepted, they are before the begin episode'
def test_after_ep(self, execute_task):
task = execute_task('after_ep_test')
assert len(task.accepted) == 2, 'Entries should have been accepted, they are not before the begin episode'
def test_before_seq(self, execute_task):
task = execute_task('before_seq_test')
assert not task.accepted, 'No entries should have been accepted, they are before the begin episode'
def test_after_seq(self, execute_task):
task = execute_task('after_seq_test')
assert len(task.accepted) == 2, 'Entries should have been accepted, they are not before the begin episode'
def test_before_date(self, execute_task):
task = execute_task('before_date_test')
assert not task.accepted, 'No entries should have been accepted, they are before the begin episode'
def test_after_date(self, execute_task):
task = execute_task('after_date_test')
assert len(task.accepted) == 2, 'Entries should have been accepted, they are not before the begin episode'
def test_advancement(self, execute_task):
# Put S01E01 into the database as latest download
task = execute_task('test_advancement1')
assert task.accepted
# Just verify regular ep advancement would block S03E01
task = execute_task('test_advancement2')
assert not task.accepted, 'Episode advancement should have blocked'
# Make sure ep advancement doesn't block it when we've set begin to that ep
task = execute_task('test_advancement3')
assert task.accepted, 'Episode should have been accepted'
class TestSeriesPremiere(object):
config = """
templates:
global:
parsing:
series: {{parser}}
metainfo_series: yes
series_premiere: yes
tasks:
test:
mock:
- {title: 'Foobar.S01E01.PDTV-FlexGet'}
- {title: 'Foobar.S01E11.1080p-FlexGet'}
- {title: 'Foobar.S02E02.HR-FlexGet'}
"""
def testOnlyPremieres(self, execute_task):
"""Test series premiere"""
task = execute_task('test')
assert task.find_entry('accepted', title='Foobar.S01E01.PDTV-FlexGet',
series_name='Foobar', series_season=1,
series_episode=1), 'Series premiere should have been accepted'
assert len(task.accepted) == 1
# TODO: Add more tests, test interaction with series plugin and series_exists
class TestImportSeries(object):
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
timeframe_max:
configure_series:
settings:
propers: 12 hours
target: 720p
timeframe: 5 minutes
quality: "<=720p <=bluray"
from:
mock:
- title: the show
mock:
- title: the show s03e02 1080p bluray
- title: the show s03e02 hdtv
test_import_altnames:
configure_series:
from:
mock:
- {title: 'the show', configure_series_alternate_name: 'le show'}
mock:
- title: le show s03e03
"""
def test_timeframe_max(self, execute_task):
"""Tests configure_series as well as timeframe with max_quality."""
task = execute_task('timeframe_max')
assert not task.accepted, 'Entry should not have been accepted on first run.'
age_series(minutes=6)
task = execute_task('timeframe_max')
assert task.find_entry('accepted', title='the show s03e02 hdtv'), \
'hdtv should have been accepted after timeframe.'
def test_import_altnames(self, execute_task):
"""Tests configure_series with alternate_name."""
task = execute_task('test_import_altnames')
entry = task.find_entry(title='le show s03e03')
assert entry.accepted, 'entry matching series alternate name should have been accepted.'
assert entry['series_name'] == 'the show', 'entry series should be set to the main name'
class TestIDTypes(object):
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
all_types:
series:
- episode
- seasonless episode
- date
- sequence
- stupid id:
id_regexp: (\\dcat)
mock:
- title: episode S03E04
- title: episode 3x05
- title: date 2011.4.3 other crap hdtv
- title: date 4.5.11
- title: sequence 003
- title: sequence 4
- title: stupid id 3cat
- title: seasonless episode e01
"""
def test_id_types(self, execute_task):
task = execute_task('all_types')
for entry in task.entries:
assert entry['series_name'], '%s not parsed by series plugin' % entry['title']
assert entry['series_id_type'] in entry['series_name']
class TestCaseChange(object):
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
first:
mock:
- title: theshow s02e04
series:
- TheShow
second:
mock:
- title: thEshoW s02e04 other
series:
- THESHOW
"""
def test_case_change(self, execute_task):
task = execute_task('first')
# Make sure series_name uses case from config, make sure episode is accepted
assert task.find_entry('accepted', title='theshow s02e04', series_name='TheShow')
task = execute_task('second')
# Make sure series_name uses new case from config, make sure ep is rejected because we have a copy
assert task.find_entry('rejected', title='thEshoW s02e04 other', series_name='THESHOW')
class TestInvalidSeries(object):
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
blank:
mock:
- title: whatever
series:
- '':
quality: 720p
"""
def test_blank_series(self, execute_task):
"""Make sure a blank series doesn't crash."""
task = execute_task('blank')
assert not task.aborted, 'Task should not have aborted'
class TestDoubleEps(object):
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
test_double1:
mock:
- title: double S01E02-E03
series:
- double
test_double2:
mock:
- title: double S01E03
series:
- double
"""
def test_double(self, execute_task):
# First should be accepted
task = execute_task('test_double1')
assert task.find_entry('accepted', title='double S01E02-E03')
# We already got ep 3 as part of double, should not be accepted
task = execute_task('test_double2')
assert not task.find_entry('accepted', title='double S01E03')
class TestAutoLockin(object):
config = """
templates:
global:
parsing:
series: {{parser}}
series:
- FooBar
- BarFood
tasks:
try_date_1:
mock:
- title: FooBar 2012-10-10 HDTV
lock_ep:
mock:
- title: FooBar S01E01 HDTV
- title: FooBar S01E02 HDTV
- title: FooBar S01E03 HDTV
try_date_2:
mock:
- title: FooBar 2012-10-11 HDTV
test_special_lock:
mock:
- title: BarFood christmas special HDTV
- title: BarFood easter special HDTV
- title: BarFood haloween special HDTV
- title: BarFood bad special HDTV
try_reg:
mock:
- title: BarFood S01E01 HDTV
- title: BarFood 2012-9-9 HDTV
"""
def test_ep_lockin(self, execute_task):
task = execute_task('try_date_1')
assert task.find_entry('accepted', title='FooBar 2012-10-10 HDTV'), \
'dates should be accepted before the series locks in on an identifier type'
task = execute_task('lock_ep')
assert len(task.accepted) == 3, 'All ep mode episodes should have been accepted'
task = execute_task('try_date_2')
assert not task.find_entry('accepted', title='FooBar 2012-10-11 HDTV'), \
'dates should not be accepted after series has locked in to ep mode'
def test_special_lock(self, execute_task):
"""Make sure series plugin does not lock in to type 'special'"""
task = execute_task('test_special_lock')
assert len(task.accepted) == 4, 'All specials should have been accepted'
task = execute_task('try_reg')
assert len(task.accepted) == 2, 'Specials should not have caused episode type lock-in'
class TestReruns(object):
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
one_accept:
mock:
- title: the show s01e01
- title: the show s01e01 different
series:
- the show
rerun: 2
mock_output: yes
"""
def test_one_accept(self, execute_task):
task = execute_task('one_accept')
assert len(task.mock_output) == 1, \
'should have accepted once!: %s' % ', '.join(e['title'] for e in task.mock_output)
class TestSpecials(object):
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
preferspecials:
mock:
- title: the show s03e04 special
series:
- the show:
prefer_specials: True
nopreferspecials:
mock:
- title: the show s03e05 special
series:
- the show:
prefer_specials: False
assumespecial:
mock:
- title: the show SOMETHING
series:
- the show:
assume_special: True
noassumespecial:
mock:
- title: the show SOMETHING
series:
- the show:
assume_special: False
special_looks_like_season_pack:
mock:
- title: Doctor.Who.S07.Special.The.Science.of.Doctor.Who.WS.XviD-Flexget
series:
- Doctor Who
"""
def test_prefer_specials(self, execute_task):
# Test that an entry matching both ep and special is flagged as a special when prefer_specials is True
task = execute_task('preferspecials')
entry = task.find_entry('accepted', title='the show s03e04 special')
assert entry.get('series_id_type') == 'special', 'Entry which should have been flagged a special was not.'
def test_not_prefer_specials(self, execute_task):
# Test that an entry matching both ep and special is flagged as an ep when prefer_specials is False
task = execute_task('nopreferspecials')
entry = task.find_entry('accepted', title='the show s03e05 special')
assert entry.get('series_id_type') != 'special', 'Entry which should not have been flagged a special was.'
def test_assume_special(self, execute_task):
# Test that an entry with no ID found gets flagged as a special and accepted if assume_special is True
task = execute_task('assumespecial')
entry = task.find_entry(title='the show SOMETHING')
assert entry.get('series_id_type') == 'special', 'Entry which should have been flagged as a special was not.'
assert entry.accepted, 'Entry which should have been accepted was not.'
def test_not_assume_special(self, execute_task):
# Test that an entry with no ID found does not get flagged as a special and accepted if assume_special is False
task = execute_task('noassumespecial')
entry = task.find_entry(title='the show SOMETHING')
assert entry.get('series_id_type') != 'special', 'Entry which should not have been flagged as a special was.'
assert not entry.accepted, 'Entry which should not have been accepted was.'
def test_special_looks_like_a_season_pack(self, execute_task):
"""Make sure special episodes are not being parsed as season packs"""
task = execute_task('special_looks_like_season_pack')
entry = task.find_entry(title='Doctor.Who.S07.Special.The.Science.of.Doctor.Who.WS.XviD-Flexget')
assert entry.get('series_id_type') == 'special', 'Entry should have been flagged as a special'
assert not entry['season_pack'], 'Entry should not have been flagged as a season pack'
assert entry.accepted, 'Entry which should have been accepted was not.'
class TestAlternateNames(object):
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
alternate_name:
series:
- Some Show:
begin: S01E01
alternate_name: Other Show
another_alternate_name:
series:
- Some Show:
alternate_name: Good Show
set_other_alternate_name:
mock:
- title: Third.Show.S01E01
- title: Other.Show.S01E01
series:
- Some Show:
alternate_name: Third Show
rerun: 0
duplicate_names_in_different_series:
series:
- First Show:
begin: S01E01
alternate_name: Third Show
- Second Show:
begin: S01E01
alternate_name: Third Show
"""
def test_set_alternate_name(self, execute_task):
# Tests that old alternate names are not kept in the database.
task = execute_task('alternate_name')
task = execute_task('set_other_alternate_name')
assert task.find_entry('accepted', title='Third.Show.S01E01'), \
'A new alternate name should have been associated with the series.'
assert task.find_entry('undecided', title='Other.Show.S01E01'), \
'The old alternate name for the series is still present.'
def test_duplicate_alternate_names_in_different_series(self, execute_task):
with pytest.raises(TaskAbort) as ex:
execute_task('duplicate_names_in_different_series')
# only test that the reason is about alternate names, not which names.
reason = 'Error adding alternate name'
assert ex.value.reason[:27] == reason, \
'Wrong reason for task abortion. Should be about duplicate alternate names.'
# Test the DB behaves like we expect, i.e. old alternate names are removed when the config changes.
def test_alternate_names_are_removed_from_db(self, execute_task):
from flexget.manager import Session
with Session() as session:
execute_task('alternate_name')
# test the current state of alternate names
assert len(session.query(db.AlternateNames).all()) == 1, 'There should be one alternate name present.'
assert session.query(db.AlternateNames).first().alt_name == 'Other Show', \
'Alternate name should have been Other Show.'
# run another task that overwrites the alternate names
execute_task('another_alternate_name')
assert len(session.query(db.AlternateNames).all()) == 1, \
'The old alternate name should have been removed from the database.'
assert session.query(db.AlternateNames).first().alt_name == 'Good Show', \
'The alternate name in the database should be the new one, Good Show.'
class TestCLI(object):
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
learn_series:
series:
- Some Show
- Other Show
mock:
- title: Some Series S01E01
- title: Other Series S01E02
"""
def test_series_list(self, manager, execute_task):
"""Very rudimentary test, mostly makes sure this doesn't crash."""
execute_task('learn_series')
options = get_parser().parse_args(['series', 'list', '--porcelain'])
buffer = StringIO()
with capture_output(buffer, loglevel='error'):
manager.handle_cli(options=options)
lines = buffer.getvalue().split('\n')
assert all(any(line.lstrip().startswith(series) for line in lines) for series in ['Some Show', 'Other Show'])
class TestSeriesRemove(object):
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
get_episode:
seen: local
series:
- My Show
mock:
- title: My Show S01E01 1080p
- title: My Show S01E01 720p
remove_episode:
seen: no
mock:
- title: My Show S01E01
series_name: My Show
series_id: S01E01
accept_all: yes
series_remove: yes
"""
def test_remove_episode(self, execute_task):
task = execute_task('get_episode')
assert len(task.accepted) == 1
first_rls = task.accepted[0]
task = execute_task('get_episode')
assert not task.accepted, 'series plugin duplicate blocking not working?'
task = execute_task('remove_episode')
task = execute_task('get_episode')
assert len(task.accepted) == 1, 'new release not accepted after forgetting ep'
assert task.accepted[0] != first_rls, 'same release accepted on second run'
class TestSeriesSeasonPack(object):
_config = """
templates:
global:
parsing:
series: internal
series:
- foo:
season_packs: yes
- bar:
season_packs: yes
tracking: backfill
- baz:
season_packs: 3
- boo:
season_packs: always
- bla:
season_packs: only
- bro:
season_packs:
threshold: 1
reject_eps: yes
tasks:
multiple_formats:
mock:
- title: foo.s01.720p-flexget
- title: foo.2xALL.720p-flexget
foo_s01:
mock:
- title: foo.s01.720p-flexget
foo_s02:
mock:
- title: foo.s02.720p-flexget
foo_s03:
mock:
- title: foo.s03.720p-flexget
foo_s01ep1:
mock:
- title: foo.s01e1.720p-flexget
foo_s02ep1:
mock:
- title: foo.s02e1.720p-flexget
season_pack_priority:
mock:
- title: foo.s01e1.720p-flexget
- title: foo.s01e2.720p-flexget
- title: foo.s01e3.720p-flexget
- title: foo.s01e4.720p-flexget
- title: foo.s01e5.720p-flexget
- title: foo.s01.720p-flexget
respect_begin:
series:
- bar:
begin: s02e01
season_packs: yes
mock:
- title: bar.s01.720p-flexget
- title: bar.s02.720p-flexget
several_seasons:
mock:
- title: foo.s03.720p-flexget
- title: foo.s07.720p-flexget
- title: foo.s03.1080p-flexget
- title: foo.s06.720p-flexget
- title: foo.s09.720p-flexget
test_backfill_1:
mock:
- title: bar.s03.720p-flexget
test_backfill_2:
mock:
- title: bar.s02.720p-flexget
test_backfill_3:
mock:
- title: bar.s03e01.720p-flexget
test_backfill_4:
mock:
- title: bar.s02e01.1080p-flexget
test_specific_season_pack_threshold_1:
mock:
- title: baz.s01e01.720p-flexget
- title: baz.s01e02.720p-flexget
- title: baz.s01e03.720p-flexget
test_specific_season_pack_threshold_2:
mock:
- title: baz.s01.720p-flexget
test_specific_season_pack_threshold_3:
mock:
- title: baz.s01e01.720p-flexget
- title: baz.s01e02.720p-flexget
- title: baz.s01e03.720p-flexget
- title: baz.s01e04.720p-flexget
test_always_get_season_pack_1:
mock:
- title: boo.s01e01.720p-flexget
- title: boo.s01e02.720p-flexget
- title: boo.s01e03.720p-flexget
- title: boo.s01e04.720p-flexget
test_always_get_season_pack_2:
mock:
- title: boo.s01.720p-flexget
test_only_get_season_packs:
mock:
- title: bla.s01.720p-flexget
- title: bla.s02e01.720p-flexget
test_proper_season_pack:
mock:
- title: foo.s01.720p-flexget
- title: foo.s01.720p.proper-flexget
test_proper_season_pack_2:
mock:
- title: foo.s01.720p-flexget
test_proper_season_pack_3:
mock:
- title: foo.s01.720p.proper-flexget
test_all_series:
mock:
- title: show.name.s01.720p.HDTV-Group
all_series:
season_packs: yes
test_with_dict_config_1:
mock:
- title: bro.s01e01.720p.HDTV-Flexget
- title: bro.s01.720p.HDTV-Flexget
test_with_dict_config_2:
mock:
- title: bro.s02.720p.HDTV-Flexget
"""
@pytest.fixture()
def config(self):
"""Overrides outer config fixture since season pack support does not work with guessit parser"""
return self._config
def test_season_pack_simple(self, execute_task):
task = execute_task('foo_s01')
assert len(task.accepted) == 1
def test_basic_tracking(self, execute_task):
task = execute_task('foo_s01')
assert len(task.accepted) == 1
task = execute_task('foo_s01ep1')
assert len(task.accepted) == 0
task = execute_task('foo_s02ep1')
assert len(task.accepted) == 1
def test_season_pack_takes_priority(self, execute_task):
task = execute_task('season_pack_priority')
assert len(task.accepted) == 1
entry = task.find_entry(title='foo.s01.720p-flexget')
assert entry.accepted
def test_respect_begin(self, execute_task):
task = execute_task('respect_begin')
assert len(task.accepted) == 1
entry = task.find_entry(title='bar.s02.720p-flexget')
assert entry.accepted
def test_tracking_rules_old_eps(self, execute_task):
task = execute_task('foo_s01')
assert len(task.accepted) == 1
task = execute_task('foo_s02')
assert len(task.accepted) == 1
task = execute_task('foo_s01ep1')
assert not task.accepted
def test_tracking_rules_old_season(self, execute_task):
task = execute_task('foo_s02')
assert len(task.accepted) == 1
task = execute_task('foo_s01')
assert not task.accepted
def test_tracking_rules_new_season(self, execute_task):
task = execute_task('foo_s01')
assert len(task.accepted) == 1
task = execute_task('foo_s03')
assert not task.accepted
def test_several_seasons(self, execute_task):
task = execute_task('several_seasons')
assert len(task.accepted) == 4
def test_multiple_formats(self, execute_task):
task = execute_task('multiple_formats')
assert len(task.accepted) == 2
def test_backfill(self, execute_task):
task = execute_task('test_backfill_1')
assert len(task.accepted) == 1
task = execute_task('test_backfill_2')
assert len(task.accepted) == 1
task = execute_task('test_backfill_3')
assert not task.accepted
task = execute_task('test_backfill_4')
assert not task.accepted
def test_default_threshold(self, execute_task):
task = execute_task('foo_s01ep1')
assert len(task.accepted) == 1
task = execute_task('foo_s01')
assert len(task.accepted) == 0
def test_specific_season_pack_threshold_positive(self, execute_task):
task = execute_task('test_specific_season_pack_threshold_1')
assert len(task.accepted) == 3
task = execute_task('test_specific_season_pack_threshold_2')
assert len(task.accepted) == 1
def test_specific_season_pack_threshold_negative(self, execute_task):
task = execute_task('test_specific_season_pack_threshold_3')
assert len(task.accepted) == 4
task = execute_task('test_specific_season_pack_threshold_2')
assert not task.accepted
def test_loose_threshold(self, execute_task):
task = execute_task('test_always_get_season_pack_1')
assert len(task.accepted) == 4
task = execute_task('test_always_get_season_pack_2')
assert len(task.accepted) == 1
def test_exclusive(self, execute_task):
task = execute_task('test_only_get_season_packs')
assert len(task.accepted) == 1
entry = task.find_entry(title='bla.s01.720p-flexget')
assert entry.accepted
def test_proper_season_pack(self, execute_task):
"""Series plugin: proper available immediately"""
task = execute_task('test_proper_season_pack')
assert task.find_entry('accepted', title='foo.s01.720p.proper-flexget')
def test_proper_season_pack_2(self, execute_task):
"""Series plugin: proper available immediately"""
task = execute_task('test_proper_season_pack_2')
assert task.find_entry('accepted', title='foo.s01.720p-flexget')
task = execute_task('test_proper_season_pack_3')
assert task.find_entry('accepted', title='foo.s01.720p.proper-flexget')
def test_all_series(self, execute_task):
task = execute_task('test_all_series')
assert task.find_entry('accepted', title='show.name.s01.720p.HDTV-Group')
def test_advanced_config(self, execute_task):
task = execute_task('test_with_dict_config_1')
assert not task.find_entry('accepted', title='bro.s01e01.720p.HDTV-Flexget')
assert task.find_entry('accepted', title='bro.s01.720p.HDTV-Flexget')
execute_task('test_with_dict_config_2',
options={'inject': [Entry(title='bro.s02e01.720p.HDTV-Flexget', url='')],
'immortal': True})
task = execute_task('test_with_dict_config_2')
assert task.find_entry('accepted', title='bro.s02.720p.HDTV-Flexget')
class TestSeriesDDAudio(object):
_config = """
templates:
global:
parsing:
series: internal
tasks:
min_quality:
mock:
- {title: 'MinQATest.S01E01.720p.XViD.DD5.1-FlexGet'}
- {title: 'MinQATest.S01E01.720p.XViD.DDP5.1-FlexGet'}
series:
- MinQATest:
quality: ">dd5.1"
max_quality:
mock:
- {title: 'MaxQATest.S01E01.720p.XViD.DD5.1-FlexGet'}
- {title: 'MaxQATest.S01E01.720p.XViD.DD+5.1-FlexGet'}
series:
- MaxQATest:
quality: "<=dd5.1"
test_channels:
mock:
- {title: 'Channels.S01E01.1080p.HDTV.DD+2.0-FlexGet'}
- {title: 'Channels.S01E01.1080p.HDTV.DD+5.1-FlexGet'}
- {title: 'Channels.S01E01.1080p.HDTV.DD+7.1-FlexGet'}
series:
- Channels:
quality: dd+5.1
"""
@pytest.fixture()
def config(self):
"""Overrides outer config fixture since DD+ and arbitrary channels support does not work with guessit parser"""
return self._config
def test_min_quality(self, execute_task):
"""Series plugin: min_quality"""
task = execute_task('min_quality')
assert task.find_entry('accepted', title='MinQATest.S01E01.720p.XViD.DDP5.1-FlexGet'), \
'MinQATest.S01E01.720p.XViD.DDP5.1-FlexGet should have been accepted'
assert len(task.accepted) == 1, 'should have accepted only one'
def test_max_quality(self, execute_task):
"""Series plugin: max_quality"""
task = execute_task('max_quality')
assert task.find_entry('accepted', title='MaxQATest.S01E01.720p.XViD.DD5.1-FlexGet'), \
'MaxQATest.S01E01.720p.XViD.DD5.1-FlexGet should have been accepted'
assert len(task.accepted) == 1, 'should have accepted only one'
def test_channels(self, execute_task):
"""Series plugin: max_quality"""
task = execute_task('test_channels')
assert task.find_entry(title='Channels.S01E01.1080p.HDTV.DD+7.1-FlexGet'), \
'Channels.S01E01.1080p.HDTV.DD+7.1-FlexGet should have been accepted'
assert len(task.accepted) == 1, 'should have accepted only one'
| test_qualities_fail |
tasks.py | """Tasks for use with Invoke.
(c) 2020-2021 Network To Code
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from distutils.util import strtobool
from invoke import Collection, task as invoke_task
import os
def is_truthy(arg):
"""Convert "truthy" strings into Booleans.
Examples:
>>> is_truthy('yes')
True
Args:
arg (str): Truthy string (True values are y, yes, t, true, on and 1; false values are n, no,
f, false, off and 0). Raises ValueError if `arg` is anything else.
"""
if isinstance(arg, bool):
return arg
return bool(strtobool(arg))
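# Illustrative behaviour of the helper above (assumes distutils.util.strtobool semantics):
#   is_truthy(True)  -> True   (booleans pass through unchanged)
#   is_truthy("no")  -> False
#   is_truthy("1")   -> True
#   is_truthy("foo") -> raises ValueError via strtobool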
# Use pyinvoke configuration for default values, see http://docs.pyinvoke.org/en/stable/concepts/configuration.html
# Variables may be overwritten in invoke.yml or by the environment variables INVOKE_NAUTOBOT_PLUGIN_CHATOPS_ACI_xxx
namespace = Collection("nautobot_plugin_chatops_aci")
namespace.configure(
{
"nautobot_plugin_chatops_aci": {
"nautobot_ver": "latest",
"project_name": "nautobot-plugin-chatops-aci",
"python_ver": "3.8",
"local": False,
"compose_dir": os.path.join(os.path.dirname(__file__), "development"),
"compose_files": [
"docker-compose.requirements.yml",
"docker-compose.base.yml",
"docker-compose.dev.yml",
],
}
}
)
def task(function=None, *args, **kwargs):
"""Task decorator to override the default Invoke task decorator and add each task to the invoke namespace."""
def task_wrapper(function=None):
"""Wrapper around invoke.task to add the task to the namespace as well."""
if args or kwargs:
task_func = invoke_task(*args, **kwargs)(function)
else:
task_func = invoke_task(function)
namespace.add_task(task_func)
return task_func
if function:
# The decorator was called with no arguments
return task_wrapper(function)
# The decorator was called with arguments
return task_wrapper
def docker_compose(context, command, **kwargs):
"""Helper function for running a specific docker-compose command with all appropriate parameters and environment.
Args:
context (obj): Used to run specific commands
command (str): Command string to append to the "docker-compose ..." command, such as "build", "up", etc.
**kwargs: Passed through to the context.run() call.
"""
build_env = {
"NAUTOBOT_VER": context.nautobot_plugin_chatops_aci.nautobot_ver,
"PYTHON_VER": context.nautobot_plugin_chatops_aci.python_ver,
}
compose_command = f'docker-compose --project-name {context.nautobot_plugin_chatops_aci.project_name} --project-directory "{context.nautobot_plugin_chatops_aci.compose_dir}"'
for compose_file in context.nautobot_plugin_chatops_aci.compose_files:
compose_file_path = os.path.join(context.nautobot_plugin_chatops_aci.compose_dir, compose_file)
compose_command += f' -f "{compose_file_path}"'
compose_command += f" {command}"
print(f'Running docker-compose command "{command}"')
return context.run(compose_command, env=build_env, **kwargs)
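# For illustration only, with the defaults configured above a call such as
# docker_compose(context, "ps") expands to roughly:
#   docker-compose --project-name nautobot-plugin-chatops-aci \
#     --project-directory "<repo>/development" \
#     -f "<repo>/development/docker-compose.requirements.yml" \
#     -f "<repo>/development/docker-compose.base.yml" \
#     -f "<repo>/development/docker-compose.dev.yml" ps
# with NAUTOBOT_VER and PYTHON_VER exported into the environment of the run.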
def run_command(context, command, **kwargs):
"""Wrapper to run a command locally or inside the nautobot container."""
if is_truthy(context.nautobot_plugin_chatops_aci.local):
context.run(command, **kwargs)
else:
# Check if nautobot is running; no need to start another nautobot container to run a command
docker_compose_status = "ps --services --filter status=running"
results = docker_compose(context, docker_compose_status, hide="out")
if "nautobot" in results.stdout:
compose_command = f"exec nautobot {command}"
else:
compose_command = f"run --entrypoint '{command}' nautobot"
docker_compose(context, compose_command, pty=True)
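# Hypothetical example of the dispatch above: with local=False and the nautobot
# service already running, run_command(context, "nautobot-server migrate") becomes
# `docker-compose ... exec nautobot nautobot-server migrate`; if the service is not
# running it falls back to `run --entrypoint 'nautobot-server migrate' nautobot`.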
# ------------------------------------------------------------------------------
# BUILD
# ------------------------------------------------------------------------------
@task(
help={
"force_rm": "Always remove intermediate containers",
"cache": "Whether to use Docker's cache when building the image (defaults to enabled)",
}
)
def build(context, force_rm=False, cache=True):
"""Build Nautobot docker image."""
command = "build"
if not cache:
command += " --no-cache"
if force_rm:
command += " --force-rm"
print(f"Building Nautobot with Python {context.nautobot_plugin_chatops_aci.python_ver}...")
docker_compose(context, command)
@task
def generate_packages(context):
"""Generate all Python packages inside docker and copy the file locally under dist/."""
command = "poetry build"
run_command(context, command)
# ------------------------------------------------------------------------------
# START / STOP / DEBUG
# ------------------------------------------------------------------------------
@task
def debug(context):
"""Start Nautobot and its dependencies in debug mode."""
print("Starting Nautobot in debug mode...")
docker_compose(context, "up")
@task
def start(context):
"""Start Nautobot and its dependencies in detached mode."""
print("Starting Nautobot in detached mode...")
docker_compose(context, "up --detach")
@task
def restart(context):
"""Gracefully restart all containers."""
print("Restarting Nautobot...")
docker_compose(context, "restart")
@task
def stop(context):
"""Stop Nautobot and its dependencies."""
print("Stopping Nautobot...")
docker_compose(context, "down")
@task
def destroy(context):
"""Destroy all containers and volumes."""
print("Destroying Nautobot...")
docker_compose(context, "down --volumes")
@task
def vscode(context):
"""Launch Visual Studio Code with the appropriate Environment variables to run in a container."""
command = "code nautobot.code-workspace"
context.run(command)
# ------------------------------------------------------------------------------
# ACTIONS
# ------------------------------------------------------------------------------
@task
def nbshell(context):
"""Launch an interactive nbshell session."""
command = "nautobot-server nbshell"
run_command(context, command)
@task
def cli(context):
"""Launch a bash shell inside the running Nautobot container."""
run_command(context, "bash")
@task(
help={
"user": "name of the superuser to create (default: admin)",
}
)
def createsuperuser(context, user="admin"):
"""Create a new Nautobot superuser account (default: "admin"), will prompt for password."""
command = f"nautobot-server createsuperuser --username {user}"
run_command(context, command)
@task(
help={
"name": "name of the migration to be created; if unspecified, will autogenerate a name",
}
)
def makemigrations(context, name=""):
"""Perform makemigrations operation in Django."""
command = "nautobot-server makemigrations nautobot_plugin_chatops_aci"
if name:
command += f" --name {name}"
run_command(context, command)
@task
def migrate(context):
"""Perform migrate operation in Django."""
command = "nautobot-server migrate"
run_command(context, command)
@task(help={})
def post_upgrade(context):
"""
Performs Nautobot common post-upgrade operations using a single entrypoint.
This will run the following management commands with default settings, in order:
- migrate
- trace_paths
- collectstatic
- remove_stale_contenttypes
- clearsessions
- invalidate all
"""
command = "nautobot-server post_upgrade"
run_command(context, command)
# ------------------------------------------------------------------------------
# TESTS
# ------------------------------------------------------------------------------
@task(
help={
"autoformat": "Apply formatting recommendations automatically, rather than failing if formatting is incorrect.",
}
)
def black(context, autoformat=False):
"""Check Python code style with Black."""
if autoformat:
black_command = "black"
else:
black_command = "black --check --diff"
command = f"{black_command} ."
run_command(context, command)
@task
def flake8(context):
"""Check for PEP8 compliance and other style issues."""
command = "flake8 ."
run_command(context, command)
@task
def hadolint(context):
"""Check Dockerfile for hadolint compliance and other style issues."""
command = "hadolint development/Dockerfile"
run_command(context, command)
@task
def pylint(context):
"""Run pylint code analysis."""
command = (
'pylint --init-hook "import nautobot; nautobot.setup()" --rcfile pyproject.toml nautobot_plugin_chatops_aci'
)
run_command(context, command)
@task
def pydocstyle(context):
"""Run pydocstyle to validate docstring formatting adheres to NTC defined standards."""
# We exclude the /migrations/ directory since it is autogenerated code
command = "pydocstyle ."
run_command(context, command)
| @task
def yamllint(context):
"""Run yamllint to validate formating adheres to NTC defined YAML standards.
Args:
context (obj): Used to run specific commands
"""
command = "yamllint . --format standard"
run_command(context, command)
@task
def bandit(context):
"""Run bandit to validate basic static code security analysis."""
command = "bandit --recursive . --configfile .bandit.yml"
run_command(context, command)
@task
def check_migrations(context):
"""Check for missing migrations."""
command = "nautobot-server --config=nautobot/core/tests/nautobot_config.py makemigrations --dry-run --check"
run_command(context, command)
@task(
help={
"keepdb": "save and re-use test database between test runs for faster re-testing.",
"label": "specify a directory or module to test instead of running all Nautobot tests",
"failfast": "fail as soon as a single test fails don't run the entire test suite",
"buffer": "Discard output from passing tests",
}
)
def unittest(context, keepdb=False, label="nautobot_plugin_chatops_aci", failfast=False, buffer=True):
"""Run Nautobot unit tests."""
command = f"coverage run --module nautobot.core.cli test {label}"
if keepdb:
command += " --keepdb"
if failfast:
command += " --failfast"
if buffer:
command += " --buffer"
run_command(context, command)
@task
def unittest_coverage(context):
"""Report on code test coverage as measured by 'invoke unittest'."""
command = "coverage report --skip-covered --include 'nautobot_plugin_chatops_aci/*' --omit *migrations*"
run_command(context, command)
@task(
help={
"failfast": "fail as soon as a single test fails don't run the entire test suite",
}
)
def tests(context, failfast=False):
"""Run all tests for this plugin."""
# If we are not running locally, start the docker containers so we don't have to for each test
if not is_truthy(context.nautobot_plugin_chatops_aci.local):
print("Starting Docker Containers...")
start(context)
# Sorted loosely from fastest to slowest
print("Running black...")
black(context)
print("Running flake8...")
flake8(context)
print("Running bandit...")
bandit(context)
print("Running pydocstyle...")
pydocstyle(context)
print("Running yamllint...")
yamllint(context)
print("Running pylint...")
pylint(context)
print("Running unit tests...")
unittest(context, failfast=failfast)
print("All tests have passed!")
unittest_coverage(context) | |
permissions.py | from rest_framework.permissions import BasePermission
class SuperAdmin(BasePermission):
def has_object_permission(self, request, view, obj):
| user = request.user
if not (user and user.is_authenticated):
return False
if user.is_superuser:
return True
return False |
|
TopK.ts | /**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
import {KernelConfig, KernelFunc, NumericDataType, TensorInfo, TopK, TopKAttrs, TopKInputs, TypedArray, util} from '@tensorflow/tfjs-core';
import {MathBackendWebGL} from '../backend_webgl';
import {topKImplCPU} from '../kernel_utils/shared';
import {MergeProgram, SwapProgram} from '../top_k_gpu';
import {fill} from './Fill';
import {gatherV2} from './GatherV2';
import {reshape} from './Reshape';
import {slice} from './Slice';
function disposeIntermediateTensorInfoOrNull(
backend: MathBackendWebGL, tensorInfo: TensorInfo) {
if (tensorInfo !== null) {
backend.disposeIntermediateTensorInfo(tensorInfo);
}
}
function | (num: number) {
let pow2 = 1;
while (pow2 < num) {
pow2 *= 2;
}
return pow2;
}
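// Illustrative note (not part of the original file): the helper above simply
// doubles until it reaches `num`, so e.g. 1 -> 1, 5 -> 8, 8 -> 8, 9 -> 16.
// Padding k and lastDim to powers of two lets the bitonic sort/merge passes
// below operate on power-of-two sized sequences.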
// Based on Algorithm 2 of Bitonic Top K, ref:
// https://anilshanbhag.in/static/papers/gputopk_sigmod18.pdf
export function topK(
args: {inputs: TopKInputs, backend: MathBackendWebGL, attrs: TopKAttrs}):
TensorInfo[] {
const {inputs, backend, attrs} = args;
const {x} = inputs;
const {k, sorted} = attrs;
if (backend.shouldExecuteOnCPU([x])) {
const xVals = backend.readSync(x.dataId) as TypedArray;
const [allTopKVals, allTopKIndices] =
topKImplCPU(xVals, x.shape, x.dtype as NumericDataType, k, sorted);
return [
backend.makeTensorInfo(
allTopKVals.shape, allTopKVals.dtype, allTopKVals.values),
backend.makeTensorInfo(
allTopKIndices.shape, allTopKIndices.dtype, allTopKIndices.values)
];
}
const xShape = x.shape;
const lastDim = xShape[xShape.length - 1];
if (k === 0) {
xShape[xShape.length - 1] = 0;
return [
backend.makeTensorInfo(xShape, x.dtype, []),
backend.makeTensorInfo(xShape, 'int32', [])
];
}
if (lastDim === 1 /* firstPass */) {
return [
x, fill({attrs: {shape: xShape, dtype: 'int32', value: 0}, backend})
];
}
// Reshape into a 2d tensor [batch, lastDim] and compute topk along lastDim.
const xSize = util.sizeFromShape(xShape);
const batch = xSize / lastDim;
const x2D = reshape({inputs: {x}, attrs: {shape: [batch, lastDim]}, backend});
const kPow2 = roundUpToPow2(k);
const lastDimPow2 = roundUpToPow2(lastDim);
// Only the indices containing the top K are kept at every step to reduce the
// number of outputs in the GPU algorithms; once the final set of indices is
// computed, gather is used to grab the corresponding values from the
// original input.
let indices: TensorInfo = null;
// The GPU algorithm always takes an indices input, but that input is not used
// on the first run. Therefore, when indices is null we simply pass x2D in its
// place; the value will not actually be read.
const getInputs = () => indices === null ? [x2D, x2D] : [x2D, indices];
const runSwap = (dir: number, inc: number, shape: number[]) => {
const inputs = getInputs();
const program = new SwapProgram(shape);
const customSetup = program.getCustomSetupFunc(
lastDim, indices === null /* firstPass */, dir, inc);
const prevIndices = indices;
indices = backend.runWebGLProgram(program, inputs, 'int32', customSetup);
disposeIntermediateTensorInfoOrNull(backend, prevIndices);
};
// Step 1: local sort
for (let len = 1; len < kPow2; len *= 2) {
const dir = len * 2;
for (let inc = len; inc >= 1; inc /= 2) {
runSwap(dir, inc, [batch, lastDimPow2]);
}
}
// Step 2: merge
for (let indicesSize = lastDimPow2; indicesSize > kPow2; indicesSize /= 2) {
const inputs = getInputs();
const mergeProgram = new MergeProgram([batch, indicesSize / 2]);
const customSetup = mergeProgram.getCustomSetupFunc(
lastDim, indices === null /* firstPass */, kPow2);
const prevIndices = indices;
indices =
backend.runWebGLProgram(mergeProgram, inputs, 'int32', customSetup);
disposeIntermediateTensorInfoOrNull(backend, prevIndices);
// Step 3: rebuild
const len = kPow2 / 2;
const dir = len * 2;
for (let inc = len; inc >= 1; inc /= 2) {
runSwap(dir, inc, indices.shape);
}
}
// Keep only the requested top K results instead of kPow2
let prevIndices = indices;
indices = slice(
{inputs: {x: indices}, backend, attrs: {begin: 0, size: [batch, k]}});
disposeIntermediateTensorInfoOrNull(backend, prevIndices);
// Gather values on last dimension
let values = gatherV2(
{inputs: {x: x2D, indices}, backend, attrs: {axis: 1, batchDims: 1}});
disposeIntermediateTensorInfoOrNull(backend, x2D);
// Reshape back to the original input shape, except that the last
// dimension is k.
const newShape = xShape.slice(0, -1);
newShape.push(k);
prevIndices = indices;
indices = reshape({inputs: {x: indices}, attrs: {shape: newShape}, backend});
disposeIntermediateTensorInfoOrNull(backend, prevIndices);
const prevValues = values;
values = reshape({inputs: {x: values}, attrs: {shape: newShape}, backend});
disposeIntermediateTensorInfoOrNull(backend, prevValues);
return [values, indices];
}
export const topKConfig: KernelConfig = {
kernelName: TopK,
backendName: 'webgl',
kernelFunc: topK as {} as KernelFunc
};
| roundUpToPow2 |
getdataset only one.py | #Split one picture
import cv2
import numpy.random as random
import numpy as np
import os
import time
#borders
#mitochondria
#mitochondria borders
#PSD
#vesicles
def is_Img(name):
|
file_dir_arr = ["axon", "mitochondria", "PSD", "vesicles", "boundaries","mitochondrial boundaries"]
name_list = []
mask_list = []
out_dir = "cutting data"
size_data = 256
size_step = 128
if not os.path.isdir(out_dir):
print("создаю out_dir:" + out_dir)
os.makedirs(out_dir)
dir_input_img = "original data/original/"
dir_input_mask ="original data/"
###########################################################
img_name = "training075.png"
###########################################################
if is_Img(os.path.join(dir_input_img, img_name)):
count = 0
img = cv2.imread(os.path.join(dir_input_img, img_name), 0)
h,w = img.shape[0:2]
if not os.path.isdir(out_dir+"/original"):
print("создаю out_dir:" + "original")
os.makedirs(out_dir+"/original")
for start_y in range(0,h, size_step):
if (h - start_y < size_data):
continue
for start_x in range(0,w, size_step):
if (w - start_x < size_data):
continue
cutting_img = img[start_y:start_y+size_data, start_x:start_x+size_data]
cv2.imwrite(out_dir + "/original/" + img_name + "_" + str(size_data) +"_" + str(size_step) +"_" +str(count)+".png", cutting_img)
count+=1
for i,dir_name in enumerate(file_dir_arr):
if is_Img(os.path.join(dir_input_mask + dir_name, img_name)):
img = cv2.imread(os.path.join(dir_input_mask +dir_name, img_name), 0)
img[img < 128] = 0
img[img > 127] = 255
if name_list.count(img_name) == 0:
name_list.append(img_name)
mask_list.append(np.zeros((len(file_dir_arr),)+ img.shape, np.uint8))
index = name_list.index(img_name)
mask_list[index][i] = img
print(name_list)
for index, mask_stack in enumerate(mask_list):
count = 0
for i,dir_name in enumerate(file_dir_arr):
local_count = count
mask_write = mask_stack[i]
h,w = mask_write.shape[0:2]
if not os.path.isdir(out_dir+"/"+dir_name):
print("создаю out_dir:" + "mask")
os.makedirs(out_dir+"/"+dir_name )
for start_y in range(0,h, size_step):
if (h - start_y < size_data):
continue
for start_x in range(0,w, size_step):
if (w - start_x < size_data):
continue
cutting_mask = mask_write[start_y:start_y+size_data, start_x:start_x+size_data]
cv2.imwrite(out_dir+"/"+dir_name +"/" + name_list[index] + "_" + str(size_data) +"_" + str(size_step) +"_" +str(local_count)+".png", cutting_mask)
local_count+=1
| img_type = ('.png', '.jpg', '.jpeg')
if name.endswith((img_type)):
return True
else:
return False |
dataset.py | import copy
import datetime
import functools
import inspect
import sys
import warnings
from collections import defaultdict
from distutils.version import LooseVersion
from html import escape
from numbers import Number
from operator import methodcaller
from pathlib import Path
from typing import (
TYPE_CHECKING,
Any,
Callable,
DefaultDict,
Dict,
Hashable,
Iterable,
Iterator,
List,
Mapping,
MutableMapping,
Optional,
Sequence,
Set,
Tuple,
TypeVar,
Union,
cast,
overload,
)
import numpy as np
import pandas as pd
import xarray as xr
from ..coding.cftimeindex import _parse_array_of_cftime_strings
from ..plot.dataset_plot import _Dataset_PlotMethods
from . import (
alignment,
dtypes,
duck_array_ops,
formatting,
formatting_html,
groupby,
ops,
resample,
rolling,
utils,
weighted,
)
from .alignment import _broadcast_helper, _get_broadcast_dims_map_common_coords, align
from .common import (
DataWithCoords,
ImplementsDatasetReduce,
_contains_datetime_like_objects,
)
from .coordinates import (
DatasetCoordinates,
assert_coordinate_consistent,
remap_label_indexers,
)
from .duck_array_ops import datetime_to_numeric
from .indexes import (
Indexes,
default_indexes,
isel_variable_and_index,
propagate_indexes,
remove_unused_levels_categories,
roll_index,
)
from .indexing import is_fancy_indexer
from .merge import (
dataset_merge_method,
dataset_update_method,
merge_coordinates_without_align,
merge_data_and_coords,
)
from .missing import get_clean_interp_index
from .options import OPTIONS, _get_keep_attrs
from .pycompat import is_duck_dask_array, sparse_array_type
from .utils import (
Default,
Frozen,
HybridMappingProxy,
SortedKeysDict,
_default,
decode_numpy_dict_values,
drop_dims_from_indexers,
either_dict_or_kwargs,
hashable,
infix_dims,
is_dict_like,
is_scalar,
maybe_wrap_array,
)
from .variable import (
IndexVariable,
Variable,
as_variable,
assert_unique_multiindex_level_names,
broadcast_variables,
)
if TYPE_CHECKING:
from ..backends import AbstractDataStore, ZarrStore
from .dataarray import DataArray
from .merge import CoercibleMapping
T_DSorDA = TypeVar("T_DSorDA", DataArray, "Dataset")
try:
from dask.delayed import Delayed
except ImportError:
Delayed = None
# list of attributes of pd.DatetimeIndex that are ndarrays of time info
_DATETIMEINDEX_COMPONENTS = [
"year",
"month",
"day",
"hour",
"minute",
"second",
"microsecond",
"nanosecond",
"date",
"time",
"dayofyear",
"weekofyear",
"dayofweek",
"quarter",
]
def _get_virtual_variable(
variables, key: Hashable, level_vars: Mapping = None, dim_sizes: Mapping = None
) -> Tuple[Hashable, Hashable, Variable]:
"""Get a virtual variable (e.g., 'time.year' or a MultiIndex level)
from a dict of xarray.Variable objects (if possible)
"""
if level_vars is None:
level_vars = {}
if dim_sizes is None:
dim_sizes = {}
if key in dim_sizes:
data = pd.Index(range(dim_sizes[key]), name=key)
variable = IndexVariable((key,), data)
return key, key, variable
if not isinstance(key, str):
raise KeyError(key)
split_key = key.split(".", 1)
var_name: Optional[str]
if len(split_key) == 2:
ref_name, var_name = split_key
elif len(split_key) == 1:
ref_name, var_name = key, None
else:
raise KeyError(key)
if ref_name in level_vars:
dim_var = variables[level_vars[ref_name]]
ref_var = dim_var.to_index_variable().get_level_variable(ref_name)
else:
ref_var = variables[ref_name]
if var_name is None:
virtual_var = ref_var
var_name = key
else:
if _contains_datetime_like_objects(ref_var):
ref_var = xr.DataArray(ref_var)
data = getattr(ref_var.dt, var_name).data
else:
data = getattr(ref_var, var_name).data
virtual_var = Variable(ref_var.dims, data)
return ref_name, var_name, virtual_var
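# Hypothetical illustration of the lookup above: given variables containing a
# datetime-like "time" coordinate, a key such as "time.season" resolves to
# ("time", "season", Variable) where the data comes from the .dt.season accessor;
# a bare dimension name present only in dim_sizes yields a default integer index.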
def calculate_dimensions(variables: Mapping[Hashable, Variable]) -> Dict[Hashable, int]:
"""Calculate the dimensions corresponding to a set of variables.
Returns dictionary mapping from dimension names to sizes. Raises ValueError
if any of the dimension sizes conflict.
"""
dims: Dict[Hashable, int] = {}
last_used = {}
scalar_vars = {k for k, v in variables.items() if not v.dims}
for k, var in variables.items():
for dim, size in zip(var.dims, var.shape):
if dim in scalar_vars:
raise ValueError(
"dimension %r already exists as a scalar variable" % dim
)
if dim not in dims:
dims[dim] = size
last_used[dim] = k
elif dims[dim] != size:
raise ValueError(
"conflicting sizes for dimension %r: "
"length %s on %r and length %s on %r"
% (dim, size, k, dims[dim], last_used[dim])
)
return dims
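# Minimal sketch of the contract above (variable names are illustrative):
#   calculate_dimensions({"a": Variable(("x",), np.zeros(3)),
#                         "b": Variable(("x", "y"), np.zeros((3, 2)))})
#   returns {"x": 3, "y": 2}; a third variable reporting a different length for
#   "x" would raise the "conflicting sizes" ValueError.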
def merge_indexes(
indexes: Mapping[Hashable, Union[Hashable, Sequence[Hashable]]],
variables: Mapping[Hashable, Variable],
coord_names: Set[Hashable],
append: bool = False,
) -> Tuple[Dict[Hashable, Variable], Set[Hashable]]:
"""Merge variables into multi-indexes.
Not public API. Used in Dataset and DataArray set_index
methods.
"""
vars_to_replace: Dict[Hashable, Variable] = {}
vars_to_remove: List[Hashable] = []
dims_to_replace: Dict[Hashable, Hashable] = {}
error_msg = "{} is not the name of an existing variable."
for dim, var_names in indexes.items():
if isinstance(var_names, str) or not isinstance(var_names, Sequence):
var_names = [var_names]
names: List[Hashable] = []
codes: List[List[int]] = []
levels: List[List[int]] = []
current_index_variable = variables.get(dim)
for n in var_names:
try:
var = variables[n]
except KeyError:
raise ValueError(error_msg.format(n))
if (
current_index_variable is not None
and var.dims != current_index_variable.dims
):
raise ValueError(
"dimension mismatch between %r %s and %r %s"
% (dim, current_index_variable.dims, n, var.dims)
)
if current_index_variable is not None and append:
current_index = current_index_variable.to_index()
if isinstance(current_index, pd.MultiIndex):
names.extend(current_index.names)
codes.extend(current_index.codes)
levels.extend(current_index.levels)
else:
names.append("%s_level_0" % dim)
cat = pd.Categorical(current_index.values, ordered=True)
codes.append(cat.codes)
levels.append(cat.categories)
if not len(names) and len(var_names) == 1:
idx = pd.Index(variables[var_names[0]].values)
else: # MultiIndex
for n in var_names:
try:
var = variables[n]
except KeyError:
raise ValueError(error_msg.format(n))
names.append(n)
cat = pd.Categorical(var.values, ordered=True)
codes.append(cat.codes)
levels.append(cat.categories)
idx = pd.MultiIndex(levels, codes, names=names)
for n in names:
dims_to_replace[n] = dim
vars_to_replace[dim] = IndexVariable(dim, idx)
vars_to_remove.extend(var_names)
new_variables = {k: v for k, v in variables.items() if k not in vars_to_remove}
new_variables.update(vars_to_replace)
# update dimensions if necessary, GH: 3512
for k, v in new_variables.items():
if any(d in dims_to_replace for d in v.dims):
new_dims = [dims_to_replace.get(d, d) for d in v.dims]
new_variables[k] = v._replace(dims=new_dims)
new_coord_names = coord_names | set(vars_to_replace)
new_coord_names -= set(vars_to_remove)
return new_variables, new_coord_names
def split_indexes(
dims_or_levels: Union[Hashable, Sequence[Hashable]],
variables: Mapping[Hashable, Variable],
coord_names: Set[Hashable],
level_coords: Mapping[Hashable, Hashable],
drop: bool = False,
) -> Tuple[Dict[Hashable, Variable], Set[Hashable]]:
"""Extract (multi-)indexes (levels) as variables.
Not public API. Used in Dataset and DataArray reset_index
methods.
"""
if isinstance(dims_or_levels, str) or not isinstance(dims_or_levels, Sequence):
dims_or_levels = [dims_or_levels]
dim_levels: DefaultDict[Any, List[Hashable]] = defaultdict(list)
dims = []
for k in dims_or_levels:
if k in level_coords:
dim_levels[level_coords[k]].append(k)
else:
dims.append(k)
vars_to_replace = {}
vars_to_create: Dict[Hashable, Variable] = {}
vars_to_remove = []
for d in dims:
index = variables[d].to_index()
if isinstance(index, pd.MultiIndex):
dim_levels[d] = index.names
else:
vars_to_remove.append(d)
if not drop:
vars_to_create[str(d) + "_"] = Variable(d, index, variables[d].attrs)
for d, levs in dim_levels.items():
index = variables[d].to_index()
if len(levs) == index.nlevels:
vars_to_remove.append(d)
else:
vars_to_replace[d] = IndexVariable(d, index.droplevel(levs))
if not drop:
for lev in levs:
idx = index.get_level_values(lev)
vars_to_create[idx.name] = Variable(d, idx, variables[d].attrs)
new_variables = dict(variables)
for v in set(vars_to_remove):
del new_variables[v]
new_variables.update(vars_to_replace)
new_variables.update(vars_to_create)
new_coord_names = (coord_names | set(vars_to_create)) - set(vars_to_remove)
return new_variables, new_coord_names
def _assert_empty(args: tuple, msg: str = "%s") -> None:
if args:
raise ValueError(msg % args)
def _check_chunks_compatibility(var, chunks, preferred_chunks):
for dim in var.dims:
if dim not in chunks or (dim not in preferred_chunks):
continue
preferred_chunks_dim = preferred_chunks.get(dim)
chunks_dim = chunks.get(dim)
if isinstance(chunks_dim, int):
chunks_dim = (chunks_dim,)
else:
chunks_dim = chunks_dim[:-1]
if any(s % preferred_chunks_dim for s in chunks_dim):
warnings.warn(
f"Specified Dask chunks {chunks[dim]} would separate "
f"on disks chunk shape {preferred_chunks[dim]} for dimension {dim}. "
"This could degrade performance. "
"Consider rechunking after loading instead.",
stacklevel=2,
)
def _get_chunk(var, chunks):
# chunks need to be explicitly computed to correctly take into account
# backend preferred chunking
import dask.array as da
if isinstance(var, IndexVariable):
return {}
if isinstance(chunks, int) or (chunks == "auto"):
chunks = dict.fromkeys(var.dims, chunks)
preferred_chunks = var.encoding.get("preferred_chunks", {})
preferred_chunks_list = [
preferred_chunks.get(dim, shape) for dim, shape in zip(var.dims, var.shape)
]
chunks_list = [
chunks.get(dim, None) or preferred_chunks.get(dim, None) for dim in var.dims
]
output_chunks_list = da.core.normalize_chunks(
chunks_list,
shape=var.shape,
dtype=var.dtype,
previous_chunks=preferred_chunks_list,
)
output_chunks = dict(zip(var.dims, output_chunks_list))
_check_chunks_compatibility(var, output_chunks, preferred_chunks)
return output_chunks
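# Hedged example of the resolution above: for a variable with dims ("x",), shape
# (100,), encoding {"preferred_chunks": {"x": 10}} and chunks="auto", the requested
# chunks are normalized by dask against the on-disk chunking, and
# _check_chunks_compatibility warns if the chosen chunks would split those
# 10-element disk chunks.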
def _maybe_chunk(
name,
var,
chunks,
token=None,
lock=None,
name_prefix="xarray-",
overwrite_encoded_chunks=False,
):
from dask.base import tokenize
if chunks is not None:
chunks = {dim: chunks[dim] for dim in var.dims if dim in chunks}
if var.ndim:
# when rechunking by different amounts, make sure dask names change
# by providing chunks as an input to tokenize.
# subtle bugs result otherwise. see GH3350
token2 = tokenize(name, token if token else var._data, chunks)
name2 = f"{name_prefix}{name}-{token2}"
var = var.chunk(chunks, name=name2, lock=lock)
if overwrite_encoded_chunks and var.chunks is not None:
var.encoding["chunks"] = tuple(x[0] for x in var.chunks)
return var
else:
return var
def as_dataset(obj: Any) -> "Dataset":
"""Cast the given object to a Dataset.
Handles Datasets, DataArrays and dictionaries of variables. A new Dataset
object is only created if the provided object is not already one.
"""
if hasattr(obj, "to_dataset"):
obj = obj.to_dataset()
if not isinstance(obj, Dataset):
obj = Dataset(obj)
return obj
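# Small sketch of the casting rule above: a DataArray is converted via its
# .to_dataset() method, while a plain mapping of variables is handed to the
# Dataset constructor; an existing Dataset is returned unchanged.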
def _get_func_args(func, param_names):
"""Use `inspect.signature` to try accessing `func` args. Otherwise, ensure
they are provided by user.
"""
try:
func_args = inspect.signature(func).parameters
except ValueError:
func_args = {}
if not param_names:
raise ValueError(
"Unable to inspect `func` signature, and `param_names` was not provided."
)
if param_names:
params = param_names
else:
params = list(func_args)[1:]
if any(
[(p.kind in [p.VAR_POSITIONAL, p.VAR_KEYWORD]) for p in func_args.values()]
):
raise ValueError(
"`param_names` must be provided because `func` takes variable length arguments."
)
return params, func_args
def _initialize_curvefit_params(params, p0, bounds, func_args):
"""Set initial guess and bounds for curvefit.
Priority: 1) passed args 2) func signature 3) scipy defaults
"""
def _initialize_feasible(lb, ub):
# Mimics functionality of scipy.optimize.minpack._initialize_feasible
lb_finite = np.isfinite(lb)
ub_finite = np.isfinite(ub)
p0 = np.nansum(
[
0.5 * (lb + ub) * int(lb_finite & ub_finite),
(lb + 1) * int(lb_finite & ~ub_finite),
(ub - 1) * int(~lb_finite & ub_finite),
]
)
return p0
param_defaults = {p: 1 for p in params}
bounds_defaults = {p: (-np.inf, np.inf) for p in params}
for p in params:
if p in func_args and func_args[p].default is not func_args[p].empty:
param_defaults[p] = func_args[p].default
if p in bounds:
bounds_defaults[p] = tuple(bounds[p])
if param_defaults[p] < bounds[p][0] or param_defaults[p] > bounds[p][1]:
param_defaults[p] = _initialize_feasible(bounds[p][0], bounds[p][1])
if p in p0:
param_defaults[p] = p0[p]
return param_defaults, bounds_defaults
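# Worked example of the precedence described above (values are illustrative):
# for params=["a"], p0={}, bounds={"a": (5, 10)} and no default in the function
# signature, the scipy-style default of 1 falls outside the bounds, so the guess
# is re-initialised to 0.5 * (5 + 10) = 7.5; passing p0={"a": 6} would override it.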
class DataVariables(Mapping[Hashable, "DataArray"]):
__slots__ = ("_dataset",)
def __init__(self, dataset: "Dataset"):
self._dataset = dataset
def __iter__(self) -> Iterator[Hashable]:
return (
key
for key in self._dataset._variables
if key not in self._dataset._coord_names
)
def __len__(self) -> int:
return len(self._dataset._variables) - len(self._dataset._coord_names)
def __contains__(self, key: Hashable) -> bool:
return key in self._dataset._variables and key not in self._dataset._coord_names
def __getitem__(self, key: Hashable) -> "DataArray":
if key not in self._dataset._coord_names:
return cast("DataArray", self._dataset[key])
raise KeyError(key)
def __repr__(self) -> str:
return formatting.data_vars_repr(self)
@property
def variables(self) -> Mapping[Hashable, Variable]:
all_variables = self._dataset.variables
return Frozen({k: all_variables[k] for k in self})
def _ipython_key_completions_(self):
"""Provide method for the key-autocompletions in IPython. """
return [
key
for key in self._dataset._ipython_key_completions_()
if key not in self._dataset._coord_names
]
class _LocIndexer:
__slots__ = ("dataset",)
def __init__(self, dataset: "Dataset"):
self.dataset = dataset
def __getitem__(self, key: Mapping[Hashable, Any]) -> "Dataset":
if not utils.is_dict_like(key):
raise TypeError("can only lookup dictionaries from Dataset.loc")
return self.dataset.sel(key)
class Dataset(Mapping, ImplementsDatasetReduce, DataWithCoords):
"""A multi-dimensional, in memory, array database.
A dataset resembles an in-memory representation of a NetCDF file,
and consists of variables, coordinates and attributes which
together form a self describing dataset.
Dataset implements the mapping interface with keys given by variable
names and values given by DataArray objects for each variable name.
One dimensional variables with name equal to their dimension are
index coordinates used for label based indexing.
To load data from a file or file-like object, use the `open_dataset`
function.
Parameters
----------
data_vars : dict-like, optional
A mapping from variable names to :py:class:`~xarray.DataArray`
objects, :py:class:`~xarray.Variable` objects or to tuples of
the form ``(dims, data[, attrs])`` which can be used as
arguments to create a new ``Variable``. Each dimension must
have the same length in all variables in which it appears.
The following notations are accepted:
- mapping {var name: DataArray}
- mapping {var name: Variable}
- mapping {var name: (dimension name, array-like)}
- mapping {var name: (tuple of dimension names, array-like)}
- mapping {dimension name: array-like}
(it will be automatically moved to coords, see below)
Each dimension must have the same length in all variables in
which it appears.
coords : dict-like, optional
Another mapping in similar form as the `data_vars` argument,
except that each item is saved on the dataset as a "coordinate".
These variables have an associated meaning: they describe
constant/fixed/independent quantities, unlike the
varying/measured/dependent quantities that belong in
`variables`. Coordinates values may be given by 1-dimensional
arrays or scalars, in which case `dims` do not need to be
supplied: 1D arrays will be assumed to give index values along
the dimension with the same name.
The following notations are accepted:
- mapping {coord name: DataArray}
- mapping {coord name: Variable}
- mapping {coord name: (dimension name, array-like)}
- mapping {coord name: (tuple of dimension names, array-like)}
- mapping {dimension name: array-like}
(the dimension name is implicitly set to be the same as the
coord name)
The last notation implies that the coord name is the same as
the dimension name.
attrs : dict-like, optional
Global attributes to save on this dataset.
Examples
--------
Create data:
>>> np.random.seed(0)
>>> temperature = 15 + 8 * np.random.randn(2, 2, 3)
>>> precipitation = 10 * np.random.rand(2, 2, 3)
>>> lon = [[-99.83, -99.32], [-99.79, -99.23]]
>>> lat = [[42.25, 42.21], [42.63, 42.59]]
>>> time = pd.date_range("2014-09-06", periods=3)
>>> reference_time = pd.Timestamp("2014-09-05")
Initialize a dataset with multiple dimensions:
>>> ds = xr.Dataset(
... data_vars=dict(
... temperature=(["x", "y", "time"], temperature),
... precipitation=(["x", "y", "time"], precipitation),
... ),
... coords=dict(
... lon=(["x", "y"], lon),
... lat=(["x", "y"], lat),
... time=time,
... reference_time=reference_time,
... ),
... attrs=dict(description="Weather related data."),
... )
>>> ds
<xarray.Dataset>
Dimensions: (time: 3, x: 2, y: 2)
Coordinates:
lon (x, y) float64 -99.83 -99.32 -99.79 -99.23
lat (x, y) float64 42.25 42.21 42.63 42.59
* time (time) datetime64[ns] 2014-09-06 2014-09-07 2014-09-08
reference_time datetime64[ns] 2014-09-05
Dimensions without coordinates: x, y
Data variables:
temperature (x, y, time) float64 29.11 18.2 22.83 ... 18.28 16.15 26.63
precipitation (x, y, time) float64 5.68 9.256 0.7104 ... 7.992 4.615 7.805
Attributes:
description: Weather related data.
Find out where the coldest temperature was and what values the
other variables had:
>>> ds.isel(ds.temperature.argmin(...))
<xarray.Dataset>
Dimensions: ()
Coordinates:
lon float64 -99.32
lat float64 42.21
time datetime64[ns] 2014-09-08
reference_time datetime64[ns] 2014-09-05
Data variables:
temperature float64 7.182
precipitation float64 8.326
Attributes:
description: Weather related data.
"""
_attrs: Optional[Dict[Hashable, Any]]
_cache: Dict[str, Any]
_coord_names: Set[Hashable]
_dims: Dict[Hashable, int]
_encoding: Optional[Dict[Hashable, Any]]
_close: Optional[Callable[[], None]]
_indexes: Optional[Dict[Hashable, pd.Index]]
_variables: Dict[Hashable, Variable]
__slots__ = (
"_attrs",
"_cache",
"_coord_names",
"_dims",
"_encoding",
"_close",
"_indexes",
"_variables",
"__weakref__",
)
_groupby_cls = groupby.DatasetGroupBy
_rolling_cls = rolling.DatasetRolling
_coarsen_cls = rolling.DatasetCoarsen
_resample_cls = resample.DatasetResample
_weighted_cls = weighted.DatasetWeighted
def __init__(
self,
# could make a VariableArgs to use more generally, and refine these
# categories
data_vars: Mapping[Hashable, Any] = None,
coords: Mapping[Hashable, Any] = None,
attrs: Mapping[Hashable, Any] = None,
):
# TODO(shoyer): expose indexes as a public argument in __init__
if data_vars is None:
data_vars = {}
if coords is None:
coords = {}
both_data_and_coords = set(data_vars) & set(coords)
if both_data_and_coords:
raise ValueError(
"variables %r are found in both data_vars and coords"
% both_data_and_coords
)
if isinstance(coords, Dataset):
coords = coords.variables
variables, coord_names, dims, indexes, _ = merge_data_and_coords(
data_vars, coords, compat="broadcast_equals"
)
self._attrs = dict(attrs) if attrs is not None else None
self._close = None
self._encoding = None
self._variables = variables
self._coord_names = coord_names
self._dims = dims
self._indexes = indexes
@classmethod
def load_store(cls, store, decoder=None) -> "Dataset":
"""Create a new dataset from the contents of a backends.*DataStore
object
"""
variables, attributes = store.load()
if decoder:
variables, attributes = decoder(variables, attributes)
obj = cls(variables, attrs=attributes)
obj.set_close(store.close)
return obj
@property
def variables(self) -> Mapping[Hashable, Variable]:
"""Low level interface to Dataset contents as dict of Variable objects.
This ordered dictionary is frozen to prevent mutation that could
violate Dataset invariants. It contains all variable objects
constituting the Dataset, including both data variables and
coordinates.
"""
return Frozen(self._variables)
@property
def attrs(self) -> Dict[Hashable, Any]:
"""Dictionary of global attributes on this dataset"""
if self._attrs is None:
self._attrs = {}
return self._attrs
@attrs.setter
def attrs(self, value: Mapping[Hashable, Any]) -> None:
self._attrs = dict(value)
@property
def encoding(self) -> Dict:
"""Dictionary of global encoding attributes on this dataset"""
if self._encoding is None:
self._encoding = {}
return self._encoding
@encoding.setter
def encoding(self, value: Mapping) -> None:
self._encoding = dict(value)
@property
def dims(self) -> Mapping[Hashable, int]:
"""Mapping from dimension names to lengths.
Cannot be modified directly, but is updated when adding new variables.
        Note that the type of this object differs from `DataArray.dims`.
See `Dataset.sizes` and `DataArray.sizes` for consistently named
properties.
"""
return Frozen(SortedKeysDict(self._dims))
@property
def sizes(self) -> Mapping[Hashable, int]:
"""Mapping from dimension names to lengths.
Cannot be modified directly, but is updated when adding new variables.
This is an alias for `Dataset.dims` provided for the benefit of
consistency with `DataArray.sizes`.
See Also
--------
DataArray.sizes
"""
return self.dims
def load(self, **kwargs) -> "Dataset":
"""Manually trigger loading and/or computation of this dataset's data
from disk or a remote source into memory and return this dataset.
Unlike compute, the original dataset is modified and returned.
Normally, it should not be necessary to call this method in user code,
because all xarray functions should either work on deferred data or
load data automatically. However, this method can be necessary when
working with many file objects on disk.
Parameters
----------
**kwargs : dict
Additional keyword arguments passed on to ``dask.compute``.
See Also
--------
dask.compute
"""
# access .data to coerce everything to numpy or dask arrays
lazy_data = {
k: v._data for k, v in self.variables.items() if is_duck_dask_array(v._data)
}
if lazy_data:
import dask.array as da
# evaluate all the dask arrays simultaneously
evaluated_data = da.compute(*lazy_data.values(), **kwargs)
for k, data in zip(lazy_data, evaluated_data):
self.variables[k].data = data
# load everything else sequentially
for k, v in self.variables.items():
if k not in lazy_data:
v.load()
return self
def __dask_tokenize__(self):
from dask.base import normalize_token
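        # hash on the variables, coordinate names and attrs so that datasets
        # with identical contents produce the same dask token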
return normalize_token(
(type(self), self._variables, self._coord_names, self._attrs)
)
def __dask_graph__(self):
graphs = {k: v.__dask_graph__() for k, v in self.variables.items()}
graphs = {k: v for k, v in graphs.items() if v is not None}
if not graphs:
return None
else:
try:
from dask.highlevelgraph import HighLevelGraph
return HighLevelGraph.merge(*graphs.values())
except ImportError:
from dask import sharedict
                return sharedict.merge(*graphs.values())
    def __dask_keys__(self):
import dask
return [
v.__dask_keys__()
for v in self.variables.values()
if dask.is_dask_collection(v)
]
def __dask_layers__(self):
import dask
return sum(
[
v.__dask_layers__()
for v in self.variables.values()
if dask.is_dask_collection(v)
],
(),
)
@property
def __dask_optimize__(self):
import dask.array as da
return da.Array.__dask_optimize__
@property
def __dask_scheduler__(self):
import dask.array as da
return da.Array.__dask_scheduler__
def __dask_postcompute__(self):
return self._dask_postcompute, ()
def __dask_postpersist__(self):
return self._dask_postpersist, ()
def _dask_postcompute(self, results: "Iterable[Variable]") -> "Dataset":
import dask
variables = {}
results_iter = iter(results)
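        # ``results`` holds one computed entry per dask-backed variable, in the
        # same order as ``__dask_keys__``, so pull the next result as we walk
        # the variables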
for k, v in self._variables.items():
if dask.is_dask_collection(v):
rebuild, args = v.__dask_postcompute__()
v = rebuild(next(results_iter), *args)
variables[k] = v
return Dataset._construct_direct(
variables,
self._coord_names,
self._dims,
self._attrs,
self._indexes,
self._encoding,
self._close,
)
def _dask_postpersist(
self, dsk: Mapping, *, rename: Mapping[str, str] = None
) -> "Dataset":
from dask import is_dask_collection
from dask.highlevelgraph import HighLevelGraph
from dask.optimization import cull
variables = {}
for k, v in self._variables.items():
if not is_dask_collection(v):
variables[k] = v
continue
if isinstance(dsk, HighLevelGraph):
# dask >= 2021.3
# __dask_postpersist__() was called by dask.highlevelgraph.
# Don't use dsk.cull(), as we need to prevent partial layers:
# https://github.com/dask/dask/issues/7137
layers = v.__dask_layers__()
if rename:
layers = [rename.get(k, k) for k in layers]
dsk2 = dsk.cull_layers(layers)
elif rename: # pragma: nocover
# At the moment of writing, this is only for forward compatibility.
# replace_name_in_key requires dask >= 2021.3.
from dask.base import flatten, replace_name_in_key
keys = [
replace_name_in_key(k, rename) for k in flatten(v.__dask_keys__())
]
dsk2, _ = cull(dsk, keys)
else:
# __dask_postpersist__() was called by dask.optimize or dask.persist
dsk2, _ = cull(dsk, v.__dask_keys__())
rebuild, args = v.__dask_postpersist__()
# rename was added in dask 2021.3
kwargs = {"rename": rename} if rename else {}
variables[k] = rebuild(dsk2, *args, **kwargs)
return Dataset._construct_direct(
variables,
self._coord_names,
self._dims,
self._attrs,
self._indexes,
self._encoding,
self._close,
)
def compute(self, **kwargs) -> "Dataset":
"""Manually trigger loading and/or computation of this dataset's data
from disk or a remote source into memory and return a new dataset.
Unlike load, the original dataset is left unaltered.
Normally, it should not be necessary to call this method in user code,
because all xarray functions should either work on deferred data or
load data automatically. However, this method can be necessary when
working with many file objects on disk.
Parameters
----------
**kwargs : dict
Additional keyword arguments passed on to ``dask.compute``.
See Also
--------
dask.compute
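        Examples
        --------
        A minimal sketch (requires dask; the variable names are illustrative):
        >>> ds = xr.Dataset({"a": ("x", np.arange(4))}).chunk({"x": 2})
        >>> computed = ds.compute()  # numpy-backed copy; ``ds`` itself is unchanged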
"""
new = self.copy(deep=False)
return new.load(**kwargs)
def _persist_inplace(self, **kwargs) -> "Dataset":
"""Persist all Dask arrays in memory"""
# access .data to coerce everything to numpy or dask arrays
lazy_data = {
k: v._data for k, v in self.variables.items() if is_duck_dask_array(v._data)
}
if lazy_data:
import dask
# evaluate all the dask arrays simultaneously
evaluated_data = dask.persist(*lazy_data.values(), **kwargs)
for k, data in zip(lazy_data, evaluated_data):
self.variables[k].data = data
return self
def persist(self, **kwargs) -> "Dataset":
"""Trigger computation, keeping data as dask arrays
This operation can be used to trigger computation on underlying dask
arrays, similar to ``.compute()`` or ``.load()``. However this
operation keeps the data as dask arrays. This is particularly useful
when using the dask.distributed scheduler and you want to load a large
amount of data into distributed memory.
Parameters
----------
**kwargs : dict
Additional keyword arguments passed on to ``dask.persist``.
See Also
--------
dask.persist
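        Examples
        --------
        A minimal sketch (requires dask; the variable names are illustrative):
        >>> ds = xr.Dataset({"a": ("x", np.arange(10))}).chunk({"x": 5})
        >>> persisted = ds.persist()  # triggers computation but keeps dask arrays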
"""
new = self.copy(deep=False)
return new._persist_inplace(**kwargs)
@classmethod
def _construct_direct(
cls,
variables,
coord_names,
dims=None,
attrs=None,
indexes=None,
encoding=None,
close=None,
):
"""Shortcut around __init__ for internal use when we want to skip
costly validation
"""
if dims is None:
dims = calculate_dimensions(variables)
obj = object.__new__(cls)
obj._variables = variables
obj._coord_names = coord_names
obj._dims = dims
obj._indexes = indexes
obj._attrs = attrs
obj._close = close
obj._encoding = encoding
return obj
def _replace(
self,
variables: Dict[Hashable, Variable] = None,
coord_names: Set[Hashable] = None,
dims: Dict[Any, int] = None,
attrs: Union[Dict[Hashable, Any], None, Default] = _default,
indexes: Union[Dict[Any, pd.Index], None, Default] = _default,
encoding: Union[dict, None, Default] = _default,
inplace: bool = False,
) -> "Dataset":
"""Fastpath constructor for internal use.
        Returns an object with optionally replaced attributes.
Explicitly passed arguments are *not* copied when placed on the new
dataset. It is up to the caller to ensure that they have the right type
and are not used elsewhere.
"""
if inplace:
if variables is not None:
self._variables = variables
if coord_names is not None:
self._coord_names = coord_names
if dims is not None:
self._dims = dims
if attrs is not _default:
self._attrs = attrs
if indexes is not _default:
self._indexes = indexes
if encoding is not _default:
self._encoding = encoding
obj = self
else:
if variables is None:
variables = self._variables.copy()
if coord_names is None:
coord_names = self._coord_names.copy()
if dims is None:
dims = self._dims.copy()
if attrs is _default:
attrs = copy.copy(self._attrs)
if indexes is _default:
indexes = copy.copy(self._indexes)
if encoding is _default:
encoding = copy.copy(self._encoding)
obj = self._construct_direct(
variables, coord_names, dims, attrs, indexes, encoding
)
return obj
def _replace_with_new_dims(
self,
variables: Dict[Hashable, Variable],
coord_names: set = None,
attrs: Union[Dict[Hashable, Any], None, Default] = _default,
indexes: Union[Dict[Hashable, pd.Index], None, Default] = _default,
inplace: bool = False,
) -> "Dataset":
"""Replace variables with recalculated dimensions."""
dims = calculate_dimensions(variables)
return self._replace(
variables, coord_names, dims, attrs, indexes, inplace=inplace
)
def _replace_vars_and_dims(
self,
variables: Dict[Hashable, Variable],
coord_names: set = None,
dims: Dict[Hashable, int] = None,
attrs: Union[Dict[Hashable, Any], None, Default] = _default,
inplace: bool = False,
) -> "Dataset":
"""Deprecated version of _replace_with_new_dims().
Unlike _replace_with_new_dims(), this method always recalculates
indexes from variables.
"""
if dims is None:
dims = calculate_dimensions(variables)
return self._replace(
variables, coord_names, dims, attrs, indexes=None, inplace=inplace
)
def _overwrite_indexes(self, indexes: Mapping[Any, pd.Index]) -> "Dataset":
if not indexes:
return self
variables = self._variables.copy()
new_indexes = dict(self.indexes)
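        # store each new index as an IndexVariable so it behaves like any other
        # coordinate variable on the dataset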
for name, idx in indexes.items():
variables[name] = IndexVariable(name, idx)
new_indexes[name] = idx
obj = self._replace(variables, indexes=new_indexes)
# switch from dimension to level names, if necessary
dim_names: Dict[Hashable, str] = {}
for dim, idx in indexes.items():
if not isinstance(idx, pd.MultiIndex) and idx.name != dim:
dim_names[dim] = idx.name
if dim_names:
obj = obj.rename(dim_names)
return obj
def copy(self, deep: bool = False, data: Mapping = None) -> "Dataset":
"""Returns a copy of this dataset.
If `deep=True`, a deep copy is made of each of the component variables.
Otherwise, a shallow copy of each of the component variable is made, so
that the underlying memory region of the new dataset is the same as in
the original dataset.
Use `data` to create a new object with the same structure as
original but entirely new data.
Parameters
----------
deep : bool, optional
Whether each component variable is loaded into memory and copied onto
the new object. Default is False.
data : dict-like, optional
Data to use in the new object. Each item in `data` must have same
shape as corresponding data variable in original. When `data` is
used, `deep` is ignored for the data variables and only used for
coords.
Returns
-------
object : Dataset
New object with dimensions, attributes, coordinates, name, encoding,
and optionally data copied from original.
Examples
--------
Shallow copy versus deep copy
>>> da = xr.DataArray(np.random.randn(2, 3))
>>> ds = xr.Dataset(
... {"foo": da, "bar": ("x", [-1, 2])},
... coords={"x": ["one", "two"]},
... )
>>> ds.copy()
<xarray.Dataset>
Dimensions: (dim_0: 2, dim_1: 3, x: 2)
Coordinates:
* x (x) <U3 'one' 'two'
Dimensions without coordinates: dim_0, dim_1
Data variables:
foo (dim_0, dim_1) float64 1.764 0.4002 0.9787 2.241 1.868 -0.9773
bar (x) int64 -1 2
>>> ds_0 = ds.copy(deep=False)
>>> ds_0["foo"][0, 0] = 7
>>> ds_0
<xarray.Dataset>
Dimensions: (dim_0: 2, dim_1: 3, x: 2)
Coordinates:
* x (x) <U3 'one' 'two'
Dimensions without coordinates: dim_0, dim_1
Data variables:
foo (dim_0, dim_1) float64 7.0 0.4002 0.9787 2.241 1.868 -0.9773
bar (x) int64 -1 2
>>> ds
<xarray.Dataset>
Dimensions: (dim_0: 2, dim_1: 3, x: 2)
Coordinates:
* x (x) <U3 'one' 'two'
Dimensions without coordinates: dim_0, dim_1
Data variables:
foo (dim_0, dim_1) float64 7.0 0.4002 0.9787 2.241 1.868 -0.9773
bar (x) int64 -1 2
Changing the data using the ``data`` argument maintains the
structure of the original object, but with the new data. Original
object is unaffected.
>>> ds.copy(data={"foo": np.arange(6).reshape(2, 3), "bar": ["a", "b"]})
<xarray.Dataset>
Dimensions: (dim_0: 2, dim_1: 3, x: 2)
Coordinates:
* x (x) <U3 'one' 'two'
Dimensions without coordinates: dim_0, dim_1
Data variables:
foo (dim_0, dim_1) int64 0 1 2 3 4 5
bar (x) <U1 'a' 'b'
>>> ds
<xarray.Dataset>
Dimensions: (dim_0: 2, dim_1: 3, x: 2)
Coordinates:
* x (x) <U3 'one' 'two'
Dimensions without coordinates: dim_0, dim_1
Data variables:
foo (dim_0, dim_1) float64 7.0 0.4002 0.9787 2.241 1.868 -0.9773
bar (x) int64 -1 2
See Also
--------
pandas.DataFrame.copy
"""
if data is None:
variables = {k: v.copy(deep=deep) for k, v in self._variables.items()}
elif not utils.is_dict_like(data):
raise ValueError("Data must be dict-like")
else:
var_keys = set(self.data_vars.keys())
data_keys = set(data.keys())
keys_not_in_vars = data_keys - var_keys
if keys_not_in_vars:
raise ValueError(
"Data must only contain variables in original "
"dataset. Extra variables: {}".format(keys_not_in_vars)
)
keys_missing_from_data = var_keys - data_keys
if keys_missing_from_data:
raise ValueError(
"Data must contain all variables in original "
"dataset. Data is missing {}".format(keys_missing_from_data)
)
variables = {
k: v.copy(deep=deep, data=data.get(k))
for k, v in self._variables.items()
}
attrs = copy.deepcopy(self._attrs) if deep else copy.copy(self._attrs)
return self._replace(variables, attrs=attrs)
@property
def _level_coords(self) -> Dict[str, Hashable]:
"""Return a mapping of all MultiIndex levels and their corresponding
coordinate name.
"""
level_coords: Dict[str, Hashable] = {}
for name, index in self.indexes.items():
if isinstance(index, pd.MultiIndex):
level_names = index.names
(dim,) = self.variables[name].dims
level_coords.update({lname: dim for lname in level_names})
return level_coords
def _copy_listed(self, names: Iterable[Hashable]) -> "Dataset":
"""Create a new Dataset with the listed variables from this dataset and
the all relevant coordinates. Skips all validation.
"""
variables: Dict[Hashable, Variable] = {}
coord_names = set()
indexes: Dict[Hashable, pd.Index] = {}
for name in names:
try:
variables[name] = self._variables[name]
except KeyError:
ref_name, var_name, var = _get_virtual_variable(
self._variables, name, self._level_coords, self.dims
)
variables[var_name] = var
if ref_name in self._coord_names or ref_name in self.dims:
coord_names.add(var_name)
if (var_name,) == var.dims:
indexes[var_name] = var.to_index()
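        # collect the dimensions used by the requested variables so that only
        # coordinates defined on those dimensions are carried over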
needed_dims: Set[Hashable] = set()
for v in variables.values():
needed_dims.update(v.dims)
dims = {k: self.dims[k] for k in needed_dims}
# preserves ordering of coordinates
for k in self._variables:
if k not in self._coord_names:
continue
if set(self.variables[k].dims) <= needed_dims:
variables[k] = self._variables[k]
coord_names.add(k)
if k in self.indexes:
indexes[k] = self.indexes[k]
return self._replace(variables, coord_names, dims, indexes=indexes)
def _construct_dataarray(self, name: Hashable) -> "DataArray":
"""Construct a DataArray by indexing this dataset"""
from .dataarray import DataArray
try:
variable = self._variables[name]
except KeyError:
_, name, variable = _get_virtual_variable(
self._variables, name, self._level_coords, self.dims
)
needed_dims = set(variable.dims)
coords: Dict[Hashable, Variable] = {}
# preserve ordering
for k in self._variables:
if k in self._coord_names and set(self.variables[k].dims) <= needed_dims:
coords[k] = self.variables[k]
if self._indexes is None:
indexes = None
else:
indexes = {k: v for k, v in self._indexes.items() if k in coords}
return DataArray(variable, coords, name=name, indexes=indexes, fastpath=True)
def __copy__(self) -> "Dataset":
return self.copy(deep=False)
def __deepcopy__(self, memo=None) -> "Dataset":
# memo does nothing but is required for compatibility with
# copy.deepcopy
return self.copy(deep=True)
@property
def _attr_sources(self) -> Iterable[Mapping[Hashable, Any]]:
"""Places to look-up items for attribute-style access"""
yield from self._item_sources
yield self.attrs
@property
def _item_sources(self) -> Iterable[Mapping[Hashable, Any]]:
"""Places to look-up items for key-completion"""
yield self.data_vars
yield HybridMappingProxy(keys=self._coord_names, mapping=self.coords)
# virtual coordinates
yield HybridMappingProxy(keys=self.dims, mapping=self)
# uses empty dict -- everything here can already be found in self.coords.
yield HybridMappingProxy(keys=self._level_coords, mapping={})
def __contains__(self, key: object) -> bool:
"""The 'in' operator will return true or false depending on whether
'key' is an array in the dataset or not.
"""
return key in self._variables
def __len__(self) -> int:
return len(self.data_vars)
def __bool__(self) -> bool:
return bool(self.data_vars)
def __iter__(self) -> Iterator[Hashable]:
return iter(self.data_vars)
def __array__(self, dtype=None):
raise TypeError(
"cannot directly convert an xarray.Dataset into a "
"numpy array. Instead, create an xarray.DataArray "
"first, either with indexing on the Dataset or by "
"invoking the `to_array()` method."
)
@property
def nbytes(self) -> int:
return sum(v.nbytes for v in self.variables.values())
@property
def loc(self) -> _LocIndexer:
"""Attribute for location based indexing. Only supports __getitem__,
and only when the key is a dict of the form {dim: labels}.
"""
return _LocIndexer(self)
# FIXME https://github.com/python/mypy/issues/7328
@overload
def __getitem__(self, key: Mapping) -> "Dataset": # type: ignore[misc]
...
@overload
def __getitem__(self, key: Hashable) -> "DataArray": # type: ignore[misc]
...
@overload
def __getitem__(self, key: Any) -> "Dataset":
...
def __getitem__(self, key):
"""Access variables or coordinates this dataset as a
:py:class:`~xarray.DataArray`.
Indexing with a list of names will return a new ``Dataset`` object.
"""
if utils.is_dict_like(key):
return self.isel(**cast(Mapping, key))
if hashable(key):
return self._construct_dataarray(key)
else:
return self._copy_listed(np.asarray(key))
def __setitem__(self, key: Hashable, value) -> None:
"""Add an array to this dataset.
If value is a `DataArray`, call its `select_vars()` method, rename it
to `key` and merge the contents of the resulting dataset into this
dataset.
        If value is a `Variable` object (or tuple of form
``(dims, data[, attrs])``), add it to this dataset as a new
variable.
"""
if utils.is_dict_like(key):
raise NotImplementedError(
"cannot yet use a dictionary as a key to set Dataset values"
)
self.update({key: value})
def __delitem__(self, key: Hashable) -> None:
"""Remove a variable from this dataset."""
del self._variables[key]
self._coord_names.discard(key)
if key in self.indexes:
assert self._indexes is not None
del self._indexes[key]
self._dims = calculate_dimensions(self._variables)
# mutable objects should not be hashable
# https://github.com/python/mypy/issues/4266
__hash__ = None # type: ignore[assignment]
def _all_compat(self, other: "Dataset", compat_str: str) -> bool:
"""Helper function for equals and identical"""
# some stores (e.g., scipy) do not seem to preserve order, so don't
# require matching order for equality
def compat(x: Variable, y: Variable) -> bool:
return getattr(x, compat_str)(y)
return self._coord_names == other._coord_names and utils.dict_equiv(
self._variables, other._variables, compat=compat
)
def broadcast_equals(self, other: "Dataset") -> bool:
"""Two Datasets are broadcast equal if they are equal after
broadcasting all variables against each other.
For example, variables that are scalar in one dataset but non-scalar in
        the other dataset can still be broadcast equal if the non-scalar
variable is a constant.
See Also
--------
Dataset.equals
Dataset.identical
"""
try:
return self._all_compat(other, "broadcast_equals")
except (TypeError, AttributeError):
return False
def equals(self, other: "Dataset") -> bool:
"""Two Datasets are equal if they have matching variables and
coordinates, all of which are equal.
Datasets can still be equal (like pandas objects) if they have NaN
values in the same locations.
This method is necessary because `v1 == v2` for ``Dataset``
does element-wise comparisons (like numpy.ndarrays).
See Also
--------
Dataset.broadcast_equals
Dataset.identical
"""
try:
return self._all_compat(other, "equals")
except (TypeError, AttributeError):
return False
def identical(self, other: "Dataset") -> bool:
"""Like equals, but also checks all dataset attributes and the
attributes on all variables and coordinates.
See Also
--------
Dataset.broadcast_equals
Dataset.equals
"""
try:
return utils.dict_equiv(self.attrs, other.attrs) and self._all_compat(
other, "identical"
)
except (TypeError, AttributeError):
return False
@property
def indexes(self) -> Indexes:
"""Mapping of pandas.Index objects used for label based indexing"""
if self._indexes is None:
self._indexes = default_indexes(self._variables, self._dims)
return Indexes(self._indexes)
@property
def coords(self) -> DatasetCoordinates:
"""Dictionary of xarray.DataArray objects corresponding to coordinate
variables
"""
return DatasetCoordinates(self)
@property
def data_vars(self) -> DataVariables:
"""Dictionary of DataArray objects corresponding to data variables"""
return DataVariables(self)
def set_coords(self, names: "Union[Hashable, Iterable[Hashable]]") -> "Dataset":
"""Given names of one or more variables, set them as coordinates
Parameters
----------
names : hashable or iterable of hashable
Name(s) of variables in this dataset to convert into coordinates.
Returns
-------
Dataset
See Also
--------
Dataset.swap_dims
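        Examples
        --------
        A minimal sketch (the variable names are illustrative):
        >>> ds = xr.Dataset({"height": ("x", [1.0, 2.0]), "station_id": ("x", [10, 20])})
        >>> ds2 = ds.set_coords("station_id")  # "station_id" becomes a coordinate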
"""
# TODO: allow inserting new coordinates with this method, like
# DataFrame.set_index?
        # nb. check in self._variables, not self.data_vars to ensure that the
# operation is idempotent
if isinstance(names, str) or not isinstance(names, Iterable):
names = [names]
else:
names = list(names)
self._assert_all_in_dataset(names)
obj = self.copy()
obj._coord_names.update(names)
return obj
def reset_coords(
self,
names: "Union[Hashable, Iterable[Hashable], None]" = None,
drop: bool = False,
) -> "Dataset":
"""Given names of coordinates, reset them to become variables
Parameters
----------
names : hashable or iterable of hashable, optional
Name(s) of non-index coordinates in this dataset to reset into
variables. By default, all non-index coordinates are reset.
drop : bool, optional
If True, remove coordinates instead of converting them into
variables.
Returns
-------
Dataset
"""
if names is None:
names = self._coord_names - set(self.dims)
else:
if isinstance(names, str) or not isinstance(names, Iterable):
names = [names]
else:
names = list(names)
self._assert_all_in_dataset(names)
bad_coords = set(names) & set(self.dims)
if bad_coords:
raise ValueError(
"cannot remove index coordinates with reset_coords: %s" % bad_coords
)
obj = self.copy()
obj._coord_names.difference_update(names)
if drop:
for name in names:
del obj._variables[name]
return obj
def dump_to_store(self, store: "AbstractDataStore", **kwargs) -> None:
"""Store dataset contents to a backends.*DataStore object."""
from ..backends.api import dump_to_store
# TODO: rename and/or cleanup this method to make it more consistent
# with to_netcdf()
dump_to_store(self, store, **kwargs)
def to_netcdf(
self,
path=None,
mode: str = "w",
format: str = None,
group: str = None,
engine: str = None,
encoding: Mapping = None,
unlimited_dims: Iterable[Hashable] = None,
compute: bool = True,
invalid_netcdf: bool = False,
) -> Union[bytes, "Delayed", None]:
"""Write dataset contents to a netCDF file.
Parameters
----------
path : str, Path or file-like, optional
Path to which to save this dataset. File-like objects are only
supported by the scipy engine. If no path is provided, this
function returns the resulting netCDF file as bytes; in this case,
we need to use scipy, which does not support netCDF version 4 (the
default format becomes NETCDF3_64BIT).
mode : {"w", "a"}, default: "w"
Write ('w') or append ('a') mode. If mode='w', any existing file at
this location will be overwritten. If mode='a', existing variables
will be overwritten.
format : {"NETCDF4", "NETCDF4_CLASSIC", "NETCDF3_64BIT", \
"NETCDF3_CLASSIC"}, optional
File format for the resulting netCDF file:
* NETCDF4: Data is stored in an HDF5 file, using netCDF4 API
features.
* NETCDF4_CLASSIC: Data is stored in an HDF5 file, using only
netCDF 3 compatible API features.
* NETCDF3_64BIT: 64-bit offset version of the netCDF 3 file format,
which fully supports 2+ GB files, but is only compatible with
clients linked against netCDF version 3.6.0 or later.
* NETCDF3_CLASSIC: The classic netCDF 3 file format. It does not
handle 2+ GB files very well.
All formats are supported by the netCDF4-python library.
scipy.io.netcdf only supports the last two formats.
The default format is NETCDF4 if you are saving a file to disk and
have the netCDF4-python library available. Otherwise, xarray falls
back to using scipy to write netCDF files and defaults to the
NETCDF3_64BIT format (scipy does not support netCDF4).
group : str, optional
Path to the netCDF4 group in the given file to open (only works for
format='NETCDF4'). The group(s) will be created if necessary.
engine : {"netcdf4", "scipy", "h5netcdf"}, optional
Engine to use when writing netCDF files. If not provided, the
default engine is chosen based on available dependencies, with a
preference for 'netcdf4' if writing to a file on disk.
encoding : dict, optional
Nested dictionary with variable names as keys and dictionaries of
variable specific encodings as values, e.g.,
``{"my_variable": {"dtype": "int16", "scale_factor": 0.1,
"zlib": True}, ...}``
The `h5netcdf` engine supports both the NetCDF4-style compression
encoding parameters ``{"zlib": True, "complevel": 9}`` and the h5py
ones ``{"compression": "gzip", "compression_opts": 9}``.
This allows using any compression plugin installed in the HDF5
library, e.g. LZF.
unlimited_dims : iterable of hashable, optional
Dimension(s) that should be serialized as unlimited dimensions.
By default, no dimensions are treated as unlimited dimensions.
Note that unlimited_dims may also be set via
``dataset.encoding["unlimited_dims"]``.
compute: bool, default: True
If true compute immediately, otherwise return a
``dask.delayed.Delayed`` object that can be computed later.
invalid_netcdf: bool, default: False
Only valid along with ``engine="h5netcdf"``. If True, allow writing
hdf5 files which are invalid netcdf as described in
https://github.com/shoyer/h5netcdf.
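        Examples
        --------
        A minimal sketch (the path is hypothetical; a netCDF backend must be
        installed):
        >>> ds = xr.Dataset({"a": ("x", [1, 2, 3])})
        >>> ds.to_netcdf("example.nc")  # doctest: +SKIP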
"""
if encoding is None:
encoding = {}
from ..backends.api import to_netcdf
return to_netcdf(
self,
path,
mode,
format=format,
group=group,
engine=engine,
encoding=encoding,
unlimited_dims=unlimited_dims,
compute=compute,
invalid_netcdf=invalid_netcdf,
)
def to_zarr(
self,
store: Union[MutableMapping, str, Path] = None,
chunk_store: Union[MutableMapping, str, Path] = None,
mode: str = None,
synchronizer=None,
group: str = None,
encoding: Mapping = None,
compute: bool = True,
consolidated: bool = False,
append_dim: Hashable = None,
region: Mapping[str, slice] = None,
) -> "ZarrStore":
"""Write dataset contents to a zarr group.
.. note:: Experimental
The Zarr backend is new and experimental. Please report any
unexpected behavior via github issues.
Parameters
----------
store : MutableMapping, str or Path, optional
Store or path to directory in file system.
chunk_store : MutableMapping, str or Path, optional
Store or path to directory in file system only for Zarr array chunks.
Requires zarr-python v2.4.0 or later.
mode : {"w", "w-", "a", None}, optional
Persistence mode: "w" means create (overwrite if exists);
"w-" means create (fail if exists);
"a" means override existing variables (create if does not exist).
If ``append_dim`` is set, ``mode`` can be omitted as it is
internally set to ``"a"``. Otherwise, ``mode`` will default to
`w-` if not set.
synchronizer : object, optional
Zarr array synchronizer.
group : str, optional
Group path. (a.k.a. `path` in zarr terminology.)
encoding : dict, optional
Nested dictionary with variable names as keys and dictionaries of
variable specific encodings as values, e.g.,
``{"my_variable": {"dtype": "int16", "scale_factor": 0.1,}, ...}``
compute : bool, optional
If True write array data immediately, otherwise return a
``dask.delayed.Delayed`` object that can be computed to write
array data later. Metadata is always updated eagerly.
consolidated : bool, optional
If True, apply zarr's `consolidate_metadata` function to the store
after writing metadata.
append_dim : hashable, optional
If set, the dimension along which the data will be appended. All
            other dimensions on overridden variables must remain the same size.
region : dict, optional
Optional mapping from dimension names to integer slices along
dataset dimensions to indicate the region of existing zarr array(s)
in which to write this dataset's data. For example,
``{'x': slice(0, 1000), 'y': slice(10000, 11000)}`` would indicate
that values should be written to the region ``0:1000`` along ``x``
and ``10000:11000`` along ``y``.
Two restrictions apply to the use of ``region``:
- If ``region`` is set, _all_ variables in a dataset must have at
least one dimension in common with the region. Other variables
should be written in a separate call to ``to_zarr()``.
- Dimensions cannot be included in both ``region`` and
``append_dim`` at the same time. To create empty arrays to fill
in with ``region``, use a separate call to ``to_zarr()`` with
``compute=False``. See "Appending to existing Zarr stores" in
the reference documentation for full details.
References
----------
https://zarr.readthedocs.io/
Notes
-----
Zarr chunking behavior:
If chunks are found in the encoding argument or attribute
corresponding to any DataArray, those chunks are used.
If a DataArray is a dask array, it is written with those chunks.
        If no other chunks are found, Zarr uses its own heuristics to
choose automatic chunk sizes.
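        Examples
        --------
        A minimal sketch (the path is hypothetical; requires the zarr package):
        >>> ds = xr.Dataset({"a": ("x", [1, 2, 3])})
        >>> ds.to_zarr("example.zarr")  # doctest: +SKIP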
"""
from ..backends.api import to_zarr
if encoding is None:
encoding = {}
return to_zarr(
self,
store=store,
chunk_store=chunk_store,
mode=mode,
synchronizer=synchronizer,
group=group,
encoding=encoding,
compute=compute,
consolidated=consolidated,
append_dim=append_dim,
region=region,
)
def __repr__(self) -> str:
return formatting.dataset_repr(self)
def _repr_html_(self):
if OPTIONS["display_style"] == "text":
return f"<pre>{escape(repr(self))}</pre>"
return formatting_html.dataset_repr(self)
def info(self, buf=None) -> None:
"""
Concise summary of a Dataset variables and attributes.
Parameters
----------
buf : file-like, default: sys.stdout
writable buffer
See Also
--------
pandas.DataFrame.assign
ncdump : netCDF's ncdump
"""
if buf is None: # pragma: no cover
buf = sys.stdout
lines = []
lines.append("xarray.Dataset {")
lines.append("dimensions:")
for name, size in self.dims.items():
lines.append(f"\t{name} = {size} ;")
lines.append("\nvariables:")
for name, da in self.variables.items():
dims = ", ".join(da.dims)
lines.append(f"\t{da.dtype} {name}({dims}) ;")
for k, v in da.attrs.items():
lines.append(f"\t\t{name}:{k} = {v} ;")
lines.append("\n// global attributes:")
for k, v in self.attrs.items():
lines.append(f"\t:{k} = {v} ;")
lines.append("}")
buf.write("\n".join(lines))
@property
def chunks(self) -> Mapping[Hashable, Tuple[int, ...]]:
"""Block dimensions for this dataset's data or None if it's not a dask
array.
"""
chunks: Dict[Hashable, Tuple[int, ...]] = {}
for v in self.variables.values():
if v.chunks is not None:
for dim, c in zip(v.dims, v.chunks):
if dim in chunks and c != chunks[dim]:
raise ValueError(
f"Object has inconsistent chunks along dimension {dim}. "
"This can be fixed by calling unify_chunks()."
)
chunks[dim] = c
return Frozen(SortedKeysDict(chunks))
def chunk(
self,
chunks: Union[
Number,
str,
Mapping[Hashable, Union[None, Number, str, Tuple[Number, ...]]],
] = {}, # {} even though it's technically unsafe, is being used intentionally here (#4667)
name_prefix: str = "xarray-",
token: str = None,
lock: bool = False,
) -> "Dataset":
"""Coerce all arrays in this dataset into dask arrays with the given
chunks.
Non-dask arrays in this dataset will be converted to dask arrays. Dask
arrays will be rechunked to the given chunk sizes.
        If chunks are not provided for one or more dimensions, chunk sizes
        along those dimensions will not be updated; non-dask arrays will be
        converted into dask arrays with a single block.
Parameters
----------
chunks : int, 'auto' or mapping, optional
Chunk sizes along each dimension, e.g., ``5`` or
``{"x": 5, "y": 5}``.
name_prefix : str, optional
Prefix for the name of any new dask arrays.
token : str, optional
Token uniquely identifying this dataset.
lock : optional
Passed on to :py:func:`dask.array.from_array`, if the array is not
already as dask array.
Returns
-------
chunked : xarray.Dataset
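        Examples
        --------
        A minimal sketch (requires dask; the data is illustrative):
        >>> ds = xr.Dataset({"a": (("x", "y"), np.zeros((4, 6)))})
        >>> chunked = ds.chunk({"x": 2, "y": 3})  # dask-backed copy with 2x3 blocks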
"""
if chunks is None:
warnings.warn(
"None value for 'chunks' is deprecated. "
"It will raise an error in the future. Use instead '{}'",
category=FutureWarning,
)
chunks = {}
if isinstance(chunks, (Number, str)):
chunks = dict.fromkeys(self.dims, chunks)
bad_dims = chunks.keys() - self.dims.keys()
if bad_dims:
raise ValueError(
"some chunks keys are not dimensions on this " "object: %s" % bad_dims
)
variables = {
k: _maybe_chunk(k, v, chunks, token, lock, name_prefix)
for k, v in self.variables.items()
}
return self._replace(variables)
def _validate_indexers(
self, indexers: Mapping[Hashable, Any], missing_dims: str = "raise"
) -> Iterator[Tuple[Hashable, Union[int, slice, np.ndarray, Variable]]]:
"""Here we make sure
        + indexer has valid keys
        + indexer is of a valid data type
+ string indexers are cast to the appropriate date type if the
associated index is a DatetimeIndex or CFTimeIndex
"""
from .dataarray import DataArray
indexers = drop_dims_from_indexers(indexers, self.dims, missing_dims)
# all indexers should be int, slice, np.ndarrays, or Variable
for k, v in indexers.items():
if isinstance(v, (int, slice, Variable)):
yield k, v
elif isinstance(v, DataArray):
yield k, v.variable
elif isinstance(v, tuple):
yield k, as_variable(v)
elif isinstance(v, Dataset):
raise TypeError("cannot use a Dataset as an indexer")
elif isinstance(v, Sequence) and len(v) == 0:
yield k, np.empty((0,), dtype="int64")
else:
v = np.asarray(v)
if v.dtype.kind in "US":
index = self.indexes[k]
if isinstance(index, pd.DatetimeIndex):
v = v.astype("datetime64[ns]")
elif isinstance(index, xr.CFTimeIndex):
v = _parse_array_of_cftime_strings(v, index.date_type)
if v.ndim > 1:
raise IndexError(
"Unlabeled multi-dimensional array cannot be "
"used for indexing: {}".format(k)
)
yield k, v
def _validate_interp_indexers(
self, indexers: Mapping[Hashable, Any]
) -> Iterator[Tuple[Hashable, Variable]]:
"""Variant of _validate_indexers to be used for interpolation"""
for k, v in self._validate_indexers(indexers):
if isinstance(v, Variable):
if v.ndim == 1:
yield k, v.to_index_variable()
else:
yield k, v
elif isinstance(v, int):
yield k, Variable((), v)
elif isinstance(v, np.ndarray):
if v.ndim == 0:
yield k, Variable((), v)
elif v.ndim == 1:
yield k, IndexVariable((k,), v)
else:
raise AssertionError() # Already tested by _validate_indexers
else:
raise TypeError(type(v))
def _get_indexers_coords_and_indexes(self, indexers):
"""Extract coordinates and indexes from indexers.
        Only coordinates with a name different from any of self.variables will
be attached.
"""
from .dataarray import DataArray
coords_list = []
for k, v in indexers.items():
if isinstance(v, DataArray):
if v.dtype.kind == "b":
if v.ndim != 1: # we only support 1-d boolean array
raise ValueError(
"{:d}d-boolean array is used for indexing along "
"dimension {!r}, but only 1d boolean arrays are "
"supported.".format(v.ndim, k)
)
                    # Make sure that, in the case of a boolean DataArray, its
                    # coordinates are indexed as well.
v_coords = v[v.values.nonzero()[0]].coords
else:
v_coords = v.coords
coords_list.append(v_coords)
# we don't need to call align() explicitly or check indexes for
# alignment, because merge_variables already checks for exact alignment
# between dimension coordinates
coords, indexes = merge_coordinates_without_align(coords_list)
assert_coordinate_consistent(self, coords)
# silently drop the conflicted variables.
attached_coords = {k: v for k, v in coords.items() if k not in self._variables}
attached_indexes = {
k: v for k, v in indexes.items() if k not in self._variables
}
return attached_coords, attached_indexes
def isel(
self,
indexers: Mapping[Hashable, Any] = None,
drop: bool = False,
missing_dims: str = "raise",
**indexers_kwargs: Any,
) -> "Dataset":
"""Returns a new dataset with each array indexed along the specified
dimension(s).
This method selects values from each array using its `__getitem__`
method, except this method does not require knowing the order of
each array's dimensions.
Parameters
----------
indexers : dict, optional
A dict with keys matching dimensions and values given
by integers, slice objects or arrays.
            indexer can be an integer, slice, array-like or DataArray.
If DataArrays are passed as indexers, xarray-style indexing will be
carried out. See :ref:`indexing` for the details.
One of indexers or indexers_kwargs must be provided.
drop : bool, optional
            If ``drop=True``, drop coordinate variables indexed by integers
instead of making them scalar.
missing_dims : {"raise", "warn", "ignore"}, default: "raise"
What to do if dimensions that should be selected from are not present in the
Dataset:
- "raise": raise an exception
- "warning": raise a warning, and ignore the missing dimensions
- "ignore": ignore the missing dimensions
**indexers_kwargs : {dim: indexer, ...}, optional
The keyword arguments form of ``indexers``.
One of indexers or indexers_kwargs must be provided.
Returns
-------
obj : Dataset
A new Dataset with the same contents as this dataset, except each
array and dimension is indexed by the appropriate indexers.
If indexer DataArrays have coordinates that do not conflict with
this object, then these coordinates will be attached.
In general, each array's data will be a view of the array's data
in this dataset, unless vectorized indexing was triggered by using
an array indexer, in which case the data will be a copy.
See Also
--------
Dataset.sel
DataArray.isel
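        Examples
        --------
        A short sketch (the data is illustrative):
        >>> ds = xr.Dataset({"a": (("x", "y"), np.arange(12).reshape(3, 4))})
        >>> sub = ds.isel(x=0, y=slice(2))  # first row, first two columns of "a"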
"""
indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "isel")
if any(is_fancy_indexer(idx) for idx in indexers.values()):
return self._isel_fancy(indexers, drop=drop, missing_dims=missing_dims)
# Much faster algorithm for when all indexers are ints, slices, one-dimensional
# lists, or zero or one-dimensional np.ndarray's
indexers = drop_dims_from_indexers(indexers, self.dims, missing_dims)
variables = {}
dims: Dict[Hashable, Tuple[int, ...]] = {}
coord_names = self._coord_names.copy()
indexes = self._indexes.copy() if self._indexes is not None else None
for var_name, var_value in self._variables.items():
var_indexers = {k: v for k, v in indexers.items() if k in var_value.dims}
if var_indexers:
var_value = var_value.isel(var_indexers)
if drop and var_value.ndim == 0 and var_name in coord_names:
coord_names.remove(var_name)
if indexes:
indexes.pop(var_name, None)
continue
if indexes and var_name in indexes:
if var_value.ndim == 1:
indexes[var_name] = var_value.to_index()
else:
del indexes[var_name]
variables[var_name] = var_value
dims.update(zip(var_value.dims, var_value.shape))
return self._construct_direct(
variables=variables,
coord_names=coord_names,
dims=dims,
attrs=self._attrs,
indexes=indexes,
encoding=self._encoding,
close=self._close,
)
def _isel_fancy(
self,
indexers: Mapping[Hashable, Any],
*,
drop: bool,
missing_dims: str = "raise",
) -> "Dataset":
# Note: we need to preserve the original indexers variable in order to merge the
# coords below
indexers_list = list(self._validate_indexers(indexers, missing_dims))
variables: Dict[Hashable, Variable] = {}
indexes: Dict[Hashable, pd.Index] = {}
for name, var in self.variables.items():
var_indexers = {k: v for k, v in indexers_list if k in var.dims}
if drop and name in var_indexers:
continue # drop this variable
if name in self.indexes:
new_var, new_index = isel_variable_and_index(
name, var, self.indexes[name], var_indexers
)
if new_index is not None:
indexes[name] = new_index
elif var_indexers:
new_var = var.isel(indexers=var_indexers)
else:
new_var = var.copy(deep=False)
variables[name] = new_var
coord_names = self._coord_names & variables.keys()
selected = self._replace_with_new_dims(variables, coord_names, indexes)
# Extract coordinates from indexers
coord_vars, new_indexes = selected._get_indexers_coords_and_indexes(indexers)
variables.update(coord_vars)
indexes.update(new_indexes)
coord_names = self._coord_names & variables.keys() | coord_vars.keys()
return self._replace_with_new_dims(variables, coord_names, indexes=indexes)
def sel(
self,
indexers: Mapping[Hashable, Any] = None,
method: str = None,
tolerance: Number = None,
drop: bool = False,
**indexers_kwargs: Any,
) -> "Dataset":
"""Returns a new dataset with each array indexed by tick labels
along the specified dimension(s).
In contrast to `Dataset.isel`, indexers for this method should use
labels instead of integers.
Under the hood, this method is powered by using pandas's powerful Index
objects. This makes label based indexing essentially just as fast as
using integer indexing.
It also means this method uses pandas's (well documented) logic for
indexing. This means you can use string shortcuts for datetime indexes
(e.g., '2000-01' to select all values in January 2000). It also means
that slices are treated as inclusive of both the start and stop values,
unlike normal Python indexing.
Parameters
----------
indexers : dict, optional
A dict with keys matching dimensions and values given
by scalars, slices or arrays of tick labels. For dimensions with
multi-index, the indexer may also be a dict-like object with keys
matching index level names.
If DataArrays are passed as indexers, xarray-style indexing will be
carried out. See :ref:`indexing` for the details.
One of indexers or indexers_kwargs must be provided.
method : {None, "nearest", "pad", "ffill", "backfill", "bfill"}, optional
Method to use for inexact matches:
* None (default): only exact matches
* pad / ffill: propagate last valid index value forward
* backfill / bfill: propagate next valid index value backward
* nearest: use nearest valid index value
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
drop : bool, optional
            If ``drop=True``, drop coordinate variables in `indexers` instead
of making them scalar.
**indexers_kwargs : {dim: indexer, ...}, optional
The keyword arguments form of ``indexers``.
One of indexers or indexers_kwargs must be provided.
Returns
-------
obj : Dataset
A new Dataset with the same contents as this dataset, except each
variable and dimension is indexed by the appropriate indexers.
If indexer DataArrays have coordinates that do not conflict with
this object, then these coordinates will be attached.
In general, each array's data will be a view of the array's data
in this dataset, unless vectorized indexing was triggered by using
an array indexer, in which case the data will be a copy.
See Also
--------
Dataset.isel
DataArray.sel
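        Examples
        --------
        A short sketch (the data is illustrative):
        >>> ds = xr.Dataset({"a": ("x", [10, 20, 30])}, coords={"x": [1, 2, 3]})
        >>> sub = ds.sel(x=slice(2, 3))  # label-based; both endpoints are included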
"""
indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "sel")
pos_indexers, new_indexes = remap_label_indexers(
self, indexers=indexers, method=method, tolerance=tolerance
)
result = self.isel(indexers=pos_indexers, drop=drop)
return result._overwrite_indexes(new_indexes)
def head(
self,
indexers: Union[Mapping[Hashable, int], int] = None,
**indexers_kwargs: Any,
) -> "Dataset":
"""Returns a new dataset with the first `n` values of each array
for the specified dimension(s).
Parameters
----------
indexers : dict or int, default: 5
A dict with keys matching dimensions and integer values `n`
or a single integer `n` applied over all dimensions.
One of indexers or indexers_kwargs must be provided.
**indexers_kwargs : {dim: n, ...}, optional
The keyword arguments form of ``indexers``.
One of indexers or indexers_kwargs must be provided.
See Also
--------
Dataset.tail
Dataset.thin
DataArray.head
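        Examples
        --------
        A short sketch (the data is illustrative):
        >>> ds = xr.Dataset({"a": ("x", list(range(10)))})
        >>> first_three = ds.head(x=3)  # equivalent to ds.isel(x=slice(3))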
"""
if not indexers_kwargs:
if indexers is None:
indexers = 5
if not isinstance(indexers, int) and not is_dict_like(indexers):
raise TypeError("indexers must be either dict-like or a single integer")
if isinstance(indexers, int):
indexers = {dim: indexers for dim in self.dims}
indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "head")
for k, v in indexers.items():
if not isinstance(v, int):
raise TypeError(
"expected integer type indexer for "
"dimension %r, found %r" % (k, type(v))
)
elif v < 0:
raise ValueError(
"expected positive integer as indexer "
"for dimension %r, found %s" % (k, v)
)
indexers_slices = {k: slice(val) for k, val in indexers.items()}
return self.isel(indexers_slices)
def tail(
self,
indexers: Union[Mapping[Hashable, int], int] = None,
**indexers_kwargs: Any,
) -> "Dataset":
"""Returns a new dataset with the last `n` values of each array
for the specified dimension(s).
Parameters
----------
indexers : dict or int, default: 5
A dict with keys matching dimensions and integer values `n`
or a single integer `n` applied over all dimensions.
One of indexers or indexers_kwargs must be provided.
**indexers_kwargs : {dim: n, ...}, optional
The keyword arguments form of ``indexers``.
One of indexers or indexers_kwargs must be provided.
See Also
--------
Dataset.head
Dataset.thin
DataArray.tail
"""
if not indexers_kwargs:
if indexers is None:
indexers = 5
if not isinstance(indexers, int) and not is_dict_like(indexers):
raise TypeError("indexers must be either dict-like or a single integer")
if isinstance(indexers, int):
indexers = {dim: indexers for dim in self.dims}
indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "tail")
for k, v in indexers.items():
if not isinstance(v, int):
raise TypeError(
"expected integer type indexer for "
"dimension %r, found %r" % (k, type(v))
)
elif v < 0:
raise ValueError(
"expected positive integer as indexer "
"for dimension %r, found %s" % (k, v)
)
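        # slice(-0, None) would select everything, so an indexer of 0 is mapped
        # to an empty slice instead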
indexers_slices = {
k: slice(-val, None) if val != 0 else slice(val)
for k, val in indexers.items()
}
return self.isel(indexers_slices)
def thin(
self,
indexers: Union[Mapping[Hashable, int], int] = None,
**indexers_kwargs: Any,
) -> "Dataset":
"""Returns a new dataset with each array indexed along every `n`-th
value for the specified dimension(s)
Parameters
----------
indexers : dict or int
A dict with keys matching dimensions and integer values `n`
or a single integer `n` applied over all dimensions.
One of indexers or indexers_kwargs must be provided.
**indexers_kwargs : {dim: n, ...}, optional
The keyword arguments form of ``indexers``.
One of indexers or indexers_kwargs must be provided.
See Also
--------
Dataset.head
Dataset.tail
DataArray.thin
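        Examples
        --------
        A short sketch (the data is illustrative):
        >>> ds = xr.Dataset({"a": ("x", list(range(12)))})
        >>> every_third = ds.thin(x=3)  # equivalent to ds.isel(x=slice(None, None, 3))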
"""
if (
not indexers_kwargs
and not isinstance(indexers, int)
and not is_dict_like(indexers)
):
raise TypeError("indexers must be either dict-like or a single integer")
if isinstance(indexers, int):
indexers = {dim: indexers for dim in self.dims}
indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "thin")
for k, v in indexers.items():
if not isinstance(v, int):
raise TypeError(
"expected integer type indexer for "
"dimension %r, found %r" % (k, type(v))
)
elif v < 0:
raise ValueError(
"expected positive integer as indexer "
"for dimension %r, found %s" % (k, v)
)
elif v == 0:
raise ValueError("step cannot be zero")
indexers_slices = {k: slice(None, None, val) for k, val in indexers.items()}
return self.isel(indexers_slices)
def broadcast_like(
self, other: Union["Dataset", "DataArray"], exclude: Iterable[Hashable] = None
) -> "Dataset":
"""Broadcast this DataArray against another Dataset or DataArray.
This is equivalent to xr.broadcast(other, self)[1]
Parameters
----------
other : Dataset or DataArray
Object against which to broadcast this array.
exclude : iterable of hashable, optional
            Dimensions that must not be broadcast
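        Examples
        --------
        A minimal sketch (the data is illustrative):
        >>> ds = xr.Dataset({"a": ("x", [1, 2])})
        >>> other = xr.Dataset({"b": ("y", [10.0, 20.0, 30.0])})
        >>> expanded = ds.broadcast_like(other)  # "a" gains the "y" dimension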
"""
if exclude is None:
exclude = set()
else:
exclude = set(exclude)
args = align(other, self, join="outer", copy=False, exclude=exclude)
dims_map, common_coords = _get_broadcast_dims_map_common_coords(args, exclude)
return _broadcast_helper(args[1], exclude, dims_map, common_coords)
def reindex_like(
self,
other: Union["Dataset", "DataArray"],
method: str = None,
tolerance: Number = None,
copy: bool = True,
fill_value: Any = dtypes.NA,
) -> "Dataset":
"""Conform this object onto the indexes of another object, filling in
missing values with ``fill_value``. The default fill value is NaN.
Parameters
----------
other : Dataset or DataArray
Object with an 'indexes' attribute giving a mapping from dimension
names to pandas.Index objects, which provides coordinates upon
which to index the variables in this dataset. The indexes on this
other object need not be the same as the indexes on this
dataset. Any mis-matched index values will be filled in with
NaN, and any mis-matched dimension names will simply be ignored.
method : {None, "nearest", "pad", "ffill", "backfill", "bfill"}, optional
Method to use for filling index values from other not found in this
dataset:
* None (default): don't fill gaps
* pad / ffill: propagate last valid index value forward
* backfill / bfill: propagate next valid index value backward
* nearest: use nearest valid index value
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
copy : bool, optional
If ``copy=True``, data in the return value is always copied. If
``copy=False`` and reindexing is unnecessary, or can be performed
with only slice operations, then the output may share memory with
the input. In either case, a new xarray object is always returned.
fill_value : scalar or dict-like, optional
            Value to use for newly missing values. If a dict-like, maps
variable names to fill values.
Returns
-------
reindexed : Dataset
Another dataset, with this dataset's data but coordinates from the
other object.
See Also
--------
Dataset.reindex
align
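        Examples
        --------
        A minimal sketch (the data is illustrative):
        >>> ds = xr.Dataset({"a": ("x", [1, 2, 3])}, coords={"x": [0, 1, 2]})
        >>> other = xr.Dataset(coords={"x": [1, 2, 3]})
        >>> conformed = ds.reindex_like(other)  # label 3 is new, so "a" gets NaN there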
"""
indexers = alignment.reindex_like_indexers(self, other)
return self.reindex(
indexers=indexers,
method=method,
copy=copy,
fill_value=fill_value,
tolerance=tolerance,
)
def reindex(
self,
indexers: Mapping[Hashable, Any] = None,
method: str = None,
tolerance: Number = None,
copy: bool = True,
fill_value: Any = dtypes.NA,
**indexers_kwargs: Any,
) -> "Dataset":
"""Conform this object onto a new set of indexes, filling in
missing values with ``fill_value``. The default fill value is NaN.
Parameters
----------
indexers : dict, optional
Dictionary with keys given by dimension names and values given by
arrays of coordinates tick labels. Any mis-matched coordinate
values will be filled in with NaN, and any mis-matched dimension
names will simply be ignored.
One of indexers or indexers_kwargs must be provided.
method : {None, "nearest", "pad", "ffill", "backfill", "bfill"}, optional
Method to use for filling index values in ``indexers`` not found in
this dataset:
* None (default): don't fill gaps
* pad / ffill: propagate last valid index value forward
* backfill / bfill: propagate next valid index value backward
* nearest: use nearest valid index value
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
copy : bool, optional
If ``copy=True``, data in the return value is always copied. If
``copy=False`` and reindexing is unnecessary, or can be performed
with only slice operations, then the output may share memory with
the input. In either case, a new xarray object is always returned.
fill_value : scalar or dict-like, optional
Value to use for newly missing values. If a dict-like,
maps variable names (including coordinates) to fill values.
sparse : bool, default: False
use sparse-array.
**indexers_kwargs : {dim: indexer, ...}, optional
Keyword arguments in the same form as ``indexers``.
One of indexers or indexers_kwargs must be provided.
Returns
-------
reindexed : Dataset
Another dataset, with this dataset's data but replaced coordinates.
See Also
--------
Dataset.reindex_like
align
pandas.Index.get_indexer
Examples
--------
Create a dataset with some fictional data.
>>> import xarray as xr
>>> import pandas as pd
>>> x = xr.Dataset(
... {
... "temperature": ("station", 20 * np.random.rand(4)),
... "pressure": ("station", 500 * np.random.rand(4)),
... },
... coords={"station": ["boston", "nyc", "seattle", "denver"]},
... )
>>> x
<xarray.Dataset>
Dimensions: (station: 4)
Coordinates:
* station (station) <U7 'boston' 'nyc' 'seattle' 'denver'
Data variables:
temperature (station) float64 10.98 14.3 12.06 10.9
pressure (station) float64 211.8 322.9 218.8 445.9
>>> x.indexes
station: Index(['boston', 'nyc', 'seattle', 'denver'], dtype='object', name='station')
Create a new index and reindex the dataset. By default values in the new index that
do not have corresponding records in the dataset are assigned `NaN`.
>>> new_index = ["boston", "austin", "seattle", "lincoln"]
>>> x.reindex({"station": new_index})
<xarray.Dataset>
Dimensions: (station: 4)
Coordinates:
* station (station) <U7 'boston' 'austin' 'seattle' 'lincoln'
Data variables:
temperature (station) float64 10.98 nan 12.06 nan
pressure (station) float64 211.8 nan 218.8 nan
We can fill in the missing values by passing a value to the keyword `fill_value`.
>>> x.reindex({"station": new_index}, fill_value=0)
<xarray.Dataset>
Dimensions: (station: 4)
Coordinates:
* station (station) <U7 'boston' 'austin' 'seattle' 'lincoln'
Data variables:
temperature (station) float64 10.98 0.0 12.06 0.0
pressure (station) float64 211.8 0.0 218.8 0.0
We can also use different fill values for each variable.
>>> x.reindex(
... {"station": new_index}, fill_value={"temperature": 0, "pressure": 100}
... )
<xarray.Dataset>
Dimensions: (station: 4)
Coordinates:
* station (station) <U7 'boston' 'austin' 'seattle' 'lincoln'
Data variables:
temperature (station) float64 10.98 0.0 12.06 0.0
pressure (station) float64 211.8 100.0 218.8 100.0
Because the index is not monotonically increasing or decreasing, we cannot use arguments
to the keyword method to fill the `NaN` values.
>>> x.reindex({"station": new_index}, method="nearest")
Traceback (most recent call last):
...
raise ValueError('index must be monotonic increasing or decreasing')
ValueError: index must be monotonic increasing or decreasing
To further illustrate the filling functionality in reindex, we will create a
dataset with a monotonically increasing index (for example, a sequence of dates).
>>> x2 = xr.Dataset(
... {
... "temperature": (
... "time",
... [15.57, 12.77, np.nan, 0.3081, 16.59, 15.12],
... ),
... "pressure": ("time", 500 * np.random.rand(6)),
... },
... coords={"time": pd.date_range("01/01/2019", periods=6, freq="D")},
... )
>>> x2
<xarray.Dataset>
Dimensions: (time: 6)
Coordinates:
* time (time) datetime64[ns] 2019-01-01 2019-01-02 ... 2019-01-06
Data variables:
temperature (time) float64 15.57 12.77 nan 0.3081 16.59 15.12
pressure (time) float64 481.8 191.7 395.9 264.4 284.0 462.8
Suppose we decide to expand the dataset to cover a wider date range.
>>> time_index2 = pd.date_range("12/29/2018", periods=10, freq="D")
>>> x2.reindex({"time": time_index2})
<xarray.Dataset>
Dimensions: (time: 10)
Coordinates:
* time (time) datetime64[ns] 2018-12-29 2018-12-30 ... 2019-01-07
Data variables:
temperature (time) float64 nan nan nan 15.57 ... 0.3081 16.59 15.12 nan
pressure (time) float64 nan nan nan 481.8 ... 264.4 284.0 462.8 nan
The index entries that did not have a value in the original dataset (for example, `2018-12-29`)
are by default filled with NaN. If desired, we can fill in the missing values using one of several options.
For example, to back-propagate the last valid value to fill the `NaN` values,
pass `bfill` as an argument to the `method` keyword.
>>> x3 = x2.reindex({"time": time_index2}, method="bfill")
>>> x3
<xarray.Dataset>
Dimensions: (time: 10)
Coordinates:
* time (time) datetime64[ns] 2018-12-29 2018-12-30 ... 2019-01-07
Data variables:
temperature (time) float64 15.57 15.57 15.57 15.57 ... 16.59 15.12 nan
pressure (time) float64 481.8 481.8 481.8 481.8 ... 284.0 462.8 nan
Please note that the `NaN` value present in the original dataset (at index value `2019-01-03`)
will not be filled by any of the value propagation schemes.
>>> x2.where(x2.temperature.isnull(), drop=True)
<xarray.Dataset>
Dimensions: (time: 1)
Coordinates:
* time (time) datetime64[ns] 2019-01-03
Data variables:
temperature (time) float64 nan
pressure (time) float64 395.9
>>> x3.where(x3.temperature.isnull(), drop=True)
<xarray.Dataset>
Dimensions: (time: 2)
Coordinates:
* time (time) datetime64[ns] 2019-01-03 2019-01-07
Data variables:
temperature (time) float64 nan nan
pressure (time) float64 395.9 nan
This is because filling while reindexing does not look at dataset values, but only compares
the original and desired indexes. If you do want to fill in the `NaN` values present in the
original dataset, use the :py:meth:`~Dataset.fillna()` method.
"""
return self._reindex(
indexers,
method,
tolerance,
copy,
fill_value,
sparse=False,
**indexers_kwargs,
)
def _reindex(
self,
indexers: Mapping[Hashable, Any] = None,
method: str = None,
tolerance: Number = None,
copy: bool = True,
fill_value: Any = dtypes.NA,
sparse: bool = False,
**indexers_kwargs: Any,
) -> "Dataset":
"""
Same as reindex but supports the sparse option.
"""
indexers = utils.either_dict_or_kwargs(indexers, indexers_kwargs, "reindex")
bad_dims = [d for d in indexers if d not in self.dims]
if bad_dims:
raise ValueError("invalid reindex dimensions: %s" % bad_dims)
variables, indexes = alignment.reindex_variables(
self.variables,
self.sizes,
self.indexes,
indexers,
method,
tolerance,
copy=copy,
fill_value=fill_value,
sparse=sparse,
)
coord_names = set(self._coord_names)
coord_names.update(indexers)
return self._replace_with_new_dims(variables, coord_names, indexes=indexes)
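# Note (hedged): ``sparse=True`` on this internal ``_reindex`` fills newly
# missing entries with sparse arrays instead of dense ones; ``unstack`` uses
# this path when a sparse result is requested.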
def interp(
self,
coords: Mapping[Hashable, Any] = None,
method: str = "linear",
assume_sorted: bool = False,
kwargs: Mapping[str, Any] = None,
**coords_kwargs: Any,
) -> "Dataset":
"""Multidimensional interpolation of Dataset.
Parameters
----------
coords : dict, optional
Mapping from dimension names to the new coordinates.
New coordinate can be a scalar, array-like or DataArray.
If DataArrays are passed as new coordinates, their dimensions are
used for the broadcasting. Missing values are skipped.
method : str, optional
{"linear", "nearest"} for multidimensional array,
{"linear", "nearest", "zero", "slinear", "quadratic", "cubic"}
for 1-dimensional array. "linear" is used by default.
assume_sorted : bool, optional
If False, values of coordinates that are interpolated over can be
in any order and they are sorted first. If True, interpolated
coordinates are assumed to be an array of monotonically increasing
values.
kwargs : dict, optional
Additional keyword arguments passed to scipy's interpolator. Valid
options and their behavior depend on if 1-dimensional or
multi-dimensional interpolation is used.
**coords_kwargs : {dim: coordinate, ...}, optional
The keyword arguments form of ``coords``.
One of coords or coords_kwargs must be provided.
Returns
-------
interpolated : Dataset
New dataset on the new coordinates.
Notes
-----
scipy is required.
See Also
--------
scipy.interpolate.interp1d
scipy.interpolate.interpn
Examples
--------
>>> ds = xr.Dataset(
... data_vars={
... "a": ("x", [5, 7, 4]),
... "b": (
... ("x", "y"),
... [[1, 4, 2, 9], [2, 7, 6, np.nan], [6, np.nan, 5, 8]],
... ),
... },
... coords={"x": [0, 1, 2], "y": [10, 12, 14, 16]},
... )
>>> ds
<xarray.Dataset>
Dimensions: (x: 3, y: 4)
Coordinates:
* x (x) int64 0 1 2
* y (y) int64 10 12 14 16
Data variables:
a (x) int64 5 7 4
b (x, y) float64 1.0 4.0 2.0 9.0 2.0 7.0 6.0 nan 6.0 nan 5.0 8.0
1D interpolation with the default method (linear):
>>> ds.interp(x=[0, 0.75, 1.25, 1.75])
<xarray.Dataset>
Dimensions: (x: 4, y: 4)
Coordinates:
* y (y) int64 10 12 14 16
* x (x) float64 0.0 0.75 1.25 1.75
Data variables:
a (x) float64 5.0 6.5 6.25 4.75
b (x, y) float64 1.0 4.0 2.0 nan 1.75 6.25 ... nan 5.0 nan 5.25 nan
1D interpolation with a different method:
>>> ds.interp(x=[0, 0.75, 1.25, 1.75], method="nearest")
<xarray.Dataset>
Dimensions: (x: 4, y: 4)
Coordinates:
* y (y) int64 10 12 14 16
* x (x) float64 0.0 0.75 1.25 1.75
Data variables:
a (x) float64 5.0 7.0 7.0 4.0
b (x, y) float64 1.0 4.0 2.0 9.0 2.0 7.0 ... 6.0 nan 6.0 nan 5.0 8.0
1D extrapolation:
>>> ds.interp(
... x=[1, 1.5, 2.5, 3.5],
... method="linear",
... kwargs={"fill_value": "extrapolate"},
... )
<xarray.Dataset>
Dimensions: (x: 4, y: 4)
Coordinates:
* y (y) int64 10 12 14 16
* x (x) float64 1.0 1.5 2.5 3.5
Data variables:
a (x) float64 7.0 5.5 2.5 -0.5
b (x, y) float64 2.0 7.0 6.0 nan 4.0 nan ... 4.5 nan 12.0 nan 3.5 nan
2D interpolation:
>>> ds.interp(x=[0, 0.75, 1.25, 1.75], y=[11, 13, 15], method="linear")
<xarray.Dataset>
Dimensions: (x: 4, y: 3)
Coordinates:
* x (x) float64 0.0 0.75 1.25 1.75
* y (y) int64 11 13 15
Data variables:
a (x) float64 5.0 6.5 6.25 4.75
b (x, y) float64 2.5 3.0 nan 4.0 5.625 nan nan nan nan nan nan nan
"""
from . import missing
if kwargs is None:
kwargs = {}
coords = either_dict_or_kwargs(coords, coords_kwargs, "interp")
indexers = dict(self._validate_interp_indexers(coords))
if coords:
# This avoids broadcasting over coordinates that are both in
# the original array AND in the indexing array. It essentially
# forces interpolation along the shared coordinates.
sdims = (
set(self.dims)
.intersection(*[set(nx.dims) for nx in indexers.values()])
.difference(coords.keys())
)
indexers.update({d: self.variables[d] for d in sdims})
obj = self if assume_sorted else self.sortby([k for k in coords])
def maybe_variable(obj, k):
# workaround to get variable for dimension without coordinate.
try:
return obj._variables[k]
except KeyError:
return as_variable((k, range(obj.dims[k])))
def _validate_interp_indexer(x, new_x):
# In the case of datetimes, the restrictions placed on indexers
# used with interp are stronger than those which are placed on
# isel, so we need an additional check after _validate_indexers.
if _contains_datetime_like_objects(
x
) and not _contains_datetime_like_objects(new_x):
raise TypeError(
"When interpolating over a datetime-like "
"coordinate, the coordinates to "
"interpolate to must be either datetime "
"strings or datetimes. "
"Instead got\n{}".format(new_x)
)
return x, new_x
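# Hedged illustration of the rule enforced above: when a coordinate holds
# datetime64 values, the interpolation targets must themselves be
# datetime-like (e.g. ``ds.interp(time=np.datetime64("2019-01-03"))``);
# purely numeric targets are rejected with the TypeError above.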
variables: Dict[Hashable, Variable] = {}
for name, var in obj._variables.items():
if name in indexers:
continue
if var.dtype.kind in "uifc":
var_indexers = {
k: _validate_interp_indexer(maybe_variable(obj, k), v)
for k, v in indexers.items()
if k in var.dims
}
variables[name] = missing.interp(var, var_indexers, method, **kwargs)
elif all(d not in indexers for d in var.dims):
# keep unrelated object array
variables[name] = var
coord_names = obj._coord_names & variables.keys()
indexes = {k: v for k, v in obj.indexes.items() if k not in indexers}
selected = self._replace_with_new_dims(
variables.copy(), coord_names, indexes=indexes
)
# attach indexer as coordinate
variables.update(indexers)
for k, v in indexers.items():
assert isinstance(v, Variable)
if v.dims == (k,):
indexes[k] = v.to_index()
# Extract coordinates from indexers
coord_vars, new_indexes = selected._get_indexers_coords_and_indexes(coords)
variables.update(coord_vars)
indexes.update(new_indexes)
coord_names = obj._coord_names & variables.keys() | coord_vars.keys()
return self._replace_with_new_dims(variables, coord_names, indexes=indexes)
def interp_like(
self,
other: Union["Dataset", "DataArray"],
method: str = "linear",
assume_sorted: bool = False,
kwargs: Mapping[str, Any] = None,
) -> "Dataset":
"""Interpolate this object onto the coordinates of another object,
filling the out of range values with NaN.
Parameters
----------
other : Dataset or DataArray
Object with an 'indexes' attribute giving a mapping from dimension
names to a 1d array-like, which provides coordinates upon
which to index the variables in this dataset. Missing values are skipped.
method : str, optional
{"linear", "nearest"} for multidimensional array,
{"linear", "nearest", "zero", "slinear", "quadratic", "cubic"}
for 1-dimensional array. 'linear' is used by default.
assume_sorted : bool, optional
If False, values of coordinates that are interpolated over can be
in any order and they are sorted first. If True, interpolated
coordinates are assumed to be an array of monotonically increasing
values.
kwargs : dict, optional
Additional keyword arguments passed to scipy's interpolator.
Returns
-------
interpolated : Dataset
Another dataset by interpolating this dataset's data along the
coordinates of the other object.
Notes
-----
scipy is required.
If the dataset has object-type coordinates, reindex is used for these
coordinates instead of the interpolation.
See Also
--------
Dataset.interp
Dataset.reindex_like
"""
if kwargs is None:
kwargs = {}
coords = alignment.reindex_like_indexers(self, other)
numeric_coords: Dict[Hashable, pd.Index] = {}
object_coords: Dict[Hashable, pd.Index] = {}
for k, v in coords.items():
if v.dtype.kind in "uifcMm":
numeric_coords[k] = v
else:
object_coords[k] = v
ds = self
if object_coords:
# We do not support interpolation along object coordinate.
# reindex instead.
ds = self.reindex(object_coords)
return ds.interp(numeric_coords, method, assume_sorted, kwargs)
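# A minimal usage sketch for ``interp_like`` (hedged; names are illustrative):
#
#     coarse = xr.Dataset({"a": ("x", [0.0, 10.0])}, coords={"x": [0, 10]})
#     fine = xr.Dataset(coords={"x": [0, 2, 4, 6, 8, 10]})
#     coarse.interp_like(fine)  # linearly interpolates "a" onto fine["x"]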
# Helper methods for rename()
def _rename_vars(self, name_dict, dims_dict):
variables = {}
coord_names = set()
for k, v in self.variables.items():
var = v.copy(deep=False)
var.dims = tuple(dims_dict.get(dim, dim) for dim in v.dims)
name = name_dict.get(k, k)
if name in variables:
raise ValueError(f"the new name {name!r} conflicts")
variables[name] = var
if k in self._coord_names:
coord_names.add(name)
return variables, coord_names
def _rename_dims(self, name_dict):
return {name_dict.get(k, k): v for k, v in self.dims.items()}
def _rename_indexes(self, name_dict, dims_set):
if self._indexes is None:
return None
indexes = {}
for k, v in self.indexes.items():
new_name = name_dict.get(k, k)
if new_name not in dims_set:
continue
if isinstance(v, pd.MultiIndex):
new_names = [name_dict.get(k, k) for k in v.names]
index = v.rename(names=new_names)
else:
index = v.rename(new_name)
indexes[new_name] = index
return indexes
def _rename_all(self, name_dict, dims_dict):
variables, coord_names = self._rename_vars(name_dict, dims_dict)
dims = self._rename_dims(dims_dict)
indexes = self._rename_indexes(name_dict, dims.keys())
return variables, coord_names, dims, indexes
def rename(
self,
name_dict: Mapping[Hashable, Hashable] = None,
**names: Hashable,
) -> "Dataset":
"""Returns a new object with renamed variables and dimensions.
Parameters
----------
name_dict : dict-like, optional
Dictionary whose keys are current variable or dimension names and
whose values are the desired names.
**names : optional
Keyword form of ``name_dict``.
One of name_dict or names must be provided.
Returns
-------
renamed : Dataset
Dataset with renamed variables and dimensions.
See Also
--------
Dataset.swap_dims
Dataset.rename_vars
Dataset.rename_dims
DataArray.rename
"""
name_dict = either_dict_or_kwargs(name_dict, names, "rename")
for k in name_dict.keys():
if k not in self and k not in self.dims:
raise ValueError(
"cannot rename %r because it is not a "
"variable or dimension in this dataset" % k
)
variables, coord_names, dims, indexes = self._rename_all(
name_dict=name_dict, dims_dict=name_dict
)
assert_unique_multiindex_level_names(variables)
return self._replace(variables, coord_names, dims=dims, indexes=indexes)
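# A minimal usage sketch for ``rename`` (hedged; names are illustrative):
#
#     ds.rename({"temperature": "temp", "x": "lon"})
#     # renames a data variable and a dimension/coordinate in one call.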
def rename_dims(
self, dims_dict: Mapping[Hashable, Hashable] = None, **dims: Hashable
) -> "Dataset":
"""Returns a new object with renamed dimensions only.
Parameters
----------
dims_dict : dict-like, optional
Dictionary whose keys are current dimension names and
whose values are the desired names. The desired names must
not be the name of an existing dimension or Variable in the Dataset.
**dims : optional
Keyword form of ``dims_dict``.
One of dims_dict or dims must be provided.
Returns
-------
renamed : Dataset
Dataset with renamed dimensions.
See Also
--------
Dataset.swap_dims
Dataset.rename
Dataset.rename_vars
DataArray.rename
"""
dims_dict = either_dict_or_kwargs(dims_dict, dims, "rename_dims")
for k, v in dims_dict.items():
if k not in self.dims:
raise ValueError(
"cannot rename %r because it is not a "
"dimension in this dataset" % k
)
if v in self.dims or v in self:
raise ValueError(
f"Cannot rename {k} to {v} because {v} already exists. "
"Try using swap_dims instead."
)
variables, coord_names, sizes, indexes = self._rename_all(
name_dict={}, dims_dict=dims_dict
)
return self._replace(variables, coord_names, dims=sizes, indexes=indexes)
def rename_vars(
self, name_dict: Mapping[Hashable, Hashable] = None, **names: Hashable
) -> "Dataset":
"""Returns a new object with renamed variables including coordinates
Parameters
----------
name_dict : dict-like, optional
Dictionary whose keys are current variable or coordinate names and
whose values are the desired names.
**names : optional
Keyword form of ``name_dict``.
One of name_dict or names must be provided.
Returns
-------
renamed : Dataset
Dataset with renamed variables including coordinates
See Also
--------
Dataset.swap_dims
Dataset.rename
Dataset.rename_dims
DataArray.rename
"""
name_dict = either_dict_or_kwargs(name_dict, names, "rename_vars")
for k in name_dict:
if k not in self:
raise ValueError(
"cannot rename %r because it is not a "
"variable or coordinate in this dataset" % k
)
variables, coord_names, dims, indexes = self._rename_all(
name_dict=name_dict, dims_dict={}
)
return self._replace(variables, coord_names, dims=dims, indexes=indexes)
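# Hedged sketch of the difference between the two narrower helpers:
#
#     ds.rename_dims({"x": "x_new"})   # renames only the dimension "x"
#     ds.rename_vars({"a": "a_new"})   # renames only the variable/coordinate "a"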
def swap_dims(
self, dims_dict: Mapping[Hashable, Hashable] = None, **dims_kwargs
) -> "Dataset":
"""Returns a new object with swapped dimensions.
Parameters
----------
dims_dict : dict-like
Dictionary whose keys are current dimension names and whose values
are new names.
**dims_kwargs : {existing_dim: new_dim, ...}, optional
The keyword arguments form of ``dims_dict``.
One of dims_dict or dims_kwargs must be provided.
Returns
-------
swapped : Dataset
Dataset with swapped dimensions.
Examples
--------
>>> ds = xr.Dataset(
... data_vars={"a": ("x", [5, 7]), "b": ("x", [0.1, 2.4])},
... coords={"x": ["a", "b"], "y": ("x", [0, 1])},
... )
>>> ds
<xarray.Dataset>
Dimensions: (x: 2)
Coordinates:
* x (x) <U1 'a' 'b'
y (x) int64 0 1
Data variables:
a (x) int64 5 7
b (x) float64 0.1 2.4
>>> ds.swap_dims({"x": "y"})
<xarray.Dataset>
Dimensions: (y: 2)
Coordinates:
x (y) <U1 'a' 'b'
* y (y) int64 0 1
Data variables:
a (y) int64 5 7
b (y) float64 0.1 2.4
>>> ds.swap_dims({"x": "z"})
<xarray.Dataset>
Dimensions: (z: 2)
Coordinates:
x (z) <U1 'a' 'b'
y (z) int64 0 1
Dimensions without coordinates: z
Data variables:
a (z) int64 5 7
b (z) float64 0.1 2.4
See Also
--------
Dataset.rename
DataArray.swap_dims
"""
# TODO: deprecate this method in favor of a (less confusing)
# rename_dims() method that only renames dimensions.
dims_dict = either_dict_or_kwargs(dims_dict, dims_kwargs, "swap_dims")
for k, v in dims_dict.items():
if k not in self.dims:
raise ValueError(
"cannot swap from dimension %r because it is "
"not an existing dimension" % k
)
if v in self.variables and self.variables[v].dims != (k,):
raise ValueError(
"replacement dimension %r is not a 1D "
"variable along the old dimension %r" % (v, k)
)
result_dims = {dims_dict.get(dim, dim) for dim in self.dims}
coord_names = self._coord_names.copy()
coord_names.update({dim for dim in dims_dict.values() if dim in self.variables})
variables: Dict[Hashable, Variable] = {}
indexes: Dict[Hashable, pd.Index] = {}
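# For each variable: variables that back a result dimension are promoted to
# index variables (reusing an existing index or building one named after the
# new dimension); all other variables are demoted to plain base variables.
# In both cases the dims are remapped through ``dims_dict``.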
for k, v in self.variables.items():
dims = tuple(dims_dict.get(dim, dim) for dim in v.dims)
if k in result_dims:
var = v.to_index_variable()
if k in self.indexes:
indexes[k] = self.indexes[k]
else:
new_index = var.to_index()
if new_index.nlevels == 1:
# make sure index name matches dimension name
new_index = new_index.rename(k)
indexes[k] = new_index
else:
var = v.to_base_variable()
var.dims = dims
variables[k] = var
return self._replace_with_new_dims(variables, coord_names, indexes=indexes)
def expand_dims(
self,
dim: Union[None, Hashable, Sequence[Hashable], Mapping[Hashable, Any]] = None,
axis: Union[None, int, Sequence[int]] = None,
**dim_kwargs: Any,
) -> "Dataset":
"""Return a new object with an additional axis (or axes) inserted at
the corresponding position in the array shape. The new object is a
view into the underlying array, not a copy.
If dim is already a scalar coordinate, it will be promoted to a 1D
coordinate consisting of a single value.
Parameters
----------
dim : hashable, sequence of hashable, mapping, or None
Dimensions to include on the new variable. If provided as hashable
or sequence of hashable, then dimensions are inserted with length
1. If provided as a mapping, then the keys are the new dimensions
and the values are either integers (giving the length of the new
dimensions) or array-like (giving the coordinates of the new
dimensions).
axis : int, sequence of int, or None
Axis position(s) where new axis is to be inserted (position(s) on
the result array). If a list (or tuple) of integers is passed,
multiple axes are inserted. In this case, the dim argument should be
a list of the same length. If axis=None is passed, all the axes will
be inserted at the start of the result array.
**dim_kwargs : int or sequence or ndarray
The keywords are arbitrary dimensions being inserted and the values
are either the lengths of the new dims (if int is given), or their
coordinates. Note, this is an alternative to passing a dict to the
dim kwarg and will only be used if dim is None.
Returns
-------
expanded : same type as caller
This object, but with additional dimension(s).
"""
if dim is None:
pass
elif isinstance(dim, Mapping):
# We're later going to modify dim in place; don't tamper with
# the input
dim = dict(dim)
elif isinstance(dim, int):
raise TypeError(
"dim should be hashable or sequence of hashables or mapping"
)
elif isinstance(dim, str) or not isinstance(dim, Sequence):
dim = {dim: 1}
elif isinstance(dim, Sequence):
if len(dim) != len(set(dim)):
raise ValueError("dims should not contain duplicate values.")
dim = {d: 1 for d in dim}
dim = either_dict_or_kwargs(dim, dim_kwargs, "expand_dims")
assert isinstance(dim, MutableMapping)
if axis is None:
axis = list(range(len(dim)))
elif not isinstance(axis, Sequence):
axis = [axis]
if len(dim) != len(axis):
raise ValueError("lengths of dim and axis should be identical.")
for d in dim:
if d in self.dims:
raise ValueError(f"Dimension {d} already exists.")
if d in self._variables and not utils.is_scalar(self._variables[d]):
raise ValueError(
"{dim} already exists as coordinate or"
" variable name.".format(dim=d)
)
variables: Dict[Hashable, Variable] = {}
coord_names = self._coord_names.copy()
# If dim is a dict, then ensure that the values are either integers
# or iterables.
for k, v in dim.items():
if hasattr(v, "__iter__"):
# If the value for the new dimension is an iterable, then
# save the coordinates to the variables dict, and set the
# value within the dim dict to the length of the iterable
# for later use.
variables[k] = xr.IndexVariable((k,), v)
coord_names.add(k)
dim[k] = variables[k].size
elif isinstance(v, int):
pass # Do nothing if the dimensions value is just an int
else:
raise TypeError(
"The value of new dimension {k} must be "
"an iterable or an int".format(k=k)
)
for k, v in self._variables.items():
if k not in dim:
if k in coord_names: # Do not change coordinates
variables[k] = v
else:
result_ndim = len(v.dims) + len(axis)
for a in axis:
if a < -result_ndim or result_ndim - 1 < a:
raise IndexError(
f"Axis {a} of variable {k} is out of bounds of the "
f"expanded dimension size {result_ndim}"
)
axis_pos = [a if a >= 0 else result_ndim + a for a in axis]
if len(axis_pos) != len(set(axis_pos)):
raise ValueError("axis should not contain duplicate values")
# We need to sort them to make sure `axis` equals to the
# axis positions of the result array.
zip_axis_dim = sorted(zip(axis_pos, dim.items()))
all_dims = list(zip(v.dims, v.shape))
for d, c in zip_axis_dim:
all_dims.insert(d, c)
variables[k] = v.set_dims(dict(all_dims))
else:
# If dims includes a label of a non-dimension coordinate,
# it will be promoted to a 1D coordinate with a single value.
variables[k] = v.set_dims(k).to_index_variable()
new_dims = self._dims.copy()
new_dims.update(dim)
return self._replace_vars_and_dims(
variables, dims=new_dims, coord_names=coord_names
)
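# A minimal usage sketch for ``expand_dims`` (hedged; names are illustrative):
#
#     ds.expand_dims("batch")               # new length-1 dimension
#     ds.expand_dims({"time": [0, 1, 2]})   # new dimension with coordinates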
def set_index(
self,
indexes: Mapping[Hashable, Union[Hashable, Sequence[Hashable]]] = None,
append: bool = False,
**indexes_kwargs: Union[Hashable, Sequence[Hashable]],
) -> "Dataset":
"""Set Dataset (multi-)indexes using one or more existing coordinates
or variables.
Parameters
----------
indexes : {dim: index, ...}
Mapping from names matching dimensions and values given
by (lists of) the names of existing coordinates or variables to set
as new (multi-)index.
append : bool, optional
If True, append the supplied index(es) to the existing index(es).
Otherwise replace the existing index(es) (default).
**indexes_kwargs : optional
The keyword arguments form of ``indexes``.
One of indexes or indexes_kwargs must be provided.
Returns
-------
obj : Dataset
Another dataset, with this dataset's data but replaced coordinates.
Examples
--------
>>> arr = xr.DataArray(
... data=np.ones((2, 3)),
... dims=["x", "y"],
... coords={"x": range(2), "y": range(3), "a": ("x", [3, 4])},
... )
>>> ds = xr.Dataset({"v": arr})
>>> ds
<xarray.Dataset>
Dimensions: (x: 2, y: 3)
Coordinates:
* x (x) int64 0 1
* y (y) int64 0 1 2
a (x) int64 3 4
Data variables:
v (x, y) float64 1.0 1.0 1.0 1.0 1.0 1.0
>>> ds.set_index(x="a")
<xarray.Dataset>
Dimensions: (x: 2, y: 3)
Coordinates:
* x (x) int64 3 4
* y (y) int64 0 1 2
Data variables:
v (x, y) float64 1.0 1.0 1.0 1.0 1.0 1.0
See Also
--------
Dataset.reset_index
Dataset.swap_dims
"""
indexes = either_dict_or_kwargs(indexes, indexes_kwargs, "set_index")
variables, coord_names = merge_indexes(
indexes, self._variables, self._coord_names, append=append
)
return self._replace_vars_and_dims(variables, coord_names=coord_names)
def reset_index(
self,
dims_or_levels: Union[Hashable, Sequence[Hashable]],
drop: bool = False,
) -> "Dataset":
"""Reset the specified index(es) or multi-index level(s).
Parameters
----------
dims_or_levels : str or list
Name(s) of the dimension(s) and/or multi-index level(s) that will
be reset.
drop : bool, optional
If True, remove the specified indexes and/or multi-index levels
instead of extracting them as new coordinates (default: False).
Returns
-------
obj : Dataset
Another dataset, with this dataset's data but replaced coordinates.
See Also
--------
Dataset.set_index
"""
variables, coord_names = split_indexes(
dims_or_levels,
self._variables,
self._coord_names,
cast(Mapping[Hashable, Hashable], self._level_coords),
drop=drop,
)
return self._replace_vars_and_dims(variables, coord_names=coord_names)
def reorder_levels(
self,
dim_order: Mapping[Hashable, Sequence[int]] = None,
**dim_order_kwargs: Sequence[int],
) -> "Dataset":
"""Rearrange index levels using input order.
Parameters
----------
dim_order : optional
Mapping from names matching dimensions and values given
by lists representing new level orders. Every given dimension
must have a multi-index.
**dim_order_kwargs : optional
The keyword arguments form of ``dim_order``.
One of dim_order or dim_order_kwargs must be provided.
Returns
-------
obj : Dataset
Another dataset, with this dataset's data but replaced
coordinates.
"""
dim_order = either_dict_or_kwargs(dim_order, dim_order_kwargs, "reorder_levels")
variables = self._variables.copy()
indexes = dict(self.indexes)
for dim, order in dim_order.items():
coord = self._variables[dim]
index = self.indexes[dim]
if not isinstance(index, pd.MultiIndex):
raise ValueError(f"coordinate {dim} has no MultiIndex")
new_index = index.reorder_levels(order)
variables[dim] = IndexVariable(coord.dims, new_index)
indexes[dim] = new_index
return self._replace(variables, indexes=indexes)
def _stack_once(self, dims, new_dim):
if ... in dims:
dims = list(infix_dims(dims, self.dims))
variables = {}
for name, var in self.variables.items():
if name not in dims:
if any(d in var.dims for d in dims):
add_dims = [d for d in dims if d not in var.dims]
vdims = list(var.dims) + add_dims
shape = [self.dims[d] for d in vdims]
exp_var = var.set_dims(vdims, shape)
stacked_var = exp_var.stack(**{new_dim: dims})
variables[name] = stacked_var
else:
variables[name] = var.copy(deep=False)
# consider dropping levels that are unused?
levels = [self.get_index(dim) for dim in dims]
idx = utils.multiindex_from_product_levels(levels, names=dims)
variables[new_dim] = IndexVariable(new_dim, idx)
coord_names = set(self._coord_names) - set(dims) | {new_dim}
indexes = {k: v for k, v in self.indexes.items() if k not in dims}
indexes[new_dim] = idx
return self._replace_with_new_dims(
variables, coord_names=coord_names, indexes=indexes
)
def stack(
self,
dimensions: Mapping[Hashable, Sequence[Hashable]] = None,
**dimensions_kwargs: Sequence[Hashable],
) -> "Dataset":
"""
Stack any number of existing dimensions into a single new dimension.
New dimensions will be added at the end, and the corresponding
coordinate variables will be combined into a MultiIndex.
Parameters
----------
dimensions : mapping of hashable to sequence of hashable
Mapping of the form `new_name=(dim1, dim2, ...)`. Names of new
dimensions, and the existing dimensions that they replace. An
ellipsis (`...`) will be replaced by all unlisted dimensions.
Passing a list containing an ellipsis (`stacked_dim=[...]`) will stack over
all dimensions.
**dimensions_kwargs
The keyword arguments form of ``dimensions``.
One of dimensions or dimensions_kwargs must be provided.
Returns
-------
stacked : Dataset
Dataset with stacked data.
See Also
--------
Dataset.unstack
"""
dimensions = either_dict_or_kwargs(dimensions, dimensions_kwargs, "stack")
result = self
for new_dim, dims in dimensions.items():
result = result._stack_once(dims, new_dim)
return result
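# A minimal usage sketch for ``stack`` (hedged; names are illustrative):
#
#     stacked = ds.stack(z=("x", "y"))
#     # "z" is indexed by a MultiIndex with levels "x" and "y";
#     # ds.stack(z=[...]) stacks over all dimensions.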
def to_stacked_array(
self,
new_dim: Hashable,
sample_dims: Sequence[Hashable],
variable_dim: str = "variable",
name: Hashable = None,
) -> "DataArray":
"""Combine variables of differing dimensionality into a DataArray
without broadcasting.
This method is similar to Dataset.to_array but does not broadcast the
variables.
Parameters
----------
new_dim : hashable
Name of the new stacked coordinate
sample_dims : sequence of hashable
Dimensions that **will not** be stacked. Each array in the dataset
must share these dimensions. For machine learning applications,
these define the dimensions over which samples are drawn.
variable_dim : str, optional
Name of the level in the stacked coordinate which corresponds to
the variables.
name : str, optional
Name of the new data array.
Returns
-------
stacked : DataArray
DataArray with the specified dimensions and data variables
stacked together. The stacked coordinate is named ``new_dim``
and represented by a MultiIndex object with a level containing the
data variable names. The name of this level is controlled using
the ``variable_dim`` argument.
See Also
--------
Dataset.to_array
Dataset.stack
DataArray.to_unstacked_dataset
Examples
--------
>>> data = xr.Dataset(
... data_vars={
... "a": (("x", "y"), [[0, 1, 2], [3, 4, 5]]),
... "b": ("x", [6, 7]),
... },
... coords={"y": ["u", "v", "w"]},
... )
>>> data
<xarray.Dataset>
Dimensions: (x: 2, y: 3)
Coordinates:
* y (y) <U1 'u' 'v' 'w'
Dimensions without coordinates: x
Data variables:
a (x, y) int64 0 1 2 3 4 5
b (x) int64 6 7
>>> data.to_stacked_array("z", sample_dims=["x"])
<xarray.DataArray 'a' (x: 2, z: 4)>
array([[0, 1, 2, 6],
[3, 4, 5, 7]])
Coordinates:
* z (z) MultiIndex
- variable (z) object 'a' 'a' 'a' 'b'
- y (z) object 'u' 'v' 'w' nan
Dimensions without coordinates: x
"""
stacking_dims = tuple(dim for dim in self.dims if dim not in sample_dims)
for variable in self:
dims = self[variable].dims
dims_include_sample_dims = set(sample_dims) <= set(dims)
if not dims_include_sample_dims:
raise ValueError(
"All variables in the dataset must contain the "
"dimensions {}.".format(dims)
)
def ensure_stackable(val):
assign_coords = {variable_dim: val.name}
for dim in stacking_dims:
if dim not in val.dims:
assign_coords[dim] = None
expand_dims = set(stacking_dims).difference(set(val.dims))
expand_dims.add(variable_dim)
# must be list for .expand_dims
expand_dims = list(expand_dims)
return (
val.assign_coords(**assign_coords)
.expand_dims(expand_dims)
.stack({new_dim: (variable_dim,) + stacking_dims})
)
# concatenate the arrays
stackable_vars = [ensure_stackable(self[key]) for key in self.data_vars]
data_array = xr.concat(stackable_vars, dim=new_dim)
# coerce the levels of the MultiIndex to have the same type as the
# input dimensions. This code is messy, so it might be better to just
# input a dummy value for the singleton dimension.
idx = data_array.indexes[new_dim]
levels = [idx.levels[0]] + [
level.astype(self[level.name].dtype) for level in idx.levels[1:]
]
new_idx = idx.set_levels(levels)
data_array[new_dim] = IndexVariable(new_dim, new_idx)
if name is not None:
data_array.name = name
return data_array
def _unstack_once(self, dim: Hashable, fill_value) -> "Dataset":
index = self.get_index(dim)
index = remove_unused_levels_categories(index)
variables: Dict[Hashable, Variable] = {}
indexes = {k: v for k, v in self.indexes.items() if k != dim}
for name, var in self.variables.items():
if name != dim:
if dim in var.dims:
if isinstance(fill_value, Mapping):
fill_value_ = fill_value[name]
else:
fill_value_ = fill_value
variables[name] = var._unstack_once(
index=index, dim=dim, fill_value=fill_value_
)
else:
variables[name] = var
for name, lev in zip(index.names, index.levels):
variables[name] = IndexVariable(name, lev)
indexes[name] = lev
coord_names = set(self._coord_names) - {dim} | set(index.names)
return self._replace_with_new_dims(
variables, coord_names=coord_names, indexes=indexes
)
def _unstack_full_reindex(
self, dim: Hashable, fill_value, sparse: bool
) -> "Dataset":
index = self.get_index(dim)
index = remove_unused_levels_categories(index)
full_idx = pd.MultiIndex.from_product(index.levels, names=index.names)
# take a shortcut in case the MultiIndex was not modified.
if index.equals(full_idx):
obj = self
else:
obj = self._reindex(
{dim: full_idx}, copy=False, fill_value=fill_value, sparse=sparse
)
new_dim_names = index.names
new_dim_sizes = [lev.size for lev in index.levels]
variables: Dict[Hashable, Variable] = {}
indexes = {k: v for k, v in self.indexes.items() if k != dim}
for name, var in obj.variables.items():
if name != dim:
if dim in var.dims:
new_dims = dict(zip(new_dim_names, new_dim_sizes))
variables[name] = var.unstack({dim: new_dims})
else:
variables[name] = var
for name, lev in zip(new_dim_names, index.levels):
variables[name] = IndexVariable(name, lev)
indexes[name] = lev
coord_names = set(self._coord_names) - {dim} | set(new_dim_names)
return self._replace_with_new_dims(
variables, coord_names=coord_names, indexes=indexes
)
def unstack(
self,
dim: Union[Hashable, Iterable[Hashable]] = None,
fill_value: Any = dtypes.NA,
sparse: bool = False,
) -> "Dataset":
"""
Unstack existing dimensions corresponding to MultiIndexes into
multiple new dimensions.
New dimensions will be added at the end.
Parameters
----------
dim : hashable or iterable of hashable, optional
Dimension(s) over which to unstack. By default unstacks all
MultiIndexes.
fill_value : scalar or dict-like, default: nan
Value to use for missing values. If a dict-like, maps variable names to
fill values. If not provided, or if the dict-like does not
contain all variables, the dtype's NA value will be used.
sparse : bool, default: False
use sparse-array if True
Returns
-------
unstacked : Dataset
Dataset with unstacked data.
See Also
--------
Dataset.stack
"""
if dim is None:
dims = [
d for d in self.dims if isinstance(self.get_index(d), pd.MultiIndex)
]
else:
if isinstance(dim, str) or not isinstance(dim, Iterable):
dims = [dim]
else:
dims = list(dim)
missing_dims = [d for d in dims if d not in self.dims]
if missing_dims:
raise ValueError(
"Dataset does not contain the dimensions: %s" % missing_dims
)
non_multi_dims = [
d for d in dims if not isinstance(self.get_index(d), pd.MultiIndex)
]
if non_multi_dims:
raise ValueError(
"cannot unstack dimensions that do not "
"have a MultiIndex: %s" % non_multi_dims
)
result = self.copy(deep=False)
for dim in dims:
if (
# Dask arrays don't support assignment by index, which the fast unstack
# function requires.
# https://github.com/pydata/xarray/pull/4746#issuecomment-753282125
any(is_duck_dask_array(v.data) for v in self.variables.values())
# Sparse doesn't currently support (though we could special-case
# it)
# https://github.com/pydata/sparse/issues/422
or any(
isinstance(v.data, sparse_array_type)
for v in self.variables.values()
)
or sparse
# numpy full_like only added `shape` in 1.17
or LooseVersion(np.__version__) < LooseVersion("1.17")
# Until https://github.com/pydata/xarray/pull/4751 is resolved,
# we check explicitly whether it's a numpy array. Once that is
# resolved, explicitly exclude pint arrays.
# # pint doesn't implement `np.full_like` in a way that's
# # currently compatible.
# # https://github.com/pydata/xarray/pull/4746#issuecomment-753425173
# # or any(
# # isinstance(v.data, pint_array_type) for v in self.variables.values()
# # )
or any(
not isinstance(v.data, np.ndarray) for v in self.variables.values()
)
):
result = result._unstack_full_reindex(dim, fill_value, sparse)
else:
result = result._unstack_once(dim, fill_value)
return result
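# A minimal round-trip sketch (hedged): ``unstack`` reverses ``stack`` when
# the stacked dimension carries a MultiIndex:
#
#     ds.stack(z=("x", "y")).unstack("z")   # recovers dimensions "x" and "y"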
def update(self, other: "CoercibleMapping") -> "Dataset":
"""Update this dataset's variables with those from another dataset.
Just like :py:meth:`dict.update` this is an in-place operation.
Parameters
----------
other : Dataset or mapping
Variables with which to update this dataset. One of:
- Dataset
- mapping {var name: DataArray}
- mapping {var name: Variable}
- mapping {var name: (dimension name, array-like)}
- mapping {var name: (tuple of dimension names, array-like)}
Returns
-------
updated : Dataset
Updated dataset. Note that since the update is in-place, this is the input
dataset. The return value is deprecated since version 0.17 and is
scheduled for removal in 0.19.
Raises
------
ValueError
If any dimensions would have inconsistent sizes in the updated
dataset.
See Also
--------
Dataset.assign
"""
merge_result = dataset_update_method(self, other)
return self._replace(inplace=True, **merge_result._asdict())
def merge(
self,
other: Union["CoercibleMapping", "DataArray"],
overwrite_vars: Union[Hashable, Iterable[Hashable]] = frozenset(),
compat: str = "no_conflicts",
join: str = "outer",
fill_value: Any = dtypes.NA,
combine_attrs: str = "override",
) -> "Dataset":
"""Merge the arrays of two datasets into a single dataset.
This method generally does not allow for overriding data, with the
exception of attributes, which are ignored on the second dataset.
Variables with the same name are checked for conflicts via the equals
or identical methods.
Parameters
----------
other : Dataset or mapping
Dataset or variables to merge with this dataset.
overwrite_vars : hashable or iterable of hashable, optional
If provided, update variables of these name(s) without checking for
conflicts in this dataset.
compat : {"broadcast_equals", "equals", "identical", \
"no_conflicts"}, optional
String indicating how to compare variables of the same name for
potential conflicts:
- 'broadcast_equals': all values must be equal when variables are
broadcast against each other to ensure common dimensions.
- 'equals': all values and dimensions must be the same.
- 'identical': all values, dimensions and attributes must be the
same.
- 'no_conflicts': only values which are not null in both datasets
must be equal. The returned dataset then contains the combination
of all non-null values.
join : {"outer", "inner", "left", "right", "exact"}, optional
Method for joining ``self`` and ``other`` along shared dimensions:
- 'outer': use the union of the indexes
- 'inner': use the intersection of the indexes
- 'left': use indexes from ``self``
- 'right': use indexes from ``other``
- 'exact': error instead of aligning non-equal indexes
fill_value : scalar or dict-like, optional
Value to use for newly missing values. If a dict-like, maps
variable names (including coordinates) to fill values.
combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \
"override"}, default: "override"
String indicating how to combine attrs of the objects being merged:
- "drop": empty attrs on returned Dataset.
- "identical": all attrs must be the same on every object.
- "no_conflicts": attrs from all objects are combined, any that have
the same name must also have the same value.
- "drop_conflicts": attrs from all objects are combined, any that have
the same name but different values are dropped.
- "override": skip comparing and copy attrs from the first dataset to
the result.
Returns
-------
merged : Dataset
Merged dataset.
Raises
------
MergeError
If any variables conflict (see ``compat``).
"""
other = other.to_dataset() if isinstance(other, xr.DataArray) else other
merge_result = dataset_merge_method(
self,
other,
overwrite_vars=overwrite_vars,
compat=compat,
join=join,
fill_value=fill_value,
combine_attrs=combine_attrs,
)
return self._replace(**merge_result._asdict())
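# A minimal usage sketch for ``merge`` (hedged; names are illustrative):
#
#     merged = ds.merge(other_ds, join="outer", fill_value=0)
#     # conflicting variables raise MergeError unless listed in overwrite_vars.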
def _assert_all_in_dataset(
self, names: Iterable[Hashable], virtual_okay: bool = False
) -> None:
bad_names = set(names) - set(self._variables)
if virtual_okay:
bad_names -= self.virtual_variables
if bad_names:
raise ValueError(
"One or more of the specified variables "
"cannot be found in this dataset"
)
def drop_vars(
self, names: Union[Hashable, Iterable[Hashable]], *, errors: str = "raise"
) -> "Dataset":
"""Drop variables from this dataset.
Parameters
----------
names : hashable or iterable of hashable
Name(s) of variables to drop.
errors : {"raise", "ignore"}, optional
If 'raise' (default), raises a ValueError if any of the variables
passed are not in the dataset. If 'ignore', any given names that are in the
dataset are dropped and no error is raised.
Returns
-------
dropped : Dataset
"""
# the Iterable check is required for mypy
if is_scalar(names) or not isinstance(names, Iterable):
names = {names}
else:
names = set(names)
if errors == "raise":
self._assert_all_in_dataset(names)
variables = {k: v for k, v in self._variables.items() if k not in names}
coord_names = {k for k in self._coord_names if k in variables}
indexes = {k: v for k, v in self.indexes.items() if k not in names}
return self._replace_with_new_dims(
variables, coord_names=coord_names, indexes=indexes
)
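# A minimal usage sketch for ``drop_vars`` (hedged; names are illustrative):
#
#     ds.drop_vars("temperature")
#     ds.drop_vars(["temperature", "pressure"], errors="ignore")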
def drop(self, labels=None, dim=None, *, errors="raise", **labels_kwargs):
"""Backward compatible method based on `drop_vars` and `drop_sel`
Using either `drop_vars` or `drop_sel` is encouraged
See Also
--------
Dataset.drop_vars
Dataset.drop_sel
"""
if errors not in ["raise", "ignore"]:
raise ValueError('errors must be either "raise" or "ignore"')
if is_dict_like(labels) and not isinstance(labels, dict):
warnings.warn(
"dropping coordinates using `drop` is be deprecated; use drop_vars.",
FutureWarning,
stacklevel=2,
)
return self.drop_vars(labels, errors=errors)
if labels_kwargs or isinstance(labels, dict):
if dim is not None:
raise ValueError("cannot specify dim and dict-like arguments.")
labels = either_dict_or_kwargs(labels, labels_kwargs, "drop")
if dim is None and (is_scalar(labels) or isinstance(labels, Iterable)):
warnings.warn(
"dropping variables using `drop` will be deprecated; using drop_vars is encouraged.",
PendingDeprecationWarning,
stacklevel=2,
)
return self.drop_vars(labels, errors=errors)
if dim is not None:
warnings.warn(
"dropping labels using list-like labels is deprecated; using "
"dict-like arguments with `drop_sel`, e.g. `ds.drop_sel(dim=[labels]).",
DeprecationWarning,
stacklevel=2,
)
return self.drop_sel({dim: labels}, errors=errors, **labels_kwargs)
warnings.warn(
"dropping labels using `drop` will be deprecated; using drop_sel is encouraged.",
PendingDeprecationWarning,
stacklevel=2,
)
return self.drop_sel(labels, errors=errors)
def drop_sel(self, labels=None, *, errors="raise", **labels_kwargs):
"""Drop index labels from this dataset.
Parameters
----------
labels : mapping of hashable to Any
Index labels to drop
errors : {"raise", "ignore"}, optional
If 'raise' (default), raises a ValueError if
any of the index labels passed are not
in the dataset. If 'ignore', any given labels that are in the
dataset are dropped and no error is raised.
**labels_kwargs : {dim: label, ...}, optional
The keyword arguments form of ``dim`` and ``labels``
Returns
-------
dropped : Dataset
Examples
--------
>>> data = np.arange(6).reshape(2, 3)
>>> labels = ["a", "b", "c"]
>>> ds = xr.Dataset({"A": (["x", "y"], data), "y": labels})
>>> ds
<xarray.Dataset>
Dimensions: (x: 2, y: 3)
Coordinates:
* y (y) <U1 'a' 'b' 'c'
Dimensions without coordinates: x
Data variables:
A (x, y) int64 0 1 2 3 4 5
>>> ds.drop_sel(y=["a", "c"])
<xarray.Dataset>
Dimensions: (x: 2, y: 1)
Coordinates:
* y (y) <U1 'b'
Dimensions without coordinates: x
Data variables:
A (x, y) int64 1 4
>>> ds.drop_sel(y="b")
<xarray.Dataset>
Dimensions: (x: 2, y: 2)
Coordinates:
* y (y) <U1 'a' 'c'
Dimensions without coordinates: x
Data variables:
A (x, y) int64 0 2 3 5
"""
if errors not in ["raise", "ignore"]:
raise ValueError('errors must be either "raise" or "ignore"')
labels = either_dict_or_kwargs(labels, labels_kwargs, "drop_sel")
ds = self
for dim, labels_for_dim in labels.items():
# Don't cast to set, as it would harm performance when labels
# is a large numpy array
if utils.is_scalar(labels_for_dim):
labels_for_dim = [labels_for_dim]
labels_for_dim = np.asarray(labels_for_dim)
try:
index = self.get_index(dim)
except KeyError:
raise ValueError("dimension %r does not have coordinate labels" % dim)
new_index = index.drop(labels_for_dim, errors=errors)
ds = ds.loc[{dim: new_index}]
return ds
def drop_isel(self, indexers=None, **indexers_kwargs):
"""Drop index positions from this Dataset.
Parameters
----------
indexers : mapping of hashable to Any
Index locations to drop
**indexers_kwargs : {dim: position, ...}, optional
The keyword arguments form of ``dim`` and ``positions``
Returns
-------
dropped : Dataset
Raises
------
IndexError
Examples
--------
>>> data = np.arange(6).reshape(2, 3)
>>> labels = ["a", "b", "c"]
>>> ds = xr.Dataset({"A": (["x", "y"], data), "y": labels})
>>> ds
<xarray.Dataset>
Dimensions: (x: 2, y: 3)
Coordinates:
* y (y) <U1 'a' 'b' 'c'
Dimensions without coordinates: x
Data variables:
A (x, y) int64 0 1 2 3 4 5
>>> ds.drop_isel(y=[0, 2])
<xarray.Dataset>
Dimensions: (x: 2, y: 1)
Coordinates:
* y (y) <U1 'b'
Dimensions without coordinates: x
Data variables:
A (x, y) int64 1 4
>>> ds.drop_isel(y=1)
<xarray.Dataset>
Dimensions: (x: 2, y: 2)
Coordinates:
* y (y) <U1 'a' 'c'
Dimensions without coordinates: x
Data variables:
A (x, y) int64 0 2 3 5
"""
indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "drop_isel")
ds = self
dimension_index = {}
for dim, pos_for_dim in indexers.items():
# Don't cast to set, as it would harm performance when labels
# is a large numpy array
if utils.is_scalar(pos_for_dim):
pos_for_dim = [pos_for_dim]
pos_for_dim = np.asarray(pos_for_dim)
index = self.get_index(dim)
new_index = index.delete(pos_for_dim)
dimension_index[dim] = new_index
ds = ds.loc[dimension_index]
return ds
def drop_dims(
self, drop_dims: Union[Hashable, Iterable[Hashable]], *, errors: str = "raise"
) -> "Dataset":
"""Drop dimensions and associated variables from this dataset.
Parameters
----------
drop_dims : hashable or iterable of hashable
Dimension or dimensions to drop.
errors : {"raise", "ignore"}, optional
If 'raise' (default), raises a ValueError if any of the
dimensions passed are not in the dataset. If 'ignore', any given
dimensions that are in the dataset are dropped and no error is raised.
Returns
-------
obj : Dataset
The dataset without the given dimensions (or any variables
containing those dimensions)
errors : {"raise", "ignore"}, optional
If 'raise' (default), raises a ValueError error if
any of the dimensions passed are not
in the dataset. If 'ignore', any given dimensions that are in the
dataset are dropped and no error is raised.
"""
if errors not in ["raise", "ignore"]:
raise ValueError('errors must be either "raise" or "ignore"')
if isinstance(drop_dims, str) or not isinstance(drop_dims, Iterable):
drop_dims = {drop_dims}
else:
drop_dims = set(drop_dims)
if errors == "raise":
missing_dims = drop_dims - set(self.dims)
if missing_dims:
raise ValueError(
"Dataset does not contain the dimensions: %s" % missing_dims
)
drop_vars = {k for k, v in self._variables.items() if set(v.dims) & drop_dims}
return self.drop_vars(drop_vars)
def transpose(self, *dims: Hashable) -> "Dataset":
"""Return a new Dataset object with all array dimensions transposed.
Although the order of dimensions on each array will change, the dataset
dimensions themselves will remain in fixed (sorted) order.
Parameters
----------
*dims : hashable, optional
By default, reverse the dimensions on each array. Otherwise,
reorder the dimensions to this order.
Returns
-------
transposed : Dataset
Each array in the dataset (including coordinates) will be
transposed to the given order.
Notes
-----
This operation returns a view of each array's data. It is
lazy for dask-backed DataArrays but not for numpy-backed DataArrays
-- the data will be fully loaded into memory.
See Also
--------
numpy.transpose
DataArray.transpose
"""
if dims:
if set(dims) ^ set(self.dims) and ... not in dims:
raise ValueError(
"arguments to transpose (%s) must be "
"permuted dataset dimensions (%s)" % (dims, tuple(self.dims))
)
ds = self.copy()
for name, var in self._variables.items():
var_dims = tuple(dim for dim in dims if dim in (var.dims + (...,)))
ds._variables[name] = var.transpose(*var_dims)
return ds
def dropna(
self,
dim: Hashable,
how: str = "any",
thresh: int = None,
subset: Iterable[Hashable] = None,
):
"""Returns a new dataset with dropped labels for missing values along
the provided dimension.
Parameters
----------
dim : hashable
Dimension along which to drop missing values. Dropping along
multiple dimensions simultaneously is not yet supported.
how : {"any", "all"}, default: "any"
* any : if any NA values are present, drop that label
* all : if all values are NA, drop that label
thresh : int, default: None
If supplied, require this many non-NA values.
subset : iterable of hashable, optional
Which variables to check for missing values. By default, all
variables in the dataset are checked.
Returns
-------
Dataset
"""
# TODO: consider supporting multiple dimensions? Or not, given that
# there are some ugly edge cases, e.g., pandas's dropna differs
# depending on the order of the supplied axes.
if dim not in self.dims:
raise ValueError("%s must be a single dataset dimension" % dim)
if subset is None:
subset = iter(self.data_vars)
count = np.zeros(self.dims[dim], dtype=np.int64)
size = np.int_(0) # for type checking
for k in subset:
array = self._variables[k]
if dim in array.dims:
dims = [d for d in array.dims if d != dim]
count += np.asarray(array.count(dims)) # type: ignore[attr-defined]
size += np.prod([self.dims[d] for d in dims])
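# Label-keeping logic: ``count`` holds the number of non-NA values per label
# and ``size`` the total number of values checked per label, so
# ``count == size`` keeps labels with no missing values ("any") and
# ``count > 0`` keeps labels that are not entirely missing ("all").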
if thresh is not None:
mask = count >= thresh
elif how == "any":
mask = count == size
elif how == "all":
mask = count > 0
elif how is not None:
raise ValueError("invalid how option: %s" % how)
else:
raise TypeError("must specify how or thresh")
return self.isel({dim: mask})
def fillna(self, value: Any) -> "Dataset":
"""Fill missing values in this object.
This operation follows the normal broadcasting and alignment rules that
xarray uses for binary arithmetic, except the result is aligned to this
object (``join='left'``) instead of aligned to the intersection of
index coordinates (``join='inner'``).
Parameters
----------
value : scalar, ndarray, DataArray, dict or Dataset
Used to fill all matching missing values in this dataset's data
variables. Scalars, ndarrays or DataArrays arguments are used to
fill all data with aligned coordinates (for DataArrays).
Dictionaries or datasets match data variables and then align
coordinates if necessary.
Returns
-------
Dataset
Examples
--------
>>> import numpy as np
>>> import xarray as xr
>>> ds = xr.Dataset(
... {
... "A": ("x", [np.nan, 2, np.nan, 0]),
... "B": ("x", [3, 4, np.nan, 1]),
... "C": ("x", [np.nan, np.nan, np.nan, 5]),
... "D": ("x", [np.nan, 3, np.nan, 4]),
... },
... coords={"x": [0, 1, 2, 3]},
... )
>>> ds
<xarray.Dataset>
Dimensions: (x: 4)
Coordinates:
* x (x) int64 0 1 2 3
Data variables:
A (x) float64 nan 2.0 nan 0.0
B (x) float64 3.0 4.0 nan 1.0
C (x) float64 nan nan nan 5.0
D (x) float64 nan 3.0 nan 4.0
Replace all `NaN` values with 0s.
>>> ds.fillna(0)
<xarray.Dataset>
Dimensions: (x: 4)
Coordinates:
* x (x) int64 0 1 2 3
Data variables:
A (x) float64 0.0 2.0 0.0 0.0
B (x) float64 3.0 4.0 0.0 1.0
C (x) float64 0.0 0.0 0.0 5.0
D (x) float64 0.0 3.0 0.0 4.0
Replace all `NaN` elements in column ‘A’, ‘B’, ‘C’, and ‘D’, with 0, 1, 2, and 3 respectively.
>>> values = {"A": 0, "B": 1, "C": 2, "D": 3}
>>> ds.fillna(value=values)
<xarray.Dataset>
Dimensions: (x: 4)
Coordinates:
* x (x) int64 0 1 2 3
Data variables:
A (x) float64 0.0 2.0 0.0 0.0
B (x) float64 3.0 4.0 1.0 1.0
C (x) float64 2.0 2.0 2.0 5.0
D (x) float64 3.0 3.0 3.0 4.0
"""
if utils.is_dict_like(value):
value_keys = getattr(value, "data_vars", value).keys()
if not set(value_keys) <= set(self.data_vars.keys()):
raise ValueError(
"all variables in the argument to `fillna` "
"must be contained in the original dataset"
)
out = ops.fillna(self, value)
return out
def interpolate_na(
self,
dim: Hashable = None,
method: str = "linear",
limit: int = None,
use_coordinate: Union[bool, Hashable] = True,
max_gap: Union[
int, float, str, pd.Timedelta, np.timedelta64, datetime.timedelta
] = None,
**kwargs: Any,
) -> "Dataset":
"""Fill in NaNs by interpolating according to different methods.
Parameters
----------
dim : str
Specifies the dimension along which to interpolate.
method : str, optional
String indicating which method to use for interpolation:
- 'linear': linear interpolation (Default). Additional keyword
arguments are passed to :py:func:`numpy.interp`
- 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'polynomial':
are passed to :py:func:`scipy.interpolate.interp1d`. If
``method='polynomial'``, the ``order`` keyword argument must also be
provided.
- 'barycentric', 'krog', 'pchip', 'spline', 'akima': use their
respective :py:class:`scipy.interpolate` classes.
use_coordinate : bool, str, default: True
Specifies which index to use as the x values in the interpolation
formulated as `y = f(x)`. If False, values are treated as if
equally-spaced along ``dim``. If True, the IndexVariable `dim` is
used. If ``use_coordinate`` is a string, it specifies the name of a
coordinate variable to use as the index.
limit : int, default: None
Maximum number of consecutive NaNs to fill. Must be greater than 0
or None for no limit. This filling is done regardless of the size of
the gap in the data. To only interpolate over gaps less than a given length,
see ``max_gap``.
max_gap : int, float, str, pandas.Timedelta, numpy.timedelta64, datetime.timedelta, default: None
Maximum size of gap, a continuous sequence of NaNs, that will be filled.
Use None for no limit. When interpolating along a datetime64 dimension
and ``use_coordinate=True``, ``max_gap`` can be one of the following:
- a string that is valid input for pandas.to_timedelta
- a :py:class:`numpy.timedelta64` object
- a :py:class:`pandas.Timedelta` object
- a :py:class:`datetime.timedelta` object
Otherwise, ``max_gap`` must be an int or a float. Use of ``max_gap`` with unlabeled
dimensions has not been implemented yet. Gap length is defined as the difference
between coordinate values at the first data point after a gap and the last value
before a gap. For gaps at the beginning (end), gap length is defined as the difference
between coordinate values at the first (last) valid data point and the first (last) NaN.
For example, consider::
<xarray.DataArray (x: 9)>
array([nan, nan, nan, 1., nan, nan, 4., nan, nan])
Coordinates:
* x (x) int64 0 1 2 3 4 5 6 7 8
The gap lengths are 3-0 = 3; 6-3 = 3; and 8-6 = 2 respectively
kwargs : dict, optional
parameters passed verbatim to the underlying interpolation function
Returns
-------
interpolated: Dataset
Filled in Dataset.
See Also
--------
numpy.interp
scipy.interpolate
Examples
--------
>>> ds = xr.Dataset(
... {
... "A": ("x", [np.nan, 2, 3, np.nan, 0]),
... "B": ("x", [3, 4, np.nan, 1, 7]),
... "C": ("x", [np.nan, np.nan, np.nan, 5, 0]),
... "D": ("x", [np.nan, 3, np.nan, -1, 4]),
... },
... coords={"x": [0, 1, 2, 3, 4]},
... )
>>> ds
<xarray.Dataset>
Dimensions: (x: 5)
Coordinates:
* x (x) int64 0 1 2 3 4
Data variables:
A (x) float64 nan 2.0 3.0 nan 0.0
B (x) float64 3.0 4.0 nan 1.0 7.0
C (x) float64 nan nan nan 5.0 0.0
D (x) float64 nan 3.0 nan -1.0 4.0
>>> ds.interpolate_na(dim="x", method="linear")
<xarray.Dataset>
Dimensions: (x: 5)
Coordinates:
* x (x) int64 0 1 2 3 4
Data variables:
A (x) float64 nan 2.0 3.0 1.5 0.0
B (x) float64 3.0 4.0 2.5 1.0 7.0
C (x) float64 nan nan nan 5.0 0.0
D (x) float64 nan 3.0 1.0 -1.0 4.0
>>> ds.interpolate_na(dim="x", method="linear", fill_value="extrapolate")
<xarray.Dataset>
Dimensions: (x: 5)
Coordinates:
* x (x) int64 0 1 2 3 4
Data variables:
A (x) float64 1.0 2.0 3.0 1.5 0.0
B (x) float64 3.0 4.0 2.5 1.0 7.0
C (x) float64 20.0 15.0 10.0 5.0 0.0
D (x) float64 5.0 3.0 1.0 -1.0 4.0
"""
from .missing import _apply_over_vars_with_dim, interp_na
new = _apply_over_vars_with_dim(
interp_na,
self,
dim=dim,
method=method,
limit=limit,
use_coordinate=use_coordinate,
max_gap=max_gap,
**kwargs,
)
return new
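# Illustrative sketch (comments only, not part of the implementation): a hypothetical
# call restricting interpolation to short gaps along "x"; only the ``max_gap`` and
# ``limit`` semantics documented above are assumed here.
#
#     import numpy as np
#     import xarray as xr
#
#     ds = xr.Dataset(
#         {"a": ("x", [np.nan, 1.0, np.nan, np.nan, 5.0])}, coords={"x": [0, 1, 2, 3, 4]}
#     )
#     # The interior gap has coordinate length 3 (4 - 1), so max_gap=3 allows it to be filled;
#     # the leading NaN has no left anchor and is left untouched.
#     filled = ds.interpolate_na(dim="x", method="linear", max_gap=3)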
def ffill(self, dim: Hashable, limit: int = None) -> "Dataset":
"""Fill NaN values by propogating values forward
*Requires bottleneck.*
Parameters
----------
dim : Hashable
Specifies the dimension along which to propagate values when
filling.
limit : int, default: None
The maximum number of consecutive NaN values to forward fill. In
other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. Must be greater
than 0 or None for no limit.
Returns
-------
Dataset
"""
from .missing import _apply_over_vars_with_dim, ffill
new = _apply_over_vars_with_dim(ffill, self, dim=dim, limit=limit)
return new
def bfill(self, dim: Hashable, limit: int = None) -> "Dataset":
"""Fill NaN values by propogating values backward
*Requires bottleneck.*
Parameters
----------
dim : Hashable
Specifies the dimension along which to propagate values when
filling.
limit : int, default: None
The maximum number of consecutive NaN values to backward fill. In
other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. Must be greater
than 0 or None for no limit.
Returns
-------
Dataset
"""
from .missing import _apply_over_vars_with_dim, bfill
new = _apply_over_vars_with_dim(bfill, self, dim=dim, limit=limit)
return new
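# Illustrative sketch (comments only): ffill/bfill propagate the nearest valid value
# along ``dim`` and are often chained; ``limit`` caps how many consecutive NaNs get
# filled. The dataset below is hypothetical; bottleneck must be installed.
#
#     import numpy as np
#     import xarray as xr
#
#     ds = xr.Dataset({"a": ("x", [np.nan, 2.0, np.nan, np.nan, 5.0])})
#     forward = ds.ffill(dim="x", limit=1)     # fills only the first NaN after a valid value
#     both = ds.ffill(dim="x").bfill(dim="x")  # remaining leading NaNs are then filled backward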
def combine_first(self, other: "Dataset") -> "Dataset":
"""Combine two Datasets, default to data_vars of self.
The new coordinates follow the normal broadcasting and alignment rules
of ``join='outer'``. Vacant cells in the expanded coordinates are
filled with np.nan.
Parameters
----------
other : Dataset
Used to fill all matching missing values in this array.
Returns
-------
Dataset
"""
out = ops.fillna(self, other, join="outer", dataset_join="outer")
return out
def reduce(
self,
func: Callable,
dim: Union[Hashable, Iterable[Hashable]] = None,
keep_attrs: bool = None,
keepdims: bool = False,
numeric_only: bool = False,
**kwargs: Any,
) -> "Dataset":
"""Reduce this dataset by applying `func` along some dimension(s).
Parameters
----------
func : callable
Function which can be called in the form
`f(x, axis=axis, **kwargs)` to return the result of reducing an
np.ndarray over an integer valued axis.
dim : str or sequence of str, optional
Dimension(s) over which to apply `func`. By default `func` is
applied over all dimensions.
keep_attrs : bool, optional
If True, the dataset's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
keepdims : bool, default: False
If True, the dimensions which are reduced are left in the result
as dimensions of size one. Coordinates that use these dimensions
are removed.
numeric_only : bool, optional
If True, only apply ``func`` to variables with a numeric dtype.
**kwargs : Any
Additional keyword arguments passed on to ``func``.
Returns
-------
reduced : Dataset
Dataset with this object's DataArrays replaced with new DataArrays
of summarized data and the indicated dimension(s) removed.
"""
if "axis" in kwargs:
raise ValueError(
"passing 'axis' to Dataset reduce methods is ambiguous."
" Please use 'dim' instead."
)
if dim is None or dim is ...:
dims = set(self.dims)
elif isinstance(dim, str) or not isinstance(dim, Iterable):
dims = {dim}
else:
dims = set(dim)
missing_dimensions = [d for d in dims if d not in self.dims]
if missing_dimensions:
raise ValueError(
"Dataset does not contain the dimensions: %s" % missing_dimensions
)
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
variables: Dict[Hashable, Variable] = {}
for name, var in self._variables.items():
reduce_dims = [d for d in var.dims if d in dims]
if name in self.coords:
if not reduce_dims:
variables[name] = var
else:
if (
not numeric_only
or np.issubdtype(var.dtype, np.number)
or (var.dtype == np.bool_)
):
if len(reduce_dims) == 1:
# unpack dimensions for the benefit of functions
# like np.argmin which can't handle tuple arguments
(reduce_dims,) = reduce_dims
elif len(reduce_dims) == var.ndim:
# prefer to aggregate over axis=None rather than
# axis=(0, 1) if they will be equivalent, because
# the former is often more efficient
reduce_dims = None # type: ignore[assignment]
variables[name] = var.reduce(
func,
dim=reduce_dims,
keep_attrs=keep_attrs,
keepdims=keepdims,
**kwargs,
)
coord_names = {k for k in self.coords if k in variables}
indexes = {k: v for k, v in self.indexes.items() if k in variables}
attrs = self.attrs if keep_attrs else None
return self._replace_with_new_dims(
variables, coord_names=coord_names, attrs=attrs, indexes=indexes
)
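# Illustrative sketch (comments only): ``reduce`` is the generic entry point that the
# named aggregations (``mean``, ``sum``, ...) are built on. A hypothetical call applying
# ``np.nansum`` over one dimension while keeping attributes:
#
#     import numpy as np
#     import xarray as xr
#
#     ds = xr.Dataset({"a": (("x", "y"), [[1.0, 2.0], [3.0, 4.0]])}, attrs={"title": "demo"})
#     total = ds.reduce(np.nansum, dim="x", keep_attrs=True)  # "a" becomes a 1-D variable over "y"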
def map(
self,
func: Callable,
keep_attrs: bool = None,
args: Iterable[Any] = (),
**kwargs: Any,
) -> "Dataset":
"""Apply a function to each variable in this dataset
Parameters
----------
func : callable
Function which can be called in the form `func(x, *args, **kwargs)`
to transform each DataArray `x` in this dataset into another
DataArray.
keep_attrs : bool, optional
If True, the dataset's attributes (`attrs`) will be copied from
the original object to the new one. If False, the new object will
be returned without attributes.
args : tuple, optional
Positional arguments passed on to `func`.
**kwargs : Any
Keyword arguments passed on to `func`.
Returns
-------
applied : Dataset
Resulting dataset from applying ``func`` to each data variable.
Examples
--------
>>> da = xr.DataArray(np.random.randn(2, 3))
>>> ds = xr.Dataset({"foo": da, "bar": ("x", [-1, 2])})
>>> ds
<xarray.Dataset>
Dimensions: (dim_0: 2, dim_1: 3, x: 2)
Dimensions without coordinates: dim_0, dim_1, x
Data variables:
foo (dim_0, dim_1) float64 1.764 0.4002 0.9787 2.241 1.868 -0.9773
bar (x) int64 -1 2
>>> ds.map(np.fabs)
<xarray.Dataset>
Dimensions: (dim_0: 2, dim_1: 3, x: 2)
Dimensions without coordinates: dim_0, dim_1, x
Data variables:
foo (dim_0, dim_1) float64 1.764 0.4002 0.9787 2.241 1.868 0.9773
bar (x) float64 1.0 2.0
"""
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
variables = {
k: maybe_wrap_array(v, func(v, *args, **kwargs))
for k, v in self.data_vars.items()
}
if keep_attrs:
for k, v in variables.items():
v._copy_attrs_from(self.data_vars[k])
attrs = self.attrs if keep_attrs else None
return type(self)(variables, attrs=attrs)
def apply(
self,
func: Callable,
keep_attrs: bool = None,
args: Iterable[Any] = (),
**kwargs: Any,
) -> "Dataset":
"""
Backward compatible implementation of ``map``
See Also
--------
Dataset.map
"""
warnings.warn(
"Dataset.apply may be deprecated in the future. Using Dataset.map is encouraged",
PendingDeprecationWarning,
stacklevel=2,
)
return self.map(func, keep_attrs, args, **kwargs)
def assign(
self, variables: Mapping[Hashable, Any] = None, **variables_kwargs: Hashable
) -> "Dataset":
"""Assign new data variables to a Dataset, returning a new object
with all the original variables in addition to the new ones.
Parameters
----------
variables : mapping of hashable to Any
Mapping from variable names to the new values. If the new values
are callable, they are computed on the Dataset and assigned to new
data variables. If the values are not callable (e.g. a DataArray,
scalar, or array), they are simply assigned.
**variables_kwargs
The keyword arguments form of ``variables``.
One of variables or variables_kwargs must be provided.
Returns
-------
ds : Dataset
A new Dataset with the new variables in addition to all the
existing variables.
Notes
-----
Since ``kwargs`` is a dictionary, the order of your arguments may not
be preserved, and so the order of the new variables is not well
defined. Assigning multiple variables within the same ``assign`` is
possible, but you cannot reference other variables created within the
same ``assign`` call.
See Also
--------
pandas.DataFrame.assign
Examples
--------
>>> x = xr.Dataset(
... {
... "temperature_c": (
... ("lat", "lon"),
... 20 * np.random.rand(4).reshape(2, 2),
... ),
... "precipitation": (("lat", "lon"), np.random.rand(4).reshape(2, 2)),
... },
... coords={"lat": [10, 20], "lon": [150, 160]},
... )
>>> x
<xarray.Dataset>
Dimensions: (lat: 2, lon: 2)
Coordinates:
* lat (lat) int64 10 20
* lon (lon) int64 150 160
Data variables:
temperature_c (lat, lon) float64 10.98 14.3 12.06 10.9
precipitation (lat, lon) float64 0.4237 0.6459 0.4376 0.8918
Where the value is a callable, evaluated on dataset:
>>> x.assign(temperature_f=lambda x: x.temperature_c * 9 / 5 + 32)
<xarray.Dataset>
Dimensions: (lat: 2, lon: 2)
Coordinates:
* lat (lat) int64 10 20
* lon (lon) int64 150 160
Data variables:
temperature_c (lat, lon) float64 10.98 14.3 12.06 10.9
precipitation (lat, lon) float64 0.4237 0.6459 0.4376 0.8918
temperature_f (lat, lon) float64 51.76 57.75 53.7 51.62
Alternatively, the same behavior can be achieved by directly referencing an existing dataarray:
>>> x.assign(temperature_f=x["temperature_c"] * 9 / 5 + 32)
<xarray.Dataset>
Dimensions: (lat: 2, lon: 2)
Coordinates:
* lat (lat) int64 10 20
* lon (lon) int64 150 160
Data variables:
temperature_c (lat, lon) float64 10.98 14.3 12.06 10.9
precipitation (lat, lon) float64 0.4237 0.6459 0.4376 0.8918
temperature_f (lat, lon) float64 51.76 57.75 53.7 51.62
"""
variables = either_dict_or_kwargs(variables, variables_kwargs, "assign")
data = self.copy()
# do all calculations first...
results = data._calc_assign_results(variables)
# ... and then assign
data.update(results)
return data
def to_array(self, dim="variable", name=None):
"""Convert this dataset into an xarray.DataArray
The data variables of this dataset will be broadcast against each other
and stacked along the first axis of the new array. All coordinates of
this dataset will remain coordinates.
Parameters
----------
dim : str, optional
Name of the new dimension.
name : str, optional
Name of the new data array.
Returns
-------
array : xarray.DataArray
"""
from .dataarray import DataArray
data_vars = [self.variables[k] for k in self.data_vars]
broadcast_vars = broadcast_variables(*data_vars)
data = duck_array_ops.stack([b.data for b in broadcast_vars], axis=0)
coords = dict(self.coords)
coords[dim] = list(self.data_vars)
indexes = propagate_indexes(self._indexes)
dims = (dim,) + broadcast_vars[0].dims
return DataArray(
data, coords, dims, attrs=self.attrs, name=name, indexes=indexes
)
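# Illustrative sketch (comments only): ``to_array`` stacks the data variables along a
# new leading dimension, broadcasting them against each other first. A hypothetical
# round trip back to a Dataset:
#
#     import xarray as xr
#
#     ds = xr.Dataset({"a": ("x", [1, 2]), "b": ("x", [3, 4])})
#     arr = ds.to_array(dim="variable")     # dims ("variable", "x"), shape (2, 2)
#     ds2 = arr.to_dataset(dim="variable")  # back to a Dataset with variables "a" and "b"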
def _normalize_dim_order(
self, dim_order: List[Hashable] = None
) -> Dict[Hashable, int]:
"""
Check the validity of the provided dimensions if any and return the mapping
between dimension name and their size.
Parameters
----------
dim_order
Dimension order to validate (default to the alphabetical order if None).
Returns
-------
result
Validated dimensions mapping.
"""
if dim_order is None:
dim_order = list(self.dims)
elif set(dim_order) != set(self.dims):
raise ValueError(
"dim_order {} does not match the set of dimensions of this "
"Dataset: {}".format(dim_order, list(self.dims))
)
ordered_dims = {k: self.dims[k] for k in dim_order}
return ordered_dims
def _to_dataframe(self, ordered_dims: Mapping[Hashable, int]):
columns = [k for k in self.variables if k not in self.dims]
data = [
self._variables[k].set_dims(ordered_dims).values.reshape(-1)
for k in columns
]
index = self.coords.to_index([*ordered_dims])
return pd.DataFrame(dict(zip(columns, data)), index=index)
def to_dataframe(self, dim_order: List[Hashable] = None) -> pd.DataFrame:
"""Convert this dataset into a pandas.DataFrame.
Non-index variables in this dataset form the columns of the
DataFrame. The DataFrame is indexed by the Cartesian product of
this dataset's indices.
Parameters
----------
dim_order
Hierarchical dimension order for the resulting dataframe. All
arrays are transposed to this order and then written out as flat
vectors in contiguous order, so the last dimension in this list
will be contiguous in the resulting DataFrame. This has a major
influence on which operations are efficient on the resulting
dataframe.
If provided, must include all dimensions of this dataset. By
default, dimensions are sorted alphabetically.
Returns
-------
result
Dataset as a pandas DataFrame.
"""
ordered_dims = self._normalize_dim_order(dim_order=dim_order)
return self._to_dataframe(ordered_dims=ordered_dims)
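# Illustrative sketch (comments only): ``dim_order`` controls which dimension varies
# fastest in the resulting MultiIndexed rows. Hypothetical usage:
#
#     import xarray as xr
#
#     ds = xr.Dataset(
#         {"t": (("x", "y"), [[1, 2], [3, 4]])},
#         coords={"x": [10, 20], "y": ["a", "b"]},
#     )
#     df = ds.to_dataframe(dim_order=["y", "x"])  # "x" is last, so it is contiguous in the rows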
def _set_sparse_data_from_dataframe(
self, idx: pd.Index, arrays: List[Tuple[Hashable, np.ndarray]], dims: tuple
) -> None:
from sparse import COO
if isinstance(idx, pd.MultiIndex):
coords = np.stack([np.asarray(code) for code in idx.codes], axis=0)
is_sorted = idx.is_lexsorted()
shape = tuple(lev.size for lev in idx.levels)
else:
coords = np.arange(idx.size).reshape(1, -1)
is_sorted = True
shape = (idx.size,)
for name, values in arrays:
# In virtually all real use cases, the sparse array will now have
# missing values and needs a fill_value. For consistency, don't
# special case the rare exceptions (e.g., dtype=int without a
# MultiIndex).
dtype, fill_value = dtypes.maybe_promote(values.dtype)
values = np.asarray(values, dtype=dtype)
data = COO(
coords,
values,
shape,
has_duplicates=False,
sorted=is_sorted,
fill_value=fill_value,
)
self[name] = (dims, data)
def _set_numpy_data_from_dataframe(
self, idx: pd.Index, arrays: List[Tuple[Hashable, np.ndarray]], dims: tuple
) -> None:
if not isinstance(idx, pd.MultiIndex):
for name, values in arrays:
self[name] = (dims, values)
return
# NB: similar, more general logic, now exists in
# variable.unstack_once; we could consider combining them at some
# point.
shape = tuple(lev.size for lev in idx.levels)
indexer = tuple(idx.codes)
# We already verified that the MultiIndex has all unique values, so
# there are missing values if and only if the size of output arrays is
# larger than the index.
missing_values = np.prod(shape) > idx.shape[0]
for name, values in arrays:
# NumPy indexing is much faster than using DataFrame.reindex() to
# fill in missing values:
# https://stackoverflow.com/a/35049899/809705
if missing_values:
dtype, fill_value = dtypes.maybe_promote(values.dtype)
data = np.full(shape, fill_value, dtype)
else:
# If there are no missing values, keep the existing dtype
# instead of promoting to support NA, e.g., keep integer
# columns as integers.
# TODO: consider removing this special case, which doesn't
# exist for sparse=True.
data = np.zeros(shape, values.dtype)
data[indexer] = values
self[name] = (dims, data)
@classmethod
def from_dataframe(cls, dataframe: pd.DataFrame, sparse: bool = False) -> "Dataset":
"""Convert a pandas.DataFrame into an xarray.Dataset
Each column will be converted into an independent variable in the
Dataset. If the dataframe's index is a MultiIndex, it will be expanded
into a tensor product of one-dimensional indices (filling in missing
values with NaN). This method will produce a Dataset very similar to
that on which the 'to_dataframe' method was called, except with
possibly redundant dimensions (since all dataset variables will have
the same dimensionality).
Parameters
----------
dataframe : DataFrame
DataFrame from which to copy data and indices.
sparse : bool, default: False
If true, create sparse arrays instead of dense numpy arrays. This
can potentially save a large amount of memory if the DataFrame has
a MultiIndex. Requires the sparse package (sparse.pydata.org).
Returns
-------
New Dataset.
See Also
--------
xarray.DataArray.from_series
pandas.DataFrame.to_xarray
"""
# TODO: Add an option to remove dimensions along which the variables
# are constant, to enable consistent serialization to/from a dataframe,
# even if some variables have different dimensionality.
if not dataframe.columns.is_unique:
raise ValueError("cannot convert DataFrame with non-unique columns")
idx = remove_unused_levels_categories(dataframe.index)
if isinstance(idx, pd.MultiIndex) and not idx.is_unique:
raise ValueError(
"cannot convert a DataFrame with a non-unique MultiIndex into xarray"
)
# Cast to a NumPy array first, in case the Series is a pandas Extension
# array (which doesn't have a valid NumPy dtype)
# TODO: allow users to control how this casting happens, e.g., by
# forwarding arguments to pandas.Series.to_numpy?
arrays = [(k, np.asarray(v)) for k, v in dataframe.items()]
obj = cls()
if isinstance(idx, pd.MultiIndex):
dims = tuple(
name if name is not None else "level_%i" % n
for n, name in enumerate(idx.names)
)
for dim, lev in zip(dims, idx.levels):
obj[dim] = (dim, lev)
else:
index_name = idx.name if idx.name is not None else "index"
dims = (index_name,)
obj[index_name] = (dims, idx)
if sparse:
obj._set_sparse_data_from_dataframe(idx, arrays, dims)
else:
obj._set_numpy_data_from_dataframe(idx, arrays, dims)
return obj
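# Illustrative sketch (comments only): a MultiIndexed frame becomes a dense grid with
# NaN where index combinations are missing, unless ``sparse=True`` (requires the sparse
# package). Hypothetical usage:
#
#     import pandas as pd
#     import xarray as xr
#
#     df = pd.DataFrame(
#         {"v": [1.0, 2.0, 3.0]},
#         index=pd.MultiIndex.from_tuples([(0, "a"), (0, "b"), (1, "a")], names=["x", "y"]),
#     )
#     ds = xr.Dataset.from_dataframe(df)  # "v" gets dims ("x", "y"); the (1, "b") cell is NaN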
def to_dask_dataframe(self, dim_order=None, set_index=False):
"""
Convert this dataset into a dask.dataframe.DataFrame.
The dimensions, coordinates and data variables in this dataset form
the columns of the DataFrame.
Parameters
----------
dim_order : list, optional
Hierarchical dimension order for the resulting dataframe. All
arrays are transposed to this order and then written out as flat
vectors in contiguous order, so the last dimension in this list
will be contiguous in the resulting DataFrame. This has a major
influence on which operations are efficient on the resulting dask
dataframe.
If provided, must include all dimensions of this dataset. By
default, dimensions are sorted alphabetically.
set_index : bool, optional
If set_index=True, the dask DataFrame is indexed by this dataset's
coordinate. Since dask DataFrames do not support multi-indexes,
set_index only works if the dataset only contains one dimension.
Returns
-------
dask.dataframe.DataFrame
"""
import dask.array as da
import dask.dataframe as dd
ordered_dims = self._normalize_dim_order(dim_order=dim_order)
columns = list(ordered_dims)
columns.extend(k for k in self.coords if k not in self.dims)
columns.extend(self.data_vars)
series_list = []
for name in columns:
try:
var = self.variables[name]
except KeyError:
# dimension without a matching coordinate
size = self.dims[name]
data = da.arange(size, chunks=size, dtype=np.int64)
var = Variable((name,), data)
# IndexVariable objects have a dummy .chunk() method
if isinstance(var, IndexVariable):
var = var.to_base_variable()
dask_array = var.set_dims(ordered_dims).chunk(self.chunks).data
series = dd.from_array(dask_array.reshape(-1), columns=[name])
series_list.append(series)
df = dd.concat(series_list, axis=1)
if set_index:
dim_order = [*ordered_dims]
if len(dim_order) == 1:
(dim,) = dim_order
df = df.set_index(dim)
else:
# triggers an error about multi-indexes, even if only one
# dimension is passed
df = df.set_index(dim_order)
return df
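# Illustrative sketch (comments only): the result is lazy; ``set_index=True`` only works
# for a single dimension because dask DataFrames lack MultiIndex support. Hypothetical
# usage, assuming a chunked dataset ``ds`` whose only dimension is "time":
#
#     ddf = ds.to_dask_dataframe(set_index=True)  # indexed by the "time" coordinate
#     pdf = ddf.compute()                         # materialize as a pandas DataFrame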
def to_dict(self, data=True):
"""
Convert this dataset to a dictionary following xarray naming
conventions.
Converts all variables and attributes to native Python objects.
Useful for converting to json. To avoid datetime incompatibility
use decode_times=False kwarg in xarray.open_dataset.
Parameters
----------
data : bool, optional
Whether to include the actual data in the dictionary. When set to
False, returns just the schema.
See Also
--------
Dataset.from_dict
"""
d = {
"coords": {},
"attrs": decode_numpy_dict_values(self.attrs),
"dims": dict(self.dims),
"data_vars": {},
}
for k in self.coords:
d["coords"].update({k: self[k].variable.to_dict(data=data)})
for k in self.data_vars:
d["data_vars"].update({k: self[k].variable.to_dict(data=data)})
return d
@classmethod
def from_dict(cls, d):
"""
Convert a dictionary into an xarray.Dataset.
Input dict can take several forms:
.. code:: python
d = {
"t": {"dims": ("t"), "data": t},
"a": {"dims": ("t"), "data": x},
"b": {"dims": ("t"), "data": y},
}
d = {
"coords": {"t": {"dims": "t", "data": t, "attrs": {"units": "s"}}},
"attrs": {"title": "air temperature"},
"dims": "t",
"data_vars": {
"a": {"dims": "t", "data": x},
"b": {"dims": "t", "data": y},
},
}
where "t" is the name of the dimesion, "a" and "b" are names of data
variables and t, x, and y are lists, numpy.arrays or pandas objects.
Parameters
----------
d : dict-like
Mapping with a minimum structure of
``{"var_0": {"dims": [..], "data": [..]}, \
...}``
Returns
-------
obj : xarray.Dataset
See also
--------
Dataset.to_dict
DataArray.from_dict
"""
if not {"coords", "data_vars"}.issubset(set(d)):
variables = d.items()
else:
import itertools
variables = itertools.chain(
d.get("coords", {}).items(), d.get("data_vars", {}).items()
)
try:
variable_dict = {
k: (v["dims"], v["data"], v.get("attrs")) for k, v in variables
}
except KeyError as e:
raise ValueError(
"cannot convert dict without the key "
"'{dims_data}'".format(dims_data=str(e.args[0]))
)
obj = cls(variable_dict)
# what if coords aren't dims?
coords = set(d.get("coords", {})) - set(d.get("dims", {}))
obj = obj.set_coords(coords)
obj.attrs.update(d.get("attrs", {}))
return obj
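# Illustrative sketch (comments only): a minimal dict round trip using the structure
# documented above; the values here are hypothetical.
#
#     import xarray as xr
#
#     d = {
#         "coords": {"t": {"dims": "t", "data": [0, 1, 2], "attrs": {"units": "s"}}},
#         "attrs": {"title": "air temperature"},
#         "dims": "t",
#         "data_vars": {"a": {"dims": "t", "data": [10, 20, 30]}},
#     }
#     ds = xr.Dataset.from_dict(d)
#     assert ds.to_dict(data=True)["data_vars"]["a"]["data"] == [10, 20, 30]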
@staticmethod
def _unary_op(f):
@functools.wraps(f)
def func(self, *args, **kwargs):
variables = {}
keep_attrs = kwargs.pop("keep_attrs", None)
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=True)
for k, v in self._variables.items():
if k in self._coord_names:
variables[k] = v
else:
variables[k] = f(v, *args, **kwargs)
if keep_attrs:
variables[k].attrs = v._attrs
attrs = self._attrs if keep_attrs else None
return self._replace_with_new_dims(variables, attrs=attrs)
return func
@staticmethod
def _binary_op(f, reflexive=False, join=None):
@functools.wraps(f)
def func(self, other):
from .dataarray import DataArray
if isinstance(other, groupby.GroupBy):
return NotImplemented
align_type = OPTIONS["arithmetic_join"] if join is None else join
if isinstance(other, (DataArray, Dataset)):
self, other = align(self, other, join=align_type, copy=False)
g = f if not reflexive else lambda x, y: f(y, x)
ds = self._calculate_binary_op(g, other, join=align_type)
return ds
return func
@staticmethod
def _inplace_binary_op(f):
@functools.wraps(f)
def func(self, other):
from .dataarray import DataArray
if isinstance(other, groupby.GroupBy):
raise TypeError(
"in-place operations between a Dataset and "
"a grouped object are not permitted"
)
# we don't actually modify arrays in-place with in-place Dataset
# arithmetic -- this lets us automatically align things
if isinstance(other, (DataArray, Dataset)):
other = other.reindex_like(self, copy=False)
g = ops.inplace_to_noninplace_op(f)
ds = self._calculate_binary_op(g, other, inplace=True)
self._replace_with_new_dims(
ds._variables,
ds._coord_names,
attrs=ds._attrs,
indexes=ds._indexes,
inplace=True,
)
return self
return func
def _calculate_binary_op(self, f, other, join="inner", inplace=False):
def apply_over_both(lhs_data_vars, rhs_data_vars, lhs_vars, rhs_vars):
if inplace and set(lhs_data_vars) != set(rhs_data_vars):
raise ValueError(
"datasets must have the same data variables "
"for in-place arithmetic operations: %s, %s"
% (list(lhs_data_vars), list(rhs_data_vars))
)
dest_vars = {}
for k in lhs_data_vars:
if k in rhs_data_vars:
dest_vars[k] = f(lhs_vars[k], rhs_vars[k])
elif join in ["left", "outer"]:
dest_vars[k] = f(lhs_vars[k], np.nan)
for k in rhs_data_vars:
if k not in dest_vars and join in ["right", "outer"]:
dest_vars[k] = f(rhs_vars[k], np.nan)
return dest_vars
if utils.is_dict_like(other) and not isinstance(other, Dataset):
# can't use our shortcut of doing the binary operation with
# Variable objects, so apply over our data vars instead.
new_data_vars = apply_over_both(
self.data_vars, other, self.data_vars, other
)
return Dataset(new_data_vars)
other_coords = getattr(other, "coords", None)
ds = self.coords.merge(other_coords)
if isinstance(other, Dataset):
new_vars = apply_over_both(
self.data_vars, other.data_vars, self.variables, other.variables
)
else:
other_variable = getattr(other, "variable", other)
new_vars = {k: f(self.variables[k], other_variable) for k in self.data_vars}
ds._variables.update(new_vars)
ds._dims = calculate_dimensions(ds._variables)
return ds
def _copy_attrs_from(self, other):
self.attrs = other.attrs
for v in other.variables:
if v in self.variables:
self.variables[v].attrs = other.variables[v].attrs
def diff(self, dim, n=1, label="upper"):
"""Calculate the n-th order discrete difference along given axis.
Parameters
----------
dim : str
Dimension over which to calculate the finite difference.
n : int, optional
The number of times values are differenced.
label : str, optional
The new coordinate in dimension ``dim`` will have the
values of either the minuend's or subtrahend's coordinate
for values 'upper' and 'lower', respectively. Other
values are not supported.
Returns
-------
difference : same type as caller
The n-th order finite difference of this object.
.. note::
`n` matches numpy's behavior and is different from pandas' first
argument named `periods`.
Examples
--------
>>> ds = xr.Dataset({"foo": ("x", [5, 5, 6, 6])})
>>> ds.diff("x")
<xarray.Dataset>
Dimensions: (x: 3)
Dimensions without coordinates: x
Data variables:
foo (x) int64 0 1 0
>>> ds.diff("x", 2)
<xarray.Dataset>
Dimensions: (x: 2)
Dimensions without coordinates: x
Data variables:
foo (x) int64 1 -1
See Also
--------
Dataset.differentiate
"""
if n == 0:
return self
if n < 0:
raise ValueError(f"order `n` must be non-negative but got {n}")
# prepare slices
kwargs_start = {dim: slice(None, -1)}
kwargs_end = {dim: slice(1, None)}
# prepare new coordinate
if label == "upper":
kwargs_new = kwargs_end
elif label == "lower":
kwargs_new = kwargs_start
else:
raise ValueError("The 'label' argument has to be either 'upper' or 'lower'")
variables = {}
for name, var in self.variables.items():
if dim in var.dims:
if name in self.data_vars:
variables[name] = var.isel(**kwargs_end) - var.isel(**kwargs_start)
else:
variables[name] = var.isel(**kwargs_new)
else:
variables[name] = var
indexes = dict(self.indexes)
if dim in indexes:
indexes[dim] = indexes[dim][kwargs_new[dim]]
difference = self._replace_with_new_dims(variables, indexes=indexes)
if n > 1:
return difference.diff(dim, n - 1)
else:
return difference
def shift(self, shifts=None, fill_value=dtypes.NA, **shifts_kwargs):
"""Shift this dataset by an offset along one or more dimensions.
Only data variables are moved; coordinates stay in place. This is
consistent with the behavior of ``shift`` in pandas.
Parameters
----------
shifts : mapping of hashable to int
Integer offset to shift along each of the given dimensions.
Positive offsets shift to the right; negative offsets shift to the
left.
fill_value : scalar or dict-like, optional
Value to use for newly missing values. If a dict-like, maps
variable names (including coordinates) to fill values.
**shifts_kwargs
The keyword arguments form of ``shifts``.
One of shifts or shifts_kwargs must be provided.
Returns
-------
shifted : Dataset
Dataset with the same coordinates and attributes but shifted data
variables.
See Also
--------
roll
Examples
--------
>>> ds = xr.Dataset({"foo": ("x", list("abcde"))})
>>> ds.shift(x=2)
<xarray.Dataset>
Dimensions: (x: 5)
Dimensions without coordinates: x
Data variables:
foo (x) object nan nan 'a' 'b' 'c'
"""
shifts = either_dict_or_kwargs(shifts, shifts_kwargs, "shift")
invalid = [k for k in shifts if k not in self.dims]
if invalid:
raise ValueError("dimensions %r do not exist" % invalid)
variables = {}
for name, var in self.variables.items():
if name in self.data_vars:
fill_value_ = (
fill_value.get(name, dtypes.NA)
if isinstance(fill_value, dict)
else fill_value
)
var_shifts = {k: v for k, v in shifts.items() if k in var.dims}
variables[name] = var.shift(fill_value=fill_value_, shifts=var_shifts)
else:
variables[name] = var
return self._replace(variables)
def roll(self, shifts=None, roll_coords=None, **shifts_kwargs):
"""Roll this dataset by an offset along one or more dimensions.
Unlike shift, roll may rotate all variables, including coordinates
if specified. The direction of rotation is consistent with
:py:func:`numpy.roll`.
Parameters
----------
shifts : dict, optional
A dict with keys matching dimensions and values given
by integers to rotate each of the given dimensions. Positive
offsets roll to the right; negative offsets roll to the left.
roll_coords : bool
Indicates whether to roll the coordinates by the offset
The current default of roll_coords (None, equivalent to True) is
deprecated and will change to False in a future version.
Explicitly pass roll_coords to silence the warning.
**shifts_kwargs : {dim: offset, ...}, optional
The keyword arguments form of ``shifts``.
One of shifts or shifts_kwargs must be provided.
Returns
-------
rolled : Dataset
Dataset with the same coordinates and attributes but rolled
variables.
See Also
--------
shift
Examples
--------
>>> ds = xr.Dataset({"foo": ("x", list("abcde"))})
>>> ds.roll(x=2)
<xarray.Dataset>
Dimensions: (x: 5)
Dimensions without coordinates: x
Data variables:
foo (x) <U1 'd' 'e' 'a' 'b' 'c'
"""
shifts = either_dict_or_kwargs(shifts, shifts_kwargs, "roll")
invalid = [k for k in shifts if k not in self.dims]
if invalid:
raise ValueError("dimensions %r do not exist" % invalid)
if roll_coords is None:
warnings.warn(
"roll_coords will be set to False in the future."
" Explicitly set roll_coords to silence warning.",
FutureWarning,
stacklevel=2,
)
roll_coords = True
unrolled_vars = () if roll_coords else self.coords
variables = {}
for k, v in self.variables.items():
if k not in unrolled_vars:
variables[k] = v.roll(
**{k: s for k, s in shifts.items() if k in v.dims}
)
else:
variables[k] = v
if roll_coords:
indexes = {}
for k, v in self.indexes.items():
(dim,) = self.variables[k].dims
if dim in shifts:
indexes[k] = roll_index(v, shifts[dim])
else:
indexes[k] = v
else:
indexes = dict(self.indexes)
return self._replace(variables, indexes=indexes)
def sortby(self, variables, ascending=True):
"""
Sort object by labels or values (along an axis).
Sorts the dataset, either along specified dimensions,
or according to values of 1-D dataarrays that share dimension
with calling object.
If the input variables are dataarrays, then the dataarrays are aligned
(via left-join) to the calling object prior to sorting by cell values.
NaNs are sorted to the end, following Numpy convention.
If multiple sorts along the same dimension are
given, numpy's lexsort is performed along that dimension:
https://docs.scipy.org/doc/numpy/reference/generated/numpy.lexsort.html
and the FIRST key in the sequence is used as the primary sort key,
followed by the 2nd key, etc.
Parameters
----------
variables : str, DataArray, or list of str or DataArray
1D DataArray objects or name(s) of 1D variable(s) in
coords/data_vars whose values are used to sort the dataset.
ascending : bool, optional
Whether to sort by ascending or descending order.
Returns
-------
sorted : Dataset
A new dataset where all the specified dims are sorted by dim
labels.
"""
from .dataarray import DataArray
if not isinstance(variables, list):
    variables = [variables]
variables = [v if isinstance(v, DataArray) else self[v] for v in variables]
aligned_vars = align(self, *variables, join="left")
aligned_self = aligned_vars[0]
aligned_other_vars = aligned_vars[1:]
vars_by_dim = defaultdict(list)
for data_array in aligned_other_vars:
if data_array.ndim != 1:
raise ValueError("Input DataArray is not 1-D.")
(key,) = data_array.dims
vars_by_dim[key].append(data_array)
indices = {}
for key, arrays in vars_by_dim.items():
order = np.lexsort(tuple(reversed(arrays)))
indices[key] = order if ascending else order[::-1]
return aligned_self.isel(**indices)
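# Illustrative sketch (comments only): sorting by a 1-D variable reorders every variable
# that shares its dimension. Hypothetical usage:
#
#     import xarray as xr
#
#     ds = xr.Dataset(
#         {"a": ("x", [10, 30, 20])}, coords={"x": [0, 1, 2], "key": ("x", [3, 1, 2])}
#     )
#     ds_sorted = ds.sortby("key")               # "x" order becomes [1, 2, 0]
#     ds_desc = ds.sortby("x", ascending=False)  # reverse along the index coordinate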
def quantile(
self,
q,
dim=None,
interpolation="linear",
numeric_only=False,
keep_attrs=None,
skipna=True,
):
"""Compute the qth quantile of the data along the specified dimension.
Returns the qth quantile(s) of the array elements for each variable
in the Dataset.
Parameters
----------
q : float or array-like of float
Quantile to compute, which must be between 0 and 1 inclusive.
dim : str or sequence of str, optional
Dimension(s) over which to apply quantile.
interpolation : {"linear", "lower", "higher", "midpoint", "nearest"}, default: "linear"
This optional parameter specifies the interpolation method to
use when the desired quantile lies between two data points
``i < j``:
* linear: ``i + (j - i) * fraction``, where ``fraction`` is
the fractional part of the index surrounded by ``i`` and
``j``.
* lower: ``i``.
* higher: ``j``.
* nearest: ``i`` or ``j``, whichever is nearest.
* midpoint: ``(i + j) / 2``.
keep_attrs : bool, optional
If True, the dataset's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
numeric_only : bool, optional
If True, only apply ``func`` to variables with a numeric dtype.
skipna : bool, optional
Whether to skip missing values when aggregating.
Returns
-------
quantiles : Dataset
If `q` is a single quantile, then the result is a scalar for each
variable in data_vars. If multiple percentiles are given, first
axis of the result corresponds to the quantile and a quantile
dimension is added to the return Dataset. The other dimensions are
the dimensions that remain after the reduction of the array.
See Also
--------
numpy.nanquantile, numpy.quantile, pandas.Series.quantile, DataArray.quantile
Examples
--------
>>> ds = xr.Dataset(
... {"a": (("x", "y"), [[0.7, 4.2, 9.4, 1.5], [6.5, 7.3, 2.6, 1.9]])},
... coords={"x": [7, 9], "y": [1, 1.5, 2, 2.5]},
... )
>>> ds.quantile(0) # or ds.quantile(0, dim=...)
<xarray.Dataset>
Dimensions: ()
Coordinates:
quantile float64 0.0
Data variables:
a float64 0.7
>>> ds.quantile(0, dim="x")
<xarray.Dataset>
Dimensions: (y: 4)
Coordinates:
* y (y) float64 1.0 1.5 2.0 2.5
quantile float64 0.0
Data variables:
a (y) float64 0.7 4.2 2.6 1.5
>>> ds.quantile([0, 0.5, 1])
<xarray.Dataset>
Dimensions: (quantile: 3)
Coordinates:
* quantile (quantile) float64 0.0 0.5 1.0
Data variables:
a (quantile) float64 0.7 3.4 9.4
>>> ds.quantile([0, 0.5, 1], dim="x")
<xarray.Dataset>
Dimensions: (quantile: 3, y: 4)
Coordinates:
* y (y) float64 1.0 1.5 2.0 2.5
* quantile (quantile) float64 0.0 0.5 1.0
Data variables:
a (quantile, y) float64 0.7 4.2 2.6 1.5 3.6 ... 1.7 6.5 7.3 9.4 1.9
"""
if isinstance(dim, str):
dims = {dim}
elif dim in [None, ...]:
dims = set(self.dims)
else:
dims = set(dim)
_assert_empty(
[d for d in dims if d not in self.dims],
"Dataset does not contain the dimensions: %s",
)
q = np.asarray(q, dtype=np.float64)
variables = {}
for name, var in self.variables.items():
reduce_dims = [d for d in var.dims if d in dims]
if reduce_dims or not var.dims:
if name not in self.coords:
if (
not numeric_only
or np.issubdtype(var.dtype, np.number)
or var.dtype == np.bool_
):
if len(reduce_dims) == var.ndim:
# prefer to aggregate over axis=None rather than
# axis=(0, 1) if they will be equivalent, because
# the former is often more efficient
reduce_dims = None
variables[name] = var.quantile(
q,
dim=reduce_dims,
interpolation=interpolation,
keep_attrs=keep_attrs,
skipna=skipna,
)
else:
variables[name] = var
# construct the new dataset
coord_names = {k for k in self.coords if k in variables}
indexes = {k: v for k, v in self.indexes.items() if k in variables}
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
attrs = self.attrs if keep_attrs else None
new = self._replace_with_new_dims(
variables, coord_names=coord_names, attrs=attrs, indexes=indexes
)
return new.assign_coords(quantile=q)
def rank(self, dim, pct=False, keep_attrs=None):
"""Ranks the data.
Equal values are assigned a rank that is the average of the ranks that
would have been otherwise assigned to all of the values within
that set.
Ranks begin at 1, not 0. If pct is True, computes percentage ranks.
NaNs in the input array are returned as NaNs.
The `bottleneck` library is required.
Parameters
----------
dim : str
Dimension over which to compute rank.
pct : bool, optional
If True, compute percentage ranks, otherwise compute integer ranks.
keep_attrs : bool, optional
If True, the dataset's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
Returns
-------
ranked : Dataset
Variables that do not depend on `dim` are dropped.
"""
if dim not in self.dims:
raise ValueError("Dataset does not contain the dimension: %s" % dim)
variables = {}
for name, var in self.variables.items():
if name in self.data_vars:
if dim in var.dims:
variables[name] = var.rank(dim, pct=pct)
else:
variables[name] = var
coord_names = set(self.coords)
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
attrs = self.attrs if keep_attrs else None
return self._replace(variables, coord_names, attrs=attrs)
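# Illustrative sketch (comments only): ranks are computed per variable along ``dim``;
# variables that lack that dimension are dropped. Requires bottleneck. Hypothetical usage:
#
#     import xarray as xr
#
#     ds = xr.Dataset({"a": ("x", [30.0, 10.0, 20.0])})
#     ds.rank("x")            # a -> [3.0, 1.0, 2.0]
#     ds.rank("x", pct=True)  # a -> [1.0, 1/3, 2/3]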
def differentiate(self, coord, edge_order=1, datetime_unit=None):
""" Differentiate with the second order accurate central
differences.
.. note::
This feature is limited to simple cartesian geometry, i.e. coord
must be one dimensional.
Parameters
----------
coord : str
The coordinate to be used to compute the gradient.
edge_order : {1, 2}, default: 1
N-th order accurate differences at the boundaries.
datetime_unit : None or {"Y", "M", "W", "D", "h", "m", "s", "ms", \
"us", "ns", "ps", "fs", "as"}, default: None
Unit to compute gradient. Only valid for datetime coordinate.
Returns
-------
differentiated: Dataset
See also
--------
numpy.gradient: corresponding numpy function
"""
from .variable import Variable
if coord not in self.variables and coord not in self.dims:
raise ValueError(f"Coordinate {coord} does not exist.")
coord_var = self[coord].variable
if coord_var.ndim != 1:
raise ValueError(
"Coordinate {} must be 1 dimensional but is {}"
" dimensional".format(coord, coord_var.ndim)
)
dim = coord_var.dims[0]
if _contains_datetime_like_objects(coord_var):
if coord_var.dtype.kind in "mM" and datetime_unit is None:
datetime_unit, _ = np.datetime_data(coord_var.dtype)
elif datetime_unit is None:
datetime_unit = "s" # Default to seconds for cftime objects
coord_var = coord_var._to_numeric(datetime_unit=datetime_unit)
variables = {}
for k, v in self.variables.items():
if k in self.data_vars and dim in v.dims and k not in self.coords:
if _contains_datetime_like_objects(v):
v = v._to_numeric(datetime_unit=datetime_unit)
grad = duck_array_ops.gradient(
v.data, coord_var, edge_order=edge_order, axis=v.get_axis_num(dim)
)
variables[k] = Variable(v.dims, grad)
else:
variables[k] = v
return self._replace(variables)
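# Illustrative sketch (comments only): the gradient is taken with respect to the values
# of ``coord`` rather than integer positions, so unevenly spaced coordinates are handled.
# Hypothetical usage:
#
#     import xarray as xr
#
#     ds = xr.Dataset({"a": ("x", [0.0, 1.0, 4.0])}, coords={"x": [0.0, 1.0, 3.0]})
#     ds.differentiate("x")  # central differences along "x", second-order accurate at interior points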
def integrate(
self, coord: Union[Hashable, Sequence[Hashable]], datetime_unit: str = None
) -> "Dataset":
"""Integrate along the given coordinate using the trapezoidal rule.
.. note::
This feature is limited to simple cartesian geometry, i.e. coord
must be one dimensional.
Parameters
----------
coord : hashable, or sequence of hashable
Coordinate(s) used for the integration.
datetime_unit : {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns', \
'ps', 'fs', 'as'}, optional
Specify the unit if datetime coordinate is used.
Returns
-------
integrated : Dataset
See also
--------
DataArray.integrate
numpy.trapz : corresponding numpy function
Examples
--------
>>> ds = xr.Dataset(
... data_vars={"a": ("x", [5, 5, 6, 6]), "b": ("x", [1, 2, 1, 0])},
... coords={"x": [0, 1, 2, 3], "y": ("x", [1, 7, 3, 5])},
... )
>>> ds
<xarray.Dataset>
Dimensions: (x: 4)
Coordinates:
* x (x) int64 0 1 2 3
y (x) int64 1 7 3 5
Data variables:
a (x) int64 5 5 6 6
b (x) int64 1 2 1 0
>>> ds.integrate("x")
<xarray.Dataset>
Dimensions: ()
Data variables:
a float64 16.5
b float64 3.5
>>> ds.integrate("y")
<xarray.Dataset>
Dimensions: ()
Data variables:
a float64 20.0
b float64 4.0
"""
if not isinstance(coord, (list, tuple)):
coord = (coord,)
result = self
for c in coord:
result = result._integrate_one(c, datetime_unit=datetime_unit)
return result
def _integrate_one(self, coord, datetime_unit=None):
from .variable import Variable
if coord not in self.variables and coord not in self.dims:
raise ValueError(f"Coordinate {coord} does not exist.")
coord_var = self[coord].variable
if coord_var.ndim != 1:
raise ValueError(
"Coordinate {} must be 1 dimensional but is {}"
" dimensional".format(coord, coord_var.ndim)
)
dim = coord_var.dims[0]
if _contains_datetime_like_objects(coord_var):
if coord_var.dtype.kind in "mM" and datetime_unit is None:
datetime_unit, _ = np.datetime_data(coord_var.dtype)
elif datetime_unit is None:
datetime_unit = "s" # Default to seconds for cftime objects
coord_var = coord_var._replace(
data=datetime_to_numeric(coord_var.data, datetime_unit=datetime_unit)
)
variables = {}
coord_names = set()
for k, v in self.variables.items():
if k in self.coords:
if dim not in v.dims:
variables[k] = v
coord_names.add(k)
else:
if k in self.data_vars and dim in v.dims:
if _contains_datetime_like_objects(v):
v = datetime_to_numeric(v, datetime_unit=datetime_unit)
integ = duck_array_ops.trapz(
v.data, coord_var.data, axis=v.get_axis_num(dim)
)
v_dims = list(v.dims)
v_dims.remove(dim)
variables[k] = Variable(v_dims, integ)
else:
variables[k] = v
indexes = {k: v for k, v in self.indexes.items() if k in variables}
return self._replace_with_new_dims(
variables, coord_names=coord_names, indexes=indexes
)
@property
def real(self):
return self.map(lambda x: x.real, keep_attrs=True)
@property
def imag(self):
return self.map(lambda x: x.imag, keep_attrs=True)
plot = utils.UncachedAccessor(_Dataset_PlotMethods)
def filter_by_attrs(self, **kwargs):
"""Returns a ``Dataset`` with variables that match specific conditions.
Can pass in ``key=value`` or ``key=callable``. A Dataset is returned
containing only the variables for which all the filter tests pass.
These tests are either ``key=value`` for which the attribute ``key``
has the exact value ``value`` or the callable passed into
``key=callable`` returns True. The callable will be passed a single
value, either the value of the attribute ``key`` or ``None`` if the
DataArray does not have an attribute with the name ``key``.
Parameters
----------
**kwargs
key : str
Attribute name.
value : callable or obj
If value is a callable, it should return a boolean in the form
of bool = func(attr) where attr is da.attrs[key].
Otherwise, value will be compared to each
DataArray's attrs[key].
Returns
-------
new : Dataset
New dataset with variables filtered by attribute.
Examples
--------
>>> # Create an example dataset:
>>> temp = 15 + 8 * np.random.randn(2, 2, 3)
>>> precip = 10 * np.random.rand(2, 2, 3)
>>> lon = [[-99.83, -99.32], [-99.79, -99.23]]
>>> lat = [[42.25, 42.21], [42.63, 42.59]]
>>> dims = ["x", "y", "time"]
>>> temp_attr = dict(standard_name="air_potential_temperature")
>>> precip_attr = dict(standard_name="convective_precipitation_flux")
>>> ds = xr.Dataset(
... {
... "temperature": (dims, temp, temp_attr),
... "precipitation": (dims, precip, precip_attr),
... },
... coords={
... "lon": (["x", "y"], lon),
... "lat": (["x", "y"], lat),
... "time": pd.date_range("2014-09-06", periods=3),
... "reference_time": pd.Timestamp("2014-09-05"),
... },
... )
>>> # Get variables matching a specific standard_name.
>>> ds.filter_by_attrs(standard_name="convective_precipitation_flux")
<xarray.Dataset>
Dimensions: (time: 3, x: 2, y: 2)
Coordinates:
lon (x, y) float64 -99.83 -99.32 -99.79 -99.23
lat (x, y) float64 42.25 42.21 42.63 42.59
* time (time) datetime64[ns] 2014-09-06 2014-09-07 2014-09-08
reference_time datetime64[ns] 2014-09-05
Dimensions without coordinates: x, y
Data variables:
precipitation (x, y, time) float64 5.68 9.256 0.7104 ... 7.992 4.615 7.805
>>> # Get all variables that have a standard_name attribute.
>>> standard_name = lambda v: v is not None
>>> ds.filter_by_attrs(standard_name=standard_name)
<xarray.Dataset>
Dimensions: (time: 3, x: 2, y: 2)
Coordinates:
lon (x, y) float64 -99.83 -99.32 -99.79 -99.23
lat (x, y) float64 42.25 42.21 42.63 42.59
* time (time) datetime64[ns] 2014-09-06 2014-09-07 2014-09-08
reference_time datetime64[ns] 2014-09-05
Dimensions without coordinates: x, y
Data variables:
temperature (x, y, time) float64 29.11 18.2 22.83 ... 18.28 16.15 26.63
precipitation (x, y, time) float64 5.68 9.256 0.7104 ... 7.992 4.615 7.805
"""
selection = []
for var_name, variable in self.variables.items():
has_value_flag = False
for attr_name, pattern in kwargs.items():
attr_value = variable.attrs.get(attr_name)
if (callable(pattern) and pattern(attr_value)) or attr_value == pattern:
has_value_flag = True
else:
has_value_flag = False
break
if has_value_flag is True:
selection.append(var_name)
return self[selection]
def unify_chunks(self) -> "Dataset":
"""Unify chunk size along all chunked dimensions of this Dataset.
Returns
-------
Dataset with consistent chunk sizes for all dask-array variables
See Also
--------
dask.array.core.unify_chunks
"""
try:
self.chunks
except ValueError: # "inconsistent chunks"
pass
else:
# No variables with dask backend, or all chunks are already aligned
return self.copy()
# import dask is placed after the quick exit test above to allow
# running this method if dask isn't installed and there are no chunks
import dask.array
ds = self.copy()
dims_pos_map = {dim: index for index, dim in enumerate(ds.dims)}
dask_array_names = []
dask_unify_args = []
for name, variable in ds.variables.items():
if isinstance(variable.data, dask.array.Array):
dims_tuple = [dims_pos_map[dim] for dim in variable.dims]
dask_array_names.append(name)
dask_unify_args.append(variable.data)
dask_unify_args.append(dims_tuple)
_, rechunked_arrays = dask.array.core.unify_chunks(*dask_unify_args)
for name, new_array in zip(dask_array_names, rechunked_arrays):
ds.variables[name]._data = new_array
return ds
def map_blocks(
self,
func: "Callable[..., T_DSorDA]",
args: Sequence[Any] = (),
kwargs: Mapping[str, Any] = None,
template: Union["DataArray", "Dataset"] = None,
) -> "T_DSorDA":
"""
Apply a function to each block of this Dataset.
.. warning::
This method is experimental and its signature may change.
Parameters
----------
func : callable
User-provided function that accepts a Dataset as its first
parameter. The function will receive a subset or 'block' of this Dataset (see below),
corresponding to one chunk along each chunked dimension. ``func`` will be
executed as ``func(subset_dataset, *subset_args, **kwargs)``.
This function must return either a single DataArray or a single Dataset.
This function cannot add a new chunked dimension.
args : sequence
Passed to func after unpacking and subsetting any xarray objects by blocks.
xarray objects in args must be aligned with obj, otherwise an error is raised.
kwargs : mapping
Passed verbatim to func after unpacking. xarray objects, if any, will not be
subset to blocks. Passing dask collections in kwargs is not allowed.
template : DataArray or Dataset, optional
xarray object representing the final result after compute is called. If not provided,
the function will be first run on mocked-up data, that looks like this object but
has sizes 0, to determine properties of the returned object such as dtype,
variable names, attributes, new dimensions and new indexes (if any).
``template`` must be provided if the function changes the size of existing dimensions.
When provided, ``attrs`` on variables in `template` are copied over to the result. Any
``attrs`` set by ``func`` will be ignored.
Returns
-------
A single DataArray or Dataset with dask backend, reassembled from the outputs of the
function.
Notes
-----
This function is designed for when ``func`` needs to manipulate a whole xarray object
subset to each block. Each block is loaded into memory. In the more common case where
``func`` can work on numpy arrays, it is recommended to use ``apply_ufunc``.
If none of the variables in this object is backed by dask arrays, calling this function is
equivalent to calling ``func(obj, *args, **kwargs)``.
See Also
--------
dask.array.map_blocks, xarray.apply_ufunc, xarray.Dataset.map_blocks
xarray.DataArray.map_blocks
Examples
--------
Calculate an anomaly from climatology using ``.groupby()``. Using
``xr.map_blocks()`` allows for parallel operations with knowledge of ``xarray``,
its indices, and its methods like ``.groupby()``.
>>> def calculate_anomaly(da, groupby_type="time.month"):
... gb = da.groupby(groupby_type)
... clim = gb.mean(dim="time")
... return gb - clim
...
>>> time = xr.cftime_range("1990-01", "1992-01", freq="M")
>>> month = xr.DataArray(time.month, coords={"time": time}, dims=["time"])
>>> np.random.seed(123)
>>> array = xr.DataArray(
... np.random.rand(len(time)),
... dims=["time"],
... coords={"time": time, "month": month},
... ).chunk()
>>> ds = xr.Dataset({"a": array})
>>> ds.map_blocks(calculate_anomaly, template=ds).compute()
<xarray.Dataset>
Dimensions: (time: 24)
Coordinates:
* time (time) object 1990-01-31 00:00:00 ... 1991-12-31 00:00:00
month (time) int64 1 2 3 4 5 6 7 8 9 10 11 12 1 2 3 4 5 6 7 8 9 10 11 12
Data variables:
a (time) float64 0.1289 0.1132 -0.0856 ... 0.2287 0.1906 -0.05901
Note that one must explicitly use ``args=[]`` and ``kwargs={}`` to pass arguments
to the function being applied in ``xr.map_blocks()``:
>>> ds.map_blocks(
... calculate_anomaly,
... kwargs={"groupby_type": "time.year"},
... template=ds,
... )
<xarray.Dataset>
Dimensions: (time: 24)
Coordinates:
* time (time) object 1990-01-31 00:00:00 ... 1991-12-31 00:00:00
month (time) int64 dask.array<chunksize=(24,), meta=np.ndarray>
Data variables:
a (time) float64 dask.array<chunksize=(24,), meta=np.ndarray>
"""
from .parallel import map_blocks
return map_blocks(func, self, args, kwargs, template)
def polyfit(
self,
dim: Hashable,
deg: int,
skipna: bool = None,
rcond: float = None,
w: Union[Hashable, Any] = None,
full: bool = False,
cov: Union[bool, str] = False,
):
"""
Least squares polynomial fit.
This replicates the behaviour of `numpy.polyfit` but differs by skipping
invalid values when `skipna = True`.
Parameters
----------
dim : hashable
Coordinate along which to fit the polynomials.
deg : int
Degree of the fitting polynomial.
skipna : bool, optional
If True, removes all invalid values before fitting each 1D slice of the array.
Default is True if data is stored in a dask.array or if there are any
invalid values, False otherwise.
rcond : float, optional
Relative condition number of the fit.
w : hashable or Any, optional
Weights to apply to the y-coordinate of the sample points.
Can be an array-like object or the name of a coordinate in the dataset.
full : bool, optional
Whether to return the residuals, matrix rank and singular values in addition
to the coefficients.
cov : bool or str, optional
Whether to return the covariance matrix in addition to the coefficients.
The matrix is not scaled if `cov='unscaled'`.
Returns
-------
polyfit_results : Dataset
A single dataset which contains (for each "var" in the input dataset):
[var]_polyfit_coefficients
The coefficients of the best fit for each variable in this dataset.
[var]_polyfit_residuals
The residuals of the least-square computation for each variable (only included if `full=True`)
When the matrix rank is deficient, np.nan is returned.
[dim]_matrix_rank
The effective rank of the scaled Vandermonde coefficient matrix (only included if `full=True`)
The rank is computed ignoring the NaN values that might be skipped.
[dim]_singular_values
The singular values of the scaled Vandermonde coefficient matrix (only included if `full=True`)
[var]_polyfit_covariance
The covariance matrix of the polynomial coefficient estimates (only included if `full=False` and `cov=True`)
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is deficient.
The warning is not raised with in-memory (not dask) data and `full=True`.
See Also
--------
numpy.polyfit
numpy.polyval
xarray.polyval
"""
variables = {}
skipna_da = skipna
x = get_clean_interp_index(self, dim, strict=False)
xname = "{}_".format(self[dim].name)
order = int(deg) + 1
lhs = np.vander(x, order)
if rcond is None:
rcond = (
x.shape[0] * np.core.finfo(x.dtype).eps # type: ignore[attr-defined]
)
# Weights:
if w is not None:
if isinstance(w, Hashable):
w = self.coords[w]
w = np.asarray(w)
if w.ndim != 1:
raise TypeError("Expected a 1-d array for weights.")
if w.shape[0] != lhs.shape[0]:
raise TypeError("Expected w and {} to have the same length".format(dim))
lhs *= w[:, np.newaxis]
# Scaling
scale = np.sqrt((lhs * lhs).sum(axis=0))
lhs /= scale
degree_dim = utils.get_temp_dimname(self.dims, "degree")
rank = np.linalg.matrix_rank(lhs)
if full:
rank = xr.DataArray(rank, name=xname + "matrix_rank")
variables[rank.name] = rank
sing = np.linalg.svd(lhs, compute_uv=False)
sing = xr.DataArray(
sing,
dims=(degree_dim,),
coords={degree_dim: np.arange(rank - 1, -1, -1)},
name=xname + "singular_values",
)
variables[sing.name] = sing
for name, da in self.data_vars.items():
if dim not in da.dims:
continue
if is_duck_dask_array(da.data) and (
rank != order or full or skipna is None
):
# Current algorithm with dask and skipna=False neither supports
# deficient ranks nor does it output the "full" info (issue dask/dask#6516)
skipna_da = True
elif skipna is None:
skipna_da = bool(np.any(da.isnull()))
dims_to_stack = [dimname for dimname in da.dims if dimname != dim]
stacked_coords: Dict[Hashable, DataArray] = {}
if dims_to_stack:
stacked_dim = utils.get_temp_dimname(dims_to_stack, "stacked")
rhs = da.transpose(dim, *dims_to_stack).stack(
{stacked_dim: dims_to_stack}
)
stacked_coords = {stacked_dim: rhs[stacked_dim]}
scale_da = scale[:, np.newaxis]
else:
rhs = da
scale_da = scale
if w is not None:
rhs *= w[:, np.newaxis]
with warnings.catch_warnings():
if full: # Copy np.polyfit behavior
warnings.simplefilter("ignore", np.RankWarning)
else: # Raise only once per variable
warnings.simplefilter("once", np.RankWarning)
coeffs, residuals = duck_array_ops.least_squares(
lhs, rhs.data, rcond=rcond, skipna=skipna_da
)
if isinstance(name, str):
name = "{}_".format(name)
else:
# Thus a ReprObject => polyfit was called on a DataArray
name = ""
coeffs = xr.DataArray(
coeffs / scale_da,
dims=[degree_dim] + list(stacked_coords.keys()),
coords={degree_dim: np.arange(order)[::-1], **stacked_coords},
name=name + "polyfit_coefficients",
)
if dims_to_stack:
coeffs = coeffs.unstack(stacked_dim)
variables[coeffs.name] = coeffs
if full or (cov is True):
residuals = xr.DataArray(
residuals if dims_to_stack else residuals.squeeze(),
dims=list(stacked_coords.keys()),
coords=stacked_coords,
name=name + "polyfit_residuals",
)
if dims_to_stack:
residuals = residuals.unstack(stacked_dim)
variables[residuals.name] = residuals
if cov:
Vbase = np.linalg.inv(np.dot(lhs.T, lhs))
Vbase /= np.outer(scale, scale)
if cov == "unscaled":
fac = 1
else:
if x.shape[0] <= order:
raise ValueError(
"The number of data points must exceed order to scale the covariance matrix."
)
fac = residuals / (x.shape[0] - order)
covariance = xr.DataArray(Vbase, dims=("cov_i", "cov_j")) * fac
variables[name + "polyfit_covariance"] = covariance
return Dataset(data_vars=variables, attrs=self.attrs.copy())
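# Illustrative sketch (comments only): fitting a degree-1 polynomial along "x" yields a
# ``*_polyfit_coefficients`` variable indexed by a new "degree" dimension, which can be
# evaluated again with ``xr.polyval``. Hypothetical usage:
#
#     import xarray as xr
#
#     ds = xr.Dataset({"a": ("x", [1.0, 3.0, 5.0, 7.0])}, coords={"x": [0, 1, 2, 3]})
#     fit = ds.polyfit(dim="x", deg=1)                            # fit["a_polyfit_coefficients"]
#     recon = xr.polyval(ds["x"], fit["a_polyfit_coefficients"])  # reconstruct the fitted line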
def pad(
self,
pad_width: Mapping[Hashable, Union[int, Tuple[int, int]]] = None,
mode: str = "constant",
stat_length: Union[
int, Tuple[int, int], Mapping[Hashable, Tuple[int, int]]
] = None,
constant_values: Union[
int, Tuple[int, int], Mapping[Hashable, Tuple[int, int]]
] = None,
end_values: Union[
int, Tuple[int, int], Mapping[Hashable, Tuple[int, int]]
] = None,
reflect_type: str = None,
**pad_width_kwargs: Any,
) -> "Dataset":
"""Pad this dataset along one or more dimensions.
.. warning::
This function is experimental and its behaviour is likely to change
especially regarding padding of dimension coordinates (or IndexVariables).
When using one of the modes ("edge", "reflect", "symmetric", "wrap"),
coordinates will be padded with the same mode, otherwise coordinates
are padded using the "constant" mode with fill_value dtypes.NA.
Parameters
----------
pad_width : mapping of hashable to tuple of int
Mapping with the form of {dim: (pad_before, pad_after)}
describing the number of values padded along each dimension.
{dim: pad} is a shortcut for pad_before = pad_after = pad
mode : str, default: "constant"
One of the following string values (taken from numpy docs).
'constant' (default)
Pads with a constant value.
'edge'
Pads with the edge values of array.
'linear_ramp'
Pads with the linear ramp between end_value and the
array edge value.
'maximum'
Pads with the maximum value of all or part of the
vector along each axis.
'mean'
Pads with the mean value of all or part of the
vector along each axis.
'median'
Pads with the median value of all or part of the
vector along each axis.
'minimum'
Pads with the minimum value of all or part of the
vector along each axis.
'reflect'
Pads with the reflection of the vector mirrored on
the first and last values of the vector along each
axis.
'symmetric'
Pads with the reflection of the vector mirrored
along the edge of the array.
'wrap'
Pads with the wrap of the vector along the axis.
The first values are used to pad the end and the
end values are used to pad the beginning.
stat_length : int, tuple or mapping of hashable to tuple, default: None
Used in 'maximum', 'mean', 'median', and 'minimum'. Number of
values at edge of each axis used to calculate the statistic value.
{dim_1: (before_1, after_1), ... dim_N: (before_N, after_N)} unique
statistic lengths along each dimension.
((before, after),) yields same before and after statistic lengths
for each dimension.
(stat_length,) or int is a shortcut for before = after = statistic
length for all axes.
Default is ``None``, to use the entire axis.
constant_values : scalar, tuple or mapping of hashable to tuple, default: 0
Used in 'constant'. The values to set the padded values for each
axis.
``{dim_1: (before_1, after_1), ... dim_N: (before_N, after_N)}`` unique
pad constants along each dimension.
``((before, after),)`` yields same before and after constants for each
dimension.
``(constant,)`` or ``constant`` is a shortcut for ``before = after = constant`` for
all dimensions.
Default is 0.
end_values : scalar, tuple or mapping of hashable to tuple, default: 0
Used in 'linear_ramp'. The values used for the ending value of the
linear_ramp and that will form the edge of the padded array.
``{dim_1: (before_1, after_1), ... dim_N: (before_N, after_N)}`` unique
end values along each dimension.
``((before, after),)`` yields same before and after end values for each
axis.
``(constant,)`` or ``constant`` is a shortcut for ``before = after = constant`` for
all axes.
Default is 0.
reflect_type : {"even", "odd"}, optional
Used in "reflect", and "symmetric". The "even" style is the
default with an unaltered reflection around the edge value. For
the "odd" style, the extended part of the array is created by
subtracting the reflected values from two times the edge value.
**pad_width_kwargs
The keyword arguments form of ``pad_width``.
One of ``pad_width`` or ``pad_width_kwargs`` must be provided.
Returns
-------
padded : Dataset
Dataset with the padded coordinates and data.
See Also
--------
Dataset.shift, Dataset.roll, Dataset.bfill, Dataset.ffill, numpy.pad, dask.array.pad
Notes
-----
By default when ``mode="constant"`` and ``constant_values=None``, integer types will be
promoted to ``float`` and padded with ``np.nan``. To avoid type promotion
specify ``constant_values=np.nan``
Examples
--------
>>> ds = xr.Dataset({"foo": ("x", range(5))})
>>> ds.pad(x=(1, 2))
<xarray.Dataset>
Dimensions: (x: 8)
Dimensions without coordinates: x
Data variables:
foo (x) float64 nan 0.0 1.0 2.0 3.0 4.0 nan nan
"""
pad_width = either_dict_or_kwargs(pad_width, pad_width_kwargs, "pad")
if mode in ("edge", "reflect", "symmetric", "wrap"):
coord_pad_mode = mode
coord_pad_options = {
"stat_length": stat_length,
"constant_values": constant_values,
"end_values": end_values,
"reflect_type": reflect_type,
}
else:
coord_pad_mode = "constant"
coord_pad_options = {}
variables = {}
for name, var in self.variables.items():
var_pad_width = {k: v for k, v in pad_width.items() if k in var.dims}
if not var_pad_width:
variables[name] = var
elif name in self.data_vars:
variables[name] = var.pad(
pad_width=var_pad_width,
mode=mode,
stat_length=stat_length,
constant_values=constant_values,
end_values=end_values,
reflect_type=reflect_type,
)
else:
variables[name] = var.pad(
pad_width=var_pad_width,
mode=coord_pad_mode,
**coord_pad_options, # type: ignore[arg-type]
)
return self._replace_vars_and_dims(variables)
def idxmin(
self,
dim: Hashable = None,
skipna: bool = None,
fill_value: Any = dtypes.NA,
keep_attrs: bool = None,
) -> "Dataset":
"""Return the coordinate label of the minimum value along a dimension.
Returns a new `Dataset` named after the dimension with the values of
the coordinate labels along that dimension corresponding to minimum
values along that dimension.
In comparison to :py:meth:`~Dataset.argmin`, this returns the
coordinate label while :py:meth:`~Dataset.argmin` returns the index.
Parameters
----------
dim : str, optional
Dimension over which to apply `idxmin`. This is optional for 1D
variables, but required for variables with 2 or more dimensions.
skipna : bool or None, default: None
If True, skip missing values (as marked by NaN). By default, only
skips missing values for ``float``, ``complex``, and ``object``
dtypes; other dtypes either do not have a sentinel missing value
(``int``) or ``skipna=True`` has not been implemented
(``datetime64`` or ``timedelta64``).
fill_value : Any, default: NaN
Value to be filled in case all of the values along a dimension are
null. By default this is NaN. The fill value and result are
automatically converted to a compatible dtype if possible.
Ignored if ``skipna`` is False.
keep_attrs : bool, default: False
If True, the attributes (``attrs``) will be copied from the
original object to the new one. If False (default), the new object
will be returned without attributes.
Returns
-------
reduced : Dataset
New `Dataset` object with `idxmin` applied to its data and the
indicated dimension removed.
See Also
--------
DataArray.idxmin, Dataset.idxmax, Dataset.min, Dataset.argmin
Examples
--------
>>> array1 = xr.DataArray(
... [0, 2, 1, 0, -2], dims="x", coords={"x": ["a", "b", "c", "d", "e"]}
... )
>>> array2 = xr.DataArray(
... [
... [2.0, 1.0, 2.0, 0.0, -2.0],
... [-4.0, np.NaN, 2.0, np.NaN, -2.0],
... [np.NaN, np.NaN, 1.0, np.NaN, np.NaN],
... ],
... dims=["y", "x"],
... coords={"y": [-1, 0, 1], "x": ["a", "b", "c", "d", "e"]},
... )
>>> ds = xr.Dataset({"int": array1, "float": array2})
>>> ds.min(dim="x")
<xarray.Dataset>
Dimensions: (y: 3)
Coordinates:
* y (y) int64 -1 0 1
Data variables:
int int64 -2
float (y) float64 -2.0 -4.0 1.0
>>> ds.argmin(dim="x")
<xarray.Dataset>
Dimensions: (y: 3)
Coordinates:
* y (y) int64 -1 0 1
Data variables:
int int64 4
float (y) int64 4 0 2
>>> ds.idxmin(dim="x")
<xarray.Dataset>
Dimensions: (y: 3)
Coordinates:
* y (y) int64 -1 0 1
Data variables:
int <U1 'e'
float (y) object 'e' 'a' 'c'
"""
return self.map(
methodcaller(
"idxmin",
dim=dim,
skipna=skipna,
fill_value=fill_value,
keep_attrs=keep_attrs,
)
)
def idxmax(
self,
dim: Hashable = None,
skipna: bool = None,
fill_value: Any = dtypes.NA,
keep_attrs: bool = None,
) -> "Dataset":
"""Return the coordinate label of the maximum value along a dimension.
Returns a new `Dataset` named after the dimension with the values of
the coordinate labels along that dimension corresponding to maximum
values along that dimension.
In comparison to :py:meth:`~Dataset.argmax`, this returns the
coordinate label while :py:meth:`~Dataset.argmax` returns the index.
Parameters
----------
dim : str, optional
Dimension over which to apply `idxmax`. This is optional for 1D
variables, but required for variables with 2 or more dimensions.
skipna : bool or None, default: None
If True, skip missing values (as marked by NaN). By default, only
skips missing values for ``float``, ``complex``, and ``object``
dtypes; other dtypes either do not have a sentinel missing value
(``int``) or ``skipna=True`` has not been implemented
(``datetime64`` or ``timedelta64``).
fill_value : Any, default: NaN
Value to be filled in case all of the values along a dimension are
null. By default this is NaN. The fill value and result are
automatically converted to a compatible dtype if possible.
Ignored if ``skipna`` is False.
keep_attrs : bool, default: False
If True, the attributes (``attrs``) will be copied from the
original object to the new one. If False (default), the new object
will be returned without attributes.
Returns
-------
reduced : Dataset
New `Dataset` object with `idxmax` applied to its data and the
indicated dimension removed.
See Also
--------
DataArray.idxmax, Dataset.idxmin, Dataset.max, Dataset.argmax
Examples
--------
>>> array1 = xr.DataArray(
... [0, 2, 1, 0, -2], dims="x", coords={"x": ["a", "b", "c", "d", "e"]}
... )
>>> array2 = xr.DataArray(
... [
... [2.0, 1.0, 2.0, 0.0, -2.0],
... [-4.0, np.NaN, 2.0, np.NaN, -2.0],
... [np.NaN, np.NaN, 1.0, np.NaN, np.NaN],
... ],
... dims=["y", "x"],
... coords={"y": [-1, 0, 1], "x": ["a", "b", "c", "d", "e"]},
... )
>>> ds = xr.Dataset({"int": array1, "float": array2})
>>> ds.max(dim="x")
<xarray.Dataset>
Dimensions: (y: 3)
Coordinates:
* y (y) int64 -1 0 1
Data variables:
int int64 2
float (y) float64 2.0 2.0 1.0
>>> ds.argmax(dim="x")
<xarray.Dataset>
Dimensions: (y: 3)
Coordinates:
* y (y) int64 -1 0 1
Data variables:
int int64 1
float (y) int64 0 2 2
>>> ds.idxmax(dim="x")
<xarray.Dataset>
Dimensions: (y: 3)
Coordinates:
* y (y) int64 -1 0 1
Data variables:
int <U1 'b'
float (y) object 'a' 'c' 'c'
"""
return self.map(
methodcaller(
"idxmax",
dim=dim,
skipna=skipna,
fill_value=fill_value,
keep_attrs=keep_attrs,
)
)
def argmin(self, dim=None, **kwargs):
"""Indices of the minima of the member variables.
If there are multiple minima, the indices of the first one found will be
returned.
Parameters
----------
dim : str, optional
The dimension over which to find the minimum. By default, finds minimum over
all dimensions - for now returning an int for backward compatibility, but
this is deprecated, in future will be an error, since DataArray.argmin will
return a dict with indices for all dimensions, which does not make sense for
a Dataset.
keep_attrs : bool, optional
If True, the attributes (`attrs`) will be copied from the original
object to the new one. If False (default), the new object will be
returned without attributes.
skipna : bool, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or skipna=True has not been
implemented (object, datetime64 or timedelta64).
Returns
-------
result : Dataset
See Also
--------
DataArray.argmin
"""
if dim is None:
warnings.warn(
"Once the behaviour of DataArray.argmin() and Variable.argmin() without "
"dim changes to return a dict of indices of each dimension, for "
"consistency it will be an error to call Dataset.argmin() with no argument,"
"since we don't return a dict of Datasets.",
DeprecationWarning,
stacklevel=2,
)
if (
dim is None
or (not isinstance(dim, Sequence) and dim is not ...)
or isinstance(dim, str)
):
# Return int index if single dimension is passed, and is not part of a
# sequence
argmin_func = getattr(duck_array_ops, "argmin")
return self.reduce(argmin_func, dim=dim, **kwargs)
else:
raise ValueError(
"When dim is a sequence or ..., DataArray.argmin() returns a dict. "
"dicts cannot be contained in a Dataset, so cannot call "
"Dataset.argmin() with a sequence or ... for dim"
)
def argmax(self, dim=None, **kwargs):
"""Indices of the maxima of the member variables.
If there are multiple maxima, the indices of the first one found will be
returned.
Parameters
----------
dim : str, optional
The dimension over which to find the maximum. By default, finds maximum over
all dimensions - for now returning an int for backward compatibility, but
this is deprecated, in future will be an error, since DataArray.argmax will
return a dict with indices for all dimensions, which does not make sense for
a Dataset.
keep_attrs : bool, optional
If True, the attributes (`attrs`) will be copied from the original
object to the new one. If False (default), the new object will be
returned without attributes.
skipna : bool, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or skipna=True has not been
implemented (object, datetime64 or timedelta64).
Returns
-------
result : Dataset
See Also
--------
DataArray.argmax
"""
if dim is None:
warnings.warn(
"Once the behaviour of DataArray.argmin() and Variable.argmin() without "
"dim changes to return a dict of indices of each dimension, for "
"consistency it will be an error to call Dataset.argmin() with no argument,"
"since we don't return a dict of Datasets.",
DeprecationWarning,
stacklevel=2,
)
if (
dim is None
or (not isinstance(dim, Sequence) and dim is not ...)
or isinstance(dim, str)
):
# Return int index if single dimension is passed, and is not part of a
# sequence
argmax_func = getattr(duck_array_ops, "argmax")
return self.reduce(argmax_func, dim=dim, **kwargs)
else:
raise ValueError(
"When dim is a sequence or ..., DataArray.argmin() returns a dict. "
"dicts cannot be contained in a Dataset, so cannot call "
"Dataset.argmin() with a sequence or ... for dim"
)
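# Usage sketch (illustrative only): with a single named dimension both methods reduce
# each data variable to integer positions, mirroring the docstring examples above:
#
#     ds.argmin(dim="x")  # position of the minimum along "x" per variable
#     ds.argmax(dim="x")  # position of the maximum along "x" per variable
#
# Passing a sequence or ... for dim raises, because the underlying DataArray methods
# would then return a dict of indices, which cannot be stored in a Dataset.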
def query(
self,
queries: Mapping[Hashable, Any] = None,
parser: str = "pandas",
engine: str = None,
missing_dims: str = "raise",
**queries_kwargs: Any,
) -> "Dataset":
"""Return a new dataset with each array indexed along the specified
dimension(s), where the indexers are given as strings containing
Python expressions to be evaluated against the data variables in the
dataset.
Parameters
----------
queries : dict, optional
A dict with keys matching dimensions and values given by strings
containing Python expressions to be evaluated against the data variables
in the dataset. The expressions will be evaluated using the pandas
eval() function, and can contain any valid Python expressions but cannot
contain any Python statements.
parser : {"pandas", "python"}, default: "pandas"
The parser to use to construct the syntax tree from the expression.
The default of 'pandas' parses code slightly differently from standard
Python. Alternatively, you can parse an expression using the 'python'
parser to retain strict Python semantics.
engine: {"python", "numexpr", None}, default: None
The engine used to evaluate the expression. Supported engines are:
- None: tries to use numexpr, falls back to python
- "numexpr": evaluates expressions using numexpr
- "python": performs operations as if you had eval’d in top level python
missing_dims : {"raise", "warn", "ignore"}, default: "raise"
What to do if dimensions that should be selected from are not present in the
Dataset:
- "raise": raise an exception
- "warning": raise a warning, and ignore the missing dimensions
- "ignore": ignore the missing dimensions
**queries_kwargs : {dim: query, ...}, optional
The keyword arguments form of ``queries``.
One of queries or queries_kwargs must be provided.
Returns
-------
obj : Dataset
A new Dataset with the same contents as this dataset, except each
array and dimension is indexed by the results of the appropriate
queries.
See Also
--------
Dataset.isel
pandas.eval
"""
# allow queries to be given either as a dict or as kwargs
queries = either_dict_or_kwargs(queries, queries_kwargs, "query")
# check queries
for dim, expr in queries.items():
if not isinstance(expr, str):
msg = f"expr for dim {dim} must be a string to be evaluated, {type(expr)} given"
raise ValueError(msg)
# evaluate the queries to create the indexers
indexers = {
dim: pd.eval(expr, resolvers=[self], parser=parser, engine=engine)
for dim, expr in queries.items()
}
# apply the selection
return self.isel(indexers, missing_dims=missing_dims)
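# Usage sketch (illustrative only; "x", "a" and "b" are made-up names):
#
#     ds = xr.Dataset({"a": ("x", np.arange(5)), "b": ("x", np.linspace(0, 1, 5))})
#     ds.query(x="a > 2")          # keep positions along "x" where a > 2
#     ds.query(x="a > 2 & b < 1")  # expressions are handed to pandas.eval
#
# Each expression must evaluate to a boolean array along the queried dimension;
# the resulting indexers are passed to isel() as shown above.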
def curvefit(
self,
coords: Union[Union[str, "DataArray"], Iterable[Union[str, "DataArray"]]],
func: Callable[..., Any],
reduce_dims: Union[Hashable, Iterable[Hashable]] = None,
skipna: bool = True,
p0: Dict[str, Any] = None,
bounds: Dict[str, Any] = None,
param_names: Sequence[str] = None,
kwargs: Dict[str, Any] = None,
):
"""
Curve fitting optimization for arbitrary functions.
Wraps `scipy.optimize.curve_fit` with `apply_ufunc`.
Parameters
----------
coords : DataArray, str or sequence of DataArray, str
Independent coordinate(s) over which to perform the curve fitting. Must share
at least one dimension with the calling object. When fitting multi-dimensional
functions, supply `coords` as a sequence in the same order as arguments in
`func`. To fit along existing dimensions of the calling object, `coords` can
also be specified as a str or sequence of strs.
func : callable
User specified function in the form `f(x, *params)` which returns a numpy
array of length `len(x)`. `params` are the fittable parameters which are optimized
by scipy curve_fit. `x` can also be specified as a sequence containing multiple
coordinates, e.g. `f((x0, x1), *params)`.
reduce_dims : str or sequence of str
Additional dimension(s) over which to aggregate while fitting. For example,
calling `ds.curvefit(coords='time', reduce_dims=['lat', 'lon'], ...)` will
aggregate all lat and lon points and fit the specified function along the
time dimension.
skipna : bool, optional
Whether to skip missing values when fitting. Default is True.
p0 : dictionary, optional
Optional dictionary of parameter names to initial guesses passed to the
`curve_fit` `p0` arg. If none or only some parameters are passed, the rest will
be assigned initial values following the default scipy behavior.
bounds : dictionary, optional
Optional dictionary of parameter names to bounding values passed to the
`curve_fit` `bounds` arg. If none or only some parameters are passed, the rest
will be unbounded following the default scipy behavior.
param_names : sequence of str, optional
Sequence of names for the fittable parameters of `func`. If not supplied,
this will be automatically determined by arguments of `func`. `param_names`
should be manually supplied when fitting a function that takes a variable
number of parameters.
kwargs : dictionary
Additional keyword arguments passed to scipy curve_fit.
Returns
-------
curvefit_results : Dataset
A single dataset which contains:
[var]_curvefit_coefficients
The coefficients of the best fit.
[var]_curvefit_covariance
The covariance matrix of the coefficient estimates.
See also
--------
Dataset.polyfit
scipy.optimize.curve_fit
"""
from scipy.optimize import curve_fit
if p0 is None:
p0 = {}
if bounds is None:
bounds = {}
if kwargs is None:
kwargs = {}
if not reduce_dims:
reduce_dims_ = []
elif isinstance(reduce_dims, str) or not isinstance(reduce_dims, Iterable):
reduce_dims_ = [reduce_dims]
else:
reduce_dims_ = list(reduce_dims)
if (
isinstance(coords, str)
or isinstance(coords, xr.DataArray)
or not isinstance(coords, Iterable)
):
coords = [coords]
coords_ = [self[coord] if isinstance(coord, str) else coord for coord in coords]
# Determine whether any coords are dims on self
for coord in coords_:
reduce_dims_ += [c for c in self.dims if coord.equals(self[c])]
reduce_dims_ = list(set(reduce_dims_))
preserved_dims = list(set(self.dims) - set(reduce_dims_))
if not reduce_dims_:
raise ValueError(
"No arguments to `coords` were identified as a dimension on the calling "
"object, and no dims were supplied to `reduce_dims`. This would result "
"in fitting on scalar data."
)
# Broadcast all coords with each other
coords_ = xr.broadcast(*coords_)
coords_ = [
coord.broadcast_like(self, exclude=preserved_dims) for coord in coords_
]
params, func_args = _get_func_args(func, param_names)
param_defaults, bounds_defaults = _initialize_curvefit_params(
params, p0, bounds, func_args
)
n_params = len(params)
kwargs.setdefault("p0", [param_defaults[p] for p in params])
kwargs.setdefault(
"bounds",
[
[bounds_defaults[p][0] for p in params],
[bounds_defaults[p][1] for p in params],
],
)
def _wrapper(Y, *coords_, **kwargs):
# Wrap curve_fit with raveled coordinates and pointwise NaN handling
x = np.vstack([c.ravel() for c in coords_])
y = Y.ravel()
if skipna:
mask = np.all([np.any(~np.isnan(x), axis=0), ~np.isnan(y)], axis=0)
x = x[:, mask]
y = y[mask]
if not len(y):
popt = np.full([n_params], np.nan)
pcov = np.full([n_params, n_params], np.nan)
return popt, pcov
x = np.squeeze(x)
popt, pcov = curve_fit(func, x, y, **kwargs)
return popt, pcov
result = xr.Dataset()
for name, da in self.data_vars.items():
if name is xr.core.dataarray._THIS_ARRAY:
name = ""
else:
name = f"{str(name)}_"
popt, pcov = xr.apply_ufunc(
_wrapper,
da,
*coords_,
vectorize=True,
dask="parallelized",
input_core_dims=[reduce_dims_ for d in range(len(coords_) + 1)],
output_core_dims=[["param"], ["cov_i", "cov_j"]],
dask_gufunc_kwargs={
"output_sizes": {
"param": n_params,
"cov_i": n_params,
"cov_j": n_params,
},
},
output_dtypes=(np.float64, np.float64),
exclude_dims=set(reduce_dims_),
kwargs=kwargs,
)
result[name + "curvefit_coefficients"] = popt
result[name + "curvefit_covariance"] = pcov
result = result.assign_coords(
{"param": params, "cov_i": params, "cov_j": params}
)
result.attrs = self.attrs.copy()
return result
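# Usage sketch (illustrative only; the exponential model and names are made up):
#
#     def exp_decay(t, amplitude, tau):
#         return amplitude * np.exp(-t / tau)
#
#     fit = ds.curvefit(coords="time", func=exp_decay)
#     fit["<var>_curvefit_coefficients"]  # indexed by the "param" coordinate
#     fit["<var>_curvefit_covariance"]    # indexed by "cov_i" / "cov_j"
#
# Unless param_names is given, the parameter names are taken from the arguments of
# func after the first, so "param" here would hold ["amplitude", "tau"].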
ops.inject_all_ops_and_reduce_methods(Dataset, array_only=False) | return sharedict.merge(*graphs.values())
|
api.py | from flask import Blueprint
from werkzeug.exceptions import NotFound, InternalServerError, TooManyRequests
from mldictionary_api.const import API_PREFIX
from mldictionary_api.resources.response import ResponseAPI
from mldictionary_api.resources.const import (
ENGLISH_REPR,
ENGLISH_TO_PORTUGUESE_REPR,
PORTUGUESE_REPR,
PORTUGUESE_TO_ENGLISH,
SPANISH_REPR,
)
api = Blueprint('mldictionary_api', __name__, url_prefix=API_PREFIX)
@api.route('/dictionary/en/<word>/')
def english(word: str):
return ResponseAPI().get_meanings(ENGLISH_REPR, word)
@api.route('/dictionary/pt/<word>/')
def portuguese(word: str):
return ResponseAPI().get_meanings(PORTUGUESE_REPR, word)
@api.route('/dictionary/es/<word>/')
def spanish(word: str):
return ResponseAPI().get_meanings(SPANISH_REPR, word)
@api.route('/translator/en-pt/<word>/')
def english_to_portuguese(word: str):
return ResponseAPI().get_meanings(ENGLISH_TO_PORTUGUESE_REPR, word)
@api.route('/translator/pt-en/<word>/')
def portuguese_to_english(word: str):
return ResponseAPI().get_meanings(PORTUGUESE_TO_ENGLISH, word)
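# Usage sketch (illustrative only): after registering this blueprint on a Flask app
# (app.register_blueprint(api)), the endpoints answer GET requests such as
#
#     GET {API_PREFIX}/dictionary/en/<word>/
#     GET {API_PREFIX}/translator/pt-en/<word>/
#
# where API_PREFIX comes from mldictionary_api.const and the response body is
# whatever ResponseAPI.get_meanings() returns for the requested word.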
@api.app_errorhandler(NotFound)
def not_found(err):
return ResponseAPI().handle_error(err)
@api.app_errorhandler(TooManyRequests)
def too_many_requests(err):
return ResponseAPI().handle_error(err)
@api.app_errorhandler(InternalServerError)
def internal_error(err):
return ResponseAPI().handle_error(err)
@api.app_errorhandler(Exception)
def general_exception(err):
err.description = 'Unrecognized error'
err.code = 500 | return ResponseAPI().handle_error(err) |
|
types.rs | use super::{
constants::ConstantSolver, context::Context, Error, ErrorKind, Parser, Result, SourceMetadata,
};
use crate::{
proc::ResolveContext, ArraySize, Bytes, Constant, Expression, Handle, ImageClass,
ImageDimension, ScalarKind, Type, TypeInner, VectorSize,
};
pub fn parse_type(type_name: &str) -> Option<Type> {
match type_name {
"bool" => Some(Type {
name: None,
inner: TypeInner::Scalar {
kind: ScalarKind::Bool,
width: crate::BOOL_WIDTH,
},
}),
"float" => Some(Type {
name: None,
inner: TypeInner::Scalar {
kind: ScalarKind::Float,
width: 4,
},
}),
"double" => Some(Type {
name: None,
inner: TypeInner::Scalar {
kind: ScalarKind::Float,
width: 8,
},
}),
"int" => Some(Type {
name: None,
inner: TypeInner::Scalar {
kind: ScalarKind::Sint,
width: 4,
},
}),
"uint" => Some(Type {
name: None,
inner: TypeInner::Scalar {
kind: ScalarKind::Uint,
width: 4,
},
}),
"sampler" | "samplerShadow" => Some(Type {
name: None,
inner: TypeInner::Sampler {
comparison: type_name == "samplerShadow",
},
}),
word => {
fn kind_width_parse(ty: &str) -> Option<(ScalarKind, u8)> {
Some(match ty {
"" => (ScalarKind::Float, 4),
"b" => (ScalarKind::Bool, crate::BOOL_WIDTH),
"i" => (ScalarKind::Sint, 4),
"u" => (ScalarKind::Uint, 4),
"d" => (ScalarKind::Float, 8),
_ => return None,
})
}
fn size_parse(n: &str) -> Option<VectorSize> {
Some(match n {
"2" => VectorSize::Bi,
"3" => VectorSize::Tri,
"4" => VectorSize::Quad,
_ => return None,
})
}
let vec_parse = |word: &str| {
let mut iter = word.split("vec");
let kind = iter.next()?;
let size = iter.next()?;
let (kind, width) = kind_width_parse(kind)?;
let size = size_parse(size)?;
Some(Type {
name: None,
inner: TypeInner::Vector { size, kind, width },
})
};
let mat_parse = |word: &str| {
let mut iter = word.split("mat");
let kind = iter.next()?;
let size = iter.next()?;
let (_, width) = kind_width_parse(kind)?;
let (columns, rows) = if let Some(size) = size_parse(size) {
(size, size)
} else {
let mut iter = size.split('x');
match (iter.next()?, iter.next()?, iter.next()) {
(col, row, None) => (size_parse(col)?, size_parse(row)?),
_ => return None,
}
};
Some(Type {
name: None,
inner: TypeInner::Matrix {
columns,
rows,
width,
},
})
};
let texture_parse = |word: &str| {
let mut iter = word.split("texture");
let texture_kind = |ty| {
Some(match ty {
"" => ScalarKind::Float,
"i" => ScalarKind::Sint,
"u" => ScalarKind::Uint,
_ => return None,
})
};
let kind = iter.next()?;
let size = iter.next()?;
let kind = texture_kind(kind)?;
let sampled = |multi| ImageClass::Sampled { kind, multi };
let (dim, arrayed, class) = match size {
"1D" => (ImageDimension::D1, false, sampled(false)),
"1DArray" => (ImageDimension::D1, false, sampled(false)),
"2D" => (ImageDimension::D2, false, sampled(false)),
"2DArray" => (ImageDimension::D2, false, sampled(false)),
"2DMS" => (ImageDimension::D2, true, sampled(true)),
"2DMSArray" => (ImageDimension::D2, true, sampled(true)),
"3D" => (ImageDimension::D3, false, sampled(false)),
"Cube" => (ImageDimension::Cube, false, sampled(false)),
"CubeArray" => (ImageDimension::D2, false, sampled(false)),
_ => return None,
};
Some(Type {
name: None,
inner: TypeInner::Image {
dim,
arrayed,
class,
},
})
};
vec_parse(word)
.or_else(|| mat_parse(word))
.or_else(|| texture_parse(word))
}
}
}
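// Usage sketch (illustrative only, not part of the parser): a few GLSL type names
// and the naga types they map to, given the widths used above:
//
//     parse_type("float")          // Scalar { kind: Float, width: 4 }
//     parse_type("ivec3")          // Vector { size: Tri, kind: Sint, width: 4 }
//     parse_type("mat2x3")         // Matrix { columns: Bi, rows: Tri, width: 4 }
//     parse_type("texture2DArray") // Image { dim: D2, arrayed: true, class: Sampled { .. } }
//     parse_type("not_a_type")     // None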
pub fn scalar_components(ty: &TypeInner) -> Option<(ScalarKind, Bytes)> {
match *ty {
TypeInner::Scalar { kind, width } => Some((kind, width)),
TypeInner::Vector { kind, width, .. } => Some((kind, width)),
TypeInner::Matrix { width, .. } => Some((ScalarKind::Float, width)),
TypeInner::ValuePointer { kind, width, .. } => Some((kind, width)),
_ => None,
}
}
pub fn type_power(kind: ScalarKind, width: Bytes) -> Option<u32> {
Some(match kind {
ScalarKind::Sint => 0,
ScalarKind::Uint => 1,
ScalarKind::Float if width == 4 => 2,
ScalarKind::Float => 3,
ScalarKind::Bool => return None,
})
}
impl Parser {
pub(crate) fn typifier_grow(
&self,
ctx: &mut Context,
handle: Handle<Expression>,
meta: SourceMetadata,
) -> Result<()> {
let resolve_ctx = ResolveContext {
constants: &self.module.constants,
types: &self.module.types,
global_vars: &self.module.global_variables,
local_vars: &ctx.locals,
functions: &self.module.functions,
arguments: &ctx.arguments,
};
ctx.typifier
.grow(handle, &ctx.expressions, &resolve_ctx)
.map_err(|error| Error {
kind: ErrorKind::SemanticError(format!("Can't resolve type: {:?}", error).into()),
meta,
})
}
pub(crate) fn resolve_type<'b>(
&'b self,
ctx: &'b mut Context,
handle: Handle<Expression>,
meta: SourceMetadata,
) -> Result<&'b TypeInner> {
self.typifier_grow(ctx, handle, meta)?;
Ok(ctx.typifier.get(handle, &self.module.types))
}
/// Invalidates the cached type resolution for `handle` forcing a recomputation
pub(crate) fn invalidate_expression<'b>(
&'b self,
ctx: &'b mut Context,
handle: Handle<Expression>,
meta: SourceMetadata,
) -> Result<()> {
let resolve_ctx = ResolveContext {
constants: &self.module.constants,
types: &self.module.types,
global_vars: &self.module.global_variables,
local_vars: &ctx.locals,
functions: &self.module.functions,
arguments: &ctx.arguments,
};
ctx.typifier
.invalidate(handle, &ctx.expressions, &resolve_ctx)
.map_err(|error| Error {
kind: ErrorKind::SemanticError(format!("Can't resolve type: {:?}", error).into()),
meta,
})
}
pub(crate) fn | (
&mut self,
ctx: &Context,
root: Handle<Expression>,
meta: SourceMetadata,
) -> Result<Handle<Constant>> {
let mut solver = ConstantSolver {
types: &self.module.types,
expressions: &ctx.expressions,
constants: &mut self.module.constants,
};
solver.solve(root).map_err(|e| Error {
kind: e.into(),
meta,
})
}
pub(crate) fn maybe_array(
&mut self,
base: Handle<Type>,
meta: SourceMetadata,
array_specifier: Option<(ArraySize, SourceMetadata)>,
) -> Handle<Type> {
array_specifier
.map(|(size, size_meta)| {
self.module.types.fetch_or_append(
Type {
name: None,
inner: TypeInner::Array {
base,
size,
stride: self.module.types[base].inner.span(&self.module.constants),
},
},
meta.union(&size_meta).as_span(),
)
})
.unwrap_or(base)
}
}
| solve_constant |
graphic_manager.rs | /*
* Graphic Manager
* Eventually we want to build something like an stdio.rs and make this a submodule of it
*/
pub mod font;
pub mod frame_buffer_manager;
pub mod text_buffer_driver;
use self::font::FontManager;
use self::font::FontType;
use self::frame_buffer_manager::FrameBufferManager;
use self::text_buffer_driver::TextBufferDriver;
use crate::arch::target_arch::device::vga_text::VgaTextDriver;
use crate::kernel::drivers::multiboot::FrameBufferInfo;
use crate::kernel::manager_cluster::get_kernel_manager_cluster;
use crate::kernel::memory_manager::data_type::{Address, VAddress};
use crate::kernel::sync::spin_lock::{Mutex, SpinLockFlag};
use crate::kernel::tty::Writer;
use core::fmt;
pub struct GraphicManager {
lock: SpinLockFlag,
text: Mutex<VgaTextDriver>,
graphic: Mutex<FrameBufferManager>,
is_text_mode: bool,
font: Mutex<FontManager>,
cursor: Mutex<Cursor>,
is_font_loaded: bool,
}
struct Cursor {
x: usize,
y: usize,
}
impl GraphicManager {
pub const fn new() -> Self {
Self {
lock: SpinLockFlag::new(),
is_text_mode: false,
text: Mutex::new(VgaTextDriver::new()),
graphic: Mutex::new(FrameBufferManager::new()),
font: Mutex::new(FontManager::new()),
cursor: Mutex::new(Cursor { x: 0, y: 0 }),
is_font_loaded: false,
}
}
pub const fn is_text_mode(&self) -> bool {
self.is_text_mode
}
pub fn set_frame_buffer_memory_permission(&mut self) -> bool {
let _lock = self.lock.lock();
if self.is_text_mode {
self.text
.lock()
.unwrap()
.set_frame_buffer_memory_permission()
} else {
self.graphic
.lock()
.unwrap()
.set_frame_buffer_memory_permission()
}
}
pub fn init(&mut self, frame_buffer_info: &FrameBufferInfo) {
let _lock = self.lock.lock();
if !self
.graphic
.lock()
.unwrap()
.init_by_multiboot_information(frame_buffer_info)
{
self.text
.lock()
.unwrap()
.init_by_multiboot_information(frame_buffer_info);
self.is_text_mode = true;
}
}
pub fn clear_screen(&mut self) {
let _lock = self.lock.lock();
if self.is_text_mode {
self.text.lock().unwrap().clear_screen();
} else {
self.graphic.lock().unwrap().clear_screen();
}
}
pub fn load_font(
&mut self,
virtual_font_address: VAddress,
size: usize,
font_type: FontType,
) -> bool {
let _lock = self.lock.lock();
self.is_font_loaded = self
.font
.lock()
.unwrap()
.load(virtual_font_address, size, font_type);
self.is_font_loaded
}
fn draw_string(&self, s: &str, foreground_color: u32, background_color: u32) -> fmt::Result {
/* assume locked */
if !self.is_font_loaded {
return Err(fmt::Error {});
}
let mut cursor = self.cursor.lock().unwrap();
let mut font_manager = self.font.lock().unwrap();
let mut frame_buffer_manager = self.graphic.lock().unwrap();
let frame_buffer_size = frame_buffer_manager.get_frame_buffer_size();
for c in s.chars().into_iter() {
if c == '\n' {
cursor.x = 0;
cursor.y += font_manager.get_max_font_height();
} else if c == '\r' {
cursor.x = 0;
} else if c.is_control() {
} else {
let font_data = font_manager.get_font_data(c);
if font_data.is_none() {
continue;
}
let font_data = font_data.unwrap();
let font_bottom = font_manager.get_ascent() as isize - font_data.y_offset as isize;
let font_top = font_bottom as usize - font_data.height as usize;
let font_left = font_data.x_offset as usize;
if frame_buffer_size.0 < cursor.x + font_data.width as usize {
cursor.x = 0;
cursor.y += font_manager.get_max_font_height();
}
if frame_buffer_size.1 < cursor.y + font_manager.get_max_font_height() {
let scroll_y =
font_manager.get_max_font_height() + cursor.y - frame_buffer_size.1;
frame_buffer_manager.scroll_screen(scroll_y);
frame_buffer_manager.fill(
0,
frame_buffer_size.1 - scroll_y,
frame_buffer_size.0,
frame_buffer_size.1,
0,
); /* erase the last line */
cursor.y -= scroll_y;
}
frame_buffer_manager.write_monochrome_bitmap(
font_data.bitmap_address.to_usize(),
font_data.width as usize,
font_data.height as usize,
cursor.x + font_left,
cursor.y + font_top,
foreground_color,
background_color,
true,
);
cursor.x += font_data.device_width as usize;
}
}
Ok(())
}
pub fn puts(&self, string: &str, foreground_color: u32, background_color: u32) -> bool {
get_kernel_manager_cluster()
.serial_port_manager
.send_str(string);
let _lock = if let Ok(l) = self.lock.try_lock() {
l
} else {
return true;
};
if self.is_text_mode {
self.text.lock().unwrap().puts(string)
} else if self.is_font_loaded {
self.draw_string(string, foreground_color, background_color)
.is_ok()
} else {
true
}
}
pub fn get_frame_buffer_size(&self) -> (usize /*x*/, usize /*y*/) {
self.graphic.lock().unwrap().get_frame_buffer_size()
}
pub fn fill(&mut self, start_x: usize, start_y: usize, end_x: usize, end_y: usize, color: u32) {
if !self.is_text_mode {
let _lock = self.lock.lock();
self.graphic
.lock()
.unwrap()
.fill(start_x, start_y, end_x, end_y, color);
}
}
pub fn write_bitmap(
&mut self,
buffer: usize,
depth: u8,
size_x: usize,
size_y: usize,
offset_x: usize,
offset_y: usize,
) -> bool {
if !self.is_text_mode {
let _lock = self.lock.lock();
self.graphic
.lock()
.unwrap()
.write_bitmap(buffer, depth, size_x, size_y, offset_x, offset_y, false)
} else {
false
}
}
}
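// Usage sketch (illustrative only; frame_buffer_info, font_address, font_size and
// font_type stand in for values obtained elsewhere during kernel setup):
//
//     let mut manager = GraphicManager::new();
//     manager.init(&frame_buffer_info); // falls back to VGA text mode on failure
//     manager.load_font(font_address, font_size, font_type);
//     manager.clear_screen();
//     manager.puts("hello", 0x00ff_ffff, 0x0000_0000);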
impl Writer for GraphicManager {
fn write(
&self,
buf: &[u8],
size_to_write: usize,
foreground_color: u32,
background_color: u32,
) -> fmt::Result {
use core::str;
if let Ok(s) = str::f | rom_utf8(buf.split_at(size_to_write).0) {
if self.puts(s, foreground_color, background_color) {
// good enough for now
Ok(())
} else {
Err(fmt::Error {})
}
} else {
Err(fmt::Error {})
}
}
}
|
|
toml_serialize.rs | use std::collections::HashMap;
use toml;
use crate::models::bookmark::Bookmark;
use crate::utils::error::*;
pub fn | (store : HashMap<String, String> ) -> String {
let mut store_map : HashMap<String, Vec<Bookmark>> = HashMap::new();
let mut bookmark_vec: Vec<Bookmark> = Vec::new();
for val in store {
let bm = Bookmark::new(val.0, val.1);
bookmark_vec.push(bm);
}
store_map.insert("bookmark".into(), bookmark_vec);
let toml = toml::to_string(&store_map);
match toml {
Ok(str) => { return str; }
Err(err) => {
print_error_and_exit(format!("Serialization error of store: {}", err.to_string()),
ErrorCode::StoreFileSerializationError);
}
}
return "".into();
} | create_store_content |
refs.rs | //! `refs` or the references of dag-pb and other supported IPLD formats functionality.
use crate::ipld::{decode_ipld, Ipld};
use crate::{Block, Ipfs, IpfsTypes};
use async_stream::stream;
use cid::{self, Cid};
use futures::stream::Stream;
use std::borrow::Borrow;
use std::collections::HashSet;
use std::collections::VecDeque;
use std::fmt;
/// Represents a single link in an IPLD tree encountered during a `refs` walk.
#[derive(Clone, PartialEq, Eq)]
pub struct Edge {
/// Source document which links to [`Edge::destination`]
pub source: Cid,
/// The destination document
pub destination: Cid,
/// The name of the link, in case of dag-pb
pub name: Option<String>,
}
impl fmt::Debug for Edge {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
fmt,
"Edge {{ source: {}, destination: {}, name: {:?} }}",
self.source, self.destination, self.name
)
}
}
#[derive(Debug, thiserror::Error)]
pub enum IpldRefsError {
#[error("nested ipld document parsing failed")]
Block(#[from] crate::ipld::BlockError),
#[error("loading failed")]
Loading(#[from] crate::Error),
#[error("block not found locally: {}", .0)]
BlockNotFound(Cid),
}
pub(crate) struct IpldRefs {
max_depth: Option<u64>,
unique: bool,
download_blocks: bool,
}
impl Default for IpldRefs {
fn default() -> Self {
IpldRefs {
max_depth: None, // unlimited
unique: false,
download_blocks: true,
}
}
}
impl IpldRefs {
/// Overrides the default maximum depth of "unlimited" with the given maximum depth. Zero is
/// allowed and will result in an empty stream.
#[allow(dead_code)]
pub fn with_max_depth(mut self, depth: u64) -> IpldRefs {
self.max_depth = Some(depth);
self
}
/// Overrides the default of returning all links by suppressing the links which have already
/// been reported once.
pub fn with_only_unique(mut self) -> IpldRefs {
self.unique = true;
self
}
/// Overrides the default of allowing the refs operation to fetch blocks. Useful at least
/// internally in rust-ipfs to implement pinning recursively. This changes the stream's
/// behaviour to stop on first block which is not found locally.
pub fn with_existing_blocks(mut self) -> IpldRefs {
self.download_blocks = false;
self
}
pub fn refs_of_resolved<'a, Types, MaybeOwned, Iter>(
self,
ipfs: MaybeOwned,
iplds: Iter,
) -> impl Stream<Item = Result<Edge, IpldRefsError>> + Send + 'a
where
Types: IpfsTypes,
MaybeOwned: Borrow<Ipfs<Types>> + Send + 'a,
Iter: IntoIterator<Item = (Cid, Ipld)> + Send + 'a,
{
iplds_refs_inner(ipfs, iplds, self)
}
}
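// Usage sketch (illustrative only): the options above combine builder-style before
// the walk, e.g. for recursive pinning over blocks that must already be local:
//
//     let stream = IpldRefs::default()
//         .with_only_unique()
//         .with_existing_blocks()
//         .refs_of_resolved(&ipfs, iplds);
//
// where `ipfs` is an Ipfs handle and `iplds` an iterator of (Cid, Ipld) pairs the
// caller has already resolved.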
/// Gather links as edges between two documents from all of the `iplds` which represent the
/// document and it's original `Cid`, as the `Ipld` can be a subtree of the document.
///
/// This stream does not stop on **error**.
///
/// # Differences from other implementations
///
/// `js-ipfs` appears to do a recursive descent on all links. Judging from the tests, `go-ipfs`
/// implements this in a similar fashion. This implementation is breadth-first, mainly to keep it
/// simple.
///
/// Related: https://github.com/ipfs/js-ipfs/pull/2982
///
/// # Lifetime of returned stream
///
/// Depending on how this function is called, the lifetime will be tied to the lifetime of given
/// `&Ipfs` or `'static` when given ownership of `Ipfs`.
pub fn iplds_refs<'a, Types, MaybeOwned, Iter>(
ipfs: MaybeOwned,
iplds: Iter,
max_depth: Option<u64>,
unique: bool,
) -> impl Stream<Item = Result<Edge, crate::ipld::BlockError>> + Send + 'a
where
Types: IpfsTypes,
MaybeOwned: Borrow<Ipfs<Types>> + Send + 'a,
Iter: IntoIterator<Item = (Cid, Ipld)> + Send + 'a,
{
use futures::stream::TryStreamExt;
let opts = IpldRefs {
max_depth,
unique,
download_blocks: true,
};
iplds_refs_inner(ipfs, iplds, opts).map_err(|e| match e {
IpldRefsError::Block(e) => e,
x => unreachable!(
"iplds_refs_inner should not return other errors for download_blocks: false; {}",
x
),
})
}
fn iplds_refs_inner<'a, Types, MaybeOwned, Iter>(
ipfs: MaybeOwned,
iplds: Iter,
opts: IpldRefs,
) -> impl Stream<Item = Result<Edge, IpldRefsError>> + Send + 'a
where
Types: IpfsTypes,
MaybeOwned: Borrow<Ipfs<Types>> + Send + 'a,
Iter: IntoIterator<Item = (Cid, Ipld)>,
{
let mut work = VecDeque::new();
let mut queued_or_visited = HashSet::new();
let IpldRefs {
max_depth,
unique,
download_blocks,
} = opts;
let empty_stream = max_depth.map(|n| n == 0).unwrap_or(false);
// double check the max_depth before filling the work and queued_or_visited up just in case we
// are going to be returning an empty stream
if !empty_stream {
// not building these before moving the work and hashset into the stream would impose
// apparently impossible bounds on `Iter`, in addition to `Send + 'a`.
for (origin, ipld) in iplds {
for (link_name, next_cid) in ipld_links(&origin, ipld) {
if unique && !queued_or_visited.insert(next_cid.clone()) {
trace!("skipping already queued {}", next_cid);
continue;
}
work.push_back((0, next_cid, origin.clone(), link_name));
}
}
}
stream! {
if empty_stream {
return;
}
while let Some((depth, cid, source, link_name)) = work.pop_front() {
let traverse_links = match max_depth {
Some(d) if d <= depth => {
// important to continue instead of stopping
continue;
},
// no need to list links which would be filtered out
Some(d) if depth + 1 == d => false,
_ => true
};
// if this is not bound to a local variable it'll introduce a Sync requirement on
// `MaybeOwned` which we don't necessarily need.
let borrowed = ipfs.borrow();
let data = if download_blocks {
match borrowed.get_block(&cid).await {
Ok(Block { data, .. }) => data,
Err(e) => {
warn!("failed to load {}, linked from {}: {}", cid, source, e);
// TODO: yield error msg
// unsure in which cases this happens, because we'll start to search the content
// and stop only when request has been cancelled (FIXME: no way to stop this
// operation)
continue;
}
}
} else {
match borrowed.repo.get_block_now(&cid).await {
Ok(Some(Block { data, .. })) => data,
Ok(None) => {
yield Err(IpldRefsError::BlockNotFound(cid.to_owned()));
return;
}
Err(e) => {
yield Err(IpldRefsError::from(e));
return;
}
}
};
trace!(cid = %cid, "loaded next");
let ipld = match decode_ipld(&cid, &data) {
Ok(ipld) => ipld,
Err(e) => {
warn!(cid = %cid, source = %source, "failed to parse: {}", e);
// go-ipfs on raw Qm hash:
// > failed to decode Protocol Buffers: incorrectly formatted merkledag node: unmarshal failed. proto: illegal wireType 6
yield Err(e.into());
continue;
}
};
if traverse_links {
for (link_name, next_cid) in ipld_links(&cid, ipld) {
if unique && !queued_or_visited.insert(next_cid.clone()) {
trace!(queued = %next_cid, "skipping already queued");
continue;
}
work.push_back((depth + 1, next_cid, cid.clone(), link_name));
}
}
yield Ok(Edge { source, destination: cid, name: link_name });
}
}
}
fn ipld_links(
cid: &Cid,
ipld: Ipld,
) -> impl Iterator<Item = (Option<String>, Cid)> + Send + 'static {
// a wrapping iterator without there being a libipld_base::IpldIntoIter might not be doable
// with safe code
let items = if cid.codec() == cid::Codec::DagProtobuf {
dagpb_links(ipld)
} else {
ipld.iter()
.filter_map(|val| match val {
Ipld::Link(cid) => Some(cid),
_ => None,
})
.cloned()
// only dag-pb ever has any link names, probably because in cbor the "name" on the LHS
// might have a different meaning from a "link name" in dag-pb ... Doesn't seem
// immediatedly obvious why this is done.
.map(|cid| (None, cid))
.collect::<Vec<(Option<String>, Cid)>>()
};
items.into_iter()
}
/// Special handling for the structure created while loading dag-pb as ipld.
///
/// # Panics
///
/// If the dag-pb ipld tree doesn't conform to expectations, as in, we are out of sync with the
/// libipld crate. This is on purpose.
fn dagpb_links(ipld: Ipld) -> Vec<(Option<String>, Cid)> |
#[cfg(test)]
mod tests {
use super::{ipld_links, iplds_refs, Edge};
use crate::ipld::{decode_ipld, validate};
use crate::{Block, Node};
use cid::Cid;
use futures::stream::TryStreamExt;
use hex_literal::hex;
use std::collections::HashSet;
use std::convert::TryFrom;
#[test]
fn dagpb_links() {
// this is the same as in ipfs-http::v0::refs::path::tests::walk_dagpb_links
let payload = hex!(
"12330a2212206aad27d7e2fc815cd15bf679535062565dc927a831547281
fc0af9e5d7e67c74120b6166726963616e2e747874180812340a221220fd
36ac5279964db0cba8f7fa45f8c4c44ef5e2ff55da85936a378c96c9c632
04120c616d6572696361732e747874180812360a2212207564c20415869d
77a8a40ca68a9158e397dd48bdff1325cdb23c5bcd181acd17120e617573
7472616c69616e2e7478741808"
);
let cid = Cid::try_from("QmbrFTo4s6H23W6wmoZKQC2vSogGeQ4dYiceSqJddzrKVa").unwrap();
let decoded = decode_ipld(&cid, &payload).unwrap();
let links = ipld_links(&cid, decoded)
.map(|(name, _)| name.unwrap())
.collect::<Vec<_>>();
assert_eq!(links, ["african.txt", "americas.txt", "australian.txt",]);
}
#[tokio::test(max_threads = 1)]
async fn all_refs_from_root() {
let Node { ipfs, .. } = preloaded_testing_ipfs().await;
let (root, dag0, unixfs0, dag1, unixfs1) = (
// this is the dag with content: [dag0, unixfs0, dag1, unixfs1]
"bafyreihpc3vupfos5yqnlakgpjxtyx3smkg26ft7e2jnqf3qkyhromhb64",
// {foo: dag1, bar: unixfs0}
"bafyreidquig3arts3bmee53rutt463hdyu6ff4zeas2etf2h2oh4dfms44",
"QmPJ4A6Su27ABvvduX78x2qdWMzkdAYxqeH5TVrHeo3xyy",
// {foo: unixfs1}
"bafyreibvjvcv745gig4mvqs4hctx4zfkono4rjejm2ta6gtyzkqxfjeily",
"QmRgutAxd8t7oGkSm4wmeuByG6M51wcTso6cubDdQtuEfL",
);
let root_block = ipfs.get_block(&Cid::try_from(root).unwrap()).await.unwrap();
let ipld = decode_ipld(root_block.cid(), root_block.data()).unwrap();
let all_edges: Vec<_> = iplds_refs(ipfs, vec![(root_block.cid, ipld)], None, false)
.map_ok(
|Edge {
source,
destination,
..
}| (source.to_string(), destination.to_string()),
)
.try_collect()
.await
.unwrap();
// not sure why go-ipfs outputs this order, this is more like dfs?
let expected = [
(root, dag0),
(dag0, unixfs0),
(dag0, dag1),
(dag1, unixfs1),
(root, unixfs0),
(root, dag1),
(dag1, unixfs1),
(root, unixfs1),
];
println!("found edges:\n{:#?}", all_edges);
assert_edges(&expected, all_edges.as_slice());
}
#[tokio::test(max_threads = 1)]
async fn all_unique_refs_from_root() {
let Node { ipfs, .. } = preloaded_testing_ipfs().await;
let (root, dag0, unixfs0, dag1, unixfs1) = (
// this is the dag with content: [dag0, unixfs0, dag1, unixfs1]
"bafyreihpc3vupfos5yqnlakgpjxtyx3smkg26ft7e2jnqf3qkyhromhb64",
// {foo: dag1, bar: unixfs0}
"bafyreidquig3arts3bmee53rutt463hdyu6ff4zeas2etf2h2oh4dfms44",
"QmPJ4A6Su27ABvvduX78x2qdWMzkdAYxqeH5TVrHeo3xyy",
// {foo: unixfs1}
"bafyreibvjvcv745gig4mvqs4hctx4zfkono4rjejm2ta6gtyzkqxfjeily",
"QmRgutAxd8t7oGkSm4wmeuByG6M51wcTso6cubDdQtuEfL",
);
let root_block = ipfs.get_block(&Cid::try_from(root).unwrap()).await.unwrap();
let ipld = decode_ipld(root_block.cid(), root_block.data()).unwrap();
let destinations: HashSet<_> = iplds_refs(ipfs, vec![(root_block.cid, ipld)], None, true)
.map_ok(|Edge { destination, .. }| destination.to_string())
.try_collect()
.await
.unwrap();
// go-ipfs output:
// bafyreihpc3vupfos5yqnlakgpjxtyx3smkg26ft7e2jnqf3qkyhromhb64 -> bafyreidquig3arts3bmee53rutt463hdyu6ff4zeas2etf2h2oh4dfms44
// bafyreihpc3vupfos5yqnlakgpjxtyx3smkg26ft7e2jnqf3qkyhromhb64 -> QmPJ4A6Su27ABvvduX78x2qdWMzkdAYxqeH5TVrHeo3xyy
// bafyreihpc3vupfos5yqnlakgpjxtyx3smkg26ft7e2jnqf3qkyhromhb64 -> bafyreibvjvcv745gig4mvqs4hctx4zfkono4rjejm2ta6gtyzkqxfjeily
// bafyreihpc3vupfos5yqnlakgpjxtyx3smkg26ft7e2jnqf3qkyhromhb64 -> QmRgutAxd8t7oGkSm4wmeuByG6M51wcTso6cubDdQtuEfL
let expected = [dag0, unixfs0, dag1, unixfs1]
.iter()
.map(|&s| String::from(s))
.collect::<HashSet<_>>();
let diff = destinations
.symmetric_difference(&expected)
.map(|s| s.as_str())
.collect::<Vec<&str>>();
assert!(diff.is_empty(), "{:?}", diff);
}
fn assert_edges(expected: &[(&str, &str)], actual: &[(String, String)]) {
let expected: HashSet<_> = expected.iter().map(|&(a, b)| (a, b)).collect();
let actual: HashSet<_> = actual
.iter()
.map(|(a, b)| (a.as_str(), b.as_str()))
.collect();
let diff: Vec<_> = expected.symmetric_difference(&actual).collect();
assert!(diff.is_empty(), "{:#?}", diff);
}
async fn preloaded_testing_ipfs() -> Node {
let ipfs = Node::new("test_node").await;
let blocks = [
(
// echo -n '{ "foo": { "/": "bafyreibvjvcv745gig4mvqs4hctx4zfkono4rjejm2ta6gtyzkqxfjeily" }, "bar": { "/": "QmPJ4A6Su27ABvvduX78x2qdWMzkdAYxqeH5TVrHeo3xyy" } }' | /ipfs dag put
"bafyreidquig3arts3bmee53rutt463hdyu6ff4zeas2etf2h2oh4dfms44",
&hex!("a263626172d82a58230012200e317512b6f9f86e015a154cb97a9ddcdc7e372cccceb3947921634953c6537463666f6fd82a58250001711220354d455ff3a641b8cac25c38a77e64aa735dc8a48966a60f1a78caa172a4885e")[..]
),
(
// echo barfoo > file2 && ipfs add file2
"QmPJ4A6Su27ABvvduX78x2qdWMzkdAYxqeH5TVrHeo3xyy",
&hex!("0a0d08021207626172666f6f0a1807")[..]
),
(
// echo -n '{ "foo": { "/": "QmRgutAxd8t7oGkSm4wmeuByG6M51wcTso6cubDdQtuEfL" } }' | ipfs dag put
"bafyreibvjvcv745gig4mvqs4hctx4zfkono4rjejm2ta6gtyzkqxfjeily",
&hex!("a163666f6fd82a582300122031c3d57080d8463a3c63b2923df5a1d40ad7a73eae5a14af584213e5f504ac33")[..]
),
(
// echo foobar > file1 && ipfs add file1
"QmRgutAxd8t7oGkSm4wmeuByG6M51wcTso6cubDdQtuEfL",
&hex!("0a0d08021207666f6f6261720a1807")[..]
),
(
// echo -e '[{"/":"bafyreidquig3arts3bmee53rutt463hdyu6ff4zeas2etf2h2oh4dfms44"},{"/":"QmPJ4A6Su27ABvvduX78x2qdWMzkdAYxqeH5TVrHeo3xyy"},{"/":"bafyreibvjvcv745gig4mvqs4hctx4zfkono4rjejm2ta6gtyzkqxfjeily"},{"/":"QmRgutAxd8t7oGkSm4wmeuByG6M51wcTso6cubDdQtuEfL"}]' | ./ipfs dag put
"bafyreihpc3vupfos5yqnlakgpjxtyx3smkg26ft7e2jnqf3qkyhromhb64",
&hex!("84d82a5825000171122070a20db04672d858427771a4e7cf6ce3c53c52f32404b4499747d38fc19592e7d82a58230012200e317512b6f9f86e015a154cb97a9ddcdc7e372cccceb3947921634953c65374d82a58250001711220354d455ff3a641b8cac25c38a77e64aa735dc8a48966a60f1a78caa172a4885ed82a582300122031c3d57080d8463a3c63b2923df5a1d40ad7a73eae5a14af584213e5f504ac33")[..]
)
];
for (cid_str, data) in blocks.iter() {
let cid = Cid::try_from(*cid_str).unwrap();
validate(&cid, &data).unwrap();
decode_ipld(&cid, &data).unwrap();
let block = Block {
cid,
data: (*data).into(),
};
ipfs.put_block(block).await.unwrap();
}
ipfs
}
}
| {
let links = match ipld {
Ipld::Map(mut m) => m.remove("Links"),
// lets assume this means "no links"
_ => return Vec::new(),
};
let links = match links {
Some(Ipld::List(v)) => v,
x => panic!("Expected dag-pb2ipld \"Links\" to be a list, got: {:?}", x),
};
links
.into_iter()
.enumerate()
.filter_map(|(i, ipld)| {
match ipld {
Ipld::Map(mut m) => {
let link = match m.remove("Hash") {
Some(Ipld::Link(cid)) => cid,
Some(x) => panic!(
"Expected dag-pb2ipld \"Links[{}]/Hash\" to be a link, got: {:?}",
i, x
),
None => return None,
};
let name = match m.remove("Name") {
// not sure of this, not covered by tests, though these are only
// present for multi-block files so maybe it's better to panic
Some(Ipld::String(s)) if s == "/" => {
unimplemented!("Slashes as the name of link")
}
Some(Ipld::String(s)) => Some(s),
Some(x) => panic!(
"Expected dag-pb2ipld \"Links[{}]/Name\" to be a string, got: {:?}",
i, x
),
// not too sure of this, this could be the index as string as well?
None => unimplemented!(
"Default name for dag-pb2ipld links, should it be index?"
),
};
Some((name, link))
}
x => panic!(
"Expected dag-pb2ipld \"Links[{}]\" to be a map, got: {:?}",
i, x
),
}
})
.collect()
} |
getLinkedService.go | // *** WARNING: this file was generated by the Pulumi SDK Generator. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
package v20180601
import (
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)
func LookupLinkedService(ctx *pulumi.Context, args *LookupLinkedServiceArgs, opts ...pulumi.InvokeOption) (*LookupLinkedServiceResult, error) {
var rv LookupLinkedServiceResult
err := ctx.Invoke("azure-nextgen:datafactory/v20180601:getLinkedService", args, &rv, opts...)
if err != nil |
return &rv, nil
}
type LookupLinkedServiceArgs struct {
// The factory name.
FactoryName string `pulumi:"factoryName"`
// The linked service name.
LinkedServiceName string `pulumi:"linkedServiceName"`
// The resource group name.
ResourceGroupName string `pulumi:"resourceGroupName"`
}
// Linked service resource type.
type LookupLinkedServiceResult struct {
// Etag identifies change in the resource.
Etag string `pulumi:"etag"`
// The resource name.
Name string `pulumi:"name"`
// Properties of linked service.
Properties interface{} `pulumi:"properties"`
// The resource type.
Type string `pulumi:"type"`
}
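// Usage sketch (illustrative only; the factory, linked service and resource group
// names are placeholders):
//
//     res, err := LookupLinkedService(ctx, &LookupLinkedServiceArgs{
//         FactoryName:       "example-factory",
//         LinkedServiceName: "example-linked-service",
//         ResourceGroupName: "example-rg",
//     })
//     if err != nil {
//         return err
//     }
//     _ = res.Properties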
| {
return nil, err
} |
datasets.py | import fnmatch, warnings, json, os
import numpy as np
from six import string_types
from tinydb.storages import MemoryStorage
from tinydb import where
from espei.utils import PickleableTinyDB
from espei.core_utils import recursive_map
class DatasetError(Exception):
"""Exception raised when datasets are invalid."""
pass
def check_dataset(dataset):
"""Ensure that the dataset is valid and consistent.
Currently supports the following validation checks:
* data shape is valid
* phases and components used match phases and components entered
* individual shapes of keys, such as ZPF, sublattice configs and site ratios
Planned validation checks:
* all required keys are present
Note that this follows some of the implicit assumptions in ESPEI at the time
of writing, such that conditions are only P, T, configs for single phase and
essentially only T for ZPF data.
Parameters
----------
dataset : dict
Dictionary of the standard ESPEI dataset.
Returns
-------
None
Raises
------
DatasetError
If an error is found in the dataset
"""
is_activity = dataset['output'].startswith('ACR')
is_zpf = dataset['output'] == 'ZPF'
is_single_phase = (not is_zpf) and (not is_activity)
components = dataset['components']
conditions = dataset['conditions']
values = dataset['values']
phases = dataset['phases']
if is_single_phase:
solver = dataset['solver']
sublattice_configurations = solver['sublattice_configurations']
sublattice_site_ratios = solver['sublattice_site_ratios']
sublattice_occupancies = solver.get('sublattice_occupancies', None)
# check for mixing
is_mixing = any([any([isinstance(subl, list) for subl in config]) for config in sublattice_configurations])
# pad the values of sublattice occupancies if there is no mixing
if sublattice_occupancies is None and not is_mixing:
sublattice_occupancies = [None]*len(sublattice_configurations)
elif sublattice_occupancies is None:
raise DatasetError('At least one sublattice in the following sublattice configurations is mixing, but the "sublattice_occupancies" key is empty: {}'.format(sublattice_configurations))
if is_activity:
conditions = dataset['conditions']
ref_state = dataset['reference_state']
comp_conditions = {k: v for k, v in conditions.items() if k.startswith('X_')}
# check that the shape of conditions match the values
num_pressure = np.atleast_1d(conditions['P']).size
num_temperature = np.atleast_1d(conditions['T']).size
if is_activity:
values_shape = np.array(values).shape
# check each composition condition is the same shape
num_x_conds = [len(v) for _, v in comp_conditions.items()]
if num_x_conds.count(num_x_conds[0]) != len(num_x_conds):
raise DatasetError('All compositions in conditions are not the same shape. Note that conditions cannot be broadcast. Composition conditions are {}'.format(comp_conditions))
conditions_shape = (num_pressure, num_temperature, num_x_conds[0])
if conditions_shape != values_shape:
raise DatasetError('Shape of conditions (P, T, compositions): {} does not match the shape of the values {}.'.format(conditions_shape, values_shape))
elif is_single_phase:
values_shape = np.array(values).shape
num_configs = len(dataset['solver']['sublattice_configurations'])
conditions_shape = (num_pressure, num_temperature, num_configs)
if conditions_shape != values_shape:
raise DatasetError('Shape of conditions (P, T, configs): {} does not match the shape of the values {}.'.format(conditions_shape, values_shape))
elif is_zpf:
values_shape = (len(values))
conditions_shape = (num_temperature)
if conditions_shape != values_shape:
raise DatasetError('Shape of conditions (T): {} does not match the shape of the values {}.'.format(conditions_shape, values_shape))
# check that all of the correct phases are present
if is_zpf:
phases_entered = set(phases)
phases_used = set()
for zpf in values:
for tieline in zpf:
phases_used.add(tieline[0])
if len(phases_entered - phases_used) > 0:
raise DatasetError('Phases entered {} do not match phases used {}.'.format(phases_entered, phases_used))
# check that all of the components used match the components entered
components_entered = set(components)
components_used = set()
if is_single_phase:
for config in sublattice_configurations:
for sl in config:
if isinstance(sl, list):
components_used.update(set(sl))
else:
components_used.add(sl)
comp_dof = 0
elif is_activity:
components_used.update({c.split('_')[1] for c in comp_conditions.keys()})
# mass balance of components
comp_dof = len(comp_conditions.keys())
elif is_zpf:
for zpf in values:
for tieline in zpf:
tieline_comps = set(tieline[1])
components_used.update(tieline_comps)
if len(components_entered - tieline_comps - {'VA'}) != 1:
raise DatasetError('Degree of freedom error for entered components {} in tieline {} of ZPF {}'.format(components_entered, tieline, zpf))
# handle special case of mass balance in ZPFs
comp_dof = 1
if len(components_entered - components_used - {'VA'}) > comp_dof or len(components_used - components_entered) > 0:
raise DatasetError('Components entered {} do not match components used {}.'.format(components_entered, components_used))
# check that the ZPF values are formatted properly
if is_zpf:
for zpf in values:
for tieline in zpf:
phase = tieline[0]
component_list = tieline[1]
mole_fraction_list = tieline[2]
# check that the phase is a string, components a list of strings,
                # and the fractions are a list of floats
if not isinstance(phase, string_types):
raise DatasetError('The first element in the tieline {} for the ZPF point {} should be a string. Instead it is a {} of value {}'.format(tieline, zpf, type(phase), phase))
if not all([isinstance(comp, string_types) for comp in component_list]):
raise DatasetError('The second element in the tieline {} for the ZPF point {} should be a list of strings. Instead it is a {} of value {}'.format(tieline, zpf, type(component_list), component_list))
if not all([(isinstance(mole_frac, (int, float)) or mole_frac is None) for mole_frac in mole_fraction_list]):
raise DatasetError('The last element in the tieline {} for the ZPF point {} should be a list of numbers. Instead it is a {} of value {}'.format(tieline, zpf, type(mole_fraction_list), mole_fraction_list))
                # check that the components list and the mole fractions list have the same length
if len(component_list) != len(mole_fraction_list):
raise DatasetError('The length of the components list and mole fractions list in tieline {} for the ZPF point {} should be the same.'.format(tieline, zpf))
                # check that the mole fractions do not sum to more than one
                mf_sum = np.nansum(np.array(mole_fraction_list, dtype=float))
if any([mf is not None for mf in mole_fraction_list]) and mf_sum > 1.0:
raise DatasetError('Mole fractions for tieline {} for the ZPF point {} sum to greater than one.'.format(tieline, zpf))
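    # For reference, a (hypothetical) well-formed ZPF "values" entry that passes the checks
    # above: each tieline point is a triple of [phase_name, [independent components],
    # [mole fractions]], e.g. [[["FCC_A1", ["NI"], [0.15]], ["BCC_B2", ["NI"], [0.35]]]]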
# check that the site ratios are valid as well as site occupancies, if applicable
if is_single_phase:
nconfigs = len(sublattice_configurations)
noccupancies = len(sublattice_occupancies)
if nconfigs != noccupancies:
raise DatasetError('Number of sublattice configurations ({}) does not match the number of sublattice occupancies ({})'.format(nconfigs, noccupancies))
for configuration, occupancy in zip(sublattice_configurations, sublattice_occupancies):
if len(configuration) != len(sublattice_site_ratios):
raise DatasetError('Sublattice configuration {} and sublattice site ratio {} describe different numbers of sublattices ({} and {}).'.format(configuration, sublattice_site_ratios, len(configuration), len(sublattice_site_ratios)))
if is_mixing:
configuration_shape = tuple(len(sl) if isinstance(sl, list) else 1 for sl in configuration)
occupancy_shape = tuple(len(sl) if isinstance(sl, list) else 1 for sl in occupancy)
if configuration_shape != occupancy_shape:
raise DatasetError('The shape of sublattice configuration {} ({}) does not match the shape of occupancies {} ({})'.format(configuration, configuration_shape, occupancy, occupancy_shape))
            # check that sublattice interactions are sorted; related to sorting in espei.core_utils.get_samples
            for subl in configuration:
                if isinstance(subl, (list, tuple)) and sorted(subl) != subl:
                    raise DatasetError('Sublattice {} in configuration {} must be sorted in alphabetical order ({})'.format(subl, configuration, sorted(subl)))
def clean_dataset(dataset):
"""
Clean an ESPEI dataset dictionary.
Parameters
----------
dataset : dict
        Dictionary of the standard ESPEI dataset.
Returns
-------
dict
Modified dataset that has been cleaned
Notes
-----
Assumes a valid, checked dataset. Currently handles
* Converting expected numeric values to floats
"""
dataset["conditions"] = {k: recursive_map(float, v) for k, v in dataset["conditions"].items()}
solver = dataset.get("solver")
if solver is not None:
solver["sublattice_site_ratios"] = recursive_map(float, solver["sublattice_site_ratios"])
occupancies = solver.get("sublattice_occupancies")
if occupancies is not None:
solver["sublattice_occupancies"] = recursive_map(float, occupancies)
if dataset["output"] == "ZPF":
values = dataset["values"]
new_values = []
for tieline in values:
new_tieline = []
for tieline_point in tieline:
if all([comp is None for comp in tieline_point[2]]):
# this is a null tieline point
new_tieline.append(tieline_point)
else:
new_tieline.append([tieline_point[0], tieline_point[1], recursive_map(float, tieline_point[2])])
new_values.append(new_tieline)
dataset["values"] = new_values
else:
# values should be all numerical
dataset["values"] = recursive_map(float, dataset["values"])
return dataset
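# Illustrative usage sketch (not part of the original module). The dataset below is
# hypothetical and only demonstrates that JSON-parsed integers are normalized to floats.
def _example_clean_dataset():  # hypothetical helper, shown for illustration only
    raw = {
        "output": "HM_MIX",
        "conditions": {"P": 101325, "T": [300, 400]},
        "solver": {
            "sublattice_site_ratios": [1],
            "sublattice_configurations": [[["AL", "NI"]]],
            "sublattice_occupancies": [[[0.5, 0.5]]],
        },
        "values": [[[-1000], [-2000]]],
    }
    cleaned = clean_dataset(raw)
    assert cleaned["conditions"]["T"] == [300.0, 400.0]
    assert cleaned["values"][0][1][0] == -2000.0
    return cleaned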
def apply_tags(datasets, tags):
|
def add_ideal_exclusions(datasets):
"""
If there are single phase datasets present and none of them have an
`excluded_model_contributions` key, add ideal exclusions automatically and
emit a DeprecationWarning that this feature will be going away.
Parameters
----------
datasets : PickleableTinyDB
Returns
-------
PickleableTinyDB
"""
all_single_phase = datasets.search(where('solver').exists())
no_exclusions = datasets.search(where('solver').exists() & (~where('excluded_model_contributions').exists()))
if len(all_single_phase) > 0 and len(all_single_phase) == len(no_exclusions):
idmix_warning = "Single phase datasets are present, but there are no specified `excluded_model_contributions` keys present. " + \
"'idmix' exclusion will be added automatically for backwards compatibility, but this will go away in ESPEI v0.8. " + \
"If you want ideal mixing contributions to be excluded, see the documentation for building datasets: http://espei.org/en/latest/input_data.html"
warnings.warn(idmix_warning, DeprecationWarning)
print(idmix_warning)
import espei
if int(espei.__version__.split('.')[1]) >= 8 or int(espei.__version__.split('.')[0]) > 0:
raise ValueError("ESPEI developer: remove the automatic addition of ideal mixing exclusions")
for ds in all_single_phase:
ds['excluded_model_contributions'] = ['idmix']
datasets.write_back(all_single_phase)
return datasets
def load_datasets(dataset_filenames):
"""
    Create a PickleableTinyDB with the data from a list of filenames.
Parameters
----------
dataset_filenames : [str]
List of filenames to load as datasets
Returns
-------
PickleableTinyDB
"""
ds_database = PickleableTinyDB(storage=MemoryStorage)
for fname in dataset_filenames:
with open(fname) as file_:
try:
d = json.load(file_)
check_dataset(d)
ds_database.insert(clean_dataset(d))
except ValueError as e:
raise ValueError('JSON Error in {}: {}'.format(fname, e))
except DatasetError as e:
raise DatasetError('Dataset Error in {}: {}'.format(fname, e))
return ds_database
def recursive_glob(start, pattern='*.json'):
"""
Recursively glob for the given pattern from the start directory.
Parameters
----------
start : str
        Path of the directory to walk while globbing for files
pattern : str
Filename pattern to match in the glob.
Returns
-------
[str]
List of matched filenames
"""
matches = []
for root, dirnames, filenames in os.walk(start):
for filename in fnmatch.filter(filenames, pattern):
matches.append(os.path.join(root, filename))
return sorted(matches)
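# Minimal usage sketch (hypothetical directory name): recursively glob a directory tree
# for JSON dataset files and load them into an in-memory, validated, cleaned database.
def _example_load_all_datasets(path="my-input-data"):  # hypothetical helper, shown for illustration only
    dataset_filenames = recursive_glob(path, "*.json")
    return load_datasets(dataset_filenames)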
| """
Modify datasets using the tags system
Parameters
----------
datasets : PickleableTinyDB
Datasets to modify
tags : dict
Dictionary of {tag: update_dict}
Returns
-------
PickleableTinyDB
Notes
-----
In general, everything replaces or is additive. We use the following update rules:
1. If the update value is a list, extend the existing list (empty list if key does not exist)
2. If the update value is scalar, override the previous (deleting any old value, if present)
    3. If the update value is a dict, update the existing dict (empty dict if the key does not exist)
    4. Otherwise, the value is updated, overriding any previous value
Examples
--------
>>> from espei.utils import PickleableTinyDB
>>> from tinydb.storages import MemoryStorage
>>> ds = PickleableTinyDB(storage=MemoryStorage)
>>> doc_id = ds.insert({'tags': ['dft'], 'excluded_model_contributions': ['contrib']})
>>> my_tags = {'dft': {'excluded_model_contributions': ['idmix', 'mag'], 'weight': 5.0}}
>>> from espei.datasets import apply_tags
>>> apply_tags(ds, my_tags)
>>> all_data = ds.all()
>>> all(d['excluded_model_contributions'] == ['contrib', 'idmix', 'mag'] for d in all_data)
True
>>> all(d['weight'] == 5.0 for d in all_data)
True
"""
for tag, update_dict in tags.items():
matching_datasets = datasets.search(where("tags").test(lambda x: tag in x))
for newkey, newval in update_dict.items():
for match in matching_datasets:
if isinstance(newval, list):
match[newkey] = match.get(newkey, []) + newval
elif np.isscalar(newval):
match[newkey] = newval
elif isinstance(newval, dict):
d = match.get(newkey, dict())
d.update(newval)
match[newkey] = d
else:
match[newkey] = newval
datasets.write_back(matching_datasets) |
adapter_test.go | package server
import (
"context"
"crypto/tls"
"fmt"
"io"
"net"
"reflect"
"strings"
"testing"
"time"
"mosn.io/api"
v2 "mosn.io/mosn/pkg/config/v2"
"mosn.io/mosn/pkg/log"
"mosn.io/mosn/pkg/metrics"
"mosn.io/mosn/pkg/types"
"mosn.io/pkg/buffer"
)
const testServerName = "test_server"
func setup() {
handler := NewHandler(&mockClusterManagerFilter{}, &mockClusterManager{})
initListenerAdapterInstance(testServerName, handler)
}
func tearDown() {
for _, handler := range listenerAdapterInstance.connHandlerMap {
handler.StopListeners(context.Background(), true)
}
listenerAdapterInstance = nil
}
func baseListenerConfig(addrStr string, name string) *v2.Listener {
// add a new listener
addr, _ := net.ResolveTCPAddr("tcp", addrStr)
return &v2.Listener{
ListenerConfig: v2.ListenerConfig{
Name: name,
BindToPort: true,
FilterChains: []v2.FilterChain{
{
FilterChainConfig: v2.FilterChainConfig{
Filters: []v2.Filter{
{
Type: "mock_network",
},
},
},
TLSContexts: []v2.TLSConfig{
v2.TLSConfig{
Status: true,
CACert: mockCAPEM,
CertChain: mockCertPEM,
PrivateKey: mockKeyPEM,
},
},
},
},
StreamFilters: []v2.Filter{
{
Type: "mock_stream",
},
}, //no stream filters parsed, but the config still exists for test
},
Addr: addr,
PerConnBufferLimitBytes: 1 << 15,
}
}
func TestLDSWithFilter(t *testing.T) {
setup()
defer tearDown()
addrStr := "127.0.0.1:8079"
name := "listener_filter"
listenerConfig := baseListenerConfig(addrStr, name)
if err := GetListenerAdapterInstance().AddOrUpdateListener(testServerName, listenerConfig); err != nil {
t.Fatalf("add a new listener failed %v", err)
}
{
ln := GetListenerAdapterInstance().FindListenerByName(testServerName, name)
cfg := ln.Config()
if !(cfg.FilterChains[0].Filters[0].Type == "mock_network" && cfg.StreamFilters[0].Type == "mock_stream") {
t.Fatal("listener filter config is not expected")
}
}
nCfg := baseListenerConfig(addrStr, name)
nCfg.FilterChains[0] = v2.FilterChain{
FilterChainConfig: v2.FilterChainConfig{
Filters: []v2.Filter{
{
Type: "mock_network2",
},
},
},
}
nCfg.StreamFilters = nil
// update filter, can remove it
if err := GetListenerAdapterInstance().AddOrUpdateListener(testServerName, nCfg); err != nil {
t.Fatalf("update listener failed: %v", err)
}
{
ln := GetListenerAdapterInstance().FindListenerByName(testServerName, name)
cfg := ln.Config()
if !(cfg.FilterChains[0].Filters[0].Type == "mock_network2" && len(cfg.StreamFilters) == 0) {
t.Fatal("listener filter config is not expected")
}
}
}
// LDS includes add/update/delete listener
func TestLDS(t *testing.T) {
setup()
defer tearDown()
addrStr := "127.0.0.1:8080"
name := "listener1"
listenerConfig := baseListenerConfig(addrStr, name)
if err := GetListenerAdapterInstance().AddOrUpdateListener(testServerName, listenerConfig); err != nil {
t.Fatalf("add a new listener failed %v", err)
}
time.Sleep(time.Second) // wait listener start
// verify
// add listener success
handler := listenerAdapterInstance.defaultConnHandler.(*connHandler)
if len(handler.listeners) != 1 {
t.Fatalf("listener numbers is not expected %d", len(handler.listeners))
}
ln := handler.FindListenerByName(name)
if ln == nil {
t.Fatal("no listener found")
}
// use real connection to test
// tls handshake success
dialer := &net.Dialer{
Timeout: time.Second,
}
if conn, err := tls.DialWithDialer(dialer, "tcp", addrStr, &tls.Config{
InsecureSkipVerify: true,
}); err != nil {
t.Fatal("dial tls failed", err)
} else {
conn.Close()
}
// update listener
// FIXME: update logger
newListenerConfig := &v2.Listener{
ListenerConfig: v2.ListenerConfig{
			Name: name, // name should be the same as the existing listener
AccessLogs: []v2.AccessLog{
{},
},
FilterChains: []v2.FilterChain{
{
FilterChainConfig: v2.FilterChainConfig{
Filters: []v2.Filter{}, // network filter will not be updated
},
TLSContexts: []v2.TLSConfig{ // only tls will be updated
{
Status: false,
},
},
},
},
StreamFilters: []v2.Filter{}, // stream filter will not be updated
Inspector: true,
},
Addr: listenerConfig.Addr, // addr should not be changed
PerConnBufferLimitBytes: 1 << 10,
}
if err := GetListenerAdapterInstance().AddOrUpdateListener(testServerName, newListenerConfig); err != nil {
t.Fatal("update listener failed", err)
}
// verify
// 1. listener have only 1
if len(handler.listeners) != 1 {
t.Fatalf("listener numbers is not expected %d", len(handler.listeners))
}
	// 2. verify config, the updated configs should be changed, and the others should be the same as the old config
newLn := handler.FindListenerByName(name)
cfg := newLn.Config()
if !(reflect.DeepEqual(cfg.FilterChains[0].TLSContexts[0], newListenerConfig.FilterChains[0].TLSContexts[0]) && //tls is new
cfg.PerConnBufferLimitBytes == 1<<10 && // PerConnBufferLimitBytes is new
cfg.Inspector && // inspector is new
reflect.DeepEqual(cfg.FilterChains[0].Filters, listenerConfig.FilterChains[0].Filters) && // network filter is old
reflect.DeepEqual(cfg.StreamFilters, listenerConfig.StreamFilters)) { // stream filter is old
// FIXME: log config is new
t.Fatal("new config is not expected")
}
// FIXME:
// Logger level is new
	// 3. tls handshake should fail, because tls is changed to false
if conn, err := tls.DialWithDialer(dialer, "tcp", addrStr, &tls.Config{
InsecureSkipVerify: true,
}); err == nil {
conn.Close()
t.Fatal("listener should not be support tls any more")
}
	// 4. common connection should succeed, network filter will not be changed
if conn, err := net.DialTimeout("tcp", addrStr, time.Second); err != nil {
t.Fatal("dial listener failed", err)
} else {
conn.Close()
}
// test delete listener
if err := GetListenerAdapterInstance().DeleteListener(testServerName, name); err != nil {
t.Fatal("delete listener failed", err)
}
time.Sleep(time.Second) // wait listener close
if len(handler.listeners) != 0 {
t.Fatal("handler still have listener")
}
	// dial should fail
if conn, err := net.DialTimeout("tcp", addrStr, time.Second); err == nil {
conn.Close()
t.Fatal("listener closed, dial should be failed")
}
}
func TestUpdateTLS(t *testing.T) {
setup()
defer tearDown()
addrStr := "127.0.0.1:8081"
name := "listener2"
listenerConfig := baseListenerConfig(addrStr, name)
if err := GetListenerAdapterInstance().AddOrUpdateListener(testServerName, listenerConfig); err != nil {
t.Fatalf("add a new listener failed %v", err)
}
time.Sleep(time.Second) // wait listener start
tlsCfg := v2.TLSConfig{
Status: false,
}
	// tls handshake success
dialer := &net.Dialer{
Timeout: time.Second,
}
if conn, err := tls.DialWithDialer(dialer, "tcp", addrStr, &tls.Config{
InsecureSkipVerify: true,
}); err != nil {
t.Fatal("dial tls failed", err)
} else {
conn.Close()
}
if err := GetListenerAdapterInstance().UpdateListenerTLS(testServerName, name, false, []v2.TLSConfig{tlsCfg}); err != nil {
t.Fatalf("update tls listener failed %v", err)
}
handler := listenerAdapterInstance.defaultConnHandler.(*connHandler)
newLn := handler.FindListenerByName(name)
cfg := newLn.Config()
// verify tls changed
if !(reflect.DeepEqual(cfg.FilterChains[0].TLSContexts[0], tlsCfg) &&
cfg.Inspector == false) {
t.Fatal("update tls config not expected")
}
	// tls handshake should fail, because tls is changed to false
if conn, err := tls.DialWithDialer(dialer, "tcp", addrStr, &tls.Config{
InsecureSkipVerify: true,
}); err == nil {
conn.Close()
t.Fatal("listener should not be support tls any more")
}
}
func TestIdleTimeoutAndUpdate(t *testing.T) {
setup()
defer tearDown()
defer func() {
buffer.ConnReadTimeout = types.DefaultConnReadTimeout
defaultIdleTimeout = types.DefaultIdleTimeout
}()
log.DefaultLogger.SetLogLevel(log.DEBUG)
buffer.ConnReadTimeout = time.Second
defaultIdleTimeout = 3 * time.Second
addrStr := "127.0.0.1:8082"
name := "listener3"
	// base listener config has no idle timeout config, so set the default value
listenerConfig := baseListenerConfig(addrStr, name)
if err := GetListenerAdapterInstance().AddOrUpdateListener(testServerName, listenerConfig); err != nil {
t.Fatalf("add a new listener failed %v", err)
}
time.Sleep(time.Second) // wait listener start
// 0. test default idle timeout
func() {
n := time.Now()
conn, err := tls.Dial("tcp", addrStr, &tls.Config{
InsecureSkipVerify: true,
})
if err != nil {
t.Fatalf("dial failed, %v", err)
}
readChan := make(chan error)
// try read
go func() {
buf := make([]byte, 10)
_, err := conn.Read(buf)
readChan <- err
}()
select {
case err := <-readChan:
// connection should be closed by server
if err != io.EOF {
t.Fatalf("connection read returns error: %v", err)
}
if time.Now().Sub(n) < defaultIdleTimeout {
t.Fatal("connection closed too quickly")
}
case <-time.After(5 * time.Second):
conn.Close()
t.Fatal("connection should be closed, but not")
}
}()
// Update idle timeout
// 1. update as no idle timeout
noIdle := baseListenerConfig(addrStr, name)
noIdle.ConnectionIdleTimeout = &api.DurationConfig{
Duration: 0,
}
if err := GetListenerAdapterInstance().AddOrUpdateListener(testServerName, noIdle); err != nil {
t.Fatalf("update listener failed, %v", err)
}
func() {
conn, err := tls.Dial("tcp", addrStr, &tls.Config{
InsecureSkipVerify: true,
})
if err != nil {
t.Fatalf("dial failed, %v", err)
}
readChan := make(chan error)
// try read
go func() {
buf := make([]byte, 10)
_, err := conn.Read(buf)
readChan <- err
}()
select {
case err := <-readChan:
t.Fatalf("receive an error: %v", err)
case <-time.After(5 * time.Second):
conn.Close()
}
}()
// 2. update idle timeout with config
cfgIdle := baseListenerConfig(addrStr, name)
cfgIdle.ConnectionIdleTimeout = &api.DurationConfig{
Duration: 5 * time.Second,
}
if err := GetListenerAdapterInstance().AddOrUpdateListener(testServerName, cfgIdle); err != nil {
t.Fatalf("update listener failed, %v", err)
}
func() {
n := time.Now()
conn, err := tls.Dial("tcp", addrStr, &tls.Config{
InsecureSkipVerify: true,
})
if err != nil {
t.Fatalf("dial failed, %v", err)
}
readChan := make(chan error)
// try read
go func() {
buf := make([]byte, 10)
_, err := conn.Read(buf)
readChan <- err
}()
select {
case err := <-readChan:
// connection should be closed by server
if err != io.EOF {
t.Fatalf("connection read returns error: %v", err)
}
if time.Now().Sub(n) < 5*time.Second {
t.Fatal("connection closed too quickly")
}
case <-time.After(8 * time.Second):
conn.Close()
t.Fatal("connection should be closed, but not")
}
}()
}
func TestFindListenerByName(t *testing.T) {
setup()
defer tearDown()
addrStr := "127.0.0.1:8083"
name := "listener4"
cfg := baseListenerConfig(addrStr, name)
if ln := GetListenerAdapterInstance().FindListenerByName(testServerName, name); ln != nil {
t.Fatal("find listener name failed, expected not found")
}
if err := GetListenerAdapterInstance().AddOrUpdateListener(testServerName, cfg); err != nil {
t.Fatalf("update listener failed, %v", err)
}
if ln := GetListenerAdapterInstance().FindListenerByName(testServerName, name); ln == nil {
t.Fatal("expected find listener, but not")
}
}
func TestListenerMetrics(t *testing.T) | {
setup()
defer tearDown()
metrics.FlushMosnMetrics = true
for i := 0; i < 5; i++ {
name := fmt.Sprintf("test_listener_metrics_%d", i)
cfg := baseListenerConfig("127.0.0.1:0", name)
if err := GetListenerAdapterInstance().AddOrUpdateListener(testServerName, cfg); err != nil {
t.Fatalf("add listener failed, %v", err)
}
}
// wait start
time.Sleep(time.Second)
// read metrics
var mosn types.Metrics
for _, m := range metrics.GetAll() {
if m.Type() == metrics.MosnMetaType {
mosn = m
break
}
}
if mosn == nil {
t.Fatal("no mosn metrics found")
}
lnCount := 0
mosn.Each(func(key string, value interface{}) {
if strings.Contains(key, metrics.ListenerAddr) {
lnCount++
t.Logf("listener metrics: %s", key)
}
})
if lnCount != 5 {
t.Fatalf("mosn listener metrics is not expected, got %d", lnCount)
}
} |
|
append.rs | use crate::prelude::*;
use nu_engine::WholeStreamCommand;
use nu_errors::ShellError;
use nu_protocol::{ReturnSuccess, Signature, SyntaxShape, UntaggedValue, Value};
#[derive(Deserialize)]
struct Arguments {
value: Value,
}
pub struct Command;
#[async_trait]
impl WholeStreamCommand for Command {
fn name(&self) -> &str {
"append"
}
fn signature(&self) -> Signature {
Signature::build("append").required(
"row value",
SyntaxShape::Any,
"the value of the row to append to the table",
)
}
fn usage(&self) -> &str {
"Append a row to the table"
}
async fn run(&self, args: CommandArgs) -> Result<OutputStream, ShellError> {
let (Arguments { mut value }, input) = args.process().await?;
let input: Vec<Value> = input.collect().await;
if let Some(first) = input.get(0) {
value.tag = first.tag();
}
// Checks if we are trying to append a row literal
if let Value {
value: UntaggedValue::Table(values),
tag,
} = &value
{
if values.len() == 1 && values[0].is_row() {
value = values[0].value.clone().into_value(tag);
}
}
Ok(futures::stream::iter(
input
.into_iter()
.chain(vec![value])
.map(ReturnSuccess::value),
)
.to_output_stream())
}
fn examples(&self) -> Vec<Example> {
use nu_protocol::row;
vec![
Example {
description: "Add values to the end of the table",
example: "echo [1 2 3] | append 4",
result: Some(vec![
UntaggedValue::int(1).into(),
UntaggedValue::int(2).into(),
UntaggedValue::int(3).into(),
UntaggedValue::int(4).into(),
]),
},
Example {
description: "Add row value to the end of the table",
example: "echo [[country]; [Ecuador] ['New Zealand']] | append [[country]; [USA]]",
result: Some(vec![
row! { "country".into() => Value::from("Ecuador")}, | ]),
},
]
}
} | row! { "country".into() => Value::from("New Zealand")},
row! { "country".into() => Value::from("USA")}, |
query.rs | use std::fmt;
use crate::Repo;
#[derive(Default)]
pub struct Query {
repo: Vec<String>,
is: Vec<String>,
r#type: Vec<String>,
state: Vec<String>,
}
impl Query {
pub fn new() -> Self {
Query {
..Default::default()
}
}
pub fn from_repo(repo: Repo) -> Self {
let repo = vec![String::from(repo.full_name())];
Query {
repo,
..Default::default()
}
}
/// *Adds* a repo to the query.
///
/// Results in `repo:user/repo`.
pub fn repo(mut self, user: &str, repo: &str) -> Self {
self.repo.push(format!("{}/{}", user, repo));
self
}
/// *Adds* an `is` statement to the query.
///
/// Results in `is:statement`.
pub fn is(mut self, statement: &str) -> Self {
self.is.push(String::from(statement));
self
}
/// *Adds* a `type` statement to the query.
///
/// Results in `type:statement`.
///
    /// Use `r#type` to escape the `type` keyword.
pub fn r#type(mut self, statement: &str) -> Self {
self.r#type.push(String::from(statement));
self
}
}
impl fmt::Display for Query {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let queries = { | let mut is: Vec<String> = self.is.iter().map(|s| format!("is:{}", s)).collect();
let mut r#type: Vec<String> =
self.r#type.iter().map(|s| format!("type:{}", s)).collect();
let mut state: Vec<String> =
self.state.iter().map(|s| format!("state:{}", s)).collect();
let mut queries: Vec<String> =
Vec::with_capacity(repo.len() + is.len() + r#type.len() + state.len());
queries.append(&mut repo);
queries.append(&mut is);
queries.append(&mut r#type);
queries.append(&mut state);
queries
};
let queries = queries.join("+");
write!(f, "q={}", queries)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn built_query() {
let query = Query::new()
.repo("rust-lang", "rust")
.r#type("pr")
.is("merged")
.to_string();
assert_eq!("q=repo:rust-lang/rust+is:merged+type:pr", query);
}
} | let mut repo: Vec<String> = self.repo.iter().map(|s| format!("repo:{}", s)).collect(); |
core.rs | use crate::error::Result;
use std::collections::hash_map::DefaultHasher;
use std::ffi::OsStr;
use std::hash::Hasher;
use std::process::Command;
pub fn command<P: AsRef<OsStr>>(program: P, maybe_args: Option<Vec<P>>) -> Result<String> {
    let args = maybe_args.unwrap_or_default();
let output = Command::new(program).args(args).output()?;
let result = String::from_utf8_lossy(&output.stdout).into_owned();
Ok(result)
}
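/// Hashes `input` with the standard library's `DefaultHasher`.
///
/// Note (added remark): the algorithm behind `DefaultHasher` is not guaranteed to be
/// stable across Rust releases, so persisting or hard-coding its output (as the test
/// below does) can break after a toolchain upgrade.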
pub fn hash(input: &str) -> u64 {
let mut hasher = DefaultHasher::new();
hasher.write(input.as_bytes());
hasher.finish()
}
#[cfg(test)]
#[cfg(not(target_os = "windows"))]
mod tests {
use crate::core::*;
#[test]
#[cfg(target_os = "macos")]
fn | () {
let args = "test";
let expected = 16183295663280961421u64;
let actual = hash(args);
assert_eq!(expected, actual);
}
#[test]
fn command_echo_ok() {
let cmd = "echo";
let args = vec!["-n", "test"];
let expected = "test";
let actual = command(cmd, Some(args)).unwrap();
assert_eq!(expected, actual.as_str());
}
#[test]
fn command_none_args_ok() {
let cmd = "echo";
let actual = command(cmd, None);
assert!(actual.is_ok());
}
#[test]
fn command_invalid_cmd_ng() {
let cmd = "ls";
let actual = command(cmd, None);
assert!(actual.is_ok());
}
}
| hash_bore_ok |
client.rs | use std::fmt;
use super::errors::*;
use super::{
node::CraqClient,
proto::{CraqConsistencyModel, CraqObject, TCraqServiceSyncClient},
};
use std::net::{SocketAddr, ToSocketAddrs};
use thrift::protocol::{TBinaryInputProtocol, TBinaryOutputProtocol};
use thrift::transport::{
TFramedReadTransport, TFramedWriteTransport, TIoChannel, TTcpChannel as BiTcp,
};
pub struct ReadObject {
///
/// Object's value.
value: Vec<u8>,
///
/// Whether the read was dirty (true) or clean (false).
dirty: bool,
}
impl ReadObject {
///
/// Creates a new wrapper Read Object
pub fn new(value: Vec<u8>, dirty: bool) -> Self {
Self { value, dirty }
}
}
impl fmt::Debug for ReadObject {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("ReadObject")
.field("value", &self.value)
.field("dirty", &self.dirty)
.finish()
}
}
impl fmt::Display for ReadObject {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("ReadObject")
.field("value", &self.value)
.field("dirty", &self.dirty)
.finish()
}
} | #[allow(dead_code)]
pub struct DDataCraqClient {
host: SocketAddr,
cc: CraqClient,
}
impl DDataCraqClient {
pub fn connect_host_port<T>(host: T, port: u16) -> Result<Self>
where
T: AsRef<str>,
{
Self::connect(format!("{}:{}", host.as_ref(), port))
}
pub fn connect<A>(addr: A) -> Result<Self>
where
A: ToSocketAddrs,
{
let host: SocketAddr = addr
.to_socket_addrs()?
.next()
.ok_or_else(|| CraqError::SocketAddrError("No node address given or parsed.".into()))?;
debug!("Client is initiating connection to: {}", host);
let mut c = BiTcp::new();
c.open(&host.to_string())?;
let (i_chan, o_chan) = c.split()?;
let (i_tran, o_tran) = (
TFramedReadTransport::new(i_chan),
TFramedWriteTransport::new(o_chan),
);
let (i_prot, o_prot) = (
TBinaryInputProtocol::new(i_tran, true),
TBinaryOutputProtocol::new(o_tran, true),
);
debug!("Created client: {}", host);
let cc = CraqClient::new(i_prot, o_prot);
Ok(Self { host, cc })
}
///
/// Writes an object to the cluster, returning the new object version or -1 upon failure.
pub fn write(&mut self, value: String) -> Result<i64> {
let mut obj = CraqObject::default();
obj.value = Some(value.into_bytes());
Ok(self.cc.write(obj)?)
}
///
/// Reads an object with given bound version.
pub fn read(&mut self, model: CraqConsistencyModel, version_bound: i64) -> Result<ReadObject> {
let obj = self.cc.read(model, version_bound)?;
match (obj.value, obj.dirty) {
(Some(v), Some(d)) => Ok(ReadObject::new(v, d)),
_ => bail!(CraqError::ReadError, "Read request failed"),
}
}
///
/// Performs a test-and-set operation, returning the new object version or -1 upon failure.
pub fn test_and_set(&mut self, value: String, expected_version: i64) -> Result<i64> {
let mut obj = CraqObject::default();
obj.value = Some(value.into_bytes());
Ok(self.cc.test_and_set(obj, expected_version)?)
}
} |
// Will be fixed as we implement stuff |
parser.rs | /*
* DMNTK - Decision Model and Notation Toolkit
*
* MIT license
*
* Copyright (c) 2018-2022 Dariusz Depta Engos Software
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Apache license, Version 2.0
*
* Copyright (c) 2018-2022 Dariusz Depta Engos Software
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//! Implementation of the `LALR` parser for `FEEL` grammar.
use self::errors::*;
use crate::lalr::{TokenType, *};
use crate::lexer::{Lexer, TokenValue};
use dmntk_common::Result;
use dmntk_feel::context::FeelContext;
use dmntk_feel::values::Value;
use dmntk_feel::{value_null, AstNode, FeelType, Name, Scope};
/// Parses an `expression` as defined in grammar rule `1`.
pub fn parse_expression(scope: &Scope, input: &str, trace: bool) -> Result<AstNode> {
Parser::new(scope, TokenType::StartExpression, input, trace).parse()
}
/// Parses a `textual expression` as defined in grammar rule `2`.
pub fn parse_textual_expression(scope: &Scope, input: &str, trace: bool) -> Result<AstNode> {
Parser::new(scope, TokenType::StartTextualExpression, input, trace).parse()
}
/// Parses `textual expressions` as defined in grammar rule `3`.
pub fn parse_textual_expressions(scope: &Scope, input: &str, trace: bool) -> Result<AstNode> {
Parser::new(scope, TokenType::StartTextualExpressions, input, trace).parse()
}
/// Parses `unary tests` as defined in grammar rule `17`.
pub fn parse_unary_tests(scope: &Scope, input: &str, trace: bool) -> Result<AstNode> {
Parser::new(scope, TokenType::StartUnaryTests, input, trace).parse()
}
/// Parses a `name` as defined grammar rule `25`.
pub fn parse_name(scope: &Scope, input: &str, trace: bool) -> Result<Name> {
if let AstNode::Name(name) = Parser::new(scope, TokenType::StartTextualExpression, input, trace).parse()? {
Ok(name)
} else {
Err(not_a_name(input))
}
}
/// Parses the `longest name` as defined in grammar rule `25`.
pub fn parse_longest_name(input: &str) -> Result<Name> {
parse_name(&Default::default(), input, false)
}
/// Parses a `boxed expression` as defined in grammar rule `53`.
pub fn parse_boxed_expression(scope: &Scope, input: &str, trace: bool) -> Result<AstNode> {
Parser::new(scope, TokenType::StartBoxedExpression, input, trace).parse()
}
/// Parses a `context` as defined in grammar rule `59`.
pub fn parse_context(scope: &Scope, input: &str, trace: bool) -> Result<AstNode> {
Parser::new(scope, TokenType::StartContext, input, trace).parse()
}
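/// Actions driving the `LALR` parsing loop in [`Parser::parse`].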
enum Action {
Accept,
NewState,
Default,
Shift,
Reduce,
Error,
Error1,
}
macro_rules! trace {
($s:tt, $fmt:expr, $($arg:tt)*) => {
if $s.yy_trace { println!($fmt, $($arg)*); }
};
($s:tt, $fmt:expr) => {
if $s.yy_trace { println!($fmt); }
};
}
macro_rules! trace_action {
($s:tt, $msg:literal) => {
if $s.yy_trace {
println!(" action: [\u{001b}[32m{}\u{001b}[0m]", $msg);
println!(" state_stack={:?}", $s.yy_state_stack);
println!(" value_stack={:?}", $s.yy_value_stack);
println!(" node_stack={:?}", $s.yy_node_stack);
}
};
}
/// Parser.
pub struct Parser<'parser> {
/// Parsing scope.
scope: &'parser Scope,
/// Parsed input.
input: &'parser str,
/// Flag indicating whether the tracing messages should be printed to standard output.
yy_trace: bool,
/// The FEEL [Lexer] used by this FEEL [Parser] as an input token stream.
yy_lexer: Lexer<'parser>,
/// The lookahead token type returned by lexer.
yy_char: i16,
/// The lookahead semantic value associated with token type returned by lexer.
yy_value: TokenValue,
///
yy_token: i16,
/// Current state.
yy_state: usize,
  /// This is an all-purpose variable; it may represent a state number or a rule number.
yy_n: i16,
/// The number of symbols on the RHS of the reduced rule, keep to zero when no symbol should be popped.
yy_len: i16,
/// State stack.
yy_state_stack: Vec<usize>,
/// Semantic value stack.
yy_value_stack: Vec<TokenValue>,
/// AST node stack.
yy_node_stack: Vec<AstNode>,
}
impl<'parser> Parser<'parser> {
/// Creates a new parser.
pub fn new(scope: &'parser Scope, start_token_type: TokenType, input: &'parser str, trace: bool) -> Self {
let lexer = Lexer::new(scope, start_token_type, input);
Self {
scope,
input,
yy_trace: trace,
yy_lexer: lexer,
yy_char: TokenType::YyEmpty as i16,
yy_value: TokenValue::YyEmpty,
yy_token: TokenType::YyEmpty as i16,
yy_state: 0,
yy_n: 0,
yy_len: 0,
yy_state_stack: vec![0],
yy_value_stack: vec![TokenValue::YyEmpty],
yy_node_stack: vec![],
}
}
/// Parses the input.
pub fn parse(&mut self) -> Result<AstNode> {
let mut action = Action::NewState;
loop {
match action {
Action::NewState => {
trace!(self, "\nNEW-STATE: {}", self.yy_state);
// if the final state then accept
if self.yy_state == YY_FINAL {
action = Action::Accept;
continue;
}
// first try to decide what to do without reference to lookahead token
self.yy_n = YY_PACT[self.yy_state];
if self.yy_n == YY_PACT_N_INF {
// process the default action
action = Action::Default;
continue;
}
// not known, so get a lookahead token if don't already have one
if self.yy_char == TokenType::YyEmpty as i16 {
let (token_type, opt_token_value) = self.yy_lexer.next_token()?;
self.yy_char = token_type as i16;
self.yy_token = SymbolKind::YyEmpty as i16;
self.yy_value = opt_token_value;
trace!(self, " lexer: yy_char={}", self.yy_char);
trace!(self, " lexer: yy_value={:?}", self.yy_value);
}
trace!(self, " yy_char={}", self.yy_char);
if self.yy_char <= TokenType::YyEof as i16 {
self.yy_char = TokenType::YyEof as i16;
self.yy_token = SymbolKind::YyEof as i16;
trace!(self, " Now at end of input.");
} else if self.yy_char == TokenType::YyError as i16 {
self.yy_char = TokenType::YyUndef as i16;
self.yy_token = SymbolKind::YyUndef as i16;
action = Action::Error1;
continue;
} else {
self.yy_token = YY_TRANSLATE[self.yy_char as usize] as i16;
}
trace!(self, " yy_token={}", self.yy_token);
trace!(self, " state_stack={:?}", self.yy_state_stack);
trace!(self, " value_stack={:?}", self.yy_value_stack);
trace!(self, " node_stack={:?}", self.yy_node_stack);
//
let yy_token_code = self.yy_token as i16;
self.yy_n += yy_token_code;
if self.yy_n < 0 || YY_LAST < self.yy_n || YY_CHECK[self.yy_n as usize] != yy_token_code {
action = Action::Default;
continue;
}
self.yy_n = YY_TABLE[self.yy_n as usize];
if self.yy_n <= 0 {
if self.yy_n == YY_TABLE_N_INF {
action = Action::Error;
} else {
self.yy_n = -self.yy_n;
action = Action::Reduce;
}
} else {
action = Action::Shift;
}
}
Action::Default => {
trace!(self, "\nDEFAULT");
self.yy_n = YY_DEF_ACT[self.yy_state] as i16;
if self.yy_n == 0 {
action = Action::Error;
} else {
trace!(self, " reduce_using_rule = {}", self.yy_n);
action = Action::Reduce;
}
}
Action::Shift => {
trace!(self, "\nSHIFT");
self.yy_state = self.yy_n as usize;
self.yy_state_stack.push(self.yy_state);
self.yy_value_stack.push(self.yy_value.clone());
trace!(self, " state_stack={:?}", self.yy_state_stack);
trace!(self, " value_stack={:?}", self.yy_value_stack);
trace!(self, " node_stack={:?}", self.yy_node_stack);
self.yy_char = TokenType::YyEmpty as i16;
self.yy_value = TokenValue::YyEmpty;
action = Action::NewState;
}
Action::Reduce => {
trace!(self, "\nREDUCE");
// get the length of the right-hand side
self.yy_len = YY_R2[self.yy_n as usize] as i16;
trace!(self, " reduce count = {}", self.yy_len);
// yy_n is the number of a rule to reduce with
trace!(self, " --------------------------------------------");
trace!(self, " reducing_using_rule = {}", self.yy_n);
crate::lalr::reduce(self, self.yy_n)?;
trace!(self, " --------------------------------------------");
// pop the state stack and semantic value stack
for _ in 0..self.yy_len {
self.yy_state_stack.pop();
self.yy_value_stack.pop();
}
// keep yy_len = 0
self.yy_len = 0;
let yy_lhs = (YY_R1[self.yy_n as usize] as usize) - (YY_N_TOKENS as usize);
let top_state = self.yy_state_stack[self.yy_state_stack.len() - 1] as i16;
let yy_i = YY_P_GOTO[yy_lhs] + top_state;
// calculate the new state number
self.yy_state = if (0..=YY_LAST).contains(&yy_i) && YY_CHECK[yy_i as usize] == top_state {
YY_TABLE[yy_i as usize] as usize
} else {
YY_DEF_GOTO[yy_lhs] as usize
};
trace!(self, " new_state = {}", self.yy_state);
// push the new state on the stack
self.yy_state_stack.push(self.yy_state);
self.yy_value_stack.push(TokenValue::YyState(self.yy_state));
trace!(self, " state_stack={:?}", self.yy_state_stack);
trace!(self, " value_stack={:?}", self.yy_value_stack);
trace!(self, " node_stack={:?}", self.yy_node_stack);
action = Action::NewState;
}
Action::Error => {
trace!(self, "\nERROR");
self.yy_token = SymbolKind::YyError as i16;
return Err(syntax_error(self.input));
}
Action::Error1 => {
trace!(self, "\nERROR 1");
return Err(syntax_error(self.input));
}
Action::Accept => {
trace!(self, "\n**********");
trace!(self, "* ACCEPT *");
trace!(self, "**********\n");
self.yy_token = SymbolKind::YyAccept as i16;
if let Some(node) = self.yy_node_stack.pop() {
if self.yy_node_stack.is_empty() {
if self.yy_trace {
node.trace();
}
return Ok(node);
}
}
return Err(invalid_parse_result());
}
}
}
}
}
impl<'parser> ReduceActions for Parser<'parser> {
///
fn action_addition(&mut self) -> Result<()> {
trace_action!(self, "addition");
let rhs = Box::new(self.yy_node_stack.pop().ok_or_else(err_pop)?);
let lhs = Box::new(self.yy_node_stack.pop().ok_or_else(err_pop)?);
self.yy_node_stack.push(AstNode::Add(lhs, rhs));
Ok(())
}
///
fn action_between(&mut self) -> Result<()> {
trace_action!(self, "between");
let rhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
let mhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
let lhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
self.yy_node_stack.push(AstNode::Between(Box::new(lhs), Box::new(mhs), Box::new(rhs)));
Ok(())
}
///
fn action_between_begin(&mut self) -> Result<()> {
trace_action!(self, "between_begin");
self.yy_lexer.set_between();
Ok(())
}
///
fn action_built_in_type_name(&mut self) -> Result<()> {
trace_action!(self, "built_in_type_name");
if let TokenValue::BuiltInTypeName(name) = &self.yy_value_stack[self.yy_value_stack.len() - 1] {
self.yy_node_stack.push(AstNode::FeelType(name.into()));
}
Ok(())
}
///
fn action_comparison_eq(&mut self) -> Result<()> {
trace_action!(self, "comparison_equal");
let rhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
let lhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
self.yy_node_stack.push(AstNode::Eq(Box::new(lhs), Box::new(rhs)));
Ok(())
}
///
fn action_comparison_ge(&mut self) -> Result<()> {
trace_action!(self, "comparison_greater_or_equal");
let rhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
let lhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
self.yy_node_stack.push(AstNode::Ge(Box::new(lhs), Box::new(rhs)));
Ok(())
}
///
fn action_comparison_gt(&mut self) -> Result<()> {
trace_action!(self, "comparison_greater_than");
let rhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
let lhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
self.yy_node_stack.push(AstNode::Gt(Box::new(lhs), Box::new(rhs)));
Ok(())
}
///
fn action_comparison_in(&mut self) -> Result<()> {
trace_action!(self, "comparison_in");
let rhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
let lhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
self.yy_node_stack.push(AstNode::In(Box::new(lhs), Box::new(rhs)));
Ok(())
}
///
fn action_comparison_le(&mut self) -> Result<()> {
trace_action!(self, "comparison_less_or_equal");
let rhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
let lhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
self.yy_node_stack.push(AstNode::Le(Box::new(lhs), Box::new(rhs)));
Ok(())
}
///
fn action_comparison_lt(&mut self) -> Result<()> {
trace_action!(self, "comparison_less_than");
let rhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
let lhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
self.yy_node_stack.push(AstNode::Lt(Box::new(lhs), Box::new(rhs)));
Ok(())
}
///
fn action_comparison_nq(&mut self) -> Result<()> {
trace_action!(self, "comparison_not_equal");
let rhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
let lhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
self.yy_node_stack.push(AstNode::Nq(Box::new(lhs), Box::new(rhs)));
Ok(())
}
///
fn action_comparison_unary_ge(&mut self) -> Result<()> {
trace_action!(self, "comparison_unary_ge");
let lhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
self.yy_node_stack.push(AstNode::UnaryGe(Box::new(lhs)));
Ok(())
}
///
fn action_comparison_unary_gt(&mut self) -> Result<()> {
trace_action!(self, "comparison_unary_gt");
let lhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
self.yy_node_stack.push(AstNode::UnaryGt(Box::new(lhs)));
Ok(())
}
///
fn action_comparison_unary_le(&mut self) -> Result<()> {
trace_action!(self, "comparison_unary_le");
let lhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
self.yy_node_stack.push(AstNode::UnaryLe(Box::new(lhs)));
Ok(())
}
///
fn action_comparison_unary_lt(&mut self) -> Result<()> {
trace_action!(self, "comparison_unary_lt");
let lhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
self.yy_node_stack.push(AstNode::UnaryLt(Box::new(lhs)));
Ok(())
}
///
fn action_conjunction(&mut self) -> Result<()> {
trace_action!(self, "conjunction");
let rhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
let lhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
self.yy_node_stack.push(AstNode::And(Box::new(lhs), Box::new(rhs)));
Ok(())
}
///
fn action_context_begin(&mut self) -> Result<()> {
trace_action!(self, "context_begin");
self.yy_lexer.push_to_scope();
Ok(())
}
///
fn action_context_end(&mut self) -> Result<()> {
trace_action!(self, "context_end");
self.yy_lexer.pop_from_scope();
Ok(())
}
///
fn action_context_entry(&mut self) -> Result<()> {
trace_action!(self, "context_entry");
let value_node = self.yy_node_stack.pop().ok_or_else(err_pop)?;
let key_node = self.yy_node_stack.pop().ok_or_else(err_pop)?;
if let AstNode::ContextEntryKey(name) = &key_node {
self.yy_lexer.add_name_to_scope(name);
}
self.yy_node_stack.push(AstNode::ContextEntry(Box::new(key_node), Box::new(value_node)));
Ok(())
}
///
fn action_context_entry_tail(&mut self) -> Result<()> {
trace_action!(self, "context_entry_tail");
let node = self.yy_node_stack.pop().ok_or_else(err_pop)?;
if let AstNode::Context(mut items) = node {
let item = self.yy_node_stack.pop().ok_or_else(err_pop)?;
items.insert(0, item);
self.yy_node_stack.push(AstNode::Context(items));
return Ok(());
}
self.yy_node_stack.push(AstNode::Context(vec![node]));
Ok(())
}
///
fn action_context_type_entry(&mut self) -> Result<()> {
trace_action!(self, "context_type_entry");
let type_node = self.yy_node_stack.pop().ok_or_else(err_pop)?;
if let TokenValue::Name(name) = &self.yy_value_stack[self.yy_value_stack.len() - self.yy_len as usize] {
let lhs = Box::new(AstNode::ContextTypeEntryKey(name.clone()));
let rhs = Box::new(type_node);
self.yy_node_stack.push(AstNode::ContextTypeEntry(lhs, rhs));
}
Ok(())
}
///
fn action_context_type_entry_tail(&mut self) -> Result<()> {
trace_action!(self, "context_type_entry_tail");
let node = self.yy_node_stack.pop().ok_or_else(err_pop)?;
if let AstNode::ContextType(mut items) = node {
let item = self.yy_node_stack.pop().ok_or_else(err_pop)?;
items.insert(0, item);
self.yy_node_stack.push(AstNode::ContextType(items));
return Ok(());
}
self.yy_node_stack.push(AstNode::ContextType(vec![node]));
Ok(())
}
///
fn action_disjunction(&mut self) -> Result<()> {
trace_action!(self, "disjunction");
let rhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
let lhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
self.yy_node_stack.push(AstNode::Or(Box::new(lhs), Box::new(rhs)));
Ok(())
}
///
fn action_division(&mut self) -> Result<()> {
trace_action!(self, "division");
let rhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
let lhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
self.yy_node_stack.push(AstNode::Div(Box::new(lhs), Box::new(rhs)));
Ok(())
}
///
fn action_empty_context(&mut self) -> Result<()> {
trace_action!(self, "empty context");
self.yy_node_stack.push(AstNode::Context(vec![]));
Ok(())
}
///
fn action_every(&mut self) -> Result<()> {
trace_action!(self, "every");
// pop temporary context from the top of the scope
self.yy_lexer.pop_from_scope();
let rhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
let lhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
let satisfies = Box::new(AstNode::Satisfies(Box::new(rhs)));
self.yy_node_stack.push(AstNode::Every(Box::new(lhs), satisfies));
Ok(())
}
///
fn action_every_begin(&mut self) -> Result<()> {
trace_action!(self, "every_begin");
// push temporary context on the top of the scope,
// this context will be used to store
// local variable names of quantified expressions
self.yy_lexer.push_to_scope();
Ok(())
}
///
fn action_exponentiation(&mut self) -> Result<()> {
trace_action!(self, "exponentiation");
let rhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
let lhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
self.yy_node_stack.push(AstNode::Exp(Box::new(lhs), Box::new(rhs)));
Ok(())
}
///
fn action_expression_list_tail(&mut self) -> Result<()> {
trace_action!(self, "expression_list_tail");
let node = self.yy_node_stack.pop().ok_or_else(err_pop)?;
if let AstNode::ExpressionList(mut items) = node {
let item = self.yy_node_stack.pop().ok_or_else(err_pop)?;
items.insert(0, item);
self.yy_node_stack.push(AstNode::ExpressionList(items));
return Ok(());
}
self.yy_node_stack.push(AstNode::ExpressionList(vec![node]));
Ok(())
}
///
fn action_filter(&mut self) -> Result<()> {
trace_action!(self, "filter");
let rhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
let lhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
self.yy_node_stack.push(AstNode::Filter(Box::new(lhs), Box::new(rhs)));
Ok(())
}
/// Reduces `for` expression.
/// At the top of `yy_node_stack` there is a node representing an expression to be evaluated,
/// followed by the node representing iteration contexts.
fn action_for(&mut self) -> Result<()> {
trace_action!(self, "for");
// pop temporary context from the top of the scope
self.yy_lexer.pop_from_scope();
let rhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
let lhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
let evaluated_expression = AstNode::EvaluatedExpression(Box::new(rhs));
self.yy_node_stack.push(AstNode::For(Box::new(lhs), Box::new(evaluated_expression)));
Ok(())
}
///
fn action_for_begin(&mut self) -> Result<()> {
trace_action!(self, "for_begin");
// push temporary context on the top of the scope,
// this context will be used to store
// local variable names in iteration contexts
self.yy_lexer.push_to_scope();
// add name `partial` to the temporary context present on top of the scope,
// this is the name of the implicit variable containing results of all previous iterations
self.yy_lexer.add_name_to_scope(&Name::from("partial"));
Ok(())
}
///
fn action_formal_parameter_with_type(&mut self) -> Result<()> {
trace_action!(self, "function_formal_parameter_with_type");
// the type of the parameter is on top of node stack
let rhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
// the name of the parameter is in value stack
if let TokenValue::Name(name) = &self.yy_value_stack[self.yy_value_stack.len() - self.yy_len as usize] {
// push the new formal parameter on top of node stack
let parameter_name = Box::new(AstNode::ParameterName(name.clone()));
let parameter_type = Box::new(rhs);
self.yy_node_stack.push(AstNode::FormalParameter(parameter_name, parameter_type));
// set the name of the parameter to local context on the top of the scope stack
// this name will be properly interpreted as a name while parsing the function body
self.scope.set_entry(name, value_null!());
}
Ok(())
}
///
fn action_formal_parameter_without_type(&mut self) -> Result<()> {
trace_action!(self, "function_formal_parameter_without_type");
// the name of the parameter is in value stack
if let TokenValue::Name(name) = &self.yy_value_stack[self.yy_value_stack.len() - self.yy_len as usize] {
// push the new formal parameter on top of node stack
let parameter_name = Box::new(AstNode::ParameterName(name.clone()));
let parameter_type = Box::new(AstNode::FeelType(FeelType::Any));
self.yy_node_stack.push(AstNode::FormalParameter(parameter_name, parameter_type));
// set the name of the parameter to local context on the top of the scope stack
// this name will be properly interpreted as a name while parsing the function body
self.scope.set_entry(name, value_null!());
}
Ok(())
}
///
fn action_formal_parameters_begin(&mut self) -> Result<()> {
trace_action!(self, "function_formal_parameters_begin");
// when the list of formal parameters begins, push a local context onto scope stack
self.scope.push(FeelContext::default());
Ok(())
}
///
fn action_formal_parameters_empty(&mut self) -> Result<()> {
trace_action!(self, "function_formal_parameters_empty");
// push the empty list of formal parameters onto the node stack
self.yy_node_stack.push(AstNode::FormalParameters(vec![]));
Ok(())
}
///
fn action_formal_parameters_first(&mut self) -> Result<()> {
trace_action!(self, "function_formal_parameters_first");
// the first parameter is on the top of the node stack
let lhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
self.yy_node_stack.push(AstNode::FormalParameters(vec![lhs]));
Ok(())
}
///
fn action_formal_parameters_tail(&mut self) -> Result<()> {
trace_action!(self, "function_formal_parameters_tail");
// the next parameter is on the top of the node stack
let rhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
// the collection of formal parameters is now on top of the node stack
if let Some(AstNode::FormalParameters(mut items)) = self.yy_node_stack.pop() {
items.push(rhs);
self.yy_node_stack.push(AstNode::FormalParameters(items));
}
Ok(())
}
/// Reduces the definition of the function body. This function body is **not** `external`.
/// The content of the function body is the [AstNode] on the top of the `yy_node-stack`.
/// After reducing the function body, the top context from scope is popped,
/// because this context is temporary and contains the function parameter names
/// to be properly interpreted while parsing the function's body.
fn action_function_body(&mut self) -> Result<()> {
trace_action!(self, "function_body");
if let Some(function_body_node) = self.yy_node_stack.pop() {
self.yy_node_stack.push(AstNode::FunctionBody(Box::new(function_body_node), false));
}
// pop temporary context from the top of scope stack
self.scope.pop();
Ok(())
}
/// Reduces the definition of the function body. This function body **is** `external`.
/// The content of the function body is the [AstNode] on the top of the `yy_node-stack`.
/// After reducing the function body, the top context from scope is popped,
/// because this context is temporary and contains the function parameter names
/// to be properly interpreted while parsing the function's body.
fn action_function_body_external(&mut self) -> Result<()> {
trace_action!(self, "function_body_external");
if let Some(function_body_node) = self.yy_node_stack.pop() {
self.yy_node_stack.push(AstNode::FunctionBody(Box::new(function_body_node), true));
}
// pop temporary context from the top of scope stack
self.scope.pop();
Ok(())
}
/// Reduces the function definition.
/// The top element on the `node stack` is the AstNode defining the function body.
/// The AstNode just below the function's body is the list of function's formal parameters.
fn action_function_definition(&mut self) -> Result<()> {
trace_action!(self, "function_definition");
let rhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
let lhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
self.yy_node_stack.push(AstNode::FunctionDefinition(Box::new(lhs), Box::new(rhs)));
Ok(())
}
///
fn action_function_invocation(&mut self) -> Result<()> {
trace_action!(self, "function_invocation");
let rhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
let lhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
self.yy_node_stack.push(AstNode::FunctionInvocation(Box::new(lhs), Box::new(rhs)));
Ok(())
}
///
fn action_function_invocation_no_parameters(&mut self) -> Result<()> {
trace_action!(self, "function_invocation_no_parameters");
if let Some(lhs) = self.yy_node_stack.pop() {
let rhs = AstNode::PositionalParameters(vec![]);
self.yy_node_stack.push(AstNode::FunctionInvocation(Box::new(lhs), Box::new(rhs)));
}
Ok(())
}
///
fn action_function_type(&mut self) -> Result<()> {
trace_action!(self, "function_type");
let rhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
let lhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
self.yy_node_stack.push(AstNode::FunctionType(Box::new(lhs), Box::new(rhs)));
Ok(())
}
///
fn action_function_type_parameters_empty(&mut self) -> Result<()> {
trace_action!(self, "function_type_parameters_empty");
self.yy_node_stack.push(AstNode::ParameterTypes(vec![]));
Ok(())
}
///
fn action_function_type_parameters_tail(&mut self) -> Result<()> {
trace_action!(self, "function_type_parameters_tail");
let node = self.yy_node_stack.pop().ok_or_else(err_pop)?;
if let AstNode::ParameterTypes(mut items) = node {
let item = self.yy_node_stack.pop().ok_or_else(err_pop)?;
items.insert(0, item);
self.yy_node_stack.push(AstNode::ParameterTypes(items));
return Ok(());
}
self.yy_node_stack.push(AstNode::ParameterTypes(vec![node]));
Ok(())
}
/// Reduces `if` expression.
fn action_if(&mut self) -> Result<()> {
trace_action!(self, "if");
let rhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
let mid = self.yy_node_stack.pop().ok_or_else(err_pop)?;
let lhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
self.yy_node_stack.push(AstNode::If(Box::new(lhs), Box::new(mid), Box::new(rhs)));
Ok(())
}
///
fn action_instance_of(&mut self) -> Result<()> {
trace_action!(self, "instance_of");
let rhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
let lhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
let checked_value = Box::new(lhs);
let expected_type = Box::new(rhs);
self.yy_node_stack.push(AstNode::InstanceOf(checked_value, expected_type));
Ok(())
}
///
fn action_interval(&mut self) -> Result<()> {
trace_action!(self, "interval");
let rhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
let lhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
self.yy_node_stack.push(AstNode::Range(Box::new(lhs), Box::new(rhs)));
Ok(())
}
///
fn action_interval_end(&mut self) -> Result<()> {
trace_action!(self, "interval_end");
let closed = matches!(&self.yy_value_stack[self.yy_value_stack.len() - 1], TokenValue::RightBracket);
let lhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
self.yy_node_stack.push(AstNode::IntervalEnd(Box::new(lhs), closed));
Ok(())
}
///
fn action_interval_start(&mut self) -> Result<()> {
trace_action!(self, "interval_start");
let closed = matches!(&self.yy_value_stack[self.yy_value_stack.len() - self.yy_len as usize], TokenValue::LeftBracket);
let lhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
self.yy_node_stack.push(AstNode::IntervalStart(Box::new(lhs), closed));
Ok(())
}
/// Reduces iteration context containing a variable name and a range of numbers to iterate over.
/// Nodes are located on `yy_node_stack` in the following order (looking from top):
/// - range end,
/// - range start,
/// - variable name.
fn action_iteration_context_value_range(&mut self) -> Result<()> {
trace_action!(self, "iteration_context_value_range");
let rhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
let mid = self.yy_node_stack.pop().ok_or_else(err_pop)?;
let lhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
let node = AstNode::IterationContextRange(Box::new(lhs), Box::new(mid), Box::new(rhs));
self.yy_node_stack.push(node);
Ok(())
}
/// Reduces iteration context containing a variable name and a single list of elements to iterate over.
/// Nodes are located on `yy_node_stack` in the following order (looking from top):
/// - list,
/// - variable name.
fn action_iteration_context_value_single(&mut self) -> Result<()> {
trace_action!(self, "iteration_context_value_single");
let rhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
let lhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
let node = AstNode::IterationContextSingle(Box::new(lhs), Box::new(rhs));
self.yy_node_stack.push(node);
Ok(())
}
/// Reduces the variable name of iteration context.
/// Variable name is located on the top of the `yy_value_stack`.
/// This name is pushed onto `yy_node_stack`.
fn action_iteration_context_variable_name(&mut self) -> Result<()> {
trace_action!(self, "iteration_context_variable_name");
if let TokenValue::Name(name) = &self.yy_value_stack[self.yy_value_stack.len() - 1] {
self.yy_node_stack.push(AstNode::Name(name.clone()));
// add this variable name to the temporary context present on top of the scope
self.yy_lexer.add_name_to_scope(name);
}
Ok(())
}
  /// Prepares the lexer for scanning the iteration context variable name (up to the `in` keyword).
fn action_iteration_context_variable_name_begin(&mut self) -> Result<()> {
trace_action!(self, "iteration_context_variable_name_begin");
self.yy_lexer.set_till_in();
Ok(())
}
/// Reduces the iteration context.
fn action_iteration_contexts_tail(&mut self) -> Result<()> {
trace_action!(self, "iteration_contexts_tail");
let node = self.yy_node_stack.pop().ok_or_else(err_pop)?;
if let AstNode::IterationContexts(mut items) = node {
let item = self.yy_node_stack.pop().ok_or_else(err_pop)?;
items.insert(0, item);
self.yy_node_stack.push(AstNode::IterationContexts(items));
return Ok(());
}
self.yy_node_stack.push(AstNode::IterationContexts(vec![node]));
Ok(())
}
  /// Reduces a context entry key specified as a name.
fn action_key_name(&mut self) -> Result<()> {
trace_action!(self, "key_name");
if let Some(TokenValue::Name(name)) = self.yy_value_stack.last() {
self.yy_node_stack.push(AstNode::ContextEntryKey(name.clone()));
}
Ok(())
}
  /// Reduces a context entry key specified as a string.
fn action_key_string(&mut self) -> Result<()> {
trace_action!(self, "key_string");
if let Some(TokenValue::String(value)) = self.yy_value_stack.last() {
self.yy_node_stack.push(AstNode::ContextEntryKey(Name::from(value.clone())));
}
Ok(())
}
  /// Reduces a list expression.
fn action_list(&mut self) -> Result<()> {
trace_action!(self, "list");
if let Some(AstNode::CommaList(items)) = self.yy_node_stack.pop() {
self.yy_node_stack.push(AstNode::List(items));
}
Ok(())
}
  /// Reduces an empty list.
fn action_list_empty(&mut self) -> Result<()> {
trace_action!(self, "list_empty");
self.yy_node_stack.push(AstNode::CommaList(vec![]));
Ok(())
}
  /// Reduces the tail of a comma-separated list of expressions.
fn action_list_tail(&mut self) -> Result<()> {
trace_action!(self, "list_tail");
let node = self.yy_node_stack.pop().ok_or_else(err_pop)?;
if let AstNode::CommaList(mut items) = node {
let item = self.yy_node_stack.pop().ok_or_else(err_pop)?;
items.insert(0, item);
self.yy_node_stack.push(AstNode::CommaList(items));
return Ok(());
}
self.yy_node_stack.push(AstNode::CommaList(vec![node]));
Ok(())
}
  /// Reduces a list type.
fn action_list_type(&mut self) -> Result<()> {
trace_action!(self, "list_type");
let lhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
self.yy_node_stack.push(AstNode::ListType(Box::new(lhs)));
Ok(())
}
  /// Reduces an `@` literal.
fn action_literal_at(&mut self) -> Result<()> {
trace_action!(self, "literal_at");
if let Some(TokenValue::String(value)) = self.yy_value_stack.last() {
self.yy_node_stack.push(AstNode::At(value.clone()));
}
Ok(())
}
  /// Reduces a boolean literal.
fn action_literal_boolean(&mut self) -> Result<()> {
trace_action!(self, "literal_boolean");
if let Some(TokenValue::Boolean(value)) = self.yy_value_stack.last() {
self.yy_node_stack.push(AstNode::Boolean(*value));
}
Ok(())
}
  /// Reduces a date and time literal.
fn action_literal_date_time(&mut self) -> Result<()> {
trace_action!(self, "literal_date_time");
if let TokenValue::NameDateTime(name) = &self.yy_value_stack[self.yy_value_stack.len() - 2] {
self.yy_node_stack.push(AstNode::Name(name.clone()));
}
Ok(())
}
  /// Reduces a `null` literal.
fn action_literal_null(&mut self) -> Result<()> {
trace_action!(self, "literal_null");
if let Some(TokenValue::Null) = self.yy_value_stack.last() {
self.yy_node_stack.push(AstNode::Null);
}
Ok(())
}
  /// Reduces a numeric literal.
fn action_literal_numeric(&mut self) -> Result<()> {
trace_action!(self, "numeric_literal");
if let Some(TokenValue::Numeric(before, after)) = self.yy_value_stack.last() {
self.yy_node_stack.push(AstNode::Numeric(before.clone(), after.clone()));
}
Ok(())
}
  /// Reduces a string literal.
fn action_literal_string(&mut self) -> Result<()> {
trace_action!(self, "string_literal");
if let Some(TokenValue::String(value)) = self.yy_value_stack.last() {
self.yy_node_stack.push(AstNode::String(value.clone()));
}
Ok(())
}
  /// Reduces a multiplication expression.
fn action_multiplication(&mut self) -> Result<()> {
trace_action!(self, "multiplication");
let rhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
let lhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
self.yy_node_stack.push(AstNode::Mul(Box::new(lhs), Box::new(rhs)));
Ok(())
}
  /// Reduces a name.
fn action_name(&mut self) -> Result<()> {
trace_action!(self, "name");
if let Some(TokenValue::Name(value)) = self.yy_value_stack.last() {
self.yy_node_stack.push(AstNode::Name(value.clone()));
}
Ok(())
}
  /// Reduces a named parameter.
fn action_named_parameter(&mut self) -> Result<()> {
trace_action!(self, "named_parameter");
trace!(self, "{:?}", self.yy_value_stack);
if let TokenValue::Name(name) = &self.yy_value_stack[self.yy_value_stack.len() - 3] {
let rhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
let parameter_name = Box::new(AstNode::ParameterName(name.clone()));
let parameter_value = Box::new(rhs);
self.yy_node_stack.push(AstNode::NamedParameter(parameter_name, parameter_value));
}
Ok(())
}
  /// Reduces the tail of the named parameter list.
fn action_named_parameters_tail(&mut self) -> Result<()> {
trace_action!(self, "named_parameters_tail");
let node = self.yy_node_stack.pop().ok_or_else(err_pop)?;
if let AstNode::NamedParameters(mut items) = node {
let item = self.yy_node_stack.pop().ok_or_else(err_pop)?;
items.insert(0, item);
self.yy_node_stack.push(AstNode::NamedParameters(items));
return Ok(());
}
self.yy_node_stack.push(AstNode::NamedParameters(vec![node]));
Ok(())
}
  /// Reduces an arithmetic negation expression.
fn action_negation(&mut self) -> Result<()> {
trace_action!(self, "negation");
if let Some(node) = self.yy_node_stack.pop() {
self.yy_node_stack.push(AstNode::Neg(Box::new(node)));
}
Ok(())
}
  /// Reduces a path expression.
fn action_path(&mut self) -> Result<()> {
trace_action!(self, "path");
let lhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
if let Some(TokenValue::Name(name)) = &self.yy_value_stack.last() {
let rhs = AstNode::Name(name.clone());
self.yy_node_stack.push(AstNode::Path(Box::new(lhs), Box::new(rhs)));
}
Ok(())
}
  /// Reduces a path expression built from two names.
fn action_path_names(&mut self) -> Result<()> {
trace_action!(self, "path_names");
if let TokenValue::Name(lhs_name) = &self.yy_value_stack[self.yy_value_stack.len() - 3] {
if let Some(TokenValue::Name(rhs_name)) = &self.yy_value_stack.last() {
let lhs = AstNode::Name(lhs_name.clone());
let rhs = AstNode::Name(rhs_name.clone());
self.yy_node_stack.push(AstNode::Path(Box::new(lhs), Box::new(rhs)));
}
}
Ok(())
}
  /// Reduces the tail of the positional parameter list.
fn action_positional_parameters_tail(&mut self) -> Result<()> {
trace_action!(self, "positional_parameters_tail");
let node = self.yy_node_stack.pop().ok_or_else(err_pop)?;
if let AstNode::PositionalParameters(mut items) = node {
let item = self.yy_node_stack.pop().ok_or_else(err_pop)?;
items.insert(0, item);
self.yy_node_stack.push(AstNode::PositionalParameters(items));
return Ok(());
}
self.yy_node_stack.push(AstNode::PositionalParameters(vec![node]));
Ok(())
}
  /// Reduces a qualified name consisting of a single segment.
fn action_qualified_name(&mut self) -> Result<()> {
trace_action!(self, "action_qualified_name");
if let Some(TokenValue::Name(name)) = &self.yy_value_stack.last() {
self
.yy_node_stack
.push(AstNode::QualifiedName(vec![AstNode::QualifiedNameSegment(name.clone())]));
}
Ok(())
}
  /// Reduces the tail of a qualified name by prepending the next segment.
fn action_qualified_name_tail(&mut self) -> Result<()> {
trace_action!(self, "action_qualified_name_tail");
if let TokenValue::Name(name) = &self.yy_value_stack[self.yy_value_stack.len() - 3] {
if let Some(AstNode::QualifiedName(mut parts)) = self.yy_node_stack.pop() {
parts.insert(0, AstNode::QualifiedNameSegment(name.clone()));
self.yy_node_stack.push(AstNode::QualifiedName(parts));
}
}
Ok(())
}
  /// Reduces a single quantified expression context.
fn action_quantified_expression(&mut self) -> Result<()> {
trace_action!(self, "quantified_expression");
let rhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
let lhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
let node = AstNode::QuantifiedContext(Box::new(lhs), Box::new(rhs));
self.yy_node_stack.push(node);
Ok(())
}
/// Reduces the variable name of quantified expression.
/// Variable name is located on the top of the `yy_value_stack`.
/// This name is pushed onto `yy_node_stack`.
fn action_quantified_expression_variable_name(&mut self) -> Result<()> {
trace_action!(self, "quantified_expression_variable_name");
if let TokenValue::Name(name) = &self.yy_value_stack[self.yy_value_stack.len() - 1] {
self.yy_node_stack.push(AstNode::Name(name.clone()));
// add this variable name to the temporary context present on top of the scope
self.yy_lexer.add_name_to_scope(name);
}
Ok(())
}
  /// Prepares the lexer for scanning the quantified expression variable name (up to the `in` keyword).
fn action_quantified_expression_variable_name_begin(&mut self) -> Result<()> {
trace_action!(self, "quantified_expression_variable_name_begin");
self.yy_lexer.set_till_in();
Ok(())
}
  /// Reduces the tail of the list of quantified expression contexts.
fn action_quantified_expressions_tail(&mut self) -> Result<()> {
trace_action!(self, "quantified_expressions_tail");
let node = self.yy_node_stack.pop().ok_or_else(err_pop)?;
if let AstNode::QuantifiedContexts(mut items) = node {
let item = self.yy_node_stack.pop().ok_or_else(err_pop)?;
items.insert(0, item);
self.yy_node_stack.push(AstNode::QuantifiedContexts(items));
return Ok(());
}
self.yy_node_stack.push(AstNode::QuantifiedContexts(vec![node]));
Ok(())
}
  /// Reduces a range type.
fn action_range_type(&mut self) -> Result<()> {
trace_action!(self, "range_type");
let lhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
self.yy_node_stack.push(AstNode::RangeType(Box::new(lhs)));
Ok(())
}
  /// Reduces `some` quantified expression.
fn action_some(&mut self) -> Result<()> {
trace_action!(self, "some");
// pop temporary context from the top of the scope
self.yy_lexer.pop_from_scope();
let rhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
let lhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
let satisfies = Box::new(AstNode::Satisfies(Box::new(rhs)));
self.yy_node_stack.push(AstNode::Some(Box::new(lhs), satisfies));
Ok(())
}
  /// Begins parsing `some` quantified expression.
fn action_some_begin(&mut self) -> Result<()> |
  /// Reduces a subtraction expression.
fn action_subtraction(&mut self) -> Result<()> {
trace_action!(self, "subtraction");
let rhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
let lhs = self.yy_node_stack.pop().ok_or_else(err_pop)?;
self.yy_node_stack.push(AstNode::Sub(Box::new(lhs), Box::new(rhs)));
Ok(())
}
  /// Switches the lexer into type name scanning mode.
fn action_type_name(&mut self) -> Result<()> {
trace_action!(self, "type_name");
self.yy_lexer.set_type_name();
Ok(())
}
  /// Switches the lexer into unary tests scanning mode.
fn action_unary_tests_begin(&mut self) -> Result<()> {
trace_action!(self, "unary_tests_begin");
self.yy_lexer.set_unary_tests();
Ok(())
}
  /// Reduces irrelevant unary tests.
fn action_unary_tests_irrelevant(&mut self) -> Result<()> {
trace_action!(self, "unary_tests_irrelevant");
self.yy_node_stack.push(AstNode::Irrelevant);
Ok(())
}
  /// Reduces negated unary tests.
fn action_unary_tests_negated(&mut self) -> Result<()> {
trace_action!(self, "unary_tests_negated");
if let Some(AstNode::ExpressionList(items)) = self.yy_node_stack.pop() {
self.yy_node_stack.push(AstNode::NegatedList(items));
}
Ok(())
}
}
/// Definitions of errors raised by [parser](crate::parser) module.
mod errors {
use dmntk_common::DmntkError;
/// Definition of errors raised by [Parser](super::Parser).
enum ParserError {
NotAName(String),
InvalidParseResult,
SyntaxError(String),
PopError,
}
impl From<ParserError> for DmntkError {
fn from(e: ParserError) -> Self {
DmntkError::new("ParserError", &format!("{}", e))
}
}
impl std::fmt::Display for ParserError {
    /// Formats the parser error message.
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
ParserError::NotAName(s) => {
write!(f, "expected `FEEL` name on input but found `{}`", s)
}
ParserError::InvalidParseResult => {
write!(f, "invalid parse result, expected non empty AST node as a result when parser accepts input")
}
ParserError::SyntaxError(input) => {
write!(f, "syntax error: {}", input)
}
ParserError::PopError => {
write!(f, "pop error")
}
}
}
}
  /// Creates an error indicating that the input is not a valid `FEEL` name.
pub fn not_a_name(s: &str) -> DmntkError {
ParserError::NotAName(s.to_string()).into()
}
  /// Creates an error indicating an invalid parse result.
pub fn invalid_parse_result() -> DmntkError {
ParserError::InvalidParseResult.into()
}
  /// Creates a syntax error for the given input.
pub fn syntax_error(input: &str) -> DmntkError {
ParserError::SyntaxError(input.to_string()).into()
}
  /// Creates an error indicating a failed pop from an empty stack.
pub fn err_pop() -> DmntkError {
ParserError::PopError.into()
}
}
| {
trace_action!(self, "some_begin");
// push temporary context on the top of the scope,
// this context will be used to store
// local variable names of quantified expressions
self.yy_lexer.push_to_scope();
Ok(())
} |
func_noerror_builtin_module_test.py | """test import from a builtin module"""
from __future__ import absolute_import
__revision__ = None
from math import log10
def | ():
"""bla bla bla"""
return log10(2)
| log10_2 |
common.rs | //! Common module with commonly used structures across different
//! commands.
use crate::VERSION;
use clap::Clap;
use std::env;
use std::path::PathBuf;
#[derive(Debug, Clap, Clone)]
/// The WebAssembly features that can be passed through the
/// Command Line args.
pub struct WasmFeatures {
/// Enable support for the SIMD proposal.
#[clap(long = "enable-simd")]
pub simd: bool,
/// Enable support for the threads proposal.
#[clap(long = "enable-threads")]
pub threads: bool,
/// Enable support for the reference types proposal.
#[clap(long = "enable-reference-types")]
pub reference_types: bool,
/// Enable support for the multi value proposal.
#[clap(long = "enable-multi-value")]
pub multi_value: bool,
/// Enable support for the bulk memory proposal.
#[clap(long = "enable-bulk-memory")]
pub bulk_memory: bool,
/// Enable support for all pre-standard proposals.
#[clap(long = "enable-all")]
pub all: bool,
}
/// Get the cache dir
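/// Resolution order: `$WASMER_CACHE_DIR/<VERSION>` when that environment variable is
/// set, otherwise a versioned `wasmer` folder inside the system temporary directory.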
pub fn get_cache_dir() -> PathBuf {
match env::var("WASMER_CACHE_DIR") {
Ok(dir) => {
let mut path = PathBuf::from(dir);
path.push(VERSION);
path
}
Err(_) => {
// We use a temporal directory for saving cache files
let mut temp_dir = env::temp_dir();
temp_dir.push("wasmer"); | } | temp_dir.push(VERSION);
temp_dir
}
} |
poly.go | package types
import (
"bytes"
"strconv"
)
type Poly []int
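// Trim removes trailing zero coefficients; the zero polynomial is trimmed to [0].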
func (p Poly) Trim() Poly {
size := len(p)
for i := 0; i < size; i++ {
if p[size-1-i] != 0 {
return p[:size-i]
}
}
return []int{0}
}
func (p Poly) String() string {
var b bytes.Buffer
for _, elem := range p {
b.WriteString(strconv.Itoa(elem))
b.WriteString(" ")
}
return b.String()
}
func (p *Poly) Equal(other *Poly) bool {
if len(*p) != len(*other) |
for i, elem := range *p {
if elem != (*other)[i] {
return false
}
}
return true
}
| {
return false
} |
admin.py | from django.contrib import admin
from .models import Chat
class ChatAdmin(admin.ModelAdmin):
|
admin.site.register(Chat, ChatAdmin)
| list_display = ("pk",) |
describe_regions.go | package hsm
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
//
// Code generated by Alibaba Cloud SDK Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
)
// DescribeRegions invokes the hsm.DescribeRegions API synchronously
// api document: https://help.aliyun.com/api/hsm/describeregions.html
func (client *Client) DescribeRegions(request *DescribeRegionsRequest) (response *DescribeRegionsResponse, err error) {
response = CreateDescribeRegionsResponse()
err = client.DoAction(request, response)
return
}
// DescribeRegionsWithChan invokes the hsm.DescribeRegions API asynchronously
// api document: https://help.aliyun.com/api/hsm/describeregions.html
// asynchronous document: https://help.aliyun.com/document_detail/66220.html
func (client *Client) DescribeRegionsWithChan(request *DescribeRegionsRequest) (<-chan *DescribeRegionsResponse, <-chan error) {
responseChan := make(chan *DescribeRegionsResponse, 1)
errChan := make(chan error, 1)
err := client.AddAsyncTask(func() {
defer close(responseChan)
defer close(errChan)
response, err := client.DescribeRegions(request)
if err != nil {
errChan <- err
} else {
responseChan <- response
}
})
if err != nil {
errChan <- err
close(responseChan)
close(errChan)
}
return responseChan, errChan
}
// DescribeRegionsWithCallback invokes the hsm.DescribeRegions API asynchronously
// api document: https://help.aliyun.com/api/hsm/describeregions.html
// asynchronous document: https://help.aliyun.com/document_detail/66220.html
func (client *Client) DescribeRegionsWithCallback(request *DescribeRegionsRequest, callback func(response *DescribeRegionsResponse, err error)) <-chan int {
result := make(chan int, 1)
err := client.AddAsyncTask(func() {
var response *DescribeRegionsResponse
var err error
defer close(result)
response, err = client.DescribeRegions(request)
callback(response, err)
result <- 1
})
if err != nil |
return result
}
// DescribeRegionsRequest is the request struct for api DescribeRegions
type DescribeRegionsRequest struct {
*requests.RpcRequest
ResourceOwnerId requests.Integer `position:"Query" name:"ResourceOwnerId"`
SourceIp string `position:"Query" name:"SourceIp"`
}
// DescribeRegionsResponse is the response struct for api DescribeRegions
type DescribeRegionsResponse struct {
*responses.BaseResponse
RequestId string `json:"RequestId" xml:"RequestId"`
Regions []Region `json:"Regions" xml:"Regions"`
}
// CreateDescribeRegionsRequest creates a request to invoke DescribeRegions API
func CreateDescribeRegionsRequest() (request *DescribeRegionsRequest) {
request = &DescribeRegionsRequest{
RpcRequest: &requests.RpcRequest{},
}
request.InitWithApiInfo("hsm", "2018-01-11", "DescribeRegions", "hsm", "openAPI")
return
}
// CreateDescribeRegionsResponse creates a response to parse from DescribeRegions response
func CreateDescribeRegionsResponse() (response *DescribeRegionsResponse) {
response = &DescribeRegionsResponse{
BaseResponse: &responses.BaseResponse{},
}
return
}
| {
defer close(result)
callback(nil, err)
result <- 0
} |
parser_test.go | package parser
import (
"bytes"
"net/http"
"net/http/httptest"
"reflect"
"testing"
"github.com/FredHutch/GitWebhookProxy/pkg/providers"
)
const (
parserGitlabTestSecret = "testSecret"
parserGitlabTestEvent = "testEvent"
parserGitlabTestBody = "testBody"
)
func createGitlabRequest(method string, path string, tokenHeader string,
eventHeader string, body string) *http.Request {
req := httptest.NewRequest(method, path, bytes.NewReader([]byte(body)))
req.Header.Add(providers.XGitlabToken, tokenHeader)
req.Header.Add(providers.XGitlabEvent, eventHeader)
req.Header.Add(providers.ContentTypeHeader, providers.DefaultContentTypeHeaderValue)
return req
}
func createRequestWithWrongHeaders(method string, path string, tokenHeader string,
eventHeader string, body string) *http.Request {
req := httptest.NewRequest(method, path, bytes.NewReader([]byte(body)))
req.Header.Add("X-Wrong-Token", tokenHeader)
req.Header.Add("X-Wrong-Event", eventHeader)
return req
}
func createGitlabProvider(secret string) providers.Provider {
provider, _ := providers.NewGitlabProvider(secret)
return provider | return &providers.Hook{
Headers: map[string]string{
providers.XGitlabToken: tokenHeader,
providers.XGitlabEvent: tokenEvent,
providers.ContentTypeHeader: providers.DefaultContentTypeHeaderValue,
},
Payload: []byte(body),
RequestMethod: method,
}
}
func TestParse(t *testing.T) {
type args struct {
req *http.Request
provider providers.Provider
}
tests := []struct {
name string
args args
want *providers.Hook
wantErr bool
}{
{
name: "TestParseWithCorrectRequestValues",
args: args{
req: createGitlabRequest(http.MethodPost, "/dummy", parserGitlabTestSecret,
parserGitlabTestEvent, parserGitlabTestBody),
provider: createGitlabProvider(parserGitlabTestSecret),
},
want: createGitlabHook(parserGitlabTestSecret, parserGitlabTestEvent, parserGitlabTestBody, http.MethodPost),
},
{
name: "TestParseWithEmptyTokenHeaderValue",
args: args{
req: createGitlabRequest(http.MethodPost, "/dummy", "",
parserGitlabTestEvent, parserGitlabTestBody),
provider: createGitlabProvider(parserGitlabTestSecret),
},
wantErr: true,
},
{
name: "TestParseWithNoEventHeaderValue",
args: args{
req: createGitlabRequest(http.MethodPost, "/dummy", parserGitlabTestSecret,
"", parserGitlabTestBody),
provider: createGitlabProvider(parserGitlabTestSecret),
},
wantErr: true,
},
{
name: "TestParseWithNoBody",
args: args{
req: createGitlabRequest(http.MethodPost, "/dummy", parserGitlabTestSecret,
parserGitlabTestEvent, ""),
provider: createGitlabProvider(parserGitlabTestSecret),
},
want: createGitlabHook(parserGitlabTestSecret, parserGitlabTestEvent, "", http.MethodPost),
},
{
name: "TestParseWithNoHeaders",
args: args{
req: httptest.NewRequest(http.MethodPost, "/dummy", bytes.NewReader([]byte(parserGitlabTestBody))),
provider: createGitlabProvider(parserGitlabTestSecret),
},
wantErr: true,
},
{
name: "TestParseWithWrongHeaderKeys",
args: args{
req: createRequestWithWrongHeaders(http.MethodPost, "/dummy", parserGitlabTestSecret,
parserGitlabTestEvent, parserGitlabTestBody),
provider: createGitlabProvider(parserGitlabTestSecret),
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := Parse(tt.args.req, tt.args.provider)
if (err != nil) != tt.wantErr {
t.Errorf("Parse() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("Parse() = %v, want %v", got, tt.want)
}
})
}
} | }
func createGitlabHook(tokenHeader string, tokenEvent string, body string, method string) *providers.Hook { |
__init__.py | """Provide functionality to stream video source.
Components use create_stream with a stream source (e.g. an rtsp url) to create
a new Stream object. Stream manages:
- Background work to fetch and decode a stream
- Desired output formats
- Home Assistant URLs for viewing a stream
- Access tokens for URLs for viewing a stream
A Stream consists of a background worker, and one or more output formats each
with their own idle timeout managed by the stream component. When an output
format is no longer in use, the stream component will expire it. When there
are no active output formats, the background worker is shut down and access
tokens are expired. Alternatively, a Stream can be configured with keepalive
to always keep workers active.
"""
from __future__ import annotations
import logging
import re
import secrets
import threading
import time
from types import MappingProxyType
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
from .const import (
ATTR_ENDPOINTS,
ATTR_STREAMS,
DOMAIN,
HLS_PROVIDER,
MAX_SEGMENTS,
OUTPUT_IDLE_TIMEOUT,
RECORDER_PROVIDER,
STREAM_RESTART_INCREMENT,
STREAM_RESTART_RESET_TIME,
)
from .core import PROVIDERS, IdleTimer, StreamOutput
from .hls import async_setup_hls
_LOGGER = logging.getLogger(__name__)
STREAM_SOURCE_RE = re.compile("//.*:.*@")
def redact_credentials(data):
"""Redact credentials from string data."""
return STREAM_SOURCE_RE.sub("//****:****@", data)
def create_stream(hass, stream_source, options=None):
"""Create a stream with the specified identfier based on the source url.
The stream_source is typically an rtsp url and options are passed into
pyav / ffmpeg as options.
"""
if DOMAIN not in hass.config.components:
raise HomeAssistantError("Stream integration is not set up.")
if options is None:
options = {}
# For RTSP streams, prefer TCP
if isinstance(stream_source, str) and stream_source[:7] == "rtsp://":
options = {
"rtsp_flags": "prefer_tcp",
"stimeout": "5000000",
**options,
}
stream = Stream(hass, stream_source, options=options)
hass.data[DOMAIN][ATTR_STREAMS].append(stream)
return stream
async def async_setup(hass, config):
"""Set up stream."""
# Set log level to error for libav
logging.getLogger("libav").setLevel(logging.ERROR)
logging.getLogger("libav.mp4").setLevel(logging.ERROR)
# Keep import here so that we can import stream integration without installing reqs
# pylint: disable=import-outside-toplevel
from .recorder import async_setup_recorder
hass.data[DOMAIN] = {}
hass.data[DOMAIN][ATTR_ENDPOINTS] = {}
hass.data[DOMAIN][ATTR_STREAMS] = []
# Setup HLS
hls_endpoint = async_setup_hls(hass)
hass.data[DOMAIN][ATTR_ENDPOINTS][HLS_PROVIDER] = hls_endpoint
# Setup Recorder
async_setup_recorder(hass)
@callback
def shutdown(event):
"""Stop all stream workers."""
for stream in hass.data[DOMAIN][ATTR_STREAMS]:
stream.keepalive = False
stream.stop()
_LOGGER.info("Stopped stream workers")
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, shutdown)
return True
class Stream:
"""Represents a single stream."""
def __init__(self, hass, source, options=None):
"""Initialize a stream."""
self.hass = hass
self.source = source
self.options = options
self.keepalive = False
self.access_token = None
self._thread = None
self._thread_quit = threading.Event()
self._outputs: dict[str, StreamOutput] = {}
self._fast_restart_once = False
if self.options is None:
self.options = {}
def endpoint_url(self, fmt: str) -> str:
"""Start the stream and returns a url for the output format."""
if fmt not in self._outputs:
raise ValueError(f"Stream is not configured for format '{fmt}'")
if not self.access_token:
self.access_token = secrets.token_hex()
return self.hass.data[DOMAIN][ATTR_ENDPOINTS][fmt].format(self.access_token)
def outputs(self):
"""Return a copy of the stream outputs."""
# A copy is returned so the caller can iterate through the outputs
# without concern about self._outputs being modified from another thread.
return MappingProxyType(self._outputs.copy())
def add_provider(self, fmt, timeout=OUTPUT_IDLE_TIMEOUT):
"""Add provider output stream."""
if not self._outputs.get(fmt):
@callback
def idle_callback():
if (
not self.keepalive or fmt == RECORDER_PROVIDER
) and fmt in self._outputs:
self.remove_provider(self._outputs[fmt])
self.check_idle()
provider = PROVIDERS[fmt](
self.hass, IdleTimer(self.hass, timeout, idle_callback)
)
self._outputs[fmt] = provider
return self._outputs[fmt]
def remove_provider(self, provider):
"""Remove provider output stream."""
if provider.name in self._outputs:
self._outputs[provider.name].cleanup()
del self._outputs[provider.name]
if not self._outputs:
self.stop()
def check_idle(self):
"""Reset access token if all providers are idle."""
if all(p.idle for p in self._outputs.values()):
self.access_token = None
def start(self):
"""Start a stream."""
if self._thread is None or not self._thread.is_alive():
if self._thread is not None:
# The thread must have crashed/exited. Join to clean up the
# previous thread.
self._thread.join(timeout=0)
self._thread_quit.clear()
self._thread = threading.Thread(
name="stream_worker",
target=self._run_worker,
)
self._thread.start()
_LOGGER.info("Started stream: %s", redact_credentials(str(self.source)))
def | (self, new_source):
"""Restart the stream with a new stream source."""
_LOGGER.debug("Updating stream source %s", new_source)
self.source = new_source
self._fast_restart_once = True
self._thread_quit.set()
def _run_worker(self):
"""Handle consuming streams and restart keepalive streams."""
# Keep import here so that we can import stream integration without installing reqs
# pylint: disable=import-outside-toplevel
from .worker import SegmentBuffer, stream_worker
segment_buffer = SegmentBuffer(self.outputs)
wait_timeout = 0
while not self._thread_quit.wait(timeout=wait_timeout):
start_time = time.time()
stream_worker(self.source, self.options, segment_buffer, self._thread_quit)
segment_buffer.discontinuity()
if not self.keepalive or self._thread_quit.is_set():
if self._fast_restart_once:
# The stream source is updated, restart without any delay.
self._fast_restart_once = False
self._thread_quit.clear()
continue
break
# To avoid excessive restarts, wait before restarting
# As the required recovery time may be different for different setups, start
# with trying a short wait_timeout and increase it on each reconnection attempt.
# Reset the wait_timeout after the worker has been up for several minutes
if time.time() - start_time > STREAM_RESTART_RESET_TIME:
wait_timeout = 0
wait_timeout += STREAM_RESTART_INCREMENT
_LOGGER.debug(
"Restarting stream worker in %d seconds: %s",
wait_timeout,
self.source,
)
self._worker_finished()
def _worker_finished(self):
"""Schedule cleanup of all outputs."""
@callback
def remove_outputs():
for provider in self.outputs().values():
self.remove_provider(provider)
self.hass.loop.call_soon_threadsafe(remove_outputs)
def stop(self):
"""Remove outputs and access token."""
self._outputs = {}
self.access_token = None
if not self.keepalive:
self._stop()
def _stop(self):
"""Stop worker thread."""
if self._thread is not None:
self._thread_quit.set()
self._thread.join()
self._thread = None
_LOGGER.info("Stopped stream: %s", redact_credentials(str(self.source)))
async def async_record(self, video_path, duration=30, lookback=5):
"""Make a .mp4 recording from a provided stream."""
# Check for file access
if not self.hass.config.is_allowed_path(video_path):
raise HomeAssistantError(f"Can't write {video_path}, no access to path!")
# Add recorder
recorder = self.outputs().get(RECORDER_PROVIDER)
if recorder:
raise HomeAssistantError(
f"Stream already recording to {recorder.video_path}!"
)
recorder = self.add_provider(RECORDER_PROVIDER, timeout=duration)
recorder.video_path = video_path
self.start()
_LOGGER.debug("Started a stream recording of %s seconds", duration)
# Take advantage of lookback
hls = self.outputs().get(HLS_PROVIDER)
if lookback > 0 and hls:
num_segments = min(int(lookback // hls.target_duration), MAX_SEGMENTS)
# Wait for latest segment, then add the lookback
await hls.recv()
recorder.prepend(list(hls.get_segments())[-num_segments:])
| update_source |
test_switch.py | """Tests for HomematicIP Cloud switch."""
from openpeerpower.components.homematicip_cloud import DOMAIN as HMIPC_DOMAIN
from openpeerpower.components.homematicip_cloud.generic_entity import (
ATTR_GROUP_MEMBER_UNREACHABLE,
)
from openpeerpower.components.switch import (
ATTR_CURRENT_POWER_W,
ATTR_TODAY_ENERGY_KWH,
DOMAIN as SWITCH_DOMAIN,
)
from openpeerpower.const import STATE_OFF, STATE_ON
from openpeerpower.setup import async_setup_component
from .helper import async_manipulate_test_data, get_and_check_entity_basics
async def test_manually_configured_platform(opp):
"""Test that we do not set up an access point."""
assert await async_setup_component(
opp, SWITCH_DOMAIN, {SWITCH_DOMAIN: {"platform": HMIPC_DOMAIN}}
)
assert not opp.data.get(HMIPC_DOMAIN)
async def test_hmip_switch(opp, default_mock_hap_factory):
"""Test HomematicipSwitch."""
entity_id = "switch.schrank"
entity_name = "Schrank"
device_model = "HMIP-PS"
mock_hap = await default_mock_hap_factory.async_get_mock_hap(
test_devices=[entity_name]
)
ha_state, hmip_device = get_and_check_entity_basics(
opp, mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_ON
service_call_counter = len(hmip_device.mock_calls)
await opp.services.async_call(
"switch", "turn_off", {"entity_id": entity_id}, blocking=True
)
assert len(hmip_device.mock_calls) == service_call_counter + 1
assert hmip_device.mock_calls[-1][0] == "turn_off"
assert hmip_device.mock_calls[-1][1] == (1,)
await async_manipulate_test_data(opp, hmip_device, "on", False)
ha_state = opp.states.get(entity_id)
assert ha_state.state == STATE_OFF
await opp.services.async_call(
"switch", "turn_on", {"entity_id": entity_id}, blocking=True
)
assert len(hmip_device.mock_calls) == service_call_counter + 3
assert hmip_device.mock_calls[-1][0] == "turn_on"
assert hmip_device.mock_calls[-1][1] == (1,)
await async_manipulate_test_data(opp, hmip_device, "on", True)
ha_state = opp.states.get(entity_id)
assert ha_state.state == STATE_ON
async def test_hmip_switch_input(opp, default_mock_hap_factory):
|
async def test_hmip_switch_measuring(opp, default_mock_hap_factory):
"""Test HomematicipSwitchMeasuring."""
entity_id = "switch.pc"
entity_name = "Pc"
device_model = "HMIP-PSM"
mock_hap = await default_mock_hap_factory.async_get_mock_hap(
test_devices=[entity_name]
)
ha_state, hmip_device = get_and_check_entity_basics(
opp, mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_ON
service_call_counter = len(hmip_device.mock_calls)
await opp.services.async_call(
"switch", "turn_off", {"entity_id": entity_id}, blocking=True
)
assert len(hmip_device.mock_calls) == service_call_counter + 1
assert hmip_device.mock_calls[-1][0] == "turn_off"
assert hmip_device.mock_calls[-1][1] == (1,)
await async_manipulate_test_data(opp, hmip_device, "on", False)
ha_state = opp.states.get(entity_id)
assert ha_state.state == STATE_OFF
await opp.services.async_call(
"switch", "turn_on", {"entity_id": entity_id}, blocking=True
)
assert len(hmip_device.mock_calls) == service_call_counter + 3
assert hmip_device.mock_calls[-1][0] == "turn_on"
assert hmip_device.mock_calls[-1][1] == (1,)
await async_manipulate_test_data(opp, hmip_device, "on", True)
await async_manipulate_test_data(opp, hmip_device, "currentPowerConsumption", 50)
ha_state = opp.states.get(entity_id)
assert ha_state.state == STATE_ON
assert ha_state.attributes[ATTR_CURRENT_POWER_W] == 50
assert ha_state.attributes[ATTR_TODAY_ENERGY_KWH] == 36
await async_manipulate_test_data(opp, hmip_device, "energyCounter", None)
ha_state = opp.states.get(entity_id)
assert not ha_state.attributes.get(ATTR_TODAY_ENERGY_KWH)
async def test_hmip_group_switch(opp, default_mock_hap_factory):
"""Test HomematicipGroupSwitch."""
entity_id = "switch.strom_group"
entity_name = "Strom Group"
device_model = None
mock_hap = await default_mock_hap_factory.async_get_mock_hap(test_groups=["Strom"])
ha_state, hmip_device = get_and_check_entity_basics(
opp, mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_ON
service_call_counter = len(hmip_device.mock_calls)
await opp.services.async_call(
"switch", "turn_off", {"entity_id": entity_id}, blocking=True
)
assert len(hmip_device.mock_calls) == service_call_counter + 1
assert hmip_device.mock_calls[-1][0] == "turn_off"
assert hmip_device.mock_calls[-1][1] == ()
await async_manipulate_test_data(opp, hmip_device, "on", False)
ha_state = opp.states.get(entity_id)
assert ha_state.state == STATE_OFF
await opp.services.async_call(
"switch", "turn_on", {"entity_id": entity_id}, blocking=True
)
assert len(hmip_device.mock_calls) == service_call_counter + 3
assert hmip_device.mock_calls[-1][0] == "turn_on"
assert hmip_device.mock_calls[-1][1] == ()
await async_manipulate_test_data(opp, hmip_device, "on", True)
ha_state = opp.states.get(entity_id)
assert ha_state.state == STATE_ON
assert not ha_state.attributes.get(ATTR_GROUP_MEMBER_UNREACHABLE)
await async_manipulate_test_data(opp, hmip_device, "unreach", True)
ha_state = opp.states.get(entity_id)
assert ha_state.attributes[ATTR_GROUP_MEMBER_UNREACHABLE]
async def test_hmip_multi_switch(opp, default_mock_hap_factory):
"""Test HomematicipMultiSwitch."""
entity_id = "switch.jalousien_1_kizi_2_schlazi_channel1"
entity_name = "Jalousien - 1 KiZi, 2 SchlaZi Channel1"
device_model = "HmIP-PCBS2"
mock_hap = await default_mock_hap_factory.async_get_mock_hap(
test_devices=[
"Jalousien - 1 KiZi, 2 SchlaZi",
"Multi IO Box",
"Heizungsaktor",
"ioBroker",
"Schaltaktor Verteiler",
]
)
ha_state, hmip_device = get_and_check_entity_basics(
opp, mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_OFF
service_call_counter = len(hmip_device.mock_calls)
await opp.services.async_call(
"switch", "turn_on", {"entity_id": entity_id}, blocking=True
)
assert len(hmip_device.mock_calls) == service_call_counter + 1
assert hmip_device.mock_calls[-1][0] == "turn_on"
assert hmip_device.mock_calls[-1][1] == (1,)
await async_manipulate_test_data(opp, hmip_device, "on", True)
ha_state = opp.states.get(entity_id)
assert ha_state.state == STATE_ON
await opp.services.async_call(
"switch", "turn_off", {"entity_id": entity_id}, blocking=True
)
assert len(hmip_device.mock_calls) == service_call_counter + 3
assert hmip_device.mock_calls[-1][0] == "turn_off"
assert hmip_device.mock_calls[-1][1] == (1,)
await async_manipulate_test_data(opp, hmip_device, "on", False)
ha_state = opp.states.get(entity_id)
assert ha_state.state == STATE_OFF
ha_state, hmip_device = get_and_check_entity_basics(
opp,
mock_hap,
"switch.schaltaktor_verteiler_channel3",
"Schaltaktor Verteiler Channel3",
"HmIP-DRSI4",
)
assert ha_state.state == STATE_OFF
async def test_hmip_wired_multi_switch(opp, default_mock_hap_factory):
"""Test HomematicipMultiSwitch."""
entity_id = "switch.fernseher_wohnzimmer"
entity_name = "Fernseher (Wohnzimmer)"
device_model = "HmIPW-DRS8"
mock_hap = await default_mock_hap_factory.async_get_mock_hap(
test_devices=[
"Wired Schaltaktor – 8-fach",
]
)
ha_state, hmip_device = get_and_check_entity_basics(
opp, mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_ON
service_call_counter = len(hmip_device.mock_calls)
await opp.services.async_call(
"switch", "turn_off", {"entity_id": entity_id}, blocking=True
)
assert len(hmip_device.mock_calls) == service_call_counter + 1
assert hmip_device.mock_calls[-1][0] == "turn_off"
assert hmip_device.mock_calls[-1][1] == (1,)
await async_manipulate_test_data(opp, hmip_device, "on", False)
ha_state = opp.states.get(entity_id)
assert ha_state.state == STATE_OFF
await opp.services.async_call(
"switch", "turn_on", {"entity_id": entity_id}, blocking=True
)
assert len(hmip_device.mock_calls) == service_call_counter + 3
assert hmip_device.mock_calls[-1][0] == "turn_on"
assert hmip_device.mock_calls[-1][1] == (1,)
await async_manipulate_test_data(opp, hmip_device, "on", True)
ha_state = opp.states.get(entity_id)
assert ha_state.state == STATE_ON
| """Test HomematicipSwitch."""
entity_id = "switch.wohnzimmer_beleuchtung"
entity_name = "Wohnzimmer Beleuchtung"
device_model = "HmIP-FSI16"
mock_hap = await default_mock_hap_factory.async_get_mock_hap(
test_devices=[entity_name]
)
ha_state, hmip_device = get_and_check_entity_basics(
opp, mock_hap, entity_id, entity_name, device_model
)
assert ha_state.state == STATE_ON
service_call_counter = len(hmip_device.mock_calls)
await opp.services.async_call(
"switch", "turn_off", {"entity_id": entity_id}, blocking=True
)
assert len(hmip_device.mock_calls) == service_call_counter + 1
assert hmip_device.mock_calls[-1][0] == "turn_off"
assert hmip_device.mock_calls[-1][1] == (1,)
await async_manipulate_test_data(opp, hmip_device, "on", False)
ha_state = opp.states.get(entity_id)
assert ha_state.state == STATE_OFF
await opp.services.async_call(
"switch", "turn_on", {"entity_id": entity_id}, blocking=True
)
assert len(hmip_device.mock_calls) == service_call_counter + 3
assert hmip_device.mock_calls[-1][0] == "turn_on"
assert hmip_device.mock_calls[-1][1] == (1,)
await async_manipulate_test_data(opp, hmip_device, "on", True)
ha_state = opp.states.get(entity_id)
assert ha_state.state == STATE_ON |
streamable.py | # flake8: noqa
# pylint: disable
from __future__ import annotations
import dataclasses
import io
import pprint
import sys
from enum import Enum
from typing import Any, BinaryIO, Dict, List, Tuple, Type, Callable, Optional, Iterator
from blspy import G1Element, G2Element, PrivateKey
from wheat.types.blockchain_format.program import Program, SerializedProgram
from wheat.types.blockchain_format.sized_bytes import bytes32
from wheat.util.byte_types import hexstr_to_bytes
from wheat.util.hash import std_hash
from wheat.util.ints import int64, int512, uint32, uint64, uint128
from wheat.util.type_checking import is_type_List, is_type_SpecificOptional, is_type_Tuple, strictdataclass
if sys.version_info < (3, 8):
def get_args(t: Type[Any]) -> Tuple[Any, ...]:
return getattr(t, "__args__", ())
else:
from typing import get_args
pp = pprint.PrettyPrinter(indent=1, width=120, compact=True)
# TODO: Remove hack, this allows streaming these objects from binary
size_hints = {
"PrivateKey": PrivateKey.PRIVATE_KEY_SIZE,
"G1Element": G1Element.SIZE,
"G2Element": G2Element.SIZE,
"ConditionOpcode": 1,
} | PrivateKey,
G1Element,
G2Element,
Program,
SerializedProgram,
]
# JSON does not support big ints, so these types must be serialized differently in JSON
big_ints = [uint64, int64, uint128, int512]
def dataclass_from_dict(klass, d):
"""
Converts a dictionary based on a dataclass, into an instance of that dataclass.
Recursively goes through lists, optionals, and dictionaries.
"""
if is_type_SpecificOptional(klass):
# Type is optional, data is either None, or Any
if not d:
return None
return dataclass_from_dict(get_args(klass)[0], d)
elif is_type_Tuple(klass):
# Type is tuple, can have multiple different types inside
i = 0
klass_properties = []
for item in d:
klass_properties.append(dataclass_from_dict(klass.__args__[i], item))
i = i + 1
return tuple(klass_properties)
elif dataclasses.is_dataclass(klass):
# Type is a dataclass, data is a dictionary
fieldtypes = {f.name: f.type for f in dataclasses.fields(klass)}
return klass(**{f: dataclass_from_dict(fieldtypes[f], d[f]) for f in d})
elif is_type_List(klass):
# Type is a list, data is a list
return [dataclass_from_dict(get_args(klass)[0], item) for item in d]
elif issubclass(klass, bytes):
# Type is bytes, data is a hex string
return klass(hexstr_to_bytes(d))
elif klass in unhashable_types:
# Type is unhashable (bls type), so cast from hex string
return klass.from_bytes(hexstr_to_bytes(d))
else:
# Type is a primitive, cast with correct class
return klass(d)
def recurse_jsonify(d):
"""
Makes bytes objects and unhashable types into strings with 0x, and makes large ints into
strings.
"""
if isinstance(d, list) or isinstance(d, tuple):
new_list = []
for item in d:
if type(item) in unhashable_types or issubclass(type(item), bytes):
item = f"0x{bytes(item).hex()}"
if isinstance(item, dict):
item = recurse_jsonify(item)
if isinstance(item, list):
item = recurse_jsonify(item)
if isinstance(item, tuple):
item = recurse_jsonify(item)
if isinstance(item, Enum):
item = item.name
if isinstance(item, int) and type(item) in big_ints:
item = int(item)
new_list.append(item)
d = new_list
else:
for key, value in d.items():
if type(value) in unhashable_types or issubclass(type(value), bytes):
d[key] = f"0x{bytes(value).hex()}"
if isinstance(value, dict):
d[key] = recurse_jsonify(value)
if isinstance(value, list):
d[key] = recurse_jsonify(value)
if isinstance(value, tuple):
d[key] = recurse_jsonify(value)
if isinstance(value, Enum):
d[key] = value.name
if isinstance(value, int) and type(value) in big_ints:
d[key] = int(value)
return d
PARSE_FUNCTIONS_FOR_STREAMABLE_CLASS = {}
def streamable(cls: Any):
"""
This is a decorator for class definitions. It applies the strictdataclass decorator,
which checks all types at construction. It also defines a simple serialization format,
and adds parse, from bytes, stream, and __bytes__ methods.
Serialization format:
- Each field is serialized in order, by calling from_bytes/__bytes__.
- For Lists, there is a 4 byte prefix for the list length.
- For Optionals, there is a one byte prefix, 1 iff object is present, 0 iff not.
All of the constituents must have parse/from_bytes, and stream/__bytes__ and therefore
be of fixed size. For example, int cannot be a constituent since it is not a fixed size,
whereas uint32 can be.
Furthermore, a get_hash() member is added, which performs a serialization and a sha256.
This class is used for deterministic serialization and hashing, for consensus critical
objects such as the block header.
Make sure to use the Streamable class as a parent class when using the streamable decorator,
as it will allow linters to recognize the methods that are added by the decorator. Also,
use the @dataclass(frozen=True) decorator as well, for linters to recognize constructor
arguments.
"""
cls1 = strictdataclass(cls)
t = type(cls.__name__, (cls1, Streamable), {})
parse_functions = []
try:
fields = cls1.__annotations__ # pylint: disable=no-member
except Exception:
fields = {}
for _, f_type in fields.items():
parse_functions.append(cls.function_to_parse_one_item(f_type))
PARSE_FUNCTIONS_FOR_STREAMABLE_CLASS[t] = parse_functions
return t
def parse_bool(f: BinaryIO) -> bool:
bool_byte = f.read(1)
assert bool_byte is not None and len(bool_byte) == 1 # Checks for EOF
if bool_byte == bytes([0]):
return False
elif bool_byte == bytes([1]):
return True
else:
raise ValueError("Bool byte must be 0 or 1")
def parse_optional(f: BinaryIO, parse_inner_type_f: Callable[[BinaryIO], Any]) -> Optional[Any]:
is_present_bytes = f.read(1)
assert is_present_bytes is not None and len(is_present_bytes) == 1 # Checks for EOF
if is_present_bytes == bytes([0]):
return None
elif is_present_bytes == bytes([1]):
return parse_inner_type_f(f)
else:
raise ValueError("Optional must be 0 or 1")
def parse_bytes(f: BinaryIO) -> bytes:
list_size_bytes = f.read(4)
assert list_size_bytes is not None and len(list_size_bytes) == 4 # Checks for EOF
list_size: uint32 = uint32(int.from_bytes(list_size_bytes, "big"))
bytes_read = f.read(list_size)
assert bytes_read is not None and len(bytes_read) == list_size
return bytes_read
def parse_list(f: BinaryIO, parse_inner_type_f: Callable[[BinaryIO], Any]) -> List[Any]:
full_list: List = []
# wjb assert inner_type != get_args(List)[0]
list_size_bytes = f.read(4)
assert list_size_bytes is not None and len(list_size_bytes) == 4 # Checks for EOF
list_size = uint32(int.from_bytes(list_size_bytes, "big"))
for list_index in range(list_size):
full_list.append(parse_inner_type_f(f))
return full_list
def parse_tuple(f: BinaryIO, list_parse_inner_type_f: List[Callable[[BinaryIO], Any]]) -> Tuple[Any, ...]:
full_list = []
for parse_f in list_parse_inner_type_f:
full_list.append(parse_f(f))
return tuple(full_list)
def parse_size_hints(f: BinaryIO, f_type: Type, bytes_to_read: int) -> Any:
bytes_read = f.read(bytes_to_read)
assert bytes_read is not None and len(bytes_read) == bytes_to_read
return f_type.from_bytes(bytes_read)
def parse_str(f: BinaryIO) -> str:
str_size_bytes = f.read(4)
assert str_size_bytes is not None and len(str_size_bytes) == 4 # Checks for EOF
str_size: uint32 = uint32(int.from_bytes(str_size_bytes, "big"))
str_read_bytes = f.read(str_size)
assert str_read_bytes is not None and len(str_read_bytes) == str_size # Checks for EOF
return bytes.decode(str_read_bytes, "utf-8")
class Streamable:
@classmethod
def function_to_parse_one_item(cls: Type[cls.__name__], f_type: Type): # type: ignore
"""
This function returns a function taking one argument `f: BinaryIO` that parses
and returns a value of the given type.
"""
inner_type: Type
if f_type is bool:
return parse_bool
if is_type_SpecificOptional(f_type):
inner_type = get_args(f_type)[0]
parse_inner_type_f = cls.function_to_parse_one_item(inner_type)
return lambda f: parse_optional(f, parse_inner_type_f)
if hasattr(f_type, "parse"):
return f_type.parse
if f_type == bytes:
return parse_bytes
if is_type_List(f_type):
inner_type = get_args(f_type)[0]
parse_inner_type_f = cls.function_to_parse_one_item(inner_type)
return lambda f: parse_list(f, parse_inner_type_f)
if is_type_Tuple(f_type):
inner_types = get_args(f_type)
list_parse_inner_type_f = [cls.function_to_parse_one_item(_) for _ in inner_types]
return lambda f: parse_tuple(f, list_parse_inner_type_f)
if hasattr(f_type, "from_bytes") and f_type.__name__ in size_hints:
bytes_to_read = size_hints[f_type.__name__]
return lambda f: parse_size_hints(f, f_type, bytes_to_read)
if f_type is str:
return parse_str
raise NotImplementedError(f"Type {f_type} does not have parse")
@classmethod
def parse(cls: Type[cls.__name__], f: BinaryIO) -> cls.__name__: # type: ignore
# Create the object without calling __init__() to avoid unnecessary post-init checks in strictdataclass
obj: Streamable = object.__new__(cls)
fields: Iterator[str] = iter(getattr(cls, "__annotations__", {}))
values: Iterator = (parse_f(f) for parse_f in PARSE_FUNCTIONS_FOR_STREAMABLE_CLASS[cls])
for field, value in zip(fields, values):
object.__setattr__(obj, field, value)
# Use -1 as a sentinel value as it's not currently serializable
if next(fields, -1) != -1:
raise ValueError("Failed to parse incomplete Streamable object")
if next(values, -1) != -1:
raise ValueError("Failed to parse unknown data in Streamable object")
return obj
def stream_one_item(self, f_type: Type, item, f: BinaryIO) -> None:
inner_type: Type
if is_type_SpecificOptional(f_type):
inner_type = get_args(f_type)[0]
if item is None:
f.write(bytes([0]))
else:
f.write(bytes([1]))
self.stream_one_item(inner_type, item, f)
elif f_type == bytes:
f.write(uint32(len(item)).to_bytes(4, "big"))
f.write(item)
elif hasattr(f_type, "stream"):
item.stream(f)
elif hasattr(f_type, "__bytes__"):
f.write(bytes(item))
elif is_type_List(f_type):
assert is_type_List(type(item))
f.write(uint32(len(item)).to_bytes(4, "big"))
inner_type = get_args(f_type)[0]
# wjb assert inner_type != get_args(List)[0] # type: ignore
for element in item:
self.stream_one_item(inner_type, element, f)
elif is_type_Tuple(f_type):
inner_types = get_args(f_type)
assert len(item) == len(inner_types)
for i in range(len(item)):
self.stream_one_item(inner_types[i], item[i], f)
elif f_type is str:
str_bytes = item.encode("utf-8")
f.write(uint32(len(str_bytes)).to_bytes(4, "big"))
f.write(str_bytes)
elif f_type is bool:
f.write(int(item).to_bytes(1, "big"))
else:
raise NotImplementedError(f"can't stream {item}, {f_type}")
def stream(self, f: BinaryIO) -> None:
try:
fields = self.__annotations__ # pylint: disable=no-member
except Exception:
fields = {}
for f_name, f_type in fields.items():
self.stream_one_item(f_type, getattr(self, f_name), f)
def get_hash(self) -> bytes32:
return bytes32(std_hash(bytes(self)))
@classmethod
def from_bytes(cls: Any, blob: bytes) -> Any:
f = io.BytesIO(blob)
parsed = cls.parse(f)
assert f.read() == b""
return parsed
def __bytes__(self: Any) -> bytes:
f = io.BytesIO()
self.stream(f)
return bytes(f.getvalue())
def __str__(self: Any) -> str:
return pp.pformat(recurse_jsonify(dataclasses.asdict(self)))
def __repr__(self: Any) -> str:
return pp.pformat(recurse_jsonify(dataclasses.asdict(self)))
def to_json_dict(self) -> Dict:
return recurse_jsonify(dataclasses.asdict(self))
@classmethod
def from_json_dict(cls: Any, json_dict: Dict) -> Any:
return dataclass_from_dict(cls, json_dict) | unhashable_types = [ |
tree_diameter.py | def tree_diameter(t: networkx.Graph):
if __debug__:
assert networkx.is_tree(t)
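    # Standard two-pass technique: a first DFS from an arbitrary vertex reaches some
    # farthest vertex v; a second DFS from v yields the diameter. The project-local
    # `dfs` helper is assumed to return a (farthest_vertex, path_length) pair.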
v, _ = dfs(t)
_, longest_path_length = dfs(t, v)
return longest_path_length | import networkx
from algorithms.dfs import dfs
|
|
SetInterval.py | import logging
import threading
import time
from typing import Any |
class SetInterval:
def __init__(self, interval: float, action: Any) -> None:
"""コンストラクタ
Args:
interval (float): 呼び出し間隔
action (Any): 呼ぶ出す関数
"""
logging.info("init")
self.interval = interval
self.action = action
self.stopEvent = threading.Event()
self.thread = threading.Thread(target=self.__set_interval)
self.thread.start()
def __set_interval(self) -> None:
"""スレッド処理"""
next_time = time.time() + self.interval
while not self.stopEvent.wait(next_time - time.time()):
next_time += self.interval
self.action()
# t.daemon = True
def cancel(self) -> None:
"""スレッドを止める"""
logging.info("cancel")
self.stopEvent.set() | |
deny_supplement.go | package trademark
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
//
// Code generated by Alibaba Cloud SDK Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
)
// DenySupplement invokes the trademark.DenySupplement API synchronously
// api document: https://help.aliyun.com/api/trademark/denysupplement.html
func (client *Client) DenySupplement(request *DenySupplementRequest) (response *DenySupplementResponse, err error) {
response = CreateDenySupplementResponse()
err = client.DoAction(request, response)
return
}
// DenySupplementWithChan invokes the trademark.DenySupplement API asynchronously
// api document: https://help.aliyun.com/api/trademark/denysupplement.html
// asynchronous document: https://help.aliyun.com/document_detail/66220.html
func (client *Client) DenySupplementWithChan(request *DenySupplementRequest) (<-chan *DenySupplementResponse, <-chan error) {
responseChan := make(chan *DenySupplementResponse, 1)
errChan := make(chan error, 1)
err := client.AddAsyncTask(func() {
defer close(responseChan)
defer close(errChan)
response, err := client.DenySupplement(request)
if err != nil {
errChan <- err
} else {
responseChan <- response
}
})
if err != nil {
errChan <- err
close(responseChan)
close(errChan)
}
return responseChan, errChan
}
// DenySupplementWithCallback invokes the trademark.DenySupplement API asynchronously
// api document: https://help.aliyun.com/api/trademark/denysupplement.html
// asynchronous document: https://help.aliyun.com/document_detail/66220.html
func (client *Client) DenySupplementWithCallback(request *DenySupplementRequest, callback func(response *DenySupplementResponse, err error)) <-chan int {
result := make(chan int, 1)
err := client.AddAsyncTask(func() {
var response *DenySupplementResponse
var err error
defer close(result)
response, err = client.DenySupplement(request)
callback(response, err)
result <- 1
})
if err != nil {
defer close(result)
callback(nil, err)
result <- 0
}
return result
}
// DenySupplementRequest is the request struct for api DenySupplement
type DenySupplementRequest struct {
*requests.RpcRequest
Id requests.Integer `position:"Query" name:"Id"`
}
// DenySupplementResponse is the response struct for api DenySupplement
type DenySupplementResponse struct {
*responses.BaseResponse
RequestId string `json:"RequestId" xml:"RequestId"`
Success bool `json:"Success" xml:"Success"`
ErrorMsg string `json:"ErrorMsg" xml:"ErrorMsg"`
ErrorCode string `json:"ErrorCode" xml:"ErrorCode"`
}
// CreateDenySupplementRequest creates a request to invoke DenySupplement API
func | () (request *DenySupplementRequest) {
request = &DenySupplementRequest{
RpcRequest: &requests.RpcRequest{},
}
request.InitWithApiInfo("Trademark", "2018-07-24", "DenySupplement", "trademark", "openAPI")
return
}
// CreateDenySupplementResponse creates a response to parse from DenySupplement response
func CreateDenySupplementResponse() (response *DenySupplementResponse) {
response = &DenySupplementResponse{
BaseResponse: &responses.BaseResponse{},
}
return
}
| CreateDenySupplementRequest |
peripheral.py | # Generic memory-mapped peripheral interface.
#
# Luz micro-controller simulator
# Eli Bendersky (C) 2008-2010
#
class Peripheral(object):
""" An abstract memory-mapped perhipheral interface.
Memory-mapped peripherals are accessed through memory
reads and writes.
The address given to reads and writes is relative to the
peripheral's memory map.
Width is 1, 2, 4 for byte, halfword and word accesses.
"""
def | (self, addr, width):
raise NotImplementedError()
def write_mem(self, addr, width, data):
raise NotImplementedError()
| read_mem |
clippy1.rs | // clippy1.rs
// The Clippy tool is a collection of lints to analyze your code
// so you can catch common mistakes and improve your Rust code.
//
// For these exercises the code will fail to compile when there are clippy warnings;
// check clippy's suggestions from the output to solve the exercise.
// Execute `rustlings hint clippy1` for hints :)
fn | () {
let x = 1.2331f64;
let y = 1.2332f64;
let error_margin = f64::EPSILON;
if (y - x).abs() > error_margin {
println!("Success!");
}
}
| main |
move_verification_test.rs | // Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use move_cli::package::prover::ProverTest;
#[test]
fn prove_stdlib() {
ProverTest::create(".").run()
}
#[test]
fn prove_nursery() | {
ProverTest::create("nursery").run()
} |
|
main.rs | fn constant_declaration() {
// Constant declaration
const A_MILLION: u32 = 1_000_000;
println!("A million = {}", A_MILLION);
}
fn | () {
// Shadowing
let x = 1;
println!("{}", x);
let x = "one";
println!("{}", x);
let x = 1.0;
println!("{}", x);
}
fn integers() {
// 8-bit
let x: i8 = 0;
println!("{}", x);
let x: u8 = 1;
println!("{}", x);
// 16-bit
let x: i16 = 0;
println!("{}", x);
let x: u16 = 1;
println!("{}", x);
// 32-bit
let x: i32 = 0;
println!("{}", x);
let x: u32 = 1;
println!("{}", x);
// 64-bit
let x: i64 = 0;
println!("{}", x);
let x: u64 = 1;
println!("{}", x);
// 128-bit
let x: i128 = 0;
println!("{}", x);
let x: u128 = 1;
println!("{}", x);
// arch
let x: isize = 0;
println!("{}", x);
let x: usize = 1;
println!("{}", x);
}
fn floating_points() {
// Floating point
let x = 0.0; // f64
println!("{}", x);
let x: f32 = 0.0;
println!("{}", x);
}
fn literals() {
// Decimal
println!("{}", 98_222);
// Hex
println!("{}", 0xff);
// Octal
println!("{}", 0o77);
// Binary
println!("{}", 0b1111_0000);
// Byte (u8 only)
println!("{}", b'A');
}
fn tuples() {
// Tuple
let tup: (i32, f64, u8) = (500, 6.4, 1);
println!("{} {} {}", tup.0, tup.1, tup.2);
// Destructuring
let (x, y, z) = tup;
println!("{} {} {}", x, y, z);
}
fn arrays() {
let a = [1, 2, 3, 4, 5];
println!("{}", a[0]);
// Array of 5 u32
let a: [u32; 5] = [1, 2, 3, 4, 5];
println!("{}", a[1]);
// Array of 2 elements initialized to 3
let a = [3; 2];
println!("{} {}", a[0], a[1]);
}
fn main() {
constant_declaration();
shadowing();
integers();
floating_points();
literals();
tuples();
arrays();
}
| shadowing |
validate_error_bag_test.go | package validator
import (
"testing"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
)
func TestSet_SetGetError(t *testing.T) {
t.Run("ok", func(t *testing.T) {
bag := NewValidateErrorBag()
errMsg := errors.New("error")
bag.SetError("test", "test summary", errMsg)
error, errorExists := bag.GetError("test")
assert.True(t, errorExists) | })
}
func TestSet_ContainsError(t *testing.T) {
t.Run("ok", func(t *testing.T) {
bag := NewValidateErrorBag()
errMsg := errors.New("error")
bag.SetError("test", "test summary", errMsg)
assert.True(t, bag.ContainsError("test", errMsg))
assert.False(t, bag.ContainsError("not in", errMsg))
})
} | assert.Equal(t, "test summary", error.Summary) |
siege.py | """
A script for generating siege files with a bunch of URL variations.
"""
import re
import sys
part_re = re.compile(r'\{([-\w]+)\}')
AMO_LANGUAGES = (
'af', 'ar', 'ca', 'cs', 'da', 'de', 'el', 'en-US', 'es', 'eu', 'fa', 'fi',
'fr', 'ga-IE', 'he', 'hu', 'id', 'it', 'ja', 'ko', 'mn', 'nl', 'pl',
'pt-BR', 'pt-PT', 'ro', 'ru', 'sk', 'sq', 'sr', 'sv-SE', 'uk', 'vi',
'zh-CN', 'zh-TW',
)
config = {
'base': [],
'locale': AMO_LANGUAGES,
'app': ['firefox'],
'extension-slug': [''] + """
alerts-and-updates appearance bookmarks download-management
feeds-news-blogging language-support photos-music-videos
privacy-security social-communication tabs toolbars web-development
other""".split(),
'theme-slug': [''] + """
animals compact large miscellaneous modern nature os-integration retro
sports""".split(),
'theme-sort': 'name updated created downloads rating'.split(),
'page': '1 2'.split(),
'exp': 'on off'.split(),
'personas-slug': [''] + """
abstract causes fashion firefox foxkeh holiday music nature other
scenery seasonal solid sports websites""".split(),
'personas-sort': """up-and-coming created popular rating""".split()
}
root = '{base}/{locale}/{app}'
templates = t = {
'root': '/',
'extensions': '/extensions/{extension-slug}/',
'language-tools': '/language-tools',
'themes': '/themes/{theme-slug}?sort={theme-sort}&page={page}',
'personas': '/personas/{personas-slug}',
}
t['themes-unreviewed'] = t['themes'] + '&unreviewed={exp}'
t['personas-sort'] = t['personas'] + '?sort={personas-sort}'
t['extensions-sort'] = t['extensions'] + '?sort={theme-sort}'
t['extensions-featured'] = t['extensions'] + 'featured'
for key, value in templates.items():
templates[key] = root + value
def combos(s, parts):
def | (s, parts, kw):
key, rest = parts[0], parts[1:]
rv = []
for opt in config[key]:
kw[key] = opt
if not rest:
rv.append(s.format(**kw))
else:
rv.extend(_rec(s, rest, kw))
return rv
return _rec(s, parts, {})
def gen(choices=templates):
rv = []
for template in choices:
parts = part_re.findall(template)
rv.extend(combos(template, parts))
return rv
def main():
args = sys.argv
try:
base, choices = sys.argv[1], args[2:] or templates.keys()
except IndexError:
print('Usage: python siege.py <BASE> [%s]' % (', '.join(templates)))
print('\nBASE should be something like "http://localhost:8000/z".')
print('The remaining arguments are names of url templates.')
sys.exit(1)
config['base'] = [base.rstrip('/')]
print('\n'.join(gen(templates[k] for k in choices)))
if __name__ == '__main__':
main()
| _rec |
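As a rough usage sketch of the template expansion above (hypothetical base URL, locale list trimmed for brevity; relies on the combos(), part_re and config definitions from siege.py):

# Hypothetical: expand one template with a trimmed config.
config['base'] = ['http://localhost:8000/z']
config['locale'] = ['en-US', 'fr']
template = '{base}/{locale}/{app}/language-tools'
for url in combos(template, part_re.findall(template)):
    print(url)
# http://localhost:8000/z/en-US/firefox/language-tools
# http://localhost:8000/z/fr/firefox/language-tools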
main.rs | mod example_application_window;
use gtk::prelude::*;
use example_application_window::ExampleApplicationWindow;
fn main() | {
let application = gtk::Application::new(
Some("com.github.gtk-rs.examples.composite_template"),
Default::default(),
)
.expect("Failed to initialize application");
application.connect_activate(|app| {
let win = ExampleApplicationWindow::new(app);
win.show();
});
application.run();
} |
|
buildah.go | package buildah
import (
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"sort"
"time"
"github.com/containers/buildah/define"
"github.com/containers/buildah/docker"
"github.com/containers/image/v5/types"
encconfig "github.com/containers/ocicrypt/config"
"github.com/containers/storage"
"github.com/containers/storage/pkg/ioutils"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
const (
// Package is the name of this package, used in help output and to
// identify working containers.
Package = define.Package
// Version for the Package. Bump version in contrib/rpm/buildah.spec
// too.
Version = define.Version
// The value we use to identify what type of information, currently a
// serialized Builder structure, we are using as per-container state.
// This should only be changed when we make incompatible changes to
// that data structure, as it's used to distinguish containers which
// are "ours" from ones that aren't.
containerType = Package + " 0.0.1"
// The file in the per-container directory which we use to store our
// per-container state. If it isn't there, then the container isn't
// one of our build containers.
stateFile = Package + ".json"
)
// PullPolicy takes the value PullIfMissing, PullAlways, PullIfNewer, or PullNever.
type PullPolicy = define.PullPolicy
const (
// PullIfMissing is one of the values that BuilderOptions.PullPolicy
// can take, signalling that the source image should be pulled from a
// registry if a local copy of it is not already present.
PullIfMissing = define.PullIfMissing
// PullAlways is one of the values that BuilderOptions.PullPolicy can
// take, signalling that a fresh, possibly updated, copy of the image
// should be pulled from a registry before the build proceeds.
PullAlways = define.PullAlways
// PullIfNewer is one of the values that BuilderOptions.PullPolicy
// can take, signalling that the source image should only be pulled
// from a registry if a local copy is not already present or if a
// newer version the image is present on the repository.
PullIfNewer = define.PullIfNewer
// PullNever is one of the values that BuilderOptions.PullPolicy can
// take, signalling that the source image should not be pulled from a
// registry if a local copy of it is not already present.
PullNever = define.PullNever
)
// NetworkConfigurationPolicy takes the value NetworkDefault, NetworkDisabled,
// or NetworkEnabled.
type NetworkConfigurationPolicy = define.NetworkConfigurationPolicy
const (
// NetworkDefault is one of the values that BuilderOptions.ConfigureNetwork
// can take, signalling that the default behavior should be used.
NetworkDefault = define.NetworkDefault
// NetworkDisabled is one of the values that BuilderOptions.ConfigureNetwork
// can take, signalling that network interfaces should NOT be configured for
// newly-created network namespaces.
NetworkDisabled = define.NetworkDisabled
// NetworkEnabled is one of the values that BuilderOptions.ConfigureNetwork
// can take, signalling that network interfaces should be configured for
// newly-created network namespaces.
NetworkEnabled = define.NetworkEnabled
)
// Builder objects are used to represent containers which are being used to
// build images. They also carry potential updates which will be applied to
// the image's configuration when the container's contents are used to build an
// image.
type Builder struct {
store storage.Store
// Logger is the logrus logger to write log messages with
Logger *logrus.Logger `json:"-"`
// Args define variables that users can pass at build-time to the builder
Args map[string]string
// Type is used to help identify a build container's metadata. It
// should not be modified.
Type string `json:"type"`
// FromImage is the name of the source image which was used to create
// the container, if one was used. It should not be modified.
FromImage string `json:"image,omitempty"`
// FromImageID is the ID of the source image which was used to create
// the container, if one was used. It should not be modified.
FromImageID string `json:"image-id"`
// FromImageDigest is the digest of the source image which was used to
// create the container, if one was used. It should not be modified.
FromImageDigest string `json:"image-digest"`
// Config is the source image's configuration. It should not be
// modified.
Config []byte `json:"config,omitempty"`
// Manifest is the source image's manifest. It should not be modified.
Manifest []byte `json:"manifest,omitempty"`
// Container is the name of the build container. It should not be modified.
Container string `json:"container-name,omitempty"`
// ContainerID is the ID of the build container. It should not be modified.
ContainerID string `json:"container-id,omitempty"`
// MountPoint is the last location where the container's root
// filesystem was mounted. It should not be modified.
MountPoint string `json:"mountpoint,omitempty"`
// ProcessLabel is the SELinux process label associated with the container
ProcessLabel string `json:"process-label,omitempty"`
// MountLabel is the SELinux mount label associated with the container
MountLabel string `json:"mount-label,omitempty"`
// ImageAnnotations is a set of key-value pairs which is stored in the
// image's manifest.
ImageAnnotations map[string]string `json:"annotations,omitempty"`
// ImageCreatedBy is a description of how this container was built.
ImageCreatedBy string `json:"created-by,omitempty"`
// ImageHistoryComment is a description of how our added layers were built.
ImageHistoryComment string `json:"history-comment,omitempty"`
// Image metadata and runtime settings, in multiple formats.
OCIv1 v1.Image `json:"ociv1,omitempty"`
Docker docker.V2Image `json:"docker,omitempty"`
// DefaultMountsFilePath is the file path holding the mounts to be mounted in "host-path:container-path" format.
DefaultMountsFilePath string `json:"defaultMountsFilePath,omitempty"`
// Isolation controls how we handle "RUN" statements and the Run() method.
Isolation define.Isolation
// NamespaceOptions controls how we set up the namespaces for processes that we run in the container.
NamespaceOptions define.NamespaceOptions
// ConfigureNetwork controls whether or not network interfaces and
// routing are configured for a new network namespace (i.e., when not
// joining another's namespace and not just using the host's
// namespace), effectively deciding whether or not the process has a
// usable network.
ConfigureNetwork define.NetworkConfigurationPolicy
// CNIPluginPath is the location of CNI plugin helpers, if they should be
// run from a location other than the default location.
CNIPluginPath string
// CNIConfigDir is the location of CNI configuration files, if the files in
// the default configuration directory shouldn't be used.
CNIConfigDir string
// ID mapping options to use when running processes in the container with non-host user namespaces.
IDMappingOptions define.IDMappingOptions
// Capabilities is a list of capabilities to use when running commands in the container.
Capabilities []string
// PrependedEmptyLayers are history entries that we'll add to a
// committed image, after any history items that we inherit from a base
// image, but before the history item for the layer that we're
// committing.
PrependedEmptyLayers []v1.History
// AppendedEmptyLayers are history entries that we'll add to a
// committed image after the history item for the layer that we're
// committing.
AppendedEmptyLayers []v1.History
CommonBuildOpts *define.CommonBuildOptions
// TopLayer is the top layer of the image
TopLayer string
// Format for the build Image
Format string
// TempVolumes are temporary mount points created during container runs
TempVolumes map[string]bool
// ContentDigester counts the digest of all Add()ed content
ContentDigester CompositeDigester
// Devices are the additional devices to add to the containers
Devices define.ContainerDevices
}
// BuilderInfo are used as objects to display container information
type BuilderInfo struct {
Type string
FromImage string
FromImageID string
FromImageDigest string
Config string
Manifest string
Container string
ContainerID string
MountPoint string
ProcessLabel string
MountLabel string
ImageAnnotations map[string]string
ImageCreatedBy string
OCIv1 v1.Image
Docker docker.V2Image
DefaultMountsFilePath string
Isolation string
NamespaceOptions define.NamespaceOptions
Capabilities []string
ConfigureNetwork string
CNIPluginPath string
CNIConfigDir string
IDMappingOptions define.IDMappingOptions
History []v1.History
Devices define.ContainerDevices
}
// GetBuildInfo gets a pointer to a Builder object and returns a BuilderInfo object from it.
// This is used in the inspect command to display Manifest and Config as string and not []byte.
func GetBuildInfo(b *Builder) BuilderInfo {
history := copyHistory(b.OCIv1.History)
history = append(history, copyHistory(b.PrependedEmptyLayers)...)
history = append(history, copyHistory(b.AppendedEmptyLayers)...)
sort.Strings(b.Capabilities)
return BuilderInfo{
Type: b.Type,
FromImage: b.FromImage,
FromImageID: b.FromImageID,
FromImageDigest: b.FromImageDigest,
Config: string(b.Config),
Manifest: string(b.Manifest),
Container: b.Container,
ContainerID: b.ContainerID,
MountPoint: b.MountPoint,
ProcessLabel: b.ProcessLabel,
MountLabel: b.MountLabel,
ImageAnnotations: b.ImageAnnotations,
ImageCreatedBy: b.ImageCreatedBy,
OCIv1: b.OCIv1,
Docker: b.Docker,
DefaultMountsFilePath: b.DefaultMountsFilePath,
Isolation: b.Isolation.String(),
NamespaceOptions: b.NamespaceOptions,
ConfigureNetwork: fmt.Sprintf("%v", b.ConfigureNetwork),
CNIPluginPath: b.CNIPluginPath,
CNIConfigDir: b.CNIConfigDir,
IDMappingOptions: b.IDMappingOptions,
Capabilities: b.Capabilities,
History: history,
Devices: b.Devices,
}
}
// CommonBuildOptions are resources that can be defined by flags for both buildah from and build
type CommonBuildOptions = define.CommonBuildOptions
// BuilderOptions are used to initialize a new Builder.
type BuilderOptions struct {
// Args define variables that users can pass at build-time to the builder
Args map[string]string
// FromImage is the name of the image which should be used as the
// starting point for the container. It can be set to an empty value
// or "scratch" to indicate that the container should not be based on
// an image.
FromImage string
// Container is a desired name for the build container.
Container string
// PullPolicy decides whether or not we should pull the image that
// we're using as a base image. It should be PullIfMissing,
// PullAlways, or PullNever.
PullPolicy define.PullPolicy
// Registry is a value which is prepended to the image's name, if it
// needs to be pulled and the image name alone can not be resolved to a
// reference to a source image. No separator is implicitly added.
Registry string
// BlobDirectory is the name of a directory in which we'll attempt
// to store copies of layer blobs that we pull down, if any. It should
// already exist.
BlobDirectory string
// Mount signals to NewBuilder() that the container should be mounted
// immediately.
Mount bool
// SignaturePolicyPath specifies an override location for the signature
// policy which should be used for verifying the new image as it is
// being written. Except in specific circumstances, no value should be
// specified, indicating that the shared, system-wide default policy
// should be used.
SignaturePolicyPath string
// ReportWriter is an io.Writer which will be used to log the reading
// of the source image from a registry, if we end up pulling the image.
ReportWriter io.Writer
// github.com/containers/image/types SystemContext to hold credentials
// and other authentication/authorization information.
SystemContext *types.SystemContext
// DefaultMountsFilePath is the file path holding the mounts to be
// mounted in "host-path:container-path" format
DefaultMountsFilePath string
// Isolation controls how we handle "RUN" statements and the Run()
// method.
Isolation define.Isolation
// NamespaceOptions controls how we set up namespaces for processes that
// we might need to run using the container's root filesystem.
NamespaceOptions define.NamespaceOptions
// ConfigureNetwork controls whether or not network interfaces and
// routing are configured for a new network namespace (i.e., when not
// joining another's namespace and not just using the host's
// namespace), effectively deciding whether or not the process has a
// usable network.
ConfigureNetwork define.NetworkConfigurationPolicy
// CNIPluginPath is the location of CNI plugin helpers, if they should be
// run from a location other than the default location.
CNIPluginPath string
// CNIConfigDir is the location of CNI configuration files, if the files in
// the default configuration directory shouldn't be used.
CNIConfigDir string
// ID mapping options to use if we're setting up our own user namespace.
IDMappingOptions *define.IDMappingOptions
// Capabilities is a list of capabilities to use when
// running commands in the container.
Capabilities []string
CommonBuildOpts *define.CommonBuildOptions
// Format for the container image
Format string
// Devices are the additional devices to add to the containers
Devices define.ContainerDevices
// DefaultEnv for containers
DefaultEnv []string
// MaxPullRetries is the maximum number of attempts we'll make to pull
// any one image from the external registry if the first attempt fails.
MaxPullRetries int
// PullRetryDelay is how long to wait before retrying a pull attempt.
PullRetryDelay time.Duration
// OciDecryptConfig contains the config that can be used to decrypt an image if it is
// encrypted if non-nil. If nil, it does not attempt to decrypt an image.
OciDecryptConfig *encconfig.DecryptConfig
}
// ImportOptions are used to initialize a Builder from an existing container
// which was created elsewhere.
type ImportOptions struct {
// Container is the name of the build container.
Container string
// SignaturePolicyPath specifies an override location for the signature
// policy which should be used for verifying the new image as it is
// being written. Except in specific circumstances, no value should be | // should be used.
SignaturePolicyPath string
}
// ImportFromImageOptions are used to initialize a Builder from an image.
type ImportFromImageOptions struct {
// Image is the name or ID of the image we'd like to examine.
Image string
// SignaturePolicyPath specifies an override location for the signature
// policy which should be used for verifying the new image as it is
// being written. Except in specific circumstances, no value should be
// specified, indicating that the shared, system-wide default policy
// should be used.
SignaturePolicyPath string
// github.com/containers/image/types SystemContext to hold information
// about which registries we should check for completing image names
// that don't include a domain portion.
SystemContext *types.SystemContext
}
// NewBuilder creates a new build container.
func NewBuilder(ctx context.Context, store storage.Store, options BuilderOptions) (*Builder, error) {
if options.CommonBuildOpts == nil {
options.CommonBuildOpts = &CommonBuildOptions{}
}
return newBuilder(ctx, store, options)
}
// ImportBuilder creates a new build configuration using an already-present
// container.
func ImportBuilder(ctx context.Context, store storage.Store, options ImportOptions) (*Builder, error) {
return importBuilder(ctx, store, options)
}
// ImportBuilderFromImage creates a new builder configuration using an image.
// The returned object can be modified and examined, but it can not be saved
// or committed because it is not associated with a working container.
func ImportBuilderFromImage(ctx context.Context, store storage.Store, options ImportFromImageOptions) (*Builder, error) {
return importBuilderFromImage(ctx, store, options)
}
// OpenBuilder loads information about a build container given its name or ID.
func OpenBuilder(store storage.Store, container string) (*Builder, error) {
cdir, err := store.ContainerDirectory(container)
if err != nil {
return nil, err
}
buildstate, err := ioutil.ReadFile(filepath.Join(cdir, stateFile))
if err != nil {
return nil, err
}
b := &Builder{}
if err = json.Unmarshal(buildstate, &b); err != nil {
return nil, errors.Wrapf(err, "error parsing %q, read from %q", string(buildstate), filepath.Join(cdir, stateFile))
}
if b.Type != containerType {
return nil, errors.Errorf("container %q is not a %s container (is a %q container)", container, define.Package, b.Type)
}
b.store = store
b.fixupConfig(nil)
b.setupLogger()
return b, nil
}
// OpenBuilderByPath loads information about a build container given a
// path to the container's root filesystem
func OpenBuilderByPath(store storage.Store, path string) (*Builder, error) {
containers, err := store.Containers()
if err != nil {
return nil, err
}
abs, err := filepath.Abs(path)
if err != nil {
return nil, err
}
builderMatchesPath := func(b *Builder, path string) bool {
return (b.MountPoint == path)
}
for _, container := range containers {
cdir, err := store.ContainerDirectory(container.ID)
if err != nil {
return nil, err
}
buildstate, err := ioutil.ReadFile(filepath.Join(cdir, stateFile))
if err != nil {
if os.IsNotExist(err) {
logrus.Debugf("error reading %q: %v, ignoring container %q", filepath.Join(cdir, stateFile), err, container.ID)
continue
}
return nil, err
}
b := &Builder{}
err = json.Unmarshal(buildstate, &b)
if err == nil && b.Type == containerType && builderMatchesPath(b, abs) {
b.store = store
b.fixupConfig(nil)
b.setupLogger()
return b, nil
}
if err != nil {
logrus.Debugf("error parsing %q, read from %q: %v", string(buildstate), filepath.Join(cdir, stateFile), err)
} else if b.Type != containerType {
logrus.Debugf("container %q is not a %s container (is a %q container)", container.ID, define.Package, b.Type)
}
}
return nil, storage.ErrContainerUnknown
}
// OpenAllBuilders loads all containers which have a state file that we use in
// their data directory, typically so that they can be listed.
func OpenAllBuilders(store storage.Store) (builders []*Builder, err error) {
containers, err := store.Containers()
if err != nil {
return nil, err
}
for _, container := range containers {
cdir, err := store.ContainerDirectory(container.ID)
if err != nil {
return nil, err
}
buildstate, err := ioutil.ReadFile(filepath.Join(cdir, stateFile))
if err != nil {
if os.IsNotExist(err) {
logrus.Debugf("error reading %q: %v, ignoring container %q", filepath.Join(cdir, stateFile), err, container.ID)
continue
}
return nil, err
}
b := &Builder{}
err = json.Unmarshal(buildstate, &b)
if err == nil && b.Type == containerType {
b.store = store
b.setupLogger()
b.fixupConfig(nil)
builders = append(builders, b)
continue
}
if err != nil {
logrus.Debugf("error parsing %q, read from %q: %v", string(buildstate), filepath.Join(cdir, stateFile), err)
} else if b.Type != containerType {
logrus.Debugf("container %q is not a %s container (is a %q container)", container.ID, define.Package, b.Type)
}
}
return builders, nil
}
// Save saves the builder's current state to the build container's metadata.
// This should not need to be called directly, as other methods of the Builder
// object take care of saving their state.
func (b *Builder) Save() error {
buildstate, err := json.Marshal(b)
if err != nil {
return err
}
cdir, err := b.store.ContainerDirectory(b.ContainerID)
if err != nil {
return err
}
if err = ioutils.AtomicWriteFile(filepath.Join(cdir, stateFile), buildstate, 0600); err != nil {
return errors.Wrapf(err, "error saving builder state to %q", filepath.Join(cdir, stateFile))
}
return nil
} | // specified, indicating that the shared, system-wide default policy |
lib.rs | /**
* Holo-REA "proposed intents" zome entry type definitions: maintains relationships between coordinated proposals and the individual intents that describe their planned enaction.
*
* For use in the standard Holo-REA "proposed intents" zome,
* or in zomes wishing to embed additional attributes & logic alongside the
* standard `ProposedIntent` data model.
*
* @package Holo-REA
*/
use hdk::prelude::*;
use hc_zome_rea_proposed_intent_storage::Entry;
use hc_zome_rea_proposed_intent_storage_consts::*;
use hc_zome_rea_intent_storage_consts::INTENT_BASE_ENTRY_TYPE;
use hc_zome_rea_proposal_storage_consts::PROPOSAL_BASE_ENTRY_TYPE;
pub fn | () -> ValidatingEntryType {
entry!(
name: PROPOSED_INTENT_ENTRY_TYPE,
description: "Represents many-to-many relationships between Proposals and Intents, supporting including intents in multiple proposals, as well as a proposal including multiple intents.",
sharing: Sharing::Public,
validation_package: || {
hdk::ValidationPackageDefinition::Entry
},
validation: |_validation_data: hdk::EntryValidationData<Entry>| {
Ok(())
}
)
}
pub fn base_entry_def() -> ValidatingEntryType {
entry!(
name: PROPOSED_INTENT_BASE_ENTRY_TYPE,
description: "Base anchor for initial proposedintent addresses to provide lookup functionality",
sharing: Sharing::Public,
validation_package: || {
hdk::ValidationPackageDefinition::Entry
},
validation: |_validation_data: hdk::EntryValidationData<Address>| {
Ok(())
},
links: [
// :TODO: replace with final link definitions
to!(
PROPOSED_INTENT_ENTRY_TYPE,
link_type: PROPOSED_INTENT_INITIAL_ENTRY_LINK_TYPE,
validation_package: || {
hdk::ValidationPackageDefinition::Entry
},
validation: | _validation_data: hdk::LinkValidationData| {
Ok(())
}
),
to!(
INTENT_BASE_ENTRY_TYPE,
link_type: PROPOSED_INTENT_PUBLISHES_LINK_TYPE,
validation_package: || {
hdk::ValidationPackageDefinition::Entry
},
validation: | _validation_data: hdk::LinkValidationData| {
Ok(())
}
),
to!(
PROPOSAL_BASE_ENTRY_TYPE,
link_type: PROPOSED_INTENT_PUBLISHED_IN_LINK_TYPE,
validation_package: || {
hdk::ValidationPackageDefinition::Entry
},
validation: | _validation_data: hdk::LinkValidationData| {
Ok(())
}
)
]
)
}
| entry_def |
withdraw.ts | import { Component, NgZone, ViewChild } from '@angular/core';
import { CROperationsService, CRWebsiteCommand } from '../../../services/croperations.service';
import { PopupService } from '../../../services/popup.service';
import { TitleBarComponent } from 'src/app/components/titlebar/titlebar.component';
import { TranslateService } from '@ngx-translate/core';
import { GlobalIntentService } from 'src/app/services/global.intent.service';
import { Logger } from 'src/app/logger';
import { GlobalNavService } from 'src/app/services/global.nav.service';
import { VoteService } from 'src/app/vote/services/vote.service';
import { WalletManager } from 'src/app/wallet/services/wallet.service';
import { StandardCoinName } from 'src/app/wallet/model/Coin';
import { Util } from 'src/app/model/util';
import { ProposalService } from 'src/app/crproposalvoting/services/proposal.service';
import { GlobalThemeService } from 'src/app/services/global.theme.service';
import { ProposalDetails } from 'src/app/crproposalvoting/model/proposal-details';
import { App } from 'src/app/model/app.enum';
import { Config } from 'src/app/wallet/config/Config';
type WithdrawCommand = CRWebsiteCommand & {
data: {
amount: number,
ownerpublickey: string,
proposalhash: string,
recipient: string,
userdid: string,
},
}
@Component({
selector: 'page-withdraw',
templateUrl: 'withdraw.html',
styleUrls: ['./withdraw.scss']
})
export class | {
@ViewChild(TitleBarComponent, { static: false }) titleBar: TitleBarComponent;
private withdrawCommand: WithdrawCommand;
public signingAndSendingSuggestionResponse = false;
public proposalDetails: ProposalDetails;
public proposalDetailsFetched = false;
public Config = Config;
constructor(
private crOperations: CROperationsService,
private popup: PopupService,
public translate: TranslateService,
private globalIntentService: GlobalIntentService,
private walletManager: WalletManager,
private voteService: VoteService,
private proposalService: ProposalService,
public theme: GlobalThemeService,
private globalNav: GlobalNavService,
) {
}
async ionViewWillEnter() {
this.titleBar.setTitle(this.translate.instant('crproposalvoting.withdraw'));
this.withdrawCommand = this.crOperations.onGoingCommand as WithdrawCommand;
try {
// Fetch more details about this proposal, to display to the user
this.proposalDetails = await this.proposalService.fetchProposalDetails(this.withdrawCommand.data.proposalhash);
Logger.log('crproposal', "proposalDetails", this.proposalDetails);
this.proposalDetailsFetched = true;
}
catch (err) {
Logger.error('crproposal', 'WithdrawPage ionViewWillEnter error:', err);
}
}
cancel() {
this.globalNav.navigateBack();
}
async signAndWithdraw() {
this.signingAndSendingSuggestionResponse = true;
try {
//Get payload
var payload = this.getWithdrawPayload(this.withdrawCommand);
Logger.log('crproposal', "Got payload.", payload);
//Get digest
var digest = await this.walletManager.spvBridge.proposalWithdrawDigest(this.voteService.masterWalletId, StandardCoinName.ELA, JSON.stringify(payload));
digest = Util.reverseHexToBE(digest);
Logger.log('crproposal', "Got proposal digest.", digest);
//Get did sign digest
let ret = await this.globalIntentService.sendIntent("https://did.elastos.net/signdigest", {
data: digest,
});
Logger.log('crproposal', "Got signed digest.", ret);
if (ret.result && ret.result.signature) {
//Create transaction and send
payload.Signature = ret.result.signature;
const rawTx = await this.voteService.sourceSubwallet.createProposalWithdrawTransaction(JSON.stringify(payload), '');
await this.voteService.signAndSendRawTransaction(rawTx, App.CRPROPOSAL_VOTING);
}
}
catch (e) {
// Something wrong happened while signing the JWT. Just tell the end user that we can't complete the operation for now.
await this.popup.alert("Error", "Sorry, unable to withdraw. Your crproposal can't be withdraw for now. " + e, "Ok");
}
this.signingAndSendingSuggestionResponse = false;
// this.exitIntentWithSuccess();
}
private getWithdrawPayload(command: WithdrawCommand): any {
let payload = {
ProposalHash: command.data.proposalhash,
OwnerPublicKey: command.data.ownerpublickey,
Recipient: command.data.recipient,
Amount: command.data.amount,
};
return payload;
}
} | WithdrawPage |
signIn.js | import React from 'react';
import classNames from 'classnames';
import { Mutation } from 'react-apollo';
import { SIGNIN_USER } from './../../queries';
import { withRouter } from 'react-router-dom';
import * as Cookies from 'es-cookie';
import { Helmet } from 'react-helmet';
import {NavLink} from 'react-router-dom';
const initialState = {
email: '',
password: '',
error: ''
}
class Signin extends React.Component {
constructor(props){
super();
this.state = {
...initialState
}
}
clearState() {
this.setState({...initialState})
}
handleChange(event) {
const name = event.target.name;
const value = event.target.value;
this.setState({
[name]: value
});
}
handleSubmit(event, signinUser) {
event.preventDefault();
signinUser().then(async ({data}) => {
Cookies.set('token', data.signinUser.token);
await this.props.refetch();
this.clearState();
this.props.history.push('/dashboard');
}).catch(error => {
this.setState({
error: error.graphQLErrors.map(x => x.message)
})
console.error("ERR =>", error.graphQLErrors.map(x => x.message));
});
}
validateForm() {
const { email, password } = this.state;
const isInvalid = !email || !password;
return isInvalid;
}
head() {
return (
<Helmet bodyAttributes={{class: "logInPage"}}>
<title>LogIn - React Starter Kit</title>
</Helmet>
);
}
render(){
const { email, password } = this.state;
return (
<div className="column column_12_12">
{this.head()}
<div className="signUp authForm">
<h1 className="dark_headline">
LogIn
</h1>
<Mutation mutation={SIGNIN_USER} variables={{ email, password }}>
{(signinUser, { data, loading, error }) => {
return (
<form className="form" onSubmit={event => this.handleSubmit(event, signinUser)}>
<div className="form_wrap">
<div className={classNames({'error-label' : this.state.error != ''})}>
{this.state.error}
</div>
<div className="form_row">
<div className="form_item">
<div className="form_input">
<input type="text" name="email" placeholder="Email" value={email} onChange={this.handleChange.bind(this)} />
<span className="bottom_border"></span>
</div>
</div>
</div>
<div className="form_row">
<div className="form_item">
<div className="form_input">
<input type="password" name="password" placeholder="Password" value={password} onChange={this.handleChange.bind(this)} />
<span className="bottom_border"></span>
</div>
</div>
</div>
<div className="formBottomLinks">
<p>
Don't have an account? <NavLink to="/signup">Join now!</NavLink>
</p>
<p>
Forgot your password? <NavLink to="/account-recovery">Reset here</NavLink>
</p>
</div>
<div className="form_buttons">
<button type="submit" className="btn"
disabled={ loading || this.validateForm() }>
LogIn</button>
</div>
</div>
</form> | }}
</Mutation>
</div>
</div>
)
}
}
export default withRouter(Signin); |
); |
Dropdown.stories.js | import { useDialogState } from '@wp-g2/a11y';
import React from 'react';
import {
Button,
Modal,
ModalBody,
ModalFooter,
ModalHeader,
} from '../../index';
import {
Dropdown,
DropdownMenu,
DropdownMenuItem,
DropdownTrigger,
} from '../index';
export default {
component: Dropdown,
title: 'Components/Dropdown',
};
export const WithModal = () => {
const dialog = useDialogState({ animated: true });
return (
<>
<Dropdown>
<DropdownTrigger>Dropdown</DropdownTrigger>
<DropdownMenu>
<DropdownMenuItem onClick={dialog.show}>
One
</DropdownMenuItem>
<DropdownMenuItem onClick={dialog.show}>
Two
</DropdownMenuItem>
</DropdownMenu>
</Dropdown>
<Modal dialog={dialog}>
<ModalHeader />
<ModalBody>Hello</ModalBody> | <Button onClick={dialog.hide}>Cancel</Button>
</ModalFooter>
</Modal>
</>
);
};
export const _default = () => {
return (
<Dropdown visible>
<DropdownTrigger>Dropdown</DropdownTrigger>
<DropdownMenu>
<DropdownMenuItem>One</DropdownMenuItem>
<DropdownMenuItem>Two</DropdownMenuItem>
<DropdownMenuItem>Three</DropdownMenuItem>
<DropdownMenuItem>Three</DropdownMenuItem>
<DropdownMenuItem>Three</DropdownMenuItem>
<DropdownMenuItem>Three</DropdownMenuItem>
<DropdownMenuItem>Three</DropdownMenuItem>
<DropdownMenuItem>Three</DropdownMenuItem>
<DropdownMenuItem>Three</DropdownMenuItem>
<DropdownMenuItem>Three</DropdownMenuItem>
</DropdownMenu>
</Dropdown>
);
}; | <ModalFooter>
<Button onClick={dialog.hide} variant="primary">
Save
</Button> |
authentication.py | import datetime
import logging
from rest_framework import status
from rest_framework.authentication import BaseAuthentication
from rest_framework.exceptions import APIException
import seaserv
from seahub.base.accounts import User
from seahub.constants import GUEST_USER
from seahub.api2.models import Token, TokenV2
from seahub.api2.utils import get_client_ip
from seahub.utils import within_time_range
try:
from seahub.settings import MULTI_TENANCY
except ImportError:
MULTI_TENANCY = False
logger = logging.getLogger(__name__)
HEADER_CLIENT_VERSION = 'HTTP_X_SEAFILE_CLIENT_VERSION'
HEADER_PLATFORM_VERSION = 'HTTP_X_SEAFILE_PLATFORM_VERSION'
class AuthenticationFailed(APIException):
status_code = status.HTTP_401_UNAUTHORIZED
default_detail = 'Incorrect authentication credentials.'
def __init__(self, detail=None):
self.detail = detail or self.default_detail
class TokenAuthentication(BaseAuthentication):
"""
Simple token based authentication.
Clients should authenticate by passing the token key in the "Authorization"
HTTP header, prepended with the string "Token ". For example:
Authorization: Token 401f7ac837da42b97f613d789819ff93537bee6a
A custom token model may be used, but must have the following properties.
* key -- The string identifying the token
* user -- The user to which the token belongs
"""
def authenticate(self, request):
auth = request.META.get('HTTP_AUTHORIZATION', '').split()
if not auth or auth[0].lower() != 'token':
return None
if len(auth) == 1:
msg = 'Invalid token header. No credentials provided.'
raise AuthenticationFailed(msg)
elif len(auth) > 2:
msg = 'Invalid token header. Token string should not contain spaces.'
raise AuthenticationFailed(msg)
key = auth[1]
ret = self.authenticate_v2(request, key)
if ret:
return ret
return self.authenticate_v1(request, key)
def _populate_user_permissions(self, user):
"""Disable some operations if ``user`` is a guest.
"""
if user.role == GUEST_USER:
user.permissions.can_add_repo = lambda: False
user.permissions.can_add_group = lambda: False
user.permissions.can_view_org = lambda: False
user.permissions.can_use_global_address_book = lambda: False
user.permissions.can_generate_shared_link = lambda: False
def authenticate_v1(self, request, key):
try:
token = Token.objects.get(key=key)
except Token.DoesNotExist:
raise AuthenticationFailed('Invalid token')
try:
user = User.objects.get(email=token.user)
except User.DoesNotExist:
raise AuthenticationFailed('User inactive or deleted')
if MULTI_TENANCY:
orgs = seaserv.get_orgs_by_user(token.user) | user.org = orgs[0]
self._populate_user_permissions(user)
if user.is_active:
return (user, token)
def authenticate_v2(self, request, key):
try:
token = TokenV2.objects.get(key=key)
except TokenV2.DoesNotExist:
return None # Continue authentication in token v1
try:
user = User.objects.get(email=token.user)
except User.DoesNotExist:
raise AuthenticationFailed('User inactive or deleted')
if MULTI_TENANCY:
orgs = seaserv.get_orgs_by_user(token.user)
if orgs:
user.org = orgs[0]
self._populate_user_permissions(user)
if user.is_active:
need_save = False
# We update the device's last_login_ip, client_version, platform_version if changed
ip = get_client_ip(request)
if ip and ip != token.last_login_ip:
token.last_login_ip = ip
need_save = True
client_version = request.META.get(HEADER_CLIENT_VERSION, '')
if client_version and client_version != token.client_version:
token.client_version = client_version
need_save = True
platform_version = request.META.get(HEADER_PLATFORM_VERSION, '')
if platform_version and platform_version != token.platform_version:
token.platform_version = platform_version
need_save = True
if not within_time_range(token.last_accessed, datetime.datetime.now(), 10 * 60):
# We only need 10min precision for the last_accessed field
need_save = True
if need_save:
try:
token.save()
except:
logger.exception('error when save token v2:')
return (user, token) | if orgs: |
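A minimal client-side sketch of the "Authorization: Token <key>" scheme described in the TokenAuthentication docstring above; the host, endpoint, and token value are placeholders for illustration, and the example uses the third-party requests library:

import requests

token = '401f7ac837da42b97f613d789819ff93537bee6a'  # placeholder key
resp = requests.get(
    'https://seafile.example.com/api2/auth/ping/',  # assumed endpoint for illustration
    headers={'Authorization': 'Token %s' % token},
)
print(resp.status_code)  # 200 if the token is accepted, 401 otherwise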
neural_net.py | from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
class TwoLayerNet(object):
"""
A two-layer fully-connected neural network. The net has an input dimension of
N, a hidden layer dimension of H, and performs classification over C classes.
We train the network with a softmax loss function and L2 regularization on the
weight matrices. The network uses a ReLU nonlinearity after the first fully
connected layer.
In other words, the network has the following architecture:
input - fully connected layer - ReLU - fully connected layer - softmax
The outputs of the second fully-connected layer are the scores for each class.
"""
def __init__(self, input_size, hidden_size, output_size, std=1e-4):
|
def loss(self, X, y=None, reg=0.0):
"""
Compute the loss and gradients for a two layer fully connected neural
network.
Inputs:
- X: Input data of shape (N, D). Each X[i] is a training sample.
- y: Vector of training labels. y[i] is the label for X[i], and each y[i] is
an integer in the range 0 <= y[i] < C. This parameter is optional; if it
is not passed then we only return scores, and if it is passed then we
instead return the loss and gradients.
- reg: Regularization strength.
Returns:
If y is None, return a matrix scores of shape (N, C) where scores[i, c] is
the score for class c on input X[i].
If y is not None, instead return a tuple of:
- loss: Loss (data loss and regularization loss) for this batch of training
samples.
- grads: Dictionary mapping parameter names to gradients of those parameters
with respect to the loss function; has the same keys as self.params.
"""
# Unpack variables from the params dictionary
W1, b1 = self.params['W1'], self.params['b1']
W2, b2 = self.params['W2'], self.params['b2']
N, D = X.shape
# Compute the forward pass
scores = None
#############################################################################
# TODO: Perform the forward pass, computing the class scores for the input. #
# Store the result in the scores variable, which should be an array of #
# shape (N, C). #
#############################################################################
out1 = np.maximum(0, X.dot(W1) + b1) # relu, (N, H)
scores = out1.dot(W2) + b2 # (N, C)
#############################################################################
# END OF YOUR CODE #
#############################################################################
# If the targets are not given then jump out, we're done
if y is None:
return scores
# Compute the loss
loss = None
#############################################################################
# TODO: Finish the forward pass, and compute the loss. This should include #
# both the data loss and L2 regularization for W1 and W2. Store the result #
# in the variable loss, which should be a scalar. Use the Softmax #
# classifier loss. #
#############################################################################
correct_class_score = scores[np.arange(N), y].reshape(N, 1)
exp_sum = np.sum(np.exp(scores), axis=1).reshape(N, 1)
loss = np.sum(np.log(exp_sum) - correct_class_score)
loss /= N
loss += 0.5 * reg * np.sum(W1 * W1)+ 0.5 * reg * np.sum(W2 * W2)
#############################################################################
# END OF YOUR CODE #
#############################################################################
# Backward pass: compute gradients
grads = {}
#############################################################################
# TODO: Compute the backward pass, computing the derivatives of the weights #
# and biases. Store the results in the grads dictionary. For example, #
# grads['W1'] should store the gradient on W1, and be a matrix of same size #
#############################################################################
margin = np.exp(scores) / exp_sum
margin[np.arange(N), y] += -1
margin /= N #(N, C)
dW2 = out1.T.dot(margin) #(H ,C)
dW2 += reg * W2
grads['W2'] = dW2
grads['b2'] = np.sum(margin, axis = 0)
margin1 = margin.dot(W2.T) #(N, H)
margin1[out1 <= 0] = 0
dW1 = X.T.dot(margin1) #(D, H)
dW1 += reg * W1
grads['W1'] = dW1
grads['b1'] = np.sum(margin1, axis = 0)
#############################################################################
# END OF YOUR CODE #
#############################################################################
return loss, grads
def train(self, X, y, X_val, y_val,
learning_rate=1e-3, learning_rate_decay=0.95,
reg=5e-6, num_iters=100,
batch_size=200, verbose=False):
"""
Train this neural network using stochastic gradient descent.
Inputs:
- X: A numpy array of shape (N, D) giving training data.
- y: A numpy array of shape (N,) giving training labels; y[i] = c means that
X[i] has label c, where 0 <= c < C.
- X_val: A numpy array of shape (N_val, D) giving validation data.
- y_val: A numpy array of shape (N_val,) giving validation labels.
- learning_rate: Scalar giving learning rate for optimization.
- learning_rate_decay: Scalar giving factor used to decay the learning rate
after each epoch.
- reg: Scalar giving regularization strength.
- num_iters: Number of steps to take when optimizing.
- batch_size: Number of training examples to use per step.
- verbose: boolean; if true print progress during optimization.
"""
num_train = X.shape[0]
iterations_per_epoch = max(num_train / batch_size, 1)
# Use SGD to optimize the parameters in self.model
loss_history = []
train_acc_history = []
val_acc_history = []
for it in range(num_iters):
X_batch = None
y_batch = None
#########################################################################
# TODO: Create a random minibatch of training data and labels, storing #
# them in X_batch and y_batch respectively. #
#########################################################################
mask = np.random.choice(num_train, batch_size, replace=True)
X_batch = X[mask]
y_batch = y[mask]
#########################################################################
# END OF YOUR CODE #
#########################################################################
# Compute loss and gradients using the current minibatch
loss, grads = self.loss(X_batch, y=y_batch, reg=reg)
loss_history.append(loss)
#########################################################################
# TODO: Use the gradients in the grads dictionary to update the #
# parameters of the network (stored in the dictionary self.params) #
# using stochastic gradient descent. You'll need to use the gradients #
# stored in the grads dictionary defined above. #
#########################################################################
self.params['W1'] -= learning_rate * grads['W1']
self.params['W2'] -= learning_rate * grads['W2']
self.params['b1'] -= learning_rate * grads['b1']
self.params['b2'] -= learning_rate * grads['b2']
#########################################################################
# END OF YOUR CODE #
#########################################################################
if verbose and it % 100 == 0:
print('iteration %d / %d: loss %f' % (it, num_iters, loss))
# Every epoch, check train and val accuracy and decay learning rate.
if it % iterations_per_epoch == 0:
# Check accuracy
train_acc = (self.predict(X_batch) == y_batch).mean()
val_acc = (self.predict(X_val) == y_val).mean()
train_acc_history.append(train_acc)
val_acc_history.append(val_acc)
# Decay learning rate
learning_rate *= learning_rate_decay
return {
'loss_history': loss_history,
'train_acc_history': train_acc_history,
'val_acc_history': val_acc_history,
}
def predict(self, X):
"""
Use the trained weights of this two-layer network to predict labels for
data points. For each data point we predict scores for each of the C
classes, and assign each data point to the class with the highest score.
Inputs:
- X: A numpy array of shape (N, D) giving N D-dimensional data points to
classify.
Returns:
- y_pred: A numpy array of shape (N,) giving predicted labels for each of
the elements of X. For all i, y_pred[i] = c means that X[i] is predicted
to have class c, where 0 <= c < C.
"""
y_pred = None
###########################################################################
# TODO: Implement this function; it should be VERY simple! #
###########################################################################
out1 = np.maximum(0, X.dot(self.params['W1']) + self.params['b1']) # relu, (N, H)
y_pred = np.argmax(out1.dot(self.params['W2']) + self.params['b2'],axis = 1) # (N, C)
###########################################################################
# END OF YOUR CODE #
###########################################################################
return y_pred
| """
Initialize the model. Weights are initialized to small random values and
biases are initialized to zero. Weights and biases are stored in the
variable self.params, which is a dictionary with the following keys:
W1: First layer weights; has shape (D, H)
b1: First layer biases; has shape (H,)
W2: Second layer weights; has shape (H, C)
b2: Second layer biases; has shape (C,)
Inputs:
- input_size: The dimension D of the input data.
- hidden_size: The number of neurons H in the hidden layer.
- output_size: The number of classes C.
"""
self.params = {}
self.params['W1'] = std * np.random.randn(input_size, hidden_size)
self.params['b1'] = np.zeros(hidden_size)
self.params['W2'] = std * np.random.randn(hidden_size, output_size)
self.params['b2'] = np.zeros(output_size) |
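A quick smoke test of the class above on random data; the sizes, seed, and labels are arbitrary, and the snippet assumes TwoLayerNet exactly as defined in this file:

import numpy as np

np.random.seed(0)
net = TwoLayerNet(input_size=4, hidden_size=10, output_size=3, std=1e-1)
X = np.random.randn(5, 4)
y = np.array([0, 1, 2, 2, 1])

loss, grads = net.loss(X, y, reg=0.05)
print('loss:', loss)                   # scalar: softmax data loss + L2 regularization
print('grad shapes:', {k: v.shape for k, v in grads.items()})
print('predictions:', net.predict(X))  # 5 predicted labels in {0, 1, 2}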
genesis_test.go | package stake
import (
"fmt"
"testing"
"github.com/tendermint/tendermint/crypto/ed25519"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
abci "github.com/tendermint/tendermint/abci/types"
sdk "github.com/cosmos/cosmos-sdk/types"
keep "github.com/cosmos/cosmos-sdk/x/stake/keeper"
"github.com/cosmos/cosmos-sdk/x/stake/types"
)
func TestInitGenesis(t *testing.T) {
ctx, _, keeper := keep.CreateTestInput(t, false, 1000)
pool := keeper.GetPool(ctx)
pool.BondedTokens = sdk.NewDec(2)
params := keeper.GetParams(ctx)
validators := make([]Validator, 2)
var delegations []Delegation
// initialize the validators
validators[0].OperatorAddr = sdk.ValAddress(keep.Addrs[0])
validators[0].ConsPubKey = keep.PKs[0]
validators[0].Description = Description{Moniker: "hoop"}
validators[0].Status = sdk.Bonded
validators[0].Tokens = sdk.OneDec()
validators[0].DelegatorShares = sdk.OneDec()
validators[1].OperatorAddr = sdk.ValAddress(keep.Addrs[1])
validators[1].ConsPubKey = keep.PKs[1]
validators[1].Description = Description{Moniker: "bloop"}
validators[1].Status = sdk.Bonded
validators[1].Tokens = sdk.OneDec()
validators[1].DelegatorShares = sdk.OneDec()
genesisState := types.NewGenesisState(pool, params, validators, delegations)
vals, err := InitGenesis(ctx, keeper, genesisState)
require.NoError(t, err)
actualGenesis := ExportGenesis(ctx, keeper)
require.Equal(t, genesisState.Pool, actualGenesis.Pool)
require.Equal(t, genesisState.Params, actualGenesis.Params)
require.Equal(t, genesisState.Bonds, actualGenesis.Bonds)
require.EqualValues(t, keeper.GetAllValidators(ctx), actualGenesis.Validators)
// now make sure the validators are bonded and intra-tx counters are correct
resVal, found := keeper.GetValidator(ctx, sdk.ValAddress(keep.Addrs[0]))
require.True(t, found)
require.Equal(t, sdk.Bonded, resVal.Status)
resVal, found = keeper.GetValidator(ctx, sdk.ValAddress(keep.Addrs[1]))
require.True(t, found)
require.Equal(t, sdk.Bonded, resVal.Status)
abcivals := make([]abci.ValidatorUpdate, len(vals))
for i, val := range validators {
abcivals[i] = val.ABCIValidatorUpdate()
}
require.Equal(t, abcivals, vals)
}
func TestInitGenesisLargeValidatorSet(t *testing.T) {
size := 200
require.True(t, size > 100)
ctx, _, keeper := keep.CreateTestInput(t, false, 1000)
// Assigning 2 to the first 100 vals, 1 to the rest
pool := keeper.GetPool(ctx)
pool.BondedTokens = sdk.NewDec(int64(200 + (size - 100)))
params := keeper.GetParams(ctx)
delegations := []Delegation{}
validators := make([]Validator, size)
for i := range validators {
validators[i] = NewValidator(sdk.ValAddress(keep.Addrs[i]), keep.PKs[i], Description{Moniker: fmt.Sprintf("#%d", i)})
validators[i].Status = sdk.Bonded
if i < 100 {
validators[i].Tokens = sdk.NewDec(2)
validators[i].DelegatorShares = sdk.NewDec(2)
} else {
validators[i].Tokens = sdk.OneDec()
validators[i].DelegatorShares = sdk.OneDec()
}
}
genesisState := types.NewGenesisState(pool, params, validators, delegations)
vals, err := InitGenesis(ctx, keeper, genesisState)
require.NoError(t, err)
abcivals := make([]abci.ValidatorUpdate, 100)
for i, val := range validators[:100] {
abcivals[i] = val.ABCIValidatorUpdate()
}
require.Equal(t, abcivals, vals)
}
func TestValidateGenesis(t *testing.T) {
genValidators1 := make([]types.Validator, 1, 5)
pk := ed25519.GenPrivKey().PubKey()
genValidators1[0] = types.NewValidator(sdk.ValAddress(pk.Address()), pk, types.NewDescription("", "", "", ""))
genValidators1[0].Tokens = sdk.OneDec()
genValidators1[0].DelegatorShares = sdk.OneDec()
tests := []struct {
name string
mutate func(*types.GenesisState)
wantErr bool
}{
{"default", func(*types.GenesisState) {}, false},
// validate genesis validators
{"duplicate validator", func(data *types.GenesisState) {
(*data).Validators = genValidators1
(*data).Validators = append((*data).Validators, genValidators1[0])
}, true},
{"no delegator shares", func(data *types.GenesisState) {
(*data).Validators = genValidators1
(*data).Validators[0].DelegatorShares = sdk.ZeroDec()
}, true},
{"jailed and bonded validator", func(data *types.GenesisState) {
(*data).Validators = genValidators1
(*data).Validators[0].Jailed = true
(*data).Validators[0].Status = sdk.Bonded
}, true},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
genesisState := types.DefaultGenesisState()
tt.mutate(&genesisState)
if tt.wantErr { | assert.Error(t, ValidateGenesis(genesisState))
} else {
assert.NoError(t, ValidateGenesis(genesisState))
}
})
}
} | |
pipe.rs | // Copyright 2020 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Implementation of the chain block acceptance (or refusal) pipeline.
use crate::core::consensus;
use crate::core::core::hash::Hashed;
use crate::core::core::verifier_cache::VerifierCache;
use crate::core::core::Committed;
use crate::core::core::{Block, BlockHeader, BlockSums, OutputIdentifier};
use crate::core::pow;
use crate::error::{Error, ErrorKind};
use crate::store;
use crate::txhashset;
use crate::types::{CommitPos, Options, Tip};
use crate::util::RwLock;
use std::sync::Arc;
/// Contextual information required to process a new block and either reject or
/// accept it.
pub struct BlockContext<'a> {
/// The options
pub opts: Options,
/// The pow verifier to use when processing a block.
pub pow_verifier: fn(&BlockHeader) -> Result<(), pow::Error>,
/// The active txhashset (rewindable MMRs) to use for block processing.
pub txhashset: &'a mut txhashset::TxHashSet,
/// The active header MMR handle.
pub header_pmmr: &'a mut txhashset::PMMRHandle<BlockHeader>,
/// The active batch to use for block processing.
pub batch: store::Batch<'a>,
/// The verifier cache (caching verifier for rangeproofs and kernel signatures)
pub verifier_cache: Arc<RwLock<dyn VerifierCache>>,
}
// If this block has greater total difficulty than the current head, treat it as unknown in the current context.
// If it matches current chain head (latest or previous hash) then we know about it.
// If it exists in the local db then we know about it.
fn check_known(header: &BlockHeader, head: &Tip, ctx: &BlockContext<'_>) -> Result<(), Error> {
if header.total_difficulty() <= head.total_difficulty {
check_known_head(header, head)?;
check_known_store(header, head, ctx)?;
}
Ok(())
}
// Validate only the proof of work in a block header.
// Used to cheaply validate pow before checking if orphan or continuing block validation.
fn validate_pow_only(header: &BlockHeader, ctx: &mut BlockContext<'_>) -> Result<(), Error> {
if ctx.opts.contains(Options::SKIP_POW) {
// Some of our tests require this check to be skipped (we should revisit this).
return Ok(());
}
if !header.pow.is_primary() && !header.pow.is_secondary() {
return Err(ErrorKind::LowEdgebits.into());
}
if (ctx.pow_verifier)(header).is_err() {
error!(
"pipe: error validating header with cuckoo edge_bits {}",
header.pow.edge_bits(),
);
return Err(ErrorKind::InvalidPow.into());
}
Ok(())
}
/// Runs the block processing pipeline, including validation and finding a
/// place for the new block in the chain.
/// Returns new head if chain head updated and the "fork point" rewound to when processing the new block.
pub fn process_block(
b: &Block,
ctx: &mut BlockContext<'_>,
) -> Result<(Option<Tip>, BlockHeader), Error> {
debug!(
"pipe: process_block {} at {} [in/out/kern: {}/{}/{}]",
b.hash(),
b.header.height,
b.inputs().len(),
b.outputs().len(),
b.kernels().len(),
);
// Read current chain head from db via the batch.
// We use this for various operations later.
let head = ctx.batch.head()?;
// Check if we have already processed this block previously.
check_known(&b.header, &head, ctx)?;
	// Quick pow validation. No point proceeding if this is invalid.
	// We want to do this before we add the block to the orphan pool, so do it
	// now rather than later during header validation.
validate_pow_only(&b.header, ctx)?;
// Get previous header from the db.
let prev = prev_header_store(&b.header, &mut ctx.batch)?;
// Process the header for the block.
// Note: We still want to process the full block if we have seen this header before
// as we may have processed it "header first" and not yet processed the full block.
process_block_header(&b.header, ctx)?;
// Validate the block itself, make sure it is internally consistent.
// Use the verifier_cache for verifying rangeproofs and kernel signatures.
validate_block(b, ctx)?;
// Start a chain extension unit of work dependent on the success of the
// internal validation and saving operations
let header_pmmr = &mut ctx.header_pmmr;
let txhashset = &mut ctx.txhashset;
let batch = &mut ctx.batch;
let fork_point = txhashset::extending(header_pmmr, txhashset, batch, |ext, batch| {
let fork_point = rewind_and_apply_fork(&prev, ext, batch)?;
// Check any coinbase being spent have matured sufficiently.
// This needs to be done within the context of a potentially
// rewound txhashset extension to reflect chain state prior
// to applying the new block.
verify_coinbase_maturity(b, ext, batch)?;
// Validate the block against the UTXO set.
validate_utxo(b, ext, batch)?;
// Using block_sums (utxo_sum, kernel_sum) for the previous block from the db
// we can verify_kernel_sums across the full UTXO sum and full kernel sum
// accounting for inputs/outputs/kernels in this new block.
// We know there are no double-spends etc. if this verifies successfully.
verify_block_sums(b, batch)?;
// Apply the block to the txhashset state.
// Validate the txhashset roots and sizes against the block header.
		// Block is invalid if there are any discrepancies.
apply_block_to_txhashset(b, ext, batch)?;
// If applying this block does not increase the work on the chain then
// we know we have not yet updated the chain to produce a new chain head.
// We discard the "child" batch used in this extension (original ctx batch still active).
// We discard any MMR modifications applied in this extension.
let head = batch.head()?;
if !has_more_work(&b.header, &head) {
ext.extension.force_rollback();
}
Ok(fork_point)
})?;
// Add the validated block to the db.
// Note we do this in the outer batch, not the child batch from the extension
// as we only commit the child batch if the extension increases total work.
// We want to save the block to the db regardless.
add_block(b, &ctx.batch)?;
// If we have no "tail" then set it now.
if ctx.batch.tail().is_err() {
update_body_tail(&b.header, &ctx.batch)?;
}
if has_more_work(&b.header, &head) {
let head = Tip::from_header(&b.header);
update_head(&head, &mut ctx.batch)?;
Ok((Some(head), fork_point))
} else {
Ok((None, fork_point))
}
}
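// A minimal (hypothetical) call-site sketch, assuming the surrounding chain code owns the
// fields defined in BlockContext above and commits the db batch itself:
//
//   let mut ctx = BlockContext { opts, pow_verifier, txhashset, header_pmmr, batch, verifier_cache };
//   let (new_head, _fork_point) = process_block(&b, &mut ctx)?;
//   // the caller commits ctx.batch only once process_block has succeeded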
/// Sync a chunk of block headers.
/// This is only used during header sync.
pub fn sync_block_headers(
headers: &[BlockHeader],
ctx: &mut BlockContext<'_>,
) -> Result<(), Error> {
if headers.is_empty() {
return Ok(());
}
let last_header = headers.last().expect("last header");
// Check if we know about all these headers. If so we can accept them quickly.
// If they *do not* increase total work on the sync chain we are done.
// If they *do* increase total work then we should process them to update sync_head.
let sync_head = {
let hash = ctx.header_pmmr.head_hash()?;
let header = ctx.batch.get_block_header(&hash)?;
Tip::from_header(&header)
};
if let Ok(existing) = ctx.batch.get_block_header(&last_header.hash()) {
if !has_more_work(&existing, &sync_head) {
return Ok(());
}
}
// Validate each header in the chunk and add to our db.
// Note: This batch may be rolled back later if the MMR does not validate successfully.
for header in headers {
validate_header(header, ctx)?;
add_block_header(header, &ctx.batch)?;
}
// Now apply this entire chunk of headers to the sync MMR (ctx is sync MMR specific).
txhashset::header_extending(&mut ctx.header_pmmr, &mut ctx.batch, |ext, batch| {
rewind_and_apply_header_fork(&last_header, ext, batch)?;
Ok(())
})
}
/// Process a block header. Update the header MMR and corresponding header_head if this header
/// increases the total work relative to header_head.
/// Note: In contrast to processing a full block we treat "already known" as success
/// to allow processing to continue (for header itself).
pub fn process_block_header(header: &BlockHeader, ctx: &mut BlockContext<'_>) -> Result<(), Error> {
// If we have already processed the full block for this header then done.
// Note: "already known" in this context is success so subsequent processing can continue.
{
let head = ctx.batch.head()?;
if check_known(header, &head, ctx).is_err() {
return Ok(());
}
}
// Check this header is not an orphan, we must know about the previous header to continue.
let prev_header = ctx.batch.get_previous_header(&header)?;
// If we have not yet seen the full block then check if we have seen this header.
// If it does not increase total_difficulty beyond our current header_head
// then we can (re)accept this header and process the full block (or request it).
// This header is on a fork and we should still accept it as the fork may eventually win.
let header_head = ctx.batch.header_head()?;
if let Ok(existing) = ctx.batch.get_block_header(&header.hash()) {
if !has_more_work(&existing, &header_head) {
return Ok(());
}
}
// We want to validate this individual header before applying it to our header PMMR.
validate_header(header, ctx)?;
// Apply the header to the header PMMR, making sure we put the extension in the correct state
// based on previous header first.
txhashset::header_extending(&mut ctx.header_pmmr, &mut ctx.batch, |ext, batch| {
rewind_and_apply_header_fork(&prev_header, ext, batch)?;
ext.validate_root(header)?;
ext.apply_header(header)?;
if !has_more_work(&header, &header_head) {
ext.force_rollback();
}
Ok(())
})?;
// Add this new block header to the db.
add_block_header(header, &ctx.batch)?;
if has_more_work(header, &header_head) {
update_header_head(&Tip::from_header(header), &mut ctx.batch)?;
}
Ok(())
}
/// Quick check to reject recently handled blocks.
/// Checks against last_block_h and prev_block_h of the chain head.
fn check_known_head(header: &BlockHeader, head: &Tip) -> Result<(), Error> {
let bh = header.hash();
if bh == head.last_block_h || bh == head.prev_block_h {
return Err(ErrorKind::Unfit("already known in head".to_string()).into());
}
Ok(())
}
// Check if this block is in the store already.
fn check_known_store(
header: &BlockHeader,
head: &Tip,
ctx: &BlockContext<'_>,
) -> Result<(), Error> {
match ctx.batch.block_exists(&header.hash()) {
Ok(true) => {
if header.height < head.height.saturating_sub(50) {
// TODO - we flag this as an "abusive peer" but only in the case
// where we have the full block in our store.
// So this is not a particularly exhaustive check.
Err(ErrorKind::OldBlock.into())
} else {
Err(ErrorKind::Unfit("already known in store".to_string()).into())
}
}
Ok(false) => {
// Not yet processed this block, we can proceed.
Ok(())
}
Err(e) => Err(ErrorKind::StoreErr(e, "pipe get this block".to_owned()).into()),
}
}
// Find the previous header from the store.
// Return an Orphan error if we cannot find the previous header.
fn prev_header_store(
header: &BlockHeader,
batch: &mut store::Batch<'_>,
) -> Result<BlockHeader, Error> {
let prev = batch.get_previous_header(&header)?;
Ok(prev)
}
/// First level of block validation that only needs to act on the block header
/// to make it as cheap as possible. The different validations are also
/// arranged by order of cost to have as little DoS surface as possible.
fn validate_header(header: &BlockHeader, ctx: &mut BlockContext<'_>) -> Result<(), Error> {
// First I/O cost, delayed as late as possible.
let prev = prev_header_store(header, &mut ctx.batch)?;
// This header height must increase the height from the previous header by exactly 1.
if header.height != prev.height + 1 {
return Err(ErrorKind::InvalidBlockHeight.into());
}
// This header must have a valid header version for its height.
if !consensus::valid_header_version(header.height, header.version) {
return Err(ErrorKind::InvalidBlockVersion(header.version).into());
}
if header.timestamp <= prev.timestamp {
// prevent time warp attacks and some timestamp manipulations by forcing strict
// time progression
return Err(ErrorKind::InvalidBlockTime.into());
}
// verify the proof of work and related parameters
// at this point we have a previous block header
// we know the height increased by one
// so now we can check the total_difficulty increase is also valid
// check the pow hash shows a difficulty at least as large
// as the target difficulty
if !ctx.opts.contains(Options::SKIP_POW) {
// Quick check of this header in isolation. No point proceeding if this fails.
// We can do this without needing to iterate over previous headers.
validate_pow_only(header, ctx)?;
if header.total_difficulty() <= prev.total_difficulty() {
return Err(ErrorKind::DifficultyTooLow.into());
}
let target_difficulty = header.total_difficulty() - prev.total_difficulty();
if header.pow.to_difficulty(header.height) < target_difficulty {
return Err(ErrorKind::DifficultyTooLow.into());
}
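		// Illustrative numbers (hypothetical): if prev total_difficulty is 1_000 and this
		// header claims 1_070, then target_difficulty is 70, so the pow itself must prove
		// a difficulty of at least 70 for this header to be accepted.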
// explicit check to ensure total_difficulty has increased by exactly
// the _network_ difficulty of the previous block
// (during testnet1 we use _block_ difficulty here)
let child_batch = ctx.batch.child()?;
let diff_iter = store::DifficultyIter::from_batch(prev.hash(), child_batch);
let next_header_info = consensus::next_difficulty(header.height, diff_iter);
if target_difficulty != next_header_info.difficulty {
info!(
"validate_header: header target difficulty {} != {}",
target_difficulty.to_num(),
next_header_info.difficulty.to_num()
);
return Err(ErrorKind::WrongTotalDifficulty.into());
}
// check the secondary PoW scaling factor if applicable
if header.pow.secondary_scaling != next_header_info.secondary_scaling {
info!(
"validate_header: header secondary scaling {} != {}",
header.pow.secondary_scaling, next_header_info.secondary_scaling
);
return Err(ErrorKind::InvalidScaling.into());
}
}
Ok(())
}
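/// Validate the block itself (internal consistency of inputs/outputs/kernels, with
/// rangeproofs and kernel signatures checked via the shared verifier cache), using the
/// previous header's total kernel offset.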
fn validate_block(block: &Block, ctx: &mut BlockContext<'_>) -> Result<(), Error> {
let prev = ctx.batch.get_previous_header(&block.header)?;
block
.validate(&prev.total_kernel_offset, ctx.verifier_cache.clone())
.map_err(ErrorKind::InvalidBlockProof)?;
Ok(())
}
/// Verify the block is not spending coinbase outputs before they have sufficiently matured.
fn verify_coinbase_maturity(
block: &Block,
ext: &txhashset::ExtensionPair<'_>,
batch: &store::Batch<'_>,
) -> Result<(), Error> {
let extension = &ext.extension;
let header_extension = &ext.header_extension;
extension
.utxo_view(header_extension)
.verify_coinbase_maturity(&block.inputs(), block.header.height, batch)
}
/// Verify kernel sums across the full utxo and kernel sets based on block_sums
/// of previous block accounting for the inputs|outputs|kernels of the new block.
/// Saves the new block_sums to the db via the current batch if successful.
fn verify_block_sums(b: &Block, batch: &store::Batch<'_>) -> Result<(), Error> {
// Retrieve the block_sums for the previous block.
let block_sums = batch.get_block_sums(&b.header.prev_hash)?;
// Overage is based purely on the new block.
// Previous block_sums have taken all previous overage into account.
let overage = b.header.overage();
// Offset on the other hand is the total kernel offset from the new block.
let offset = b.header.total_kernel_offset();
// Verify the kernel sums for the block_sums with the new block applied.
let (utxo_sum, kernel_sum) =
(block_sums, b as &dyn Committed).verify_kernel_sums(overage, offset)?;
batch.save_block_sums(
&b.hash(),
BlockSums {
utxo_sum,
kernel_sum,
},
)?;
Ok(())
}
/// Fully validate the block by applying it to the txhashset extension.
/// Check both the txhashset roots and sizes are correct after applying the block.
fn apply_block_to_txhashset(
block: &Block,
ext: &mut txhashset::ExtensionPair<'_>,
batch: &store::Batch<'_>,
) -> Result<(), Error> {
ext.extension
.apply_block(block, ext.header_extension, batch)?;
ext.extension.validate_roots(&block.header)?;
ext.extension.validate_sizes(&block.header)?;
Ok(())
}
/// Officially adds the block to our chain (possibly on a losing fork).
/// Header must be added separately (assume this has been done previously).
fn add_block(b: &Block, batch: &store::Batch<'_>) -> Result<(), Error> {
batch.save_block(b)?;
Ok(())
}
/// Update the block chain tail so we can know the exact tail of full blocks in this node
fn update_body_tail(bh: &BlockHeader, batch: &store::Batch<'_>) -> Result<(), Error> {
let tip = Tip::from_header(bh);
batch
.save_body_tail(&tip)
.map_err(|e| ErrorKind::StoreErr(e, "pipe save body tail".to_owned()))?;
debug!("body tail {} @ {}", bh.hash(), bh.height);
Ok(())
}
/// Officially adds the block header to our header chain.
fn add_block_header(bh: &BlockHeader, batch: &store::Batch<'_>) -> Result<(), Error> {
batch
.save_block_header(bh)
.map_err(|e| ErrorKind::StoreErr(e, "pipe save header".to_owned()))?;
Ok(())
}
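/// Update the header chain head (header_head) in the db.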
fn update_header_head(head: &Tip, batch: &mut store::Batch<'_>) -> Result<(), Error> {
batch
.save_header_head(&head)
.map_err(|e| ErrorKind::StoreErr(e, "pipe save header head".to_owned()))?;
debug!(
"header head updated to {} at {}",
head.last_block_h, head.height
);
Ok(())
}
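/// Update the full block chain head (body head) in the db.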
fn update_head(head: &Tip, batch: &mut store::Batch<'_>) -> Result<(), Error> {
batch
.save_body_head(&head)
.map_err(|e| ErrorKind::StoreErr(e, "pipe save body".to_owned()))?;
debug!("head updated to {} at {}", head.last_block_h, head.height);
Ok(())
}
// Whether the provided block totals more work than the chain tip
fn has_more_work(header: &BlockHeader, head: &Tip) -> bool {
header.total_difficulty() > head.total_difficulty
}
/// Rewind the header chain and reapply headers on a fork.
pub fn rewind_and_apply_header_fork(
header: &BlockHeader,
ext: &mut txhashset::HeaderExtension<'_>,
batch: &store::Batch<'_>,
) -> Result<(), Error> {
let mut fork_hashes = vec![];
let mut current = header.clone();
	while current.height > 0 && ext.is_on_current_chain(&current, batch).is_err() {
		fork_hashes.push(current.hash());
		current = batch.get_previous_header(&current)?;
}
fork_hashes.reverse();
let forked_header = current;
// Rewind the txhashset state back to the block where we forked from the most work chain.
ext.rewind(&forked_header)?;
// Re-apply all headers on this fork.
for h in fork_hashes {
let header = batch
.get_block_header(&h)
.map_err(|e| ErrorKind::StoreErr(e, "getting forked headers".to_string()))?;
ext.validate_root(&header)?;
ext.apply_header(&header)?;
}
Ok(())
}
/// Utility function to handle forks. From the forked block, jump backward
/// to find the fork point. Rewind the txhashset to the fork point and apply all
/// necessary blocks prior to the one being processed to set the txhashset in
/// the expected state.
/// Returns the "fork point" that we rewound to.
pub fn rewind_and_apply_fork(
header: &BlockHeader,
ext: &mut txhashset::ExtensionPair<'_>,
batch: &store::Batch<'_>,
) -> Result<BlockHeader, Error> {
let extension = &mut ext.extension;
let header_extension = &mut ext.header_extension;
// Prepare the header MMR.
rewind_and_apply_header_fork(header, header_extension, batch)?;
// Rewind the txhashset extension back to common ancestor based on header MMR.
let mut current = batch.head_header()?;
while current.height > 0
&& header_extension
			.is_on_current_chain(&current, batch)
			.is_err()
	{
		current = batch.get_previous_header(&current)?;
}
let fork_point = current;
extension.rewind(&fork_point, batch)?;
// Then apply all full blocks since this common ancestor
	// to put the txhashset extension in a state to accept the new block.
let mut fork_hashes = vec![];
let mut current = header.clone();
while current.height > fork_point.height {
fork_hashes.push(current.hash());
		current = batch.get_previous_header(&current)?;
}
fork_hashes.reverse();
for h in fork_hashes {
let fb = batch
.get_block(&h)
.map_err(|e| ErrorKind::StoreErr(e, "getting forked blocks".to_string()))?;
// Re-verify coinbase maturity along this fork.
verify_coinbase_maturity(&fb, ext, batch)?;
// Validate the block against the UTXO set.
validate_utxo(&fb, ext, batch)?;
// Re-verify block_sums to set the block_sums up on this fork correctly.
verify_block_sums(&fb, batch)?;
// Re-apply the blocks.
apply_block_to_txhashset(&fb, ext, batch)?;
}
Ok(fork_point)
}
/// Validate block inputs against utxo.
fn validate_utxo(
block: &Block,
ext: &mut txhashset::ExtensionPair<'_>,
batch: &store::Batch<'_>,
) -> Result<Vec<(OutputIdentifier, CommitPos)>, Error> {
let extension = &ext.extension;
let header_extension = &ext.header_extension;
extension
.utxo_view(header_extension)
.validate_block(block, batch)
}
stockadjuster.js
/******/ (function(modules) { // webpackBootstrap
/******/ // The module cache
/******/ var installedModules = {};
/******/
/******/ // The require function
/******/ function __webpack_require__(moduleId) {
/******/
/******/ // Check if module is in cache
/******/ if(installedModules[moduleId])
/******/ return installedModules[moduleId].exports;
/******/
/******/ // Create a new module (and put it into the cache)
/******/ var module = installedModules[moduleId] = {
/******/ i: moduleId,
/******/ l: false,
/******/ exports: {}
/******/ };
/******/
/******/ // Execute the module function
/******/ modules[moduleId].call(module.exports, module, module.exports, __webpack_require__);
/******/
/******/ 		// Flag the module as loaded
/******/ module.l = true;
/******/
/******/ // Return the exports of the module
/******/ return module.exports;
/******/ }
/******/
/******/
/******/ // expose the modules object (__webpack_modules__)
/******/ __webpack_require__.m = modules;
/******/
/******/ // expose the module cache
/******/ __webpack_require__.c = installedModules;
/******/
/******/ // identity function for calling harmory imports with the correct context
/******/ __webpack_require__.i = function(value) { return value; };
/******/
/******/ // define getter function for harmory exports
/******/ __webpack_require__.d = function(exports, name, getter) {
/******/ Object.defineProperty(exports, name, {
/******/ configurable: false,
/******/ enumerable: true,
/******/ get: getter
/******/ });
/******/ };
/******/
/******/ // getDefaultExport function for compatibility with non-harmony modules
/******/ __webpack_require__.n = function(module) {
/******/ var getter = module && module.__esModule ?
/******/ function getDefault() { return module['default']; } :
/******/ function getModuleExports() { return module; };
/******/ __webpack_require__.d(getter, 'a', getter);
/******/ return getter;
/******/ };
/******/
/******/ // Object.prototype.hasOwnProperty.call
/******/ __webpack_require__.o = function(object, property) { return Object.prototype.hasOwnProperty.call(object, property); };
/******/
/******/ // __webpack_public_path__
/******/ __webpack_require__.p = "";
/******/
/******/ // Load entry module and return exports
/******/ return __webpack_require__(__webpack_require__.s = 7);
/******/ })
/************************************************************************/
/******/ ([
/* 0 */
/***/ function(module, exports, __webpack_require__) {
eval("var __vue_exports__, __vue_options__\nvar __vue_styles__ = {}\n\n/* styles */\n__webpack_require__(6)\n\n/* script */\n__vue_exports__ = __webpack_require__(1)\n\n/* template */\nvar __vue_template__ = __webpack_require__(4)\n__vue_options__ = __vue_exports__ = __vue_exports__ || {}\nif (\n typeof __vue_exports__.default === \"object\" ||\n typeof __vue_exports__.default === \"function\"\n) {\nif (Object.keys(__vue_exports__).some(function (key) { return key !== \"default\" && key !== \"__esModule\" })) {console.error(\"named exports are not supported in *.vue files.\")}\n__vue_options__ = __vue_exports__ = __vue_exports__.default\n}\nif (typeof __vue_options__ === \"function\") {\n __vue_options__ = __vue_options__.options\n}\n__vue_options__.__file = \"C:\\\\laragon\\\\www\\\\k9homes.com.au\\\\fido\\\\resources\\\\assets\\\\js\\\\components\\\\Stockadjuster.vue\"\n__vue_options__.render = __vue_template__.render\n__vue_options__.staticRenderFns = __vue_template__.staticRenderFns\n\n/* hot reload */\nif (false) {(function () {\n var hotAPI = require(\"vue-hot-reload-api\")\n hotAPI.install(require(\"vue\"), false)\n if (!hotAPI.compatible) return\n module.hot.accept()\n if (!module.hot.data) {\n hotAPI.createRecord(\"data-v-eaeb90c4\", __vue_options__)\n } else {\n hotAPI.reload(\"data-v-eaeb90c4\", __vue_options__)\n }\n})()}\nif (__vue_options__.functional) {console.error(\"[vue-loader] Stockadjuster.vue: functional components are not supported and should be defined in plain js files using render functions.\")}\n\nmodule.exports = __vue_exports__\n//# sourceMappingURL=data:application/json;charset=utf-8;base64,eyJ2ZXJzaW9uIjozLCJmaWxlIjoiMC5qcyIsInNvdXJjZXMiOlsid2VicGFjazovLy8uL3Jlc291cmNlcy9hc3NldHMvanMvY29tcG9uZW50cy9TdG9ja2FkanVzdGVyLnZ1ZT8wOGU5Il0sInNvdXJjZXNDb250ZW50IjpbInZhciBfX3Z1ZV9leHBvcnRzX18sIF9fdnVlX29wdGlvbnNfX1xudmFyIF9fdnVlX3N0eWxlc19fID0ge31cblxuLyogc3R5bGVzICovXG5yZXF1aXJlKFwiISF2dWUtc3R5bGUtbG9hZGVyIWNzcy1sb2FkZXI/c291cmNlTWFwIXZ1ZS1sb2FkZXIvbGliL3N0eWxlLXJld3JpdGVyP2lkPWRhdGEtdi1lYWViOTBjNCF2dWUtbG9hZGVyL2xpYi9zZWxlY3Rvcj90eXBlPXN0eWxlcyZpbmRleD0wIS4vU3RvY2thZGp1c3Rlci52dWVcIilcblxuLyogc2NyaXB0ICovXG5fX3Z1ZV9leHBvcnRzX18gPSByZXF1aXJlKFwiISFidWJsZS1sb2FkZXIhdnVlLWxvYWRlci9saWIvc2VsZWN0b3I/dHlwZT1zY3JpcHQmaW5kZXg9MCEuL1N0b2NrYWRqdXN0ZXIudnVlXCIpXG5cbi8qIHRlbXBsYXRlICovXG52YXIgX192dWVfdGVtcGxhdGVfXyA9IHJlcXVpcmUoXCIhIXZ1ZS1sb2FkZXIvbGliL3RlbXBsYXRlLWNvbXBpbGVyP2lkPWRhdGEtdi1lYWViOTBjNCF2dWUtbG9hZGVyL2xpYi9zZWxlY3Rvcj90eXBlPXRlbXBsYXRlJmluZGV4PTAhLi9TdG9ja2FkanVzdGVyLnZ1ZVwiKVxuX192dWVfb3B0aW9uc19fID0gX192dWVfZXhwb3J0c19fID0gX192dWVfZXhwb3J0c19fIHx8IHt9XG5pZiAoXG4gIHR5cGVvZiBfX3Z1ZV9leHBvcnRzX18uZGVmYXVsdCA9PT0gXCJvYmplY3RcIiB8fFxuICB0eXBlb2YgX192dWVfZXhwb3J0c19fLmRlZmF1bHQgPT09IFwiZnVuY3Rpb25cIlxuKSB7XG5pZiAoT2JqZWN0LmtleXMoX192dWVfZXhwb3J0c19fKS5zb21lKGZ1bmN0aW9uIChrZXkpIHsgcmV0dXJuIGtleSAhPT0gXCJkZWZhdWx0XCIgJiYga2V5ICE9PSBcIl9fZXNNb2R1bGVcIiB9KSkge2NvbnNvbGUuZXJyb3IoXCJuYW1lZCBleHBvcnRzIGFyZSBub3Qgc3VwcG9ydGVkIGluICoudnVlIGZpbGVzLlwiKX1cbl9fdnVlX29wdGlvbnNfXyA9IF9fdnVlX2V4cG9ydHNfXyA9IF9fdnVlX2V4cG9ydHNfXy5kZWZhdWx0XG59XG5pZiAodHlwZW9mIF9fdnVlX29wdGlvbnNfXyA9PT0gXCJmdW5jdGlvblwiKSB7XG4gIF9fdnVlX29wdGlvbnNfXyA9IF9fdnVlX29wdGlvbnNfXy5vcHRpb25zXG59XG5fX3Z1ZV9vcHRpb25zX18uX19maWxlID0gXCJDOlxcXFxsYXJhZ29uXFxcXHd3d1xcXFxrOWhvbWVzLmNvbS5hdVxcXFxmaWRvXFxcXHJlc291cmNlc1xcXFxhc3NldHNcXFxcanNcXFxcY29tcG9uZW50c1xcXFxTdG9ja2FkanVzdGVyLnZ1ZVwiXG5fX3Z1ZV9vcHRpb25zX18ucmVuZGVyID0gX192dWVfdGVtcGxhdGVfXy5yZW5kZXJcbl9fdnVlX29wdGlvbnNfXy5zdGF0aWNSZW5kZXJGbnMgPSBfX3Z1ZV90ZW1wbGF0ZV9f
LnN0YXRpY1JlbmRlckZuc1xuXG4vKiBob3QgcmVsb2FkICovXG5pZiAobW9kdWxlLmhvdCkgeyhmdW5jdGlvbiAoKSB7XG4gIHZhciBob3RBUEkgPSByZXF1aXJlKFwidnVlLWhvdC1yZWxvYWQtYXBpXCIpXG4gIGhvdEFQSS5pbnN0YWxsKHJlcXVpcmUoXCJ2dWVcIiksIGZhbHNlKVxuICBpZiAoIWhvdEFQSS5jb21wYXRpYmxlKSByZXR1cm5cbiAgbW9kdWxlLmhvdC5hY2NlcHQoKVxuICBpZiAoIW1vZHVsZS5ob3QuZGF0YSkge1xuICAgIGhvdEFQSS5jcmVhdGVSZWNvcmQoXCJkYXRhLXYtZWFlYjkwYzRcIiwgX192dWVfb3B0aW9uc19fKVxuICB9IGVsc2Uge1xuICAgIGhvdEFQSS5yZWxvYWQoXCJkYXRhLXYtZWFlYjkwYzRcIiwgX192dWVfb3B0aW9uc19fKVxuICB9XG59KSgpfVxuaWYgKF9fdnVlX29wdGlvbnNfXy5mdW5jdGlvbmFsKSB7Y29uc29sZS5lcnJvcihcIlt2dWUtbG9hZGVyXSBTdG9ja2FkanVzdGVyLnZ1ZTogZnVuY3Rpb25hbCBjb21wb25lbnRzIGFyZSBub3Qgc3VwcG9ydGVkIGFuZCBzaG91bGQgYmUgZGVmaW5lZCBpbiBwbGFpbiBqcyBmaWxlcyB1c2luZyByZW5kZXIgZnVuY3Rpb25zLlwiKX1cblxubW9kdWxlLmV4cG9ydHMgPSBfX3Z1ZV9leHBvcnRzX19cblxuXG5cbi8vLy8vLy8vLy8vLy8vLy8vL1xuLy8gV0VCUEFDSyBGT09URVJcbi8vIC4vcmVzb3VyY2VzL2Fzc2V0cy9qcy9jb21wb25lbnRzL1N0b2NrYWRqdXN0ZXIudnVlXG4vLyBtb2R1bGUgaWQgPSAwXG4vLyBtb2R1bGUgY2h1bmtzID0gMCJdLCJtYXBwaW5ncyI6IkFBQUE7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTsiLCJzb3VyY2VSb290IjoiIn0=");
/***/ },
/* 1 */
/***/ function(module, exports, __webpack_require__) {
"use strict";
eval("//\n//\n//\n//\n//\n//\n//\n//\n//\n//\n//\n//\n//\n//\n//\n//\n//\n//\n//\n//\n//\n//\n//\n//\n//\n//\n//\n//\n//\n//\n//\n//\n//\n//\n//\n//\n//\n//\n//\n//\n//\n//\n//\n//\n//\n//\n//\n//\n//\n//\n//\n//\n//\n//\n//\n//\n//\n//\n//\n//\n//\n//\n\r\n/* harmony default export */ exports[\"default\"] = {\r\n data: function data() {\r\n return {\r\n \r\n barcode: null,\r\n product_code:null,\r\n product_find_key: null,\r\n qty_ordered: 0,\r\n qty_available: 0,\r\n qty_onshelf: 0,\r\n adjusted_qty_onshelf: 0,\r\n product_id: 0,\r\n updated: false,\r\n loaded: false,\r\n error: false,\r\n message: null\r\n }\r\n },\r\n methods: {\r\n barcodeInput: function barcodeInput(val){\r\n var input = val.toString().trim()\r\n // remove checksum digit if present\r\n\r\n this.barcode = input.substring(0,12)\r\n\r\n if(this.barcode.length == 12 && !this.loaded){\r\n this.findItem()\r\n } else {\r\n this.clearForm()\r\n }\r\n },\r\n productCodeInput: function productCodeInput(val){\r\n this.product_code = val.toString().trim()\r\n },\r\n focusOn: function focusOn(input){\r\n //console.log('focus is on ',input.name)\r\n if(this.loaded){\r\n this.clearForm()\r\n this.barcode = null\r\n this.product_code = null\r\n //this.$forceUpdate()\r\n //this.$nextTick(() => this.$forceUpdate())\r\n }\r\n },\r\n \r\n findItem: function findItem() {\n var this$1 = this;\n\r\n \r\n var data = { \r\n // _token: K9homes.csrfToken, \r\n barcode: (this.barcode !== null && this.barcode.length == 12 ) ? this.barcode : null,\r\n product_code: this.product_code\r\n }\r\n\r\n this.$http.post(pageVar.url + '/find', data ).then( function (response) {\r\n // success callback\r\n // console.log(response);\r\n var product = response.body\r\n this$1.product_id = product.id\r\n this$1.qty_available = parseInt(product.qty_instock)\r\n this$1.qty_ordered = parseInt(product.ordered)\r\n // default shelf qty to the combo of both\r\n \r\n this$1.qty_onshelf = parseInt(product.qty_instock) + parseInt(product.ordered)\r\n this$1.adjusted_qty_onshelf = this$1.qty_onshelf\r\n\r\n if(this$1.product_code === null) {\r\n this$1.product_code = product.product_code\r\n }\r\n if(this$1.barcode === null) {\r\n this$1.barcode = product.barcode\r\n } \r\n\r\n this$1.loaded = true \r\n this$1.error = false \r\n\r\n // Move focus\r\n this$1.$refs.qty_on_shelf.focus() \r\n\r\n }, function (response) {\r\n // error callback\r\n this$1.clearForm();\r\n this$1.error = true;\r\n //alert('server error encountered');\r\n });\r\n },\r\n adjustItem: function adjustItem(qty){\n var this$1 = this;\n\r\n \r\n var data = { \r\n // _token: K9homes.csrfToken, \r\n qty_instock: this.adjusted_qty_onshelf - this.qty_ordered\r\n }\r\n\r\n this.$http.post(pageVar.url + '/'+this.product_id, data ).then( function (response) {\r\n // success callback\r\n // console.log(response);\r\n var product = response.body\r\n this$1.product_id = product.id\r\n this$1.qty_available = parseInt(product.qty_instock)\r\n this$1.qty_ordered = parseInt(product.ordered)\r\n // default shelf qty to the combo of both\r\n this$1.qty_onshelf = this$1.qty_available + this$1.qty_ordered\r\n this$1.adjusted_qty_onshelf = this$1.qty_onshelf\r\n\r\n this$1.clearForm()\r\n this$1.barcode = null\r\n this$1.product_code = null\r\n this$1.$refs.barcode_input.focus()\r\n \r\n\r\n }, function (response) {\r\n // error callback\r\n this$1.clearForm();\r\n this$1.error = true;\r\n //alert('server error encountered');\r\n });\r\n },\r\n \r\n clearForm: function clearForm() {\r\n this.qty_available = 0\r\n 
this.qty_ordered = 0\r\n this.qty_onshelf = 0\r\n this.adjusted_qty_onshelf = 0\r\n // this.barcode = null\r\n // this.product_code = null\r\n this.product_id = 0\r\n\r\n this.updated = false\r\n this.loaded = false\r\n this.error = false\r\n\r\n this.$refs.barcode_input.focus()\r\n }\r\n \r\n },\r\n mounted: function mounted() {\r\n \r\n this.$refs.barcode_input.focus()\r\n //this.$nextTick(() => this.$refs.user_input.focus())\r\n \r\n }\r\n};\r\n\r\n//# sourceMappingURL=data:application/json;charset=utf-8;base64,eyJ2ZXJzaW9uIjozLCJmaWxlIjoiMS5qcyIsInNvdXJjZXMiOlsid2VicGFjazovLy9yZXNvdXJjZXMvYXNzZXRzL2pzL2NvbXBvbmVudHMvU3RvY2thZGp1c3Rlci52dWU/ZThmZCJdLCJzb3VyY2VzQ29udGVudCI6WyIvL1xuLy9cbi8vXG4vL1xuLy9cbi8vXG4vL1xuLy9cbi8vXG4vL1xuLy9cbi8vXG4vL1xuLy9cbi8vXG4vL1xuLy9cbi8vXG4vL1xuLy9cbi8vXG4vL1xuLy9cbi8vXG4vL1xuLy9cbi8vXG4vL1xuLy9cbi8vXG4vL1xuLy9cbi8vXG4vL1xuLy9cbi8vXG4vL1xuLy9cbi8vXG4vL1xuLy9cbi8vXG4vL1xuLy9cbi8vXG4vL1xuLy9cbi8vXG4vL1xuLy9cbi8vXG4vL1xuLy9cbi8vXG4vL1xuLy9cbi8vXG4vL1xuLy9cbi8vXG4vL1xuLy9cblxyXG5leHBvcnQgZGVmYXVsdCB7XHJcbiAgZGF0YSgpIHtcclxuICAgIHJldHVybiB7XHJcbiAgICAgIFxyXG4gICAgICBiYXJjb2RlOiBudWxsLFxyXG4gICAgICBwcm9kdWN0X2NvZGU6bnVsbCxcclxuICAgICAgcHJvZHVjdF9maW5kX2tleTogbnVsbCxcclxuICAgICAgcXR5X29yZGVyZWQ6IDAsXHJcbiAgICAgIHF0eV9hdmFpbGFibGU6IDAsXHJcbiAgICAgIHF0eV9vbnNoZWxmOiAwLFxyXG4gICAgICBhZGp1c3RlZF9xdHlfb25zaGVsZjogMCxcclxuICAgICAgcHJvZHVjdF9pZDogMCxcclxuICAgICAgdXBkYXRlZDogZmFsc2UsXHJcbiAgICAgIGxvYWRlZDogZmFsc2UsXHJcbiAgICAgIGVycm9yOiBmYWxzZSxcclxuICAgICAgbWVzc2FnZTogbnVsbFxyXG4gICAgfVxyXG4gIH0sXHJcbiAgbWV0aG9kczoge1xyXG4gICAgYmFyY29kZUlucHV0KHZhbCl7XHJcbiAgICAgIGxldCBpbnB1dCA9IHZhbC50b1N0cmluZygpLnRyaW0oKVxyXG4gICAgICAvLyByZW1vdmUgY2hlY2tzdW0gZGlnaXQgaWYgcHJlc2VudFxyXG5cclxuICAgICAgdGhpcy5iYXJjb2RlID0gaW5wdXQuc3Vic3RyaW5nKDAsMTIpXHJcblxyXG4gICAgICBpZih0aGlzLmJhcmNvZGUubGVuZ3RoID09IDEyICYmICF0aGlzLmxvYWRlZCl7XHJcbiAgICAgICAgdGhpcy5maW5kSXRlbSgpXHJcbiAgICAgIH0gZWxzZSB7XHJcbiAgICAgICAgdGhpcy5jbGVhckZvcm0oKVxyXG4gICAgICB9XHJcbiAgICB9LFxyXG4gICAgcHJvZHVjdENvZGVJbnB1dCh2YWwpe1xyXG4gICAgICB0aGlzLnByb2R1Y3RfY29kZSA9IHZhbC50b1N0cmluZygpLnRyaW0oKVxyXG4gICAgfSxcclxuICAgIGZvY3VzT24oaW5wdXQpe1xyXG4gICAgICAvL2NvbnNvbGUubG9nKCdmb2N1cyBpcyBvbiAnLGlucHV0Lm5hbWUpXHJcbiAgICAgIGlmKHRoaXMubG9hZGVkKXtcclxuICAgICAgICB0aGlzLmNsZWFyRm9ybSgpXHJcbiAgICAgICAgdGhpcy5iYXJjb2RlID0gbnVsbFxyXG4gICAgICAgIHRoaXMucHJvZHVjdF9jb2RlID0gbnVsbFxyXG4gICAgICAgIC8vdGhpcy4kZm9yY2VVcGRhdGUoKVxyXG4gICAgICAgIC8vdGhpcy4kbmV4dFRpY2soKCkgPT4gdGhpcy4kZm9yY2VVcGRhdGUoKSlcclxuICAgICAgfVxyXG4gICAgfSxcclxuICAgXHJcbiAgICBmaW5kSXRlbSgpIHtcclxuICAgICAgICAgIFxyXG4gICAgICAgICAgbGV0IGRhdGEgPSB7IFxyXG4gICAgICAgICAgICAgIC8vIF90b2tlbjogSzlob21lcy5jc3JmVG9rZW4sIFxyXG4gICAgICAgICAgICAgIGJhcmNvZGU6ICh0aGlzLmJhcmNvZGUgIT09IG51bGwgJiYgdGhpcy5iYXJjb2RlLmxlbmd0aCA9PSAxMiApID8gdGhpcy5iYXJjb2RlIDogbnVsbCxcclxuICAgICAgICAgICAgICBwcm9kdWN0X2NvZGU6IHRoaXMucHJvZHVjdF9jb2RlXHJcbiAgICAgICAgICB9XHJcblxyXG4gICAgICAgICAgdGhpcy4kaHR0cC5wb3N0KHBhZ2VWYXIudXJsICsgJy9maW5kJywgZGF0YSApLnRoZW4oIChyZXNwb25zZSkgPT4ge1xyXG4gICAgICAgICAgICAgIC8vIHN1Y2Nlc3MgY2FsbGJhY2tcclxuICAgICAgICAgICAgICAvLyBjb25zb2xlLmxvZyhyZXNwb25zZSk7XHJcbiAgICAgICAgICAgICAgbGV0IHByb2R1Y3QgPSByZXNwb25zZS5ib2R5XHJcbiAgICAgICAgICAgICAgdGhpcy5wcm9kdWN0X2lkID0gcHJvZHVjdC5pZFxyXG4gICAgICAgICAgICAgIHRoaXMucXR5X2F2YWlsYWJsZSA9IHBhcnNlSW50KHByb2R1Y3QucXR5X2luc3RvY2spXHJcbiAgICAgICAgICAgICAgdGhpcy5xdHlfb3JkZXJlZCA9IHBhcnNlSW50KHByb2R1Y3Qub3JkZXJlZClcclxuICAgICAgICAgICAgICAvLyBkZWZhdWx0IHNoZWxmIHF0eSB0byB0aGUgY29tYm8gb2YgYm90aFxyXG4gICAgICAgICAgICAgIFxyXG4gICAgICAgICAgICAgIHRoaXMucXR5X29uc2hlbGYgPSB
wYXJzZUludChwcm9kdWN0LnF0eV9pbnN0b2NrKSArIHBhcnNlSW50KHByb2R1Y3Qub3JkZXJlZClcclxuICAgICAgICAgICAgICB0aGlzLmFkanVzdGVkX3F0eV9vbnNoZWxmID0gdGhpcy5xdHlfb25zaGVsZlxyXG5cclxuICAgICAgICAgICAgICBpZih0aGlzLnByb2R1Y3RfY29kZSA9PT0gbnVsbCkgIHtcclxuICAgICAgICAgICAgICAgIHRoaXMucHJvZHVjdF9jb2RlID0gcHJvZHVjdC5wcm9kdWN0X2NvZGVcclxuICAgICAgICAgICAgICB9XHJcbiAgICAgICAgICAgICAgaWYodGhpcy5iYXJjb2RlID09PSBudWxsKSAge1xyXG4gICAgICAgICAgICAgICAgdGhpcy5iYXJjb2RlID0gcHJvZHVjdC5iYXJjb2RlXHJcbiAgICAgICAgICAgICAgfSAgIFxyXG5cclxuICAgICAgICAgICAgICB0aGlzLmxvYWRlZCA9IHRydWUgXHJcbiAgICAgICAgICAgICAgdGhpcy5lcnJvciA9IGZhbHNlICBcclxuXHJcbiAgICAgICAgICAgICAgLy8gTW92ZSBmb2N1c1xyXG4gICAgICAgICAgICAgIHRoaXMuJHJlZnMucXR5X29uX3NoZWxmLmZvY3VzKCkgICAgICBcclxuXHJcbiAgICAgICAgICB9LCAocmVzcG9uc2UpID0+IHtcclxuICAgICAgICAgICAgICAvLyBlcnJvciBjYWxsYmFja1xyXG4gICAgICAgICAgICAgIHRoaXMuY2xlYXJGb3JtKCk7XHJcbiAgICAgICAgICAgICAgdGhpcy5lcnJvciA9IHRydWU7XHJcbiAgICAgICAgICAgICAgLy9hbGVydCgnc2VydmVyIGVycm9yIGVuY291bnRlcmVkJyk7XHJcbiAgICAgICAgICB9KTtcclxuICAgICAgfSxcclxuICAgICAgYWRqdXN0SXRlbShxdHkpe1xyXG4gICAgICAgIFxyXG4gICAgICAgIGxldCBkYXRhID0geyBcclxuICAgICAgICAgICAgICAvLyBfdG9rZW46IEs5aG9tZXMuY3NyZlRva2VuLCBcclxuICAgICAgICAgICAgICBxdHlfaW5zdG9jazogdGhpcy5hZGp1c3RlZF9xdHlfb25zaGVsZiAtIHRoaXMucXR5X29yZGVyZWRcclxuICAgICAgICAgIH1cclxuXHJcbiAgICAgICAgICB0aGlzLiRodHRwLnBvc3QocGFnZVZhci51cmwgKyAnLycrdGhpcy5wcm9kdWN0X2lkLCBkYXRhICkudGhlbiggKHJlc3BvbnNlKSA9PiB7XHJcbiAgICAgICAgICAgICAgLy8gc3VjY2VzcyBjYWxsYmFja1xyXG4gICAgICAgICAgICAgIC8vIGNvbnNvbGUubG9nKHJlc3BvbnNlKTtcclxuICAgICAgICAgICAgICBsZXQgcHJvZHVjdCA9IHJlc3BvbnNlLmJvZHlcclxuICAgICAgICAgICAgICB0aGlzLnByb2R1Y3RfaWQgPSBwcm9kdWN0LmlkXHJcbiAgICAgICAgICAgICAgdGhpcy5xdHlfYXZhaWxhYmxlID0gcGFyc2VJbnQocHJvZHVjdC5xdHlfaW5zdG9jaylcclxuICAgICAgICAgICAgICB0aGlzLnF0eV9vcmRlcmVkID0gcGFyc2VJbnQocHJvZHVjdC5vcmRlcmVkKVxyXG4gICAgICAgICAgICAgIC8vIGRlZmF1bHQgc2hlbGYgcXR5IHRvIHRoZSBjb21ibyBvZiBib3RoXHJcbiAgICAgICAgICAgICAgdGhpcy5xdHlfb25zaGVsZiA9IHRoaXMucXR5X2F2YWlsYWJsZSArIHRoaXMucXR5X29yZGVyZWRcclxuICAgICAgICAgICAgICB0aGlzLmFkanVzdGVkX3F0eV9vbnNoZWxmID0gdGhpcy5xdHlfb25zaGVsZlxyXG5cclxuICAgICAgICAgICAgICB0aGlzLmNsZWFyRm9ybSgpXHJcbiAgICAgICAgICAgICAgdGhpcy5iYXJjb2RlID0gbnVsbFxyXG4gICAgICAgICAgICAgIHRoaXMucHJvZHVjdF9jb2RlID0gbnVsbFxyXG4gICAgICAgICAgICAgIHRoaXMuJHJlZnMuYmFyY29kZV9pbnB1dC5mb2N1cygpXHJcbiAgICAgICAgICAgICAgXHJcblxyXG4gICAgICAgICAgfSwgKHJlc3BvbnNlKSA9PiB7XHJcbiAgICAgICAgICAgICAgLy8gZXJyb3IgY2FsbGJhY2tcclxuICAgICAgICAgICAgICB0aGlzLmNsZWFyRm9ybSgpO1xyXG4gICAgICAgICAgICAgIHRoaXMuZXJyb3IgPSB0cnVlO1xyXG4gICAgICAgICAgICAgIC8vYWxlcnQoJ3NlcnZlciBlcnJvciBlbmNvdW50ZXJlZCcpO1xyXG4gICAgICAgICAgfSk7XHJcbiAgICAgIH0sXHJcbiAgICAgIFxyXG4gICAgICBjbGVhckZvcm0oKSB7XHJcbiAgICAgICAgdGhpcy5xdHlfYXZhaWxhYmxlID0gMFxyXG4gICAgICAgIHRoaXMucXR5X29yZGVyZWQgPSAwXHJcbiAgICAgICAgdGhpcy5xdHlfb25zaGVsZiA9IDBcclxuICAgICAgICB0aGlzLmFkanVzdGVkX3F0eV9vbnNoZWxmID0gMFxyXG4gICAgICAgIC8vIHRoaXMuYmFyY29kZSA9IG51bGxcclxuICAgICAgICAvLyB0aGlzLnByb2R1Y3RfY29kZSA9IG51bGxcclxuICAgICAgICB0aGlzLnByb2R1Y3RfaWQgPSAwXHJcblxyXG4gICAgICAgIHRoaXMudXBkYXRlZCA9IGZhbHNlXHJcbiAgICAgICAgdGhpcy5sb2FkZWQgPSBmYWxzZVxyXG4gICAgICAgIHRoaXMuZXJyb3IgPSBmYWxzZVxyXG5cclxuICAgICAgICB0aGlzLiRyZWZzLmJhcmNvZGVfaW5wdXQuZm9jdXMoKVxyXG4gICAgICB9XHJcbiAgICBcclxuICB9LFxyXG4gIG1vdW50ZWQoKSB7XHJcbiAgICBcclxuICAgIHRoaXMuJHJlZnMuYmFyY29kZV9pbnB1dC5mb2N1cygpXHJcbiAgICAvL3RoaXMuJG5leHRUaWNrKCgpID0+IHRoaXMuJHJlZnMudXNlcl9pbnB1dC5mb2N1cygpKVxyXG4gICAgXHJcbiAgfVxyXG59XHJcblxyXG5cblxuXG4vLyBXRUJQQUNLIEZPT1RFUiAvL1xuLy8gcmVzb3VyY2VzL2Fzc2V0cy9qcy9jb21wb25lbnRzL1N0b2NrYWRqdXN0ZXIudnVlIl0sIm1hcHBpbmdzIjoiQU
FBQTs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7O0FBK0RBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTs7O0FBR0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBOztBQUVBO0FBQ0E7QUFDQTtBQUNBOzs7QUFHQTtBQUNBO0FBQ0E7QUFDQTs7QUFBQTtBQUNBO0FBQ0E7O0FBRUE7QUFDQTtBQUNBO0FBQ0E7QUFDQTs7O0FBR0E7QUFDQTtBQUNBO0FBQ0E7OztBQUdBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBOztBQUVBO0FBQ0E7QUFDQTs7QUFFQTtBQUNBOztBQUVBO0FBQ0E7QUFDQTs7QUFBQTtBQUNBO0FBQ0E7O0FBRUE7QUFDQTtBQUNBO0FBQ0E7OztBQUdBO0FBQ0E7QUFDQTtBQUNBOztBQUVBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBOztBQUVBO0FBQ0E7O0FBRUE7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTs7O0FBR0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7OztBQUdBO0FBQ0E7QUFDQTsiLCJzb3VyY2VSb290IjoiIn0=");
/***/ },
/* 2 */
/***/ function(module, exports, __webpack_require__) {
eval("exports = module.exports = __webpack_require__(3)();\n// imports\n\n\n// module\nexports.push([module.i, \"\\n.updated {\\r\\n background: #ccffcc;\\n}\\n.loaded {\\r\\n background: #ccccff;\\n}\\n.error {\\r\\n background: #fcc;\\n}\\r\\n\\r\\n\\r\\n\", \"\", {\"version\":3,\"sources\":[\"/./resources/assets/js/components/Stockadjuster.vue?41322c7f\"],\"names\":[],\"mappings\":\";AAgNA;EACA,oBAAA;CAEA;AACA;EACA,oBAAA;CAEA;AACA;EACA,iBAAA;CACA\",\"file\":\"Stockadjuster.vue\",\"sourcesContent\":[\"<template>\\r\\n <div>\\r\\n <h3>Stock Adjuster</h3>\\r\\n \\r\\n <p>Enter Barcode OR a Product Code and then click the <strong>Find item</strong> button</p>\\r\\n <div :class=\\\"{error: error}\\\">{{message}}</div>\\r\\n \\r\\n <div style=\\\"display:flex\\\">\\r\\n <div style=\\\"flex:1\\\">Barcode:</div>\\r\\n <div style=\\\"flex:1\\\"><input \\r\\n name=\\\"barcode\\\" \\r\\n type=\\\"number\\\"\\r\\n :value=\\\"barcode\\\" \\r\\n ref=\\\"barcode_input\\\"\\r\\n @input=\\\"barcodeInput($event.target.value)\\\" \\r\\n @focus=\\\"focusOn($event.target)\\\" \\r\\n style=\\\"width:8em\\\" /></div>\\r\\n </div>\\r\\n\\r\\n <div style=\\\"display:flex;width:100%;\\\">\\r\\n <div style=\\\"flex:1\\\">P.code:</div>\\r\\n <div style=\\\"flex:1\\\"><input \\r\\n name=\\\"product_code\\\" \\r\\n ref=\\\"product_code_input\\\" \\r\\n type=\\\"text\\\":value=\\\"product_code\\\" \\r\\n @input=\\\"productCodeInput($event.target.value)\\\" \\r\\n @focus=\\\"focusOn($event.target)\\\" \\r\\n style=\\\"width:8em\\\" /></div>\\r\\n </div>\\r\\n \\r\\n <div style=\\\"display:flex;width:100%;\\\">\\r\\n <div style=\\\"flex:1\\\"> </div>\\r\\n <div style=\\\"flex:1\\\" ><button :class=\\\"{loaded: loaded,error: error}\\\" @click=\\\"findItem\\\">Find item</button></div>\\r\\n </div>\\r\\n\\r\\n <div style=\\\"border-top:1px dashed #999; margin:15px 0\\\"></div>\\r\\n\\r\\n <div style=\\\"display: flex;width:100%\\\">\\r\\n <div style=\\\"flex:1\\\">Qty on order:</div>\\r\\n <div style=\\\"flex:1\\\">{{ qty_ordered }}</div>\\r\\n </div>\\r\\n\\r\\n <div style=\\\"display:flex;width:100%\\\">\\r\\n <div style=\\\"flex:1\\\">Qty available:</div>\\r\\n <div style=\\\"flex:1\\\">{{ qty_available }}</div>\\r\\n </div>\\r\\n\\r\\n <div style=\\\"display:flex;width:100%\\\">\\r\\n <div style=\\\"flex:1\\\">Qty shelf:</div>\\r\\n <div style=\\\"flex:1\\\">{{ qty_onshelf }}</div>\\r\\n </div>\\r\\n\\r\\n <div style=\\\"display:flex; width:100%; padding:5px\\\">\\r\\n \\r\\n <div style=\\\"flex:1\\\"><input style=\\\"width:4em\\\" type=\\\"text\\\" ref=\\\"qty_on_shelf\\\" v-model=\\\"adjusted_qty_onshelf\\\" /></div>\\r\\n <div style=\\\"flex:2\\\"><button :disabled=\\\"! 
product_id > 0\\\" :class=\\\"{updated: updated}\\\" @click=\\\"adjustItem\\\">Actual shelf qty</button> </div>\\r\\n </div>\\r\\n\\r\\n\\r\\n </div>\\r\\n</template>\\r\\n\\r\\n<script>\\r\\nexport default {\\r\\n data() {\\r\\n return {\\r\\n \\r\\n barcode: null,\\r\\n product_code:null,\\r\\n product_find_key: null,\\r\\n qty_ordered: 0,\\r\\n qty_available: 0,\\r\\n qty_onshelf: 0,\\r\\n adjusted_qty_onshelf: 0,\\r\\n product_id: 0,\\r\\n updated: false,\\r\\n loaded: false,\\r\\n error: false,\\r\\n message: null\\r\\n }\\r\\n },\\r\\n methods: {\\r\\n barcodeInput(val){\\r\\n let input = val.toString().trim()\\r\\n // remove checksum digit if present\\r\\n\\r\\n this.barcode = input.substring(0,12)\\r\\n\\r\\n if(this.barcode.length == 12 && !this.loaded){\\r\\n this.findItem()\\r\\n } else {\\r\\n this.clearForm()\\r\\n }\\r\\n },\\r\\n productCodeInput(val){\\r\\n this.product_code = val.toString().trim()\\r\\n },\\r\\n focusOn(input){\\r\\n //console.log('focus is on ',input.name)\\r\\n if(this.loaded){\\r\\n this.clearForm()\\r\\n this.barcode = null\\r\\n this.product_code = null\\r\\n //this.$forceUpdate()\\r\\n //this.$nextTick(() => this.$forceUpdate())\\r\\n }\\r\\n },\\r\\n \\r\\n findItem() {\\r\\n \\r\\n let data = { \\r\\n // _token: K9homes.csrfToken, \\r\\n barcode: (this.barcode !== null && this.barcode.length == 12 ) ? this.barcode : null,\\r\\n product_code: this.product_code\\r\\n }\\r\\n\\r\\n this.$http.post(pageVar.url + '/find', data ).then( (response) => {\\r\\n // success callback\\r\\n // console.log(response);\\r\\n let product = response.body\\r\\n this.product_id = product.id\\r\\n this.qty_available = parseInt(product.qty_instock)\\r\\n this.qty_ordered = parseInt(product.ordered)\\r\\n // default shelf qty to the combo of both\\r\\n \\r\\n this.qty_onshelf = parseInt(product.qty_instock) + parseInt(product.ordered)\\r\\n this.adjusted_qty_onshelf = this.qty_onshelf\\r\\n\\r\\n if(this.product_code === null) {\\r\\n this.product_code = product.product_code\\r\\n }\\r\\n if(this.barcode === null) {\\r\\n this.barcode = product.barcode\\r\\n } \\r\\n\\r\\n this.loaded = true \\r\\n this.error = false \\r\\n\\r\\n // Move focus\\r\\n this.$refs.qty_on_shelf.focus() \\r\\n\\r\\n }, (response) => {\\r\\n // error callback\\r\\n this.clearForm();\\r\\n this.error = true;\\r\\n //alert('server error encountered');\\r\\n });\\r\\n },\\r\\n adjustItem(qty){\\r\\n \\r\\n let data = { \\r\\n // _token: K9homes.csrfToken, \\r\\n qty_instock: this.adjusted_qty_onshelf - this.qty_ordered\\r\\n }\\r\\n\\r\\n this.$http.post(pageVar.url + '/'+this.product_id, data ).then( (response) => {\\r\\n // success callback\\r\\n // console.log(response);\\r\\n let product = response.body\\r\\n this.product_id = product.id\\r\\n this.qty_available = parseInt(product.qty_instock)\\r\\n this.qty_ordered = parseInt(product.ordered)\\r\\n // default shelf qty to the combo of both\\r\\n this.qty_onshelf = this.qty_available + this.qty_ordered\\r\\n this.adjusted_qty_onshelf = this.qty_onshelf\\r\\n\\r\\n this.clearForm()\\r\\n this.barcode = null\\r\\n this.product_code = null\\r\\n this.$refs.barcode_input.focus()\\r\\n \\r\\n\\r\\n }, (response) => {\\r\\n // error callback\\r\\n this.clearForm();\\r\\n this.error = true;\\r\\n //alert('server error encountered');\\r\\n });\\r\\n },\\r\\n \\r\\n clearForm() {\\r\\n this.qty_available = 0\\r\\n this.qty_ordered = 0\\r\\n this.qty_onshelf = 0\\r\\n this.adjusted_qty_onshelf = 0\\r\\n // this.barcode = null\\r\\n // this.product_code = 
null\\r\\n this.product_id = 0\\r\\n\\r\\n this.updated = false\\r\\n this.loaded = false\\r\\n this.error = false\\r\\n\\r\\n this.$refs.barcode_input.focus()\\r\\n }\\r\\n \\r\\n },\\r\\n mounted() {\\r\\n \\r\\n this.$refs.barcode_input.focus()\\r\\n //this.$nextTick(() => this.$refs.user_input.focus())\\r\\n \\r\\n }\\r\\n}\\r\\n\\r\\n</script>\\r\\n\\r\\n<style>\\r\\n.updated {\\r\\n background: #ccffcc;\\r\\n \\r\\n}\\r\\n.loaded {\\r\\n background: #ccccff;\\r\\n\\r\\n}\\r\\n.error {\\r\\n background: #fcc;\\r\\n}\\r\\n\\r\\n\\r\\n</style>\"],\"sourceRoot\":\"webpack://\"}]);\n\n// exports\n//# sourceMappingURL=data:application/json;charset=utf-8;base64,{"version":3,"file":"2.js","sources":["webpack:///./resources/assets/js/components/Stockadjuster.vue?169b"],"sourcesContent":["exports = module.exports = require(\"../../../../node_modules/css-loader/lib/css-base.js\")();\n// imports\n\n\n// module\nexports.push([module.id, \"\\n.updated {\\r\\n  background: #ccffcc;\\n}\\n.loaded {\\r\\n  background: #ccccff;\\n}\\n.error {\\r\\n  background: #fcc;\\n}\\r\\n\\r\\n\\r\\n\", \"\", {\"version\":3,\"sources\":[\"/./resources/assets/js/components/Stockadjuster.vue?41322c7f\"],\"names\":[],\"mappings\":\";AAgNA;EACA,oBAAA;CAEA;AACA;EACA,oBAAA;CAEA;AACA;EACA,iBAAA;CACA\",\"file\":\"Stockadjuster.vue\",\"sourcesContent\":[\"<template>\\r\\n  <div>\\r\\n  <h3>Stock Adjuster</h3>\\r\\n  \\r\\n  <p>Enter Barcode OR a Product Code and then click the <strong>Find item</strong> button</p>\\r\\n  <div :class=\\\"{error: error}\\\">{{message}}</div>\\r\\n  \\r\\n    <div style=\\\"display:flex\\\">\\r\\n      <div style=\\\"flex:1\\\">Barcode:</div>\\r\\n      <div style=\\\"flex:1\\\"><input \\r\\n        name=\\\"barcode\\\" \\r\\n        type=\\\"number\\\"\\r\\n        :value=\\\"barcode\\\" \\r\\n        ref=\\\"barcode_input\\\"\\r\\n        @input=\\\"barcodeInput($event.target.value)\\\" \\r\\n        @focus=\\\"focusOn($event.target)\\\" \\r\\n        style=\\\"width:8em\\\"  /></div>\\r\\n    </div>\\r\\n\\r\\n    <div style=\\\"display:flex;width:100%;\\\">\\r\\n      <div style=\\\"flex:1\\\">P.code:</div>\\r\\n      <div style=\\\"flex:1\\\"><input \\r\\n        name=\\\"product_code\\\" \\r\\n        ref=\\\"product_code_input\\\" \\r\\n        type=\\\"text\\\":value=\\\"product_code\\\" \\r\\n        @input=\\\"productCodeInput($event.target.value)\\\" \\r\\n        @focus=\\\"focusOn($event.target)\\\" \\r\\n        style=\\\"width:8em\\\" /></div>\\r\\n    </div>\\r\\n    \\r\\n    <div style=\\\"display:flex;width:100%;\\\">\\r\\n      <div style=\\\"flex:1\\\">&nbsp;</div>\\r\\n      <div style=\\\"flex:1\\\" ><button :class=\\\"{loaded: loaded,error: error}\\\" @click=\\\"findItem\\\">Find item</button></div>\\r\\n    </div>\\r\\n\\r\\n    <div style=\\\"border-top:1px dashed #999; margin:15px 0\\\"></div>\\r\\n\\r\\n    <div style=\\\"display: flex;width:100%\\\">\\r\\n      <div style=\\\"flex:1\\\">Qty on order:</div>\\r\\n      <div style=\\\"flex:1\\\">{{ qty_ordered }}</div>\\r\\n    </div>\\r\\n\\r\\n    <div style=\\\"display:flex;width:100%\\\">\\r\\n      <div style=\\\"flex:1\\\">Qty available:</div>\\r\\n      <div style=\\\"flex:1\\\">{{ qty_available }}</div>\\r\\n    </div>\\r\\n\\r\\n    <div style=\\\"display:flex;width:100%\\\">\\r\\n      <div style=\\\"flex:1\\\">Qty shelf:</div>\\r\\n      <div style=\\\"flex:1\\\">{{ qty_onshelf }}</div>\\r\\n    </div>\\r\\n\\r\\n    <div  style=\\\"display:flex; width:100%; padding:5px\\\">\\r\\n      \\r\\n      <div 
style=\\\"flex:1\\\"><input style=\\\"width:4em\\\" type=\\\"text\\\" ref=\\\"qty_on_shelf\\\" v-model=\\\"adjusted_qty_onshelf\\\" /></div>\\r\\n      <div style=\\\"flex:2\\\"><button :disabled=\\\"! product_id > 0\\\" :class=\\\"{updated: updated}\\\" @click=\\\"adjustItem\\\">Actual shelf qty</button>&nbsp;</div>\\r\\n    </div>\\r\\n\\r\\n\\r\\n  </div>\\r\\n</template>\\r\\n\\r\\n<script>\\r\\nexport default {\\r\\n  data() {\\r\\n    return {\\r\\n      \\r\\n      barcode: null,\\r\\n      product_code:null,\\r\\n      product_find_key: null,\\r\\n      qty_ordered: 0,\\r\\n      qty_available: 0,\\r\\n      qty_onshelf: 0,\\r\\n      adjusted_qty_onshelf: 0,\\r\\n      product_id: 0,\\r\\n      updated: false,\\r\\n      loaded: false,\\r\\n      error: false,\\r\\n      message: null\\r\\n    }\\r\\n  },\\r\\n  methods: {\\r\\n    barcodeInput(val){\\r\\n      let input = val.toString().trim()\\r\\n      // remove checksum digit if present\\r\\n\\r\\n      this.barcode = input.substring(0,12)\\r\\n\\r\\n      if(this.barcode.length == 12 && !this.loaded){\\r\\n        this.findItem()\\r\\n      } else {\\r\\n        this.clearForm()\\r\\n      }\\r\\n    },\\r\\n    productCodeInput(val){\\r\\n      this.product_code = val.toString().trim()\\r\\n    },\\r\\n    focusOn(input){\\r\\n      //console.log('focus is on ',input.name)\\r\\n      if(this.loaded){\\r\\n        this.clearForm()\\r\\n        this.barcode = null\\r\\n        this.product_code = null\\r\\n        //this.$forceUpdate()\\r\\n        //this.$nextTick(() => this.$forceUpdate())\\r\\n      }\\r\\n    },\\r\\n   \\r\\n    findItem() {\\r\\n          \\r\\n          let data = { \\r\\n              // _token: K9homes.csrfToken, \\r\\n              barcode: (this.barcode !== null && this.barcode.length == 12 ) ? 
this.barcode : null,\\r\\n              product_code: this.product_code\\r\\n          }\\r\\n\\r\\n          this.$http.post(pageVar.url + '/find', data ).then( (response) => {\\r\\n              // success callback\\r\\n              // console.log(response);\\r\\n              let product = response.body\\r\\n              this.product_id = product.id\\r\\n              this.qty_available = parseInt(product.qty_instock)\\r\\n              this.qty_ordered = parseInt(product.ordered)\\r\\n              // default shelf qty to the combo of both\\r\\n              \\r\\n              this.qty_onshelf = parseInt(product.qty_instock) + parseInt(product.ordered)\\r\\n              this.adjusted_qty_onshelf = this.qty_onshelf\\r\\n\\r\\n              if(this.product_code === null)  {\\r\\n                this.product_code = product.product_code\\r\\n              }\\r\\n              if(this.barcode === null)  {\\r\\n                this.barcode = product.barcode\\r\\n              }   \\r\\n\\r\\n              this.loaded = true \\r\\n              this.error = false  \\r\\n\\r\\n              // Move focus\\r\\n              this.$refs.qty_on_shelf.focus()      \\r\\n\\r\\n          }, (response) => {\\r\\n              // error callback\\r\\n              this.clearForm();\\r\\n              this.error = true;\\r\\n              //alert('server error encountered');\\r\\n          });\\r\\n      },\\r\\n      adjustItem(qty){\\r\\n        \\r\\n        let data = { \\r\\n              // _token: K9homes.csrfToken, \\r\\n              qty_instock: this.adjusted_qty_onshelf - this.qty_ordered\\r\\n          }\\r\\n\\r\\n          this.$http.post(pageVar.url + '/'+this.product_id, data ).then( (response) => {\\r\\n              // success callback\\r\\n              // console.log(response);\\r\\n              let product = response.body\\r\\n              this.product_id = product.id\\r\\n              this.qty_available = parseInt(product.qty_instock)\\r\\n              this.qty_ordered = parseInt(product.ordered)\\r\\n              // default shelf qty to the combo of both\\r\\n              this.qty_onshelf = this.qty_available + this.qty_ordered\\r\\n              this.adjusted_qty_onshelf = this.qty_onshelf\\r\\n\\r\\n              this.clearForm()\\r\\n              this.barcode = null\\r\\n              this.product_code = null\\r\\n              this.$refs.barcode_input.focus()\\r\\n              \\r\\n\\r\\n          }, (response) => {\\r\\n              // error callback\\r\\n              this.clearForm();\\r\\n              this.error = true;\\r\\n              //alert('server error encountered');\\r\\n          });\\r\\n      },\\r\\n      \\r\\n      clearForm() {\\r\\n        this.qty_available = 0\\r\\n        this.qty_ordered = 0\\r\\n        this.qty_onshelf = 0\\r\\n        this.adjusted_qty_onshelf = 0\\r\\n        // this.barcode = null\\r\\n        // this.product_code = null\\r\\n        this.product_id = 0\\r\\n\\r\\n        this.updated = false\\r\\n        this.loaded = false\\r\\n        this.error = false\\r\\n\\r\\n        this.$refs.barcode_input.focus()\\r\\n      }\\r\\n    \\r\\n  },\\r\\n  mounted() {\\r\\n    \\r\\n    this.$refs.barcode_input.focus()\\r\\n    //this.$nextTick(() => this.$refs.user_input.focus())\\r\\n    \\r\\n  }\\r\\n}\\r\\n\\r\\n</script>\\r\\n\\r\\n<style>\\r\\n.updated {\\r\\n  background: #ccffcc;\\r\\n \\r\\n}\\r\\n.loaded {\\r\\n  background: #ccccff;\\r\\n\\r\\n}\\r\\n.error {\\r\\n  background: 
#fcc;\\r\\n}\\r\\n\\r\\n\\r\\n</style>\"],\"sourceRoot\":\"webpack://\"}]);\n\n// exports\n\n\n\n//////////////////\n// WEBPACK FOOTER\n// ./~/css-loader?sourceMap!./~/vue-loader/lib/style-rewriter.js?id=data-v-eaeb90c4!./~/vue-loader/lib/selector.js?type=styles&index=0!./resources/assets/js/components/Stockadjuster.vue\n// module id = 2\n// module chunks = 0"],"mappings":"AAAA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;","sourceRoot":""}");
/***/ },
/* 3 */
/***/ function(module, exports) {
eval("/*\r\n\tMIT License http://www.opensource.org/licenses/mit-license.php\r\n\tAuthor Tobias Koppers @sokra\r\n*/\r\n// css base code, injected by the css-loader\r\nmodule.exports = function() {\r\n\tvar list = [];\r\n\r\n\t// return the list of modules as css string\r\n\tlist.toString = function toString() {\r\n\t\tvar result = [];\r\n\t\tfor(var i = 0; i < this.length; i++) {\r\n\t\t\tvar item = this[i];\r\n\t\t\tif(item[2]) {\r\n\t\t\t\tresult.push(\"@media \" + item[2] + \"{\" + item[1] + \"}\");\r\n\t\t\t} else {\r\n\t\t\t\tresult.push(item[1]);\r\n\t\t\t}\r\n\t\t}\r\n\t\treturn result.join(\"\");\r\n\t};\r\n\r\n\t// import a list of modules into the list\r\n\tlist.i = function(modules, mediaQuery) {\r\n\t\tif(typeof modules === \"string\")\r\n\t\t\tmodules = [[null, modules, \"\"]];\r\n\t\tvar alreadyImportedModules = {};\r\n\t\tfor(var i = 0; i < this.length; i++) {\r\n\t\t\tvar id = this[i][0];\r\n\t\t\tif(typeof id === \"number\")\r\n\t\t\t\talreadyImportedModules[id] = true;\r\n\t\t}\r\n\t\tfor(i = 0; i < modules.length; i++) {\r\n\t\t\tvar item = modules[i];\r\n\t\t\t// skip already imported module\r\n\t\t\t// this implementation is not 100% perfect for weird media query combinations\r\n\t\t\t// when a module is imported multiple times with different media queries.\r\n\t\t\t// I hope this will never occur (Hey this way we have smaller bundles)\r\n\t\t\tif(typeof item[0] !== \"number\" || !alreadyImportedModules[item[0]]) {\r\n\t\t\t\tif(mediaQuery && !item[2]) {\r\n\t\t\t\t\titem[2] = mediaQuery;\r\n\t\t\t\t} else if(mediaQuery) {\r\n\t\t\t\t\titem[2] = \"(\" + item[2] + \") and (\" + mediaQuery + \")\";\r\n\t\t\t\t}\r\n\t\t\t\tlist.push(item);\r\n\t\t\t}\r\n\t\t}\r\n\t};\r\n\treturn list;\r\n};\r\n//# sourceMappingURL=data:application/json;charset=utf-8;base64,eyJ2ZXJzaW9uIjozLCJmaWxlIjoiMy5qcyIsInNvdXJjZXMiOlsid2VicGFjazovLy8uL34vY3NzLWxvYWRlci9saWIvY3NzLWJhc2UuanM/ZGEwNCJdLCJzb3VyY2VzQ29udGVudCI6WyIvKlxyXG5cdE1JVCBMaWNlbnNlIGh0dHA6Ly93d3cub3BlbnNvdXJjZS5vcmcvbGljZW5zZXMvbWl0LWxpY2Vuc2UucGhwXHJcblx0QXV0aG9yIFRvYmlhcyBLb3BwZXJzIEBzb2tyYVxyXG4qL1xyXG4vLyBjc3MgYmFzZSBjb2RlLCBpbmplY3RlZCBieSB0aGUgY3NzLWxvYWRlclxyXG5tb2R1bGUuZXhwb3J0cyA9IGZ1bmN0aW9uKCkge1xyXG5cdHZhciBsaXN0ID0gW107XHJcblxyXG5cdC8vIHJldHVybiB0aGUgbGlzdCBvZiBtb2R1bGVzIGFzIGNzcyBzdHJpbmdcclxuXHRsaXN0LnRvU3RyaW5nID0gZnVuY3Rpb24gdG9TdHJpbmcoKSB7XHJcblx0XHR2YXIgcmVzdWx0ID0gW107XHJcblx0XHRmb3IodmFyIGkgPSAwOyBpIDwgdGhpcy5sZW5ndGg7IGkrKykge1xyXG5cdFx0XHR2YXIgaXRlbSA9IHRoaXNbaV07XHJcblx0XHRcdGlmKGl0ZW1bMl0pIHtcclxuXHRcdFx0XHRyZXN1bHQucHVzaChcIkBtZWRpYSBcIiArIGl0ZW1bMl0gKyBcIntcIiArIGl0ZW1bMV0gKyBcIn1cIik7XHJcblx0XHRcdH0gZWxzZSB7XHJcblx0XHRcdFx0cmVzdWx0LnB1c2goaXRlbVsxXSk7XHJcblx0XHRcdH1cclxuXHRcdH1cclxuXHRcdHJldHVybiByZXN1bHQuam9pbihcIlwiKTtcclxuXHR9O1xyXG5cclxuXHQvLyBpbXBvcnQgYSBsaXN0IG9mIG1vZHVsZXMgaW50byB0aGUgbGlzdFxyXG5cdGxpc3QuaSA9IGZ1bmN0aW9uKG1vZHVsZXMsIG1lZGlhUXVlcnkpIHtcclxuXHRcdGlmKHR5cGVvZiBtb2R1bGVzID09PSBcInN0cmluZ1wiKVxyXG5cdFx0XHRtb2R1bGVzID0gW1tudWxsLCBtb2R1bGVzLCBcIlwiXV07XHJcblx0XHR2YXIgYWxyZWFkeUltcG9ydGVkTW9kdWxlcyA9IHt9O1xyXG5cdFx0Zm9yKHZhciBpID0gMDsgaSA8IHRoaXMubGVuZ3RoOyBpKyspIHtcclxuXHRcdFx0dmFyIGlkID0gdGhpc1tpXVswXTtcclxuXHRcdFx0aWYodHlwZW9mIGlkID09PSBcIm51bWJlclwiKVxyXG5cdFx0XHRcdGFscmVhZHlJbXBvcnRlZE1vZHVsZXNbaWRdID0gdHJ1ZTtcclxuXHRcdH1cclxuXHRcdGZvcihpID0gMDsgaSA8IG1vZHVsZXMubGVuZ3RoOyBpKyspIHtcclxuXHRcdFx0dmFyIGl0ZW0gPSBtb2R1bGVzW2ldO1xyXG5cdFx0XHQvLyBza2lwIGFscmVhZHkgaW1wb3J0ZWQgbW9kdWxlXHJcblx0XHRcdC8vIHRoaXMgaW1wbGVtZW50YXRpb24gaXMgbm90IDEwMCUgcGVyZmVjdCBmb3Igd2VpcmQgbWVkaWEgcXVlcnkgY29tYmluY
XRpb25zXHJcblx0XHRcdC8vICB3aGVuIGEgbW9kdWxlIGlzIGltcG9ydGVkIG11bHRpcGxlIHRpbWVzIHdpdGggZGlmZmVyZW50IG1lZGlhIHF1ZXJpZXMuXHJcblx0XHRcdC8vICBJIGhvcGUgdGhpcyB3aWxsIG5ldmVyIG9jY3VyIChIZXkgdGhpcyB3YXkgd2UgaGF2ZSBzbWFsbGVyIGJ1bmRsZXMpXHJcblx0XHRcdGlmKHR5cGVvZiBpdGVtWzBdICE9PSBcIm51bWJlclwiIHx8ICFhbHJlYWR5SW1wb3J0ZWRNb2R1bGVzW2l0ZW1bMF1dKSB7XHJcblx0XHRcdFx0aWYobWVkaWFRdWVyeSAmJiAhaXRlbVsyXSkge1xyXG5cdFx0XHRcdFx0aXRlbVsyXSA9IG1lZGlhUXVlcnk7XHJcblx0XHRcdFx0fSBlbHNlIGlmKG1lZGlhUXVlcnkpIHtcclxuXHRcdFx0XHRcdGl0ZW1bMl0gPSBcIihcIiArIGl0ZW1bMl0gKyBcIikgYW5kIChcIiArIG1lZGlhUXVlcnkgKyBcIilcIjtcclxuXHRcdFx0XHR9XHJcblx0XHRcdFx0bGlzdC5wdXNoKGl0ZW0pO1xyXG5cdFx0XHR9XHJcblx0XHR9XHJcblx0fTtcclxuXHRyZXR1cm4gbGlzdDtcclxufTtcclxuXG5cblxuLy8vLy8vLy8vLy8vLy8vLy8vXG4vLyBXRUJQQUNLIEZPT1RFUlxuLy8gLi9+L2Nzcy1sb2FkZXIvbGliL2Nzcy1iYXNlLmpzXG4vLyBtb2R1bGUgaWQgPSAzXG4vLyBtb2R1bGUgY2h1bmtzID0gMCJdLCJtYXBwaW5ncyI6IkFBQUE7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTsiLCJzb3VyY2VSb290IjoiIn0=");
/***/ },
/* 4 */
/***/ function(module, exports, __webpack_require__) {
eval("module.exports={render:function (){var _vm=this;var _h=_vm.$createElement;var _c=_vm._self._c||_h;\n return _c('div', [_c('h3', [_vm._v(\"Stock Adjuster\")]), _vm._v(\" \"), _vm._m(0), _vm._v(\" \"), _c('div', {\n class: {\n error: _vm.error\n }\n }, [_vm._v(_vm._s(_vm.message))]), _vm._v(\" \"), _c('div', {\n staticStyle: {\n \"display\": \"flex\"\n }\n }, [_c('div', {\n staticStyle: {\n \"flex\": \"1\"\n }\n }, [_vm._v(\"Barcode:\")]), _vm._v(\" \"), _c('div', {\n staticStyle: {\n \"flex\": \"1\"\n }\n }, [_c('input', {\n ref: \"barcode_input\",\n staticStyle: {\n \"width\": \"8em\"\n },\n attrs: {\n \"name\": \"barcode\",\n \"type\": \"number\"\n },\n domProps: {\n \"value\": _vm.barcode\n },\n on: {\n \"input\": function($event) {\n return _vm.barcodeInput($event.target.value)\n },\n \"focus\": function($event) {\n return _vm.focusOn($event.target)\n }\n }\n })])]), _vm._v(\" \"), _c('div', {\n staticStyle: {\n \"display\": \"flex\",\n \"width\": \"100%\"\n }\n }, [_c('div', {\n staticStyle: {\n \"flex\": \"1\"\n }\n }, [_vm._v(\"P.code:\")]), _vm._v(\" \"), _c('div', {\n staticStyle: {\n \"flex\": \"1\"\n }\n }, [_c('input', {\n ref: \"product_code_input\",\n staticStyle: {\n \"width\": \"8em\"\n },\n attrs: {\n \"name\": \"product_code\",\n \"type\": \"text\"\n },\n domProps: {\n \"value\": _vm.product_code\n },\n on: {\n \"input\": function($event) {\n return _vm.productCodeInput($event.target.value)\n },\n \"focus\": function($event) {\n return _vm.focusOn($event.target)\n }\n }\n })])]), _vm._v(\" \"), _c('div', {\n staticStyle: {\n \"display\": \"flex\",\n \"width\": \"100%\"\n }\n }, [_c('div', {\n staticStyle: {\n \"flex\": \"1\"\n }\n }, [_vm._v(\" \")]), _vm._v(\" \"), _c('div', {\n staticStyle: {\n \"flex\": \"1\"\n }\n }, [_c('button', {\n class: {\n loaded: _vm.loaded, error: _vm.error\n },\n on: {\n \"click\": _vm.findItem\n }\n }, [_vm._v(\"Find item\")])])]), _vm._v(\" \"), _c('div', {\n staticStyle: {\n \"border-top\": \"1px dashed #999\",\n \"margin\": \"15px 0\"\n }\n }), _vm._v(\" \"), _c('div', {\n staticStyle: {\n \"display\": \"flex\",\n \"width\": \"100%\"\n }\n }, [_c('div', {\n staticStyle: {\n \"flex\": \"1\"\n }\n }, [_vm._v(\"Qty on order:\")]), _vm._v(\" \"), _c('div', {\n staticStyle: {\n \"flex\": \"1\"\n }\n }, [_vm._v(_vm._s(_vm.qty_ordered))])]), _vm._v(\" \"), _c('div', {\n staticStyle: {\n \"display\": \"flex\",\n \"width\": \"100%\"\n }\n }, [_c('div', {\n staticStyle: {\n \"flex\": \"1\"\n }\n }, [_vm._v(\"Qty available:\")]), _vm._v(\" \"), _c('div', {\n staticStyle: {\n \"flex\": \"1\"\n }\n }, [_vm._v(_vm._s(_vm.qty_available))])]), _vm._v(\" \"), _c('div', {\n staticStyle: {\n \"display\": \"flex\",\n \"width\": \"100%\"\n }\n }, [_c('div', {\n staticStyle: {\n \"flex\": \"1\"\n }\n }, [_vm._v(\"Qty shelf:\")]), _vm._v(\" \"), _c('div', {\n staticStyle: {\n \"flex\": \"1\"\n }\n }, [_vm._v(_vm._s(_vm.qty_onshelf))])]), _vm._v(\" \"), _c('div', {\n staticStyle: {\n \"display\": \"flex\",\n \"width\": \"100%\",\n \"padding\": \"5px\"\n }\n }, [_c('div', {\n staticStyle: {\n \"flex\": \"1\"\n }\n }, [_c('input', {\n directives: [{\n name: \"model\",\n rawName: \"v-model\",\n value: (_vm.adjusted_qty_onshelf),\n expression: \"adjusted_qty_onshelf\"\n }],\n ref: \"qty_on_shelf\",\n staticStyle: {\n \"width\": \"4em\"\n },\n attrs: {\n \"type\": \"text\"\n },\n domProps: {\n \"value\": (_vm.adjusted_qty_onshelf)\n },\n on: {\n \"input\": function($event) {\n if ($event.target.composing) { return; }\n _vm.adjusted_qty_onshelf = 
$event.target.value\n }\n }\n })]), _vm._v(\" \"), _c('div', {\n staticStyle: {\n \"flex\": \"2\"\n }\n }, [_c('button', {\n class: {\n updated: _vm.updated\n },\n attrs: {\n \"disabled\": !_vm.product_id > 0\n },\n on: {\n \"click\": _vm.adjustItem\n }\n }, [_vm._v(\"Actual shelf qty\")]), _vm._v(\" \")])])])\n},staticRenderFns: [function (){var _vm=this;var _h=_vm.$createElement;var _c=_vm._self._c||_h;\n return _c('p', [_vm._v(\"Enter Barcode OR a Product Code and then click the \"), _c('strong', [_vm._v(\"Find item\")]), _vm._v(\" button\")])\n}]}\nif (false) {\n module.hot.accept()\n if (module.hot.data) {\n require(\"vue-hot-reload-api\").rerender(\"data-v-eaeb90c4\", module.exports)\n }\n}//# sourceMappingURL=data:application/json;charset=utf-8;base64,{"version":3,"file":"4.js","sources":["webpack:///./resources/assets/js/components/Stockadjuster.vue?2eca"],"sourcesContent":["module.exports={render:function (){var _vm=this;var _h=_vm.$createElement;var _c=_vm._self._c||_h;\n  return _c('div', [_c('h3', [_vm._v(\"Stock Adjuster\")]), _vm._v(\" \"), _vm._m(0), _vm._v(\" \"), _c('div', {\n    class: {\n      error: _vm.error\n    }\n  }, [_vm._v(_vm._s(_vm.message))]), _vm._v(\" \"), _c('div', {\n    staticStyle: {\n      \"display\": \"flex\"\n    }\n  }, [_c('div', {\n    staticStyle: {\n      \"flex\": \"1\"\n    }\n  }, [_vm._v(\"Barcode:\")]), _vm._v(\" \"), _c('div', {\n    staticStyle: {\n      \"flex\": \"1\"\n    }\n  }, [_c('input', {\n    ref: \"barcode_input\",\n    staticStyle: {\n      \"width\": \"8em\"\n    },\n    attrs: {\n      \"name\": \"barcode\",\n      \"type\": \"number\"\n    },\n    domProps: {\n      \"value\": _vm.barcode\n    },\n    on: {\n      \"input\": function($event) {\n        return _vm.barcodeInput($event.target.value)\n      },\n      \"focus\": function($event) {\n        return _vm.focusOn($event.target)\n      }\n    }\n  })])]), _vm._v(\" \"), _c('div', {\n    staticStyle: {\n      \"display\": \"flex\",\n      \"width\": \"100%\"\n    }\n  }, [_c('div', {\n    staticStyle: {\n      \"flex\": \"1\"\n    }\n  }, [_vm._v(\"P.code:\")]), _vm._v(\" \"), _c('div', {\n    staticStyle: {\n      \"flex\": \"1\"\n    }\n  }, [_c('input', {\n    ref: \"product_code_input\",\n    staticStyle: {\n      \"width\": \"8em\"\n    },\n    attrs: {\n      \"name\": \"product_code\",\n      \"type\": \"text\"\n    },\n    domProps: {\n      \"value\": _vm.product_code\n    },\n    on: {\n      \"input\": function($event) {\n        return _vm.productCodeInput($event.target.value)\n      },\n      \"focus\": function($event) {\n        return _vm.focusOn($event.target)\n      }\n    }\n  })])]), _vm._v(\" \"), _c('div', {\n    staticStyle: {\n      \"display\": \"flex\",\n      \"width\": \"100%\"\n    }\n  }, [_c('div', {\n    staticStyle: {\n      \"flex\": \"1\"\n    }\n  }, [_vm._v(\" \")]), _vm._v(\" \"), _c('div', {\n    staticStyle: {\n      \"flex\": \"1\"\n    }\n  }, [_c('button', {\n    class: {\n      loaded: _vm.loaded, error: _vm.error\n    },\n    on: {\n      \"click\": _vm.findItem\n    }\n  }, [_vm._v(\"Find item\")])])]), _vm._v(\" \"), _c('div', {\n    staticStyle: {\n      \"border-top\": \"1px dashed #999\",\n      \"margin\": \"15px 0\"\n    }\n  }), _vm._v(\" \"), _c('div', {\n    staticStyle: {\n      \"display\": \"flex\",\n      \"width\": \"100%\"\n    }\n  }, [_c('div', {\n    staticStyle: {\n      \"flex\": \"1\"\n    }\n  }, [_vm._v(\"Qty on order:\")]), _vm._v(\" \"), _c('div', {\n    staticStyle: {\n      \"flex\": \"1\"\n    
}\n  }, [_vm._v(_vm._s(_vm.qty_ordered))])]), _vm._v(\" \"), _c('div', {\n    staticStyle: {\n      \"display\": \"flex\",\n      \"width\": \"100%\"\n    }\n  }, [_c('div', {\n    staticStyle: {\n      \"flex\": \"1\"\n    }\n  }, [_vm._v(\"Qty available:\")]), _vm._v(\" \"), _c('div', {\n    staticStyle: {\n      \"flex\": \"1\"\n    }\n  }, [_vm._v(_vm._s(_vm.qty_available))])]), _vm._v(\" \"), _c('div', {\n    staticStyle: {\n      \"display\": \"flex\",\n      \"width\": \"100%\"\n    }\n  }, [_c('div', {\n    staticStyle: {\n      \"flex\": \"1\"\n    }\n  }, [_vm._v(\"Qty shelf:\")]), _vm._v(\" \"), _c('div', {\n    staticStyle: {\n      \"flex\": \"1\"\n    }\n  }, [_vm._v(_vm._s(_vm.qty_onshelf))])]), _vm._v(\" \"), _c('div', {\n    staticStyle: {\n      \"display\": \"flex\",\n      \"width\": \"100%\",\n      \"padding\": \"5px\"\n    }\n  }, [_c('div', {\n    staticStyle: {\n      \"flex\": \"1\"\n    }\n  }, [_c('input', {\n    directives: [{\n      name: \"model\",\n      rawName: \"v-model\",\n      value: (_vm.adjusted_qty_onshelf),\n      expression: \"adjusted_qty_onshelf\"\n    }],\n    ref: \"qty_on_shelf\",\n    staticStyle: {\n      \"width\": \"4em\"\n    },\n    attrs: {\n      \"type\": \"text\"\n    },\n    domProps: {\n      \"value\": (_vm.adjusted_qty_onshelf)\n    },\n    on: {\n      \"input\": function($event) {\n        if ($event.target.composing) { return; }\n        _vm.adjusted_qty_onshelf = $event.target.value\n      }\n    }\n  })]), _vm._v(\" \"), _c('div', {\n    staticStyle: {\n      \"flex\": \"2\"\n    }\n  }, [_c('button', {\n    class: {\n      updated: _vm.updated\n    },\n    attrs: {\n      \"disabled\": !_vm.product_id > 0\n    },\n    on: {\n      \"click\": _vm.adjustItem\n    }\n  }, [_vm._v(\"Actual shelf qty\")]), _vm._v(\" \")])])])\n},staticRenderFns: [function (){var _vm=this;var _h=_vm.$createElement;var _c=_vm._self._c||_h;\n  return _c('p', [_vm._v(\"Enter Barcode OR a Product Code and then click the \"), _c('strong', [_vm._v(\"Find item\")]), _vm._v(\" button\")])\n}]}\nif (module.hot) {\n  module.hot.accept()\n  if (module.hot.data) {\n     require(\"vue-hot-reload-api\").rerender(\"data-v-eaeb90c4\", module.exports)\n  }\n}\n\n\n//////////////////\n// WEBPACK FOOTER\n// ./~/vue-loader/lib/template-compiler.js?id=data-v-eaeb90c4!./~/vue-loader/lib/selector.js?type=template&index=0!./resources/assets/js/components/Stockadjuster.vue\n// module id = 4\n// module chunks = 0"],"mappings":"AAAA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA","sourceRoot":""}");
/***/ },
/* 5 */
/***/ function(module, exports) {
eval("/*\n\tMIT License http://www.opensource.org/licenses/mit-license.php\n\tAuthor Tobias Koppers @sokra\n*/\nvar stylesInDom = {},\n\tmemoize = function(fn) {\n\t\tvar memo;\n\t\treturn function () {\n\t\t\tif (typeof memo === \"undefined\") memo = fn.apply(this, arguments);\n\t\t\treturn memo;\n\t\t};\n\t},\n\tisOldIE = memoize(function() {\n\t\treturn /msie [6-9]\\b/.test(window.navigator.userAgent.toLowerCase());\n\t}),\n\tgetHeadElement = memoize(function () {\n\t\treturn document.head || document.getElementsByTagName(\"head\")[0];\n\t}),\n\tsingletonElement = null,\n\tsingletonCounter = 0,\n\tstyleElementsInsertedAtTop = [];\n\nmodule.exports = function(list, options) {\n\tif(typeof DEBUG !== \"undefined\" && DEBUG) {\n\t\tif(typeof document !== \"object\") throw new Error(\"The style-loader cannot be used in a non-browser environment\");\n\t}\n\n\toptions = options || {};\n\t// Force single-tag solution on IE6-9, which has a hard limit on the # of <style>\n\t// tags it will allow on a page\n\tif (typeof options.singleton === \"undefined\") options.singleton = isOldIE();\n\n\t// By default, add <style> tags to the bottom of <head>.\n\tif (typeof options.insertAt === \"undefined\") options.insertAt = \"bottom\";\n\n\tvar styles = listToStyles(list);\n\taddStylesToDom(styles, options);\n\n\treturn function update(newList) {\n\t\tvar mayRemove = [];\n\t\tfor(var i = 0; i < styles.length; i++) {\n\t\t\tvar item = styles[i];\n\t\t\tvar domStyle = stylesInDom[item.id];\n\t\t\tdomStyle.refs--;\n\t\t\tmayRemove.push(domStyle);\n\t\t}\n\t\tif(newList) {\n\t\t\tvar newStyles = listToStyles(newList);\n\t\t\taddStylesToDom(newStyles, options);\n\t\t}\n\t\tfor(var i = 0; i < mayRemove.length; i++) {\n\t\t\tvar domStyle = mayRemove[i];\n\t\t\tif(domStyle.refs === 0) {\n\t\t\t\tfor(var j = 0; j < domStyle.parts.length; j++)\n\t\t\t\t\tdomStyle.parts[j]();\n\t\t\t\tdelete stylesInDom[domStyle.id];\n\t\t\t}\n\t\t}\n\t};\n}\n\nfunction addStylesToDom(styles, options) {\n\tfor(var i = 0; i < styles.length; i++) {\n\t\tvar item = styles[i];\n\t\tvar domStyle = stylesInDom[item.id];\n\t\tif(domStyle) {\n\t\t\tdomStyle.refs++;\n\t\t\tfor(var j = 0; j < domStyle.parts.length; j++) {\n\t\t\t\tdomStyle.parts[j](item.parts[j]);\n\t\t\t}\n\t\t\tfor(; j < item.parts.length; j++) {\n\t\t\t\tdomStyle.parts.push(addStyle(item.parts[j], options));\n\t\t\t}\n\t\t} else {\n\t\t\tvar parts = [];\n\t\t\tfor(var j = 0; j < item.parts.length; j++) {\n\t\t\t\tparts.push(addStyle(item.parts[j], options));\n\t\t\t}\n\t\t\tstylesInDom[item.id] = {id: item.id, refs: 1, parts: parts};\n\t\t}\n\t}\n}\n\nfunction listToStyles(list) {\n\tvar styles = [];\n\tvar newStyles = {};\n\tfor(var i = 0; i < list.length; i++) {\n\t\tvar item = list[i];\n\t\tvar id = item[0];\n\t\tvar css = item[1];\n\t\tvar media = item[2];\n\t\tvar sourceMap = item[3];\n\t\tvar part = {css: css, media: media, sourceMap: sourceMap};\n\t\tif(!newStyles[id])\n\t\t\tstyles.push(newStyles[id] = {id: id, parts: [part]});\n\t\telse\n\t\t\tnewStyles[id].parts.push(part);\n\t}\n\treturn styles;\n}\n\nfunction insertStyleElement(options, styleElement) {\n\tvar head = getHeadElement();\n\tvar lastStyleElementInsertedAtTop = styleElementsInsertedAtTop[styleElementsInsertedAtTop.length - 1];\n\tif (options.insertAt === \"top\") {\n\t\tif(!lastStyleElementInsertedAtTop) {\n\t\t\thead.insertBefore(styleElement, head.firstChild);\n\t\t} else if(lastStyleElementInsertedAtTop.nextSibling) {\n\t\t\thead.insertBefore(styleElement, 
lastStyleElementInsertedAtTop.nextSibling);\n\t\t} else {\n\t\t\thead.appendChild(styleElement);\n\t\t}\n\t\tstyleElementsInsertedAtTop.push(styleElement);\n\t} else if (options.insertAt === \"bottom\") {\n\t\thead.appendChild(styleElement);\n\t} else {\n\t\tthrow new Error(\"Invalid value for parameter 'insertAt'. Must be 'top' or 'bottom'.\");\n\t}\n}\n\nfunction removeStyleElement(styleElement) {\n\tstyleElement.parentNode.removeChild(styleElement);\n\tvar idx = styleElementsInsertedAtTop.indexOf(styleElement);\n\tif(idx >= 0) {\n\t\tstyleElementsInsertedAtTop.splice(idx, 1);\n\t}\n}\n\nfunction createStyleElement(options) {\n\tvar styleElement = document.createElement(\"style\");\n\tstyleElement.type = \"text/css\";\n\tinsertStyleElement(options, styleElement);\n\treturn styleElement;\n}\n\nfunction addStyle(obj, options) {\n\tvar styleElement, update, remove;\n\n\tif (options.singleton) {\n\t\tvar styleIndex = singletonCounter++;\n\t\tstyleElement = singletonElement || (singletonElement = createStyleElement(options));\n\t\tupdate = applyToSingletonTag.bind(null, styleElement, styleIndex, false);\n\t\tremove = applyToSingletonTag.bind(null, styleElement, styleIndex, true);\n\t} else {\n\t\tstyleElement = createStyleElement(options);\n\t\tupdate = applyToTag.bind(null, styleElement);\n\t\tremove = function() {\n\t\t\tremoveStyleElement(styleElement);\n\t\t};\n\t}\n\n\tupdate(obj);\n\n\treturn function updateStyle(newObj) {\n\t\tif(newObj) {\n\t\t\tif(newObj.css === obj.css && newObj.media === obj.media && newObj.sourceMap === obj.sourceMap)\n\t\t\t\treturn;\n\t\t\tupdate(obj = newObj);\n\t\t} else {\n\t\t\tremove();\n\t\t}\n\t};\n}\n\nvar replaceText = (function () {\n\tvar textStore = [];\n\n\treturn function (index, replacement) {\n\t\ttextStore[index] = replacement;\n\t\treturn textStore.filter(Boolean).join('\\n');\n\t};\n})();\n\nfunction applyToSingletonTag(styleElement, index, remove, obj) {\n\tvar css = remove ? 
\"\" : obj.css;\n\n\tif (styleElement.styleSheet) {\n\t\tstyleElement.styleSheet.cssText = replaceText(index, css);\n\t} else {\n\t\tvar cssNode = document.createTextNode(css);\n\t\tvar childNodes = styleElement.childNodes;\n\t\tif (childNodes[index]) styleElement.removeChild(childNodes[index]);\n\t\tif (childNodes.length) {\n\t\t\tstyleElement.insertBefore(cssNode, childNodes[index]);\n\t\t} else {\n\t\t\tstyleElement.appendChild(cssNode);\n\t\t}\n\t}\n}\n\nfunction applyToTag(styleElement, obj) {\n\tvar css = obj.css;\n\tvar media = obj.media;\n\tvar sourceMap = obj.sourceMap;\n\n\tif (media) {\n\t\tstyleElement.setAttribute(\"media\", media);\n\t}\n\n\tif (sourceMap) {\n\t\t// https://developer.chrome.com/devtools/docs/javascript-debugging\n\t\t// this makes source maps inside style tags work properly in Chrome\n\t\tcss += '\\n/*# sourceURL=' + sourceMap.sources[0] + ' */';\n\t\t// http://stackoverflow.com/a/26603875\n\t\tcss += \"\\n/*# sourceMappingURL=data:application/json;base64,\" + btoa(unescape(encodeURIComponent(JSON.stringify(sourceMap)))) + \" */\";\n\t}\n\n\tif (styleElement.styleSheet) {\n\t\tstyleElement.styleSheet.cssText = css;\n\t} else {\n\t\twhile(styleElement.firstChild) {\n\t\t\tstyleElement.removeChild(styleElement.firstChild);\n\t\t}\n\t\tstyleElement.appendChild(document.createTextNode(css));\n\t}\n}\n//# sourceMappingURL=data:application/json;charset=utf-8;base64,{"version":3,"file":"5.js","sources":["webpack:///./~/vue-style-loader/addStyles.js?c2fc"],"sourcesContent":["/*\n\tMIT License http://www.opensource.org/licenses/mit-license.php\n\tAuthor Tobias Koppers @sokra\n*/\nvar stylesInDom = {},\n\tmemoize = function(fn) {\n\t\tvar memo;\n\t\treturn function () {\n\t\t\tif (typeof memo === \"undefined\") memo = fn.apply(this, arguments);\n\t\t\treturn memo;\n\t\t};\n\t},\n\tisOldIE = memoize(function() {\n\t\treturn /msie [6-9]\\b/.test(window.navigator.userAgent.toLowerCase());\n\t}),\n\tgetHeadElement = memoize(function () {\n\t\treturn document.head || document.getElementsByTagName(\"head\")[0];\n\t}),\n\tsingletonElement = null,\n\tsingletonCounter = 0,\n\tstyleElementsInsertedAtTop = [];\n\nmodule.exports = function(list, options) {\n\tif(typeof DEBUG !== \"undefined\" && DEBUG) {\n\t\tif(typeof document !== \"object\") throw new Error(\"The style-loader cannot be used in a non-browser environment\");\n\t}\n\n\toptions = options || {};\n\t// Force single-tag solution on IE6-9, which has a hard limit on the # of <style>\n\t// tags it will allow on a page\n\tif (typeof options.singleton === \"undefined\") options.singleton = isOldIE();\n\n\t// By default, add <style> tags to the bottom of <head>.\n\tif (typeof options.insertAt === \"undefined\") options.insertAt = \"bottom\";\n\n\tvar styles = listToStyles(list);\n\taddStylesToDom(styles, options);\n\n\treturn function update(newList) {\n\t\tvar mayRemove = [];\n\t\tfor(var i = 0; i < styles.length; i++) {\n\t\t\tvar item = styles[i];\n\t\t\tvar domStyle = stylesInDom[item.id];\n\t\t\tdomStyle.refs--;\n\t\t\tmayRemove.push(domStyle);\n\t\t}\n\t\tif(newList) {\n\t\t\tvar newStyles = listToStyles(newList);\n\t\t\taddStylesToDom(newStyles, options);\n\t\t}\n\t\tfor(var i = 0; i < mayRemove.length; i++) {\n\t\t\tvar domStyle = mayRemove[i];\n\t\t\tif(domStyle.refs === 0) {\n\t\t\t\tfor(var j = 0; j < domStyle.parts.length; j++)\n\t\t\t\t\tdomStyle.parts[j]();\n\t\t\t\tdelete stylesInDom[domStyle.id];\n\t\t\t}\n\t\t}\n\t};\n}\n\nfunction addStylesToDom(styles, options) {\n\tfor(var i = 0; i < styles.length; i++) 
{\n\t\tvar item = styles[i];\n\t\tvar domStyle = stylesInDom[item.id];\n\t\tif(domStyle) {\n\t\t\tdomStyle.refs++;\n\t\t\tfor(var j = 0; j < domStyle.parts.length; j++) {\n\t\t\t\tdomStyle.parts[j](item.parts[j]);\n\t\t\t}\n\t\t\tfor(; j < item.parts.length; j++) {\n\t\t\t\tdomStyle.parts.push(addStyle(item.parts[j], options));\n\t\t\t}\n\t\t} else {\n\t\t\tvar parts = [];\n\t\t\tfor(var j = 0; j < item.parts.length; j++) {\n\t\t\t\tparts.push(addStyle(item.parts[j], options));\n\t\t\t}\n\t\t\tstylesInDom[item.id] = {id: item.id, refs: 1, parts: parts};\n\t\t}\n\t}\n}\n\nfunction listToStyles(list) {\n\tvar styles = [];\n\tvar newStyles = {};\n\tfor(var i = 0; i < list.length; i++) {\n\t\tvar item = list[i];\n\t\tvar id = item[0];\n\t\tvar css = item[1];\n\t\tvar media = item[2];\n\t\tvar sourceMap = item[3];\n\t\tvar part = {css: css, media: media, sourceMap: sourceMap};\n\t\tif(!newStyles[id])\n\t\t\tstyles.push(newStyles[id] = {id: id, parts: [part]});\n\t\telse\n\t\t\tnewStyles[id].parts.push(part);\n\t}\n\treturn styles;\n}\n\nfunction insertStyleElement(options, styleElement) {\n\tvar head = getHeadElement();\n\tvar lastStyleElementInsertedAtTop = styleElementsInsertedAtTop[styleElementsInsertedAtTop.length - 1];\n\tif (options.insertAt === \"top\") {\n\t\tif(!lastStyleElementInsertedAtTop) {\n\t\t\thead.insertBefore(styleElement, head.firstChild);\n\t\t} else if(lastStyleElementInsertedAtTop.nextSibling) {\n\t\t\thead.insertBefore(styleElement, lastStyleElementInsertedAtTop.nextSibling);\n\t\t} else {\n\t\t\thead.appendChild(styleElement);\n\t\t}\n\t\tstyleElementsInsertedAtTop.push(styleElement);\n\t} else if (options.insertAt === \"bottom\") {\n\t\thead.appendChild(styleElement);\n\t} else {\n\t\tthrow new Error(\"Invalid value for parameter 'insertAt'. Must be 'top' or 'bottom'.\");\n\t}\n}\n\nfunction removeStyleElement(styleElement) {\n\tstyleElement.parentNode.removeChild(styleElement);\n\tvar idx = styleElementsInsertedAtTop.indexOf(styleElement);\n\tif(idx >= 0) {\n\t\tstyleElementsInsertedAtTop.splice(idx, 1);\n\t}\n}\n\nfunction createStyleElement(options) {\n\tvar styleElement = document.createElement(\"style\");\n\tstyleElement.type = \"text/css\";\n\tinsertStyleElement(options, styleElement);\n\treturn styleElement;\n}\n\nfunction addStyle(obj, options) {\n\tvar styleElement, update, remove;\n\n\tif (options.singleton) {\n\t\tvar styleIndex = singletonCounter++;\n\t\tstyleElement = singletonElement || (singletonElement = createStyleElement(options));\n\t\tupdate = applyToSingletonTag.bind(null, styleElement, styleIndex, false);\n\t\tremove = applyToSingletonTag.bind(null, styleElement, styleIndex, true);\n\t} else {\n\t\tstyleElement = createStyleElement(options);\n\t\tupdate = applyToTag.bind(null, styleElement);\n\t\tremove = function() {\n\t\t\tremoveStyleElement(styleElement);\n\t\t};\n\t}\n\n\tupdate(obj);\n\n\treturn function updateStyle(newObj) {\n\t\tif(newObj) {\n\t\t\tif(newObj.css === obj.css && newObj.media === obj.media && newObj.sourceMap === obj.sourceMap)\n\t\t\t\treturn;\n\t\t\tupdate(obj = newObj);\n\t\t} else {\n\t\t\tremove();\n\t\t}\n\t};\n}\n\nvar replaceText = (function () {\n\tvar textStore = [];\n\n\treturn function (index, replacement) {\n\t\ttextStore[index] = replacement;\n\t\treturn textStore.filter(Boolean).join('\\n');\n\t};\n})();\n\nfunction applyToSingletonTag(styleElement, index, remove, obj) {\n\tvar css = remove ? 
\"\" : obj.css;\n\n\tif (styleElement.styleSheet) {\n\t\tstyleElement.styleSheet.cssText = replaceText(index, css);\n\t} else {\n\t\tvar cssNode = document.createTextNode(css);\n\t\tvar childNodes = styleElement.childNodes;\n\t\tif (childNodes[index]) styleElement.removeChild(childNodes[index]);\n\t\tif (childNodes.length) {\n\t\t\tstyleElement.insertBefore(cssNode, childNodes[index]);\n\t\t} else {\n\t\t\tstyleElement.appendChild(cssNode);\n\t\t}\n\t}\n}\n\nfunction applyToTag(styleElement, obj) {\n\tvar css = obj.css;\n\tvar media = obj.media;\n\tvar sourceMap = obj.sourceMap;\n\n\tif (media) {\n\t\tstyleElement.setAttribute(\"media\", media);\n\t}\n\n\tif (sourceMap) {\n\t\t// https://developer.chrome.com/devtools/docs/javascript-debugging\n\t\t// this makes source maps inside style tags work properly in Chrome\n\t\tcss += '\\n/*# sourceURL=' + sourceMap.sources[0] + ' */';\n\t\t// http://stackoverflow.com/a/26603875\n\t\tcss += \"\\n/*# sourceMappingURL=data:application/json;base64,\" + btoa(unescape(encodeURIComponent(JSON.stringify(sourceMap)))) + \" */\";\n\t}\n\n\tif (styleElement.styleSheet) {\n\t\tstyleElement.styleSheet.cssText = css;\n\t} else {\n\t\twhile(styleElement.firstChild) {\n\t\t\tstyleElement.removeChild(styleElement.firstChild);\n\t\t}\n\t\tstyleElement.appendChild(document.createTextNode(css));\n\t}\n}\n\n\n\n//////////////////\n// WEBPACK FOOTER\n// ./~/vue-style-loader/addStyles.js\n// module id = 5\n// module chunks = 0"],"mappings":"AAAA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;","sourceRoot":""}");
/***/ },
/* 6 */
/***/ function(module, exports, __webpack_require__) {
eval("// style-loader: Adds some css to the DOM by adding a <style> tag\n\n// load the styles\nvar content = __webpack_require__(2);\nif(typeof content === 'string') content = [[module.i, content, '']];\n// add the styles to the DOM\nvar update = __webpack_require__(5)(content, {});\nif(content.locals) module.exports = content.locals;\n// Hot Module Replacement\nif(false) {\n\t// When the styles change, update the <style> tags\n\tif(!content.locals) {\n\t\tmodule.hot.accept(\"!!../../../../node_modules/css-loader/index.js?sourceMap!../../../../node_modules/vue-loader/lib/style-rewriter.js?id=data-v-eaeb90c4!../../../../node_modules/vue-loader/lib/selector.js?type=styles&index=0!./Stockadjuster.vue\", function() {\n\t\t\tvar newContent = require(\"!!../../../../node_modules/css-loader/index.js?sourceMap!../../../../node_modules/vue-loader/lib/style-rewriter.js?id=data-v-eaeb90c4!../../../../node_modules/vue-loader/lib/selector.js?type=styles&index=0!./Stockadjuster.vue\");\n\t\t\tif(typeof newContent === 'string') newContent = [[module.id, newContent, '']];\n\t\t\tupdate(newContent);\n\t\t});\n\t}\n\t// When the module is disposed, remove the <style> tags\n\tmodule.hot.dispose(function() { update(); });\n}//# sourceMappingURL=data:application/json;charset=utf-8;base64,eyJ2ZXJzaW9uIjozLCJmaWxlIjoiNi5qcyIsInNvdXJjZXMiOlsid2VicGFjazovLy8uL3Jlc291cmNlcy9hc3NldHMvanMvY29tcG9uZW50cy9TdG9ja2FkanVzdGVyLnZ1ZT82ZGRiIl0sInNvdXJjZXNDb250ZW50IjpbIi8vIHN0eWxlLWxvYWRlcjogQWRkcyBzb21lIGNzcyB0byB0aGUgRE9NIGJ5IGFkZGluZyBhIDxzdHlsZT4gdGFnXG5cbi8vIGxvYWQgdGhlIHN0eWxlc1xudmFyIGNvbnRlbnQgPSByZXF1aXJlKFwiISEuLi8uLi8uLi8uLi9ub2RlX21vZHVsZXMvY3NzLWxvYWRlci9pbmRleC5qcz9zb3VyY2VNYXAhLi4vLi4vLi4vLi4vbm9kZV9tb2R1bGVzL3Z1ZS1sb2FkZXIvbGliL3N0eWxlLXJld3JpdGVyLmpzP2lkPWRhdGEtdi1lYWViOTBjNCEuLi8uLi8uLi8uLi9ub2RlX21vZHVsZXMvdnVlLWxvYWRlci9saWIvc2VsZWN0b3IuanM/dHlwZT1zdHlsZXMmaW5kZXg9MCEuL1N0b2NrYWRqdXN0ZXIudnVlXCIpO1xuaWYodHlwZW9mIGNvbnRlbnQgPT09ICdzdHJpbmcnKSBjb250ZW50ID0gW1ttb2R1bGUuaWQsIGNvbnRlbnQsICcnXV07XG4vLyBhZGQgdGhlIHN0eWxlcyB0byB0aGUgRE9NXG52YXIgdXBkYXRlID0gcmVxdWlyZShcIiEuLi8uLi8uLi8uLi9ub2RlX21vZHVsZXMvdnVlLXN0eWxlLWxvYWRlci9hZGRTdHlsZXMuanNcIikoY29udGVudCwge30pO1xuaWYoY29udGVudC5sb2NhbHMpIG1vZHVsZS5leHBvcnRzID0gY29udGVudC5sb2NhbHM7XG4vLyBIb3QgTW9kdWxlIFJlcGxhY2VtZW50XG5pZihtb2R1bGUuaG90KSB7XG5cdC8vIFdoZW4gdGhlIHN0eWxlcyBjaGFuZ2UsIHVwZGF0ZSB0aGUgPHN0eWxlPiB0YWdzXG5cdGlmKCFjb250ZW50LmxvY2Fscykge1xuXHRcdG1vZHVsZS5ob3QuYWNjZXB0KFwiISEuLi8uLi8uLi8uLi9ub2RlX21vZHVsZXMvY3NzLWxvYWRlci9pbmRleC5qcz9zb3VyY2VNYXAhLi4vLi4vLi4vLi4vbm9kZV9tb2R1bGVzL3Z1ZS1sb2FkZXIvbGliL3N0eWxlLXJld3JpdGVyLmpzP2lkPWRhdGEtdi1lYWViOTBjNCEuLi8uLi8uLi8uLi9ub2RlX21vZHVsZXMvdnVlLWxvYWRlci9saWIvc2VsZWN0b3IuanM/dHlwZT1zdHlsZXMmaW5kZXg9MCEuL1N0b2NrYWRqdXN0ZXIudnVlXCIsIGZ1bmN0aW9uKCkge1xuXHRcdFx0dmFyIG5ld0NvbnRlbnQgPSByZXF1aXJlKFwiISEuLi8uLi8uLi8uLi9ub2RlX21vZHVsZXMvY3NzLWxvYWRlci9pbmRleC5qcz9zb3VyY2VNYXAhLi4vLi4vLi4vLi4vbm9kZV9tb2R1bGVzL3Z1ZS1sb2FkZXIvbGliL3N0eWxlLXJld3JpdGVyLmpzP2lkPWRhdGEtdi1lYWViOTBjNCEuLi8uLi8uLi8uLi9ub2RlX21vZHVsZXMvdnVlLWxvYWRlci9saWIvc2VsZWN0b3IuanM/dHlwZT1zdHlsZXMmaW5kZXg9MCEuL1N0b2NrYWRqdXN0ZXIudnVlXCIpO1xuXHRcdFx0aWYodHlwZW9mIG5ld0NvbnRlbnQgPT09ICdzdHJpbmcnKSBuZXdDb250ZW50ID0gW1ttb2R1bGUuaWQsIG5ld0NvbnRlbnQsICcnXV07XG5cdFx0XHR1cGRhdGUobmV3Q29udGVudCk7XG5cdFx0fSk7XG5cdH1cblx0Ly8gV2hlbiB0aGUgbW9kdWxlIGlzIGRpc3Bvc2VkLCByZW1vdmUgdGhlIDxzdHlsZT4gdGFnc1xuXHRtb2R1bGUuaG90LmRpc3Bvc2UoZnVuY3Rpb24oKSB7IHVwZGF0ZSgpOyB9KTtcbn1cblxuXG4vLy8vLy8vLy8vLy8vLy8vLy9cbi8vIFdFQlBBQ0sgRk9PVEVSXG4vLyAuL34vdnVlLXN0eWxlLWxvYWRlciEuL34vY3NzLWxvYWRlcj9zb3V
yY2VNYXAhLi9+L3Z1ZS1sb2FkZXIvbGliL3N0eWxlLXJld3JpdGVyLmpzP2lkPWRhdGEtdi1lYWViOTBjNCEuL34vdnVlLWxvYWRlci9saWIvc2VsZWN0b3IuanM/dHlwZT1zdHlsZXMmaW5kZXg9MCEuL3Jlc291cmNlcy9hc3NldHMvanMvY29tcG9uZW50cy9TdG9ja2FkanVzdGVyLnZ1ZVxuLy8gbW9kdWxlIGlkID0gNlxuLy8gbW9kdWxlIGNodW5rcyA9IDAiXSwibWFwcGluZ3MiOiJBQUFBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQSIsInNvdXJjZVJvb3QiOiIifQ==");
/***/ },
/* 7 */
/***/ function(module, exports, __webpack_require__) {
eval("\r\n/**\r\n * First we will load all of this project's JavaScript dependencies which\r\n * include Vue and Vue Resource. This gives a great starting point for\r\n * building robust, powerful web applications using Vue and Laravel.\r\n */\r\n\r\n\r\n\r\n/**\r\n * Next, we will create a fresh Vue application instance and attach it to\r\n * the body of the page. From here, you may begin adding components to\r\n * the application, or feel free to tweak this setup for your needs.\r\n */\r\n// Vue.config.devtools = true\r\n\r\n\r\nVue.component('Stockadjuster', __webpack_require__(0));\r\n\r\nvar app = new Vue({\r\n el: '#app',\r\n \r\n});//# sourceMappingURL=data:application/json;charset=utf-8;base64,eyJ2ZXJzaW9uIjozLCJmaWxlIjoiNy5qcyIsInNvdXJjZXMiOlsid2VicGFjazovLy9yZXNvdXJjZXMvYXNzZXRzL2pzL3N0b2NrYWRqdXN0ZXIuanM/MTI4NiJdLCJzb3VyY2VzQ29udGVudCI6WyJcclxuLyoqXHJcbiAqIEZpcnN0IHdlIHdpbGwgbG9hZCBhbGwgb2YgdGhpcyBwcm9qZWN0J3MgSmF2YVNjcmlwdCBkZXBlbmRlbmNpZXMgd2hpY2hcclxuICogaW5jbHVkZSBWdWUgYW5kIFZ1ZSBSZXNvdXJjZS4gVGhpcyBnaXZlcyBhIGdyZWF0IHN0YXJ0aW5nIHBvaW50IGZvclxyXG4gKiBidWlsZGluZyByb2J1c3QsIHBvd2VyZnVsIHdlYiBhcHBsaWNhdGlvbnMgdXNpbmcgVnVlIGFuZCBMYXJhdmVsLlxyXG4gKi9cclxuXHJcblxyXG5cclxuLyoqXHJcbiAqIE5leHQsIHdlIHdpbGwgY3JlYXRlIGEgZnJlc2ggVnVlIGFwcGxpY2F0aW9uIGluc3RhbmNlIGFuZCBhdHRhY2ggaXQgdG9cclxuICogdGhlIGJvZHkgb2YgdGhlIHBhZ2UuIEZyb20gaGVyZSwgeW91IG1heSBiZWdpbiBhZGRpbmcgY29tcG9uZW50cyB0b1xyXG4gKiB0aGUgYXBwbGljYXRpb24sIG9yIGZlZWwgZnJlZSB0byB0d2VhayB0aGlzIHNldHVwIGZvciB5b3VyIG5lZWRzLlxyXG4gKi9cclxuLy8gVnVlLmNvbmZpZy5kZXZ0b29scyA9IHRydWVcclxuXHJcblxyXG5WdWUuY29tcG9uZW50KCdTdG9ja2FkanVzdGVyJywgcmVxdWlyZSgnLi9jb21wb25lbnRzL1N0b2NrYWRqdXN0ZXIudnVlJykpO1xyXG5cclxuY29uc3QgYXBwICA9IG5ldyBWdWUoe1xyXG4gICAgZWw6ICcjYXBwJyxcclxuICAgIFxyXG59KTtcblxuXG4vLyBXRUJQQUNLIEZPT1RFUiAvL1xuLy8gcmVzb3VyY2VzL2Fzc2V0cy9qcy9zdG9ja2FkanVzdGVyLmpzIl0sIm1hcHBpbmdzIjoiQUFBQTs7Ozs7Ozs7Ozs7Ozs7Ozs7QUFpQkE7QUFDQTtBQUNBO0FBQ0E7QUFDQTsiLCJzb3VyY2VSb290IjoiIn0=");
/***/ }
/******/ ]); | |
dashboard.go | package dashboards
import (
"context"
"fmt"
"os"
"github.com/Seasheller/grafana/pkg/infra/log"
"github.com/Seasheller/grafana/pkg/util/errutil"
)
type DashboardProvisionerImpl struct {
log log.Logger
fileReaders []*fileReader
}
func NewDashboardProvisionerImpl(configDirectory string) (*DashboardProvisionerImpl, error) {
logger := log.New("provisioning.dashboard")
cfgReader := &configReader{path: configDirectory, log: logger}
configs, err := cfgReader.readConfig()
if err != nil {
return nil, errutil.Wrap("Failed to read dashboards config", err)
}
fileReaders, err := getFileReaders(configs, logger)
if err != nil {
return nil, errutil.Wrap("Failed to initialize file readers", err)
}
d := &DashboardProvisionerImpl{
log: logger,
fileReaders: fileReaders,
}
return d, nil
}
func (provider *DashboardProvisionerImpl) Provision() error {
for _, reader := range provider.fileReaders {
if err := reader.startWalkingDisk(); err != nil {
if os.IsNotExist(err) {
// Don't stop the provisioning service if the folder is missing; it can appear after startup.
provider.log.Warn("Failed to provision config", "name", reader.Cfg.Name, "error", err)
return nil
}
return errutil.Wrapf(err, "Failed to provision config %v", reader.Cfg.Name)
}
}
return nil
}
// PollChanges starts polling for changes in dashboard definition files. It creates a goroutine for each provider
// defined in the config.
func (provider *DashboardProvisionerImpl) PollChanges(ctx context.Context) {
for _, reader := range provider.fileReaders {
go reader.pollChanges(ctx)
}
}
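// Example (hypothetical usage sketch, not part of the original file): a caller
// typically provisions once at startup and then starts polling with a
// cancellable context so the background goroutines stop on shutdown. The
// config directory path below is an assumption.
//
//	provisioner, err := NewDashboardProvisionerImpl("conf/provisioning/dashboards")
//	if err != nil {
//		return err
//	}
//	if err := provisioner.Provision(); err != nil {
//		return err
//	}
//	ctx, cancel := context.WithCancel(context.Background())
//	defer cancel()
//	provisioner.PollChanges(ctx)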
// GetProvisionerResolvedPath returns the resolved path for the specified provisioner name. It can be used to generate
// a relative path to a provisioning file from its external_id.
func (provider *DashboardProvisionerImpl) GetProvisionerResolvedPath(name string) string {
for _, reader := range provider.fileReaders {
if reader.Cfg.Name == name {
return reader.resolvedPath()
}
}
return ""
}
func getFileReaders(configs []*DashboardsAsConfig, logger log.Logger) ([]*fileReader, error) {
var readers []*fileReader
for _, config := range configs {
switch config.Type {
case "file":
fileReader, err := NewDashboardFileReader(config, logger.New("type", config.Type, "name", config.Name))
if err != nil |
readers = append(readers, fileReader)
default:
return nil, fmt.Errorf("type %s is not supported", config.Type)
}
}
return readers, nil
}
| {
return nil, errutil.Wrapf(err, "Failed to create file reader for config %v", config.Name)
} |
issue-82920-predicate-order-miscompile.rs | // revisions: rpass1 rpass2
trait MyTrait: One + Two {}
impl<T> One for T {
fn method_one(&self) -> usize {
1
}
}
impl<T> Two for T {
fn method_two(&self) -> usize {
2
}
}
impl<T: One + Two> MyTrait for T {}
fn main() |
// Re-order traits 'One' and 'Two' between compilation
// sessions
#[cfg(rpass1)]
trait One { fn method_one(&self) -> usize; }
trait Two { fn method_two(&self) -> usize; }
#[cfg(rpass2)]
trait One { fn method_one(&self) -> usize; }
| {
let a: &dyn MyTrait = &true;
assert_eq!(a.method_one(), 1);
assert_eq!(a.method_two(), 2);
} |
transformations.go | // Package htsrequest provides operations for parsing htsget-related
// parameters from the HTTP request, and performing validation and
// transformation
//
// Module transformations defines operations for transforming the raw string
// parsed from the HTTP request into a typed value that is usable by the program
package htsrequest
import (
"fmt"
"strconv"
"strings"
)
// ParamTransformer transforms request parameters on query string to expected datatype
type ParamTransformer struct{}
// NewParamTransformer instantiates a new ParamTransformer object
func NewParamTransformer() *ParamTransformer {
return new(ParamTransformer)
}
// NoTransform performs no param transformation, returning the exact same value
func (t *ParamTransformer) NoTransform(s string) (string, string) {
return s, ""
}
// TransformStringUppercase transforms a param with lowercase characters to all
// uppercase
func (t *ParamTransformer) TransformStringUppercase(s string) (string, string) {
return strings.ToUpper(s), ""
}
// TransformStringLowercase transforms a param with uppercase characters to all
// lowercase
func (t *ParamTransformer) TransformStringLowercase(s string) (string, string) {
return strings.ToLower(s), "" | // TransformStringToInt converts a request param to integer datatype
func (t *ParamTransformer) TransformStringToInt(s string) (int, string) {
msg := ""
value, err := strconv.Atoi(s)
if err != nil {
msg = fmt.Sprintf("Could not parse value: '%s', integer expected", s)
}
return value, msg
}
// TransformSplit splits a string into a list of strings, delimited by comma
func (t *ParamTransformer) TransformSplit(s string) ([]string, string) {
return strings.Split(s, ","), ""
}
// TransformSplitAndUppercase splits a string into a list of strings, and
// uppercases each element
func (t *ParamTransformer) TransformSplitAndUppercase(s string) ([]string, string) {
sList, _ := t.TransformSplit(s)
for i := 0; i < len(sList); i++ {
sList[i] = strings.ToUpper(sList[i])
}
return sList, ""
} | }
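// Usage sketch (illustrative, not part of the original module): every
// transformer returns the converted value together with a message string,
// where an empty message means the transformation succeeded.
//
//	t := NewParamTransformer()
//	fields, _ := t.TransformSplitAndUppercase("chr1,chr2") // ["CHR1" "CHR2"]
//	n, msg := t.TransformStringToInt("42")                 // 42, ""
//	_, msg = t.TransformStringToInt("abc")                 // msg reports the parse failure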
|
testutils.go | // Common testing utilities. This file does not have a _test.go suffix so that
// it can be used from other packages that also want to test the modules they
// implement (e.g. edit: and re:).
package eval
import (
"bytes"
"errors"
"fmt"
"os"
"reflect"
"github.com/u-root/u-root/cmds/core/elvish/eval/vals"
"github.com/u-root/u-root/cmds/core/elvish/parse"
)
// Test is a test case for TestEval.
type Test struct {
text string
want
}
type want struct {
out []interface{}
bytesOut []byte
err error
}
// A special value for want.err to indicate that any non-nil error is OK.
var errAny = errors.New("any error")
// The following functions and methods are used to build Test structs. They are
// supposed to read like English, so a test that "put x" should put "x" reads:
//
// That("put x").Puts("x")
// That returns a new Test with the specified source code.
func That(text string) Test {
return Test{text: text}
}
// DoesNothing returns t unchanged. It is used to mark that a piece of code
// should simply do nothing. In particular, it should not produce any output
// and should not error.
func (t Test) DoesNothing() Test {
return t
}
// Puts returns an altered Test that requires the source code to produce the
// specified values in the value channel when evaluated.
func (t Test) Puts(vs ...interface{}) Test {
t.want.out = vs
return t
}
// PutsStrings returns an altered Test that requires the source code to produce the
// specified strings in the value channel when evaluated.
func (t Test) PutsStrings(ss []string) Test {
t.want.out = make([]interface{}, len(ss))
for i, s := range ss {
t.want.out[i] = s
}
return t
}
// Prints returns an altered Test that requires the source code to produce
// the specified output in the byte pipe when evaluated.
func (t Test) Prints(s string) Test {
t.want.bytesOut = []byte(s)
return t
}
// ErrorsWith returns an altered Test that requires the source code to result in
// the specified error when evaluated.
func (t Test) ErrorsWith(err error) Test {
t.want.err = err
return t
}
// Errors returns an altered Test that requires the source code to result in any
// error when evaluated.
func (t Test) Errors() Test {
return t.ErrorsWith(errAny)
}
// RunTests runs test cases. For each test case, a new Evaler is made by calling
// makeEvaler.
func RunTests(evalTests []Test, makeEvaler func() *Evaler) error {
for _, tt := range evalTests {
// fmt.Printf("eval %q\n", tt.text)
ev := makeEvaler()
defer ev.Close()
out, bytesOut, err := evalAndCollect(ev, []string{tt.text}, len(tt.want.out))
first := true
errorf := func(format string, args ...interface{}) error {
if first {
first = false
return fmt.Errorf("eval(%q) fails:", tt.text)
}
return fmt.Errorf(" "+format, args...)
}
if !matchOut(tt.want.out, out) {
if err := errorf("got out=%v, want %v", out, tt.want.out); err != nil {
return err
}
}
if !bytes.Equal(tt.want.bytesOut, bytesOut) {
if err := errorf("got bytesOut=%q, want %q", bytesOut, tt.want.bytesOut); err != nil {
return err
}
}
if !matchErr(tt.want.err, err) {
if err := errorf("got err=%v, want %v", err, tt.want.err); err != nil {
return err
}
}
}
return nil
}
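// Illustrative only (not part of the original file): a test in this package
// would typically build a table with the builders above and hand it to
// RunTests together with a factory returning a fresh Evaler; the factory name
// below is hypothetical.
//
//	tests := []Test{
//		That("put x").Puts("x"),
//		That("fail oops").Errors(),
//	}
//	if err := RunTests(tests, newTestEvaler); err != nil {
//		t.Error(err)
//	}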
func evalAndCollect(ev *Evaler, texts []string, chsize int) ([]interface{}, []byte, error) {
// Collect byte output
bytesOut := []byte{}
pr, pw, _ := os.Pipe()
bytesDone := make(chan struct{})
go func() {
for {
var buf [64]byte
nr, err := pr.Read(buf[:])
bytesOut = append(bytesOut, buf[:nr]...)
if err != nil {
break
}
}
close(bytesDone)
}()
// Channel output
outs := []interface{}{}
// Eval error. Only that of the last text is saved.
var ex error
for i, text := range texts {
name := fmt.Sprintf("test%d.elv", i)
src := NewScriptSource(name, name, text)
op, err := mustParseAndCompile(ev, src)
if err != nil {
return nil, nil, err
}
outCh := make(chan interface{}, chsize)
outDone := make(chan struct{})
go func() {
for v := range outCh {
outs = append(outs, v)
}
close(outDone)
}()
ports := []*Port{
{File: os.Stdin, Chan: ClosedChan},
{File: pw, Chan: outCh},
{File: os.Stderr, Chan: BlackholeChan},
}
ex = ev.eval(op, ports, src)
close(outCh)
<-outDone
} | pw.Close()
<-bytesDone
pr.Close()
return outs, bytesOut, ex
}
func mustParseAndCompile(ev *Evaler, src *Source) (Op, error) {
n, err := parse.Parse(src.name, src.code)
if err != nil {
return Op{}, fmt.Errorf("Parse(%q) error: %s", src.code, err)
}
op, err := ev.Compile(n, src)
if err != nil {
return Op{}, fmt.Errorf("Compile(Parse(%q)) error: %s", src.code, err)
}
return op, nil
}
func matchOut(want, got []interface{}) bool {
if len(got) == 0 && len(want) == 0 {
return true
}
if len(got) != len(want) {
return false
}
for i := range got {
if !vals.Equal(got[i], want[i]) {
return false
}
}
return true
}
func matchErr(want, got error) bool {
if got == nil {
return want == nil
}
return want == errAny || reflect.DeepEqual(got.(*Exception).Cause, want)
}
// compareSlice compares two slices, using vals.Equal for each element.
func compareSlice(wantValues, gotValues []interface{}) error {
if len(wantValues) != len(gotValues) {
return fmt.Errorf("want %d values, got %d",
len(wantValues), len(gotValues))
}
for i, want := range wantValues {
if !vals.Equal(want, gotValues[i]) {
return fmt.Errorf("want [%d] = %s, got %s", i, want, gotValues[i])
}
}
return nil
} | |
comma-style.js | /**
* @fileoverview Comma style - enforces comma styles of two types: last and first
* @author Vignesh Anand aka vegetableman
*/
"use strict";
const astUtils = require("../ast-utils");
//------------------------------------------------------------------------------
// Rule Definition
//------------------------------------------------------------------------------
module.exports = {
meta: {
docs: {
description: "enforce consistent comma style",
category: "Stylistic Issues",
recommended: false
},
fixable: "code",
schema: [
{
enum: ["first", "last"]
},
{
type: "object",
properties: {
exceptions: {
type: "object",
additionalProperties: {
type: "boolean"
}
}
},
additionalProperties: false
}
]
},
create(context) {
const style = context.options[0] || "last",
sourceCode = context.getSourceCode();
let exceptions = {};
if (context.options.length === 2 && context.options[1].hasOwnProperty("exceptions")) {
exceptions = context.options[1].exceptions;
}
//--------------------------------------------------------------------------
// Helpers
//--------------------------------------------------------------------------
/**
* Determines if a given token is a comma operator.
* @param {ASTNode} token The token to check.
* @returns {boolean} True if the token is a comma, false if not.
* @private
*/
function isComma(token) {
return !!token && (token.type === "Punctuator") && (token.value === ",");
}
/**
* Modified text based on the style
* @param {string} styleType Style type
* @param {string} text Source code text
* @returns {string} modified text
* @private
*/
function getReplacedText(styleType, text) {
switch (styleType) {
case "between":
return `,${text.replace("\n", "")}`;
case "first":
return `${text},`;
case "last":
return `,${text}`;
default:
return "";
}
}
/**
* Determines the fixer function for a given style.
* @param {string} styleType The comma style ("first", "last", or "between").
* @param {ASTNode} previousItemToken The last token of the previous item.
* @param {ASTNode} commaToken The comma token between the two items.
* @param {ASTNode} currentItemToken The first token of the current item.
* @returns {Function} Fixer function
* @private
*/
function | (styleType, previousItemToken, commaToken, currentItemToken) {
const text =
sourceCode.text.slice(previousItemToken.range[1], commaToken.range[0]) +
sourceCode.text.slice(commaToken.range[1], currentItemToken.range[0]);
const range = [previousItemToken.range[1], currentItemToken.range[0]];
return function(fixer) {
return fixer.replaceTextRange(range, getReplacedText(styleType, text));
};
}
/**
* Validates the spacing around single items in lists.
* @param {Token} previousItemToken The last token from the previous item.
* @param {Token} commaToken The token representing the comma.
* @param {Token} currentItemToken The first token of the current item.
* @param {Token} reportItem The item to use when reporting an error.
* @returns {void}
* @private
*/
function validateCommaItemSpacing(previousItemToken, commaToken, currentItemToken, reportItem) {
// if single line
if (astUtils.isTokenOnSameLine(commaToken, currentItemToken) &&
astUtils.isTokenOnSameLine(previousItemToken, commaToken)) {
return;
} else if (!astUtils.isTokenOnSameLine(commaToken, currentItemToken) &&
!astUtils.isTokenOnSameLine(previousItemToken, commaToken)) {
// lone comma
context.report({
node: reportItem,
loc: {
line: commaToken.loc.end.line,
column: commaToken.loc.start.column
},
message: "Bad line breaking before and after ','.",
fix: getFixerFunction("between", previousItemToken, commaToken, currentItemToken)
});
} else if (style === "first" && !astUtils.isTokenOnSameLine(commaToken, currentItemToken)) {
context.report({
node: reportItem,
message: "',' should be placed first.",
fix: getFixerFunction(style, previousItemToken, commaToken, currentItemToken)
});
} else if (style === "last" && astUtils.isTokenOnSameLine(commaToken, currentItemToken)) {
context.report({
node: reportItem,
loc: {
line: commaToken.loc.end.line,
column: commaToken.loc.end.column
},
message: "',' should be placed last.",
fix: getFixerFunction(style, previousItemToken, commaToken, currentItemToken)
});
}
}
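        /*
         * Illustrative only (not part of the original rule): with the default
         * "last" style the rule expects
         *     var a = 1,
         *         b = 2;
         * while the "first" style expects
         *     var a = 1
         *       , b = 2;
         */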
/**
* Checks the comma placement with regards to a declaration/property/element
* @param {ASTNode} node The binary expression node to check
* @param {string} property The property of the node containing child nodes.
* @private
* @returns {void}
*/
function validateComma(node, property) {
const items = node[property],
arrayLiteral = (node.type === "ArrayExpression");
if (items.length > 1 || arrayLiteral) {
// seed as opening [
let previousItemToken = sourceCode.getFirstToken(node);
items.forEach(function(item) {
const commaToken = item ? sourceCode.getTokenBefore(item) : previousItemToken,
currentItemToken = item ? sourceCode.getFirstToken(item) : sourceCode.getTokenAfter(commaToken),
reportItem = item || currentItemToken,
tokenBeforeComma = sourceCode.getTokenBefore(commaToken);
// Check if previous token is wrapped in parentheses
if (tokenBeforeComma && tokenBeforeComma.value === ")") {
previousItemToken = tokenBeforeComma;
}
/*
* This works by comparing three token locations:
* - previousItemToken is the last token of the previous item
* - commaToken is the location of the comma before the current item
* - currentItemToken is the first token of the current item
*
* These values get switched around if item is undefined.
* previousItemToken will refer to the last token not belonging
* to the current item, which could be a comma or an opening
* square bracket. currentItemToken could be a comma.
*
* All comparisons are done based on these tokens directly, so
* they are always valid regardless of an undefined item.
*/
if (isComma(commaToken)) {
validateCommaItemSpacing(previousItemToken, commaToken,
currentItemToken, reportItem);
}
previousItemToken = item ? sourceCode.getLastToken(item) : previousItemToken;
});
/*
* Special case for array literals that have empty last items, such
* as [ 1, 2, ]. These arrays only have two items show up in the
* AST, so we need to look at the token to verify that there's no
* dangling comma.
*/
if (arrayLiteral) {
const lastToken = sourceCode.getLastToken(node),
nextToLastToken = sourceCode.getTokenBefore(lastToken);
if (isComma(nextToLastToken)) {
validateCommaItemSpacing(
sourceCode.getTokenBefore(nextToLastToken),
nextToLastToken,
lastToken,
lastToken
);
}
}
}
}
//--------------------------------------------------------------------------
// Public
//--------------------------------------------------------------------------
const nodes = {};
if (!exceptions.VariableDeclaration) {
nodes.VariableDeclaration = function(node) {
validateComma(node, "declarations");
};
}
if (!exceptions.ObjectExpression) {
nodes.ObjectExpression = function(node) {
validateComma(node, "properties");
};
}
if (!exceptions.ArrayExpression) {
nodes.ArrayExpression = function(node) {
validateComma(node, "elements");
};
}
return nodes;
}
};
| getFixerFunction |
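// Configuration sketch (assumed, based on the schema above): the rule takes the
// style keyword plus an optional exceptions object keyed by node type, e.g. in
// an ESLint config:
//
//     "comma-style": ["error", "first", { "exceptions": { "ArrayExpression": true } }]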
tests.rs | use std::path::PathBuf;
use std::thread;
use std::time::{Duration, Instant};
use reqwest;
use utils::*;
lazy_static! {
static ref CARGO_WEB: PathBuf = get_var( "CARGO_WEB" ).into();
static ref REPOSITORY_ROOT: PathBuf = get_var( "REPOSITORY_ROOT" ).into();
static ref NODEJS: PathBuf = {
use utils::find_cmd;
find_cmd( &[ "nodejs", "node", "nodejs.exe", "node.exe" ] ).expect( "nodejs not found" ).into()
};
}
#[derive(Copy, Clone, PartialEq, Eq)]
enum Target {
AsmjsUnknownEmscripten,
Wasm32UnknownEmscripten,
Wasm32UnknownUnknown
}
impl Target {
fn to_str( self ) -> &'static str {
match self {
Target::AsmjsUnknownEmscripten => "asmjs-unknown-emscripten",
Target::Wasm32UnknownEmscripten => "wasm32-unknown-emscripten",
Target::Wasm32UnknownUnknown => "wasm32-unknown-unknown"
}
}
}
use self::Target::*;
fn crate_path( crate_name: &str ) -> PathBuf {
REPOSITORY_ROOT.join( "test-crates" ).join( crate_name )
}
fn assert_builds( target: Target, crate_name: &str ) {
run( crate_path( crate_name ), &*CARGO_WEB, &["build", "--target", target.to_str()] ).assert_success();
}
fn assert_fails_to_build( target: Target, crate_name: &str ) {
run( crate_path( crate_name ), &*CARGO_WEB, &["build", "--target", target.to_str()] ).assert_failure();
}
fn assert_tests_build( target: Target, crate_name: &str ) {
run( crate_path( crate_name ), &*CARGO_WEB, &["test", "--no-run", "--target", target.to_str()] ).assert_success();
}
fn assert_tests_fail_to_build( target: Target, crate_name: &str ) {
run( crate_path( crate_name ), &*CARGO_WEB, &["test", "--no-run", "--target", target.to_str()] ).assert_failure();
}
fn assert_tests_succeed( target: Target, crate_name: &str ) {
run( crate_path( crate_name ), &*CARGO_WEB, &["test", "--nodejs", "--target", target.to_str()] ).assert_success();
}
fn assert_tests_fail( target: Target, crate_name: &str ) {
run( crate_path( crate_name ), &*CARGO_WEB, &["test", "--nodejs", "--target", target.to_str()] ).assert_failure();
}
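// Illustrative sketch (not part of the original suite): the helpers above can
// be combined directly for a one-off check, e.g.
//
//     assert_builds( Target::AsmjsUnknownEmscripten, "rlib" );
//     assert_tests_build( Target::AsmjsUnknownEmscripten, "crate-with-tests" );
//     assert_tests_succeed( Target::AsmjsUnknownEmscripten, "crate-with-tests" );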
macro_rules! common_tests { (($($attr:tt)*) $namespace:ident, $target:expr) => { mod $namespace {
use super::*;
$($attr)*
#[test]
fn build_rlib() {
assert_builds( $target, "rlib" );
}
$($attr)*
#[test]
fn build_dev_depends_on_dylib() {
assert_builds( $target, "dev-depends-on-dylib" );
}
$($attr)*
#[test]
fn build_staticlib() {
assert_builds( $target, "staticlib" );
}
$($attr)*
#[test]
fn build_workspace() {
assert_builds( $target, "workspace" );
}
$($attr)*
#[test]
fn build_conflicting_versions() {
assert_builds( $target, "conflicting-versions" );
}
$($attr)*
#[test]
fn build_requires_old_cargo_web() {
assert_builds( $target, "requires-old-cargo-web" );
}
$($attr)*
#[test]
fn build_requires_future_cargo_web_disabled_dep() {
assert_builds( $target, "req-future-cargo-web-disabled-dep" );
}
$($attr)*
#[test]
fn build_requires_future_cargo_web_dev_dep() {
assert_builds( $target, "req-future-cargo-web-dev-dep" );
}
$($attr)*
#[test]
fn build_requires_future_cargo_web_dep_dev_dep() {
assert_builds( $target, "req-future-cargo-web-dep-dev-dep" );
}
$($attr)*
#[test]
fn build_requires_future_cargo_web_build_dep() {
assert_builds( $target, "req-future-cargo-web-build-dep" );
}
$($attr)*
#[test]
fn build_compiling_under_cargo_web_env_var() {
assert_builds( $target, "compiling-under-cargo-web-env-var" );
}
$($attr)*
#[test]
fn build_depends_on_default_target_invalid() {
assert_builds( $target, "depends-on-default-target-invalid" );
}
$($attr)*
#[test]
fn test_crate_with_tests() {
assert_tests_build( $target, "crate-with-tests" );
for _ in 0..2 {
assert_tests_succeed( $target, "crate-with-tests" )
}
}
#[test]
fn test_crate_with_integration_tests() {
assert_tests_build( $target, "crate-with-integration-tests" );
for _ in 0..2 {
assert_tests_succeed( $target, "crate-with-integration-tests" );
}
}
$($attr)*
#[test]
fn failed_build_requires_future_cargo_web() {
assert_fails_to_build( $target, "requires-future-cargo-web" );
}
$($attr)*
#[test]
fn failed_build_requires_future_cargo_web_dep() {
assert_fails_to_build( $target, "req-future-cargo-web-dep" );
}
$($attr)*
#[test]
fn failed_build_requires_future_cargo_web_dep_dep() {
assert_fails_to_build( $target, "req-future-cargo-web-dep-dep" );
}
$($attr)*
#[test]
fn failed_build_requires_future_cargo_web_dep_and_dev_dep() {
assert_fails_to_build( $target, "req-future-cargo-web-dep-and-dev-dep" );
}
$($attr)*
#[test]
fn failed_test_requires_future_cargo_web_dev_dep() {
assert_tests_fail_to_build( $target, "req-future-cargo-web-dev-dep" );
}
$($attr)*
#[test]
fn prepend_js() {
let cwd = crate_path( "prepend-js" );
assert_builds( $target, "prepend-js" );
// TODO: We should run cargo-web with `--message-format=json` and grab this path automatically.
let build_dir = if $target == Wasm32UnknownUnknown { "release" } else { "debug" };
let output = cwd.join( "target" ).join( $target.to_str() ).join( build_dir ).join( "prepend-js.js" );
assert_file_contains( output, "alert('THIS IS A TEST');" );
}
$($attr)*
#[test]
fn virtual_manifest() {
let cwd = crate_path( "virtual-manifest" );
run( &cwd, &*CARGO_WEB, &["build", "--target", $target.to_str()] ).assert_failure();
run( &cwd, &*CARGO_WEB, &["build", "-p", "child", "--target", $target.to_str()] ).assert_success();
run( &cwd, &*CARGO_WEB, &["test", "--no-run", "--target", $target.to_str()] ).assert_failure();
run( &cwd, &*CARGO_WEB, &["test", "--no-run", "-p", "child", "--target", $target.to_str()] ).assert_success();
run( &cwd, &*CARGO_WEB, &["deploy", "--target", $target.to_str()] ).assert_failure();
run( &cwd, &*CARGO_WEB, &["deploy", "-p", "child", "--target", $target.to_str()] ).assert_success();
assert_file_missing( cwd.join( "child/target/deploy" ) );
assert_file_exists( cwd.join( "target/deploy" ) );
}
$($attr)*
#[test]
fn failing_test() {
assert_tests_build( $target, "failing-test" );
assert_tests_fail( $target, "failing-test" );
}
$($attr)*
#[test]
fn failing_integration_test() {
assert_tests_build( $target, "failing-integration-test" );
assert_tests_fail( $target, "failing-integration-test" );
}
$($attr)*
#[test]
fn failing_integration_test_crate_types() {
assert_tests_build( $target, "failing-integration-test-crate-types" );
assert_tests_fail( $target, "failing-integration-test-crate-types" );
}
$($attr)*
#[test]
fn async_normal_test_with_nodejs() {
let crate_name = "async-tests";
assert_tests_build( $target, crate_name );
let result = run( crate_path( crate_name ), &*CARGO_WEB, &["test", "--nodejs", "--target", $target.to_str(), "--", "normal_test"] );
assert!( !result.output().contains( "async test(s)" ) );
if $target != Wasm32UnknownUnknown {
// Normal tests don't output anything on this target.
assert!( result.output().contains( "test normal_test ... ok" ) );
assert!( result.output().contains( "test result (async): ok. 0 passed; 0 failed" ) );
}
result.assert_success();
}
$($attr)*
#[test]
fn async_test_ok_with_nodejs() {
let crate_name = "async-tests";
assert_tests_build( $target, crate_name );
let result = run( crate_path( crate_name ), &*CARGO_WEB, &["test", "--nodejs", "--target", $target.to_str(), "--", "ok"] );
assert!( result.output().contains( "running 1 async test(s)" ) );
assert!( result.output().contains( "test ok ... ok" ) );
assert!( result.output().contains( "test result (async): ok. 1 passed; 0 failed" ) );
result.assert_success();
}
$($attr)*
#[test]
fn async_test_panic_with_nodejs() {
let crate_name = "async-tests";
assert_tests_build( $target, crate_name );
let result = run( crate_path( crate_name ), &*CARGO_WEB, &["test", "--nodejs", "--target", $target.to_str(), "--", "panic"] );
assert!( result.output().contains( "running 1 async test(s)" ) );
assert!( result.output().contains( "test panic ... FAILED" ) );
assert!( result.output().contains( "test result (async): FAILED. 0 passed; 1 failed" ) );
result.assert_failure();
}
$($attr)*
#[test]
fn async_test_timeout_with_nodejs() {
let crate_name = "async-tests";
assert_tests_build( $target, crate_name );
let result = run( crate_path( crate_name ), &*CARGO_WEB, &["test", "--nodejs", "--target", $target.to_str(), "--", "timeout"] );
assert!( result.output().contains( "running 1 async test(s)" ) );
assert!( result.output().contains( "test timeout ... FAILED" ) );
assert!( result.output().contains( "test result (async): FAILED. 0 passed; 1 failed" ) );
result.assert_failure();
}
$($attr)*
#[test]
fn async_normal_test_with_chromium() {
let crate_name = "async-tests";
assert_tests_build( $target, crate_name );
let result = run( crate_path( crate_name ), &*CARGO_WEB, &["test", "--target", $target.to_str(), "--", "normal_test"] );
assert!( !result.output().contains( "async test(s)" ) );
if $target != Wasm32UnknownUnknown {
assert!( result.output().contains( "test normal_test ... ok" ) );
assert!( result.output().contains( "test result (async): ok. 0 passed; 0 failed" ) );
}
result.assert_success();
}
$($attr)*
#[test]
fn async_test_ok_with_chromium() {
let crate_name = "async-tests";
assert_tests_build( $target, crate_name );
let result = run( crate_path( crate_name ), &*CARGO_WEB, &["test", "--target", $target.to_str(), "--", "ok"] );
assert!( result.output().contains( "running 1 async test(s)" ) );
assert!( result.output().contains( "test ok ... ok" ) );
assert!( result.output().contains( "test result (async): ok. 1 passed; 0 failed" ) );
result.assert_success();
}
$($attr)*
#[test]
fn async_test_panic_with_chromium() {
let crate_name = "async-tests";
assert_tests_build( $target, crate_name );
let result = run( crate_path( crate_name ), &*CARGO_WEB, &["test", "--target", $target.to_str(), "--", "panic"] );
assert!( result.output().contains( "running 1 async test(s)" ) );
assert!( result.output().contains( "test panic ... FAILED" ) );
assert!( result.output().contains( "test result (async): FAILED. 0 passed; 1 failed" ) );
result.assert_failure();
}
$($attr)*
#[test]
fn async_test_timeout_with_chromium() {
let crate_name = "async-tests";
assert_tests_build( $target, crate_name );
let result = run( crate_path( crate_name ), &*CARGO_WEB, &["test", "--target", $target.to_str(), "--", "timeout"] );
assert!( result.output().contains( "running 1 async test(s)" ) );
assert!( result.output().contains( "test timeout ... FAILED" ) );
assert!( result.output().contains( "test result (async): FAILED. 0 passed; 1 failed" ) );
result.assert_failure();
}
}}}
common_tests!( () asmjs_unknown_emscripten, Target::AsmjsUnknownEmscripten );
common_tests!( () wasm32_unknown_emscripten, Target::Wasm32UnknownEmscripten );
common_tests!( (#[cfg_attr(not(test_wasm32_unknown_unknown), ignore)]) wasm32_unknown_unknown, Target::Wasm32UnknownUnknown );
#[test]
fn build_requires_future_cargo_web_target_dep() {
assert_builds( AsmjsUnknownEmscripten, "req-future-cargo-web-target-dep" );
assert_fails_to_build( Wasm32UnknownEmscripten, "req-future-cargo-web-target-dep" );
}
#[test]
fn link_args_per_target() {
let cwd = crate_path( "link-args-per-target" );
// In Web.toml of the test crate we set a different `EXPORT_NAME` link-arg
// for each target and we check if it's actually used by Emscripten.
assert_builds( AsmjsUnknownEmscripten, "link-args-per-target" );
assert_file_contains( cwd.join( "target/asmjs-unknown-emscripten/debug/link-args-per-target.js" ), "CustomExportNameAsmJs" );
assert_builds( Wasm32UnknownEmscripten, "link-args-per-target" );
assert_file_contains( cwd.join( "target/wasm32-unknown-emscripten/debug/link-args-per-target.js" ), "CustomExportNameWasm" );
if cfg!( test_wasm32_unknown_unknown ) {
// This has no flags set, but should still compile.
assert_builds( Wasm32UnknownUnknown, "link-args-per-target" );
}
}
#[test]
fn link_args_for_emscripten() {
let cwd = crate_path( "link-args-for-emscripten" );
// Here we set the same flag for both targets in a single target section.
assert_builds( AsmjsUnknownEmscripten, "link-args-for-emscripten" );
assert_file_contains( cwd.join( "target/asmjs-unknown-emscripten/debug/link-args-for-emscripten.js" ), "CustomExportNameEmscripten" );
assert_builds( Wasm32UnknownEmscripten, "link-args-for-emscripten" );
assert_file_contains( cwd.join( "target/wasm32-unknown-emscripten/debug/link-args-for-emscripten.js" ), "CustomExportNameEmscripten" );
if cfg!( test_wasm32_unknown_unknown ) {
// This has no flags set, but should still compile.
assert_builds( Wasm32UnknownUnknown, "link-args-for-emscripten" );
}
}
#[test]
fn build_depends_on_prepend_js_two_targets() {
let cwd = crate_path( "depends-on-prepend-js-two-targets" );
run( &cwd, &*CARGO_WEB, &["build", "--target", "asmjs-unknown-emscripten"] ).assert_success();
assert_file_contains( cwd.join( "target/asmjs-unknown-emscripten/debug/depends-on-prepend-js-two-targets.js" ), "alert('THIS IS A TEST');" );
run( &cwd, &*CARGO_WEB, &["build", "--target", "wasm32-unknown-emscripten"] ).assert_success();
assert_file_contains( cwd.join( "target/wasm32-unknown-emscripten/debug/depends-on-prepend-js-two-targets.js" ), "alert('THIS IS A TEST');" );
}
#[test]
fn default_target_asmjs_unknown_emscripten() {
let cwd = crate_path( "default-target-asmjs-unknown-emscripten" );
run( &cwd, &*CARGO_WEB, &["build"] ).assert_success();
assert_file_exists( cwd.join( "target/asmjs-unknown-emscripten/debug/default-target-asmjs-unknown-emscripten.js" ) );
run( &cwd, &*CARGO_WEB, &["test", "--no-run"] ).assert_success();
run( &cwd, &*CARGO_WEB, &["deploy"] ).assert_success();
}
#[test]
fn default_target_wasm32_unknown_emscripten() {
let cwd = crate_path( "default-target-wasm32-unknown-emscripten" );
run( &cwd, &*CARGO_WEB, &["build"] ).assert_success();
assert_file_exists( cwd.join( "target/wasm32-unknown-emscripten/debug/default-target-wasm32-unknown-emscripten.js" ) );
run( &cwd, &*CARGO_WEB, &["test", "--no-run"] ).assert_success();
run( &cwd, &*CARGO_WEB, &["deploy"] ).assert_success();
}
#[test]
fn default_target_invalid() {
let cwd = crate_path( "default-target-invalid" );
run( &cwd, &*CARGO_WEB, &["build"] ).assert_failure();
run( &cwd, &*CARGO_WEB, &["test", "--no-run"] ).assert_failure();
run( &cwd, &*CARGO_WEB, &["deploy"] ).assert_failure();
}
#[cfg_attr(not(test_wasm32_unknown_unknown), ignore)]
#[test]
fn build_and_run_native_wasm() {
let cwd = crate_path( "native-webasm" );
assert_builds( Target::Wasm32UnknownUnknown, "native-webasm" );
run( &cwd, &*NODEJS, &["run.js"] ).assert_success();
}
#[cfg_attr(not(test_wasm32_unknown_unknown), ignore)]
#[test]
fn cdylib() {
let cwd = crate_path( "cdylib" );
run( &cwd, &*CARGO_WEB, &["build", "--target", "wasm32-unknown-unknown"] ).assert_success();
run( &cwd, &*CARGO_WEB, &["deploy", "--target", "wasm32-unknown-unknown"] ).assert_success();
run( &cwd, &*NODEJS, &[cwd.join( "target/wasm32-unknown-unknown/release/cdylib.js" )] ).assert_success();
}
#[cfg_attr(not(test_wasm32_unknown_unknown), ignore)]
#[test]
fn default_target_wasm32_unknown_unknown() {
let cwd = crate_path( "default-target-wasm32-unknown-unknown" );
run( &cwd, &*CARGO_WEB, &["build"] ).assert_success();
assert_file_exists( cwd.join( "target/wasm32-unknown-unknown/release/default-target-wasm32-unknown-unknown.js" ) );
run( &cwd, &*CARGO_WEB, &["deploy"] ).assert_success();
}
#[cfg_attr(not(test_wasm32_unknown_unknown), ignore)]
#[test]
fn prepend_js_includable_only_once() {
let cwd = crate_path( "prepend-js-includable-only-once" );
run( &cwd, &*CARGO_WEB, &["build", "--release", "--target", "wasm32-unknown-unknown"] ).assert_success();
run( &cwd, &*NODEJS, &[cwd.join( "target/wasm32-unknown-unknown/release/prepend-js-includable-only-once.js" )] ).assert_success();
}
#[test]
fn | () {
let cwd = crate_path( "static-files" );
use std::str::FromStr;
use reqwest::header::ContentType;
use reqwest::StatusCode;
use reqwest::mime::Mime;
run( &cwd, &*CARGO_WEB, &["build"] ).assert_success();
let _child = run_in_the_background( &cwd, &*CARGO_WEB, &["start"] );
let start = Instant::now();
let mut response = None;
while start.elapsed() < Duration::from_secs( 10 ) && response.is_none() {
thread::sleep( Duration::from_millis( 100 ) );
response = reqwest::get( "http://localhost:8000" ).ok();
}
let response = response.unwrap();
assert_eq!( response.status(), StatusCode::Ok );
assert_eq!( *response.headers().get::< ContentType >().unwrap(), ContentType::html() );
let mut response = reqwest::get( "http://localhost:8000/subdirectory/dummy.json" ).unwrap();
assert_eq!( response.status(), StatusCode::Ok );
assert_eq!( *response.headers().get::< ContentType >().unwrap(), ContentType::json() );
assert_eq!( response.text().unwrap(), "{}" );
let mut response = reqwest::get( "http://localhost:8000/static-files.js" ).unwrap();
assert_eq!( response.status(), StatusCode::Ok );
assert_eq!( *response.headers().get::< ContentType >().unwrap(), ContentType( Mime::from_str( "application/javascript" ).unwrap() ) );
assert_eq!( response.text().unwrap(), read_to_string( cwd.join( "target/asmjs-unknown-emscripten/debug/static-files.js" ) ) );
// TODO: Move this to its own test?
let mut response = reqwest::get( "http://localhost:8000/__cargo-web__/build_hash" ).unwrap();
assert_eq!( response.status(), StatusCode::Ok );
let build_hash = response.text().unwrap();
let mut response = reqwest::get( "http://localhost:8000/__cargo-web__/build_hash" ).unwrap();
assert_eq!( response.status(), StatusCode::Ok );
assert_eq!( response.text().unwrap(), build_hash ); // Hash didn't change.
touch_file( cwd.join( "src/main.rs" ) );
let start = Instant::now();
let mut found = false;
while start.elapsed() < Duration::from_secs( 10 ) && !found {
thread::sleep( Duration::from_millis( 100 ) );
let mut response = reqwest::get( "http://localhost:8000" ).unwrap();
assert_eq!( response.status(), StatusCode::Ok );
let new_build_hash = response.text().unwrap();
found = new_build_hash != build_hash;
}
assert!( found, "Touching a source file didn't change the build hash!" );
}
#[test]
fn requires_future_cargo_web_cfg_dep() {
assert_builds( Wasm32UnknownUnknown, "req-future-cargo-web-cfg-dep" );
assert_fails_to_build( Wasm32UnknownEmscripten, "req-future-cargo-web-cfg-dep" );
}
#[test]
fn requires_future_cargo_web_cfg_not_dep() {
assert_fails_to_build( Wasm32UnknownUnknown, "req-future-cargo-web-cfg-not-dep" );
assert_builds( Wasm32UnknownEmscripten, "req-future-cargo-web-cfg-not-dep" );
}
| static_files |
validation_test.go | // Copyright 2018 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package validation
import (
"strings"
"testing"
)
// scenario is a common struct used by many tests in this context.
type scenario struct {
wrapFunc func(*WebhookParameters)
expectedError string
}
func TestValidate(t *testing.T) {
scenarios := map[string]scenario{
"valid": {
wrapFunc: func(args *WebhookParameters) {},
expectedError: "",
},
"invalid deployment namespace": {
wrapFunc: func(args *WebhookParameters) { args.DeploymentAndServiceNamespace = "_/invalid" },
expectedError: `invalid deployment namespace: "_/invalid"`,
},
"invalid deployment name": {
wrapFunc: func(args *WebhookParameters) { args.DeploymentName = "_/invalid" },
expectedError: `invalid deployment name: "_/invalid"`,
},
"invalid service name": {
wrapFunc: func(args *WebhookParameters) { args.ServiceName = "_/invalid" },
expectedError: `invalid service name: "_/invalid"`,
},
"missing deployment namespace": {
wrapFunc: func(args *WebhookParameters) { args.DeploymentAndServiceNamespace = "" },
expectedError: `invalid deployment namespace: ""`,
},
"missing deployment name": {
wrapFunc: func(args *WebhookParameters) { args.DeploymentName = "" },
expectedError: `invalid deployment name: ""`,
},
"missing service name": {
wrapFunc: func(args *WebhookParameters) { args.ServiceName = "" },
expectedError: `invalid service name: ""`,
},
"webhook unset": {
wrapFunc: func(args *WebhookParameters) { args.WebhookConfigFile = "" },
expectedError: "webhookConfigFile not specified",
},
"cert unset": {
wrapFunc: func(args *WebhookParameters) { args.CertFile = "" },
expectedError: "cert file not specified",
},
"key unset": {
wrapFunc: func(args *WebhookParameters) { args.KeyFile = "" },
expectedError: "key file not specified",
},
"ca cert unset": {
wrapFunc: func(args *WebhookParameters) { args.CACertFile = "" },
expectedError: "CA cert file not specified",
},
"invalid port": {
wrapFunc: func(args *WebhookParameters) { args.Port = 100000 },
expectedError: "port number 100000 must be in the range 1024..65535",
},
}
for name, scenario := range scenarios {
t.Run(name, func(tt *testing.T) {
runTestCode(name, tt, scenario)
})
}
}
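// A further scenario (hypothetical, for illustration only) would follow the same
// shape; it assumes Validate rejects any out-of-range port with the message
// format shown above:
//
//	"port too large": {
//		wrapFunc:      func(args *WebhookParameters) { args.Port = 70000 },
//		expectedError: "port number 70000 must be in the range 1024..65535",
//	},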
func runTestCode(name string, t *testing.T, test scenario) {
args := DefaultArgs()
// Set a webhook config file, then apply the scenario's overrides.
args.WebhookConfigFile = "/etc/istio/config/validatingwebhookconfiguration.yaml"
test.wrapFunc(args)
err := args.Validate()
if err == nil && test.expectedError != "" {
t.Errorf("Test %q failed: expected error: %q, got nil", name, test.expectedError)
}
if err != nil {
if test.expectedError == "" { | if !strings.Contains(err.Error(), test.expectedError) {
t.Errorf("Test %q failed: expected error: %q, got %q", name, test.expectedError, err.Error())
}
}
// Should not return an error when validation is disabled.
args.EnableValidation = false
if err := args.Validate(); err != nil {
t.Errorf("Test %q failed with validation disabled, expected nil error, but got: %v", name, err)
}
} | t.Errorf("Test %q failed: expected nil error, got %v", name, err)
} |
graph.ts | /* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the 'License');
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an 'AS IS' BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
module tf.graph {
/** Delimiter used in node names to denote namespaces. */
export const NAMESPACE_DELIM = '/';
export const ROOT_NAME = '__root__';
export const FUNCTION_LIBRARY_NODE_PREFIX = '__function_library__';
/** Attribute key used for storing attributes that are too large. */
export const LARGE_ATTRS_KEY = '_too_large_attrs';
/**
* Maximum allowed size in bytes, before the attribute is considered large
* and filtered out of the graph.
*/
export const LIMIT_ATTR_SIZE = 1024;
// Separator between the source and the destination name of the edge.
export const EDGE_KEY_DELIM = '--';
export enum GraphType {FULL, EMBEDDED, META, SERIES, CORE, SHADOW, BRIDGE,
EDGE};
export enum NodeType {META, OP, SERIES, BRIDGE, ELLIPSIS};
/** Indicates if a node is to be included in the main graph when rendered. */
export enum InclusionType {INCLUDE, EXCLUDE, UNSPECIFIED};
/** Indicates if a series is to be grouped in the graph when rendered. */
export enum SeriesGroupingType {GROUP, UNGROUP};
/** Attribute key reserved for the shapes of the output tensors. */
const OUTPUT_SHAPES_KEY = '_output_shapes';
/** Attribute key reserved for the XLA cluster that an op runs on. */
const _XLA_CLUSTER_KEY = '_XlaCluster';
/**
* A BaseEdge is the label object (in the graphlib sense) for an edge in the
* original, full graph produced after parsing. Subsequent graphs, like those
* which belong to Metanodes, should not use BaseEdge objects, but instead
* contain Metaedges (which in turn may contain any number of BaseEdges).
*/
export interface BaseEdge extends graphlib.EdgeObject {
isControlDependency: boolean;
isReferenceEdge: boolean;
/** The index of the output tensor of the source node. */
outputTensorKey: string;
}
/**
* A SlimGraph is inspired by graphlib.Graph, but has only the functionality
* that we need.
*/
export class SlimGraph {
nodes: { [nodeName: string]: OpNode };
edges: BaseEdge[];
constructor() {
this.nodes = {};
this.edges = [];
}
}
export interface NormalizedInput {
name: string;
/** The index of the output tensor of the source node. */
outputTensorKey: string;
isControlDependency: boolean;
}
export interface BuildParams {
enableEmbedding: boolean;
inEmbeddingTypes: string[];
outEmbeddingTypes: string[];
refEdges: { [inputEdge: string]: boolean };
}
/**
* The most basic information about a node in the hierarchical graph.
*/
export interface Node {
/** The name of the node, used frequently to look up nodes by name. */
name: string;
/** Which type of node this is. */
type: NodeType;
/**
* Whether this node is a type that may contain other nodes. Those types
* should extend from GroupNode.
*
* For an OpNode, isGroupNode will be false, even though it may have
* embeddings. These embedding Nodes will have their parentNode set to the
* OpNode. However, embeddings are later rendered as annotations, not as
* children to be made visible on expansion (like a Metanode or SeriesNode).
*/
isGroupNode: boolean;
/**
* The number of nodes this node represents. For OpNodes, this will be 1, and
* for GroupNodes it will be a count of the total number of descendants it
* contains.
*/
cardinality: number;
/**
* The Node which is this Node's parent. This is of type Node and not
* GroupNode because of embeddings, which will have a parent OpNode.
*/
parentNode: Node;
/** Runtime execution stats for this node, if available */
stats: NodeStats;
/** If the node is to be included or excluded from the main graph when
* rendered. Defaults to UNSPECIFIED, which means that the rendering
* algorithm determines if it will be included or not. It can then be set to
* INCLUDE or EXCLUDE manually by the user.
*/
include: InclusionType;
/**
* Node attributes specify customizable visual aspects of a node and
* application-specific metadata associated with a node. The name
* 'nodeAttributes' is meant to avoid naming-conflicts with the 'attr' in
* subclasses of Node.
*/
nodeAttributes: {[key: string]: any;};
}
export type TensorShape = number[];
export interface OpNode extends Node {
op: string;
// The device on which the op ran. Null if it is unknown.
device: string;
attr: {key: string, value: any}[];
inputs: NormalizedInput[];
inEmbeddings: OpNode[];
outEmbeddings: OpNode[];
// The name of the SeriesNode that can contain this node in its series.
// If there is no such node, then this is null.
owningSeries: string;
/**
* Object mapping output channel string to tensor shapes. The output channel
* is a string rather than a number because within TensorFlow functions, an
* output may be a cross between an output variable and a number (combined
* with a colon) such as "foo:2" rather than just a number alone.
*
* Each tensor shape is an array of numbers, or null. Details:
* - null means unknown rank, and therefore entire shape is unknown.
* - [4, 2, 1] means rank-3 tensor of size 4x2x1.
* - [] means a scalar (rank-0 tensor).
* - [1] means rank-1 tensor of size 1 (not the same as scalar).
* - [5, -1, 3] means rank-3 tensor of shape is 5x?x3. The size
* of the middle dimension is unknown (encoded as -1).
*/
outputShapes: {[key: string]: TensorShape;};
// The XLA Cluster on which the op ran. Null if it is unknown.
xlaCluster: string;
// Whether the op is compatible with its assigned device. Currently, if an op
// is not assigned a device, the device defaults to the TPU.
// Furthermore, all ops are considered compatible with CPU and GPU devices,
// while a whitelist of compatible ops is specified for the TPU.
// Reference: opValid func in op.ts.
compatible: boolean;
// This field is only defined if the op node represents an input_arg to a
// library function. It is the index of the input_arg.
functionInputIndex: number;
// This field is only defined if the op node represents an output_arg of a
// library function. It is the index of the output_arg.
functionOutputIndex: number;
}
export interface BridgeNode extends Node {
/**
* Whether this bridge node represents edges coming into its parent node.
*/
inbound: boolean;
}
/**
* A node that is used when there are more than the maximum number of allowed
* annotations hanging off of a node. This node represents an ellipsis
* annotation, indicating a number of additional annotations.
*/
export interface EllipsisNode extends Node {
/**
* The number of nodes this ellipsis represents.
*/
numMoreNodes: number;
/**
* Sets the number of nodes this ellipsis represents and changes the node
* name accordingly.
*/
setNumMoreNodes(numNodes: number);
}
export interface GroupNode extends Node {
/**
* The metagraph contains nodes and metaedges between the immediate children
* of this group. The node label objects may be other GroupNodes (like
* SeriesNodes and Metanodes) or individual OpNodes. All edge label objects
* are Metaedges, each of which contains references to the original
* BaseEdge(s) from which it was created.
*/
metagraph: graphlib.Graph<GroupNode|OpNode, Metaedge>;
/**
* The bridgegraph contains only edges which link immediate children of this
* group with nodes outside of the metagraph. As in the metagraph, all edge
* label objects are Metaedges which contain references to the original
* BaseEdge(s) that contribute to it.
*
* For a Metaedge in the bridgegraph, its external endpoint will be the same
* as the metagraph edge from which it came. This is most easily explained
* by example.
*
* Consider an original graph that contains a BaseEdge A/B/C->Z/Y/X.
*
* +-------+ (BaseEdge) +-------+
* | A/B/C |>----------------->| Z/Y/X |
* +-------+ +-------+
*
* When we construct the Root's metagraph, it will contain nodes for A and Z,
* and a Metaedge A->Z. The A->Z Metaedge will contain the original BaseEdge
* A/B/C->Z/Y/X in its baseEdgeGraph. The Root's bridgegraph will always be
* empty.
*
* +---+ (Root.metagraph edge) +---+
* | A |>--------------------------->| Z |
* +---+ +---+
*
* Now consider the Metanode A. Its metagraph will contain a Metanode for A/B
* and no edges. A's bridgegraph will have one Metaedge from A/B->Z, which
* was derived from the Root's Metaedge A->Z. That Metaedge will contain the
* original BaseEdge in its baseEdgeGraph.
*
* +---------+
* | A |
* | +---+ | (A.bridgegraph edge) +---+
* | | B |>---------------------------->| Z |
* | +---+ | +---+
* +---------+
*
* Finally, consider the Metanode A/B. Its metagraph will contain a Metanode
* for A/B/C and again no edges. A/B's bridgegraph will have one Metaedge
* from A/B/C->Z, which was derived from A's bridgegraph Metaedge A/B->Z.
* As before, the A/B/C->Z Metaedge will contain the original BaseEdge in its
* baseEdgeGraph.
*
* +---------------+
* | A |
* | +---------+ |
* | | B | |
* | | +---+ | | (A/B.bridgegraph edge) +---+
* | | | C |>----------------------------------->| Z |
* | | +---+ | | +---+
* | +---------+ |
* +---------------+
*
* Likewise, under the Metanode Z and Z/Y, to compute the bridgegraph, we'll
* end up with Metaedges A->Z/Y and A->Z/Y/X respectively. So the original
* BaseEdge A/B/C->Z/Y/X becomes four different Metaedges in four different
* bridgegraphs:
*
* + A/B->Z in GroupNode A's bridgegraph,
* + A/B/C->Z in GroupNode A/B's bridgegraph,
* + A->Z/Y in GroupNode Z's bridgegraph, and
* + A->Z/Y/X in GroupNode Z/Y's bridgegraph.
*
* Considering any BaseEdge then, if N is the number of path segments in the
* source and M is the number of path segments in the destination, then the
* total number of bridgegraph edges you could create would be (N-1)(M-1).
*
* For this reason, it is computationally expensive to generate all the
* bridgegraphs for all the Metanodes, and instead they should be computed
* on demand as needed.
*/
bridgegraph: graphlib.Graph<GroupNode|OpNode, Metaedge>;
/**
* Stores how many times each device name appears in its children
* op nodes. Used to color group nodes by devices.
*/
deviceHistogram: {[device: string]: number};
/**
* Stores how many times each XLA cluster name appears in its children
* op nodes. Used to color group nodes by XLA clusters.
*/
xlaClusterHistogram: {[device: string]: number};
/**
* Stores how many ops in sub-graph were compatible and how many are
* incompatible.
*/
compatibilityHistogram: {compatible: number, incompatible: number}
/**
* Flag indicating whether this GroupNode's metagraph contains any edges that
* are not control edges. Used to quickly determine how to draw a collapsed
* series (vertically or horizontally).
*/
hasNonControlEdges: boolean;
}
export interface Metanode extends GroupNode {
depth: number;
templateId: string;
opHistogram: {[op: string]: number};
// The name of the function this metanode is associated with if any.
associatedFunction: string;
getFirstChild(): GroupNode|OpNode;
getRootOp(): OpNode;
/** Return the names of all leaves inside a metanode. */
leaves(): string[];
}
export interface SeriesNode extends GroupNode {
hasLoop: boolean;
prefix: string;
suffix: string;
clusterId: number;
ids: number[];
parent: string;
}
export class EllipsisNodeImpl implements EllipsisNode {
name: string;
numMoreNodes: number;
stats: NodeStats;
type: NodeType;
isGroupNode: boolean;
cardinality: number;
parentNode: Node;
include: InclusionType;
nodeAttributes: {[key: string]: any;};
/**
* Constructs a new ellipsis annotation node.
*
* @param numNodes The number of additional annotations this node represents.
*/
constructor(numNodes: number) {
this.type = NodeType.ELLIPSIS;
this.isGroupNode = false;
this.cardinality = 1;
this.parentNode = null;
this.stats = null;
this.setNumMoreNodes(numNodes);
this.include = InclusionType.UNSPECIFIED;
}
setNumMoreNodes(numNodes: number) {
this.numMoreNodes = numNodes;
this.name = '... ' + numNodes + ' more';
}
};
/**
* A label object for nodes in the full graph and leaf nodes in the render
* graph.
*/
export class OpNodeImpl implements OpNode {
name: string;
op: string;
device: string;
stats: NodeStats;
attr: {key: string, value: any}[];
inputs: NormalizedInput[];
type: NodeType;
isGroupNode: boolean;
cardinality: number;
inEmbeddings: OpNode[];
outEmbeddings: OpNode[];
parentNode: Node;
include: InclusionType;
owningSeries: string;
outputShapes: {[key: string]: TensorShape;};
nodeAttributes: {[key: string]: any;};
xlaCluster: string;
compatible: boolean;
// This field is only defined if the op node represents an input_arg to a
// library function. It is the index of the input_arg.
functionInputIndex: number;
// This field is only defined if the op node represents an output_arg of a
// library function. It is the index of the output_arg.
functionOutputIndex: number;
/**
* Constructs a new Op node.
*
* @param rawNode The raw node.
*/
constructor(rawNode: tf.graph.proto.NodeDef) {
this.op = rawNode.op;
this.name = rawNode.name;
this.device = rawNode.device;
this.attr = rawNode.attr;
// An array of normalized inputs that denote the incoming edges to
// the current node. Each input contains the normalized name of the
// source node, whether it has a number part and whether it is a
// control dependency.
this.inputs = normalizeInputs(rawNode.input);
this.outputShapes = extractOutputShapes(rawNode.attr);
this.xlaCluster = extractXlaCluster(rawNode.attr);
this.compatible = false;
// additional properties
this.type = NodeType.OP;
this.isGroupNode = false;
this.cardinality = 1;
this.inEmbeddings = [];
this.outEmbeddings = [];
this.parentNode = null;
this.include = InclusionType.UNSPECIFIED;
this.owningSeries = null;
}
};
export function createMetanode(name: string, opt = {}): Metanode {
return new MetanodeImpl(name, opt);
}
/**
* Joins the information from the stats file (memory, compute time) with the
* graph information.
*/
export function joinStatsInfoWithGraph(
graph: SlimGraph, stats: tf.graph.proto.StepStats,
devicesForStats?: {[device: string]: boolean}): void {
// Reset stats for each node.
_.each(graph.nodes, node => { node.stats = null; });
_.each(stats.dev_stats, devStats => {
// Ignore devices that are not selected.
if (devicesForStats && !devicesForStats[devStats.device]) {
return;
}
_.each(devStats.node_stats, nodeStats => {
// Lookup the node in the graph by its original name, e.g. A/B. If not
// found, lookup by the rewritten name A/B/(B) in case the name is both
// a namespace and a node name.
let nodeName = nodeStats.node_name in graph.nodes ?
nodeStats.node_name :
getStrictName(nodeStats.node_name);
// Couldn't find a matching node.
if (!(nodeName in graph.nodes)) {
return;
}
// Compute the total bytes used.
let totalBytes = 0;
if (nodeStats.memory) {
_.each(nodeStats.memory, alloc => {
if (alloc.total_bytes) {
if (alloc.total_bytes > 0) {
totalBytes += Number(alloc.total_bytes);
} else {
/* tslint:disable */
console.log(
'ignoring negative memory allocation for ' + nodeName);
/* tslint:enable */
}
}
});
}
let outputSize: number[][] = null;
if (nodeStats.output) {
outputSize = _.map(nodeStats.output, output => {
return _.map(output.tensor_description.shape.dim,
dim => Number(dim.size));
});
}
graph.nodes[nodeName].device = devStats.device;
if (graph.nodes[nodeName].stats == null) {
graph.nodes[nodeName].stats = new NodeStats(outputSize);
}
graph.nodes[nodeName].stats.addBytesAllocation(totalBytes);
if (nodeStats.all_end_rel_micros) {
if (nodeStats.all_end_rel_micros > 0) {
graph.nodes[nodeName].stats.addExecutionTime(
nodeStats.all_start_micros,
nodeStats.all_start_micros + nodeStats.all_end_rel_micros);
} else {
/* tslint:disable */
console.log('ignoring negative runtime for ' + nodeName);
/* tslint:enable */
}
}
});
});
}
/**
* Execution stats for the node.
*/
export class NodeStats {
constructor(outputSize: number[][]) { this.outputSize = outputSize; }
/**
* Add the start and end time for a particular kernel execution of this op.
* Ops can have multiple kernel executions within the same session run.
*/
addExecutionTime(startTime: number, endTime: number) {
if (this.startTime != null) {
this.startTime = Math.min(this.startTime, startTime);
} else {
this.startTime = startTime;
}
if (this.endTime != null) {
this.endTime = Math.max(this.endTime, endTime);
} else {
this.endTime = endTime;
}
}
/**
* Add the bytes allocated for a particular kernel execution of this op.
* Ops can have multiple kernel executions within the same session run.
*/
addBytesAllocation(totalBytes: number) {
if (this.totalBytes != null) {
this.totalBytes = Math.max(this.totalBytes, totalBytes);
} else {
this.totalBytes = totalBytes;
}
}
/**
* Absolute start time for the very first kernel execution of this op.
*/
startTime: number;
/**
* Absolute end time for the very last kernel execution of this op.
*/
endTime: number;
/**
* Total number of bytes used for the node. Sum of all children
* if it is a Group node.
*/
totalBytes = 0;
/**
* The shape of each output tensors, if there are any.
* Empty if it is a Group node.
*/
outputSize: number[][];
/**
* Combines the specified stats with the current stats.
* Modifies the current object. This method is used to
* compute aggregate stats for group nodes.
*/
combine(stats: NodeStats): void {
if (stats.totalBytes != null) {
this.totalBytes += stats.totalBytes;
}
if (stats.getTotalMicros() != null) {
this.addExecutionTime(stats.startTime, stats.endTime);
}
}
/**
* Total compute time in microseconds used for the node.
* Sum of all children if it is a Group node. Null if it is unknown.
* This method can not be scaffolded under a getter attribute because
* ECMAScript 5 does not support getter attributes.
*/
getTotalMicros(): number {
if (this.startTime == null || this.endTime == null) {
return null;
}
return this.endTime - this.startTime;
}
}
export class MetanodeImpl implements Metanode {
name: string;
stats: NodeStats;
type: NodeType;
depth: number;
isGroupNode: boolean;
cardinality: number;
metagraph: graphlib.Graph<GroupNode|OpNode, Metaedge>;
bridgegraph: graphlib.Graph<GroupNode|OpNode, Metaedge>;
templateId: string;
opHistogram: {[op: string]: number};
deviceHistogram: {[op: string]: number};
xlaClusterHistogram: {[op: string]: number};
compatibilityHistogram: {compatible: number, incompatible: number};
parentNode: Node;
hasNonControlEdges: boolean;
include: InclusionType;
nodeAttributes: {[key: string]: any;};
associatedFunction: string;
/** A label object for meta-nodes in the graph hierarchy */
constructor(name: string, opt = {}) {
this.name = name;
this.type = NodeType.META;
/** number of levels under this group */
this.depth = 1;
this.isGroupNode = true;
/** # of leaf nodes (including embedded ones) */
this.cardinality = 0;
/** graph contains metanodes, nodes, edges
* and metaedges for main items within this metanode
*/
this.metagraph =
createGraph<GroupNode|OpNode, Metaedge>(name, GraphType.META, opt);
/** bridgegraph must be constructed lazily; see hierarchy.getBridgegraph() */
this.bridgegraph = null;
/**
* A dictionary that counts the op types of nodes in this metanode
* (op type => count).
*/
this.opHistogram = {};
this.deviceHistogram = {};
this.xlaClusterHistogram = {};
this.compatibilityHistogram = {compatible: 0, incompatible: 0};
/** unique id for a metanode of similar subgraph */
this.templateId = null;
/** Metanode which contains this node, if any */
this.parentNode = null;
this.hasNonControlEdges = false;
this.include = InclusionType.UNSPECIFIED;
this.associatedFunction = '';
}
getFirstChild(): GroupNode|OpNode {
return this.metagraph.node(this.metagraph.nodes()[0]);
}
/**
* Returns the op node associated with the metanode.
* For example, if the metanode is 'sgd', the associated
* op node is sgd/(sgd).
*/
getRootOp(): OpNode {
let nameSplit = this.name.split('/');
let rootOpName = this.name + '/(' + nameSplit[nameSplit.length - 1] + ')';
return <OpNode>this.metagraph.node(rootOpName);
}
/**
* Return an array of the names of all the leaves (non-GroupNodes) inside
* this metanode. This performs a breadth-first search of the tree, so
* immediate child leaves will appear earlier in the output array than
* descendant leaves.
*/
leaves(): string[] {
let leaves = [];
let queue = [<Node> this];
let metagraph; // Defined here due to a limitation of ES6->5 compilation.
while (queue.length) {
let node = queue.shift();
if (node.isGroupNode) {
metagraph = (<GroupNode> node).metagraph;
_.each(metagraph.nodes(), name => queue.push(metagraph.node(name)));
} else {
leaves.push(node.name);
}
}
return leaves;
}
};
export interface Metaedge extends graphlib.EdgeObject {
/**
* Stores the original BaseEdges represented by this Metaedge.
*/
baseEdgeList: BaseEdge[];
/**
* Whether this edge represents a relationship that is inbound (or outbound)
* to the object which contains this information. For example, in a Metanode's
* bridgegraph, each edge connects an immediate child to something outside
* the Metanode. If the destination of the edge is inside the Metanode, then
* its inbound property should be true. If the destination is outside the
* Metanode, then its inbound property should be false.
*
* The property is optional because not all edges can be described as
* inbound/outbound. For example, in a Metanode's metagraph, all of the edges
* connect immediate children of the Metanode. None should have an inbound
* property, or they should be null/undefined.
*/
inbound?: boolean;
/**
* Number of regular edges (not control dependency edges).
*/
numRegularEdges: number;
/**
* Number of control dependency edges.
*/
numControlEdges: number;
/**
* Number of reference edges, which is an edge to an operation
* that takes a reference to its input and changes its value.
*/
numRefEdges: number;
/**
* Total size (number of units) of all the tensors flowing through this edge.
*/
totalSize: number;
addBaseEdge(edge: BaseEdge, h: hierarchy.Hierarchy): void;
}
export function createMetaedge(v: string, w: string): Metaedge {
return new MetaedgeImpl(v, w);
}
/**
* A label object for edges between metanodes of subgraphs in the render graph.
*/
export class MetaedgeImpl implements Metaedge {
v: string;
w: string;
baseEdgeList: BaseEdge[];
inbound: boolean;
numRegularEdges: number;
numControlEdges: number;
numRefEdges: number;
totalSize: number;
constructor(v: string, w: string) {
this.v = v;
this.w = w;
this.baseEdgeList = [];
this.inbound = null;
this.numRegularEdges = 0;
this.numControlEdges = 0;
this.numRefEdges = 0;
this.totalSize = 0;
}
addBaseEdge(edge: BaseEdge, h: hierarchy.Hierarchy): void {
this.baseEdgeList.push(edge);
if (edge.isControlDependency) {
this.numControlEdges += 1;
} else {
this.numRegularEdges += 1;
}
if (edge.isReferenceEdge) {
this.numRefEdges += 1;
}
// Compute the size of the tensor flowing through this
// base edge.
this.totalSize += MetaedgeImpl.computeSizeOfEdge(edge, h);
h.maxMetaEdgeSize = Math.max(h.maxMetaEdgeSize, this.totalSize);
}
private static computeSizeOfEdge(edge: BaseEdge, h: hierarchy.Hierarchy):
number {
let opNode = <OpNode> h.node(edge.v);
if (!opNode.outputShapes) {
// No shape information. Assume a single number. This gives
// a lower bound for the total size.
return 1;
}
h.hasShapeInfo = true;
// Sum the sizes of all output tensors.
return _(opNode.outputShapes).mapValues((shape: number[]) => {
// If the shape is unknown, treat it as 1 when computing
// total size. This gives a lower bound for the total size.
if (shape == null) {
return 1;
}
// Multiply all shapes to get the total size of the tensor.
// E.g. The total size of [4, 2, 1] is 4 * 2 * 1.
return _(shape).reduce((accumulated, currSize) => {
// If this particular dimension is unknown, treat
// it as 1 when computing total size. This gives a lower bound
// for the total size.
if (currSize === -1) {
currSize = 1;
}
return accumulated * currSize;
}, 1);
}).sum();
}
}
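// For instance (an illustrative example, not taken from the original source):
// if the source op's outputShapes is { '0': [4, -1, 3] }, the unknown dimension
// is treated as 1 and computeSizeOfEdge contributes 4 * 1 * 3 = 12 units to the
// Metaedge's totalSize.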
export function createSeriesNode(
prefix: string,
suffix: string,
parent: string,
clusterId: number,
name: string,
graphOptions: graphlib.GraphOptions): SeriesNode {
return new SeriesNodeImpl(
prefix, suffix, parent, clusterId, name, graphOptions);
}
export function getSeriesNodeName(prefix: string, suffix: string,
parent: string, startId?: number, endId?: number): string {
let numRepresentation =
(typeof startId !== 'undefined' && typeof endId !== 'undefined') ?
'[' + startId + '-' + endId + ']' :
'#';
let pattern = prefix + numRepresentation + suffix;
return (parent ? parent + '/' : '') + pattern;
}
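// Illustrative example (the argument values are made up): with prefix 'conv',
// suffix '_1', parent 'layer1' and the id range 0..9, the function above returns
// 'layer1/conv[0-9]_1'; when startId/endId are omitted, the '#' placeholder is
// used instead, e.g. 'layer1/conv#_1'.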
class SeriesNodeImpl implements SeriesNode {
name: string;
type: NodeType;
stats: NodeStats;
hasLoop: boolean;
prefix: string;
suffix: string;
clusterId: number;
ids: number[];
parent: string;
isGroupNode: boolean;
cardinality: number;
metagraph: graphlib.Graph<GroupNode|OpNode, Metaedge>;
bridgegraph: graphlib.Graph<GroupNode|OpNode, Metaedge>;
parentNode: Node;
deviceHistogram: {[op: string]: number};
xlaClusterHistogram: {[op: string]: number};
compatibilityHistogram: {compatible: number, incompatible: number};
hasNonControlEdges: boolean;
include: InclusionType;
nodeAttributes: {[key: string]: any;};
constructor(
prefix: string,
suffix: string,
parent: string,
clusterId: number,
name: string,
graphOptions: graphlib.GraphOptions) {
this.name = name || getSeriesNodeName(prefix, suffix, parent);
this.type = NodeType.SERIES;
this.hasLoop = false;
this.prefix = prefix;
this.suffix = suffix;
this.clusterId = clusterId;
this.ids = [];
this.parent = parent;
this.isGroupNode = true;
this.cardinality = 0;
this.metagraph = createGraph<Metanode, Metaedge>(
name, GraphType.SERIES, graphOptions);
// bridgegraph must be constructed lazily; see hierarchy.getBridgegraph()
this.bridgegraph = null;
this.parentNode = null;
this.deviceHistogram = {};
this.xlaClusterHistogram = {};
this.compatibilityHistogram = {compatible: 0, incompatible: 0};
this.hasNonControlEdges = false;
this.include = InclusionType.UNSPECIFIED;
}
}
/**
* Extracts the shapes of the output tensors from the attr property in the
* node proto.
*/
// tslint:disable-next-line:no-any
function extractOutputShapes(attr: Array<{key: string, value: any}>):
{[key: string]: TensorShape;} {
let result = null;
// We don't know anything about the output tensors.
if (!attr) {
return null;
}
for (let i = 0; i < attr.length; i++) {
let {key, value} = attr[i];
if (key === OUTPUT_SHAPES_KEY) {
if (!value.list.shape) {
// The OUTPUT_SHAPES_KEY lacks a value. We know nothing about the shape.
return null;
}
// Map all output tensors into array of numbers denoting their shape.
let result = value.list.shape.map(shape => {
if (shape.unknown_rank) {
// This output tensor is of unknown rank. We don't know if it is a
// scalar, or a tensor, or of what shape it is.
return null;
}
if (shape.dim == null ||
(shape.dim.length === 1 && shape.dim[0].size == null)) {
// This output tensor is a scalar.
return [];
}
// This output tensor has a known rank. Map each dimension size
// into a number.
return shape.dim.map(dim => {
// Size can be -1 if this particular dimension is unknown.
return dim.size;
});
});
// Since we already processed it, remove the entry from the attribute
// list (saves memory).
attr.splice(i, 1);
return result;
}
}
// We didn't find OUTPUT_SHAPES_KEY in attributes, so we don't know anything
// about the output tensors.
return null;
}
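// As a concrete illustration (assumed input, not from the source): an attr entry
// with key '_output_shapes' and value
// { list: { shape: [ { dim: [ { size: 4 }, { size: -1 } ] } ] } }
// yields [[4, -1]], i.e. a single output tensor of rank 2 whose second dimension
// is unknown.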
/**
* Extracts the XLA Cluster that an op runs on from the attrs of the OpNode.
* @param attr The attr property.
* @return A string that is the name of the cluster. Or null if it could not be
* determined.
*/
// tslint:disable-next-line:no-any
function extractXlaCluster(attr: Array<{key: string, value: any}>): string|
null {
if (!attr) {
return null;
}
| // Find the attribute for XLA cluster if there is one.
for (let i = 0; i < attr.length; i++) {
if (attr[i].key === _XLA_CLUSTER_KEY) {
return attr[i].value['s'] || null;
}
}
return null;
}
/**
* Normalizes the inputs and extracts associated metadata:
* 1) Inputs can contain a colon followed by a suffix of characters.
* That suffix may be a single number (e.g. inputName:1) or several word
* characters separated from a number by a colon (e.g. inputName:foo:1). The
* latter case is used to denote inputs and outputs of functions.
* 2) Control dependency inputs contain a caret at the beginning and we
* remove this and annotate the edge as a control dependency.
* @param inputs Array of unnormalized names of input nodes.
*/
function normalizeInputs(inputs: string[]): NormalizedInput[] {
let normalizedInputs: NormalizedInput[] = [];
_.each(inputs, inputName => {
let isControlDependency = inputName[0] === '^';
if (isControlDependency) {
// The caret merely indicates whether this input is a control dependency.
// It should not be part of the name.
inputName = inputName.substring(1);
}
let name = inputName;
let outputTensorKey = '0';
let match = inputName.match(/(.*):(\w+:\d+)$/);
if (match) {
// The output string consists of several characters and a number separated
// by a colon.
name = match[1];
outputTensorKey = match[2];
} else {
match = inputName.match(/(.*):(\d+)$/);
if (match) {
// The output string consists of a single number.
name = match[1];
outputTensorKey = match[2];
}
}
if (normalizedInputs.length === 0 ||
name !== normalizedInputs[normalizedInputs.length - 1].name) {
normalizedInputs.push({
name: name,
outputTensorKey: outputTensorKey,
isControlDependency: isControlDependency,
});
}
});
return normalizedInputs;
}
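// Example of the normalization above (illustrative values): the input
// '^foo:bar:1' becomes { name: 'foo', outputTensorKey: 'bar:1',
// isControlDependency: true }, while a plain 'foo' becomes
// { name: 'foo', outputTensorKey: '0', isControlDependency: false }.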
function addEdgeToGraph(
graph: SlimGraph, inputName: string, outputNode: OpNode,
input: NormalizedInput, params: BuildParams, index: number) {
// Don't allow loops in the graph.
if (inputName === outputNode.name) {
return;
}
// Check if this op type and input number corresponds to a
// reference edge using the refEdges dictionary in the params.
let isRefEdge = params.refEdges[outputNode.op + ' ' + index] === true;
graph.edges.push({
v: inputName,
w: outputNode.name,
outputTensorKey: input.outputTensorKey,
isControlDependency: input.isControlDependency,
isReferenceEdge: isRefEdge
});
}
export function build(
graphDef: tf.graph.proto.GraphDef, params: BuildParams,
tracker: ProgressTracker): Promise<SlimGraph|void> {
/**
* A dictionary that maps each in-embedding node name to the node
* object.
*/
let inEmbedding: {[nodeName: string]: OpNode} = {};
/**
* A dictionary that maps each out-embedding node name to the node
* object.
*/
let outEmbedding: {[nodeName: string]: OpNode} = {};
/**
* A dictionary that maps each node name to an array of the node's
* out-embedding node label objects.
*/
let outEmbeddings: {[inputName: string]: OpNode[]} = {};
let isInEmbeddedPred = getEmbedPredicate(params.inEmbeddingTypes);
let isOutEmbeddedPred = getEmbedPredicate(params.outEmbeddingTypes);
let embeddingNodeNames: string[] = [];
let rawNodes = graphDef.node;
/**
* A list of all the non-embedding node names which appear in the processed
* list of raw nodes. Here we pre-allocate enough room for all the rawNodes,
* even though there will be some number of embeddings. The excess array length
* is spliced off later.
*
* Experimentation shows that around 30% of the array will go unused, and
* even for very large networks that amounts to less than 10k spaces.
*/
let nodeNames = new Array<string>(rawNodes.length);
return tf.graph.util
.runAsyncTask(
'Normalizing names', 30,
() => {
let opNodes = new Array<OpNode>(rawNodes.length);
let index = 0;
const processRawNode = rawNode => {
let opNode = new OpNodeImpl(rawNode);
if (isInEmbeddedPred(opNode)) {
embeddingNodeNames.push(opNode.name);
inEmbedding[opNode.name] = opNode;
return opNode;
}
if (isOutEmbeddedPred(opNode)) {
embeddingNodeNames.push(opNode.name);
outEmbedding[opNode.name] = opNode;
_.each(opNode.inputs, input => {
let inputName = input.name;
outEmbeddings[inputName] = outEmbeddings[inputName] || [];
outEmbeddings[inputName].push(opNode);
});
return opNode;
}
// The node is not an embedding, so add it to the names and nodes
// lists.
opNodes[index] = opNode;
nodeNames[index] = opNode.name;
index++;
return opNode;
};
_.each(rawNodes, processRawNode);
const processFunction = (func: tf.graph.proto.FunctionDef) => {
// Give the function itself a node.
const functionNodeName =
FUNCTION_LIBRARY_NODE_PREFIX + func.signature.name;
// Create an op node for the function. Mark it as part of a
// function library.
processRawNode({
name: functionNodeName,
input: [],
device: '',
op: '',
attr: [],
});
// If the function has inputs, make nodes out of them.
if (func.signature.input_arg) {
// Makes an OpNode out of an input_arg of a library
// function.
let currentInputIndex = 0;
const processInput = (arg) => {
const opNode = processRawNode({
name: functionNodeName + NAMESPACE_DELIM + arg.name,
input: [],
device: '',
op: 'input_arg',
attr: [{
key: 'T',
value: {
type: arg.type,
},
}],
});
opNode.functionInputIndex = currentInputIndex;
currentInputIndex++;
};
// Make nodes for input args of the function. Unfortunately, the
// pbtxt configuration language is not rich enough to
// differentiate between an array with 1 item vs 1 object
// property.
if (func.signature.input_arg['name']) {
// There is only 1 input arg.
processInput(func.signature.input_arg);
} else {
// There are several input args.
_.each(func.signature.input_arg, processInput);
}
}
// Make nodes for output args of the function. Track the names of
// output args within the keys of this object. Unlike the
// input_args, the output_args are already defined within the
// node_defs of the library function.
let currentOutputIndex = 0;
const outputArgNames = {};
// If the function has outputs, make nodes out of them.
if (func.signature.output_arg) {
const processOutput = arg => {
outputArgNames[
functionNodeName + NAMESPACE_DELIM + arg.name] =
currentOutputIndex;
currentOutputIndex++;
};
if (func.signature.output_arg['name']) {
// There is only 1 output arg.
processOutput(func.signature.output_arg);
} else {
// There are several output args.
_.each(func.signature.output_arg, processOutput);
}
}
_.each(func.node_def, rawNode => {
// Prefix with the name of the function so that the graph
// correctly computes the hierarchy (and makes metanodes).
rawNode.name = functionNodeName + '/' + rawNode.name;
if (typeof rawNode.input === 'string') {
rawNode.input = [rawNode.input];
}
const opNode = processRawNode(rawNode);
if (_.isNumber(outputArgNames[rawNode.name])) {
// Mark the node as one of the outputs of the function.
opNode.functionOutputIndex = outputArgNames[rawNode.name];
}
_.each(opNode.inputs, normalizedInput => {
normalizedInput.name =
functionNodeName + NAMESPACE_DELIM + normalizedInput.name;
});
});
};
if (graphDef.library && graphDef.library.function) {
// This graph contains functions.
_.each(graphDef.library.function, processFunction);
}
opNodes.splice(index);
nodeNames.splice(index);
return opNodes;
},
tracker)
.then((opNodes) => {
// Create the graph data structure from the graphlib library.
return tf.graph.util.runAsyncTask(
'Building the data structure', 70, () => {
let normalizedNameDict =
mapStrictHierarchy(nodeNames, embeddingNodeNames);
let graph = new SlimGraph;
// Add the nodes to the graph.
_.each(opNodes, opNode => {
let normalizedName =
normalizedNameDict[opNode.name] || opNode.name;
graph.nodes[normalizedName] = opNode;
// Check if the node has out-embeddings. If yes, add them to the
// node.
if (opNode.name in outEmbeddings) {
opNode.outEmbeddings = outEmbeddings[opNode.name];
// Normalize the names of the out-embeddings.
_.each(opNode.outEmbeddings, node => {
node.name = normalizedNameDict[node.name] || node.name;
});
}
// Update the name of the node.
opNode.name = normalizedName;
});
// Visit each node's inputs to add the edges to the graph. If the input
// is an in-embedding, then add it to the node's in-embeddings instead.
_.each(opNodes, opNode => {
_.each(opNode.inputs, (input, i) => {
let inputName = input.name;
if (inputName in inEmbedding) {
let inEmbedNode = inEmbedding[inputName];
opNode.inEmbeddings.push(inEmbedNode);
// Move the inputs of the in-embedding node into incoming edges of
// the main node. E.g. the control dependency of a constant node
// should be moved to the op node where the constant is embedded.
for (let embedInput of inEmbedNode.inputs) {
addEdgeToGraph(
graph, normalizedNameDict[embedInput.name] ||
embedInput.name,
opNode, embedInput, params, i);
}
} else if (inputName in outEmbedding) {
// Move the inputs of the out-embedding node into inputs of
// the main node where the out-embedding points to.
let outEmbedNode = outEmbedding[inputName];
for (let embedInput of outEmbedNode.inputs) {
addEdgeToGraph(
graph, normalizedNameDict[embedInput.name] ||
embedInput.name,
opNode, input, params, i);
}
} else {
addEdgeToGraph(
graph, normalizedNameDict[inputName] || inputName,
opNode, input, params, i);
}
});
});
// Normalize the names of in-embeddings.
_.each(inEmbedding, (node, name) => {
node.name = normalizedNameDict[node.name] || node.name;
});
return graph;
}, tracker);
});
};
/**
* Create a new graphlib.Graph() instance with default parameters
*/
export function createGraph<N, E>(
name: string, type,
opt?: graphlib.GraphOptions): graphlib.Graph<N, E> {
const graphOptions = opt || {};
let graph = new graphlib.Graph<N, E>(graphOptions);
graph.setGraph({
name: name,
rankdir: graphOptions.rankdir || 'BT', // BT,TB,LR,RL
type: type
});
return graph;
};
/**
* Create a predicate for checking whether a node should be embedded based on
* the specified types.
*/
function getEmbedPredicate(types: string[]) {
return function(node: OpNode) {
// check types
for (let i = 0; i < types.length; i++) {
let regExp = new RegExp(types[i]);
if (node.op.match(regExp)) { return true; }
}
return false;
};
};
/**
* Returns a strict node name (name => name/(name)) to avoid conflicts
* where the node name is also a namespace.
*/
export function getStrictName(name: string): string {
let parts = name.split(NAMESPACE_DELIM);
return name + NAMESPACE_DELIM + '(' + parts[parts.length - 1] + ')';
}
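// Example (added for clarity): getStrictName('A/B') returns 'A/B/(B)', so a node
// whose name doubles as a namespace gets a distinct leaf name inside it.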
/**
* For each op node (embedding or non-embedding), rename it if there is a
* non-embedding node under its namespace. For example, assume node name 'A'.
* If there is a non-embedding node under its namespace (e.g. 'A/B'), 'A' will
* be renamed to 'A/(A)'. Then the namespace 'A' will contain 2 nodes: '(A)'
* and 'B'. If all the nodes under 'A' are embedding nodes (e.g. constant and
* summary), keep 'A' as an Op node and don't create a namespace.
*
* @param nodeNames An array of regular (non-embedding) node names.
* @param embeddingNodeNames An array of embedding node names.
* @return Dictionary object mapping names that need to be renamed to
* new names.
*/
function mapStrictHierarchy(nodeNames: string[],
embeddingNodeNames: string[]): {[oldName: string]: string} {
/** Dictionary that maps the old name to the new name. */
let newNameDictionary: {[oldName: string]: string} = {};
/** Set used to store all namespaces. */
let namespaceSet: {[namespace: string]: boolean} = {};
// sort the nodes to make prefix check faster
nodeNames.sort();
// look for nodes with a prefix a,a/b -> a/(a),a/b
for (let i = 0; i < nodeNames.length - 1; ++i) {
let a = nodeNames[i];
// Get all the parent namespaces of the current node
// and add them in the namespace set.
_.each(getHierarchicalPath(a).slice(0, -1), ns => {
namespaceSet[ns] = true;
});
for (let j = i + 1; j < nodeNames.length; ++j) {
let b = nodeNames[j];
if (_.startsWith(b, a)) {
if (b.length > a.length && b.charAt(a.length) === NAMESPACE_DELIM) {
newNameDictionary[a] = getStrictName(a);
break;
}
} else {
break;
}
}
}
// Go through all the embedding node names and rename them in case they
// collide with namespaces.
_.each(embeddingNodeNames, embeddingName => {
if (embeddingName in namespaceSet) {
// Rename to follow strict hierarchy.
newNameDictionary[embeddingName] = getStrictName(embeddingName);
}
});
return newNameDictionary;
};
/**
* Returns a list of the degrees of each node in the graph.
*/
function degreeSequence(graph: graphlib.Graph<any, any>): number[] {
let degrees = graph.nodes().map(function(name) {
return graph.neighbors(name).length;
});
degrees.sort();
return degrees;
};
/**
* Returns whether the degree sequences of the two graphs are the same.
*/
export function hasSimilarDegreeSequence(graph1: graphlib.Graph<any, any>,
graph2: graphlib.Graph<any, any>): boolean {
let dg1 = degreeSequence(graph1);
let dg2 = degreeSequence(graph2);
for (let i = 0; i < dg1.length; i++) {
if (dg1[i] !== dg2[i]) {
return false;
}
}
return true;
};
/**
* Returns the hierarchical path of the current node, based on the node's name.
* For example, if the name is 'a/b/c', the returned path is
* ['a', 'a/b', 'a/b/c'].
*/
export function getHierarchicalPath(name: string,
seriesNames?: { [name: string]: string }): string[] {
let path: string[] = [];
let i = name.indexOf(NAMESPACE_DELIM);
// Push all parent portions of the path.
while (i >= 0) {
path.push(name.substring(0, i));
i = name.indexOf(NAMESPACE_DELIM, i + 1);
}
// If the node's path is under a series, then add the series node name to the
// hierarchical path as the parent of the leaf.
if (seriesNames) {
let seriesName = seriesNames[name];
if (seriesName) {
path.push(seriesName);
}
}
// Push the leaf of the path.
path.push(name);
return path;
};
/**
* Returns the string for the node inclusion toggle button, dependent
* on the provided current InclusionType.
*/
export function getIncludeNodeButtonString(include: InclusionType) {
if (include === tf.graph.InclusionType.EXCLUDE) {
return 'Add to main graph';
} else {
return 'Remove from main graph';
}
};
/**
* Returns the string for the series node grouping toggle button, dependent
* on the provided current SeriesGroupingType.
*/
export function getGroupSeriesNodeButtonString(group: SeriesGroupingType) {
if (group === tf.graph.SeriesGroupingType.GROUP) {
return 'Ungroup this series of nodes';
} else {
return 'Group this series of nodes';
}
};
/**
* Toggle the node series grouping option in the provided map, setting it
* to ungroup if the series is not already in the map.
*/
export function toggleNodeSeriesGroup(
map: { [name: string]: tf.graph.SeriesGroupingType }, name: string) {
if (!(name in map) || map[name] === tf.graph.SeriesGroupingType.GROUP) {
map[name] = tf.graph.SeriesGroupingType.UNGROUP;
} else {
map[name] = tf.graph.SeriesGroupingType.GROUP;
}
};
} // close module tf.graph | |
EntryRouter.ts | import {CommandManager, EntryModel, EntryManager, ExpressCall, Log, MessageType, RequestResolver, RequestManager} from '../chook';
import {Server} from '.';
import {Request, Response, NextFunction, RequestHandler} from 'express';
import {red} from 'chalk';
export default class | {
server: Server;
entryManager: EntryManager;
requestManager: RequestManager;
commandManager: CommandManager;
constructor( server: Server, entryManager: EntryManager, commandManager: CommandManager ){
this.handler = this.handler.bind( this );
this.server = server;
this.entryManager = entryManager;
this.requestManager = new RequestManager();
this.commandManager = commandManager;
}
setup(): void{
this.entryManager
.getEntries()
.then( ( entries: EntryModel[] ) => {
this.setupRoutes( entries );
})
.catch( ( error: any ) => {
Log( 'error', red( error.message ), error );
})
}
setupRoutes( entries: EntryModel[] ): void{
let {express} = this.server;
entries.forEach( ( entry: EntryModel ) => {
let {id, method, uri} = entry.get();
switch( method ){
case "get":
express.get( uri, this.handler( entry ) );
break;
case "post":
express.post( uri, this.handler( entry ) );
break;
case "put":
express.put( uri, this.handler( entry ) );
break;
case "patch":
express.patch( uri, this.handler( entry ) );
break;
case "delete":
express.delete( uri, this.handler( entry ) );
break;
}
Log( 'debug', `Setup entry #${id} ${method.toUpperCase()} ${uri}.` );
});
}
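// For example (illustrative entry values): an entry { id: 3, method: "post",
// uri: "/hooks/build" } registers a POST /hooks/build route whose handler loads
// the entry's tasks and hands the call to the RequestManager.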
handler( entry: EntryModel ): RequestHandler{
return ( request: Request, response: Response, next: NextFunction ) => {
this.answerCall( entry, { request, response, next });
};
}
getConsoleLogHandler(){
return ( message: string, type?: MessageType ) => {
switch( type ){
case 'log':
Log( 'info', message );
break;
case 'error':
Log( 'error', message );
break;
case 'command':
Log( 'notice', message );
break;
}
};
}
answerCall( entry: EntryModel, expressCall: ExpressCall ): void{
entry.loadTasks()
.then( ( entry: EntryModel ) => {
this.requestManager.create( entry, expressCall, this.commandManager.getLogHandler(), this.getConsoleLogHandler() );
})
.catch( ( error: any ) => {
Log( 'error', red( error.message ), error );
});
}
} | EntryRouter |
priceoracle.go | package priceoracle
import (
"compound/core"
"compound/core/proposal"
"compound/pkg/id"
"compound/pkg/mtg"
"compound/worker"
"context"
"encoding/base64"
"encoding/json"
"fmt"
"sync"
"time"
"github.com/fox-one/mixin-sdk-go"
"github.com/fox-one/pkg/logger"
"github.com/gofrs/uuid"
"github.com/shopspring/decimal"
)
// Worker is the price oracle worker.
//
// It requests the price of each asset and puts a price proposal on chain.
type Worker struct {
worker.TickWorker
System *core.System
Dapp *core.Wallet
MarketStore core.IMarketStore
PriceStore core.IPriceStore
PriceOracleService core.IPriceOracleService
}
// New creates a new price oracle worker.
func New(system *core.System, dapp *core.Wallet, marketStore core.IMarketStore, priceStr core.IPriceStore, priceSrv core.IPriceOracleService) *Worker {
job := Worker{
TickWorker: worker.TickWorker{
Delay: 1 * time.Second,
ErrDelay: 1 * time.Second,
},
System: system,
Dapp: dapp,
MarketStore: marketStore,
PriceStore: priceStr,
PriceOracleService: priceSrv,
}
return &job
}
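// Example wiring (illustrative; the stores and services below are assumed to be
// constructed elsewhere by the caller):
//
//	w := priceoracle.New(system, dapp, marketStore, priceStore, priceOracleSrv)
//	_ = w.Run(ctx) // presumably blocks, ticking until ctx is cancelled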
// Run runs the worker.
func (w *Worker) Run(ctx context.Context) error {
return w.StartTick(ctx, func(ctx context.Context) error {
return w.onWork(ctx)
})
}
func (w *Worker) onWork(ctx context.Context) error {
log := logger.FromContext(ctx).WithField("worker", "priceoracle")
// delete expired price
t := time.Now().AddDate(0, 0, -7)
w.PriceStore.DeleteByTime(ctx, t)
markets, err := w.MarketStore.All(ctx)
if err != nil {
log.Errorln("fetch all markets error:", err)
return err
}
if len(markets) <= 0 {
log.Infoln("no market found!!!")
return nil
}
wg := sync.WaitGroup{}
for _, m := range markets {
wg.Add(1)
go func(market *core.Market) {
defer wg.Done()
if !w.isPriceProvided(ctx, market) {
// pull price ticker from outside
ticker, e := w.PriceOracleService.PullPriceTicker(ctx, market.AssetID, time.Now())
if e != nil {
log.Errorln("pull price ticker error:", e)
return
}
if ticker.Price.LessThanOrEqual(decimal.Zero) {
log.Errorln("invalid ticker price:", ticker.Symbol, ":", ticker.Price)
return
}
w.pushPriceOnChain(ctx, market, ticker)
}
}(m)
}
wg.Wait()
return nil
}
func (w *Worker) isPriceProvided(ctx context.Context, market *core.Market) bool {
curBlockNum := core.CalculatePriceBlock(time.Now())
price, _, e := w.PriceStore.FindByAssetBlock(ctx, market.AssetID, curBlockNum)
if e != nil {
return false
}
var priceTickers []*core.PriceTicker
if e = json.Unmarshal(price.Content, &priceTickers); e != nil {
return false
}
for _, p := range priceTickers {
if p.Provider == w.System.ClientID {
return true
}
}
return false
}
func (w *Worker) pushPriceOnChain(ctx context.Context, market *core.Market, ticker *core.PriceTicker) error {
log := logger.FromContext(ctx).WithField("worker", "priceoracle")
| providePriceReq := proposal.ProvidePriceReq{
Symbol: market.Symbol,
Price: ticker.Price,
}
cID, _ := uuid.FromString(w.System.ClientID)
tID, _ := uuid.FromString(traceID)
memo, e := mtg.Encode(cID, tID, int(core.ActionTypeProposalProvidePrice), providePriceReq)
if e != nil {
log.WithError(e).Errorln("mtg.Encode priceoracle memo error")
return e
}
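// Sign the encoded memo and pack the signature with it; the packed bytes are
// base64-encoded below and carried as the memo of the multisig vote transfer.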
sign := mtg.Sign(memo, w.System.SignKey)
memo = mtg.Pack(memo, sign)
input := mixin.TransferInput{
AssetID: w.System.VoteAsset,
Amount: w.System.VoteAmount,
TraceID: traceID,
Memo: base64.StdEncoding.EncodeToString(memo),
}
input.OpponentMultisig.Receivers = w.System.MemberIDs()
input.OpponentMultisig.Threshold = w.System.Threshold
// multisig transfer
_, e = w.Dapp.Client.Transaction(ctx, &input, w.Dapp.Pin)
if e != nil {
log.WithError(e).Errorln("mtg:: Client.Transaction error")
return e
}
return nil
} | blockNum := core.CalculatePriceBlock(time.Now())
traceID := id.UUIDFromString(fmt.Sprintf("price-%s-%s-%d", w.System.ClientID, market.AssetID, blockNum))
// transfer |
signal.rs | // use super::varargs::VarArgs;
use wasmer_runtime_core::vm::Ctx;
#[allow(clippy::cast_ptr_alignment)]
pub fn _sigemptyset(ctx: &mut Ctx, set: u32) -> i32 {
debug!("emscripten::_sigemptyset");
let set_addr = emscripten_memory_pointer!(ctx.memory(0), set) as *mut u32;
unsafe {
*set_addr = 0;
}
0
}
pub fn _sigaction(_ctx: &mut Ctx, signum: u32, act: u32, oldact: u32) -> i32 {
debug!("emscripten::_sigaction {}, {}, {}", signum, act, oldact);
0
}
#[allow(clippy::cast_ptr_alignment)]
pub fn _sigaddset(ctx: &mut Ctx, set: u32, signum: u32) -> i32 {
debug!("emscripten::_sigaddset {}, {}", set, signum);
let set_addr = emscripten_memory_pointer!(ctx.memory(0), set) as *mut u32;
unsafe {
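// Signal numbers are 1-based, so bit (signum - 1) marks membership of signum in the set.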
*set_addr |= 1 << (signum - 1);
}
0
}
pub fn _sigsuspend(_ctx: &mut Ctx, _one: i32) -> i32 {
debug!("emscripten::_sigsuspend");
-1
}
pub fn _sigprocmask(_ctx: &mut Ctx, _one: i32, _two: i32, _three: i32) -> i32 |
pub fn _signal(_ctx: &mut Ctx, sig: u32, _two: i32) -> i32 {
debug!("emscripten::_signal ({})", sig);
0
}
| {
debug!("emscripten::_sigprocmask");
0
} |
g1.rs | use crate::{
biginteger::{BigInteger256, BigInteger384},
curves::{
bls12::{G1Affine as Bls12G1Affine, G1Prepared, G1Projective as Bls12G1Projective},
bls12_381::{g2::G2Affine, Bls12_381, Bls12_381Parameters},
models::{ModelParameters, SWModelParameters},
PairingCurve, PairingEngine,
},
field_new,
fields::{
bls12_381::{Fq, Fq12, Fr},
Field,
},
};
pub type G1Affine = Bls12G1Affine<Bls12_381Parameters>;
pub type G1Projective = Bls12G1Projective<Bls12_381Parameters>;
impl PairingCurve for G1Affine {
type Engine = Bls12_381;
type Prepared = G1Prepared<Bls12_381Parameters>;
type PairWith = G2Affine;
type PairingResult = Fq12;
fn prepare(&self) -> Self::Prepared {
Self::Prepared::from_affine(*self)
}
fn | (&self, other: &Self::PairWith) -> Self::PairingResult {
Bls12_381::pairing(*self, *other)
}
}
#[derive(Copy, Clone, Default, PartialEq, Eq)]
pub struct Bls12_381G1Parameters;
impl ModelParameters for Bls12_381G1Parameters {
type BaseField = Fq;
type ScalarField = Fr;
}
impl SWModelParameters for Bls12_381G1Parameters {
/// COEFF_A = 0
const COEFF_A: Fq = field_new!(Fq, BigInteger384([0x0, 0x0, 0x0, 0x0, 0x0, 0x0]));
/// COEFF_B = 4
const COEFF_B: Fq = field_new!(
Fq,
BigInteger384([
0xaa270000000cfff3,
0x53cc0032fc34000a,
0x478fe97a6b0a807f,
0xb1d37ebee6ba24d7,
0x8ec9733bbf78ab2f,
0x9d645513d83de7e,
])
);
/// COFACTOR = (x - 1)^2 / 3 = 76329603384216526031706109802092473003
const COFACTOR: &'static [u64] = &[0x8c00aaab0000aaab, 0x396c8c005555e156];
/// COFACTOR_INV = COFACTOR^{-1} mod r
/// = 52435875175126190458656871551744051925719901746859129887267498875565241663483
const COFACTOR_INV: Fr = field_new!(
Fr,
BigInteger256([
288839107172787499,
1152722415086798946,
2612889808468387987,
5124657601728438008,
])
);
/// AFFINE_GENERATOR_COEFFS = (G1_GENERATOR_X, G1_GENERATOR_Y)
const AFFINE_GENERATOR_COEFFS: (Self::BaseField, Self::BaseField) =
(G1_GENERATOR_X, G1_GENERATOR_Y);
#[inline(always)]
fn mul_by_a(_: &Self::BaseField) -> Self::BaseField {
Self::BaseField::zero()
}
}
/// G1_GENERATOR_X =
/// 3685416753713387016781088315183077757961620795782546409894578378688607592378376318836054947676345821548104185464507
pub const G1_GENERATOR_X: Fq = field_new!(
Fq,
BigInteger384([
0x5cb38790fd530c16,
0x7817fc679976fff5,
0x154f95c7143ba1c1,
0xf0ae6acdf3d0e747,
0xedce6ecc21dbf440,
0x120177419e0bfb75,
])
);
/// G1_GENERATOR_Y =
/// 1339506544944476473020471379941921221584933875938349620426543736416511423956333506472724655353366534992391756441569
pub const G1_GENERATOR_Y: Fq = field_new!(
Fq,
BigInteger384([
0xbaac93d50ce72271,
0x8c22631a7918fd8e,
0xdd595f13570725ce,
0x51ac582950405194,
0xe1c8c3fad0059c0,
0xbbc3efc5008a26a,
])
);
| pairing_with |
verifier.go | /*
Copyright SecureKey Technologies Inc. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package verifiable
import (
"errors"
"fmt"
"github.com/Universal-Health-Chain/aries-framework-go/pkg/crypto"
"github.com/Universal-Health-Chain/aries-framework-go/pkg/doc/jose"
sigverifier "github.com/Universal-Health-Chain/aries-framework-go/pkg/doc/signature/verifier"
"github.com/Universal-Health-Chain/aries-framework-go/pkg/kms"
"github.com/Universal-Health-Chain/aries-framework-go/pkg/kms/localkms"
)
// localCryptoVerifier defines a verifier which is based on Local KMS and Crypto
// which uses keyset.Handle as input for verification.
type localCryptoVerifier struct {
crypto.Crypto
localKMS *localkms.LocalKMS
}
func newLocalCryptoVerifier(cr crypto.Crypto, localKMS *localkms.LocalKMS) *localCryptoVerifier {
return &localCryptoVerifier{
Crypto: cr,
localKMS: localKMS,
}
}
func (t *localCryptoVerifier) Verify(sig, msg []byte, kh interface{}) error {
pubKey, ok := kh.(*sigverifier.PublicKey)
if !ok {
return errors.New("bad key handle format")
}
kmsKeyType, err := mapPublicKeyToKMSKeyType(pubKey)
if err != nil {
return err
}
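// Convert the raw public key bytes into a local KMS keyset handle of the
// mapped type; Crypto.Verify operates on key handles rather than raw bytes.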
handle, err := t.localKMS.PubKeyBytesToHandle(pubKey.Value, kmsKeyType)
if err != nil |
return t.Crypto.Verify(sig, msg, handle)
}
func mapPublicKeyToKMSKeyType(pubKey *sigverifier.PublicKey) (kms.KeyType, error) {
switch pubKey.Type {
case "Ed25519VerificationKey2018":
return kms.ED25519Type, nil
case "JwsVerificationKey2020":
return mapJWKToKMSKeyType(pubKey.JWK)
default:
return "", fmt.Errorf("unsupported key type: %s", pubKey.Type)
}
}
func mapJWKToKMSKeyType(jwk *jose.JWK) (kms.KeyType, error) {
switch jwk.Kty {
case "OKP":
return kms.ED25519Type, nil
case "EC":
switch jwk.Crv {
case "P-256":
return kms.ECDSAP256TypeIEEEP1363, nil
case "P-384":
return kms.ECDSAP384TypeIEEEP1363, nil
case "P-521":
return kms.ECDSAP521TypeIEEEP1363, nil
}
}
return "", fmt.Errorf("unsupported JWK: %v", jwk)
}
| {
return err
} |
torchFunc.py | import torch
from torch.autograd import Variable
import torch.nn.functional as F
from torch import nn
from torch import Tensor
from torch.nn import Parameter
def l2normalize(v, eps=1e-12):
return v / (v.norm() + eps)
class SpectralNorm(nn.Module):
def __init__(self, module, name='weight', power_iterations=1):
super(SpectralNorm, self).__init__()
self.module = module
self.name = name
self.power_iterations = power_iterations
if not self._made_params():
self._make_params()
def _update_u_v(self):
u = getattr(self.module, self.name + "_u")
v = getattr(self.module, self.name + "_v")
w = getattr(self.module, self.name + "_bar")
height = w.data.shape[0]
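# Power iteration: alternately refine u and v so they approximate the leading
# left/right singular vectors of the weight matrix reshaped to (height, -1).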
for _ in range(self.power_iterations):
v.data = l2normalize(torch.mv(torch.t(w.view(height,-1).data), u.data))
u.data = l2normalize(torch.mv(w.view(height,-1).data, v.data))
# sigma = torch.dot(u.data, torch.mv(w.view(height,-1).data, v.data))
sigma = u.dot(w.view(height, -1).mv(v))
setattr(self.module, self.name, w / sigma.expand_as(w))
def _made_params(self):
try:
u = getattr(self.module, self.name + "_u")
v = getattr(self.module, self.name + "_v")
w = getattr(self.module, self.name + "_bar")
return True
except AttributeError:
return False
def _make_params(self):
|
def forward(self, *args):
self._update_u_v()
return self.module.forward(*args) | w = getattr(self.module, self.name)
height = w.data.shape[0]
width = w.view(height, -1).data.shape[1]
u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False)
v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False)
u.data = l2normalize(u.data)
v.data = l2normalize(v.data)
w_bar = Parameter(w.data)
del self.module._parameters[self.name]
self.module.register_parameter(self.name + "_u", u)
self.module.register_parameter(self.name + "_v", v)
self.module.register_parameter(self.name + "_bar", w_bar) |
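# A minimal usage sketch (an addition, not part of the original file): wrap a
# linear layer in SpectralNorm and run one forward pass. The layer sizes and
# the __main__ guard are illustrative assumptions.
if __name__ == "__main__":
    layer = SpectralNorm(nn.Linear(128, 64))  # registers weight_u, weight_v and weight_bar
    x = torch.randn(4, 128)                   # batch of 4 random inputs
    y = layer(x)                              # one power iteration, then nn.Linear.forward
    print(y.shape)                            # torch.Size([4, 64])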