policy_integration_test.rs
// Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use {
    anyhow::{Context, Error},
    fidl_fidl_examples_echo as fidl_echo,
    fuchsia_async::{self as fasync},
    fuchsia_component::client::{launch, launcher},
    lazy_static::lazy_static,
};

macro_rules! policy_url {
    ($cmx:expr) => {
        format!("{}{}", "fuchsia-pkg://fuchsia.com/policy-integration-tests#meta/", $cmx)
    };
}

lazy_static! {
    static ref NONE_ACCEPTED_URL: String = policy_url!("none.cmx");
    static ref PACKAGE_CACHE_DENIED_URL: String = policy_url!("package_cache_denied.cmx");
    static ref PACKAGE_CACHE_ALLOWED_URL: String = policy_url!("package_cache_allowed.cmx");
    static ref PACKAGE_RESOLVER_DENIED_URL: String = policy_url!("package_resolver_denied.cmx");
    static ref PACKAGE_RESOLVER_ALLOWED_URL: String = policy_url!("package_resolver_allowed.cmx");
    static ref ROOT_JOB_DENIED_URL: String = policy_url!("root_job_denied.cmx");
    static ref ROOT_JOB_ALLOWED_URL: String = policy_url!("root_job_allowed.cmx");
    static ref CPU_RESOURCE_DENIED_URL: String = policy_url!("cpu_resource_denied.cmx");
    static ref CPU_RESOURCE_ALLOWED_URL: String = policy_url!("cpu_resource_allowed.cmx");
    static ref DEBUG_RESOURCE_DENIED_URL: String = policy_url!("debug_resource_denied.cmx");
    static ref DEBUG_RESOURCE_ALLOWED_URL: String = policy_url!("debug_resource_allowed.cmx");
    static ref HYPERVISOR_RESOURCE_DENIED_URL: String = policy_url!("hypervisor_resource_denied.cmx");
    static ref HYPERVISOR_RESOURCE_ALLOWED_URL: String = policy_url!("hypervisor_resource_allowed.cmx");
    static ref MMIO_RESOURCE_DENIED_URL: String = policy_url!("mmio_resource_denied.cmx");
    static ref MMIO_RESOURCE_ALLOWED_URL: String = policy_url!("mmio_resource_allowed.cmx");
    static ref INFO_RESOURCE_DENIED_URL: String = policy_url!("info_resource_denied.cmx");
    static ref INFO_RESOURCE_ALLOWED_URL: String = policy_url!("info_resource_allowed.cmx");
    static ref IRQ_RESOURCE_DENIED_URL: String = policy_url!("irq_resource_denied.cmx");
    static ref IRQ_RESOURCE_ALLOWED_URL: String = policy_url!("irq_resource_allowed.cmx");
    static ref IOPORT_RESOURCE_DENIED_URL: String = policy_url!("ioport_resource_denied.cmx");
    static ref IOPORT_RESOURCE_ALLOWED_URL: String = policy_url!("ioport_resource_allowed.cmx");
    static ref POWER_RESOURCE_DENIED_URL: String = policy_url!("power_resource_denied.cmx");
    static ref POWER_RESOURCE_ALLOWED_URL: String = policy_url!("power_resource_allowed.cmx");
    static ref SMC_RESOURCE_DENIED_URL: String = policy_url!("smc_resource_denied.cmx");
    static ref SMC_RESOURCE_ALLOWED_URL: String = policy_url!("smc_resource_allowed.cmx");
    static ref ROOT_RESOURCE_DENIED_URL: String = policy_url!("root_resource_denied.cmx");
    static ref ROOT_RESOURCE_ALLOWED_URL: String = policy_url!("root_resource_allowed.cmx");
    static ref VMEX_RESOURCE_DENIED_URL: String = policy_url!("vmex_resource_denied.cmx");
    static ref VMEX_RESOURCE_ALLOWED_URL: String = policy_url!("vmex_resource_allowed.cmx");
    static ref PKGFS_VERSIONS_DENIED_URL: String = policy_url!("pkgfs_versions_denied.cmx");
    static ref PKGFS_VERSIONS_ALLOWED_URL: String = policy_url!("pkgfs_versions_allowed.cmx");
    static ref DEPRECATED_SHELL_DENIED_URL: String = policy_url!("deprecated_shell_denied.cmx");
    static ref DEPRECATED_SHELL_ALLOWED_URL: String = policy_url!("deprecated_shell_allowed.cmx");
    static ref DEPRECATED_EXEC_DENIED_URL: String =
        policy_url!("deprecated_ambient_replace_as_exec_denied.cmx");
    static ref DEPRECATED_EXEC_ALLOWED_URL: String =
        policy_url!("deprecated_ambient_replace_as_exec_allowed.cmx");
}

async fn launch_component(component_url: &str) -> Result<String, Error> {
    let launcher = launcher().context("failed to open the launcher")?;
    let app = launch(&launcher, component_url.to_string(), None)
        .context("failed to launch service")?;
    let echo = app
        .connect_to_protocol::<fidl_echo::EchoMarker>()
        .context("Failed to connect to echo service")?;
    let result = echo.echo_string(Some("policy")).await?;
    Ok(result.unwrap())
}

async fn assert_launch_allowed(component_url: &str) {
    assert!(launch_component(component_url).await.unwrap() == "policy")
}

async fn assert_launch_denied(component_url: &str) {
    assert!(launch_component(component_url).await.is_err())
}

#[fasync::run_singlethreaded(test)]
async fn none_allowed() -> Result<(), Error> {
    assert_launch_allowed(&NONE_ACCEPTED_URL).await;
    Ok(())
}

#[fasync::run_singlethreaded(test)]
async fn package_cache_allowed() -> Result<(), Error> {
    assert_launch_allowed(&PACKAGE_CACHE_ALLOWED_URL).await;
    Ok(())
}

#[fasync::run_singlethreaded(test)]
async fn package_cache_denied() -> Result<(), Error> {
    assert_launch_denied(&PACKAGE_CACHE_DENIED_URL).await;
    Ok(())
}

#[fasync::run_singlethreaded(test)]
async fn package_resolver_allowed() -> Result<(), Error> {
    assert_launch_allowed(&PACKAGE_RESOLVER_ALLOWED_URL).await;
    Ok(())
}

#[fasync::run_singlethreaded(test)]
async fn package_resolver_denied() -> Result<(), Error> {
    assert_launch_denied(&PACKAGE_RESOLVER_DENIED_URL).await;
    Ok(())
}

#[fasync::run_singlethreaded(test)]
async fn root_job_allowed() -> Result<(), Error> {
    assert_launch_allowed(&ROOT_JOB_ALLOWED_URL).await;
    Ok(())
}

#[fasync::run_singlethreaded(test)]
async fn root_job_denied() -> Result<(), Error> {
    assert_launch_denied(&ROOT_JOB_DENIED_URL).await;
    Ok(())
}

#[fasync::run_singlethreaded(test)]
async fn cpu_resource_allowed() -> Result<(), Error> {
    assert_launch_allowed(&CPU_RESOURCE_ALLOWED_URL).await;
    Ok(())
}

#[fasync::run_singlethreaded(test)]
async fn cpu_resource_denied() -> Result<(), Error> {
    assert_launch_denied(&CPU_RESOURCE_DENIED_URL).await;
    Ok(())
}

#[fasync::run_singlethreaded(test)]
async fn debug_resource_allowed() -> Result<(), Error> {
    assert_launch_allowed(&DEBUG_RESOURCE_ALLOWED_URL).await;
    Ok(())
}

#[fasync::run_singlethreaded(test)]
async fn debug_resource_denied() -> Result<(), Error> {
    assert_launch_denied(&DEBUG_RESOURCE_DENIED_URL).await;
    Ok(())
}

#[fasync::run_singlethreaded(test)]
async fn hypervisor_resource_allowed() -> Result<(), Error> {
    assert_launch_allowed(&HYPERVISOR_RESOURCE_ALLOWED_URL).await;
    Ok(())
}

#[fasync::run_singlethreaded(test)]
async fn hypervisor_resource_denied() -> Result<(), Error> {
    assert_launch_denied(&HYPERVISOR_RESOURCE_DENIED_URL).await;
    Ok(())
}

#[fasync::run_singlethreaded(test)]
async fn mmio_resource_allowed() -> Result<(), Error> {
    assert_launch_allowed(&MMIO_RESOURCE_ALLOWED_URL).await;
    Ok(())
}

#[fasync::run_singlethreaded(test)]
async fn mmio_resource_denied() -> Result<(), Error> {
    assert_launch_denied(&MMIO_RESOURCE_DENIED_URL).await;
    Ok(())
}

#[fasync::run_singlethreaded(test)]
async fn info_resource_allowed() -> Result<(), Error> {
    assert_launch_allowed(&INFO_RESOURCE_ALLOWED_URL).await;
    Ok(())
}

#[fasync::run_singlethreaded(test)]
async fn info_resource_denied() -> Result<(), Error> {
    assert_launch_denied(&INFO_RESOURCE_DENIED_URL).await;
    Ok(())
}

#[fasync::run_singlethreaded(test)]
async fn irq_resource_allowed() -> Result<(), Error> {
    assert_launch_allowed(&IRQ_RESOURCE_ALLOWED_URL).await;
    Ok(())
}

#[fasync::run_singlethreaded(test)]
async fn irq_resource_denied() -> Result<(), Error> {
    assert_launch_denied(&IRQ_RESOURCE_DENIED_URL).await;
    Ok(())
}

#[fasync::run_singlethreaded(test)]
async fn ioport_resource_allowed() -> Result<(), Error> {
    assert_launch_allowed(&IOPORT_RESOURCE_ALLOWED_URL).await;
    Ok(())
}

#[fasync::run_singlethreaded(test)]
async fn ioport_resource_denied() -> Result<(), Error> {
    assert_launch_denied(&IOPORT_RESOURCE_DENIED_URL).await;
    Ok(())
}

#[fasync::run_singlethreaded(test)]
async fn power_resource_allowed() -> Result<(), Error> {
    assert_launch_allowed(&POWER_RESOURCE_ALLOWED_URL).await;
    Ok(())
}

#[fasync::run_singlethreaded(test)]
async fn power_resource_denied() -> Result<(), Error> {
    assert_launch_denied(&POWER_RESOURCE_DENIED_URL).await;
    Ok(())
}

#[fasync::run_singlethreaded(test)]
async fn smc_resource_allowed() -> Result<(), Error> {
    assert_launch_allowed(&SMC_RESOURCE_ALLOWED_URL).await;
    Ok(())
}

#[fasync::run_singlethreaded(test)]
async fn smc_resource_denied() -> Result<(), Error> {
    assert_launch_denied(&SMC_RESOURCE_DENIED_URL).await;
    Ok(())
}

#[fasync::run_singlethreaded(test)]
async fn root_resource_allowed() -> Result<(), Error> {
    assert_launch_allowed(&ROOT_RESOURCE_ALLOWED_URL).await;
    Ok(())
}

#[fasync::run_singlethreaded(test)]
async fn root_resource_denied() -> Result<(), Error> {
    assert_launch_denied(&ROOT_RESOURCE_DENIED_URL).await;
    Ok(())
}

#[fasync::run_singlethreaded(test)]
async fn vmex_resource_allowed() -> Result<(), Error> {
    assert_launch_allowed(&VMEX_RESOURCE_ALLOWED_URL).await;
    Ok(())
}

// The `vmex_resource_denied` test here is disabled since eng builds have a permissive * allowlist
// for `VmexResource` to support out-of-tree tests that need to be able to JIT without having to
// enumerate every single test component in-tree that e.g. Chromium wants to run from out-of-tree.
// Unfortunately, this means that it's impossible to make a component that will be denied
// `VmexResource` on eng builds, which means we can't test this behavior using the approach
// taken in this test suite, so we simply disable this test for the time being.
// TODO(https://fxbug.dev/78074): enable this test or something exercising equivalent coverage
#[ignore]
#[fasync::run_singlethreaded(test)]
async fn vmex_resource_denied() -> Result<(), Error> {
    assert_launch_denied(&VMEX_RESOURCE_DENIED_URL).await;
    Ok(())
}

#[fasync::run_singlethreaded(test)]
async fn pkgfs_versions_allowed() -> Result<(), Error> {
    assert_launch_allowed(&PKGFS_VERSIONS_ALLOWED_URL).await;
    Ok(())
}

#[fasync::run_singlethreaded(test)]
async fn pkgfs_versions_denied() -> Result<(), Error> {
    assert_launch_denied(&PKGFS_VERSIONS_DENIED_URL).await;
    Ok(())
}

#[fasync::run_singlethreaded(test)]
async fn deprecated_shell_allowed() -> Result<(), Error> {
    assert_launch_allowed(&DEPRECATED_SHELL_ALLOWED_URL).await;
    Ok(())
}

#[fasync::run_singlethreaded(test)]
async fn deprecated_shell_denied() -> Result<(), Error> {
    assert_launch_denied(&DEPRECATED_SHELL_DENIED_URL).await;
    Ok(())
}

#[fasync::run_singlethreaded(test)]
async fn deprecated_exec_allowed() -> Result<(), Error> {
    assert_launch_allowed(&DEPRECATED_EXEC_ALLOWED_URL).await;
    Ok(())
}

// Disabled because we can't reasonably test this on eng builds, because we need to be permissive
// to allow tests that use JITs to run. See similar discussion around `vmex_resource_denied`.
// TODO(https://fxbug.dev/78074): enable this test or something exercising equivalent coverage
#[ignore]
#[fasync::run_singlethreaded(test)]
async fn deprecated_exec_denied() -> Result<(), Error> {
    assert_launch_denied(&DEPRECATED_EXEC_DENIED_URL).await;
    Ok(())
}
simulators.py
from warnings import warn

from bluesky.utils import maybe_await
from bluesky.preprocessors import print_summary_wrapper
from bluesky.run_engine import call_in_bluesky_event_loop, in_bluesky_event_loop

from .protocols import Checkable


def plot_raster_path(plan, x_motor, y_motor, ax=None, probe_size=None, lw=2):
    """Plot the raster path for this plan

    Parameters
    ----------
    plan : iterable
        Must yield `Msg` objects and not be a co-routine
    x_motor, y_motor : str
        Names of the x and y motors
    ax : matplotlib.axes.Axes
        The axes to plot to, if None, make new figure + axes
    probe_size : float, optional
        If not None, use as radius of probe (in same units as motor positions)
    lw : float, optional
        Width of lines drawn between points
    """
    import matplotlib.pyplot as plt
    from matplotlib import collections as mcollections
    from matplotlib import patches as mpatches

    if ax is None:
        ax = plt.subplots()[1]
    ax.set_aspect('equal')

    cur_x = cur_y = None
    traj = []
    for msg in plan:
        cmd = msg.command
        if cmd == 'set':
            if msg.obj.name == x_motor:
                cur_x = msg.args[0]
            if msg.obj.name == y_motor:
                cur_y = msg.args[0]
        elif cmd == 'save':
            traj.append((cur_x, cur_y))

    x, y = zip(*traj)
    path, = ax.plot(x, y, marker='', linestyle='-', lw=lw)
    ax.set_xlabel(x_motor)
    ax.set_ylabel(y_motor)
    if probe_size is None:
        read_points = ax.scatter(x, y, marker='o', lw=lw)
    else:
        circles = [mpatches.Circle((_x, _y), probe_size,
                                   facecolor='black', alpha=0.5)
                   for _x, _y in traj]
        read_points = mcollections.PatchCollection(circles,
                                                   match_original=True)
        ax.add_collection(read_points)
    return {'path': path, 'events': read_points}


def summarize_plan(plan):
    """Print summary of plan

    Prints a minimal version of the plan, showing only moves and
    where events are created.

    Parameters
    ----------
    plan : iterable
        Must yield `Msg` objects
    """
    for msg in print_summary_wrapper(plan):
        ...


print_summary = summarize_plan  # back-compat


def check_limits(plan):
    """Run check_limits_async in the RE"""
    if in_bluesky_event_loop():
        raise RuntimeError(
            "Can't call check_limits() from within RE, "
            "use await check_limits_async() instead"
        )
    call_in_bluesky_event_loop(check_limits_async(plan))


async def check_limits_async(plan):
    """
    Check that a plan will not move devices outside of their limits.

    Parameters
    ----------
    plan : iterable
        Must yield `Msg` objects
    """
    ignore = []
    for msg in plan:
        obj = msg.obj
        if msg.command == 'set' and obj not in ignore:
            if isinstance(obj, Checkable):
                await maybe_await(obj.check_value(msg.args[0]))
            else:
                warn(f"{obj.name} has no check_value() method"
                     f" to check if {msg.args[0]} is within its limits.")
            ignore.append(obj)
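# A minimal usage sketch, not part of the module itself. It assumes the
# simulated hardware that ships with ophyd and a standard bluesky scan plan;
# any Msg-yielding plan would work the same way:
#
#     from bluesky.plans import scan
#     from ophyd.sim import det, motor
#
#     summarize_plan(scan([det], motor, -1, 1, 3))   # print moves and events
#     check_limits(scan([det], motor, -1, 1, 3))     # warn for missing check_value()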
delete_domains_request.py
# coding: utf-8

import re
import six

from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization


class DeleteDomainsRequest:
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """

    sensitive_list = []

    openapi_types = {
        'domain_name': 'str'
    }

    attribute_map = {
        'domain_name': 'domain_name'
    }

    def __init__(self, domain_name=None):
        """DeleteDomainsRequest - a model defined in huaweicloud sdk"""
        self._domain_name = None
        self.discriminator = None
        self.domain_name = domain_name

    @property
    def domain_name(self):
        """Gets the domain_name of this DeleteDomainsRequest.

        Domain name

        :return: The domain_name of this DeleteDomainsRequest.
        :rtype: str
        """
        return self._domain_name

    @domain_name.setter
    def domain_name(self, domain_name):
        """Sets the domain_name of this DeleteDomainsRequest.

        Domain name

        :param domain_name: The domain_name of this DeleteDomainsRequest.
        :type: str
        """
        self._domain_name = domain_name

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, DeleteDomainsRequest):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
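# Usage sketch (illustrative; "example.com" is a placeholder domain):
#
#     request = DeleteDomainsRequest(domain_name="example.com")
#     request.to_dict()   # -> {'domain_name': 'example.com'}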
_paged_models.py
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from msrest.paging import Paged


class PolicyAssignmentPaged(Paged):
    """
    A paging container for iterating over a list of :class:`PolicyAssignment <azure.mgmt.resource.policy.v2018_05_01.models.PolicyAssignment>` object
    """

    _attribute_map = {
        'next_link': {'key': 'nextLink', 'type': 'str'},
        'current_page': {'key': 'value', 'type': '[PolicyAssignment]'}
    }

    def __init__(self, *args, **kwargs):
        super(PolicyAssignmentPaged, self).__init__(*args, **kwargs)


class PolicyDefinitionPaged(Paged):
    """
    A paging container for iterating over a list of :class:`PolicyDefinition <azure.mgmt.resource.policy.v2018_05_01.models.PolicyDefinition>` object
    """

    _attribute_map = {
        'next_link': {'key': 'nextLink', 'type': 'str'},
        'current_page': {'key': 'value', 'type': '[PolicyDefinition]'}
    }

    def __init__(self, *args, **kwargs):
        super(PolicyDefinitionPaged, self).__init__(*args, **kwargs)


class PolicySetDefinitionPaged(Paged):
    """
    A paging container for iterating over a list of :class:`PolicySetDefinition <azure.mgmt.resource.policy.v2018_05_01.models.PolicySetDefinition>` object
    """

    _attribute_map = {
        'next_link': {'key': 'nextLink', 'type': 'str'},
        'current_page': {'key': 'value', 'type': '[PolicySetDefinition]'}
    }

    def __init__(self, *args, **kwargs):
        super(PolicySetDefinitionPaged, self).__init__(*args, **kwargs)
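# Usage sketch (illustrative; assumes an authenticated PolicyClient from
# azure-mgmt-resource, which is not part of this module):
#
#     for assignment in policy_client.policy_assignments.list():
#         print(assignment.name)   # pages are fetched lazily via next_link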
conditional.rs
// Copyright 2022 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::scalars::Function2Factory;
use crate::scalars::IfFunction;

#[derive(Clone)]
pub struct ConditionalFunction;

impl ConditionalFunction {
    pub fn register(factory: &mut Function2Factory) {
        factory.register("if", IfFunction::desc());
    }
}
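// Usage sketch (illustrative; how the Function2Factory instance is obtained
// depends on the rest of the crate, so `factory` here is assumed to exist):
//
//     ConditionalFunction::register(&mut factory);
//     // afterwards the factory can resolve the scalar function named "if"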
forms.py
from django import forms
from django.urls import reverse
from django.utils.translation import pgettext_lazy, ugettext_lazy as _
from i18nfield.forms import I18nFormField, I18nTextarea, I18nTextInput

from pretix.base.email import get_available_placeholders
from pretix.base.forms import PlaceholderValidator
from pretix.base.models import Item, Order, SubEvent
from pretix.control.forms.widgets import Select2


class MailForm(forms.Form):
    recipients = forms.ChoiceField(
        label=_('Send email to'),
        widget=forms.RadioSelect,
        initial='orders',
        choices=[]
    )
    sendto = forms.MultipleChoiceField()  # overridden later
    subject = forms.CharField(label=_("Subject"))
    message = forms.CharField(label=_("Message"))
    items = forms.ModelMultipleChoiceField(
        widget=forms.CheckboxSelectMultiple(
            attrs={'class': 'scrolling-multiple-choice'}
        ),
        label=_('Only send to people who bought'),
        required=True,
        queryset=Item.objects.none()
    )
    subevent = forms.ModelChoiceField(
        SubEvent.objects.none(),
        label=_('Only send to customers of'),
        required=False,
        empty_label=pgettext_lazy('subevent', 'All dates')
    )

    def _set_field_placeholders(self, fn, base_parameters):
        phs = [
            '{%s}' % p
            for p in sorted(get_available_placeholders(self.event, base_parameters).keys())
        ]
        ht = _('Available placeholders: {list}').format(
            list=', '.join(phs)
        )
        if self.fields[fn].help_text:
            self.fields[fn].help_text += ' ' + str(ht)
        else:
            self.fields[fn].help_text = ht
        self.fields[fn].validators.append(
            PlaceholderValidator(phs)
        )

    def __init__(self, *args, **kwargs):
        event = self.event = kwargs.pop('event')
        super().__init__(*args, **kwargs)
        recp_choices = [
            ('orders', _('Everyone who created a ticket order'))
        ]
        if event.settings.attendee_emails_asked:
            recp_choices += [
                ('attendees', _('Every attendee (falling back to the order contact when no '
                                'attendee email address is given)')),
                ('both', _('Both (all order contact addresses and all attendee email addresses)'))
            ]
        self.fields['recipients'].choices = recp_choices
        self.fields['subject'] = I18nFormField(
            label=_('Subject'), widget=I18nTextInput, required=True,
            locales=event.settings.get('locales'),
        )
        self.fields['message'] = I18nFormField(
            label=_('Message'), widget=I18nTextarea, required=True,
            locales=event.settings.get('locales'),
        )
        self._set_field_placeholders('subject', ['event', 'order', 'position_or_address'])
        self._set_field_placeholders('message', ['event', 'order', 'position_or_address'])
        choices = list(Order.STATUS_CHOICE)
        if not event.settings.get('payment_term_expire_automatically', as_type=bool):
            choices.append(
                ('overdue', _('pending with payment overdue'))
            )
        self.fields['sendto'] = forms.MultipleChoiceField(
            label=_("Send to customers with order status"),
            widget=forms.CheckboxSelectMultiple(
                attrs={'class': 'scrolling-multiple-choice'}
            ),
            choices=choices
        )
        if not self.initial.get('sendto'):
            self.initial['sendto'] = ['p', 'n']
        self.fields['items'].queryset = event.items.all()
        if not self.initial.get('items'):
            self.initial['items'] = event.items.all()
        if event.has_subevents:
            self.fields['subevent'].queryset = event.subevents.all()
            self.fields['subevent'].widget = Select2(
                attrs={
                    'data-model-select2': 'event',
                    'data-select2-url': reverse('control:event.subevents.select2', kwargs={
                        'event': event.slug,
                        'organizer': event.organizer.slug,
                    }),
                    'data-placeholder': pgettext_lazy('subevent', 'Date')
                }
            )
            self.fields['subevent'].widget.choices = self.fields['subevent'].choices
        else:
            del self.fields['subevent']
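# Usage sketch (illustrative; assumes a pretix Event instance and a request
# carrying POST data, neither of which is defined in this module):
#
#     form = MailForm(request.POST, event=event)
#     if form.is_valid():
#         subject = form.cleaned_data['subject']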
n_slack_ssvm.py
######################
# (c) 2012 Andreas Mueller <[email protected]>
# License: BSD 3-clause
#
# Implements structured SVM as described in Tsochantaridis et al.
# Support Vector Machine Learning for Interdependent
# and Structured Output Spaces

from time import time

import numpy as np
import cvxopt
import cvxopt.solvers

from sklearn.externals.joblib import Parallel, delayed
from sklearn.utils import gen_even_slices

from .ssvm import BaseSSVM
from ..utils import unwrap_pairwise, find_constraint


class NSlackSSVM(BaseSSVM):
    """Structured SVM solver for the n-slack QP with l1 slack penalty.

    Implements margin rescaled structural SVM using the n-slack formulation
    and cutting plane method, solved using CVXOPT.
    The optimization is restarted in each iteration.

    Parameters
    ----------
    model : StructuredModel
        Object containing the model structure. Has to implement
        `loss`, `inference` and `loss_augmented_inference`.

    max_iter : int
        Maximum number of passes over dataset to find constraints.

    C : float
        Regularization parameter.

    check_constraints : bool (default=True)
        Whether to check if the new "most violated constraint" is
        more violated than previous constraints. Helpful for stopping
        and debugging, but costly.

    verbose : int (default=0)
        Verbosity.

    negativity_constraint : list of ints
        Indices of parameters that are constrained to be negative.
        This is useful for learning submodular CRFs (inference is
        formulated as maximization in SSVMs, flipping some signs).

    break_on_bad : bool (default=False)
        Whether to break (start debug mode) when inference was approximate.

    n_jobs : int, default=1
        Number of parallel jobs for inference. -1 means as many as cpus.

    show_loss_every : int, default=0
        Controls how often the hamming loss is computed (for monitoring
        purposes). Zero means never, otherwise it will be computed
        every show_loss_every'th epoch.

    batch_size : int, default=100
        Number of constraints after which we solve the QP again.
        batch_size=-1 means that an update is performed only after going
        once over the whole training set.

    tol : float, default=1e-3
        Convergence tolerance. If the dual objective decreases less than
        tol, learning is stopped. A negative value corresponds to ignoring
        the behavior of the dual objective and stopping only when no more
        constraints can be found.

    inactive_threshold : float, default=1e-5
        Threshold for dual variable of a constraint to be considered
        inactive.

    inactive_window : float, default=50
        Window for measuring inactivity. If a constraint is inactive for
        ``inactive_window`` iterations, it will be pruned from the QP.
        If set to 0, no constraints will be removed.

    switch_to : None or string, default=None
        Switch to the given inference method if the previous method does
        not find any more constraints.

    logger : logger object, default=None
        Pystruct logger for storing the model or extracting additional
        information.

    Attributes
    ----------
    w : nd-array, shape=(model.size_joint_feature,)
        The learned weights of the SVM.

    old_solution : dict
        The last solution found by the qp solver.

    ``loss_curve_`` : list of float
        List of loss values if show_loss_every > 0.

    ``objective_curve_`` : list of float
        Cutting plane objective after each pass through the dataset.

    ``primal_objective_curve_`` : list of float
        Primal objective after each pass through the dataset.

    ``timestamps_`` : list of int
        Total training time stored before each iteration.

    References
    ----------
    * Tsochantaridis, Ioannis and Joachims, Thorsten and Hofmann, Thomas and
      Altun, Yasemin and Singer, Yoram: Large margin methods for structured
      and interdependent output variables, JMLR 2006

    * Joachims, Thorsten and Finley, Thomas and Yu, Chun-Nam John:
      Cutting-plane training of structural SVMs, JMLR 2009
    """

    def __init__(self, model, max_iter=100, C=1.0, check_constraints=True,
                 verbose=0, negativity_constraint=None, n_jobs=1,
                 break_on_bad=False, show_loss_every=0, batch_size=100,
                 tol=1e-3, inactive_threshold=1e-5, inactive_window=50,
                 logger=None, switch_to=None):
        BaseSSVM.__init__(self, model, max_iter, C, verbose=verbose,
                          n_jobs=n_jobs, show_loss_every=show_loss_every,
                          logger=logger)

        self.negativity_constraint = negativity_constraint
        self.check_constraints = check_constraints
        self.break_on_bad = break_on_bad
        self.batch_size = batch_size
        self.tol = tol
        self.inactive_threshold = inactive_threshold
        self.inactive_window = inactive_window
        self.switch_to = switch_to

    def _solve_n_slack_qp(self, constraints, n_samples):
        C = self.C
        joint_features = [c[1] for sample in constraints for c in sample]
        losses = [c[2] for sample in constraints for c in sample]

        joint_feature_matrix = np.vstack(joint_features).astype(np.float)
        n_constraints = len(joint_features)
        P = cvxopt.matrix(np.dot(joint_feature_matrix,
                                 joint_feature_matrix.T))
        # q contains loss from margin-rescaling
        q = cvxopt.matrix(-np.array(losses, dtype=np.float))

        # constraints are a bit tricky. first, all alpha must be >= zero
        idy = np.identity(n_constraints)
        tmp1 = np.zeros(n_constraints)
        # box constraint: sum of all alpha for one example must be <= C
        blocks = np.zeros((n_samples, n_constraints))
        first = 0
        for i, sample in enumerate(constraints):
            blocks[i, first: first + len(sample)] = 1
            first += len(sample)
        # positivity constraints:
        if self.negativity_constraint is None:
            # empty constraints
            zero_constr = np.zeros(0)
            joint_features_constr = np.zeros((0, n_constraints))
        else:
            joint_features_constr = \
                joint_feature_matrix.T[self.negativity_constraint]
            zero_constr = np.zeros(len(self.negativity_constraint))

        # put together
        G = cvxopt.sparse(cvxopt.matrix(np.vstack((-idy, blocks,
                                                   joint_features_constr))))
        tmp2 = np.ones(n_samples) * C
        h = cvxopt.matrix(np.hstack((tmp1, tmp2, zero_constr)))

        # solve QP model
        cvxopt.solvers.options['feastol'] = 1e-5
        try:
            solution = cvxopt.solvers.qp(P, q, G, h)
        except ValueError:
            solution = {'status': 'error'}
        if solution['status'] != "optimal":
            print("regularizing QP!")
            P = cvxopt.matrix(np.dot(joint_feature_matrix,
                                     joint_feature_matrix.T)
                              + 1e-8 * np.eye(joint_feature_matrix.shape[0]))
            solution = cvxopt.solvers.qp(P, q, G, h)
            if solution['status'] != "optimal":
                raise ValueError("QP solver failed. Try regularizing your QP.")

        # Lagrange multipliers
        a = np.ravel(solution['x'])
        self.prune_constraints(constraints, a)
        self.old_solution = solution

        # Support vectors have non-zero lagrange multipliers
        sv = a > self.inactive_threshold * C
        box = np.dot(blocks, a)
        if self.verbose > 1:
            print("%d support vectors out of %d points" % (np.sum(sv),
                                                           n_constraints))
            # calculate per example box constraint:
            print("Box constraints at C: %d" % np.sum(1 - box / C < 1e-3))
            print("dual objective: %f" % -solution['primal objective'])
        self.w = np.dot(a, joint_feature_matrix)
        return -solution['primal objective']

    def _check_bad_constraint(self, y_hat, slack, old_constraints):
        if slack < 1e-5:
            return True
        y_hat_plain = unwrap_pairwise(y_hat)

        already_active = np.any([True for y__, _, _ in old_constraints
                                 if np.all(y_hat_plain ==
                                           unwrap_pairwise(y__))])
        if already_active:
            return True

        # "smart" stopping criterion
        # check if most violated constraint is more violated
        # than previous ones by more than eps.
        # If it is less violated, inference was wrong/approximate
        if self.check_constraints:
            for con in old_constraints:
                # compute slack for old constraint
                slack_tmp = max(con[2] - np.dot(self.w, con[1]), 0)
                if self.verbose > 5:
                    print("slack old constraint: %f" % slack_tmp)
                # if slack of new constraint is smaller or not
                # significantly larger, don't add constraint.
                # if smaller, complain about approximate inference.
                if slack - slack_tmp < -1e-5:
                    if self.verbose > 0:
                        print("bad inference: %f" % (slack_tmp - slack))
                    if self.break_on_bad:
                        raise ValueError("bad inference: %f"
                                         % (slack_tmp - slack))
                    return True
        return False

    def fit(self, X, Y, constraints=None, warm_start=None, initialize=True):
        """Learn parameters using cutting plane method.

        Parameters
        ----------
        X : iterable
            Training instances. Contains the structured input objects.
            No requirement on the particular form of entries of X is made.

        Y : iterable
            Training labels. Contains the structured labels for inputs in X.
            Needs to have the same length as X.

        constraints : iterable
            Known constraints for warm-starts. List of same length as X.
            Each entry is itself a list of constraints for a given
            instance x. Each constraint is of the form
            [y_hat, delta_joint_feature, loss], where y_hat is a labeling,
            ``delta_joint_feature = joint_feature(x, y) - joint_feature(x, y_hat)``
            and loss is the loss for predicting y_hat instead of the true
            label y.

        initialize : boolean, default=True
            Whether to initialize the model for the data.
            Leave this true except if you really know what you are doing.
        """
        if self.verbose:
            print("Training n-slack dual structural SVM")
        cvxopt.solvers.options['show_progress'] = self.verbose > 3
        if initialize:
            self.model.initialize(X, Y)
        self.w = np.zeros(self.model.size_joint_feature)
        n_samples = len(X)
        stopping_criterion = False
        if constraints is None:
            # fresh start
            constraints = [[] for i in range(n_samples)]
            self.last_active = [[] for i in range(n_samples)]
            self.objective_curve_ = []
            self.primal_objective_curve_ = []
            self.timestamps_ = [time()]
        else:
            # warm start
            objective = self._solve_n_slack_qp(constraints, n_samples)
        try:
            # catch ctrl+c to stop training
            # we have to update at least once after going through the dataset
            for iteration in range(self.max_iter):
                # main loop
                self.timestamps_.append(time() - self.timestamps_[0])
                if self.verbose > 0:
                    print("iteration %d" % iteration)
                if self.verbose > 2:
                    print(self)
                new_constraints = 0
                # generate slices through dataset from batch_size
                if self.batch_size < 1 and not self.batch_size == -1:
                    raise ValueError("batch_size should be integer >= 1 or -1,"
                                     "got %s." % str(self.batch_size))
                batch_size = (self.batch_size if self.batch_size != -1
                              else len(X))
                n_batches = int(np.ceil(float(len(X)) / batch_size))
                slices = gen_even_slices(n_samples, n_batches)
                indices = np.arange(n_samples)
                slack_sum = 0
                for batch in slices:
                    new_constraints_batch = 0
                    verbose = max(0, self.verbose - 3)
                    X_b = X[batch]
                    Y_b = Y[batch]
                    indices_b = indices[batch]
                    candidate_constraints = Parallel(
                        n_jobs=self.n_jobs, verbose=verbose)(
                            delayed(find_constraint)(self.model, x, y, self.w)
                            for x, y in zip(X_b, Y_b))

                    # for each batch, gather new constraints
                    for i, x, y, constraint in zip(indices_b, X_b, Y_b,
                                                   candidate_constraints):
                        # loop over samples in batch
                        y_hat, delta_joint_feature, slack, loss = constraint
                        slack_sum += slack

                        if self.verbose > 3:
                            print("current slack: %f" % slack)

                        if not loss > 0:
                            # can have y != y_hat but loss = 0 in latent svm.
                            # we need this here as djoint_feature is then != 0
                            continue

                        if self._check_bad_constraint(y_hat, slack,
                                                      constraints[i]):
                            continue

                        constraints[i].append([y_hat, delta_joint_feature,
                                               loss])
                        new_constraints_batch += 1

                    # after processing the slice, solve the qp
                    if new_constraints_batch:
                        objective = self._solve_n_slack_qp(constraints,
                                                           n_samples)
                        new_constraints += new_constraints_batch

                self.objective_curve_.append(objective)
                self._compute_training_loss(X, Y, iteration)

                primal_objective = (self.C * slack_sum
                                    + np.sum(self.w ** 2) / 2)
                self.primal_objective_curve_.append(primal_objective)

                if self.verbose > 0:
                    print("new constraints: %d, "
                          "cutting plane objective: %f primal objective: %f" %
                          (new_constraints, objective, primal_objective))

                if new_constraints == 0:
                    if self.verbose:
                        print("no additional constraints")
                    stopping_criterion = True

                if (iteration > 1 and self.objective_curve_[-1]
                        - self.objective_curve_[-2] < self.tol):
                    if self.verbose:
                        print("objective converged.")
                    stopping_criterion = True

                if stopping_criterion:
                    if (self.switch_to is not None and
                            self.model.inference_method != self.switch_to):
                        if self.verbose:
                            print("Switching to %s inference" %
                                  str(self.switch_to))
                        self.model.inference_method_ = \
                            self.model.inference_method
                        self.model.inference_method = self.switch_to
                        stopping_criterion = False
                        continue
                    else:
                        break

                if self.verbose > 5:
                    print(self.w)

                if self.logger is not None:
                    self.logger(self, iteration)
        except KeyboardInterrupt:
            pass
        self.constraints_ = constraints
        if self.verbose and self.n_jobs == 1:
            print("calls to inference: %d" % self.model.inference_calls)

        if verbose:
            print("Computing final objective.")
        self.timestamps_.append(time() - self.timestamps_[0])
        self.primal_objective_curve_.append(self._objective(X, Y))
        self.objective_curve_.append(objective)
        if self.logger is not None:
            self.logger(self, 'final')
        return self

    def prune_constraints(self, constraints, a):
        # append list for new constraint
        # self.last_active is a list which has an entry per sample.
        # each sample has an int for each constraint,
        # saying when it was last used
        if self.inactive_window == 0:
            return
        k = 0
        for i, sample in enumerate(constraints):
            # if there are no constraints for this sample, do nothing:
            if not len(sample):
                continue
            # add self.last_active for any new constraint
            n_old_constraints_sample = len(self.last_active[i])
            if n_old_constraints_sample < len(sample):
                self.last_active[i] = np.hstack([self.last_active[i], [0]])

            # if inactive, count up
            inactive_this = (a[k:k + len(sample)]
                             < self.inactive_threshold * self.C)
            self.last_active[i][inactive_this] += 1
            k += len(sample)
            assert(len(sample) == len(self.last_active[i]))

            # remove unused constraints:
            to_remove = self.last_active[i] > self.inactive_window
            self.last_active[i] = self.last_active[i][~to_remove]
            for j in np.where(to_remove)[0][::-1]:
                del sample[j]
            assert(len(sample) == len(self.last_active[i]))
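# Usage sketch (hypothetical data; assumes pystruct's GraphCRF model and
# lists X of (features, edges) inputs with matching label arrays Y):
#
#     from pystruct.models import GraphCRF
#
#     ssvm = NSlackSSVM(GraphCRF(), C=0.1, max_iter=20, verbose=1)
#     ssvm.fit(X_train, Y_train)
#     Y_pred = ssvm.predict(X_test)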
filters.py
from typing import List

import django_filters
from django.db.models import Q
from django.utils import timezone

from ...discount import DiscountValueType
from ...discount.models import Sale, Voucher, VoucherQueryset
from ..core.filters import ListObjectTypeFilter, ObjectTypeFilter
from ..core.types.common import DateTimeRangeInput, IntRangeInput
from ..utils.filters import filter_by_query_param, filter_range_field
from .enums import DiscountStatusEnum, DiscountValueTypeEnum, VoucherDiscountType


def filter_status(
    qs: VoucherQueryset, _, value: List[DiscountStatusEnum]
) -> VoucherQueryset:
    if not value:
        return qs
    query_objects = qs.none()
    now = timezone.now()
    if DiscountStatusEnum.ACTIVE in value:
        query_objects |= qs.active(now)
    if DiscountStatusEnum.EXPIRED in value:
        query_objects |= qs.expired(now)
    if DiscountStatusEnum.SCHEDULED in value:
        query_objects |= qs.filter(start_date__gt=now)
    return qs & query_objects


def filter_times_used(qs, _, value):
    return filter_range_field(qs, "used", value)


def filter_discount_type(
    qs: VoucherQueryset, _, value: List[VoucherDiscountType]
) -> VoucherQueryset:
    if value:
        query = Q()
        if VoucherDiscountType.FIXED in value:
            query |= Q(
                discount_value_type=VoucherDiscountType.FIXED.value  # type: ignore
            )
        if VoucherDiscountType.PERCENTAGE in value:
            query |= Q(
                discount_value_type=VoucherDiscountType.PERCENTAGE.value  # type: ignore
            )
        if VoucherDiscountType.SHIPPING in value:
            query |= Q(type=VoucherDiscountType.SHIPPING)
        qs = qs.filter(query).distinct()
    return qs


def filter_started(qs, _, value):
    return filter_range_field(qs, "start_date", value)


def filter_sale_type(qs, _, value):
    if value in [DiscountValueType.FIXED, DiscountValueType.PERCENTAGE]:
        qs = qs.filter(type=value)
    return qs


def filter_sale_search(qs, _, value):
    search_fields = ("name", "channel_listings__discount_value", "type")
    if value:
        qs = filter_by_query_param(qs, value, search_fields)
    return qs


def filter_voucher_search(qs, _, value):
    search_fields = ("name", "code")
    if value:
        qs = filter_by_query_param(qs, value, search_fields)
    return qs


class VoucherFilter(django_filters.FilterSet):
    status = ListObjectTypeFilter(input_class=DiscountStatusEnum, method=filter_status)
    times_used = ObjectTypeFilter(input_class=IntRangeInput, method=filter_times_used)
    discount_type = ListObjectTypeFilter(
        input_class=VoucherDiscountType, method=filter_discount_type
    )
    started = ObjectTypeFilter(input_class=DateTimeRangeInput, method=filter_started)
    search = django_filters.CharFilter(method=filter_voucher_search)

    class Meta:
        model = Voucher
        fields = ["status", "times_used", "discount_type", "started", "search"]


class SaleFilter(django_filters.FilterSet):
    status = ListObjectTypeFilter(input_class=DiscountStatusEnum, method=filter_status)
    sale_type = ObjectTypeFilter(
        input_class=DiscountValueTypeEnum, method=filter_sale_type
    )
    started = ObjectTypeFilter(input_class=DateTimeRangeInput, method=filter_started)
    search = django_filters.CharFilter(method=filter_sale_search)

    class Meta:
        model = Sale
        fields = ["status", "sale_type", "started", "search"]
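# Usage sketch (illustrative; narrows a voucher queryset to fixed-amount
# discounts, the same way the VoucherFilter wires it up):
#
#     fixed_vouchers = filter_discount_type(
#         Voucher.objects.all(), None, [VoucherDiscountType.FIXED]
#     )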
Table.js
import HeaderCell from './HeaderCell';
import TableLoader from './loaders/Table';
import TableDeltaHelper from './snippets/TableDeltaHelper';
import {TABLE_FADE_IN, TABLE_FADE_OUT} from '../animations';
import {
  DISTRICT_TABLE_COUNT,
  STATE_NAMES,
  STATISTIC_CONFIGS,
  TABLE_STATISTICS,
  TABLE_STATISTICS_EXPANDED,
  UNASSIGNED_STATE_CODE,
} from '../constants';
import {
  getTableStatistic,
  parseIndiaDate,
  retry,
} from '../utils/commonFunctions';

import {
  FilterIcon,
  FoldDownIcon,
  InfoIcon,
  OrganizationIcon,
  QuestionIcon,
} from '@primer/octicons-react';
import classnames from 'classnames';
import {max} from 'date-fns';
import equal from 'fast-deep-equal';
import produce from 'immer';
import {memo, useCallback, useEffect, useMemo, useState, lazy} from 'react';
import {
  ChevronLeft,
  ChevronsLeft,
  ChevronRight,
  ChevronsRight,
} from 'react-feather';
import {useTranslation} from 'react-i18next';
import {Link} from 'react-router-dom';
import {useTrail, useTransition, animated, config} from 'react-spring';
import {useSessionStorage} from 'react-use';
// eslint-disable-next-line
import worker from 'workerize-loader!../workers/getDistricts';

const Row = lazy(() => retry(() => import('./Row')));

function Table({
  data: states,
  date: timelineDate,
  regionHighlighted,
  setRegionHighlighted,
  expandTable,
  setExpandTable,
  hideDistrictData,
  hideVaccinated,
}) {
  const {t} = useTranslation();

  const [sortData, setSortData] = useSessionStorage('sortData', {
    sortColumn: 'confirmed',
    isAscending: false,
    delta: false,
  });
  const [page, setPage] = useState(0);

  const handleSortClick = useCallback(
    (statistic) => {
      if (sortData.sortColumn !== statistic) {
        setSortData(
          produce(sortData, (draftSortData) => {
            draftSortData.sortColumn = statistic;
          })
        );
      } else {
        setSortData(
          produce(sortData, (draftSortData) => {
            draftSortData.isAscending = !sortData.isAscending;
          })
        );
      }
    },
    [sortData, setSortData]
  );

  const trail = useTrail(3, {
    from: {transform: 'translate3d(0, 10px, 0)', opacity: 0},
    to: {transform: 'translate3d(0, 0px, 0)', opacity: 1},
    config: config.wobbly,
  });

  const [districts, setDistricts] = useState();
  const [tableOption, setTableOption] = useState('States');
  const [isPerMillion, setIsPerMillion] = useState(false);
  const [isInfoVisible, setIsInfoVisible] = useState(false);

  const numPages = Math.ceil(
    Object.keys(districts || {}).length / DISTRICT_TABLE_COUNT
  );

  const lastUpdatedTT = useMemo(() => {
    const updatedDates = [
      states['TT']?.meta?.['last_updated'] || timelineDate,
      states['TT']?.meta?.tested?.['last_updated'],
    ];
    return max(
      updatedDates.filter((date) => date).map((date) => parseIndiaDate(date))
    );
  }, [states, timelineDate]);

  const sortingFunction = useCallback(
    (regionKeyA, regionKeyB) => {
      if (sortData.sortColumn !== 'regionName') {
        const statisticConfig = STATISTIC_CONFIGS[sortData.sortColumn];
        const dataType =
          sortData.delta && !statisticConfig.hideDelta ? 'delta' : 'total';
        const statisticA = getTableStatistic(
          districts?.[regionKeyA] || states[regionKeyA],
          sortData.sortColumn,
          {perMillion: isPerMillion},
          lastUpdatedTT
        )[dataType];
        const statisticB = getTableStatistic(
          districts?.[regionKeyB] || states[regionKeyB],
          sortData.sortColumn,
          {perMillion: isPerMillion},
          lastUpdatedTT
        )[dataType];
        return sortData.isAscending
          ? statisticA - statisticB
          : statisticB - statisticA;
      } else {
        const regionNameA =
          districts?.[regionKeyA]?.districtName || STATE_NAMES[regionKeyA];
        const regionNameB =
          districts?.[regionKeyB]?.districtName || STATE_NAMES[regionKeyB];
        return sortData.isAscending
          ? regionNameA.localeCompare(regionNameB)
          : regionNameB.localeCompare(regionNameA);
      }
    },
    [
      districts,
      isPerMillion,
      lastUpdatedTT,
      sortData.delta,
      sortData.isAscending,
      sortData.sortColumn,
      states,
    ]
  );

  const _setTableOption = useCallback(() => {
    setTableOption((prevTableOption) =>
      prevTableOption === 'States' ? 'Districts' : 'States'
    );
  }, []);

  useEffect(() => {
    const workerInstance = worker();
    workerInstance.getDistricts(states);
    workerInstance.addEventListener('message', (message) => {
      if (message.data.type !== 'RPC') {
        setDistricts(message.data);
        workerInstance.terminate();
      }
    });
  }, [tableOption, states]);

  useEffect(() => {
    setPage((p) => Math.max(0, Math.min(p, numPages - 1)));
  }, [numPages]);

  const handlePageClick = (direction) => {
    if (Math.abs(direction) === 1) {
      setPage(Math.min(Math.max(0, page + direction), numPages - 1));
    } else if (direction < 0) {
      setPage(0);
    } else if (direction > 0) {
      setPage(numPages - 1);
    }
  };

  const transition = useTransition(isInfoVisible, {
    from: TABLE_FADE_OUT,
    enter: TABLE_FADE_IN,
    leave: TABLE_FADE_OUT,
  });

  const tableStatistics = (expandTable
    ? TABLE_STATISTICS_EXPANDED
    : TABLE_STATISTICS
  ).filter((statistic) => statistic !== 'vaccinated' || !hideVaccinated);

  const showDistricts = tableOption === 'Districts' && !hideDistrictData;

  useEffect(() => {
    if (!showDistricts) setPage(0);
  }, [showDistricts]);

  return (
    <div className="Table">
      <div className="table-top">
        <animated.div
          className={classnames('option-toggle', {
            'is-highlighted': showDistricts,
          })}
          onClick={_setTableOption}
          style={trail[0]}
        >
          <OrganizationIcon size={14} />
        </animated.div>

        <animated.div
          className={classnames('million-toggle', {
            'is-highlighted': isPerMillion,
          })}
          onClick={setIsPerMillion.bind(this, !isPerMillion)}
          style={trail[0]}
        >
          <span>10L</span>
        </animated.div>

        <animated.div
          className={classnames('info-toggle', {
            'is-highlighted': isInfoVisible,
          })}
          onClick={setIsInfoVisible.bind(this, !isInfoVisible)}
          style={trail[0]}
        >
          <QuestionIcon size={14} />
        </animated.div>

        <animated.div
          className={classnames('expand-table-toggle', {
            'is-highlighted': expandTable,
          })}
          style={trail[1]}
          onClick={setExpandTable.bind(this, !expandTable)}
        >
          <FoldDownIcon size={16} />
        </animated.div>
      </div>

      {transition(
        (style, item) =>
          item && (
            <animated.div className="table-helper" {...{style}}>
              <div className="helper-top">
                <div className="helper-left">
                  <div className="info-item">
                    <span>
                      <OrganizationIcon size={14} />
                    </span>
                    <p>{t('Toggle between States/Districts')}</p>
                  </div>
                  <div className="info-item">
                    <h5>10L</h5>
                    <p>{t('Per Ten Lakh People')}</p>
                  </div>
                  <div className="info-item sort">
                    <span>
                      <FilterIcon size={14} />
                    </span>
                    <p>{t('Sort by Descending')}</p>
                  </div>
                  <div className="info-item sort invert">
                    <span>
                      <FilterIcon size={14} />
                    </span>
                    <p>{t('Sort by Ascending')}</p>
                  </div>
                  <div className="info-item sort">
                    <TableDeltaHelper />
                  </div>
                  <div className="info-item notes">
                    <span>
                      <InfoIcon size={15} />
                    </span>
                    <p>{t('Notes')}</p>
                  </div>
                </div>

                <div className="helper-right">
                  <div className="info-item">
                    <p>{t('Units')}</p>
                  </div>
                  {Object.entries({'1K': 3, '1L': 5, '1Cr': 7}).map(
                    ([abbr, exp]) => (
                      <div className="info-item" key={abbr}>
                        <h5>{abbr}</h5>
                        <p>
                          10
                          <sup
                            style={{
                              verticalAlign: 'baseline',
                              position: 'relative',
                              top: '-.4em',
                            }}
                          >
                            {exp}
                          </sup>
                        </p>
                      </div>
                    )
                  )}
                </div>
              </div>

              <h5 className="text">
                {t('Compiled from State Govt. numbers')},{' '}
                <Link to="/about">{t('know more')}!</Link>
              </h5>
            </animated.div>
          )
      )}

      <div className="table-container">
        <div
          className="table fadeInUp"
          style={{
            gridTemplateColumns: `repeat(${tableStatistics.length + 1}, auto)`,
          }}
        >
          <div className="row heading">
            <div
              className="cell heading"
              onClick={handleSortClick.bind(this, 'regionName')}
            >
              <div>{t(!showDistricts ? 'State/UT' : 'District')}</div>
              {sortData.sortColumn === 'regionName' && (
                <div
                  className={classnames('sort-icon', {
                    invert: sortData.isAscending,
                  })}
                >
                  <FilterIcon size={10} />
                </div>
              )}
            </div>
            {tableStatistics.map((statistic) => (
              <HeaderCell
                key={statistic}
                {...{statistic, sortData, setSortData}}
                handleSort={handleSortClick.bind(this, statistic)}
              />
            ))}
          </div>

          {!showDistricts &&
            Object.keys(states)
              .filter(
                (stateCode) =>
                  stateCode !== 'TT' &&
                  !(stateCode === UNASSIGNED_STATE_CODE && isPerMillion)
              )
              .sort((a, b) => sortingFunction(a, b))
              .map((stateCode) => {
                return (
                  <Row
                    key={stateCode}
                    data={states[stateCode]}
                    {...{
                      stateCode,
                      isPerMillion,
                      regionHighlighted,
                      setRegionHighlighted,
                      expandTable,
                      lastUpdatedTT,
                      tableStatistics,
                    }}
                  />
                );
              })}

          {showDistricts && !districts && <TableLoader />}

          {showDistricts &&
            districts &&
            Object.keys(districts)
              .sort((a, b) => sortingFunction(a, b))
              .slice(
                page * DISTRICT_TABLE_COUNT,
                (page + 1) * DISTRICT_TABLE_COUNT
              )
              .map((districtKey) => {
                return (
                  <Row
                    key={districtKey}
                    data={districts[districtKey]}
                    districtName={districts[districtKey].districtName}
                    {...{
                      isPerMillion,
                      regionHighlighted,
                      setRegionHighlighted,
                      expandTable,
                      lastUpdatedTT,
                      tableStatistics,
                    }}
                  />
                );
              })}

          <Row
            key={'TT'}
            data={states['TT']}
            stateCode={'TT'}
            {...{
              isPerMillion,
              regionHighlighted,
              setRegionHighlighted,
              expandTable,
              lastUpdatedTT,
              tableStatistics,
            }}
          />
        </div>
      </div>

      {showDistricts && (
        <div className="paginate">
          <div
            className={classnames('left', {disabled: page === 0})}
            onClick={handlePageClick.bind(this, -2)}
          >
            <ChevronsLeft size={16} />
          </div>
          <div
            className={classnames('left', {disabled: page === 0})}
            onClick={handlePageClick.bind(this, -1)}
          >
            <ChevronLeft size={16} />
          </div>
          <h5>{`${page + 1} / ${numPages}`}</h5>
          <div
            className={classnames('right', {disabled: page === numPages - 1})}
            onClick={handlePageClick.bind(this, 1)}
          >
            <ChevronRight size={16} />
          </div>
          <div
            className={classnames('right', {disabled: page === numPages - 1})}
            onClick={handlePageClick.bind(this, 2)}
          >
            <ChevronsRight size={16} />
          </div>
        </div>
      )}
    </div>
  );
}

const isEqual = (prevProps, currProps) => {
  if (
    !equal(
      prevProps.regionHighlighted?.districtName,
      currProps.regionHighlighted?.districtName
    )
  ) {
    return false;
  } else if (
    !equal(
      prevProps.regionHighlighted?.stateCode,
      currProps.regionHighlighted?.stateCode
    )
  ) {
    return false;
  } else if (!equal(prevProps.date, currProps.date)) {
    return false;
  } else if (!equal(prevProps.hideDistrictData, currProps.hideDistrictData)) {
    // re-render when the district-data visibility toggles
    return false;
  } else if (!equal(prevProps.hideVaccinated, currProps.hideVaccinated)) {
    return false;
  } else if (
    !equal(
      prevProps.data['TT'].total.confirmed,
      currProps.data['TT'].total.confirmed
    )
  ) {
    return false;
  } else return true;
};

export default memo(Table, isEqual);
conftest.py
# -*- coding: UTF-8 -*-
import os
import sys

if sys.version_info.major == 2:
    # python2
    import ConfigParser as configparser
else:
    # python3
    import configparser

from .plugin import PyTestRailPlugin
from .testrail_api import APIClient


def pytest_addoption(parser):
    group = parser.getgroup('testrail')
    group.addoption(
        '--testrail',
        action='store_true',
        help='Create and update testruns with TestRail')
    group.addoption(
        '--tr-config',
        action='store',
        default='testrail.cfg',
        help='Path to the config file containing information about the TestRail server (defaults to testrail.cfg)')
    group.addoption(
        '--tr-url',
        action='store',
        help='TestRail address you use to access TestRail with your web browser (config file: url in API section)')
    group.addoption(
        '--tr-email',
        action='store',
        help='Email for the account on the TestRail server (config file: email in API section)')
    group.addoption(
        '--tr-password',
        action='store',
        help='Password for the account on the TestRail server (config file: password in API section)')
    group.addoption(
        '--tr-testrun-assignedto-id',
        action='store',
        help='ID of the user assigned to the test run (config file: assignedto_id in TESTRUN section)')
    group.addoption(
        '--tr-testrun-project-id',
        action='store',
        help='ID of the project the test run is in (config file: project_id in TESTRUN section)')
    group.addoption(
        '--tr-testrun-suite-id',
        action='store',
        help='ID of the test suite containing the test cases (config file: suite_id in TESTRUN section)')
    group.addoption(
        '--tr-testrun-suite-include-all',
        action='store_true',
        default=None,
        help='Include all test cases in specified test suite when creating test run (config file: include_all in TESTRUN section)')
    group.addoption(
        '--tr-testrun-name',
        action='store',
        default=None,
        help='Name given to testrun, that appears in TestRail (config file: name in TESTRUN section)')
    group.addoption(
        '--tr-run-id',
        action='store',
        default=0,
        required=False,
        help='Identifier of testrun, that appears in TestRail. If provided, option "--tr-testrun-name" will be ignored')
    group.addoption(
        '--tr-plan-id',
        action='store',
        default=0,
        required=False,
        help='Identifier of testplan, that appears in TestRail. If provided, option "--tr-testrun-name" will be ignored')
    group.addoption(
        '--tr-version',
        action='store',
        default='',
        required=False,
        help='Indicate a version in Test Case result')
    group.addoption(
        '--tr-no-ssl-cert-check',
        action='store_false',
        default=None,
        help='Do not check for valid SSL certificate on TestRail host')
    group.addoption(
        '--tr-close-on-complete',
        action='store_true',
        default=False,
        required=False,
        help='Close a test run on completion')
    group.addoption(
        '--tr-dont-publish-blocked',
        action='store_false',
        required=False,
        help='Determine if results of "blocked" testcases (in TestRail) are published or not')
    group.addoption(
        '--tr-skip-missing',
        action='store_true',
        required=False,
        help='Skip test cases that are not present in testrun')
    group.addoption(
        '--tr-add-passes',
        action='store',
        default=None,
        required=False,
        help='Add passing results, default is False')
    group.addoption(
        '--tr-testrun-milestone-id',
        action='store',
        help='Identifier for milestone, that appears in TestRail. If provided, testrun will be associated with milestone')


def pytest_configure(config):
    if config.getoption('--testrail'):
        cfg_file_path = config.getoption('--tr-config')
        config_manager = ConfigManager(cfg_file_path, config)
        client = APIClient(config_manager.getoption('tr-url', 'url', 'API'),
                           config_manager.getoption('tr-email', 'email', 'API'),
                           config_manager.getoption('tr-password', 'password', 'API'))

        config.pluginmanager.register(
            PyTestRailPlugin(
                client=client,
                assign_user_id=config_manager.getoption('tr-testrun-assignedto-id', 'assignedto_id', 'TESTRUN'),
                project_id=config_manager.getoption('tr-testrun-project-id', 'project_id', 'TESTRUN'),
                suite_id=config_manager.getoption('tr-testrun-suite-id', 'suite_id', 'TESTRUN'),
                include_all=config_manager.getoption('tr-testrun-suite-include-all', 'include_all', 'TESTRUN', is_bool=True, default=False),
                cert_check=config_manager.getoption('tr-no-ssl-cert-check', 'no_ssl_cert_check', 'API', is_bool=True, default=True),
                tr_name=config_manager.getoption('tr-testrun-name', 'name', 'TESTRUN'),
                milestone_id=config_manager.getoption('tr-testrun-milestone-id', 'milestone_id', 'TESTRUN'),
                run_id=config.getoption('--tr-run-id'),
                plan_id=config.getoption('--tr-plan-id'),
                version=config.getoption('--tr-version'),
                close_on_complete=config.getoption('--tr-close-on-complete'),
                publish_blocked=config.getoption('--tr-dont-publish-blocked'),
                skip_missing=config.getoption('--tr-skip-missing'),
                add_passes=config_manager.getoption('tr-add-passes', 'add_passes', 'TESTRUN', is_bool=True, default=None)
            ),
            # Name of plugin instance (allow to be used by other plugins)
            name="pytest-testrail-instance"
        )


class ConfigManager(object):
    def __init__(self, cfg_file_path, config):
        '''
        Handles retrieving configuration values. Config options set in flags
        are given preference over options set in the config file.

        :param cfg_file_path: Path to the config file containing information
            about the TestRail server.
        :type cfg_file_path: str or None
        :param config: Config object containing commandline flag options.
        :type config: _pytest.config.Config
        '''
        self.cfg_file = None
        if os.path.isfile(cfg_file_path) or os.path.islink(cfg_file_path):
            self.cfg_file = configparser.ConfigParser()
            self.cfg_file.read(cfg_file_path)
        self.config = config

    def getoption(self, flag, cfg_name, section=None, is_bool=False, default=None):
        # priority: cli > config file > default
        # 1. return cli option (if set)
        value = self.config.getoption('--{}'.format(flag))
        if value is not None:
            return value
        # 2. return default if no config file section is specified
        #    or no config file was found
        if section is None or self.cfg_file is None:
            return default
        if self.cfg_file.has_option(section, cfg_name):
            # 3. return config file value
            return self.cfg_file.getboolean(section, cfg_name) if is_bool \
                else self.cfg_file.get(section, cfg_name)
        else:
            # 4. if entry not found in config file
            return default
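# Example testrail.cfg consumed by ConfigManager, assembled from the option
# help strings above (all values are placeholders):
#
#     [API]
#     url = https://yourinstance.testrail.net
#     email = user@example.com
#     password = <password or API key>
#
#     [TESTRUN]
#     assignedto_id = 1
#     project_id = 2
#     suite_id = 3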
dummy-deployment.ts
import { ApplicationKind } from "pipecd/web/model/common_pb";
import { Deployment, DeploymentStatus } from "~/modules/deployments";
import { createGitPathFromObject } from "./common";
import { dummyApplication } from "./dummy-application";
import { dummyPiped } from "./dummy-piped";
import { createPipelineFromObject, dummyPipeline } from "./dummy-pipeline";
import { createTriggerFromObject, dummyTrigger } from "./dummy-trigger";
import { createRandTimes, randomUUID } from "./utils";

const [createdAt, completedAt] = createRandTimes(3);

export const dummyDeployment: Deployment.AsObject = {
  id: randomUUID(),
  pipedId: dummyPiped.id,
  projectId: "project-1",
  applicationName: dummyApplication.name,
  applicationId: dummyApplication.id,
  runningCommitHash: randomUUID().slice(0, 8),
  runningConfigFilename: ".pipe.yaml",
  stagesList: dummyPipeline,
  status: DeploymentStatus.DEPLOYMENT_SUCCESS,
  statusReason: "good",
  trigger: dummyTrigger,
  version: "0.0.0",
  versionsList: [],
  cloudProvider: "kube-1",
  labelsMap: [],
  createdAt: createdAt.unix(),
  updatedAt: completedAt.unix(),
  completedAt: completedAt.unix(),
  summary:
    "Quick sync by deploying the new version and configuring all traffic to it because no pipeline was configured",
  gitPath: {
    configPath: "",
    configFilename: "",
    path: "",
    url: "",
    repo: {
      id: "repo-1",
      branch: "master",
      remote: "xxx",
    },
  },
  kind: ApplicationKind.KUBERNETES,
  metadataMap: [],
  deploymentChainId: "",
  deploymentChainBlockIndex: 0,
};

export function createDeploymentFromObject(o: Deployment.AsObject): Deployment {
  const deployment = new Deployment();
  deployment.setId(o.id);
  deployment.setApplicationId(o.applicationId);
  deployment.setApplicationName(o.applicationName);
  deployment.setCloudProvider(o.cloudProvider);
  deployment.setCompletedAt(o.completedAt);
  deployment.setCreatedAt(o.createdAt);
  deployment.setKind(o.kind);
  deployment.setPipedId(o.pipedId);
  deployment.setProjectId(o.projectId);
  deployment.setRunningCommitHash(o.runningCommitHash);
  deployment.setStatus(o.status);
  deployment.setStatusReason(o.statusReason);
  deployment.setSummary(o.summary);
  deployment.setUpdatedAt(o.updatedAt);
  deployment.setVersion(o.version);
  o.gitPath && deployment.setGitPath(createGitPathFromObject(o.gitPath));
  o.trigger && deployment.setTrigger(createTriggerFromObject(o.trigger));
  o.stagesList &&
    deployment.setStagesList(createPipelineFromObject(o.stagesList));
  return deployment;
}
main.rs
use std::cmp;

use clap::{crate_authors, crate_version, App, AppSettings, Arg};
use yansi::Paint;

#[derive(Debug)]
struct Args {
    str1: String,
    str2: String,
}

fn parse_args() -> Args {
    let matches = App::new("Longest Common Subsequence")
        .version(crate_version!())
        .author(crate_authors!())
        .about("Compute the length of the longest common subsequence between two strings.")
        .setting(AppSettings::ArgRequiredElseHelp)
        .setting(AppSettings::ColoredHelp)
        .arg(Arg::with_name("str1").required(true))
        .arg(Arg::with_name("str2").required(true))
        .get_matches();

    Args {
        str1: String::from(matches.value_of("str1").unwrap()),
        str2: String::from(matches.value_of("str2").unwrap()),
    }
}

/// Compute the longest common subsequence between two strings.
///
/// Adapted from https://rosettacode.org/wiki/Longest_common_subsequence#Rust
///
/// Returns (lcs_distance, lcs_len, lcs)
fn lcs(string1: &str, string2: &str) -> (usize, usize, String) {
    let total_rows = string1.len() + 1;
    let total_columns = string2.len() + 1;

    // Rust doesn't allow indexing a string by integer position, so work on the bytes.
    let string1_chars = string1.as_bytes();
    let string2_chars = string2.as_bytes();

    let mut table = vec![vec![0; total_columns]; total_rows];

    for row in 1..total_rows {
        for col in 1..total_columns {
            if string1_chars[row - 1] == string2_chars[col - 1] {
                table[row][col] = table[row - 1][col - 1] + 1;
            } else {
                table[row][col] = cmp::max(table[row][col - 1], table[row - 1][col]);
            }
        }
    }

    let mut common_seq = Vec::new();
    let mut x = total_rows - 1;
    let mut y = total_columns - 1;

    while x != 0 && y != 0 {
        if table[x][y] == table[x - 1][y] {
            // The cell above holds the same length: move up.
            x -= 1;
        } else if table[x][y] == table[x][y - 1] {
            // The cell to the left holds the same length: move left.
            y -= 1;
        } else {
            // The characters at the respective x, y positions must match; take the character.
            assert_eq!(string1_chars[x - 1], string2_chars[y - 1]);
            let byte = string1_chars[x - 1];
            common_seq.push(byte);
            x -= 1;
            y -= 1;
        }
    }

    let len = table[total_rows - 1][total_columns - 1];
    common_seq.reverse();

    (
        (string1.len() - len) + (string2.len() - len),
        len,
        String::from_utf8(common_seq).unwrap(),
    )
}

fn
() { let args = parse_args(); let (dist, len, lcs) = lcs(args.str1.as_str(), args.str2.as_str()); println!( "dist: {}\nlen(lcs): {}\nlcs: {}", Paint::green(dist), len, lcs ); } #[cfg(test)] mod test { use super::*; #[test] fn test_simple() { let (dist, len, lcs) = lcs("asdf", "asd"); assert_eq!(dist, 1); assert_eq!(len, 3); assert_eq!(lcs, "asd"); } #[test] fn test_wikipedia() { let (dist, len, lcs) = lcs("kitten", "sitting"); assert_eq!(dist, 5); assert_eq!(len, 4); assert_eq!(lcs, "ittn"); } }
main
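The table built in lcs is the textbook LCS dynamic program, and the returned distance is simply the characters of each input that fall outside the common subsequence. A short Python sketch of the same recurrence (length only, no backtracking), checked against the values from the tests above:

def lcs_len(s1: str, s2: str) -> int:
    # table[i][j] = length of the LCS of s1[:i] and s2[:j]
    table = [[0] * (len(s2) + 1) for _ in range(len(s1) + 1)]
    for i in range(1, len(s1) + 1):
        for j in range(1, len(s2) + 1):
            if s1[i - 1] == s2[j - 1]:
                table[i][j] = table[i - 1][j - 1] + 1
            else:
                table[i][j] = max(table[i][j - 1], table[i - 1][j])
    return table[-1][-1]

# The distance counts the characters of each string outside the LCS.
l = lcs_len("kitten", "sitting")                    # 4 ("ittn")
dist = (len("kitten") - l) + (len("sitting") - l)   # 2 + 3 = 5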
check_hand_test.go
package player import ( "testing" "github.com/marcsantiago/go-poker/deck" ) func Test_isRoyalFlush(t *testing.T) { type args struct { hand deck.Hand } tests := []struct { name string args args want bool }{ { name: "Should be Royal Straight Flush 1", args: args{deck.Hand{ deck.Card{IsRoyal: true, RoyalType: deck.Royal("king")}, deck.Card{IsRoyal: true, RoyalType: deck.Royal("queen")}, deck.Card{IsRoyal: true, RoyalType: deck.Royal("jack")}, deck.Card{Value: 10}, deck.Card{Value: 9}, }}, want: true, }, { name: "Should be Royal Straight Flush 2", args: args{deck.Hand{ deck.Card{IsAce: true}, deck.Card{IsRoyal: true, RoyalType: deck.Royal("king")}, deck.Card{IsRoyal: true, RoyalType: deck.Royal("queen")}, deck.Card{IsRoyal: true, RoyalType: deck.Royal("jack")}, deck.Card{Value: 10}, }}, want: true, }, { name: "Should not be a Royal Flush", args: args{deck.Hand{ deck.Card{IsAce: true}, deck.Card{IsRoyal: true, RoyalType: deck.Royal("king")}, deck.Card{Value: 10}, deck.Card{IsRoyal: true, RoyalType: deck.Royal("jack")}, deck.Card{Value: 10}, }}, want: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := isRoyalFlush(tt.args.hand); got != tt.want { t.Errorf("isRoyalFlush() = %v, want %v", got, tt.want) } }) } } func Test_isFlush(t *testing.T) { type args struct { hand deck.Hand } tests := []struct { name string args args want bool }{ { name: "Should be a flush", args: args{deck.Hand{ deck.Card{IsRoyal: true, RoyalType: deck.Royal("king"), Suit: deck.Heart("hearts")}, deck.Card{IsRoyal: true, RoyalType: deck.Royal("queen"), Suit: deck.Heart("hearts")}, deck.Card{IsRoyal: true, RoyalType: deck.Royal("jack"), Suit: deck.Heart("hearts")}, deck.Card{Value: 10, Suit: deck.Heart("hearts")}, deck.Card{Value: 9, Suit: deck.Heart("hearts")}, }}, want: true, }, { name: "Should be a flush 2", args: args{deck.Hand{ deck.Card{Value: 8, Suit: deck.Clubs("club")}, deck.Card{Value: 9, Suit: deck.Clubs("club")}, deck.Card{Value: 10, Suit: deck.Clubs("club")}, deck.Card{IsRoyal: true, RoyalType: deck.Royal("jack"), Suit: deck.Clubs("club")}, deck.Card{IsRoyal: true, RoyalType: deck.Royal("queen"), Suit: deck.Clubs("club")}, }}, want: true, }, { name: "Should not be a flush", args: args{deck.Hand{ deck.Card{Value: 6, Suit: deck.Clubs("club")}, deck.Card{Value: 7, Suit: deck.Heart("hearts")}, deck.Card{Value: 8, Suit: deck.Clubs("club")}, deck.Card{Value: 9, Suit: deck.Clubs("club")}, deck.Card{Value: 10, Suit: deck.Clubs("club")}, }}, want: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := isFlush(tt.args.hand); got != tt.want
		})
	}
}

func Test_isStraight(t *testing.T) {
	type args struct {
		hand deck.Hand
	}
	tests := []struct {
		name string
		args args
		want bool
	}{
		{
			name: "Should be a straight",
			args: args{deck.Hand{
				deck.Card{IsRoyal: true, RoyalType: deck.Royal("king")},
				deck.Card{IsRoyal: true, RoyalType: deck.Royal("queen")},
				deck.Card{IsRoyal: true, RoyalType: deck.Royal("jack")},
				deck.Card{Value: 10},
				deck.Card{Value: 9},
			}},
			want: true,
		},
		{
			name: "Should be a straight 2",
			args: args{deck.Hand{
				deck.Card{Value: 8},
				deck.Card{Value: 9},
				deck.Card{Value: 10},
				deck.Card{IsRoyal: true, RoyalType: deck.Royal("jack")},
				deck.Card{IsRoyal: true, RoyalType: deck.Royal("queen")},
			}},
			want: true,
		},
		{
			name: "Should be a straight 3",
			args: args{deck.Hand{
				deck.Card{Value: 6},
				deck.Card{Value: 7},
				deck.Card{Value: 8},
				deck.Card{Value: 9},
				deck.Card{Value: 10},
			}},
			want: true,
		},
		{
			name: "Should not be a straight",
			args: args{deck.Hand{
				deck.Card{Value: 5},
				deck.Card{Value: 7},
				deck.Card{Value: 8},
				deck.Card{Value: 9},
				deck.Card{Value: 10},
			}},
			want: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := isStraight(tt.args.hand); got != tt.want {
				t.Errorf("isStraight() = %v, want %v", got, tt.want)
			}
		})
	}
}

func Test_isFullHouse(t *testing.T) {
	type args struct {
		hand deck.Hand
	}
	tests := []struct {
		name string
		args args
		want bool
	}{
		{
			name: "Should be a full house",
			args: args{deck.Hand{
				deck.Card{IsRoyal: true, RoyalType: deck.Royal("king")},
				deck.Card{IsRoyal: true, RoyalType: deck.Royal("king")},
				deck.Card{IsRoyal: true, RoyalType: deck.Royal("king")},
				deck.Card{Value: 2},
				deck.Card{Value: 2},
			}},
			want: true,
		},
		{
			name: "Should be a full house 2",
			args: args{deck.Hand{
				deck.Card{Value: 3},
				deck.Card{Value: 3},
				deck.Card{Value: 2},
				deck.Card{Value: 2},
				deck.Card{Value: 2},
			}},
			want: true,
		},
		{
			name: "Should not be a full house",
			args: args{deck.Hand{
				deck.Card{Value: 8},
				deck.Card{Value: 9},
				deck.Card{Value: 10},
				deck.Card{IsRoyal: true, RoyalType: deck.Royal("jack")},
				deck.Card{IsRoyal: true, RoyalType: deck.Royal("queen")},
			}},
			want: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := isFullHouse(tt.args.hand); got != tt.want {
				t.Errorf("isFullHouse() = %v, want %v", got, tt.want)
			}
		})
	}
}

func Test_isFourOfAKind(t *testing.T) {
	type args struct {
		hand deck.Hand
	}
	tests := []struct {
		name  string
		args  args
		want  string
		want1 bool
	}{
		{
			name: "Should be a 4 of a kind",
			args: args{deck.Hand{
				deck.Card{Value: 8},
				deck.Card{Value: 8},
				deck.Card{Value: 8},
				deck.Card{Value: 8},
				deck.Card{IsRoyal: true, RoyalType: deck.Royal("queen")},
			}},
			want:  "8",
			want1: true,
		},
		{
			name: "Should be a 4 of a kind 2",
			args: args{deck.Hand{
				deck.Card{IsRoyal: true, RoyalType: deck.Royal("king")},
				deck.Card{IsRoyal: true, RoyalType: deck.Royal("king")},
				deck.Card{IsRoyal: true, RoyalType: deck.Royal("king")},
				deck.Card{IsRoyal: true, RoyalType: deck.Royal("king")},
				deck.Card{IsRoyal: true, RoyalType: deck.Royal("queen")},
			}},
			want:  "king",
			want1: true,
		},
		{
			name: "Should not be a 4 of a kind",
			args: args{deck.Hand{
				deck.Card{Value: 3},
				deck.Card{Value: 8},
				deck.Card{Value: 8},
				deck.Card{Value: 8},
				deck.Card{IsRoyal: true, RoyalType: deck.Royal("queen")},
			}},
			want:  "",
			want1: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, got1 := isFourOfAKind(tt.args.hand)
			if got != tt.want {
				t.Errorf("isFourOfAKind() got = %v, want %v", got, tt.want)
			}
			if got1 != tt.want1 {
				t.Errorf("isFourOfAKind() got1 = %v, want %v", got1, tt.want1)
			}
		})
	}
}

func Test_isThreeOfAKind(t *testing.T) {
	type args struct {
		hand deck.Hand
	}
	tests := []struct {
		name  string
		args  args
		want  string
		want1 bool
	}{
		{
			name: "Should be a 3 of a kind",
			args: args{deck.Hand{
				deck.Card{Value: 8},
				deck.Card{Value: 8},
				deck.Card{Value: 8},
				deck.Card{Value: 7},
				deck.Card{IsRoyal: true, RoyalType: deck.Royal("queen")},
			}},
			want:  "8",
			want1: true,
		},
		{
			name: "Should be a 3 of a kind 2",
			args: args{deck.Hand{
				deck.Card{IsRoyal: true, RoyalType: deck.Royal("king")},
				deck.Card{IsRoyal: true, RoyalType: deck.Royal("king")},
				deck.Card{IsRoyal: true, RoyalType: deck.Royal("queen")},
				deck.Card{IsRoyal: true, RoyalType: deck.Royal("queen")},
				deck.Card{IsRoyal: true, RoyalType: deck.Royal("queen")},
			}},
			want:  "queen",
			want1: true,
		},
		{
			name: "Should not be a 3 of a kind",
			args: args{deck.Hand{
				deck.Card{Value: 3},
				deck.Card{Value: 8},
				deck.Card{Value: 8},
				deck.Card{Value: 3},
				deck.Card{IsRoyal: true, RoyalType: deck.Royal("queen")},
			}},
			want:  "",
			want1: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, got1 := isThreeOfAKind(tt.args.hand)
			if got != tt.want {
				t.Errorf("isThreeOfAKind() got = %v, want %v", got, tt.want)
			}
			if got1 != tt.want1 {
				t.Errorf("isThreeOfAKind() got1 = %v, want %v", got1, tt.want1)
			}
		})
	}
}

func Test_isTwoPair(t *testing.T) {
	type args struct {
		hand deck.Hand
	}
	tests := []struct {
		name  string
		args  args
		want  string
		want1 bool
	}{
		{
			name: "Should be two pairs",
			args: args{deck.Hand{
				deck.Card{Value: 8},
				deck.Card{Value: 8},
				deck.Card{Value: 7},
				deck.Card{Value: 7},
				deck.Card{IsRoyal: true, RoyalType: deck.Royal("queen")},
			}},
			want:  "8",
			want1: true,
		},
		{
			name: "Should be two pairs 2",
			args: args{deck.Hand{
				deck.Card{IsRoyal: true, RoyalType: deck.Royal("king")},
				deck.Card{IsRoyal: true, RoyalType: deck.Royal("king")},
				deck.Card{IsRoyal: true, RoyalType: deck.Royal("queen")},
				deck.Card{IsRoyal: true, RoyalType: deck.Royal("queen")},
				deck.Card{IsRoyal: true, RoyalType: deck.Royal("queen")},
			}},
			want:  "king",
			want1: true,
		},
		{
			name: "Should not be two pairs",
			args: args{deck.Hand{
				deck.Card{Value: 3},
				deck.Card{Value: 8},
				deck.Card{Value: 9},
				deck.Card{Value: 3},
				deck.Card{IsRoyal: true, RoyalType: deck.Royal("queen")},
			}},
			want:  "3",
			want1: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, got1 := isTwoPair(tt.args.hand)
			if got != tt.want {
				t.Errorf("isTwoPair() got = %v, want %v", got, tt.want)
			}
			if got1 != tt.want1 {
				t.Errorf("isTwoPair() got1 = %v, want %v", got1, tt.want1)
			}
		})
	}
}

func Test_isPair(t *testing.T) {
	type args struct {
		hand deck.Hand
	}
	tests := []struct {
		name  string
		args  args
		want  string
		want1 bool
	}{
		{
			name: "Should be a pair",
			args: args{deck.Hand{
				deck.Card{Value: 8},
				deck.Card{Value: 8},
				deck.Card{Value: 6},
				deck.Card{Value: 4},
				deck.Card{IsRoyal: true, RoyalType: deck.Royal("queen")},
			}},
			want:  "8",
			want1: true,
		},
		{
			name: "Should be a pair 2",
			args: args{deck.Hand{
				deck.Card{IsRoyal: true, RoyalType: deck.Royal("king")},
				deck.Card{IsRoyal: true, RoyalType: deck.Royal("king")},
				deck.Card{IsRoyal: true, RoyalType: deck.Royal("jack")},
				deck.Card{IsRoyal: true, RoyalType: deck.Royal("queen")},
				deck.Card{Value: 4},
			}},
			want:  "king",
			want1: true,
		},
		{
			name: "Should not be a pair",
			args: args{deck.Hand{
				deck.Card{Value: 1},
				deck.Card{Value: 8},
				deck.Card{Value: 9},
				deck.Card{Value: 3},
				deck.Card{IsRoyal: true, RoyalType: deck.Royal("queen")},
			}},
			want:  "",
			want1: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, got1 := isPair(tt.args.hand)
			if got != tt.want {
				t.Errorf("isPair() got = %v, want %v", got, tt.want)
			}
			if got1 != tt.want1 {
				t.Errorf("isPair() got1 = %v, want %v", got1, tt.want1)
			}
		})
	}
}
{ t.Errorf("isFlush() = %v, want %v", got, tt.want) }
factors_datasets.py
""" FACTOR dataset loader """ import os import logging import time import numpy as np import deepchem from deepchem.molnet.load_function.kaggle_features import merck_descriptors logger = logging.getLogger(__name__) TRAIN_URL = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/FACTORS_training_disguised_combined_full.csv.gz" VALID_URL = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/FACTORS_test1_disguised_combined_full.csv.gz" TEST_URL = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/FACTORS_test2_disguised_combined_full.csv.gz" TRAIN_FILENAME = "FACTORS_training_disguised_combined_full.csv.gz" VALID_FILENAME = "FACTORS_test1_disguised_combined_full.csv.gz" TEST_FILENAME = "FACTORS_test2_disguised_combined_full.csv.gz" def remove_missing_entries(dataset): """Remove missing entries. Some of the datasets have missing entries that sneak in as zero'd out feature vectors. Get rid of them. """ for i, (X, y, w, ids) in enumerate(dataset.itershards()): available_rows = X.any(axis=1) logger.info("Shard %d has %d missing entries." % (i, np.count_nonzero(~available_rows))) X = X[available_rows] y = y[available_rows] w = w[available_rows] ids = ids[available_rows] dataset.set_shard(i, X, y, w, ids) def get_transformers(train_dataset):
def gen_factors(FACTORS_tasks, data_dir, train_dir, valid_dir, test_dir, shard_size=2000): """Loads the FACTORS dataset; does not do train/test split""" time1 = time.time() train_files = os.path.join(data_dir, TRAIN_FILENAME) valid_files = os.path.join(data_dir, VALID_FILENAME) test_files = os.path.join(data_dir, TEST_FILENAME) if not os.path.exists(train_files): logger.info("Downloading train file...") deepchem.utils.data_utils.download_url(url=TRAIN_URL, dest_dir=data_dir) logger.info("Training file download complete.") logger.info("Downloading validation file...") deepchem.utils.data_utils.download_url(url=VALID_URL, dest_dir=data_dir) logger.info("Validation file download complete.") logger.info("Downloading test file...") deepchem.utils.data_utils.download_url(url=TEST_URL, dest_dir=data_dir) logger.info("Test file download complete") # Featurize the FACTORS dataset logger.info("About to featurize the FACTORS dataset") featurizer = deepchem.feat.UserDefinedFeaturizer(merck_descriptors) loader = deepchem.data.UserCSVLoader( tasks=FACTORS_tasks, id_field="Molecule", featurizer=featurizer) logger.info("Featurizing the train dataset...") train_dataset = loader.featurize(train_files, shard_size=shard_size) logger.info("Featurizing the validation dataset...") valid_dataset = loader.featurize(valid_files, shard_size=shard_size) logger.info("Featurizing the test dataset...") test_dataset = loader.featurize(test_files, shard_size=shard_size) logger.info("Remove missing entries from dataset") remove_missing_entries(train_dataset) remove_missing_entries(valid_dataset) remove_missing_entries(test_dataset) # Shuffle the training data logger.info("Shuffling the training dataset") train_dataset.sparse_shuffle() # Apply transformations logger.info("Transforming datasets with transformers") transformers = get_transformers(train_dataset) for transformer in transformers: logger.info("Performing transformations with {}".format( transformer.__class__.__name__)) logger.info("Transforming the training dataset...") train_dataset = transformer.transform(train_dataset) logger.info("Transforming the validation dataset...") valid_dataset = transformer.transform(valid_dataset) logger.info("Transforming the test dataset...") test_dataset = transformer.transform(test_dataset) logger.info("Transformations complete.") logger.info("Moving datasets to corresponding directories") train_dataset.move(train_dir) logger.info("Train dataset moved.") valid_dataset.move(valid_dir) logger.info("Validation dataset moved.") test_dataset.move(test_dir) logger.info("Test dataset moved.") time2 = time.time() # TIMING logger.info("TIMING: FACTORS fitting took %0.3f s" % (time2 - time1)) return train_dataset, valid_dataset, test_dataset def load_factors(shard_size=2000, featurizer=None, split=None, reload=True): """Loads FACTOR dataset; does not do train/test split The Factors dataset is an in-house dataset from Merck that was first introduced in the following paper: Ramsundar, Bharath, et al. "Is multitask deep learning practical for pharma?." Journal of chemical information and modeling 57.8 (2017): 2068-2076. It contains 1500 Merck in-house compounds that were measured for IC50 of inhibition on 12 serine proteases. Unlike most of the other datasets featured in MoleculeNet, the Factors collection does not have structures for the compounds tested since they were proprietary Merck compounds. However, the collection does feature pre-computed descriptors for these compounds. 
Note that the original train/valid/test split from the source data was preserved here, so this function doesn't allow for alternate modes of splitting. Similarly, since the source data came pre-featurized, it is not possible to apply alternative featurizations. Parameters ---------- shard_size: int, optional Size of the DiskDataset shards to write on disk featurizer: optional Ignored since featurization pre-computed split: optional Ignored since split pre-computed reload: bool, optional Whether to automatically re-load from disk """ FACTORS_tasks = [ 'T_00001', 'T_00002', 'T_00003', 'T_00004', 'T_00005', 'T_00006', 'T_00007', 'T_00008', 'T_00009', 'T_00010', 'T_00011', 'T_00012' ] data_dir = deepchem.utils.data_utils.get_data_dir() data_dir = os.path.join(data_dir, "factors") if not os.path.exists(data_dir): os.mkdir(data_dir) train_dir = os.path.join(data_dir, "train_dir") valid_dir = os.path.join(data_dir, "valid_dir") test_dir = os.path.join(data_dir, "test_dir") if (os.path.exists(train_dir) and os.path.exists(valid_dir) and os.path.exists(test_dir)): logger.info("Reloading existing datasets") train_dataset = deepchem.data.DiskDataset(train_dir) valid_dataset = deepchem.data.DiskDataset(valid_dir) test_dataset = deepchem.data.DiskDataset(test_dir) else: logger.info("Featurizing datasets") train_dataset, valid_dataset, test_dataset = gen_factors( FACTORS_tasks=FACTORS_tasks, data_dir=data_dir, train_dir=train_dir, valid_dir=valid_dir, test_dir=test_dir, shard_size=shard_size) transformers = get_transformers(train_dataset) return FACTORS_tasks, (train_dataset, valid_dataset, test_dataset), transformers
"""Gets transformers applied to the dataset""" transformers = list() # TODO: Check if anything needs to be added return transformers
minLength.spec.ts
import {JsonSchema, MinLength} from "../../../src/jsonschema"; import {stubSchemaDecorator} from "./utils"; describe("MinLength", () => { it("should store data", () => { const decorateStub = stubSchemaDecorator(); const schema = new JsonSchema();
    schema.minLength.should.eq(10);
    decorateStub.restore();
  });
  it("should throw an error when the given parameter is a negative integer", () => {
    let error: any;
    try {
      MinLength(-10);
    } catch (er) {
      error = er;
    }
    error.message.should.deep.equal("The value of minLength MUST be a non-negative integer.");
  });
});
MinLength(10); // @ts-ignore decorateStub.getCall(0).args[0](schema);
default.rs
use crate::utils::{
    any_parent_is_automatically_derived, contains_name, match_def_path, paths, qpath_res,
    snippet_with_macro_callsite,
};
use crate::utils::{span_lint_and_note, span_lint_and_sugg};
use if_chain::if_chain;
use rustc_data_structures::fx::FxHashSet;
use rustc_errors::Applicability;
use rustc_hir::def::Res;
use rustc_hir::{Block, Expr, ExprKind, PatKind, QPath, Stmt, StmtKind};
use rustc_lint::{LateContext, LateLintPass};
use rustc_middle::lint::in_external_macro;
use rustc_middle::ty;
use rustc_session::{declare_tool_lint, impl_lint_pass};
use rustc_span::symbol::{Ident, Symbol};
use rustc_span::Span;

declare_clippy_lint! {
    /// **What it does:** Checks for literal calls to `Default::default()`.
    ///
    /// **Why is this bad?** It's clearer to the reader to name the type whose
    /// default is being taken than to use the generic `Default`.
    ///
    /// **Known problems:** None.
    ///
    /// **Example:**
    /// ```rust
    /// // Bad
    /// let s: String = Default::default();
    ///
    /// // Good
    /// let s = String::default();
    /// ```
    pub DEFAULT_TRAIT_ACCESS,
    pedantic,
    "checks for literal calls to `Default::default()`"
}

declare_clippy_lint! {
    /// **What it does:** Checks for immediate reassignment of fields initialized
    /// with `Default::default()`.
    ///
    /// **Why is this bad?** It's more idiomatic to use the [functional update syntax](https://doc.rust-lang.org/reference/expressions/struct-expr.html#functional-update-syntax).
    ///
    /// **Known problems:** Assignments to patterns that are of tuple type are not linted.
    ///
    /// **Example:**
    /// Bad:
    /// ```
    /// # #[derive(Default)]
    /// # struct A { i: i32 }
    /// let mut a: A = Default::default();
    /// a.i = 42;
    /// ```
    /// Use instead:
    /// ```
    /// # #[derive(Default)]
    /// # struct A { i: i32 }
    /// let a = A {
/// i: 42, /// .. Default::default() /// }; /// ``` pub FIELD_REASSIGN_WITH_DEFAULT, style, "binding initialized with Default should have its fields set in the initializer" } #[derive(Default)] pub struct Default { // Spans linted by `field_reassign_with_default`. reassigned_linted: FxHashSet<Span>, } impl_lint_pass!(Default => [DEFAULT_TRAIT_ACCESS, FIELD_REASSIGN_WITH_DEFAULT]); impl LateLintPass<'_> for Default { fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx Expr<'_>) { if_chain! { // Avoid cases already linted by `field_reassign_with_default` if !self.reassigned_linted.contains(&expr.span); if let ExprKind::Call(ref path, ..) = expr.kind; if !any_parent_is_automatically_derived(cx.tcx, expr.hir_id); if let ExprKind::Path(ref qpath) = path.kind; if let Some(def_id) = cx.qpath_res(qpath, path.hir_id).opt_def_id(); if match_def_path(cx, def_id, &paths::DEFAULT_TRAIT_METHOD); // Detect and ignore <Foo as Default>::default() because these calls do explicitly name the type. if let QPath::Resolved(None, _path) = qpath; then { let expr_ty = cx.typeck_results().expr_ty(expr); if let ty::Adt(def, ..) = expr_ty.kind() { // TODO: Work out a way to put "whatever the imported way of referencing // this type in this file" rather than a fully-qualified type. let replacement = format!("{}::default()", cx.tcx.def_path_str(def.did)); span_lint_and_sugg( cx, DEFAULT_TRAIT_ACCESS, expr.span, &format!("calling `{}` is more clear than this expression", replacement), "try", replacement, Applicability::Unspecified, // First resolve the TODO above ); } } } } fn check_block<'tcx>(&mut self, cx: &LateContext<'tcx>, block: &Block<'tcx>) { // start from the `let mut _ = _::default();` and look at all the following // statements, see if they re-assign the fields of the binding let stmts_head = match block.stmts { // Skip the last statement since there cannot possibly be any following statements that re-assign fields. [head @ .., _] if !head.is_empty() => head, _ => return, }; for (stmt_idx, stmt) in stmts_head.iter().enumerate() { // find all binding statements like `let mut _ = T::default()` where `T::default()` is the // `default` method of the `Default` trait, and store statement index in current block being // checked and the name of the bound variable let (local, variant, binding_name, binding_type, span) = if_chain! { // only take `let ...` statements if let StmtKind::Local(local) = stmt.kind; if let Some(expr) = local.init; if !any_parent_is_automatically_derived(cx.tcx, expr.hir_id); if !in_external_macro(cx.tcx.sess, expr.span); // only take bindings to identifiers if let PatKind::Binding(_, binding_id, ident, _) = local.pat.kind; // only when assigning `... = Default::default()` if is_expr_default(expr, cx); let binding_type = cx.typeck_results().node_type(binding_id); if let Some(adt) = binding_type.ty_adt_def(); if adt.is_struct(); let variant = adt.non_enum_variant(); if adt.did.is_local() || !variant.is_field_list_non_exhaustive(); let module_did = cx.tcx.parent_module(stmt.hir_id).to_def_id(); if variant .fields .iter() .all(|field| field.vis.is_accessible_from(module_did, cx.tcx)); then { (local, variant, ident.name, binding_type, expr.span) } else { continue; } }; // find all "later statement"'s where the fields of the binding set as // Default::default() get reassigned, unless the reassignment refers to the original binding let mut first_assign = None; let mut assigned_fields = Vec::new(); let mut cancel_lint = false; for consecutive_statement in &block.stmts[stmt_idx + 1..] 
{ // find out if and which field was set by this `consecutive_statement` if let Some((field_ident, assign_rhs)) = field_reassigned_by_stmt(consecutive_statement, binding_name) { // interrupt and cancel lint if assign_rhs references the original binding if contains_name(binding_name, assign_rhs) { cancel_lint = true; break; } // if the field was previously assigned, replace the assignment, otherwise insert the assignment if let Some(prev) = assigned_fields .iter_mut() .find(|(field_name, _)| field_name == &field_ident.name) { *prev = (field_ident.name, assign_rhs); } else { assigned_fields.push((field_ident.name, assign_rhs)); } // also set first instance of error for help message if first_assign.is_none() { first_assign = Some(consecutive_statement); } } // interrupt if no field was assigned, since we only want to look at consecutive statements else { break; } } // if there are incorrectly assigned fields, do a span_lint_and_note to suggest // construction using `Ty { fields, ..Default::default() }` if !assigned_fields.is_empty() && !cancel_lint { // if all fields of the struct are not assigned, add `.. Default::default()` to the suggestion. let ext_with_default = !variant .fields .iter() .all(|field| assigned_fields.iter().any(|(a, _)| a == &field.ident.name)); let field_list = assigned_fields .into_iter() .map(|(field, rhs)| { // extract and store the assigned value for help message let value_snippet = snippet_with_macro_callsite(cx, rhs.span, ".."); format!("{}: {}", field, value_snippet) }) .collect::<Vec<String>>() .join(", "); let sugg = if ext_with_default { if field_list.is_empty() { format!("{}::default()", binding_type) } else { format!("{} {{ {}, ..Default::default() }}", binding_type, field_list) } } else { format!("{} {{ {} }}", binding_type, field_list) }; // span lint once per statement that binds default span_lint_and_note( cx, FIELD_REASSIGN_WITH_DEFAULT, first_assign.unwrap().span, "field assignment outside of initializer for an instance created with Default::default()", Some(local.span), &format!( "consider initializing the variable with `{}` and removing relevant reassignments", sugg ), ); self.reassigned_linted.insert(span); } } } } /// Checks if the given expression is the `default` method belonging to the `Default` trait. fn is_expr_default<'tcx>(expr: &'tcx Expr<'tcx>, cx: &LateContext<'tcx>) -> bool { if_chain! { if let ExprKind::Call(ref fn_expr, _) = &expr.kind; if let ExprKind::Path(qpath) = &fn_expr.kind; if let Res::Def(_, def_id) = qpath_res(cx, qpath, fn_expr.hir_id); then { // right hand side of assignment is `Default::default` match_def_path(cx, def_id, &paths::DEFAULT_TRAIT_METHOD) } else { false } } } /// Returns the reassigned field and the assigning expression (right-hand side of assign). fn field_reassigned_by_stmt<'tcx>(this: &Stmt<'tcx>, binding_name: Symbol) -> Option<(Ident, &'tcx Expr<'tcx>)> { if_chain! { // only take assignments if let StmtKind::Semi(ref later_expr) = this.kind; if let ExprKind::Assign(ref assign_lhs, ref assign_rhs, _) = later_expr.kind; // only take assignments to fields where the left-hand side field is a field of // the same binding as the previous statement if let ExprKind::Field(ref binding, field_ident) = assign_lhs.kind; if let ExprKind::Path(QPath::Resolved(_, path)) = binding.kind; if let Some(second_binding_name) = path.segments.last(); if second_binding_name.ident.name == binding_name; then { Some((field_ident, assign_rhs)) } else { None } } }
truncate.pipe.ts
import { Pipe, PipeTransform } from '@angular/core'; @Pipe({ name: 'truncate' }) export class
implements PipeTransform {
  transform(value: string, limit = 25, completeWords = false, ellipsis = '...') {
    // Nothing to truncate when the value already fits within the limit.
    if (value.length <= limit) {
      return value;
    }
    if (completeWords) {
      // Cut at the last space inside the limit; fall back to a hard cut when
      // there is none (lastIndexOf would otherwise return -1 and empty the string).
      const lastSpace = value.substr(0, limit).lastIndexOf(' ');
      if (lastSpace > 0) {
        limit = lastSpace;
      }
    }
    return `${value.substr(0, limit)}${ellipsis}`;
  }
}
TruncatePipe
__init__.py
# -*- coding: utf-8 -*- import docker_registry.core.driver as engine import tempfile from ..lib import config __all__ = ['load'] def temp_store_handler():
_storage = {} def load(kind=None): """Returns the right storage class according to the configuration.""" global _storage cfg = config.load() if not kind: kind = cfg.storage.lower() if kind == 'local': kind = 'file' if kind in _storage: return _storage[kind] _storage[kind] = engine.fetch(kind)( path=cfg.storage_path, config=cfg) return _storage[kind]
tmpf = tempfile.TemporaryFile() def fn(buf): tmpf.write(buf) return tmpf, fn
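load() memoizes one backend instance per kind in the module-level _storage dict, with 'local' aliased to the 'file' driver, so repeated lookups return the same object. A usage sketch, assuming a registry config whose storage is 'local' and whose storage_path is valid:

# Usage sketch (hypothetical config: storage = "local", storage_path set).
s1 = load()         # resolves "local" to the "file" driver and caches it
s2 = load("local")  # same kind, so the memoized instance is returned
assert s1 is s2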
events_eventa.rs
use super::{hub, identification, producer::Control, protocol, Context}; use crate::stat::Alias; use crate::test::samples; use clibri::server; use std::str::FromStr; use uuid::Uuid; type BroadcastStructA = (Vec<Uuid>, protocol::StructA); type BroadcastStructB = (Vec<Uuid>, protocol::StructB); #[allow(unused_variables)] pub async fn emit<E: server::Error, C: server::Control<E> + Send + Clone>( event: protocol::Events::EventA, filter: hub::filter::Filter, context: &Context, control: &Control<E, C>, ) -> Result<(BroadcastStructA, BroadcastStructB), String>
{
    let uuid = match Uuid::from_str(&event.uuid) {
        Ok(uuid) => uuid,
        Err(err) => {
            return Err(format!("Failed to parse uuid {}: {:?}", event.uuid, err));
        }
    };
    context.inc_stat(uuid, Alias::StructA).await;
    context.inc_stat(uuid, Alias::StructB).await;
    Ok((
        (vec![uuid], samples::struct_a::get()),
        (vec![uuid], samples::struct_b::get()),
    ))
}
index.tsx
import { useState, useEffect, useCallback, Key, useRef } from 'react'; import { TreeSelect, Tree, Input, Button, Modal, message, Typography, } from 'antd'; import config from '@/utils/config'; import { PageContainer } from '@ant-design/pro-layout'; import Editor from '@monaco-editor/react'; import { request } from '@/utils/http'; import styles from './index.module.less'; import EditModal from './editModal'; import { Controlled as CodeMirror } from 'react-codemirror2'; import { useCtx, useTheme } from '@/utils/hooks'; import SplitPane from 'react-split-pane'; const { Text } = Typography; function getFilterData(keyword: string, data: any) { if (keyword) { const tree: any = []; data.forEach((item: any) => { if (item.title.toLocaleLowerCase().includes(keyword)) { tree.push(item); } }); return { tree }; } return { tree: data }; } const LangMap: any = { '.py': 'python', '.js': 'javascript', '.sh': 'shell', '.ts': 'typescript', }; const Script = ({ headerStyle, isPhone, theme }: any) => { const [title, setTitle] = useState('请选择脚本文件'); const [value, setValue] = useState('请选择脚本文件'); const [select, setSelect] = useState<string>(); const [data, setData] = useState<any[]>([]); const [filterData, setFilterData] = useState<any[]>([]); const [loading, setLoading] = useState(false); const [mode, setMode] = useState(''); const [height, setHeight] = useState<number>(); const treeDom = useRef<any>(); const [isLogModalVisible, setIsLogModalVisible] = useState(false); const [searchValue, setSearchValue] = useState(''); const [isEditing, setIsEditing] = useState(false); const editorRef = useRef<any>(null); const getScripts = () => { setLoading(true); request .get(`${config.apiPrefix}scripts/files`) .then((data) => { setData(data.data); setFilterData(data.data); onSelect(data.data[0].value, data.data[0]); }) .finally(() => setLoading(false)); }; const getDetail = (node: any) => { request.get(`${config.apiPrefix}scripts/${node.value}`).then((data) => { setValue(data.data); }); }; const onSelect = (value: any, node: any) => { setValue('加载中...'); const newMode = LangMap[value.slice(-3)] || ''; setMode(isPhone && newMode === 'typescript' ? 'javascript' : newMode); setSelect(value); setTitle(node.parent || node.value); getDetail(node); }; const onTreeSelect = useCallback((keys: Key[], e: any) => { onSelect(keys[0], e.node); }, []); const onSearch = useCallback( (e) => { const keyword = e.target.value; setSearchValue(keyword); const { tree } = getFilterData(keyword.toLocaleLowerCase(), data); setFilterData(tree); }, [data, setFilterData], ); const editFile = () => { setIsEditing(true); }; const cancelEdit = () => { setIsEditing(false); setValue('加载中...'); getDetail({ value: select }); }; const saveFile = () => { Modal.confirm({ title: `确认保存`, content: ( <> 确认保存文件 <Text style={{ wordBreak: 'break-all' }} type="warning"> {select} </Text>{' '} ,保存后不可恢复 </> ), onOk() { const content = editorRef.current ? 
editorRef.current.getValue().replace(/\r\n/g, '\n') : value; request .put(`${config.apiPrefix}scripts`, { data: { filename: select, content, }, }) .then((_data: any) => { if (_data.code === 200) { message.success(`保存成功`); setIsEditing(false); } else { message.error(_data); } }); }, onCancel() { console.log('Cancel'); }, }); }; const deleteFile = () => { Modal.confirm({ title: `确认删除`, content: ( <> 确认删除文件 <Text style={{ wordBreak: 'break-all' }} type="warning"> {select} </Text>{' '} ,删除后不可恢复 </> ), onOk() { request .delete(`${config.apiPrefix}scripts`, { data: { filename: select, }, }) .then((_data: any) => { if (_data.code === 200) { message.success(`删除成功`); let newData = [...data]; const index = newData.findIndex((x) => x.value === select); newData.splice(index, 1); setData(newData); } else { message.error(_data); } }); }, onCancel() { console.log('Cancel'); }, }); }; useEffect(() => { const word = searchValue || ''; const { tree } = getFilterData(word.toLocaleLowerCase(), data); setFilterData(tree); setSelect(''); setTitle('请选择脚本文件'); setValue('请选择脚本文件'); }, [data]); useEffect(() => { getScripts(); setHeight(treeDom.current.clientHeight); }, []); return ( <PageContainer className="ql-container-wrapper log-wrapper" title={title} loading={loading} extra={ isPhone ? [ <TreeSelect className="log-select" value={select} dropdownStyle={{ maxHeight: 400, overflow: 'auto' }} treeData={data} placeholder="请选择脚本文件" showSearch key="value" onSelect={onSelect} />, <Button type="primary" onClick={deleteFile}> 删除 </Button>, ] : isEditing ? [ <Button type="primary" onClick={saveFile}> 保存 </Button>, <Button type="primary" onClick={cancelEdit}> 退出编辑 </Button>, ] : [ <Button type="primary" onClick={editFile}> 编辑 </Button>, <Button type="primary" onClick={deleteFile}> 删除 </Button>, <Button type="primary" onClick={() => { setIsLogModalVisible(true); }} > 调试 </Button>, ] } header={{ style: headerStyle, }} > <div className={`${styles['log-container']} log-container`}> {!isPhone && ( <SplitPane split="vertical" size={200} maxSize={-100}> <div className={styles['left-tree-container']}> <Input.Search className={styles['left-tree-search']} onChange={onSearch} ></Input.Search> <div className={styles['left-tree-scroller']} ref={treeDom}> <Tree className={styles['left-tree']} treeData={filterData} showIcon={true} height={height} showLine={{ showLeafIcon: true }} onSelect={onTreeSelect} defaultSelectedKeys={[data[0] && data[0].key]} ></Tree> </div> </div> <Editor language={mode} value={value} theme={theme} options={{ readOnly: !isEditing, fontSize: 12, lineNumbersMinChars: 3, folding: false, glyphMargin: false, }} onMount={(editor) => { editorRef.current = editor; }} /> </SplitPane> )} {isPhone && ( <CodeMirror value={value} options={{ lineNumbers: true, lineWrapping: true, styleActiveLine: true, matchBrackets: true, mode, readOnly: true, }} onBeforeChange={(editor, data, value) => { setValue(value); }} onChange={(editor, data, value) => {}} /> )} <EditModal visible={isLogModalVisible} treeData={data} currentFile={select} content={value} handleCancel={() => { setIsLogModalVisible(false);
</div> </PageContainer> ); }; export default Script;
}} />
index_scan_executor.rs
// Copyright 2019 TiKV Project Authors. Licensed under Apache-2.0. use std::sync::Arc; use cop_datatype::EvalType; use kvproto::coprocessor::KeyRange; use tipb::executor::IndexScan; use tipb::expression::FieldType; use tipb::schema::ColumnInfo; use crate::storage::{FixtureStore, Store}; use crate::coprocessor::codec::batch::{LazyBatchColumn, LazyBatchColumnVec}; use crate::coprocessor::dag::batch::interface::*; use crate::coprocessor::dag::expr::{EvalConfig, EvalContext}; use crate::coprocessor::dag::Scanner; use crate::coprocessor::{Error, Result}; pub struct BatchIndexScanExecutor<C: ExecSummaryCollector, S: Store>( super::util::scan_executor::ScanExecutor< C, S, IndexScanExecutorImpl, super::util::ranges_iter::PointRangeConditional, >, ); impl BatchIndexScanExecutor< crate::coprocessor::dag::exec_summary::ExecSummaryCollectorDisabled, FixtureStore, > { /// Checks whether this executor can be used. #[inline] pub fn check_supported(descriptor: &IndexScan) -> Result<()> { super::util::scan_executor::check_columns_info_supported(descriptor.get_columns()) .map_err(|e| box_err!("Unable to use BatchIndexScanExecutor: {}", e)) } } impl<C: ExecSummaryCollector, S: Store> BatchIndexScanExecutor<C, S> { pub fn new( summary_collector: C, store: S, config: Arc<EvalConfig>, columns_info: Vec<ColumnInfo>, key_ranges: Vec<KeyRange>, desc: bool, unique: bool, ) -> Result<Self> { // Note 1: `unique = true` doesn't completely mean that it is a unique index scan. Instead // it just means that we can use point-get for this index. In the following scenarios // `unique` will be `false`: // - scan from a non-unique index // - scan from a unique index with like: where unique-index like xxx // // Note 2: Unlike table scan executor, the accepted `columns_info` of index scan executor is // strictly stipulated. The order of columns in the schema must be the same as index data // stored and if PK handle is needed it must be placed as the last one. // // Note 3: Currently TiDB may send multiple PK handles to TiKV (but only the last one is // real). We accept this kind of request for compatibility considerations, but will be // forbidden soon. let decode_handle = columns_info.last().map_or(false, |ci| ci.get_pk_handle()); let schema: Vec<_> = columns_info .iter() .map(|ci| super::util::scan_executor::field_type_from_column_info(&ci)) .collect(); let columns_len_without_handle = if decode_handle { schema.len() - 1 } else { schema.len() }; let imp = IndexScanExecutorImpl { context: EvalContext::new(config), schema, columns_len_without_handle, decode_handle, }; let wrapper = super::util::scan_executor::ScanExecutor::new( summary_collector, imp, store, desc, key_ranges, super::util::ranges_iter::PointRangeConditional::new(unique), )?; Ok(Self(wrapper)) } } impl<C: ExecSummaryCollector, S: Store> BatchExecutor for BatchIndexScanExecutor<C, S> { #[inline] fn schema(&self) -> &[FieldType] { self.0.schema() } #[inline] fn next_batch(&mut self, scan_rows: usize) -> BatchExecuteResult { self.0.next_batch(scan_rows) } #[inline] fn
(&mut self, destination: &mut BatchExecuteStatistics) { self.0.collect_statistics(destination); } } struct IndexScanExecutorImpl { /// See `TableScanExecutorImpl`'s `context`. context: EvalContext, /// See `TableScanExecutorImpl`'s `schema`. schema: Vec<FieldType>, /// Number of interested columns (exclude PK handle column). columns_len_without_handle: usize, /// Whether PK handle column is interested. Handle will be always placed in the last column. decode_handle: bool, } impl super::util::scan_executor::ScanExecutorImpl for IndexScanExecutorImpl { #[inline] fn schema(&self) -> &[FieldType] { &self.schema } #[inline] fn mut_context(&mut self) -> &mut EvalContext { &mut self.context } #[inline] fn build_scanner<S: Store>( &self, store: &S, desc: bool, range: KeyRange, ) -> Result<Scanner<S>> { Scanner::new( store, crate::coprocessor::dag::ScanOn::Index, desc, false, range, ) } /// Constructs empty columns, with PK in decoded format and the rest in raw format. /// /// Note: the structure of the constructed column is the same as table scan executor but due /// to different reasons. fn build_column_vec(&self, scan_rows: usize) -> LazyBatchColumnVec { let columns_len = self.schema.len(); let mut columns = Vec::with_capacity(columns_len); for _ in 0..self.columns_len_without_handle { columns.push(LazyBatchColumn::raw_with_capacity(scan_rows)); } if self.decode_handle { // For primary key, we construct a decoded `VectorValue` because it is directly // stored as i64, without a datum flag, in the value (for unique index). // Note that for normal index, primary key is appended at the end of key with a // datum flag. columns.push(LazyBatchColumn::decoded_with_capacity_and_tp( scan_rows, EvalType::Int, )); } assert_eq!(columns.len(), columns_len); LazyBatchColumnVec::from(columns) } fn process_kv_pair( &mut self, key: &[u8], mut value: &[u8], columns: &mut LazyBatchColumnVec, ) -> Result<()> { use crate::coprocessor::codec::{datum, table}; use byteorder::{BigEndian, ReadBytesExt}; use tikv_util::codec::number; // The payload part of the key let mut key_payload = &key[table::PREFIX_LEN + table::ID_LEN..]; for i in 0..self.columns_len_without_handle { let (val, remaining) = datum::split_datum(key_payload, false)?; columns[i].push_raw(val); key_payload = remaining; } if self.decode_handle { // For normal index, it is placed at the end and any columns prior to it are // ensured to be interested. For unique index, it is placed in the value. let handle_val = if key_payload.is_empty() { // This is a unique index, and we should look up PK handle in value. // NOTE: it is not `number::decode_i64`. value.read_i64::<BigEndian>().map_err(|_| { Error::Other(box_err!("Failed to decode handle in value as i64")) })? } else { // This is a normal index. The remaining payload part is the PK handle. // Let's decode it and put in the column. let flag = key_payload[0]; let mut val = &key_payload[1..]; // TODO: Better to use `push_datum`. This requires us to allow `push_datum` // receiving optional time zone first. match flag { datum::INT_FLAG => number::decode_i64(&mut val).map_err(|_| { Error::Other(box_err!("Failed to decode handle in key as i64")) })?, datum::UINT_FLAG => { (number::decode_u64(&mut val).map_err(|_| { Error::Other(box_err!("Failed to decode handle in key as u64")) })?) 
as i64 } _ => { return Err(Error::Other(box_err!("Unexpected handle flag {}", flag))); } } }; columns[self.columns_len_without_handle] .mut_decoded() .push_int(Some(handle_val)); } Ok(()) } } #[cfg(test)] mod tests { use super::*; use std::sync::Arc; use byteorder::{BigEndian, WriteBytesExt}; use cop_datatype::{FieldTypeAccessor, FieldTypeTp}; use kvproto::coprocessor::KeyRange; use tipb::schema::ColumnInfo; use crate::coprocessor::codec::mysql::Tz; use crate::coprocessor::codec::{datum, table, Datum}; use crate::coprocessor::dag::exec_summary::*; use crate::coprocessor::dag::expr::EvalConfig; use crate::coprocessor::util::convert_to_prefix_next; use crate::storage::{FixtureStore, Key}; #[test] fn test_basic() { const TABLE_ID: i64 = 3; const INDEX_ID: i64 = 42; // Index schema: (INT, FLOAT) // the elements in data are: [int index, float index, handle id]. let data = vec![ [Datum::I64(-5), Datum::F64(0.3), Datum::I64(10)], [Datum::I64(5), Datum::F64(5.1), Datum::I64(5)], [Datum::I64(5), Datum::F64(10.5), Datum::I64(2)], ]; // The column info for each column in `data`. Used to build the executor. let columns_info = vec![ { let mut ci = ColumnInfo::new(); ci.as_mut_accessor().set_tp(FieldTypeTp::LongLong); ci }, { let mut ci = ColumnInfo::new(); ci.as_mut_accessor().set_tp(FieldTypeTp::Double); ci }, { let mut ci = ColumnInfo::new(); ci.as_mut_accessor().set_tp(FieldTypeTp::LongLong); ci.set_pk_handle(true); ci }, ]; // The schema of these columns. Used to check executor output. let schema = vec![ FieldTypeTp::LongLong.into(), FieldTypeTp::Double.into(), FieldTypeTp::LongLong.into(), ]; // Case 1. Normal index. // For a normal index, the PK handle is stored in the key and nothing interesting is stored // in the value. So let's build corresponding KV data. let store = { let kv = data .iter() .map(|datums| { let index_data = datum::encode_key(datums).unwrap(); let key = table::encode_index_seek_key(TABLE_ID, INDEX_ID, &index_data); let key = Key::from_raw(key.as_slice()); let value = vec![]; (key, Ok(value)) }) .collect(); FixtureStore::new(kv) }; { // Case 1.1. Normal index, without PK, scan total index in reverse order. let key_ranges = vec![{ let mut range = KeyRange::new(); let start_data = datum::encode_key(&[Datum::Min]).unwrap(); let start_key = table::encode_index_seek_key(TABLE_ID, INDEX_ID, &start_data); range.set_start(start_key); let end_data = datum::encode_key(&[Datum::Max]).unwrap(); let end_key = table::encode_index_seek_key(TABLE_ID, INDEX_ID, &end_data); range.set_end(end_key); range }]; let mut executor = BatchIndexScanExecutor::new( ExecSummaryCollectorDisabled, store.clone(), Arc::new(EvalConfig::default()), vec![columns_info[0].clone(), columns_info[1].clone()], key_ranges, true, false, ) .unwrap(); let mut result = executor.next_batch(10); assert!(result.is_drained.as_ref().unwrap()); assert_eq!(result.data.columns_len(), 2); assert_eq!(result.data.rows_len(), 3); assert!(result.data[0].is_raw()); result.data[0].decode(&Tz::utc(), &schema[0]).unwrap(); assert_eq!( result.data[0].decoded().as_int_slice(), &[Some(5), Some(5), Some(-5)] ); assert!(result.data[1].is_raw()); result.data[1].decode(&Tz::utc(), &schema[1]).unwrap(); assert_eq!( result.data[1].decoded().as_real_slice(), &[Some(10.5), Some(5.1), Some(0.3)] ); } { // Case 1.2. Normal index, with PK, scan index prefix. 
let key_ranges = vec![{ let mut range = KeyRange::new(); let start_data = datum::encode_key(&[Datum::I64(2)]).unwrap(); let start_key = table::encode_index_seek_key(TABLE_ID, INDEX_ID, &start_data); range.set_start(start_key); let end_data = datum::encode_key(&[Datum::I64(6)]).unwrap(); let end_key = table::encode_index_seek_key(TABLE_ID, INDEX_ID, &end_data); range.set_end(end_key); range }]; let mut executor = BatchIndexScanExecutor::new( ExecSummaryCollectorDisabled, store.clone(), Arc::new(EvalConfig::default()), vec![ columns_info[0].clone(), columns_info[1].clone(), columns_info[2].clone(), ], key_ranges, false, false, ) .unwrap(); let mut result = executor.next_batch(10); assert!(result.is_drained.as_ref().unwrap()); assert_eq!(result.data.columns_len(), 3); assert_eq!(result.data.rows_len(), 2); assert!(result.data[0].is_raw()); result.data[0].decode(&Tz::utc(), &schema[0]).unwrap(); assert_eq!(result.data[0].decoded().as_int_slice(), &[Some(5), Some(5)]); assert!(result.data[1].is_raw()); result.data[1].decode(&Tz::utc(), &schema[1]).unwrap(); assert_eq!( result.data[1].decoded().as_real_slice(), &[Some(5.1), Some(10.5)] ); assert!(result.data[2].is_decoded()); assert_eq!(result.data[2].decoded().as_int_slice(), &[Some(5), Some(2)]); } // Case 2. Unique index. // For a unique index, the PK handle is stored in the value. let store = { let kv = data .iter() .map(|datums| { let index_data = datum::encode_key(&datums[0..2]).unwrap(); let key = table::encode_index_seek_key(TABLE_ID, INDEX_ID, &index_data); let key = Key::from_raw(key.as_slice()); // PK handle in the value let mut value = vec![]; value .write_i64::<BigEndian>(datums[2].as_int().unwrap().unwrap()) .unwrap(); (key, Ok(value)) }) .collect(); FixtureStore::new(kv) }; { // Case 2.1. Unique index, prefix range scan. let key_ranges = vec![{ let mut range = KeyRange::new(); let start_data = datum::encode_key(&[Datum::I64(5)]).unwrap(); let start_key = table::encode_index_seek_key(TABLE_ID, INDEX_ID, &start_data); range.set_start(start_key); range.set_end(range.get_start().to_vec()); convert_to_prefix_next(range.mut_end()); range }]; let mut executor = BatchIndexScanExecutor::new( ExecSummaryCollectorDisabled, store.clone(), Arc::new(EvalConfig::default()), vec![ columns_info[0].clone(), columns_info[1].clone(), columns_info[2].clone(), ], key_ranges, false, false, ) .unwrap(); let mut result = executor.next_batch(10); assert!(result.is_drained.as_ref().unwrap()); assert_eq!(result.data.columns_len(), 3); assert_eq!(result.data.rows_len(), 2); assert!(result.data[0].is_raw()); result.data[0].decode(&Tz::utc(), &schema[0]).unwrap(); assert_eq!(result.data[0].decoded().as_int_slice(), &[Some(5), Some(5)]); assert!(result.data[1].is_raw()); result.data[1].decode(&Tz::utc(), &schema[1]).unwrap(); assert_eq!( result.data[1].decoded().as_real_slice(), &[Some(5.1), Some(10.5)] ); assert!(result.data[2].is_decoded()); assert_eq!(result.data[2].decoded().as_int_slice(), &[Some(5), Some(2)]); } { // Case 2.2. Unique index, point scan. 
let key_ranges = vec![{ let mut range = KeyRange::new(); let start_data = datum::encode_key(&[Datum::I64(5), Datum::F64(5.1)]).unwrap(); let start_key = table::encode_index_seek_key(TABLE_ID, INDEX_ID, &start_data); range.set_start(start_key); range.set_end(range.get_start().to_vec()); convert_to_prefix_next(range.mut_end()); range }]; let mut executor = BatchIndexScanExecutor::new( ExecSummaryCollectorDisabled, store.clone(), Arc::new(EvalConfig::default()), vec![ columns_info[0].clone(), columns_info[1].clone(), columns_info[2].clone(), ], key_ranges, false, true, ) .unwrap(); let mut result = executor.next_batch(10); assert!(result.is_drained.as_ref().unwrap()); assert_eq!(result.data.columns_len(), 3); assert_eq!(result.data.rows_len(), 1); assert!(result.data[0].is_raw()); result.data[0].decode(&Tz::utc(), &schema[0]).unwrap(); assert_eq!(result.data[0].decoded().as_int_slice(), &[Some(5)]); assert!(result.data[1].is_raw()); result.data[1].decode(&Tz::utc(), &schema[1]).unwrap(); assert_eq!(result.data[1].decoded().as_real_slice(), &[Some(5.1)]); assert!(result.data[2].is_decoded()); assert_eq!(result.data[2].decoded().as_int_slice(), &[Some(5)]); } } }
collect_statistics
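process_kv_pair distinguishes two handle layouts: a normal index keeps the handle as the flagged trailing datum of the key, while a unique index stores it in the value as a big-endian i64. A rough Python sketch of that branch only; the flag constant and the plain big-endian payload are simplifications (TiKV's datum encoding is an order-preserving format, not a raw i64):

import struct

INT_FLAG = 0x03  # TiKV's int datum flag; the payload encoding below is simplified

def decode_handle(key_payload: bytes, value: bytes) -> int:
    """Mirror of the handle-decoding branch, on toy encodings."""
    if not key_payload:
        # Unique index: the handle lives in the value as a big-endian i64.
        return struct.unpack(">q", value[:8])[0]
    # Normal index: the handle is the flagged trailing datum of the key.
    flag, payload = key_payload[0], key_payload[1:]
    if flag != INT_FLAG:
        raise ValueError("unexpected handle flag %d" % flag)
    return struct.unpack(">q", payload[:8])[0]

assert decode_handle(b"", struct.pack(">q", 42)) == 42
assert decode_handle(bytes([INT_FLAG]) + struct.pack(">q", 7), b"") == 7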
test_decomp.py
""" Test functions for linalg.decomp module """ from __future__ import division, print_function, absolute_import __usage__ = """ Build linalg: python setup_linalg.py build Run tests if scipy is installed: python -c 'import scipy;scipy.linalg.test()' Run tests if linalg is not installed: python tests/test_decomp.py """ import numpy as np from numpy.testing import (TestCase, assert_equal, assert_almost_equal, assert_array_almost_equal, assert_array_equal, assert_raises, assert_, assert_allclose, run_module_suite, dec) from scipy._lib.six import xrange from scipy.linalg import (eig, eigvals, lu, svd, svdvals, cholesky, qr, schur, rsf2csf, lu_solve, lu_factor, solve, diagsvd, hessenberg, rq, eig_banded, eigvals_banded, eigh, eigvalsh, qr_multiply, qz, orth, ordqz) from scipy.linalg.lapack import dgbtrf, dgbtrs, zgbtrf, zgbtrs, \ dsbev, dsbevd, dsbevx, zhbevd, zhbevx from scipy.linalg.misc import norm from numpy import array, transpose, sometrue, diag, ones, linalg, \ argsort, zeros, arange, float32, complex64, dot, conj, identity, \ ravel, sqrt, iscomplex, shape, sort, conjugate, bmat, sign, \ asarray, matrix, isfinite, all, ndarray, outer, eye, dtype, empty,\ triu, tril from numpy.random import normal, seed, random from scipy.linalg._testutils import assert_no_overwrite # digit precision to use in asserts for different types DIGITS = {'d':11, 'D':11, 'f':4, 'F':4} # XXX: This function should be available through numpy.testing def assert_dtype_equal(act, des): if isinstance(act, ndarray): act = act.dtype else: act = dtype(act) if isinstance(des, ndarray): des = des.dtype else: des = dtype(des) assert_(act == des, 'dtype mismatch: "%s" (should be "%s") ' % (act, des)) # XXX: This function should not be defined here, but somewhere in # scipy.linalg namespace def symrand(dim_or_eigv): """Return a random symmetric (Hermitian) matrix. If 'dim_or_eigv' is an integer N, return a NxN matrix, with eigenvalues uniformly distributed on (-1,1). If 'dim_or_eigv' is 1-D real array 'a', return a matrix whose eigenvalues are 'a'. """ if isinstance(dim_or_eigv, int): dim = dim_or_eigv d = random(dim)*2 - 1 elif (isinstance(dim_or_eigv, ndarray) and len(dim_or_eigv.shape) == 1): dim = dim_or_eigv.shape[0] d = dim_or_eigv else: raise TypeError("input type not supported.") v = random_rot(dim) h = dot(dot(v.T.conj(), diag(d)), v) # to avoid roundoff errors, symmetrize the matrix (again) h = 0.5*(h.T+h) return h # XXX: This function should not be defined here, but somewhere in # scipy.linalg namespace def random_rot(dim): """Return a random rotation matrix, drawn from the Haar distribution (the only uniform distribution on SO(n)). The algorithm is described in the paper Stewart, G.W., 'The efficient generation of random orthogonal matrices with an application to condition estimators', SIAM Journal on Numerical Analysis, 17(3), pp. 403-409, 1980. 
For more information see http://en.wikipedia.org/wiki/Orthogonal_matrix#Randomization""" H = eye(dim) D = ones((dim,)) for n in range(1, dim): x = normal(size=(dim-n+1,)) D[n-1] = sign(x[0]) x[0] -= D[n-1]*sqrt((x*x).sum()) # Householder transformation Hx = eye(dim-n+1) - 2.*outer(x, x)/(x*x).sum() mat = eye(dim) mat[n-1:,n-1:] = Hx H = dot(H, mat) # Fix the last sign such that the determinant is 1 D[-1] = -D.prod() H = (D*H.T).T return H class TestEigVals(TestCase): def test_simple(self): a = [[1,2,3],[1,2,3],[2,5,6]] w = eigvals(a) exact_w = [(9+sqrt(93))/2,0,(9-sqrt(93))/2] assert_array_almost_equal(w,exact_w) def test_simple_tr(self): a = array([[1,2,3],[1,2,3],[2,5,6]],'d') a = transpose(a).copy() a = transpose(a) w = eigvals(a) exact_w = [(9+sqrt(93))/2,0,(9-sqrt(93))/2] assert_array_almost_equal(w,exact_w) def test_simple_complex(self): a = [[1,2,3],[1,2,3],[2,5,6+1j]] w = eigvals(a) exact_w = [(9+1j+sqrt(92+6j))/2, 0, (9+1j-sqrt(92+6j))/2] assert_array_almost_equal(w,exact_w) def test_check_finite(self): a = [[1,2,3],[1,2,3],[2,5,6]] w = eigvals(a, check_finite=False) exact_w = [(9+sqrt(93))/2,0,(9-sqrt(93))/2] assert_array_almost_equal(w,exact_w) class TestEig(object): def test_simple(self): a = [[1,2,3],[1,2,3],[2,5,6]] w,v = eig(a) exact_w = [(9+sqrt(93))/2,0,(9-sqrt(93))/2] v0 = array([1,1,(1+sqrt(93)/3)/2]) v1 = array([3.,0,-1]) v2 = array([1,1,(1-sqrt(93)/3)/2]) v0 = v0 / sqrt(dot(v0,transpose(v0))) v1 = v1 / sqrt(dot(v1,transpose(v1))) v2 = v2 / sqrt(dot(v2,transpose(v2))) assert_array_almost_equal(w,exact_w) assert_array_almost_equal(v0,v[:,0]*sign(v[0,0])) assert_array_almost_equal(v1,v[:,1]*sign(v[0,1])) assert_array_almost_equal(v2,v[:,2]*sign(v[0,2])) for i in range(3): assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i]) w,v = eig(a,left=1,right=0) for i in range(3): assert_array_almost_equal(dot(transpose(a),v[:,i]),w[i]*v[:,i]) def test_simple_complex_eig(self): a = [[1,2],[-2,1]] w,vl,vr = eig(a,left=1,right=1) assert_array_almost_equal(w, array([1+2j, 1-2j])) for i in range(2): assert_array_almost_equal(dot(a,vr[:,i]),w[i]*vr[:,i]) for i in range(2): assert_array_almost_equal(dot(conjugate(transpose(a)),vl[:,i]), conjugate(w[i])*vl[:,i]) def test_simple_complex(self): a = [[1,2,3],[1,2,3],[2,5,6+1j]] w,vl,vr = eig(a,left=1,right=1) for i in range(3): assert_array_almost_equal(dot(a,vr[:,i]),w[i]*vr[:,i]) for i in range(3): assert_array_almost_equal(dot(conjugate(transpose(a)),vl[:,i]), conjugate(w[i])*vl[:,i]) def _check_gen_eig(self, A, B): A, B = asarray(A), asarray(B) msg = "\n%r\n%r" % (A, B) w, vr = eig(A,B) wt = eigvals(A,B) val1 = dot(A, vr) val2 = dot(B, vr) * w res = val1 - val2 for i in range(res.shape[1]): if all(isfinite(res[:, i])): assert_array_almost_equal(res[:, i], 0, err_msg=msg) assert_array_almost_equal(sort(w[isfinite(w)]), sort(wt[isfinite(wt)]), err_msg=msg) length = np.empty(len(vr)) for i in xrange(len(vr)): length[i] = norm(vr[:, i]) assert_array_almost_equal(length, np.ones(length.size), err_msg=msg) @dec.knownfailureif(True, "See gh-2254.") def test_singular(self): # Example taken from # http://www.cs.umu.se/research/nla/singular_pairs/guptri/matlab.html A = array(([22,34,31,31,17], [45,45,42,19,29], [39,47,49,26,34], [27,31,26,21,15], [38,44,44,24,30])) B = array(([13,26,25,17,24], [31,46,40,26,37], [26,40,19,25,25], [16,25,27,14,23], [24,35,18,21,22])) olderr = np.seterr(all='ignore') try: self._check_gen_eig(A, B) finally: np.seterr(**olderr) def test_falker(self): """Test matrices giving some Nan generalized eigen values.""" M = 
diag(array(([1,0,3]))) K = array(([2,-1,-1],[-1,2,-1],[-1,-1,2])) D = array(([1,-1,0],[-1,1,0],[0,0,0])) Z = zeros((3,3)) I = identity(3) A = bmat([[I,Z],[Z,-K]]) B = bmat([[Z,I],[M,D]]) olderr = np.seterr(all='ignore') try: self._check_gen_eig(A, B) finally: np.seterr(**olderr) def test_bad_geneig(self): # Ticket #709 (strange return values from DGGEV) def matrices(omega): c1 = -9 + omega**2 c2 = 2*omega A = [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, c1, 0], [0, 0, 0, c1]] B = [[0, 0, 1, 0], [0, 0, 0, 1], [1, 0, 0, -c2], [0, 1, c2, 0]] return A, B # With a buggy LAPACK, this can fail for different omega on different # machines -- so we need to test several values olderr = np.seterr(all='ignore') try: for k in xrange(100): A, B = matrices(omega=k*5./100) self._check_gen_eig(A, B) finally: np.seterr(**olderr) def test_check_finite(self): a = [[1,2,3],[1,2,3],[2,5,6]] w,v = eig(a, check_finite=False) exact_w = [(9+sqrt(93))/2,0,(9-sqrt(93))/2] v0 = array([1,1,(1+sqrt(93)/3)/2]) v1 = array([3.,0,-1]) v2 = array([1,1,(1-sqrt(93)/3)/2]) v0 = v0 / sqrt(dot(v0,transpose(v0))) v1 = v1 / sqrt(dot(v1,transpose(v1))) v2 = v2 / sqrt(dot(v2,transpose(v2))) assert_array_almost_equal(w,exact_w) assert_array_almost_equal(v0,v[:,0]*sign(v[0,0])) assert_array_almost_equal(v1,v[:,1]*sign(v[0,1])) assert_array_almost_equal(v2,v[:,2]*sign(v[0,2])) for i in range(3): assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i]) def test_not_square_error(self): """Check that passing a non-square array raises a ValueError.""" A = np.arange(6).reshape(3,2) assert_raises(ValueError, eig, A) def test_shape_mismatch(self): """Check that passing arrays of with different shapes raises a ValueError.""" A = identity(2) B = np.arange(9.0).reshape(3,3) assert_raises(ValueError, eig, A, B) assert_raises(ValueError, eig, B, A) class TestEigBanded(TestCase): def __init__(self, *args): TestCase.__init__(self, *args) self.create_bandmat() def create_bandmat(self): """Create the full matrix `self.fullmat` and the corresponding band matrix `self.bandmat`.""" N = 10 self.KL = 2 # number of subdiagonals (below the diagonal) self.KU = 2 # number of superdiagonals (above the diagonal) # symmetric band matrix self.sym_mat = (diag(1.0*ones(N)) + diag(-1.0*ones(N-1), -1) + diag(-1.0*ones(N-1), 1) + diag(-2.0*ones(N-2), -2) + diag(-2.0*ones(N-2), 2)) # hermitian band matrix self.herm_mat = (diag(-1.0*ones(N)) + 1j*diag(1.0*ones(N-1), -1) - 1j*diag(1.0*ones(N-1), 1) + diag(-2.0*ones(N-2), -2) + diag(-2.0*ones(N-2), 2)) # general real band matrix self.real_mat = (diag(1.0*ones(N)) + diag(-1.0*ones(N-1), -1) + diag(-3.0*ones(N-1), 1) + diag(2.0*ones(N-2), -2) + diag(-2.0*ones(N-2), 2)) # general complex band matrix self.comp_mat = (1j*diag(1.0*ones(N)) + diag(-1.0*ones(N-1), -1) + 1j*diag(-3.0*ones(N-1), 1) + diag(2.0*ones(N-2), -2) + diag(-2.0*ones(N-2), 2)) # Eigenvalues and -vectors from linalg.eig ew, ev = linalg.eig(self.sym_mat) ew = ew.real args = argsort(ew) self.w_sym_lin = ew[args] self.evec_sym_lin = ev[:,args] ew, ev = linalg.eig(self.herm_mat) ew = ew.real args = argsort(ew) self.w_herm_lin = ew[args] self.evec_herm_lin = ev[:,args] # Extract upper bands from symmetric and hermitian band matrices # (for use in dsbevd, dsbevx, zhbevd, zhbevx # and their single precision versions) LDAB = self.KU + 1 self.bandmat_sym = zeros((LDAB, N), dtype=float) self.bandmat_herm = zeros((LDAB, N), dtype=complex) for i in xrange(LDAB): self.bandmat_sym[LDAB-i-1,i:N] = diag(self.sym_mat, i) self.bandmat_herm[LDAB-i-1,i:N] = diag(self.herm_mat, i) # Extract 
bands from general real and complex band matrix # (for use in dgbtrf, dgbtrs and their single precision versions) LDAB = 2*self.KL + self.KU + 1 self.bandmat_real = zeros((LDAB, N), dtype=float) self.bandmat_real[2*self.KL,:] = diag(self.real_mat) # diagonal for i in xrange(self.KL): # superdiagonals self.bandmat_real[2*self.KL-1-i,i+1:N] = diag(self.real_mat, i+1) # subdiagonals self.bandmat_real[2*self.KL+1+i,0:N-1-i] = diag(self.real_mat,-i-1) self.bandmat_comp = zeros((LDAB, N), dtype=complex) self.bandmat_comp[2*self.KL,:] = diag(self.comp_mat) # diagonal for i in xrange(self.KL): # superdiagonals self.bandmat_comp[2*self.KL-1-i,i+1:N] = diag(self.comp_mat, i+1) # subdiagonals self.bandmat_comp[2*self.KL+1+i,0:N-1-i] = diag(self.comp_mat,-i-1) # absolute value for linear equation system A*x = b self.b = 1.0*arange(N) self.bc = self.b * (1 + 1j) ##################################################################### def test_dsbev(self): """Compare dsbev eigenvalues and eigenvectors with the result of linalg.eig.""" w, evec, info = dsbev(self.bandmat_sym, compute_v=1) evec_ = evec[:,argsort(w)] assert_array_almost_equal(sort(w), self.w_sym_lin) assert_array_almost_equal(abs(evec_), abs(self.evec_sym_lin)) def test_dsbevd(self): """Compare dsbevd eigenvalues and eigenvectors with the result of linalg.eig.""" w, evec, info = dsbevd(self.bandmat_sym, compute_v=1) evec_ = evec[:,argsort(w)] assert_array_almost_equal(sort(w), self.w_sym_lin) assert_array_almost_equal(abs(evec_), abs(self.evec_sym_lin)) def test_dsbevx(self): """Compare dsbevx eigenvalues and eigenvectors with the result of linalg.eig.""" N,N = shape(self.sym_mat) ## Achtung: Argumente 0.0,0.0,range? w, evec, num, ifail, info = dsbevx(self.bandmat_sym, 0.0, 0.0, 1, N, compute_v=1, range=2) evec_ = evec[:,argsort(w)] assert_array_almost_equal(sort(w), self.w_sym_lin) assert_array_almost_equal(abs(evec_), abs(self.evec_sym_lin)) def test_zhbevd(self): """Compare zhbevd eigenvalues and eigenvectors with the result of linalg.eig.""" w, evec, info = zhbevd(self.bandmat_herm, compute_v=1) evec_ = evec[:,argsort(w)] assert_array_almost_equal(sort(w), self.w_herm_lin) assert_array_almost_equal(abs(evec_), abs(self.evec_herm_lin)) def test_zhbevx(self): """Compare zhbevx eigenvalues and eigenvectors with the result of linalg.eig.""" N,N = shape(self.herm_mat) ## Achtung: Argumente 0.0,0.0,range? 
w, evec, num, ifail, info = zhbevx(self.bandmat_herm, 0.0, 0.0, 1, N, compute_v=1, range=2) evec_ = evec[:,argsort(w)] assert_array_almost_equal(sort(w), self.w_herm_lin) assert_array_almost_equal(abs(evec_), abs(self.evec_herm_lin)) def test_eigvals_banded(self): """Compare eigenvalues of eigvals_banded with those of linalg.eig.""" w_sym = eigvals_banded(self.bandmat_sym) w_sym = w_sym.real assert_array_almost_equal(sort(w_sym), self.w_sym_lin) w_herm = eigvals_banded(self.bandmat_herm) w_herm = w_herm.real assert_array_almost_equal(sort(w_herm), self.w_herm_lin) # extracting eigenvalues with respect to an index range ind1 = 2 ind2 = 6 w_sym_ind = eigvals_banded(self.bandmat_sym, select='i', select_range=(ind1, ind2)) assert_array_almost_equal(sort(w_sym_ind), self.w_sym_lin[ind1:ind2+1]) w_herm_ind = eigvals_banded(self.bandmat_herm, select='i', select_range=(ind1, ind2)) assert_array_almost_equal(sort(w_herm_ind), self.w_herm_lin[ind1:ind2+1]) # extracting eigenvalues with respect to a value range v_lower = self.w_sym_lin[ind1] - 1.0e-5 v_upper = self.w_sym_lin[ind2] + 1.0e-5 w_sym_val = eigvals_banded(self.bandmat_sym, select='v', select_range=(v_lower, v_upper)) assert_array_almost_equal(sort(w_sym_val), self.w_sym_lin[ind1:ind2+1]) v_lower = self.w_herm_lin[ind1] - 1.0e-5 v_upper = self.w_herm_lin[ind2] + 1.0e-5 w_herm_val = eigvals_banded(self.bandmat_herm, select='v', select_range=(v_lower, v_upper)) assert_array_almost_equal(sort(w_herm_val), self.w_herm_lin[ind1:ind2+1]) w_sym = eigvals_banded(self.bandmat_sym, check_finite=False) w_sym = w_sym.real assert_array_almost_equal(sort(w_sym), self.w_sym_lin) def test_eig_banded(self): """Compare eigenvalues and eigenvectors of eig_banded with those of linalg.eig. """ w_sym, evec_sym = eig_banded(self.bandmat_sym) evec_sym_ = evec_sym[:,argsort(w_sym.real)] assert_array_almost_equal(sort(w_sym), self.w_sym_lin) assert_array_almost_equal(abs(evec_sym_), abs(self.evec_sym_lin)) w_herm, evec_herm = eig_banded(self.bandmat_herm) evec_herm_ = evec_herm[:,argsort(w_herm.real)] assert_array_almost_equal(sort(w_herm), self.w_herm_lin) assert_array_almost_equal(abs(evec_herm_), abs(self.evec_herm_lin)) # extracting eigenvalues with respect to an index range ind1 = 2 ind2 = 6 w_sym_ind, evec_sym_ind = eig_banded(self.bandmat_sym, select='i', select_range=(ind1, ind2)) assert_array_almost_equal(sort(w_sym_ind), self.w_sym_lin[ind1:ind2+1]) assert_array_almost_equal(abs(evec_sym_ind), abs(self.evec_sym_lin[:,ind1:ind2+1])) w_herm_ind, evec_herm_ind = eig_banded(self.bandmat_herm, select='i', select_range=(ind1, ind2)) assert_array_almost_equal(sort(w_herm_ind), self.w_herm_lin[ind1:ind2+1]) assert_array_almost_equal(abs(evec_herm_ind), abs(self.evec_herm_lin[:,ind1:ind2+1])) # extracting eigenvalues with respect to a value range v_lower = self.w_sym_lin[ind1] - 1.0e-5 v_upper = self.w_sym_lin[ind2] + 1.0e-5 w_sym_val, evec_sym_val = eig_banded(self.bandmat_sym, select='v', select_range=(v_lower, v_upper)) assert_array_almost_equal(sort(w_sym_val), self.w_sym_lin[ind1:ind2+1]) assert_array_almost_equal(abs(evec_sym_val), abs(self.evec_sym_lin[:,ind1:ind2+1])) v_lower = self.w_herm_lin[ind1] - 1.0e-5 v_upper = self.w_herm_lin[ind2] + 1.0e-5 w_herm_val, evec_herm_val = eig_banded(self.bandmat_herm, select='v', select_range=(v_lower, v_upper)) assert_array_almost_equal(sort(w_herm_val), self.w_herm_lin[ind1:ind2+1]) assert_array_almost_equal(abs(evec_herm_val), abs(self.evec_herm_lin[:,ind1:ind2+1])) w_sym, evec_sym = eig_banded(self.bandmat_sym, 
check_finite=False) evec_sym_ = evec_sym[:,argsort(w_sym.real)] assert_array_almost_equal(sort(w_sym), self.w_sym_lin) assert_array_almost_equal(abs(evec_sym_), abs(self.evec_sym_lin)) def test_dgbtrf(self): """Compare dgbtrf LU factorisation with the LU factorisation result of linalg.lu.""" M,N = shape(self.real_mat) lu_symm_band, ipiv, info = dgbtrf(self.bandmat_real, self.KL, self.KU) # extract matrix u from lu_symm_band u = diag(lu_symm_band[2*self.KL,:]) for i in xrange(self.KL + self.KU): u += diag(lu_symm_band[2*self.KL-1-i,i+1:N], i+1) p_lin, l_lin, u_lin = lu(self.real_mat, permute_l=0) assert_array_almost_equal(u, u_lin) def test_zgbtrf(self): """Compare zgbtrf LU factorisation with the LU factorisation result of linalg.lu.""" M,N = shape(self.comp_mat) lu_symm_band, ipiv, info = zgbtrf(self.bandmat_comp, self.KL, self.KU) # extract matrix u from lu_symm_band u = diag(lu_symm_band[2*self.KL,:]) for i in xrange(self.KL + self.KU): u += diag(lu_symm_band[2*self.KL-1-i,i+1:N], i+1) p_lin, l_lin, u_lin = lu(self.comp_mat, permute_l=0) assert_array_almost_equal(u, u_lin) def test_dgbtrs(self): """Compare dgbtrs solutions for linear equation system A*x = b with solutions of linalg.solve.""" lu_symm_band, ipiv, info = dgbtrf(self.bandmat_real, self.KL, self.KU) y, info = dgbtrs(lu_symm_band, self.KL, self.KU, self.b, ipiv) y_lin = linalg.solve(self.real_mat, self.b) assert_array_almost_equal(y, y_lin) def test_zgbtrs(self): """Compare zgbtrs solutions for linear equation system A*x = b with solutions of linalg.solve.""" lu_symm_band, ipiv, info = zgbtrf(self.bandmat_comp, self.KL, self.KU) y, info = zgbtrs(lu_symm_band, self.KL, self.KU, self.bc, ipiv) y_lin = linalg.solve(self.comp_mat, self.bc) assert_array_almost_equal(y, y_lin) def test_eigh(): DIM = 6 v = {'dim': (DIM,), 'dtype': ('f','d','F','D'), 'overwrite': (True, False), 'lower': (True, False), 'turbo': (True, False), 'eigvals': (None, (2, DIM-2))} for dim in v['dim']: for typ in v['dtype']: for overwrite in v['overwrite']: for turbo in v['turbo']: for eigenvalues in v['eigvals']: for lower in v['lower']: yield (eigenhproblem_standard, 'ordinary', dim, typ, overwrite, lower, turbo, eigenvalues) yield (eigenhproblem_general, 'general ', dim, typ, overwrite, lower, turbo, eigenvalues) def test_eigh_of_sparse(): # This tests the rejection of inputs that eigh cannot currently handle. 
import scipy.sparse a = scipy.sparse.identity(2).tocsc() b = np.atleast_2d(a) assert_raises(ValueError, eigh, a) assert_raises(ValueError, eigh, b) def _complex_symrand(dim, dtype): a1, a2 = symrand(dim), symrand(dim) # add antisymmetric matrix as imag part a = a1 + 1j*(triu(a2)-tril(a2)) return a.astype(dtype) def eigenhproblem_standard(desc, dim, dtype, overwrite, lower, turbo, eigenvalues): """Solve a standard eigenvalue problem.""" if iscomplex(empty(1, dtype=dtype)): a = _complex_symrand(dim, dtype) else: a = symrand(dim).astype(dtype) if overwrite: a_c = a.copy() else: a_c = a w, z = eigh(a, overwrite_a=overwrite, lower=lower, eigvals=eigenvalues) assert_dtype_equal(z.dtype, dtype) w = w.astype(dtype) diag_ = diag(dot(z.T.conj(), dot(a_c, z))).real assert_array_almost_equal(diag_, w, DIGITS[dtype]) def eigenhproblem_general(desc, dim, dtype, overwrite, lower, turbo, eigenvalues): """Solve a generalized eigenvalue problem.""" if iscomplex(empty(1, dtype=dtype)): a = _complex_symrand(dim, dtype) b = _complex_symrand(dim, dtype)+diag([2.1]*dim).astype(dtype) else: a = symrand(dim).astype(dtype) b = symrand(dim).astype(dtype)+diag([2.1]*dim).astype(dtype) if overwrite: a_c, b_c = a.copy(), b.copy() else: a_c, b_c = a, b w, z = eigh(a, b, overwrite_a=overwrite, lower=lower, overwrite_b=overwrite, turbo=turbo, eigvals=eigenvalues) assert_dtype_equal(z.dtype, dtype) w = w.astype(dtype) diag1_ = diag(dot(z.T.conj(), dot(a_c, z))).real assert_array_almost_equal(diag1_, w, DIGITS[dtype]) diag2_ = diag(dot(z.T.conj(), dot(b_c, z))).real assert_array_almost_equal(diag2_, ones(diag2_.shape[0]), DIGITS[dtype]) def test_eigh_integer(): a = array([[1,2],[2,7]]) b = array([[3,1],[1,5]]) w,z = eigh(a) w,z = eigh(a,b) class TestLU(TestCase): def __init__(self, *args, **kw): TestCase.__init__(self, *args, **kw) self.a = array([[1,2,3],[1,2,3],[2,5,6]]) self.ca = array([[1,2,3],[1,2,3],[2,5j,6]]) # Those matrices are more robust to detect problems in permutation # matrices than the ones above self.b = array([[1,2,3],[4,5,6],[7,8,9]]) self.cb = array([[1j,2j,3j],[4j,5j,6j],[7j,8j,9j]]) # Reectangular matrices self.hrect = array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 12, 12]]) self.chrect = 1.j * array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 12, 12]]) self.vrect = array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 12, 12]]) self.cvrect = 1.j * array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 12, 12]]) # Medium sizes matrices self.med = random((30, 40)) self.cmed = random((30, 40)) + 1.j * random((30, 40)) def _test_common(self, data): p,l,u = lu(data) assert_array_almost_equal(dot(dot(p,l),u),data) pl,u = lu(data,permute_l=1) assert_array_almost_equal(dot(pl,u),data) # Simple tests def test_simple(self): self._test_common(self.a) def test_simple_complex(self): self._test_common(self.ca) def test_simple2(self): self._test_common(self.b) def test_simple2_complex(self): self._test_common(self.cb) # rectangular matrices tests def test_hrectangular(self): self._test_common(self.hrect) def test_vrectangular(self): self._test_common(self.vrect) def test_hrectangular_complex(self): self._test_common(self.chrect) def test_vrectangular_complex(self): self._test_common(self.cvrect) # Bigger matrices def test_medium1(self): """Check lu decomposition on medium size, rectangular matrix.""" self._test_common(self.med) def test_medium1_complex(self): """Check lu decomposition on medium size, rectangular matrix.""" self._test_common(self.cmed) def test_check_finite(self): p, l, u = lu(self.a, check_finite=False) 
assert_array_almost_equal(dot(dot(p,l),u), self.a) def test_simple_known(self): # Ticket #1458 for order in ['C', 'F']: A = np.array([[2, 1],[0, 1.]], order=order) LU, P = lu_factor(A) assert_array_almost_equal(LU, np.array([[2, 1], [0, 1]])) assert_array_equal(P, np.array([0, 1])) class TestLUSingle(TestLU): """LU testers for single precision, real and double""" def __init__(self, *args, **kw): TestLU.__init__(self, *args, **kw) self.a = self.a.astype(float32) self.ca = self.ca.astype(complex64) self.b = self.b.astype(float32) self.cb = self.cb.astype(complex64) self.hrect = self.hrect.astype(float32) self.chrect = self.hrect.astype(complex64) self.vrect = self.vrect.astype(float32) self.cvrect = self.vrect.astype(complex64) self.med = self.vrect.astype(float32) self.cmed = self.vrect.astype(complex64) class TestLUSolve(TestCase): def setUp(self): seed(1234) def test_lu(self): a0 = random((10,10)) b = random((10,)) for order in ['C', 'F']: a = np.array(a0, order=order) x1 = solve(a,b) lu_a = lu_factor(a) x2 = lu_solve(lu_a,b) assert_array_almost_equal(x1,x2) def test_check_finite(self): a = random((10,10)) b = random((10,)) x1 = solve(a,b) lu_a = lu_factor(a, check_finite=False) x2 = lu_solve(lu_a,b, check_finite=False) assert_array_almost_equal(x1,x2) class TestSVD_GESDD(TestCase): def setUp(self): self.lapack_driver = 'gesdd' seed(1234) def test_degenerate(self): assert_raises(TypeError, svd, [[1.]], lapack_driver=1.) assert_raises(ValueError, svd, [[1.]], lapack_driver='foo') def test_simple(self): a = [[1,2,3],[1,20,3],[2,5,6]] for full_matrices in (True, False): u,s,vh = svd(a, full_matrices=full_matrices, lapack_driver=self.lapack_driver) assert_array_almost_equal(dot(transpose(u),u),identity(3)) assert_array_almost_equal(dot(transpose(vh),vh),identity(3)) sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char) for i in range(len(s)): sigma[i,i] = s[i] assert_array_almost_equal(dot(dot(u,sigma),vh),a) def test_simple_singular(self): a = [[1,2,3],[1,2,3],[2,5,6]] for full_matrices in (True, False): u,s,vh = svd(a, full_matrices=full_matrices, lapack_driver=self.lapack_driver) assert_array_almost_equal(dot(transpose(u),u),identity(3)) assert_array_almost_equal(dot(transpose(vh),vh),identity(3)) sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char) for i in range(len(s)): sigma[i,i] = s[i] assert_array_almost_equal(dot(dot(u,sigma),vh),a) def test_simple_underdet(self): a = [[1,2,3],[4,5,6]] for full_matrices in (True, False): u,s,vh = svd(a, full_matrices=full_matrices, lapack_driver=self.lapack_driver)
for i in range(len(s)): sigma[i,i] = s[i] assert_array_almost_equal(dot(dot(u,sigma),vh),a) def test_simple_overdet(self): a = [[1,2],[4,5],[3,4]] for full_matrices in (True, False): u,s,vh = svd(a, full_matrices=full_matrices, lapack_driver=self.lapack_driver) assert_array_almost_equal(dot(transpose(u),u), identity(u.shape[1])) assert_array_almost_equal(dot(transpose(vh),vh),identity(2)) sigma = zeros((u.shape[1],vh.shape[0]),s.dtype.char) for i in range(len(s)): sigma[i,i] = s[i] assert_array_almost_equal(dot(dot(u,sigma),vh),a) def test_random(self): n = 20 m = 15 for i in range(3): for a in [random([n,m]),random([m,n])]: for full_matrices in (True, False): u,s,vh = svd(a, full_matrices=full_matrices, lapack_driver=self.lapack_driver) assert_array_almost_equal(dot(transpose(u),u),identity(u.shape[1])) assert_array_almost_equal(dot(vh, transpose(vh)),identity(vh.shape[0])) sigma = zeros((u.shape[1],vh.shape[0]),s.dtype.char) for i in range(len(s)): sigma[i,i] = s[i] assert_array_almost_equal(dot(dot(u,sigma),vh),a) def test_simple_complex(self): a = [[1,2,3],[1,2j,3],[2,5,6]] for full_matrices in (True, False): u,s,vh = svd(a, full_matrices=full_matrices, lapack_driver=self.lapack_driver) assert_array_almost_equal(dot(conj(transpose(u)),u),identity(u.shape[1])) assert_array_almost_equal(dot(conj(transpose(vh)),vh),identity(vh.shape[0])) sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char) for i in range(len(s)): sigma[i,i] = s[i] assert_array_almost_equal(dot(dot(u,sigma),vh),a) def test_random_complex(self): n = 20 m = 15 for i in range(3): for full_matrices in (True, False): for a in [random([n,m]),random([m,n])]: a = a + 1j*random(list(a.shape)) u,s,vh = svd(a, full_matrices=full_matrices, lapack_driver=self.lapack_driver) assert_array_almost_equal(dot(conj(transpose(u)),u),identity(u.shape[1])) # This fails when [m,n] # assert_array_almost_equal(dot(conj(transpose(vh)),vh),identity(len(vh),dtype=vh.dtype.char)) sigma = zeros((u.shape[1],vh.shape[0]),s.dtype.char) for i in range(len(s)): sigma[i,i] = s[i] assert_array_almost_equal(dot(dot(u,sigma),vh),a) def test_crash_1580(self): sizes = [(13, 23), (30, 50), (60, 100)] np.random.seed(1234) for sz in sizes: for dt in [np.float32, np.float64, np.complex64, np.complex128]: a = np.random.rand(*sz).astype(dt) # should not crash svd(a, lapack_driver=self.lapack_driver) def test_check_finite(self): a = [[1,2,3],[1,20,3],[2,5,6]] u,s,vh = svd(a, check_finite=False, lapack_driver=self.lapack_driver) assert_array_almost_equal(dot(transpose(u),u),identity(3)) assert_array_almost_equal(dot(transpose(vh),vh),identity(3)) sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char) for i in range(len(s)): sigma[i,i] = s[i] assert_array_almost_equal(dot(dot(u,sigma),vh),a) def test_gh_5039(self): # This is a smoke test for https://github.com/scipy/scipy/issues/5039 # # The following is reported to raise "ValueError: On entry to DGESDD # parameter number 12 had an illegal value". # `interp1d([1,2,3,4], [1,2,3,4], kind='cubic')` # This is reported to only show up on LAPACK 3.0.3. 
# # The matrix below is taken from the call to # `B = _fitpack._bsplmat(order, xk)` in interpolate._find_smoothest b = np.array( [[0.16666667, 0.66666667, 0.16666667, 0., 0., 0.], [0., 0.16666667, 0.66666667, 0.16666667, 0., 0.], [0., 0., 0.16666667, 0.66666667, 0.16666667, 0.], [0., 0., 0., 0.16666667, 0.66666667, 0.16666667]]) svd(b, lapack_driver=self.lapack_driver) class TestSVD_GESVD(TestSVD_GESDD): def setUp(self): self.lapack_driver = 'gesvd' seed(1234) class TestSVDVals(TestCase): def test_empty(self): for a in [[]], np.empty((2, 0)), np.ones((0, 3)): s = svdvals(a) assert_equal(s, np.empty(0)) def test_simple(self): a = [[1,2,3],[1,2,3],[2,5,6]] s = svdvals(a) assert_(len(s) == 3) assert_(s[0] >= s[1] >= s[2]) def test_simple_underdet(self): a = [[1,2,3],[4,5,6]] s = svdvals(a) assert_(len(s) == 2) assert_(s[0] >= s[1]) def test_simple_overdet(self): a = [[1,2],[4,5],[3,4]] s = svdvals(a) assert_(len(s) == 2) assert_(s[0] >= s[1]) def test_simple_complex(self): a = [[1,2,3],[1,20,3j],[2,5,6]] s = svdvals(a) assert_(len(s) == 3) assert_(s[0] >= s[1] >= s[2]) def test_simple_underdet_complex(self): a = [[1,2,3],[4,5j,6]] s = svdvals(a) assert_(len(s) == 2) assert_(s[0] >= s[1]) def test_simple_overdet_complex(self): a = [[1,2],[4,5],[3j,4]] s = svdvals(a) assert_(len(s) == 2) assert_(s[0] >= s[1]) def test_check_finite(self): a = [[1,2,3],[1,2,3],[2,5,6]] s = svdvals(a, check_finite=False) assert_(len(s) == 3) assert_(s[0] >= s[1] >= s[2]) @dec.slow def test_crash_2609(self): np.random.seed(1234) a = np.random.rand(1500, 2800) # Shouldn't crash: svdvals(a) class TestDiagSVD(TestCase): def test_simple(self): assert_array_almost_equal(diagsvd([1,0,0],3,3),[[1,0,0],[0,0,0],[0,0,0]]) class TestQR(TestCase): def setUp(self): seed(1234) def test_simple(self): a = [[8,2,3],[2,9,3],[5,3,6]] q,r = qr(a) assert_array_almost_equal(dot(transpose(q),q),identity(3)) assert_array_almost_equal(dot(q,r),a) def test_simple_left(self): a = [[8,2,3],[2,9,3],[5,3,6]] q,r = qr(a) c = [1, 2, 3] qc,r2 = qr_multiply(a, c, "left") assert_array_almost_equal(dot(q, c), qc) assert_array_almost_equal(r, r2) qc,r2 = qr_multiply(a, identity(3), "left") assert_array_almost_equal(q, qc) def test_simple_right(self): a = [[8,2,3],[2,9,3],[5,3,6]] q,r = qr(a) c = [1, 2, 3] qc,r2 = qr_multiply(a, c) assert_array_almost_equal(dot(c, q), qc) assert_array_almost_equal(r, r2) qc,r = qr_multiply(a, identity(3)) assert_array_almost_equal(q, qc) def test_simple_pivoting(self): a = np.asarray([[8,2,3],[2,9,3],[5,3,6]]) q,r,p = qr(a, pivoting=True) d = abs(diag(r)) assert_(all(d[1:] <= d[:-1])) assert_array_almost_equal(dot(transpose(q),q),identity(3)) assert_array_almost_equal(dot(q,r),a[:,p]) q2,r2 = qr(a[:,p]) assert_array_almost_equal(q,q2) assert_array_almost_equal(r,r2) def test_simple_left_pivoting(self): a = [[8,2,3],[2,9,3],[5,3,6]] q,r,jpvt = qr(a, pivoting=True) c = [1, 2, 3] qc,r,jpvt = qr_multiply(a, c, "left", True) assert_array_almost_equal(dot(q, c), qc) def test_simple_right_pivoting(self): a = [[8,2,3],[2,9,3],[5,3,6]] q,r,jpvt = qr(a, pivoting=True) c = [1, 2, 3] qc,r,jpvt = qr_multiply(a, c, pivoting=True) assert_array_almost_equal(dot(c, q), qc) def test_simple_trap(self): a = [[8,2,3],[2,9,3]] q,r = qr(a) assert_array_almost_equal(dot(transpose(q),q),identity(2)) assert_array_almost_equal(dot(q,r),a) def test_simple_trap_pivoting(self): a = np.asarray([[8,2,3],[2,9,3]]) q,r,p = qr(a, pivoting=True) d = abs(diag(r)) assert_(all(d[1:] <= d[:-1])) assert_array_almost_equal(dot(transpose(q),q),identity(2)) 
assert_array_almost_equal(dot(q,r),a[:,p]) q2,r2 = qr(a[:,p]) assert_array_almost_equal(q,q2) assert_array_almost_equal(r,r2) def test_simple_tall(self): # full version a = [[8,2],[2,9],[5,3]] q,r = qr(a) assert_array_almost_equal(dot(transpose(q),q),identity(3)) assert_array_almost_equal(dot(q,r),a) def test_simple_tall_pivoting(self): # full version pivoting a = np.asarray([[8,2],[2,9],[5,3]]) q,r,p = qr(a, pivoting=True) d = abs(diag(r)) assert_(all(d[1:] <= d[:-1])) assert_array_almost_equal(dot(transpose(q),q),identity(3)) assert_array_almost_equal(dot(q,r),a[:,p]) q2,r2 = qr(a[:,p]) assert_array_almost_equal(q,q2) assert_array_almost_equal(r,r2) def test_simple_tall_e(self): # economy version a = [[8,2],[2,9],[5,3]] q,r = qr(a, mode='economic') assert_array_almost_equal(dot(transpose(q),q),identity(2)) assert_array_almost_equal(dot(q,r),a) assert_equal(q.shape, (3,2)) assert_equal(r.shape, (2,2)) def test_simple_tall_e_pivoting(self): # economy version pivoting a = np.asarray([[8,2],[2,9],[5,3]]) q,r,p = qr(a, pivoting=True, mode='economic') d = abs(diag(r)) assert_(all(d[1:] <= d[:-1])) assert_array_almost_equal(dot(transpose(q),q),identity(2)) assert_array_almost_equal(dot(q,r),a[:,p]) q2,r2 = qr(a[:,p], mode='economic') assert_array_almost_equal(q,q2) assert_array_almost_equal(r,r2) def test_simple_tall_left(self): a = [[8,2],[2,9],[5,3]] q,r = qr(a, mode="economic") c = [1, 2] qc,r2 = qr_multiply(a, c, "left") assert_array_almost_equal(dot(q, c), qc) assert_array_almost_equal(r, r2) c = array([1,2,0]) qc,r2 = qr_multiply(a, c, "left", overwrite_c=True) assert_array_almost_equal(dot(q, c[:2]), qc) qc,r = qr_multiply(a, identity(2), "left") assert_array_almost_equal(qc, q) def test_simple_tall_left_pivoting(self): a = [[8,2],[2,9],[5,3]] q,r,jpvt = qr(a, mode="economic", pivoting=True) c = [1, 2] qc,r,kpvt = qr_multiply(a, c, "left", True) assert_array_equal(jpvt, kpvt) assert_array_almost_equal(dot(q, c), qc) qc,r,jpvt = qr_multiply(a, identity(2), "left", True) assert_array_almost_equal(qc, q) def test_simple_tall_right(self): a = [[8,2],[2,9],[5,3]] q,r = qr(a, mode="economic") c = [1, 2, 3] cq,r2 = qr_multiply(a, c) assert_array_almost_equal(dot(c, q), cq) assert_array_almost_equal(r, r2) cq,r = qr_multiply(a, identity(3)) assert_array_almost_equal(cq, q) def test_simple_tall_right_pivoting(self): a = [[8,2],[2,9],[5,3]] q,r,jpvt = qr(a, pivoting=True, mode="economic") c = [1, 2, 3] cq,r,jpvt = qr_multiply(a, c, pivoting=True) assert_array_almost_equal(dot(c, q), cq) cq,r,jpvt = qr_multiply(a, identity(3), pivoting=True) assert_array_almost_equal(cq, q) def test_simple_fat(self): # full version a = [[8,2,5],[2,9,3]] q,r = qr(a) assert_array_almost_equal(dot(transpose(q),q),identity(2)) assert_array_almost_equal(dot(q,r),a) assert_equal(q.shape, (2,2)) assert_equal(r.shape, (2,3)) def test_simple_fat_pivoting(self): # full version pivoting a = np.asarray([[8,2,5],[2,9,3]]) q,r,p = qr(a, pivoting=True) d = abs(diag(r)) assert_(all(d[1:] <= d[:-1])) assert_array_almost_equal(dot(transpose(q),q),identity(2)) assert_array_almost_equal(dot(q,r),a[:,p]) assert_equal(q.shape, (2,2)) assert_equal(r.shape, (2,3)) q2,r2 = qr(a[:,p]) assert_array_almost_equal(q,q2) assert_array_almost_equal(r,r2) def test_simple_fat_e(self): # economy version a = [[8,2,3],[2,9,5]] q,r = qr(a, mode='economic') assert_array_almost_equal(dot(transpose(q),q),identity(2)) assert_array_almost_equal(dot(q,r),a) assert_equal(q.shape, (2,2)) assert_equal(r.shape, (2,3)) def test_simple_fat_e_pivoting(self): # 
economy version pivoting a = np.asarray([[8,2,3],[2,9,5]]) q,r,p = qr(a, pivoting=True, mode='economic') d = abs(diag(r)) assert_(all(d[1:] <= d[:-1])) assert_array_almost_equal(dot(transpose(q),q),identity(2)) assert_array_almost_equal(dot(q,r),a[:,p]) assert_equal(q.shape, (2,2)) assert_equal(r.shape, (2,3)) q2,r2 = qr(a[:,p], mode='economic') assert_array_almost_equal(q,q2) assert_array_almost_equal(r,r2) def test_simple_fat_left(self): a = [[8,2,3],[2,9,5]] q,r = qr(a, mode="economic") c = [1, 2] qc,r2 = qr_multiply(a, c, "left") assert_array_almost_equal(dot(q, c), qc) assert_array_almost_equal(r, r2) qc,r = qr_multiply(a, identity(2), "left") assert_array_almost_equal(qc, q) def test_simple_fat_left_pivoting(self): a = [[8,2,3],[2,9,5]] q,r,jpvt = qr(a, mode="economic", pivoting=True) c = [1, 2] qc,r,jpvt = qr_multiply(a, c, "left", True) assert_array_almost_equal(dot(q, c), qc) qc,r,jpvt = qr_multiply(a, identity(2), "left", True) assert_array_almost_equal(qc, q) def test_simple_fat_right(self): a = [[8,2,3],[2,9,5]] q,r = qr(a, mode="economic") c = [1, 2] cq,r2 = qr_multiply(a, c) assert_array_almost_equal(dot(c, q), cq) assert_array_almost_equal(r, r2) cq,r = qr_multiply(a, identity(2)) assert_array_almost_equal(cq, q) def test_simple_fat_right_pivoting(self): a = [[8,2,3],[2,9,5]] q,r,jpvt = qr(a, pivoting=True, mode="economic") c = [1, 2] cq,r,jpvt = qr_multiply(a, c, pivoting=True) assert_array_almost_equal(dot(c, q), cq) cq,r,jpvt = qr_multiply(a, identity(2), pivoting=True) assert_array_almost_equal(cq, q) def test_simple_complex(self): a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]] q,r = qr(a) assert_array_almost_equal(dot(conj(transpose(q)),q),identity(3)) assert_array_almost_equal(dot(q,r),a) def test_simple_complex_left(self): a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]] q,r = qr(a) c = [1, 2, 3+4j] qc,r = qr_multiply(a, c, "left") assert_array_almost_equal(dot(q, c), qc) qc,r = qr_multiply(a, identity(3), "left") assert_array_almost_equal(q, qc) def test_simple_complex_right(self): a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]] q,r = qr(a) c = [1, 2, 3+4j] qc,r = qr_multiply(a, c) assert_array_almost_equal(dot(c, q), qc) qc,r = qr_multiply(a, identity(3)) assert_array_almost_equal(q, qc) def test_simple_tall_complex_left(self): a = [[8,2+3j],[2,9],[5+7j,3]] q,r = qr(a, mode="economic") c = [1, 2+2j] qc,r2 = qr_multiply(a, c, "left") assert_array_almost_equal(dot(q, c), qc) assert_array_almost_equal(r, r2) c = array([1,2,0]) qc,r2 = qr_multiply(a, c, "left", overwrite_c=True) assert_array_almost_equal(dot(q, c[:2]), qc) qc,r = qr_multiply(a, identity(2), "left") assert_array_almost_equal(qc, q) def test_simple_complex_left_conjugate(self): a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]] q,r = qr(a) c = [1, 2, 3+4j] qc,r = qr_multiply(a, c, "left", conjugate=True) assert_array_almost_equal(dot(q.conjugate(), c), qc) def test_simple_complex_tall_left_conjugate(self): a = [[3,3+4j],[5,2+2j],[3,2]] q,r = qr(a, mode='economic') c = [1, 3+4j] qc,r = qr_multiply(a, c, "left", conjugate=True) assert_array_almost_equal(dot(q.conjugate(), c), qc) def test_simple_complex_right_conjugate(self): a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]] q,r = qr(a) c = [1, 2, 3+4j] qc,r = qr_multiply(a, c, conjugate=True) assert_array_almost_equal(dot(c, q.conjugate()), qc) def test_simple_complex_pivoting(self): a = np.asarray([[3,3+4j,5],[5,2,2+7j],[3,2,7]]) q,r,p = qr(a, pivoting=True) d = abs(diag(r)) assert_(all(d[1:] <= d[:-1])) assert_array_almost_equal(dot(conj(transpose(q)),q),identity(3)) assert_array_almost_equal(dot(q,r),a[:,p]) q2,r2 = 
qr(a[:,p]) assert_array_almost_equal(q,q2) assert_array_almost_equal(r,r2) def test_simple_complex_left_pivoting(self): a = np.asarray([[3,3+4j,5],[5,2,2+7j],[3,2,7]]) q,r,jpvt = qr(a, pivoting=True) c = [1, 2, 3+4j] qc,r,jpvt = qr_multiply(a, c, "left", True) assert_array_almost_equal(dot(q, c), qc) def test_simple_complex_right_pivoting(self): a = np.asarray([[3,3+4j,5],[5,2,2+7j],[3,2,7]]) q,r,jpvt = qr(a, pivoting=True) c = [1, 2, 3+4j] qc,r,jpvt = qr_multiply(a, c, pivoting=True) assert_array_almost_equal(dot(c, q), qc) def test_random(self): n = 20 for k in range(2): a = random([n,n]) q,r = qr(a) assert_array_almost_equal(dot(transpose(q),q),identity(n)) assert_array_almost_equal(dot(q,r),a) def test_random_left(self): n = 20 for k in range(2): a = random([n,n]) q,r = qr(a) c = random([n]) qc,r = qr_multiply(a, c, "left") assert_array_almost_equal(dot(q, c), qc) qc,r = qr_multiply(a, identity(n), "left") assert_array_almost_equal(q, qc) def test_random_right(self): n = 20 for k in range(2): a = random([n,n]) q,r = qr(a) c = random([n]) cq,r = qr_multiply(a, c) assert_array_almost_equal(dot(c, q), cq) cq,r = qr_multiply(a, identity(n)) assert_array_almost_equal(q, cq) def test_random_pivoting(self): n = 20 for k in range(2): a = random([n,n]) q,r,p = qr(a, pivoting=True) d = abs(diag(r)) assert_(all(d[1:] <= d[:-1])) assert_array_almost_equal(dot(transpose(q),q),identity(n)) assert_array_almost_equal(dot(q,r),a[:,p]) q2,r2 = qr(a[:,p]) assert_array_almost_equal(q,q2) assert_array_almost_equal(r,r2) def test_random_tall(self): # full version m = 200 n = 100 for k in range(2): a = random([m,n]) q,r = qr(a) assert_array_almost_equal(dot(transpose(q),q),identity(m)) assert_array_almost_equal(dot(q,r),a) def test_random_tall_left(self): # full version m = 200 n = 100 for k in range(2): a = random([m,n]) q,r = qr(a, mode="economic") c = random([n]) qc,r = qr_multiply(a, c, "left") assert_array_almost_equal(dot(q, c), qc) qc,r = qr_multiply(a, identity(n), "left") assert_array_almost_equal(qc, q) def test_random_tall_right(self): # full version m = 200 n = 100 for k in range(2): a = random([m,n]) q,r = qr(a, mode="economic") c = random([m]) cq,r = qr_multiply(a, c) assert_array_almost_equal(dot(c, q), cq) cq,r = qr_multiply(a, identity(m)) assert_array_almost_equal(cq, q) def test_random_tall_pivoting(self): # full version pivoting m = 200 n = 100 for k in range(2): a = random([m,n]) q,r,p = qr(a, pivoting=True) d = abs(diag(r)) assert_(all(d[1:] <= d[:-1])) assert_array_almost_equal(dot(transpose(q),q),identity(m)) assert_array_almost_equal(dot(q,r),a[:,p]) q2,r2 = qr(a[:,p]) assert_array_almost_equal(q,q2) assert_array_almost_equal(r,r2) def test_random_tall_e(self): # economy version m = 200 n = 100 for k in range(2): a = random([m,n]) q,r = qr(a, mode='economic') assert_array_almost_equal(dot(transpose(q),q),identity(n)) assert_array_almost_equal(dot(q,r),a) assert_equal(q.shape, (m,n)) assert_equal(r.shape, (n,n)) def test_random_tall_e_pivoting(self): # economy version pivoting m = 200 n = 100 for k in range(2): a = random([m,n]) q,r,p = qr(a, pivoting=True, mode='economic') d = abs(diag(r)) assert_(all(d[1:] <= d[:-1])) assert_array_almost_equal(dot(transpose(q),q),identity(n)) assert_array_almost_equal(dot(q,r),a[:,p]) assert_equal(q.shape, (m,n)) assert_equal(r.shape, (n,n)) q2,r2 = qr(a[:,p], mode='economic') assert_array_almost_equal(q,q2) assert_array_almost_equal(r,r2) def test_random_trap(self): m = 100 n = 200 for k in range(2): a = random([m,n]) q,r = qr(a) 
assert_array_almost_equal(dot(transpose(q),q),identity(m)) assert_array_almost_equal(dot(q,r),a) def test_random_trap_pivoting(self): m = 100 n = 200 for k in range(2): a = random([m,n]) q,r,p = qr(a, pivoting=True) d = abs(diag(r)) assert_(all(d[1:] <= d[:-1])) assert_array_almost_equal(dot(transpose(q),q),identity(m)) assert_array_almost_equal(dot(q,r),a[:,p]) q2,r2 = qr(a[:,p]) assert_array_almost_equal(q,q2) assert_array_almost_equal(r,r2) def test_random_complex(self): n = 20 for k in range(2): a = random([n,n])+1j*random([n,n]) q,r = qr(a) assert_array_almost_equal(dot(conj(transpose(q)),q),identity(n)) assert_array_almost_equal(dot(q,r),a) def test_random_complex_left(self): n = 20 for k in range(2): a = random([n,n])+1j*random([n,n]) q,r = qr(a) c = random([n])+1j*random([n]) qc,r = qr_multiply(a, c, "left") assert_array_almost_equal(dot(q, c), qc) qc,r = qr_multiply(a, identity(n), "left") assert_array_almost_equal(q, qc) def test_random_complex_right(self): n = 20 for k in range(2): a = random([n,n])+1j*random([n,n]) q,r = qr(a) c = random([n])+1j*random([n]) cq,r = qr_multiply(a, c) assert_array_almost_equal(dot(c, q), cq) cq,r = qr_multiply(a, identity(n)) assert_array_almost_equal(q, cq) def test_random_complex_pivoting(self): n = 20 for k in range(2): a = random([n,n])+1j*random([n,n]) q,r,p = qr(a, pivoting=True) d = abs(diag(r)) assert_(all(d[1:] <= d[:-1])) assert_array_almost_equal(dot(conj(transpose(q)),q),identity(n)) assert_array_almost_equal(dot(q,r),a[:,p]) q2,r2 = qr(a[:,p]) assert_array_almost_equal(q,q2) assert_array_almost_equal(r,r2) def test_check_finite(self): a = [[8,2,3],[2,9,3],[5,3,6]] q,r = qr(a, check_finite=False) assert_array_almost_equal(dot(transpose(q),q),identity(3)) assert_array_almost_equal(dot(q,r),a) def test_lwork(self): a = [[8,2,3],[2,9,3],[5,3,6]] # Get comparison values q,r = qr(a, lwork=None) # Test against minimum valid lwork q2,r2 = qr(a, lwork=3) assert_array_almost_equal(q2,q) assert_array_almost_equal(r2,r) # Test against larger lwork q3,r3 = qr(a, lwork=10) assert_array_almost_equal(q3,q) assert_array_almost_equal(r3,r) # Test against explicit lwork=-1 q4,r4 = qr(a, lwork=-1) assert_array_almost_equal(q4,q) assert_array_almost_equal(r4,r) # Test against invalid lwork assert_raises(Exception, qr, (a,), {'lwork':0}) assert_raises(Exception, qr, (a,), {'lwork':2}) class TestRQ(TestCase): def setUp(self): seed(1234) def test_simple(self): a = [[8,2,3],[2,9,3],[5,3,6]] r,q = rq(a) assert_array_almost_equal(dot(q, transpose(q)),identity(3)) assert_array_almost_equal(dot(r,q),a) def test_r(self): a = [[8,2,3],[2,9,3],[5,3,6]] r,q = rq(a) r2 = rq(a, mode='r') assert_array_almost_equal(r, r2) def test_random(self): n = 20 for k in range(2): a = random([n,n]) r,q = rq(a) assert_array_almost_equal(dot(q, transpose(q)),identity(n)) assert_array_almost_equal(dot(r,q),a) def test_simple_trap(self): a = [[8,2,3],[2,9,3]] r,q = rq(a) assert_array_almost_equal(dot(transpose(q),q),identity(3)) assert_array_almost_equal(dot(r,q),a) def test_simple_tall(self): a = [[8,2],[2,9],[5,3]] r,q = rq(a) assert_array_almost_equal(dot(transpose(q),q),identity(2)) assert_array_almost_equal(dot(r,q),a) def test_simple_fat(self): a = [[8,2,5],[2,9,3]] r,q = rq(a) assert_array_almost_equal(dot(transpose(q),q),identity(3)) assert_array_almost_equal(dot(r,q),a) def test_simple_complex(self): a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]] r,q = rq(a) assert_array_almost_equal(dot(q, conj(transpose(q))),identity(3)) assert_array_almost_equal(dot(r,q),a) def test_random_tall(self): 
m = 200 n = 100 for k in range(2): a = random([m,n]) r,q = rq(a) assert_array_almost_equal(dot(q, transpose(q)),identity(n)) assert_array_almost_equal(dot(r,q),a) def test_random_trap(self): m = 100 n = 200 for k in range(2): a = random([m,n]) r,q = rq(a) assert_array_almost_equal(dot(q, transpose(q)),identity(n)) assert_array_almost_equal(dot(r,q),a) def test_random_trap_economic(self): m = 100 n = 200 for k in range(2): a = random([m,n]) r,q = rq(a, mode='economic') assert_array_almost_equal(dot(q,transpose(q)),identity(m)) assert_array_almost_equal(dot(r,q),a) assert_equal(q.shape, (m, n)) assert_equal(r.shape, (m, m)) def test_random_complex(self): n = 20 for k in range(2): a = random([n,n])+1j*random([n,n]) r,q = rq(a) assert_array_almost_equal(dot(q, conj(transpose(q))),identity(n)) assert_array_almost_equal(dot(r,q),a) def test_random_complex_economic(self): m = 100 n = 200 for k in range(2): a = random([m,n])+1j*random([m,n]) r,q = rq(a, mode='economic') assert_array_almost_equal(dot(q,conj(transpose(q))),identity(m)) assert_array_almost_equal(dot(r,q),a) assert_equal(q.shape, (m, n)) assert_equal(r.shape, (m, m)) def test_check_finite(self): a = [[8,2,3],[2,9,3],[5,3,6]] r,q = rq(a, check_finite=False) assert_array_almost_equal(dot(q, transpose(q)),identity(3)) assert_array_almost_equal(dot(r,q),a) transp = transpose any = sometrue class TestSchur(TestCase): def test_simple(self): a = [[8,12,3],[2,9,3],[10,3,6]] t,z = schur(a) assert_array_almost_equal(dot(dot(z,t),transp(conj(z))),a) tc,zc = schur(a,'complex') assert_(any(ravel(iscomplex(zc))) and any(ravel(iscomplex(tc)))) assert_array_almost_equal(dot(dot(zc,tc),transp(conj(zc))),a) tc2,zc2 = rsf2csf(tc,zc) assert_array_almost_equal(dot(dot(zc2,tc2),transp(conj(zc2))),a) def test_sort(self): a = [[4.,3.,1.,-1.],[-4.5,-3.5,-1.,1.],[9.,6.,-4.,4.5],[6.,4.,-3.,3.5]] s,u,sdim = schur(a,sort='lhp') assert_array_almost_equal([[0.1134,0.5436,0.8316,0.], [-0.1134,-0.8245,0.5544,0.], [-0.8213,0.1308,0.0265,-0.5547], [-0.5475,0.0872,0.0177,0.8321]], u,3) assert_array_almost_equal([[-1.4142,0.1456,-11.5816,-7.7174], [0.,-0.5000,9.4472,-0.7184], [0.,0.,1.4142,-0.1456], [0.,0.,0.,0.5]], s,3) assert_equal(2,sdim) s,u,sdim = schur(a,sort='rhp') assert_array_almost_equal([[0.4862,-0.4930,0.1434,-0.7071], [-0.4862,0.4930,-0.1434,-0.7071], [0.6042,0.3944,-0.6924,0.], [0.4028,0.5986,0.6924,0.]], u,3) assert_array_almost_equal([[1.4142,-0.9270,4.5368,-14.4130], [0.,0.5,6.5809,-3.1870], [0.,0.,-1.4142,0.9270], [0.,0.,0.,-0.5]], s,3) assert_equal(2,sdim) s,u,sdim = schur(a,sort='iuc') assert_array_almost_equal([[0.5547,0.,-0.5721,-0.6042], [-0.8321,0.,-0.3814,-0.4028], [0.,0.7071,-0.5134,0.4862], [0.,0.7071,0.5134,-0.4862]], u,3) assert_array_almost_equal([[-0.5000,0.0000,-6.5809,-4.0974], [0.,0.5000,-3.3191,-14.4130], [0.,0.,1.4142,2.1573], [0.,0.,0.,-1.4142]], s,3) assert_equal(2,sdim) s,u,sdim = schur(a,sort='ouc') assert_array_almost_equal([[0.4862,-0.5134,0.7071,0.], [-0.4862,0.5134,0.7071,0.], [0.6042,0.5721,0.,-0.5547], [0.4028,0.3814,0.,0.8321]], u,3) assert_array_almost_equal([[1.4142,-2.1573,14.4130,4.0974], [0.,-1.4142,3.3191,6.5809], [0.,0.,-0.5000,0.], [0.,0.,0.,0.5000]], s,3) assert_equal(2,sdim) rhp_function = lambda x: x >= 0.0 s,u,sdim = schur(a,sort=rhp_function) assert_array_almost_equal([[0.4862,-0.4930,0.1434,-0.7071], [-0.4862,0.4930,-0.1434,-0.7071], [0.6042,0.3944,-0.6924,0.], [0.4028,0.5986,0.6924,0.]], u,3) assert_array_almost_equal([[1.4142,-0.9270,4.5368,-14.4130], [0.,0.5,6.5809,-3.1870], [0.,0.,-1.4142,0.9270], 
[0.,0.,0.,-0.5]], s,3) assert_equal(2,sdim) def test_sort_errors(self): a = [[4.,3.,1.,-1.],[-4.5,-3.5,-1.,1.],[9.,6.,-4.,4.5],[6.,4.,-3.,3.5]] assert_raises(ValueError, schur, a, sort='unsupported') assert_raises(ValueError, schur, a, sort=1) def test_check_finite(self): a = [[8,12,3],[2,9,3],[10,3,6]] t,z = schur(a, check_finite=False) assert_array_almost_equal(dot(dot(z,t),transp(conj(z))),a) class TestHessenberg(TestCase): def test_simple(self): a = [[-149, -50,-154], [537, 180, 546], [-27, -9, -25]] h1 = [[-149.0000,42.2037,-156.3165], [-537.6783,152.5511,-554.9272], [0,0.0728, 2.4489]] h,q = hessenberg(a,calc_q=1) assert_array_almost_equal(dot(transp(q),dot(a,q)),h) assert_array_almost_equal(h,h1,decimal=4) def test_simple_complex(self): a = [[-149, -50,-154], [537, 180j, 546], [-27j, -9, -25]] h,q = hessenberg(a,calc_q=1) h1 = dot(transp(conj(q)),dot(a,q)) assert_array_almost_equal(h1,h) def test_simple2(self): a = [[1,2,3,4,5,6,7], [0,2,3,4,6,7,2], [0,2,2,3,0,3,2], [0,0,2,8,0,0,2], [0,3,1,2,0,1,2], [0,1,2,3,0,1,0], [0,0,0,0,0,1,2]] h,q = hessenberg(a,calc_q=1) assert_array_almost_equal(dot(transp(q),dot(a,q)),h) def test_simple3(self): a = np.eye(3) a[-1, 0] = 2 h, q = hessenberg(a, calc_q=1) assert_array_almost_equal(dot(transp(q), dot(a, q)), h) def test_random(self): n = 20 for k in range(2): a = random([n,n]) h,q = hessenberg(a,calc_q=1) assert_array_almost_equal(dot(transp(q),dot(a,q)),h) def test_random_complex(self): n = 20 for k in range(2): a = random([n,n])+1j*random([n,n]) h,q = hessenberg(a,calc_q=1) h1 = dot(transp(conj(q)),dot(a,q)) assert_array_almost_equal(h1,h) def test_check_finite(self): a = [[-149, -50,-154], [537, 180, 546], [-27, -9, -25]] h1 = [[-149.0000,42.2037,-156.3165], [-537.6783,152.5511,-554.9272], [0,0.0728, 2.4489]] h,q = hessenberg(a,calc_q=1, check_finite=False) assert_array_almost_equal(dot(transp(q),dot(a,q)),h) assert_array_almost_equal(h,h1,decimal=4) def test_2x2(self): a = [[2, 1], [7, 12]] h, q = hessenberg(a, calc_q=1) assert_array_almost_equal(q, np.eye(2)) assert_array_almost_equal(h, a) b = [[2-7j, 1+2j], [7+3j, 12-2j]] h2, q2 = hessenberg(b, calc_q=1) assert_array_almost_equal(q2, np.eye(2)) assert_array_almost_equal(h2, b) class TestQZ(TestCase): def setUp(self): seed(12345) def test_qz_single(self): n = 5 A = random([n,n]).astype(float32) B = random([n,n]).astype(float32) AA,BB,Q,Z = qz(A,B) assert_array_almost_equal(dot(dot(Q,AA),Z.T), A) assert_array_almost_equal(dot(dot(Q,BB),Z.T), B) assert_array_almost_equal(dot(Q,Q.T), eye(n)) assert_array_almost_equal(dot(Z,Z.T), eye(n)) assert_(all(diag(BB) >= 0)) def test_qz_double(self): n = 5 A = random([n,n]) B = random([n,n]) AA,BB,Q,Z = qz(A,B) assert_array_almost_equal(dot(dot(Q,AA),Z.T), A) assert_array_almost_equal(dot(dot(Q,BB),Z.T), B) assert_array_almost_equal(dot(Q,Q.T), eye(n)) assert_array_almost_equal(dot(Z,Z.T), eye(n)) assert_(all(diag(BB) >= 0)) def test_qz_complex(self): n = 5 A = random([n,n]) + 1j*random([n,n]) B = random([n,n]) + 1j*random([n,n]) AA,BB,Q,Z = qz(A,B) assert_array_almost_equal(dot(dot(Q,AA),Z.conjugate().T), A) assert_array_almost_equal(dot(dot(Q,BB),Z.conjugate().T), B) assert_array_almost_equal(dot(Q,Q.conjugate().T), eye(n)) assert_array_almost_equal(dot(Z,Z.conjugate().T), eye(n)) assert_(all(diag(BB) >= 0)) assert_(all(diag(BB).imag == 0)) def test_qz_complex64(self): n = 5 A = (random([n,n]) + 1j*random([n,n])).astype(complex64) B = (random([n,n]) + 1j*random([n,n])).astype(complex64) AA,BB,Q,Z = qz(A,B) 
assert_array_almost_equal(dot(dot(Q,AA),Z.conjugate().T), A, decimal=5) assert_array_almost_equal(dot(dot(Q,BB),Z.conjugate().T), B, decimal=5) assert_array_almost_equal(dot(Q,Q.conjugate().T), eye(n), decimal=5) assert_array_almost_equal(dot(Z,Z.conjugate().T), eye(n), decimal=5) assert_(all(diag(BB) >= 0)) assert_(all(diag(BB).imag == 0)) def test_qz_double_complex(self): n = 5 A = random([n,n]) B = random([n,n]) AA,BB,Q,Z = qz(A,B, output='complex') aa = dot(dot(Q,AA),Z.conjugate().T) assert_array_almost_equal(aa.real, A) assert_array_almost_equal(aa.imag, 0) bb = dot(dot(Q,BB),Z.conjugate().T) assert_array_almost_equal(bb.real, B) assert_array_almost_equal(bb.imag, 0) assert_array_almost_equal(dot(Q,Q.conjugate().T), eye(n)) assert_array_almost_equal(dot(Z,Z.conjugate().T), eye(n)) assert_(all(diag(BB) >= 0)) def test_qz_double_sort(self): # from http://www.nag.com/lapack-ex/node119.html # NOTE: These matrices may be ill-conditioned and lead to a # seg fault on certain python versions when compiled with # sse2 or sse3 older ATLAS/LAPACK binaries for windows # A = np.array([[3.9, 12.5, -34.5, -0.5], # [ 4.3, 21.5, -47.5, 7.5], # [ 4.3, 21.5, -43.5, 3.5], # [ 4.4, 26.0, -46.0, 6.0 ]]) # B = np.array([[ 1.0, 2.0, -3.0, 1.0], # [1.0, 3.0, -5.0, 4.0], # [1.0, 3.0, -4.0, 3.0], # [1.0, 3.0, -4.0, 4.0]]) A = np.array([[3.9, 12.5, -34.5, 2.5], [4.3, 21.5, -47.5, 7.5], [4.3, 1.5, -43.5, 3.5], [4.4, 6.0, -46.0, 6.0]]) B = np.array([[1.0, 1.0, -3.0, 1.0], [1.0, 3.0, -5.0, 4.4], [1.0, 2.0, -4.0, 1.0], [1.2, 3.0, -4.0, 4.0]]) sort = lambda ar,ai,beta: ai == 0 assert_raises(ValueError, qz, A, B, sort=sort) if False: AA,BB,Q,Z,sdim = qz(A,B,sort=sort) # assert_(sdim == 2) assert_(sdim == 4) assert_array_almost_equal(dot(dot(Q,AA),Z.T), A) assert_array_almost_equal(dot(dot(Q,BB),Z.T), B) # test absolute values bc the sign is ambiguous and might be platform # dependent assert_array_almost_equal(np.abs(AA), np.abs(np.array( [[35.7864, -80.9061, -12.0629, -9.498], [0., 2.7638, -2.3505, 7.3256], [0., 0., 0.6258, -0.0398], [0., 0., 0., -12.8217]])), 4) assert_array_almost_equal(np.abs(BB), np.abs(np.array( [[4.5324, -8.7878, 3.2357, -3.5526], [0., 1.4314, -2.1894, 0.9709], [0., 0., 1.3126, -0.3468], [0., 0., 0., 0.559]])), 4) assert_array_almost_equal(np.abs(Q), np.abs(np.array( [[-0.4193, -0.605, -0.1894, -0.6498], [-0.5495, 0.6987, 0.2654, -0.3734], [-0.4973, -0.3682, 0.6194, 0.4832], [-0.5243, 0.1008, -0.7142, 0.4526]])), 4) assert_array_almost_equal(np.abs(Z), np.abs(np.array( [[-0.9471, -0.2971, -0.1217, 0.0055], [-0.0367, 0.1209, 0.0358, 0.9913], [0.3171, -0.9041, -0.2547, 0.1312], [0.0346, 0.2824, -0.9587, 0.0014]])), 4) # test absolute values bc the sign is ambiguous and might be platform # dependent # assert_array_almost_equal(abs(AA), abs(np.array([ # [3.8009, -69.4505, 50.3135, -43.2884], # [0.0000, 9.2033, -0.2001, 5.9881], # [0.0000, 0.0000, 1.4279, 4.4453], # [0.0000, 0.0000, 0.9019, -1.1962]])), 4) # assert_array_almost_equal(abs(BB), abs(np.array([ # [1.9005, -10.2285, 0.8658, -5.2134], # [0.0000, 2.3008, 0.7915, 0.4262], # [0.0000, 0.0000, 0.8101, 0.0000], # [0.0000, 0.0000, 0.0000, -0.2823]])), 4) # assert_array_almost_equal(abs(Q), abs(np.array([ # [0.4642, 0.7886, 0.2915, -0.2786], # [0.5002, -0.5986, 0.5638, -0.2713], # [0.5002, 0.0154, -0.0107, 0.8657], # [0.5331, -0.1395, -0.7727, -0.3151]])), 4) # assert_array_almost_equal(dot(Q,Q.T), eye(4)) # assert_array_almost_equal(abs(Z), abs(np.array([ # [0.9961, -0.0014, 0.0887, -0.0026], # [0.0057, -0.0404, -0.0938, -0.9948], # [0.0626, 
0.7194, -0.6908, 0.0363], # [0.0626, -0.6934, -0.7114, 0.0956]])), 4) # assert_array_almost_equal(dot(Z,Z.T), eye(4)) # def test_qz_complex_sort(self): # cA = np.array([ # [-21.10+22.50*1j, 53.50+-50.50*1j, -34.50+127.50*1j, 7.50+ 0.50*1j], # [-0.46+ -7.78*1j, -3.50+-37.50*1j, -15.50+ 58.50*1j,-10.50+ -1.50*1j], # [ 4.30+ -5.50*1j, 39.70+-17.10*1j, -68.50+ 12.50*1j, -7.50+ -3.50*1j], # [ 5.50+ 4.40*1j, 14.40+ 43.30*1j, -32.50+-46.00*1j,-19.00+-32.50*1j]]) # cB = np.array([ # [1.00+ -5.00*1j, 1.60+ 1.20*1j,-3.00+ 0.00*1j, 0.00+ -1.00*1j], # [0.80+ -0.60*1j, 3.00+ -5.00*1j,-4.00+ 3.00*1j,-2.40+ -3.20*1j], # [1.00+ 0.00*1j, 2.40+ 1.80*1j,-4.00+ -5.00*1j, 0.00+ -3.00*1j], # [0.00+ 1.00*1j,-1.80+ 2.40*1j, 0.00+ -4.00*1j, 4.00+ -5.00*1j]]) # AAS,BBS,QS,ZS,sdim = qz(cA,cB,sort='lhp') # eigenvalues = diag(AAS)/diag(BBS) # assert_(all(np.real(eigenvalues[:sdim] < 0))) # assert_(all(np.real(eigenvalues[sdim:] > 0))) def test_check_finite(self): n = 5 A = random([n,n]) B = random([n,n]) AA,BB,Q,Z = qz(A,B,check_finite=False) assert_array_almost_equal(dot(dot(Q,AA),Z.T), A) assert_array_almost_equal(dot(dot(Q,BB),Z.T), B) assert_array_almost_equal(dot(Q,Q.T), eye(n)) assert_array_almost_equal(dot(Z,Z.T), eye(n)) assert_(all(diag(BB) >= 0)) def _make_pos(X): # the decompositions can have different signs than verified results return np.sign(X)*X class TestOrdQZ(TestCase): @classmethod def setupClass(cls): # http://www.nag.com/lapack-ex/node119.html cls.A1 = np.array([[-21.10 - 22.50j, 53.5 - 50.5j, -34.5 + 127.5j, 7.5 + 0.5j], [-0.46 - 7.78j, -3.5 - 37.5j, -15.5 + 58.5j, -10.5 - 1.5j], [4.30 - 5.50j, 39.7 - 17.1j, -68.5 + 12.5j, -7.5 - 3.5j], [5.50 + 4.40j, 14.4 + 43.3j, -32.5 - 46.0j, -19.0 - 32.5j]]) cls.B1 = np.array([[1.0 - 5.0j, 1.6 + 1.2j, -3 + 0j, 0.0 - 1.0j], [0.8 - 0.6j, .0 - 5.0j, -4 + 3j, -2.4 - 3.2j], [1.0 + 0.0j, 2.4 + 1.8j, -4 - 5j, 0.0 - 3.0j], [0.0 + 1.0j, -1.8 + 2.4j, 0 - 4j, 4.0 - 5.0j]]) # http://www.nag.com/numeric/fl/nagdoc_fl23/xhtml/F08/f08yuf.xml cls.A2 = np.array([[3.9, 12.5, -34.5, -0.5], [4.3, 21.5, -47.5, 7.5], [4.3, 21.5, -43.5, 3.5], [4.4, 26.0, -46.0, 6.0]]) cls.B2 = np.array([[1, 2, -3, 1], [1, 3, -5, 4], [1, 3, -4, 3], [1, 3, -4, 4]]) # example with the eigenvalues # -0.33891648, 1.61217396+0.74013521j, 1.61217396-0.74013521j, # 0.61244091 # thus featuring: # * one complex conjugate eigenvalue pair, # * one eigenvalue in the lhp # * 2 eigenvalues in the unit circle # * 2 non-real eigenvalues cls.A3 = np.array([[5., 1., 3., 3.], [4., 4., 2., 7.], [7., 4., 1., 3.], [0., 4., 8., 7.]]) cls.B3 = np.array([[8., 10., 6., 10.], [7., 7., 2., 9.], [9., 1., 6., 6.], [5., 1., 4., 7.]]) def qz_decomp(self, sort): retc = ordqz(self.A1, self.B1, sort=sort) ret1 = ordqz(self.A2, self.B2, sort=sort) ret2 = ordqz(self.A3, self.B3, sort=sort) return retc, ret1, ret2 def check(self, A, B, sort, AA, BB, alpha, beta, Q, Z): I = np.eye(*A.shape) # make sure Q and Z are orthogonal assert_array_almost_equal(Q.dot(Q.T.conj()), I) assert_array_almost_equal(Z.dot(Z.T.conj()), I) # check factorization assert_array_almost_equal(Q.dot(AA), A.dot(Z)) assert_array_almost_equal(Q.dot(BB), B.dot(Z)) # check shape of AA and BB assert_array_equal(np.tril(AA, -2), np.zeros(AA.shape)) assert_array_equal(np.tril(BB, -1), np.zeros(BB.shape)) # check eigenvalues for i in range(A.shape[0]): # does the current diagonal element belong to a 2-by-2 block # that was already checked? 
if i > 0 and A[i, i - 1] != 0: continue # take care of 2-by-2 blocks if i < AA.shape[0] - 1 and AA[i + 1, i] != 0: evals, _ = eig(AA[i:i + 2, i:i + 2], BB[i:i + 2, i:i + 2]) # make sure the pair of complex conjugate eigenvalues # is ordered consistently (positive imaginary part first) if evals[0].imag < 0: evals = evals[[1, 0]] tmp = alpha[i:i + 2]/beta[i:i + 2] if tmp[0].imag < 0: tmp = tmp[[1, 0]] assert_array_almost_equal(evals, tmp) else: assert_almost_equal(AA[i, i]/BB[i, i], alpha[i]/beta[i]) sortfun = sort if sortfun == 'lhp': sortfun = lambda x, y: (x/y).real < 0 if sortfun == 'rhp': sortfun = lambda x, y: (x/y).real > 0 if sortfun == 'iuc': sortfun = lambda x, y: np.abs(x/y) < 1 if sortfun == 'ouc': sortfun = lambda x, y: np.abs(x/y) > 1 lastsort = True for i in range(A.shape[0]): cursort = sortfun(alpha[i], beta[i]) # once the sorting criterion was not matched all subsequent # eigenvalues also shouldn't match if not lastsort: assert(not cursort) lastsort = cursort def test_lhp(self): retc, ret1, ret2 = self.qz_decomp('lhp') self.check(self.A1, self.B1, 'lhp', *retc) self.check(self.A2, self.B2, 'lhp', *ret1) self.check(self.A3, self.B3, 'lhp', *ret2) def test_rhp(self): retc, ret1, ret2 = self.qz_decomp('rhp') self.check(self.A1, self.B1, 'rhp', *retc) self.check(self.A2, self.B2, 'rhp', *ret1) self.check(self.A3, self.B3, 'rhp', *ret2) def test_iuc(self): retc, ret1, ret2 = self.qz_decomp('iuc') self.check(self.A1, self.B1, 'iuc', *retc) self.check(self.A2, self.B2, 'iuc', *ret1) self.check(self.A3, self.B3, 'iuc', *ret2) def test_ouc(self): retc, ret1, ret2 = self.qz_decomp('ouc') self.check(self.A1, self.B1, 'ouc', *retc) self.check(self.A2, self.B2, 'ouc', *ret1) self.check(self.A3, self.B3, 'ouc', *ret2) def test_ref(self): # real eigenvalues first (top-left corner) sort = lambda x, y: (x/y).imag == 0 retc, ret1, ret2 = self.qz_decomp(sort) self.check(self.A1, self.B1, sort, *retc) self.check(self.A2, self.B2, sort, *ret1) self.check(self.A3, self.B3, sort, *ret2) def test_cef(self): # complex eigenvalues first (top-left corner) sort = lambda x, y: (x/y).imag != 0 retc, ret1, ret2 = self.qz_decomp(sort) self.check(self.A1, self.B1, sort, *retc) self.check(self.A2, self.B2, sort, *ret1) self.check(self.A3, self.B3, sort, *ret2) def test_diff_input_types(self): ret = ordqz(self.A1, self.B2, sort='lhp') self.check(self.A1, self.B2, 'lhp', *ret) ret = ordqz(self.B2, self.A1, sort='lhp') self.check(self.B2, self.A1, 'lhp', *ret) class TestOrdQZWorkspaceSize(TestCase): def setUp(self): seed(12345) def test_decompose(self): N = 202 # raises error if lwork parameter to dtrsen is too small for ddtype in [np.float32, np.float64]: A = random((N,N)).astype(ddtype) B = random((N,N)).astype(ddtype) # sort = lambda alphar, alphai, beta: alphar**2 + alphai**2< beta**2 sort = lambda alpha, beta: alpha < beta [S,T,alpha,beta,U,V] = ordqz(A,B,sort=sort, output='real') for ddtype in [np.complex, np.complex64]: A = random((N,N)).astype(ddtype) B = random((N,N)).astype(ddtype) sort = lambda alpha, beta: alpha < beta [S,T,alpha,beta,U,V] = ordqz(A,B,sort=sort, output='complex') @dec.slow def test_decompose_ouc(self): N = 202 # segfaults if lwork parameter to dtrsen is too small for ddtype in [np.float32, np.float64, np.complex, np.complex64]: A = random((N,N)).astype(ddtype) B = random((N,N)).astype(ddtype) [S,T,alpha,beta,U,V] = ordqz(A,B,sort='ouc') class TestDatacopied(TestCase): def test_datacopied(self): from scipy.linalg.decomp import _datacopied M = matrix([[0,1],[2,3]]) A = asarray(M) L = 
M.tolist() M2 = M.copy() class Fake1: def __array__(self): return A class Fake2: __array_interface__ = A.__array_interface__ F1 = Fake1() F2 = Fake2() for item, status in [(M, False), (A, False), (L, True), (M2, False), (F1, False), (F2, False)]: arr = asarray(item) assert_equal(_datacopied(arr, item), status, err_msg=repr(item)) def test_aligned_mem_float(): """Check linalg works with non-aligned memory""" # Allocate 402 bytes of memory (allocated on boundary) a = arange(402, dtype=np.uint8) # Create an array with boundary offset 4 z = np.frombuffer(a.data, offset=2, count=100, dtype=float32) z.shape = 10, 10 eig(z, overwrite_a=True) eig(z.T, overwrite_a=True) def test_aligned_mem(): """Check linalg works with non-aligned memory""" # Allocate 804 bytes of memory (allocated on boundary) a = arange(804, dtype=np.uint8) # Create an array with boundary offset 4 z = np.frombuffer(a.data, offset=4, count=100, dtype=float) z.shape = 10, 10 eig(z, overwrite_a=True) eig(z.T, overwrite_a=True) def test_aligned_mem_complex(): """Check that complex objects don't need to be completely aligned""" # Allocate 1608 bytes of memory (allocated on boundary) a = zeros(1608, dtype=np.uint8) # Create an array with boundary offset 8 z = np.frombuffer(a.data, offset=8, count=100, dtype=complex) z.shape = 10, 10 eig(z, overwrite_a=True) # This does not need special handling eig(z.T, overwrite_a=True) def check_lapack_misaligned(func, args, kwargs): args = list(args) for i in range(len(args)): a = args[:] if isinstance(a[i],np.ndarray): # Try misaligning a[i] aa = np.zeros(a[i].size*a[i].dtype.itemsize+8, dtype=np.uint8) aa = np.frombuffer(aa.data, offset=4, count=a[i].size, dtype=a[i].dtype) aa.shape = a[i].shape aa[...] = a[i] a[i] = aa func(*a,**kwargs) if len(a[i].shape) > 1: a[i] = a[i].T func(*a,**kwargs) @dec.knownfailureif(True, "Ticket #1152, triggers a segfault in rare cases.") def test_lapack_misaligned(): M = np.eye(10,dtype=float) R = np.arange(100) R.shape = 10,10 S = np.arange(20000,dtype=np.uint8) S = np.frombuffer(S.data, offset=4, count=100, dtype=float) S.shape = 10, 10 b = np.ones(10) LU, piv = lu_factor(S) for (func, args, kwargs) in [ (eig,(S,),dict(overwrite_a=True)), # crash (eigvals,(S,),dict(overwrite_a=True)), # no crash (lu,(S,),dict(overwrite_a=True)), # no crash (lu_factor,(S,),dict(overwrite_a=True)), # no crash (lu_solve,((LU,piv),b),dict(overwrite_b=True)), (solve,(S,b),dict(overwrite_a=True,overwrite_b=True)), (svd,(M,),dict(overwrite_a=True)), # no crash (svd,(R,),dict(overwrite_a=True)), # no crash (svd,(S,),dict(overwrite_a=True)), # crash (svdvals,(S,),dict()), # no crash (svdvals,(S,),dict(overwrite_a=True)), # crash (cholesky,(M,),dict(overwrite_a=True)), # no crash (qr,(S,),dict(overwrite_a=True)), # crash (rq,(S,),dict(overwrite_a=True)), # crash (hessenberg,(S,),dict(overwrite_a=True)), # crash (schur,(S,),dict(overwrite_a=True)), # crash ]: yield check_lapack_misaligned, func, args, kwargs # not properly tested # cholesky, rsf2csf, lu_solve, solve, eig_banded, eigvals_banded, eigh, diagsvd class TestOverwrite(object): def test_eig(self): assert_no_overwrite(eig, [(3,3)]) assert_no_overwrite(eig, [(3,3), (3,3)]) def test_eigh(self): assert_no_overwrite(eigh, [(3,3)]) assert_no_overwrite(eigh, [(3,3), (3,3)]) def test_eig_banded(self): assert_no_overwrite(eig_banded, [(3,2)]) def test_eigvals(self): assert_no_overwrite(eigvals, [(3,3)]) def test_eigvalsh(self): assert_no_overwrite(eigvalsh, [(3,3)]) def test_eigvals_banded(self): assert_no_overwrite(eigvals_banded, [(3,2)]) 
    def test_hessenberg(self):
        assert_no_overwrite(hessenberg, [(3, 3)])

    def test_lu_factor(self):
        assert_no_overwrite(lu_factor, [(3, 3)])

    def test_lu_solve(self):
        x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 8]])
        xlu = lu_factor(x)
        assert_no_overwrite(lambda b: lu_solve(xlu, b), [(3,)])

    def test_lu(self):
        assert_no_overwrite(lu, [(3, 3)])

    def test_qr(self):
        assert_no_overwrite(qr, [(3, 3)])

    def test_rq(self):
        assert_no_overwrite(rq, [(3, 3)])

    def test_schur(self):
        assert_no_overwrite(schur, [(3, 3)])

    def test_schur_complex(self):
        assert_no_overwrite(lambda a: schur(a, 'complex'), [(3, 3)],
                            dtypes=[np.float32, np.float64])

    def test_svd(self):
        assert_no_overwrite(svd, [(3, 3)])
        assert_no_overwrite(lambda a: svd(a, lapack_driver='gesvd'), [(3, 3)])

    def test_svdvals(self):
        assert_no_overwrite(svdvals, [(3, 3)])


def _check_orth(n):
    X = np.ones((n, 2), dtype=float)
    Y = orth(X)
    assert_equal(Y.shape, (n, 1))
    assert_allclose(Y, Y.mean(), atol=1e-10)
    Y = orth(X.T)
    assert_equal(Y.shape, (2, 1))
    assert_allclose(Y, Y.mean())


@dec.slow
@dec.skipif(np.dtype(np.intp).itemsize < 8, "test only on 64-bit, else too slow")
def test_orth_memory_efficiency():
    # Pick n so that 16*n bytes is reasonable but 8*n*n bytes is unreasonable.
    # Keep in mind that @dec.slow tests are likely to be running
    # under configurations that support 4Gb+ memory for tests related to
    # 32 bit overflow.
    n = 10*1000*1000
    try:
        _check_orth(n)
    except MemoryError:
        raise AssertionError('memory error perhaps caused by orth regression')


def test_orth():
    for n in 1, 2, 3, 10, 100:
        _check_orth(n)


if __name__ == "__main__":
    run_module_suite()
assert_array_almost_equal(dot(transpose(u), u), identity(u.shape[0]))
sigma = zeros((u.shape[0], vh.shape[0]), s.dtype.char)
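The alignment tests in this row all rely on the same trick: viewing a uint8 buffer through np.frombuffer with a byte offset that is not a multiple of the target itemsize. A minimal standalone sketch of that trick, independent of the scipy test helpers (buffer size and names here are chosen only for illustration):

import numpy as np

# 8 float64 slots plus 4 spare bytes, so an offset-4 view still fits.
buf = np.zeros(8 * 8 + 4, dtype=np.uint8)

aligned = np.frombuffer(buf.data, offset=0, count=8, dtype=np.float64)
misaligned = np.frombuffer(buf.data, offset=4, count=8, dtype=np.float64)

print(aligned.flags.aligned)     # True
print(misaligned.flags.aligned)  # False - the kind of array the tests feed to eig()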
Filter.py
import copy


def
(remaining, environment):
    returned = copy.copy(remaining)
    for i in range(len(returned)-1, -1, -1):
        r = returned[i]
        if any(not(r[e] == environment[e]) for e in environment if e in r):
            del returned[i]
        else:
            runs = copy.copy(r['runs'])
            for j in range(len(runs)-1, -1, -1):
                u = runs[j]
                if any(not(u[e] == environment[e]) for e in environment):
                    del runs[j]
            if len(runs) == 0:
                del returned[i]
            else:
                r = copy.deepcopy(r)
                r['runs'] = runs
                returned[i] = r
    return returned
filterRemaining
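Reassembled, this row defines filterRemaining(remaining, environment), which walks the list in reverse (so in-place deletion is safe) and drops entries, and individual runs, whose keys disagree with the environment. A small usage sketch with hypothetical data shaped the way the function expects:

remaining = [
    {'os': 'linux', 'runs': [{'os': 'linux', 'id': 1}, {'os': 'mac', 'id': 2}]},
    {'os': 'mac', 'runs': [{'os': 'mac', 'id': 3}]},
]
print(filterRemaining(remaining, {'os': 'linux'}))
# -> [{'os': 'linux', 'runs': [{'os': 'linux', 'id': 1}]}]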
test_processor.py
import unittest
from unittest.mock import patch

from PIL import Image

import processor


class
(unittest.TestCase):
    def test_normalize_input(self):
        img = Image.new("RGB", (400, 400), "#ff0000")
        normalized = processor.normalize_input(img)
        assert normalized.shape == (224, 224, 3), "Incorrect shape"

    @patch('processor.load_model', return_value="OK")
    def test_prepare(self, MockLoadModel):
        processor.prepare()
        assert MockLoadModel.called, "load_model not called"
        assert processor.model == "OK", "model not assigned"
ProcessorTest
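The two tests pin down the processor API: normalize_input must turn a PIL image into a 224x224x3 array, and prepare() must store the result of load_model in a module-level model. A minimal processor.py that would satisfy them -- a sketch only; the real module's normalization constants and load_model body are unknown:

import numpy as np

model = None

def load_model():
    raise NotImplementedError  # patched out in test_prepare

def normalize_input(img):
    # Resize to the 224x224 input the test expects; scaling to [0, 1]
    # is an assumption about what "normalize" means here.
    return np.asarray(img.resize((224, 224)), dtype=np.float32) / 255.0

def prepare():
    global model
    model = load_model()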
wigners.rs
#![allow(clippy::needless_return)]

use std::time::{Duration, Instant};

use wigners::wigner_3j;

use criterion::{Criterion, criterion_group, criterion_main};

fn compute_all_wigner_3j(max_angular: i32) {
    for l1 in 0..=max_angular {
        for l2 in 0..=max_angular {
            for l3 in 0..=max_angular {
                for m1 in -l1..=l1 {
                    for m2 in -l2..=l2 {
                        for m3 in -l3..=l3 {
                            wigner_3j(l1 as u32, l2 as u32, l3 as u32, m1, m2, m3);
                        }
                    }
                }
            }
        }
    }
}

fn bench_wigner3j(c: &mut Criterion)
criterion_group!(wigner3j, bench_wigner3j);
criterion_main!(wigner3j);
{
    let mut group = c.benchmark_group("wigners");
    group.sample_size(10);
    group.sampling_mode(criterion::SamplingMode::Flat);
    group.warm_up_time(Duration::from_secs(1));

    for &max_angular in &[4, 8, 12, 16, 20] {
        group.bench_function(&format!("max_angular={}", max_angular), |b| {
            b.iter_custom(|n_iters| {
                let mut duration = Duration::new(0, 0);
                for _ in 0..n_iters {
                    wigners::clear_wigner_3j_cache();
                    // only benchmark `compute_all_wigner_3j`, not including
                    // previously filled cache
                    let start = Instant::now();
                    compute_all_wigner_3j(max_angular);
                    duration += start.elapsed();
                }
                return duration
            })
        });
    }
}
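The iter_custom closure above keeps the cache clearing outside the timed region on every iteration. As a quick sanity check on the symbols being benchmarked (a standalone sketch; it assumes wigner_3j returns f64, as its use above implies), the closed form w3j(l, l, 0; m, -m, 0) = (-1)^(l - m) / sqrt(2l + 1) gives:

fn main() {
    let value = wigners::wigner_3j(1, 1, 0, 0, 0, 0);
    // (-1)^(1 - 0) / sqrt(2*1 + 1) = -1 / sqrt(3)
    assert!((value - (-1.0 / 3.0_f64.sqrt())).abs() < 1e-12);
    println!("w3j(1,1,0; 0,0,0) = {}", value);
}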
error.rs
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. #[non_exhaustive] #[derive(std::fmt::Debug)] pub struct CreateGroupError { pub kind: CreateGroupErrorKind, pub(crate) meta: smithy_types::Error, } #[non_exhaustive] #[derive(std::fmt::Debug)] pub enum CreateGroupErrorKind { BadRequestException(crate::error::BadRequestException), ForbiddenException(crate::error::ForbiddenException), InternalServerErrorException(crate::error::InternalServerErrorException), MethodNotAllowedException(crate::error::MethodNotAllowedException), TooManyRequestsException(crate::error::TooManyRequestsException), /// An unexpected error, eg. invalid JSON returned by the service or an unknown error code Unhandled(Box<dyn std::error::Error + Send + Sync + 'static>), } impl std::fmt::Display for CreateGroupError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.kind { CreateGroupErrorKind::BadRequestException(_inner) => _inner.fmt(f), CreateGroupErrorKind::ForbiddenException(_inner) => _inner.fmt(f), CreateGroupErrorKind::InternalServerErrorException(_inner) => _inner.fmt(f), CreateGroupErrorKind::MethodNotAllowedException(_inner) => _inner.fmt(f), CreateGroupErrorKind::TooManyRequestsException(_inner) => _inner.fmt(f), CreateGroupErrorKind::Unhandled(_inner) => _inner.fmt(f), } } } impl smithy_types::retry::ProvideErrorKind for CreateGroupError { fn code(&self) -> Option<&str> { CreateGroupError::code(self) } fn retryable_error_kind(&self) -> Option<smithy_types::retry::ErrorKind> { None } } impl CreateGroupError { pub fn new(kind: CreateGroupErrorKind, meta: smithy_types::Error) -> Self { Self { kind, meta } } pub fn unhandled(err: impl Into<Box<dyn std::error::Error + Send + Sync + 'static>>) -> Self { Self { kind: CreateGroupErrorKind::Unhandled(err.into()), meta: Default::default(), } } pub fn generic(err: smithy_types::Error) -> Self { Self { meta: err.clone(), kind: CreateGroupErrorKind::Unhandled(err.into()), } } // Consider if this should actually be `Option<Cow<&str>>`. This would enable us to use display // as implemented by std::Error to generate a message in that case. 
pub fn message(&self) -> Option<&str> { self.meta.message() } pub fn meta(&self) -> &smithy_types::Error { &self.meta } pub fn request_id(&self) -> Option<&str> { self.meta.request_id() } pub fn code(&self) -> Option<&str> { self.meta.code() } pub fn is_bad_request_exception(&self) -> bool { matches!(&self.kind, CreateGroupErrorKind::BadRequestException(_)) } pub fn is_forbidden_exception(&self) -> bool { matches!(&self.kind, CreateGroupErrorKind::ForbiddenException(_)) } pub fn is_internal_server_error_exception(&self) -> bool { matches!( &self.kind, CreateGroupErrorKind::InternalServerErrorException(_) ) } pub fn is_method_not_allowed_exception(&self) -> bool { matches!( &self.kind, CreateGroupErrorKind::MethodNotAllowedException(_) ) } pub fn is_too_many_requests_exception(&self) -> bool { matches!( &self.kind, CreateGroupErrorKind::TooManyRequestsException(_) ) } } impl std::error::Error for CreateGroupError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match &self.kind { CreateGroupErrorKind::BadRequestException(_inner) => Some(_inner), CreateGroupErrorKind::ForbiddenException(_inner) => Some(_inner), CreateGroupErrorKind::InternalServerErrorException(_inner) => Some(_inner), CreateGroupErrorKind::MethodNotAllowedException(_inner) => Some(_inner), CreateGroupErrorKind::TooManyRequestsException(_inner) => Some(_inner), CreateGroupErrorKind::Unhandled(_inner) => Some(_inner.as_ref()), } } } #[non_exhaustive] #[derive(std::fmt::Debug)] pub struct DeleteGroupError { pub kind: DeleteGroupErrorKind, pub(crate) meta: smithy_types::Error, } #[non_exhaustive] #[derive(std::fmt::Debug)] pub enum DeleteGroupErrorKind { BadRequestException(crate::error::BadRequestException), ForbiddenException(crate::error::ForbiddenException), InternalServerErrorException(crate::error::InternalServerErrorException), MethodNotAllowedException(crate::error::MethodNotAllowedException), NotFoundException(crate::error::NotFoundException), TooManyRequestsException(crate::error::TooManyRequestsException), /// An unexpected error, eg. invalid JSON returned by the service or an unknown error code Unhandled(Box<dyn std::error::Error + Send + Sync + 'static>), } impl std::fmt::Display for DeleteGroupError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.kind { DeleteGroupErrorKind::BadRequestException(_inner) => _inner.fmt(f), DeleteGroupErrorKind::ForbiddenException(_inner) => _inner.fmt(f), DeleteGroupErrorKind::InternalServerErrorException(_inner) => _inner.fmt(f), DeleteGroupErrorKind::MethodNotAllowedException(_inner) => _inner.fmt(f), DeleteGroupErrorKind::NotFoundException(_inner) => _inner.fmt(f), DeleteGroupErrorKind::TooManyRequestsException(_inner) => _inner.fmt(f), DeleteGroupErrorKind::Unhandled(_inner) => _inner.fmt(f), } } } impl smithy_types::retry::ProvideErrorKind for DeleteGroupError { fn code(&self) -> Option<&str> { DeleteGroupError::code(self) } fn retryable_error_kind(&self) -> Option<smithy_types::retry::ErrorKind> { None } } impl DeleteGroupError { pub fn new(kind: DeleteGroupErrorKind, meta: smithy_types::Error) -> Self { Self { kind, meta } } pub fn unhandled(err: impl Into<Box<dyn std::error::Error + Send + Sync + 'static>>) -> Self { Self { kind: DeleteGroupErrorKind::Unhandled(err.into()), meta: Default::default(), } } pub fn generic(err: smithy_types::Error) -> Self { Self { meta: err.clone(), kind: DeleteGroupErrorKind::Unhandled(err.into()), } } // Consider if this should actually be `Option<Cow<&str>>`. 
This would enable us to use display // as implemented by std::Error to generate a message in that case. pub fn message(&self) -> Option<&str> { self.meta.message() } pub fn meta(&self) -> &smithy_types::Error { &self.meta } pub fn request_id(&self) -> Option<&str> { self.meta.request_id() } pub fn code(&self) -> Option<&str> { self.meta.code() } pub fn is_bad_request_exception(&self) -> bool { matches!(&self.kind, DeleteGroupErrorKind::BadRequestException(_)) } pub fn is_forbidden_exception(&self) -> bool { matches!(&self.kind, DeleteGroupErrorKind::ForbiddenException(_)) } pub fn is_internal_server_error_exception(&self) -> bool { matches!( &self.kind, DeleteGroupErrorKind::InternalServerErrorException(_) ) } pub fn is_method_not_allowed_exception(&self) -> bool { matches!( &self.kind, DeleteGroupErrorKind::MethodNotAllowedException(_) ) } pub fn is_not_found_exception(&self) -> bool { matches!(&self.kind, DeleteGroupErrorKind::NotFoundException(_)) } pub fn is_too_many_requests_exception(&self) -> bool { matches!( &self.kind, DeleteGroupErrorKind::TooManyRequestsException(_) ) } } impl std::error::Error for DeleteGroupError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match &self.kind { DeleteGroupErrorKind::BadRequestException(_inner) => Some(_inner), DeleteGroupErrorKind::ForbiddenException(_inner) => Some(_inner), DeleteGroupErrorKind::InternalServerErrorException(_inner) => Some(_inner), DeleteGroupErrorKind::MethodNotAllowedException(_inner) => Some(_inner), DeleteGroupErrorKind::NotFoundException(_inner) => Some(_inner), DeleteGroupErrorKind::TooManyRequestsException(_inner) => Some(_inner), DeleteGroupErrorKind::Unhandled(_inner) => Some(_inner.as_ref()), } } } #[non_exhaustive] #[derive(std::fmt::Debug)] pub struct GetGroupError { pub kind: GetGroupErrorKind, pub(crate) meta: smithy_types::Error, } #[non_exhaustive] #[derive(std::fmt::Debug)] pub enum GetGroupErrorKind { BadRequestException(crate::error::BadRequestException), ForbiddenException(crate::error::ForbiddenException), InternalServerErrorException(crate::error::InternalServerErrorException), MethodNotAllowedException(crate::error::MethodNotAllowedException), NotFoundException(crate::error::NotFoundException), TooManyRequestsException(crate::error::TooManyRequestsException), /// An unexpected error, eg. 
invalid JSON returned by the service or an unknown error code Unhandled(Box<dyn std::error::Error + Send + Sync + 'static>), } impl std::fmt::Display for GetGroupError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.kind { GetGroupErrorKind::BadRequestException(_inner) => _inner.fmt(f), GetGroupErrorKind::ForbiddenException(_inner) => _inner.fmt(f), GetGroupErrorKind::InternalServerErrorException(_inner) => _inner.fmt(f), GetGroupErrorKind::MethodNotAllowedException(_inner) => _inner.fmt(f), GetGroupErrorKind::NotFoundException(_inner) => _inner.fmt(f), GetGroupErrorKind::TooManyRequestsException(_inner) => _inner.fmt(f), GetGroupErrorKind::Unhandled(_inner) => _inner.fmt(f), } } } impl smithy_types::retry::ProvideErrorKind for GetGroupError { fn code(&self) -> Option<&str> { GetGroupError::code(self) } fn retryable_error_kind(&self) -> Option<smithy_types::retry::ErrorKind> { None } } impl GetGroupError { pub fn new(kind: GetGroupErrorKind, meta: smithy_types::Error) -> Self { Self { kind, meta } } pub fn unhandled(err: impl Into<Box<dyn std::error::Error + Send + Sync + 'static>>) -> Self { Self { kind: GetGroupErrorKind::Unhandled(err.into()), meta: Default::default(), } } pub fn generic(err: smithy_types::Error) -> Self { Self { meta: err.clone(), kind: GetGroupErrorKind::Unhandled(err.into()), } } // Consider if this should actually be `Option<Cow<&str>>`. This would enable us to use display // as implemented by std::Error to generate a message in that case. pub fn message(&self) -> Option<&str> { self.meta.message() } pub fn meta(&self) -> &smithy_types::Error { &self.meta } pub fn request_id(&self) -> Option<&str> { self.meta.request_id() } pub fn code(&self) -> Option<&str> { self.meta.code() } pub fn is_bad_request_exception(&self) -> bool { matches!(&self.kind, GetGroupErrorKind::BadRequestException(_)) } pub fn is_forbidden_exception(&self) -> bool { matches!(&self.kind, GetGroupErrorKind::ForbiddenException(_)) } pub fn is_internal_server_error_exception(&self) -> bool { matches!( &self.kind, GetGroupErrorKind::InternalServerErrorException(_) ) } pub fn is_method_not_allowed_exception(&self) -> bool { matches!(&self.kind, GetGroupErrorKind::MethodNotAllowedException(_)) } pub fn is_not_found_exception(&self) -> bool { matches!(&self.kind, GetGroupErrorKind::NotFoundException(_)) } pub fn is_too_many_requests_exception(&self) -> bool { matches!(&self.kind, GetGroupErrorKind::TooManyRequestsException(_)) } } impl std::error::Error for GetGroupError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match &self.kind { GetGroupErrorKind::BadRequestException(_inner) => Some(_inner), GetGroupErrorKind::ForbiddenException(_inner) => Some(_inner), GetGroupErrorKind::InternalServerErrorException(_inner) => Some(_inner), GetGroupErrorKind::MethodNotAllowedException(_inner) => Some(_inner), GetGroupErrorKind::NotFoundException(_inner) => Some(_inner), GetGroupErrorKind::TooManyRequestsException(_inner) => Some(_inner), GetGroupErrorKind::Unhandled(_inner) => Some(_inner.as_ref()), } } } #[non_exhaustive] #[derive(std::fmt::Debug)] pub struct GetGroupConfigurationError { pub kind: GetGroupConfigurationErrorKind, pub(crate) meta: smithy_types::Error, } #[non_exhaustive] #[derive(std::fmt::Debug)] pub enum GetGroupConfigurationErrorKind { BadRequestException(crate::error::BadRequestException), ForbiddenException(crate::error::ForbiddenException), InternalServerErrorException(crate::error::InternalServerErrorException), 
MethodNotAllowedException(crate::error::MethodNotAllowedException), NotFoundException(crate::error::NotFoundException), TooManyRequestsException(crate::error::TooManyRequestsException), /// An unexpected error, eg. invalid JSON returned by the service or an unknown error code Unhandled(Box<dyn std::error::Error + Send + Sync + 'static>), } impl std::fmt::Display for GetGroupConfigurationError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.kind { GetGroupConfigurationErrorKind::BadRequestException(_inner) => _inner.fmt(f), GetGroupConfigurationErrorKind::ForbiddenException(_inner) => _inner.fmt(f), GetGroupConfigurationErrorKind::InternalServerErrorException(_inner) => _inner.fmt(f), GetGroupConfigurationErrorKind::MethodNotAllowedException(_inner) => _inner.fmt(f), GetGroupConfigurationErrorKind::NotFoundException(_inner) => _inner.fmt(f), GetGroupConfigurationErrorKind::TooManyRequestsException(_inner) => _inner.fmt(f), GetGroupConfigurationErrorKind::Unhandled(_inner) => _inner.fmt(f), } } } impl smithy_types::retry::ProvideErrorKind for GetGroupConfigurationError { fn code(&self) -> Option<&str> { GetGroupConfigurationError::code(self) } fn retryable_error_kind(&self) -> Option<smithy_types::retry::ErrorKind> { None } } impl GetGroupConfigurationError { pub fn new(kind: GetGroupConfigurationErrorKind, meta: smithy_types::Error) -> Self { Self { kind, meta } } pub fn unhandled(err: impl Into<Box<dyn std::error::Error + Send + Sync + 'static>>) -> Self { Self { kind: GetGroupConfigurationErrorKind::Unhandled(err.into()), meta: Default::default(), } } pub fn generic(err: smithy_types::Error) -> Self { Self { meta: err.clone(), kind: GetGroupConfigurationErrorKind::Unhandled(err.into()), } } // Consider if this should actually be `Option<Cow<&str>>`. This would enable us to use display // as implemented by std::Error to generate a message in that case. 
pub fn message(&self) -> Option<&str> { self.meta.message() } pub fn meta(&self) -> &smithy_types::Error { &self.meta } pub fn request_id(&self) -> Option<&str> { self.meta.request_id() } pub fn code(&self) -> Option<&str> { self.meta.code() } pub fn is_bad_request_exception(&self) -> bool { matches!( &self.kind, GetGroupConfigurationErrorKind::BadRequestException(_) ) } pub fn is_forbidden_exception(&self) -> bool { matches!( &self.kind, GetGroupConfigurationErrorKind::ForbiddenException(_) ) } pub fn is_internal_server_error_exception(&self) -> bool { matches!( &self.kind, GetGroupConfigurationErrorKind::InternalServerErrorException(_) ) } pub fn is_method_not_allowed_exception(&self) -> bool { matches!( &self.kind, GetGroupConfigurationErrorKind::MethodNotAllowedException(_) ) } pub fn is_not_found_exception(&self) -> bool { matches!( &self.kind, GetGroupConfigurationErrorKind::NotFoundException(_) ) } pub fn is_too_many_requests_exception(&self) -> bool { matches!( &self.kind, GetGroupConfigurationErrorKind::TooManyRequestsException(_) ) } } impl std::error::Error for GetGroupConfigurationError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match &self.kind { GetGroupConfigurationErrorKind::BadRequestException(_inner) => Some(_inner), GetGroupConfigurationErrorKind::ForbiddenException(_inner) => Some(_inner), GetGroupConfigurationErrorKind::InternalServerErrorException(_inner) => Some(_inner), GetGroupConfigurationErrorKind::MethodNotAllowedException(_inner) => Some(_inner), GetGroupConfigurationErrorKind::NotFoundException(_inner) => Some(_inner), GetGroupConfigurationErrorKind::TooManyRequestsException(_inner) => Some(_inner), GetGroupConfigurationErrorKind::Unhandled(_inner) => Some(_inner.as_ref()), } } } #[non_exhaustive] #[derive(std::fmt::Debug)] pub struct GetGroupQueryError { pub kind: GetGroupQueryErrorKind, pub(crate) meta: smithy_types::Error, } #[non_exhaustive] #[derive(std::fmt::Debug)] pub enum GetGroupQueryErrorKind { BadRequestException(crate::error::BadRequestException), ForbiddenException(crate::error::ForbiddenException), InternalServerErrorException(crate::error::InternalServerErrorException), MethodNotAllowedException(crate::error::MethodNotAllowedException), NotFoundException(crate::error::NotFoundException), TooManyRequestsException(crate::error::TooManyRequestsException), /// An unexpected error, eg. 
invalid JSON returned by the service or an unknown error code Unhandled(Box<dyn std::error::Error + Send + Sync + 'static>), } impl std::fmt::Display for GetGroupQueryError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.kind { GetGroupQueryErrorKind::BadRequestException(_inner) => _inner.fmt(f), GetGroupQueryErrorKind::ForbiddenException(_inner) => _inner.fmt(f), GetGroupQueryErrorKind::InternalServerErrorException(_inner) => _inner.fmt(f), GetGroupQueryErrorKind::MethodNotAllowedException(_inner) => _inner.fmt(f), GetGroupQueryErrorKind::NotFoundException(_inner) => _inner.fmt(f), GetGroupQueryErrorKind::TooManyRequestsException(_inner) => _inner.fmt(f), GetGroupQueryErrorKind::Unhandled(_inner) => _inner.fmt(f), } } } impl smithy_types::retry::ProvideErrorKind for GetGroupQueryError { fn code(&self) -> Option<&str> { GetGroupQueryError::code(self) } fn retryable_error_kind(&self) -> Option<smithy_types::retry::ErrorKind> { None } } impl GetGroupQueryError { pub fn new(kind: GetGroupQueryErrorKind, meta: smithy_types::Error) -> Self { Self { kind, meta } } pub fn unhandled(err: impl Into<Box<dyn std::error::Error + Send + Sync + 'static>>) -> Self { Self { kind: GetGroupQueryErrorKind::Unhandled(err.into()), meta: Default::default(), } } pub fn generic(err: smithy_types::Error) -> Self { Self { meta: err.clone(), kind: GetGroupQueryErrorKind::Unhandled(err.into()), } } // Consider if this should actually be `Option<Cow<&str>>`. This would enable us to use display // as implemented by std::Error to generate a message in that case. pub fn message(&self) -> Option<&str> { self.meta.message() } pub fn meta(&self) -> &smithy_types::Error { &self.meta } pub fn request_id(&self) -> Option<&str> { self.meta.request_id() } pub fn code(&self) -> Option<&str> { self.meta.code() } pub fn is_bad_request_exception(&self) -> bool { matches!(&self.kind, GetGroupQueryErrorKind::BadRequestException(_)) } pub fn is_forbidden_exception(&self) -> bool { matches!(&self.kind, GetGroupQueryErrorKind::ForbiddenException(_)) } pub fn is_internal_server_error_exception(&self) -> bool { matches!( &self.kind, GetGroupQueryErrorKind::InternalServerErrorException(_) ) } pub fn is_method_not_allowed_exception(&self) -> bool { matches!( &self.kind, GetGroupQueryErrorKind::MethodNotAllowedException(_) ) } pub fn is_not_found_exception(&self) -> bool { matches!(&self.kind, GetGroupQueryErrorKind::NotFoundException(_)) } pub fn is_too_many_requests_exception(&self) -> bool { matches!( &self.kind, GetGroupQueryErrorKind::TooManyRequestsException(_) ) } } impl std::error::Error for GetGroupQueryError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match &self.kind { GetGroupQueryErrorKind::BadRequestException(_inner) => Some(_inner), GetGroupQueryErrorKind::ForbiddenException(_inner) => Some(_inner), GetGroupQueryErrorKind::InternalServerErrorException(_inner) => Some(_inner), GetGroupQueryErrorKind::MethodNotAllowedException(_inner) => Some(_inner), GetGroupQueryErrorKind::NotFoundException(_inner) => Some(_inner), GetGroupQueryErrorKind::TooManyRequestsException(_inner) => Some(_inner), GetGroupQueryErrorKind::Unhandled(_inner) => Some(_inner.as_ref()), } } } #[non_exhaustive] #[derive(std::fmt::Debug)] pub struct GetTagsError { pub kind: GetTagsErrorKind, pub(crate) meta: smithy_types::Error, } #[non_exhaustive] #[derive(std::fmt::Debug)] pub enum GetTagsErrorKind { BadRequestException(crate::error::BadRequestException), 
ForbiddenException(crate::error::ForbiddenException), InternalServerErrorException(crate::error::InternalServerErrorException), MethodNotAllowedException(crate::error::MethodNotAllowedException), NotFoundException(crate::error::NotFoundException), TooManyRequestsException(crate::error::TooManyRequestsException), /// An unexpected error, eg. invalid JSON returned by the service or an unknown error code Unhandled(Box<dyn std::error::Error + Send + Sync + 'static>), } impl std::fmt::Display for GetTagsError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.kind { GetTagsErrorKind::BadRequestException(_inner) => _inner.fmt(f), GetTagsErrorKind::ForbiddenException(_inner) => _inner.fmt(f), GetTagsErrorKind::InternalServerErrorException(_inner) => _inner.fmt(f), GetTagsErrorKind::MethodNotAllowedException(_inner) => _inner.fmt(f), GetTagsErrorKind::NotFoundException(_inner) => _inner.fmt(f), GetTagsErrorKind::TooManyRequestsException(_inner) => _inner.fmt(f), GetTagsErrorKind::Unhandled(_inner) => _inner.fmt(f), } } } impl smithy_types::retry::ProvideErrorKind for GetTagsError { fn code(&self) -> Option<&str> { GetTagsError::code(self) } fn retryable_error_kind(&self) -> Option<smithy_types::retry::ErrorKind> { None } } impl GetTagsError { pub fn new(kind: GetTagsErrorKind, meta: smithy_types::Error) -> Self { Self { kind, meta } } pub fn unhandled(err: impl Into<Box<dyn std::error::Error + Send + Sync + 'static>>) -> Self { Self { kind: GetTagsErrorKind::Unhandled(err.into()), meta: Default::default(), } } pub fn generic(err: smithy_types::Error) -> Self { Self { meta: err.clone(), kind: GetTagsErrorKind::Unhandled(err.into()), } } // Consider if this should actually be `Option<Cow<&str>>`. This would enable us to use display // as implemented by std::Error to generate a message in that case. 
pub fn message(&self) -> Option<&str> { self.meta.message() } pub fn meta(&self) -> &smithy_types::Error { &self.meta } pub fn request_id(&self) -> Option<&str> { self.meta.request_id() } pub fn code(&self) -> Option<&str> { self.meta.code() } pub fn is_bad_request_exception(&self) -> bool { matches!(&self.kind, GetTagsErrorKind::BadRequestException(_)) } pub fn is_forbidden_exception(&self) -> bool { matches!(&self.kind, GetTagsErrorKind::ForbiddenException(_)) } pub fn is_internal_server_error_exception(&self) -> bool { matches!( &self.kind, GetTagsErrorKind::InternalServerErrorException(_) ) } pub fn is_method_not_allowed_exception(&self) -> bool { matches!(&self.kind, GetTagsErrorKind::MethodNotAllowedException(_)) } pub fn is_not_found_exception(&self) -> bool { matches!(&self.kind, GetTagsErrorKind::NotFoundException(_)) } pub fn is_too_many_requests_exception(&self) -> bool { matches!(&self.kind, GetTagsErrorKind::TooManyRequestsException(_)) } } impl std::error::Error for GetTagsError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match &self.kind { GetTagsErrorKind::BadRequestException(_inner) => Some(_inner), GetTagsErrorKind::ForbiddenException(_inner) => Some(_inner), GetTagsErrorKind::InternalServerErrorException(_inner) => Some(_inner), GetTagsErrorKind::MethodNotAllowedException(_inner) => Some(_inner), GetTagsErrorKind::NotFoundException(_inner) => Some(_inner), GetTagsErrorKind::TooManyRequestsException(_inner) => Some(_inner), GetTagsErrorKind::Unhandled(_inner) => Some(_inner.as_ref()), } } } #[non_exhaustive] #[derive(std::fmt::Debug)] pub struct GroupResourcesError { pub kind: GroupResourcesErrorKind, pub(crate) meta: smithy_types::Error, } #[non_exhaustive] #[derive(std::fmt::Debug)] pub enum GroupResourcesErrorKind { BadRequestException(crate::error::BadRequestException), ForbiddenException(crate::error::ForbiddenException), InternalServerErrorException(crate::error::InternalServerErrorException), MethodNotAllowedException(crate::error::MethodNotAllowedException), NotFoundException(crate::error::NotFoundException), TooManyRequestsException(crate::error::TooManyRequestsException), /// An unexpected error, eg. 
invalid JSON returned by the service or an unknown error code Unhandled(Box<dyn std::error::Error + Send + Sync + 'static>), } impl std::fmt::Display for GroupResourcesError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.kind { GroupResourcesErrorKind::BadRequestException(_inner) => _inner.fmt(f), GroupResourcesErrorKind::ForbiddenException(_inner) => _inner.fmt(f), GroupResourcesErrorKind::InternalServerErrorException(_inner) => _inner.fmt(f), GroupResourcesErrorKind::MethodNotAllowedException(_inner) => _inner.fmt(f), GroupResourcesErrorKind::NotFoundException(_inner) => _inner.fmt(f), GroupResourcesErrorKind::TooManyRequestsException(_inner) => _inner.fmt(f), GroupResourcesErrorKind::Unhandled(_inner) => _inner.fmt(f), } } } impl smithy_types::retry::ProvideErrorKind for GroupResourcesError { fn code(&self) -> Option<&str> { GroupResourcesError::code(self) } fn retryable_error_kind(&self) -> Option<smithy_types::retry::ErrorKind> { None } } impl GroupResourcesError { pub fn new(kind: GroupResourcesErrorKind, meta: smithy_types::Error) -> Self { Self { kind, meta } } pub fn unhandled(err: impl Into<Box<dyn std::error::Error + Send + Sync + 'static>>) -> Self { Self { kind: GroupResourcesErrorKind::Unhandled(err.into()), meta: Default::default(), } } pub fn generic(err: smithy_types::Error) -> Self { Self { meta: err.clone(), kind: GroupResourcesErrorKind::Unhandled(err.into()), } } // Consider if this should actually be `Option<Cow<&str>>`. This would enable us to use display // as implemented by std::Error to generate a message in that case. pub fn message(&self) -> Option<&str> { self.meta.message() } pub fn meta(&self) -> &smithy_types::Error { &self.meta } pub fn request_id(&self) -> Option<&str> { self.meta.request_id() } pub fn code(&self) -> Option<&str> { self.meta.code() } pub fn is_bad_request_exception(&self) -> bool { matches!(&self.kind, GroupResourcesErrorKind::BadRequestException(_)) } pub fn is_forbidden_exception(&self) -> bool { matches!(&self.kind, GroupResourcesErrorKind::ForbiddenException(_)) } pub fn is_internal_server_error_exception(&self) -> bool { matches!( &self.kind, GroupResourcesErrorKind::InternalServerErrorException(_) ) } pub fn is_method_not_allowed_exception(&self) -> bool { matches!( &self.kind, GroupResourcesErrorKind::MethodNotAllowedException(_) ) } pub fn is_not_found_exception(&self) -> bool { matches!(&self.kind, GroupResourcesErrorKind::NotFoundException(_)) } pub fn is_too_many_requests_exception(&self) -> bool { matches!( &self.kind, GroupResourcesErrorKind::TooManyRequestsException(_) ) } } impl std::error::Error for GroupResourcesError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match &self.kind { GroupResourcesErrorKind::BadRequestException(_inner) => Some(_inner), GroupResourcesErrorKind::ForbiddenException(_inner) => Some(_inner), GroupResourcesErrorKind::InternalServerErrorException(_inner) => Some(_inner), GroupResourcesErrorKind::MethodNotAllowedException(_inner) => Some(_inner), GroupResourcesErrorKind::NotFoundException(_inner) => Some(_inner), GroupResourcesErrorKind::TooManyRequestsException(_inner) => Some(_inner), GroupResourcesErrorKind::Unhandled(_inner) => Some(_inner.as_ref()), } } } #[non_exhaustive] #[derive(std::fmt::Debug)] pub struct ListGroupResourcesError { pub kind: ListGroupResourcesErrorKind, pub(crate) meta: smithy_types::Error, } #[non_exhaustive] #[derive(std::fmt::Debug)] pub enum ListGroupResourcesErrorKind { 
BadRequestException(crate::error::BadRequestException), ForbiddenException(crate::error::ForbiddenException), InternalServerErrorException(crate::error::InternalServerErrorException), MethodNotAllowedException(crate::error::MethodNotAllowedException), NotFoundException(crate::error::NotFoundException), TooManyRequestsException(crate::error::TooManyRequestsException), UnauthorizedException(crate::error::UnauthorizedException), /// An unexpected error, eg. invalid JSON returned by the service or an unknown error code Unhandled(Box<dyn std::error::Error + Send + Sync + 'static>), } impl std::fmt::Display for ListGroupResourcesError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.kind { ListGroupResourcesErrorKind::BadRequestException(_inner) => _inner.fmt(f), ListGroupResourcesErrorKind::ForbiddenException(_inner) => _inner.fmt(f), ListGroupResourcesErrorKind::InternalServerErrorException(_inner) => _inner.fmt(f), ListGroupResourcesErrorKind::MethodNotAllowedException(_inner) => _inner.fmt(f), ListGroupResourcesErrorKind::NotFoundException(_inner) => _inner.fmt(f), ListGroupResourcesErrorKind::TooManyRequestsException(_inner) => _inner.fmt(f), ListGroupResourcesErrorKind::UnauthorizedException(_inner) => _inner.fmt(f), ListGroupResourcesErrorKind::Unhandled(_inner) => _inner.fmt(f), } } } impl smithy_types::retry::ProvideErrorKind for ListGroupResourcesError { fn code(&self) -> Option<&str> { ListGroupResourcesError::code(self) } fn retryable_error_kind(&self) -> Option<smithy_types::retry::ErrorKind> { None } } impl ListGroupResourcesError { pub fn new(kind: ListGroupResourcesErrorKind, meta: smithy_types::Error) -> Self { Self { kind, meta } } pub fn unhandled(err: impl Into<Box<dyn std::error::Error + Send + Sync + 'static>>) -> Self { Self { kind: ListGroupResourcesErrorKind::Unhandled(err.into()), meta: Default::default(), } } pub fn generic(err: smithy_types::Error) -> Self { Self { meta: err.clone(), kind: ListGroupResourcesErrorKind::Unhandled(err.into()), } } // Consider if this should actually be `Option<Cow<&str>>`. This would enable us to use display // as implemented by std::Error to generate a message in that case. 
pub fn message(&self) -> Option<&str> { self.meta.message() } pub fn meta(&self) -> &smithy_types::Error { &self.meta } pub fn request_id(&self) -> Option<&str> { self.meta.request_id() } pub fn code(&self) -> Option<&str> { self.meta.code() } pub fn is_bad_request_exception(&self) -> bool { matches!( &self.kind, ListGroupResourcesErrorKind::BadRequestException(_) ) } pub fn is_forbidden_exception(&self) -> bool { matches!( &self.kind, ListGroupResourcesErrorKind::ForbiddenException(_) ) } pub fn is_internal_server_error_exception(&self) -> bool { matches!( &self.kind, ListGroupResourcesErrorKind::InternalServerErrorException(_) ) } pub fn is_method_not_allowed_exception(&self) -> bool { matches!( &self.kind, ListGroupResourcesErrorKind::MethodNotAllowedException(_) ) } pub fn is_not_found_exception(&self) -> bool { matches!( &self.kind, ListGroupResourcesErrorKind::NotFoundException(_) ) } pub fn is_too_many_requests_exception(&self) -> bool { matches!( &self.kind, ListGroupResourcesErrorKind::TooManyRequestsException(_) ) } pub fn is_unauthorized_exception(&self) -> bool { matches!( &self.kind, ListGroupResourcesErrorKind::UnauthorizedException(_) ) } } impl std::error::Error for ListGroupResourcesError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match &self.kind { ListGroupResourcesErrorKind::BadRequestException(_inner) => Some(_inner), ListGroupResourcesErrorKind::ForbiddenException(_inner) => Some(_inner), ListGroupResourcesErrorKind::InternalServerErrorException(_inner) => Some(_inner), ListGroupResourcesErrorKind::MethodNotAllowedException(_inner) => Some(_inner), ListGroupResourcesErrorKind::NotFoundException(_inner) => Some(_inner), ListGroupResourcesErrorKind::TooManyRequestsException(_inner) => Some(_inner), ListGroupResourcesErrorKind::UnauthorizedException(_inner) => Some(_inner), ListGroupResourcesErrorKind::Unhandled(_inner) => Some(_inner.as_ref()), } } } #[non_exhaustive] #[derive(std::fmt::Debug)] pub struct ListGroupsError { pub kind: ListGroupsErrorKind, pub(crate) meta: smithy_types::Error, } #[non_exhaustive] #[derive(std::fmt::Debug)] pub enum ListGroupsErrorKind { BadRequestException(crate::error::BadRequestException), ForbiddenException(crate::error::ForbiddenException), InternalServerErrorException(crate::error::InternalServerErrorException), MethodNotAllowedException(crate::error::MethodNotAllowedException), TooManyRequestsException(crate::error::TooManyRequestsException), /// An unexpected error, eg. 
invalid JSON returned by the service or an unknown error code Unhandled(Box<dyn std::error::Error + Send + Sync + 'static>), } impl std::fmt::Display for ListGroupsError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.kind { ListGroupsErrorKind::BadRequestException(_inner) => _inner.fmt(f), ListGroupsErrorKind::ForbiddenException(_inner) => _inner.fmt(f), ListGroupsErrorKind::InternalServerErrorException(_inner) => _inner.fmt(f), ListGroupsErrorKind::MethodNotAllowedException(_inner) => _inner.fmt(f), ListGroupsErrorKind::TooManyRequestsException(_inner) => _inner.fmt(f), ListGroupsErrorKind::Unhandled(_inner) => _inner.fmt(f), } } } impl smithy_types::retry::ProvideErrorKind for ListGroupsError { fn code(&self) -> Option<&str> { ListGroupsError::code(self) } fn retryable_error_kind(&self) -> Option<smithy_types::retry::ErrorKind> { None } } impl ListGroupsError { pub fn new(kind: ListGroupsErrorKind, meta: smithy_types::Error) -> Self { Self { kind, meta } } pub fn unhandled(err: impl Into<Box<dyn std::error::Error + Send + Sync + 'static>>) -> Self { Self { kind: ListGroupsErrorKind::Unhandled(err.into()), meta: Default::default(), } } pub fn generic(err: smithy_types::Error) -> Self { Self { meta: err.clone(), kind: ListGroupsErrorKind::Unhandled(err.into()), } } // Consider if this should actually be `Option<Cow<&str>>`. This would enable us to use display // as implemented by std::Error to generate a message in that case. pub fn message(&self) -> Option<&str> { self.meta.message() } pub fn meta(&self) -> &smithy_types::Error { &self.meta } pub fn request_id(&self) -> Option<&str> { self.meta.request_id() } pub fn code(&self) -> Option<&str> { self.meta.code() } pub fn is_bad_request_exception(&self) -> bool { matches!(&self.kind, ListGroupsErrorKind::BadRequestException(_)) } pub fn is_forbidden_exception(&self) -> bool { matches!(&self.kind, ListGroupsErrorKind::ForbiddenException(_)) } pub fn is_internal_server_error_exception(&self) -> bool { matches!( &self.kind, ListGroupsErrorKind::InternalServerErrorException(_) ) } pub fn is_method_not_allowed_exception(&self) -> bool { matches!( &self.kind, ListGroupsErrorKind::MethodNotAllowedException(_) ) } pub fn is_too_many_requests_exception(&self) -> bool { matches!(&self.kind, ListGroupsErrorKind::TooManyRequestsException(_)) } } impl std::error::Error for ListGroupsError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match &self.kind { ListGroupsErrorKind::BadRequestException(_inner) => Some(_inner), ListGroupsErrorKind::ForbiddenException(_inner) => Some(_inner), ListGroupsErrorKind::InternalServerErrorException(_inner) => Some(_inner), ListGroupsErrorKind::MethodNotAllowedException(_inner) => Some(_inner), ListGroupsErrorKind::TooManyRequestsException(_inner) => Some(_inner), ListGroupsErrorKind::Unhandled(_inner) => Some(_inner.as_ref()), } } } #[non_exhaustive] #[derive(std::fmt::Debug)] pub struct PutGroupConfigurationError { pub kind: PutGroupConfigurationErrorKind, pub(crate) meta: smithy_types::Error, } #[non_exhaustive] #[derive(std::fmt::Debug)] pub enum PutGroupConfigurationErrorKind { BadRequestException(crate::error::BadRequestException), ForbiddenException(crate::error::ForbiddenException), InternalServerErrorException(crate::error::InternalServerErrorException), MethodNotAllowedException(crate::error::MethodNotAllowedException), NotFoundException(crate::error::NotFoundException), TooManyRequestsException(crate::error::TooManyRequestsException), /// An unexpected 
error, eg. invalid JSON returned by the service or an unknown error code Unhandled(Box<dyn std::error::Error + Send + Sync + 'static>), } impl std::fmt::Display for PutGroupConfigurationError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.kind { PutGroupConfigurationErrorKind::BadRequestException(_inner) => _inner.fmt(f), PutGroupConfigurationErrorKind::ForbiddenException(_inner) => _inner.fmt(f), PutGroupConfigurationErrorKind::InternalServerErrorException(_inner) => _inner.fmt(f), PutGroupConfigurationErrorKind::MethodNotAllowedException(_inner) => _inner.fmt(f), PutGroupConfigurationErrorKind::NotFoundException(_inner) => _inner.fmt(f), PutGroupConfigurationErrorKind::TooManyRequestsException(_inner) => _inner.fmt(f), PutGroupConfigurationErrorKind::Unhandled(_inner) => _inner.fmt(f), } } } impl smithy_types::retry::ProvideErrorKind for PutGroupConfigurationError { fn code(&self) -> Option<&str> { PutGroupConfigurationError::code(self) } fn retryable_error_kind(&self) -> Option<smithy_types::retry::ErrorKind> { None } } impl PutGroupConfigurationError { pub fn new(kind: PutGroupConfigurationErrorKind, meta: smithy_types::Error) -> Self { Self { kind, meta } } pub fn unhandled(err: impl Into<Box<dyn std::error::Error + Send + Sync + 'static>>) -> Self { Self { kind: PutGroupConfigurationErrorKind::Unhandled(err.into()), meta: Default::default(), } } pub fn generic(err: smithy_types::Error) -> Self { Self { meta: err.clone(), kind: PutGroupConfigurationErrorKind::Unhandled(err.into()), } } // Consider if this should actually be `Option<Cow<&str>>`. This would enable us to use display // as implemented by std::Error to generate a message in that case. pub fn message(&self) -> Option<&str> { self.meta.message() } pub fn meta(&self) -> &smithy_types::Error { &self.meta } pub fn request_id(&self) -> Option<&str> { self.meta.request_id() } pub fn code(&self) -> Option<&str> { self.meta.code() } pub fn is_bad_request_exception(&self) -> bool { matches!( &self.kind, PutGroupConfigurationErrorKind::BadRequestException(_) ) } pub fn is_forbidden_exception(&self) -> bool { matches!( &self.kind, PutGroupConfigurationErrorKind::ForbiddenException(_) ) } pub fn is_internal_server_error_exception(&self) -> bool { matches!( &self.kind, PutGroupConfigurationErrorKind::InternalServerErrorException(_) ) } pub fn is_method_not_allowed_exception(&self) -> bool { matches!( &self.kind, PutGroupConfigurationErrorKind::MethodNotAllowedException(_) ) } pub fn is_not_found_exception(&self) -> bool { matches!( &self.kind, PutGroupConfigurationErrorKind::NotFoundException(_) ) } pub fn is_too_many_requests_exception(&self) -> bool { matches!( &self.kind, PutGroupConfigurationErrorKind::TooManyRequestsException(_) ) } } impl std::error::Error for PutGroupConfigurationError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match &self.kind { PutGroupConfigurationErrorKind::BadRequestException(_inner) => Some(_inner), PutGroupConfigurationErrorKind::ForbiddenException(_inner) => Some(_inner), PutGroupConfigurationErrorKind::InternalServerErrorException(_inner) => Some(_inner), PutGroupConfigurationErrorKind::MethodNotAllowedException(_inner) => Some(_inner), PutGroupConfigurationErrorKind::NotFoundException(_inner) => Some(_inner), PutGroupConfigurationErrorKind::TooManyRequestsException(_inner) => Some(_inner), PutGroupConfigurationErrorKind::Unhandled(_inner) => Some(_inner.as_ref()), } } } #[non_exhaustive] #[derive(std::fmt::Debug)] pub struct 
SearchResourcesError { pub kind: SearchResourcesErrorKind, pub(crate) meta: smithy_types::Error, } #[non_exhaustive] #[derive(std::fmt::Debug)] pub enum SearchResourcesErrorKind { BadRequestException(crate::error::BadRequestException), ForbiddenException(crate::error::ForbiddenException), InternalServerErrorException(crate::error::InternalServerErrorException), MethodNotAllowedException(crate::error::MethodNotAllowedException), TooManyRequestsException(crate::error::TooManyRequestsException), UnauthorizedException(crate::error::UnauthorizedException), /// An unexpected error, eg. invalid JSON returned by the service or an unknown error code Unhandled(Box<dyn std::error::Error + Send + Sync + 'static>), } impl std::fmt::Display for SearchResourcesError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.kind { SearchResourcesErrorKind::BadRequestException(_inner) => _inner.fmt(f), SearchResourcesErrorKind::ForbiddenException(_inner) => _inner.fmt(f), SearchResourcesErrorKind::InternalServerErrorException(_inner) => _inner.fmt(f), SearchResourcesErrorKind::MethodNotAllowedException(_inner) => _inner.fmt(f), SearchResourcesErrorKind::TooManyRequestsException(_inner) => _inner.fmt(f), SearchResourcesErrorKind::UnauthorizedException(_inner) => _inner.fmt(f), SearchResourcesErrorKind::Unhandled(_inner) => _inner.fmt(f), } } } impl smithy_types::retry::ProvideErrorKind for SearchResourcesError { fn code(&self) -> Option<&str> { SearchResourcesError::code(self) } fn retryable_error_kind(&self) -> Option<smithy_types::retry::ErrorKind> { None } } impl SearchResourcesError { pub fn new(kind: SearchResourcesErrorKind, meta: smithy_types::Error) -> Self { Self { kind, meta } } pub fn unhandled(err: impl Into<Box<dyn std::error::Error + Send + Sync + 'static>>) -> Self { Self { kind: SearchResourcesErrorKind::Unhandled(err.into()), meta: Default::default(), } } pub fn generic(err: smithy_types::Error) -> Self { Self { meta: err.clone(), kind: SearchResourcesErrorKind::Unhandled(err.into()), } } // Consider if this should actually be `Option<Cow<&str>>`. This would enable us to use display // as implemented by std::Error to generate a message in that case. 
pub fn message(&self) -> Option<&str> { self.meta.message() } pub fn meta(&self) -> &smithy_types::Error { &self.meta } pub fn request_id(&self) -> Option<&str> { self.meta.request_id() } pub fn code(&self) -> Option<&str> { self.meta.code() } pub fn is_bad_request_exception(&self) -> bool { matches!(&self.kind, SearchResourcesErrorKind::BadRequestException(_)) } pub fn is_forbidden_exception(&self) -> bool { matches!(&self.kind, SearchResourcesErrorKind::ForbiddenException(_)) } pub fn is_internal_server_error_exception(&self) -> bool { matches!( &self.kind, SearchResourcesErrorKind::InternalServerErrorException(_) ) } pub fn is_method_not_allowed_exception(&self) -> bool { matches!( &self.kind, SearchResourcesErrorKind::MethodNotAllowedException(_) ) } pub fn is_too_many_requests_exception(&self) -> bool { matches!( &self.kind, SearchResourcesErrorKind::TooManyRequestsException(_) ) } pub fn is_unauthorized_exception(&self) -> bool { matches!( &self.kind, SearchResourcesErrorKind::UnauthorizedException(_) ) } } impl std::error::Error for SearchResourcesError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match &self.kind { SearchResourcesErrorKind::BadRequestException(_inner) => Some(_inner), SearchResourcesErrorKind::ForbiddenException(_inner) => Some(_inner), SearchResourcesErrorKind::InternalServerErrorException(_inner) => Some(_inner), SearchResourcesErrorKind::MethodNotAllowedException(_inner) => Some(_inner), SearchResourcesErrorKind::TooManyRequestsException(_inner) => Some(_inner), SearchResourcesErrorKind::UnauthorizedException(_inner) => Some(_inner), SearchResourcesErrorKind::Unhandled(_inner) => Some(_inner.as_ref()), } } } #[non_exhaustive] #[derive(std::fmt::Debug)] pub struct TagError { pub kind: TagErrorKind, pub(crate) meta: smithy_types::Error, } #[non_exhaustive] #[derive(std::fmt::Debug)] pub enum TagErrorKind { BadRequestException(crate::error::BadRequestException), ForbiddenException(crate::error::ForbiddenException), InternalServerErrorException(crate::error::InternalServerErrorException), MethodNotAllowedException(crate::error::MethodNotAllowedException), NotFoundException(crate::error::NotFoundException), TooManyRequestsException(crate::error::TooManyRequestsException), /// An unexpected error, eg. 
invalid JSON returned by the service or an unknown error code Unhandled(Box<dyn std::error::Error + Send + Sync + 'static>), } impl std::fmt::Display for TagError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.kind { TagErrorKind::BadRequestException(_inner) => _inner.fmt(f), TagErrorKind::ForbiddenException(_inner) => _inner.fmt(f), TagErrorKind::InternalServerErrorException(_inner) => _inner.fmt(f), TagErrorKind::MethodNotAllowedException(_inner) => _inner.fmt(f), TagErrorKind::NotFoundException(_inner) => _inner.fmt(f), TagErrorKind::TooManyRequestsException(_inner) => _inner.fmt(f), TagErrorKind::Unhandled(_inner) => _inner.fmt(f), } } } impl smithy_types::retry::ProvideErrorKind for TagError { fn code(&self) -> Option<&str> { TagError::code(self) } fn retryable_error_kind(&self) -> Option<smithy_types::retry::ErrorKind> { None } } impl TagError { pub fn new(kind: TagErrorKind, meta: smithy_types::Error) -> Self { Self { kind, meta } } pub fn unhandled(err: impl Into<Box<dyn std::error::Error + Send + Sync + 'static>>) -> Self { Self { kind: TagErrorKind::Unhandled(err.into()), meta: Default::default(), } } pub fn generic(err: smithy_types::Error) -> Self { Self { meta: err.clone(), kind: TagErrorKind::Unhandled(err.into()), } } // Consider if this should actually be `Option<Cow<&str>>`. This would enable us to use display // as implemented by std::Error to generate a message in that case. pub fn message(&self) -> Option<&str> { self.meta.message() } pub fn meta(&self) -> &smithy_types::Error { &self.meta } pub fn request_id(&self) -> Option<&str> { self.meta.request_id() } pub fn code(&self) -> Option<&str> { self.meta.code() } pub fn is_bad_request_exception(&self) -> bool { matches!(&self.kind, TagErrorKind::BadRequestException(_)) } pub fn is_forbidden_exception(&self) -> bool { matches!(&self.kind, TagErrorKind::ForbiddenException(_)) } pub fn is_internal_server_error_exception(&self) -> bool { matches!(&self.kind, TagErrorKind::InternalServerErrorException(_)) } pub fn is_method_not_allowed_exception(&self) -> bool { matches!(&self.kind, TagErrorKind::MethodNotAllowedException(_)) } pub fn is_not_found_exception(&self) -> bool { matches!(&self.kind, TagErrorKind::NotFoundException(_)) } pub fn is_too_many_requests_exception(&self) -> bool { matches!(&self.kind, TagErrorKind::TooManyRequestsException(_)) } } impl std::error::Error for TagError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match &self.kind { TagErrorKind::BadRequestException(_inner) => Some(_inner), TagErrorKind::ForbiddenException(_inner) => Some(_inner), TagErrorKind::InternalServerErrorException(_inner) => Some(_inner), TagErrorKind::MethodNotAllowedException(_inner) => Some(_inner), TagErrorKind::NotFoundException(_inner) => Some(_inner), TagErrorKind::TooManyRequestsException(_inner) => Some(_inner), TagErrorKind::Unhandled(_inner) => Some(_inner.as_ref()), } } } #[non_exhaustive] #[derive(std::fmt::Debug)] pub struct UngroupResourcesError { pub kind: UngroupResourcesErrorKind, pub(crate) meta: smithy_types::Error, } #[non_exhaustive] #[derive(std::fmt::Debug)] pub enum UngroupResourcesErrorKind { BadRequestException(crate::error::BadRequestException), ForbiddenException(crate::error::ForbiddenException), InternalServerErrorException(crate::error::InternalServerErrorException), MethodNotAllowedException(crate::error::MethodNotAllowedException), NotFoundException(crate::error::NotFoundException), 
TooManyRequestsException(crate::error::TooManyRequestsException), /// An unexpected error, eg. invalid JSON returned by the service or an unknown error code Unhandled(Box<dyn std::error::Error + Send + Sync + 'static>), } impl std::fmt::Display for UngroupResourcesError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.kind { UngroupResourcesErrorKind::BadRequestException(_inner) => _inner.fmt(f), UngroupResourcesErrorKind::ForbiddenException(_inner) => _inner.fmt(f), UngroupResourcesErrorKind::InternalServerErrorException(_inner) => _inner.fmt(f), UngroupResourcesErrorKind::MethodNotAllowedException(_inner) => _inner.fmt(f), UngroupResourcesErrorKind::NotFoundException(_inner) => _inner.fmt(f), UngroupResourcesErrorKind::TooManyRequestsException(_inner) => _inner.fmt(f), UngroupResourcesErrorKind::Unhandled(_inner) => _inner.fmt(f), } } } impl smithy_types::retry::ProvideErrorKind for UngroupResourcesError { fn code(&self) -> Option<&str> { UngroupResourcesError::code(self) } fn retryable_error_kind(&self) -> Option<smithy_types::retry::ErrorKind> { None } } impl UngroupResourcesError { pub fn new(kind: UngroupResourcesErrorKind, meta: smithy_types::Error) -> Self { Self { kind, meta } } pub fn unhandled(err: impl Into<Box<dyn std::error::Error + Send + Sync + 'static>>) -> Self { Self { kind: UngroupResourcesErrorKind::Unhandled(err.into()), meta: Default::default(), } } pub fn generic(err: smithy_types::Error) -> Self { Self { meta: err.clone(), kind: UngroupResourcesErrorKind::Unhandled(err.into()), } } // Consider if this should actually be `Option<Cow<&str>>`. This would enable us to use display // as implemented by std::Error to generate a message in that case. pub fn message(&self) -> Option<&str> { self.meta.message() } pub fn meta(&self) -> &smithy_types::Error { &self.meta } pub fn request_id(&self) -> Option<&str> { self.meta.request_id() } pub fn code(&self) -> Option<&str> { self.meta.code() } pub fn is_bad_request_exception(&self) -> bool { matches!( &self.kind, UngroupResourcesErrorKind::BadRequestException(_) ) } pub fn is_forbidden_exception(&self) -> bool { matches!(&self.kind, UngroupResourcesErrorKind::ForbiddenException(_)) } pub fn is_internal_server_error_exception(&self) -> bool { matches!( &self.kind, UngroupResourcesErrorKind::InternalServerErrorException(_) ) } pub fn is_method_not_allowed_exception(&self) -> bool { matches!( &self.kind, UngroupResourcesErrorKind::MethodNotAllowedException(_) ) } pub fn is_not_found_exception(&self) -> bool { matches!(&self.kind, UngroupResourcesErrorKind::NotFoundException(_)) } pub fn is_too_many_requests_exception(&self) -> bool { matches!( &self.kind, UngroupResourcesErrorKind::TooManyRequestsException(_) ) } } impl std::error::Error for UngroupResourcesError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match &self.kind { UngroupResourcesErrorKind::BadRequestException(_inner) => Some(_inner), UngroupResourcesErrorKind::ForbiddenException(_inner) => Some(_inner), UngroupResourcesErrorKind::InternalServerErrorException(_inner) => Some(_inner), UngroupResourcesErrorKind::MethodNotAllowedException(_inner) => Some(_inner), UngroupResourcesErrorKind::NotFoundException(_inner) => Some(_inner), UngroupResourcesErrorKind::TooManyRequestsException(_inner) => Some(_inner), UngroupResourcesErrorKind::Unhandled(_inner) => Some(_inner.as_ref()), } } } #[non_exhaustive] #[derive(std::fmt::Debug)] pub struct UntagError { pub kind: UntagErrorKind, pub(crate) meta: smithy_types::Error, } 
#[non_exhaustive] #[derive(std::fmt::Debug)] pub enum UntagErrorKind { BadRequestException(crate::error::BadRequestException), ForbiddenException(crate::error::ForbiddenException), InternalServerErrorException(crate::error::InternalServerErrorException), MethodNotAllowedException(crate::error::MethodNotAllowedException), NotFoundException(crate::error::NotFoundException), TooManyRequestsException(crate::error::TooManyRequestsException), /// An unexpected error, eg. invalid JSON returned by the service or an unknown error code Unhandled(Box<dyn std::error::Error + Send + Sync + 'static>), } impl std::fmt::Display for UntagError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.kind { UntagErrorKind::BadRequestException(_inner) => _inner.fmt(f), UntagErrorKind::ForbiddenException(_inner) => _inner.fmt(f), UntagErrorKind::InternalServerErrorException(_inner) => _inner.fmt(f), UntagErrorKind::MethodNotAllowedException(_inner) => _inner.fmt(f), UntagErrorKind::NotFoundException(_inner) => _inner.fmt(f), UntagErrorKind::TooManyRequestsException(_inner) => _inner.fmt(f), UntagErrorKind::Unhandled(_inner) => _inner.fmt(f), } } } impl smithy_types::retry::ProvideErrorKind for UntagError { fn code(&self) -> Option<&str> { UntagError::code(self) } fn retryable_error_kind(&self) -> Option<smithy_types::retry::ErrorKind> { None } } impl UntagError { pub fn new(kind: UntagErrorKind, meta: smithy_types::Error) -> Self { Self { kind, meta } } pub fn unhandled(err: impl Into<Box<dyn std::error::Error + Send + Sync + 'static>>) -> Self { Self { kind: UntagErrorKind::Unhandled(err.into()), meta: Default::default(), } } pub fn generic(err: smithy_types::Error) -> Self { Self { meta: err.clone(), kind: UntagErrorKind::Unhandled(err.into()), } } // Consider if this should actually be `Option<Cow<&str>>`. This would enable us to use display // as implemented by std::Error to generate a message in that case. 
pub fn message(&self) -> Option<&str> { self.meta.message() } pub fn meta(&self) -> &smithy_types::Error { &self.meta } pub fn request_id(&self) -> Option<&str> { self.meta.request_id() } pub fn code(&self) -> Option<&str> { self.meta.code() } pub fn is_bad_request_exception(&self) -> bool { matches!(&self.kind, UntagErrorKind::BadRequestException(_)) } pub fn is_forbidden_exception(&self) -> bool { matches!(&self.kind, UntagErrorKind::ForbiddenException(_)) } pub fn is_internal_server_error_exception(&self) -> bool { matches!(&self.kind, UntagErrorKind::InternalServerErrorException(_)) } pub fn is_method_not_allowed_exception(&self) -> bool { matches!(&self.kind, UntagErrorKind::MethodNotAllowedException(_)) } pub fn is_not_found_exception(&self) -> bool { matches!(&self.kind, UntagErrorKind::NotFoundException(_)) } pub fn is_too_many_requests_exception(&self) -> bool { matches!(&self.kind, UntagErrorKind::TooManyRequestsException(_)) } } impl std::error::Error for UntagError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match &self.kind { UntagErrorKind::BadRequestException(_inner) => Some(_inner), UntagErrorKind::ForbiddenException(_inner) => Some(_inner), UntagErrorKind::InternalServerErrorException(_inner) => Some(_inner), UntagErrorKind::MethodNotAllowedException(_inner) => Some(_inner), UntagErrorKind::NotFoundException(_inner) => Some(_inner), UntagErrorKind::TooManyRequestsException(_inner) => Some(_inner), UntagErrorKind::Unhandled(_inner) => Some(_inner.as_ref()), } } } #[non_exhaustive] #[derive(std::fmt::Debug)] pub struct UpdateGroupError { pub kind: UpdateGroupErrorKind, pub(crate) meta: smithy_types::Error, } #[non_exhaustive] #[derive(std::fmt::Debug)] pub enum UpdateGroupErrorKind { BadRequestException(crate::error::BadRequestException), ForbiddenException(crate::error::ForbiddenException), InternalServerErrorException(crate::error::InternalServerErrorException), MethodNotAllowedException(crate::error::MethodNotAllowedException), NotFoundException(crate::error::NotFoundException), TooManyRequestsException(crate::error::TooManyRequestsException), /// An unexpected error, eg. 
invalid JSON returned by the service or an unknown error code Unhandled(Box<dyn std::error::Error + Send + Sync + 'static>), } impl std::fmt::Display for UpdateGroupError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.kind { UpdateGroupErrorKind::BadRequestException(_inner) => _inner.fmt(f), UpdateGroupErrorKind::ForbiddenException(_inner) => _inner.fmt(f), UpdateGroupErrorKind::InternalServerErrorException(_inner) => _inner.fmt(f), UpdateGroupErrorKind::MethodNotAllowedException(_inner) => _inner.fmt(f), UpdateGroupErrorKind::NotFoundException(_inner) => _inner.fmt(f), UpdateGroupErrorKind::TooManyRequestsException(_inner) => _inner.fmt(f), UpdateGroupErrorKind::Unhandled(_inner) => _inner.fmt(f), } } } impl smithy_types::retry::ProvideErrorKind for UpdateGroupError { fn code(&self) -> Option<&str> { UpdateGroupError::code(self) } fn retryable_error_kind(&self) -> Option<smithy_types::retry::ErrorKind> { None } } impl UpdateGroupError { pub fn new(kind: UpdateGroupErrorKind, meta: smithy_types::Error) -> Self { Self { kind, meta } } pub fn unhandled(err: impl Into<Box<dyn std::error::Error + Send + Sync + 'static>>) -> Self { Self { kind: UpdateGroupErrorKind::Unhandled(err.into()), meta: Default::default(), } } pub fn generic(err: smithy_types::Error) -> Self { Self { meta: err.clone(), kind: UpdateGroupErrorKind::Unhandled(err.into()), } } // Consider if this should actually be `Option<Cow<&str>>`. This would enable us to use display // as implemented by std::Error to generate a message in that case. pub fn message(&self) -> Option<&str> { self.meta.message() } pub fn meta(&self) -> &smithy_types::Error { &self.meta } pub fn request_id(&self) -> Option<&str> { self.meta.request_id() } pub fn code(&self) -> Option<&str> { self.meta.code() } pub fn is_bad_request_exception(&self) -> bool { matches!(&self.kind, UpdateGroupErrorKind::BadRequestException(_)) } pub fn is_forbidden_exception(&self) -> bool { matches!(&self.kind, UpdateGroupErrorKind::ForbiddenException(_)) } pub fn is_internal_server_error_exception(&self) -> bool { matches!( &self.kind, UpdateGroupErrorKind::InternalServerErrorException(_) ) } pub fn is_method_not_allowed_exception(&self) -> bool { matches!( &self.kind, UpdateGroupErrorKind::MethodNotAllowedException(_) ) } pub fn is_not_found_exception(&self) -> bool { matches!(&self.kind, UpdateGroupErrorKind::NotFoundException(_)) } pub fn is_too_many_requests_exception(&self) -> bool { matches!( &self.kind, UpdateGroupErrorKind::TooManyRequestsException(_) ) } } impl std::error::Error for UpdateGroupError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match &self.kind { UpdateGroupErrorKind::BadRequestException(_inner) => Some(_inner), UpdateGroupErrorKind::ForbiddenException(_inner) => Some(_inner), UpdateGroupErrorKind::InternalServerErrorException(_inner) => Some(_inner), UpdateGroupErrorKind::MethodNotAllowedException(_inner) => Some(_inner), UpdateGroupErrorKind::NotFoundException(_inner) => Some(_inner), UpdateGroupErrorKind::TooManyRequestsException(_inner) => Some(_inner), UpdateGroupErrorKind::Unhandled(_inner) => Some(_inner.as_ref()), } } } #[non_exhaustive] #[derive(std::fmt::Debug)] pub struct UpdateGroupQueryError { pub kind: UpdateGroupQueryErrorKind, pub(crate) meta: smithy_types::Error, } #[non_exhaustive] #[derive(std::fmt::Debug)] pub enum UpdateGroupQueryErrorKind { BadRequestException(crate::error::BadRequestException), ForbiddenException(crate::error::ForbiddenException), 
InternalServerErrorException(crate::error::InternalServerErrorException), MethodNotAllowedException(crate::error::MethodNotAllowedException), NotFoundException(crate::error::NotFoundException), TooManyRequestsException(crate::error::TooManyRequestsException), /// An unexpected error, eg. invalid JSON returned by the service or an unknown error code Unhandled(Box<dyn std::error::Error + Send + Sync + 'static>), } impl std::fmt::Display for UpdateGroupQueryError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.kind { UpdateGroupQueryErrorKind::BadRequestException(_inner) => _inner.fmt(f), UpdateGroupQueryErrorKind::ForbiddenException(_inner) => _inner.fmt(f), UpdateGroupQueryErrorKind::InternalServerErrorException(_inner) => _inner.fmt(f), UpdateGroupQueryErrorKind::MethodNotAllowedException(_inner) => _inner.fmt(f), UpdateGroupQueryErrorKind::NotFoundException(_inner) => _inner.fmt(f), UpdateGroupQueryErrorKind::TooManyRequestsException(_inner) => _inner.fmt(f), UpdateGroupQueryErrorKind::Unhandled(_inner) => _inner.fmt(f), } } } impl smithy_types::retry::ProvideErrorKind for UpdateGroupQueryError { fn code(&self) -> Option<&str> { UpdateGroupQueryError::code(self) } fn retryable_error_kind(&self) -> Option<smithy_types::retry::ErrorKind> { None } } impl UpdateGroupQueryError { pub fn new(kind: UpdateGroupQueryErrorKind, meta: smithy_types::Error) -> Self { Self { kind, meta } } pub fn unhandled(err: impl Into<Box<dyn std::error::Error + Send + Sync + 'static>>) -> Self { Self { kind: UpdateGroupQueryErrorKind::Unhandled(err.into()), meta: Default::default(), } } pub fn generic(err: smithy_types::Error) -> Self { Self { meta: err.clone(), kind: UpdateGroupQueryErrorKind::Unhandled(err.into()), } } // Consider if this should actually be `Option<Cow<&str>>`. This would enable us to use display // as implemented by std::Error to generate a message in that case. 
pub fn message(&self) -> Option<&str> { self.meta.message() } pub fn meta(&self) -> &smithy_types::Error { &self.meta } pub fn request_id(&self) -> Option<&str> { self.meta.request_id() } pub fn code(&self) -> Option<&str> { self.meta.code() } pub fn is_bad_request_exception(&self) -> bool { matches!( &self.kind, UpdateGroupQueryErrorKind::BadRequestException(_) ) } pub fn is_forbidden_exception(&self) -> bool { matches!(&self.kind, UpdateGroupQueryErrorKind::ForbiddenException(_)) } pub fn is_internal_server_error_exception(&self) -> bool { matches!( &self.kind, UpdateGroupQueryErrorKind::InternalServerErrorException(_) ) } pub fn is_method_not_allowed_exception(&self) -> bool { matches!( &self.kind, UpdateGroupQueryErrorKind::MethodNotAllowedException(_) ) } pub fn is_not_found_exception(&self) -> bool { matches!(&self.kind, UpdateGroupQueryErrorKind::NotFoundException(_)) } pub fn is_too_many_requests_exception(&self) -> bool { matches!( &self.kind, UpdateGroupQueryErrorKind::TooManyRequestsException(_) ) } } impl std::error::Error for UpdateGroupQueryError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match &self.kind { UpdateGroupQueryErrorKind::BadRequestException(_inner) => Some(_inner), UpdateGroupQueryErrorKind::ForbiddenException(_inner) => Some(_inner), UpdateGroupQueryErrorKind::InternalServerErrorException(_inner) => Some(_inner), UpdateGroupQueryErrorKind::MethodNotAllowedException(_inner) => Some(_inner), UpdateGroupQueryErrorKind::NotFoundException(_inner) => Some(_inner), UpdateGroupQueryErrorKind::TooManyRequestsException(_inner) => Some(_inner), UpdateGroupQueryErrorKind::Unhandled(_inner) => Some(_inner.as_ref()), } } } /// <p>You've exceeded throttling limits by making too many requests in a period of /// time.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct TooManyRequestsException { pub message: std::option::Option<std::string::String>, } impl std::fmt::Debug for TooManyRequestsException { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("TooManyRequestsException"); formatter.field("message", &self.message); formatter.finish() } } impl TooManyRequestsException { pub fn message(&self) -> Option<&str> { self.message.as_deref() } } impl std::fmt::Display for TooManyRequestsException { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "TooManyRequestsException")?; if let Some(inner_1) = &self.message { write!(f, ": {}", inner_1)?; } Ok(()) } } impl std::error::Error for TooManyRequestsException {} /// See [`TooManyRequestsException`](crate::error::TooManyRequestsException) pub mod too_many_requests_exception { /// A builder for [`TooManyRequestsException`](crate::error::TooManyRequestsException) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) message: std::option::Option<std::string::String>, } impl Builder { pub fn message(mut self, input: impl Into<std::string::String>) -> Self { self.message = Some(input.into()); self } pub fn set_message(mut self, input: std::option::Option<std::string::String>) -> Self { self.message = input; self } /// Consumes the builder and constructs a [`TooManyRequestsException`](crate::error::TooManyRequestsException) pub fn build(self) -> crate::error::TooManyRequestsException { crate::error::TooManyRequestsException { message: self.message, } } } } impl TooManyRequestsException { /// Creates a new builder-style 
object to manufacture [`TooManyRequestsException`](crate::error::TooManyRequestsException) pub fn builder() -> crate::error::too_many_requests_exception::Builder { crate::error::too_many_requests_exception::Builder::default() } } /// <p>One or more of the specified resources don't exist.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct NotFoundException { pub message: std::option::Option<std::string::String>, } impl std::fmt::Debug for NotFoundException { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("NotFoundException"); formatter.field("message", &self.message); formatter.finish() } } impl NotFoundException { pub fn message(&self) -> Option<&str> { self.message.as_deref() } } impl std::fmt::Display for NotFoundException { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "NotFoundException")?; if let Some(inner_2) = &self.message { write!(f, ": {}", inner_2)?; } Ok(()) } } impl std::error::Error for NotFoundException {} /// See [`NotFoundException`](crate::error::NotFoundException) pub mod not_found_exception { /// A builder for [`NotFoundException`](crate::error::NotFoundException) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) message: std::option::Option<std::string::String>, } impl Builder { pub fn message(mut self, input: impl Into<std::string::String>) -> Self { self.message = Some(input.into()); self } pub fn set_message(mut self, input: std::option::Option<std::string::String>) -> Self { self.message = input; self } /// Consumes the builder and constructs a [`NotFoundException`](crate::error::NotFoundException) pub fn build(self) -> crate::error::NotFoundException { crate::error::NotFoundException { message: self.message, } } } } impl NotFoundException { /// Creates a new builder-style object to manufacture [`NotFoundException`](crate::error::NotFoundException) pub fn builder() -> crate::error::not_found_exception::Builder { crate::error::not_found_exception::Builder::default() } } /// <p>The request uses an HTTP method that isn't allowed for the specified resource.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct MethodNotAllowedException { pub message: std::option::Option<std::string::String>, } impl std::fmt::Debug for MethodNotAllowedException { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("MethodNotAllowedException"); formatter.field("message", &self.message); formatter.finish() } } impl MethodNotAllowedException { pub fn message(&self) -> Option<&str> { self.message.as_deref() } } impl std::fmt::Display for MethodNotAllowedException { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "MethodNotAllowedException")?; if let Some(inner_3) = &self.message { write!(f, ": {}", inner_3)?; } Ok(()) } } impl std::error::Error for MethodNotAllowedException {} /// See [`MethodNotAllowedException`](crate::error::MethodNotAllowedException) pub mod method_not_allowed_exception { /// A builder for [`MethodNotAllowedException`](crate::error::MethodNotAllowedException) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) message: std::option::Option<std::string::String>, } impl Builder { pub fn message(mut self, input: impl Into<std::string::String>) -> Self { self.message = 
Some(input.into()); self } pub fn set_message(mut self, input: std::option::Option<std::string::String>) -> Self { self.message = input; self } /// Consumes the builder and constructs a [`MethodNotAllowedException`](crate::error::MethodNotAllowedException) pub fn build(self) -> crate::error::MethodNotAllowedException { crate::error::MethodNotAllowedException { message: self.message, } } } } impl MethodNotAllowedException { /// Creates a new builder-style object to manufacture [`MethodNotAllowedException`](crate::error::MethodNotAllowedException) pub fn builder() -> crate::error::method_not_allowed_exception::Builder { crate::error::method_not_allowed_exception::Builder::default() } } /// <p>An internal error occurred while processing the request. Try again later.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct InternalServerErrorException { pub message: std::option::Option<std::string::String>, } impl std::fmt::Debug for InternalServerErrorException { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("InternalServerErrorException"); formatter.field("message", &self.message); formatter.finish() } } impl InternalServerErrorException { pub fn message(&self) -> Option<&str> { self.message.as_deref() } } impl std::fmt::Display for InternalServerErrorException { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "InternalServerErrorException")?; if let Some(inner_4) = &self.message { write!(f, ": {}", inner_4)?; } Ok(()) } } impl std::error::Error for InternalServerErrorException {} /// See [`InternalServerErrorException`](crate::error::InternalServerErrorException) pub mod internal_server_error_exception { /// A builder for [`InternalServerErrorException`](crate::error::InternalServerErrorException) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct
{ pub(crate) message: std::option::Option<std::string::String>, } impl Builder { pub fn message(mut self, input: impl Into<std::string::String>) -> Self { self.message = Some(input.into()); self } pub fn set_message(mut self, input: std::option::Option<std::string::String>) -> Self { self.message = input; self } /// Consumes the builder and constructs a [`InternalServerErrorException`](crate::error::InternalServerErrorException) pub fn build(self) -> crate::error::InternalServerErrorException { crate::error::InternalServerErrorException { message: self.message, } } } } impl InternalServerErrorException { /// Creates a new builder-style object to manufacture [`InternalServerErrorException`](crate::error::InternalServerErrorException) pub fn builder() -> crate::error::internal_server_error_exception::Builder { crate::error::internal_server_error_exception::Builder::default() } } /// <p>The caller isn't authorized to make the request. Check permissions.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct ForbiddenException { pub message: std::option::Option<std::string::String>, } impl std::fmt::Debug for ForbiddenException { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("ForbiddenException"); formatter.field("message", &self.message); formatter.finish() } } impl ForbiddenException { pub fn message(&self) -> Option<&str> { self.message.as_deref() } } impl std::fmt::Display for ForbiddenException { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "ForbiddenException")?; if let Some(inner_5) = &self.message { write!(f, ": {}", inner_5)?; } Ok(()) } } impl std::error::Error for ForbiddenException {} /// See [`ForbiddenException`](crate::error::ForbiddenException) pub mod forbidden_exception { /// A builder for [`ForbiddenException`](crate::error::ForbiddenException) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) message: std::option::Option<std::string::String>, } impl Builder { pub fn message(mut self, input: impl Into<std::string::String>) -> Self { self.message = Some(input.into()); self } pub fn set_message(mut self, input: std::option::Option<std::string::String>) -> Self { self.message = input; self } /// Consumes the builder and constructs a [`ForbiddenException`](crate::error::ForbiddenException) pub fn build(self) -> crate::error::ForbiddenException { crate::error::ForbiddenException { message: self.message, } } } } impl ForbiddenException { /// Creates a new builder-style object to manufacture [`ForbiddenException`](crate::error::ForbiddenException) pub fn builder() -> crate::error::forbidden_exception::Builder { crate::error::forbidden_exception::Builder::default() } } /// <p>The request includes one or more parameters that violate validation rules.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct BadRequestException { pub message: std::option::Option<std::string::String>, } impl std::fmt::Debug for BadRequestException { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("BadRequestException"); formatter.field("message", &self.message); formatter.finish() } } impl BadRequestException { pub fn message(&self) -> Option<&str> { self.message.as_deref() } } impl std::fmt::Display for BadRequestException { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, 
"BadRequestException")?; if let Some(inner_6) = &self.message { write!(f, ": {}", inner_6)?; } Ok(()) } } impl std::error::Error for BadRequestException {} /// See [`BadRequestException`](crate::error::BadRequestException) pub mod bad_request_exception { /// A builder for [`BadRequestException`](crate::error::BadRequestException) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) message: std::option::Option<std::string::String>, } impl Builder { pub fn message(mut self, input: impl Into<std::string::String>) -> Self { self.message = Some(input.into()); self } pub fn set_message(mut self, input: std::option::Option<std::string::String>) -> Self { self.message = input; self } /// Consumes the builder and constructs a [`BadRequestException`](crate::error::BadRequestException) pub fn build(self) -> crate::error::BadRequestException { crate::error::BadRequestException { message: self.message, } } } } impl BadRequestException { /// Creates a new builder-style object to manufacture [`BadRequestException`](crate::error::BadRequestException) pub fn builder() -> crate::error::bad_request_exception::Builder { crate::error::bad_request_exception::Builder::default() } } /// <p>The request was rejected because it doesn't have valid credentials for the target /// resource.</p> #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct UnauthorizedException { pub message: std::option::Option<std::string::String>, } impl std::fmt::Debug for UnauthorizedException { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("UnauthorizedException"); formatter.field("message", &self.message); formatter.finish() } } impl UnauthorizedException { pub fn message(&self) -> Option<&str> { self.message.as_deref() } } impl std::fmt::Display for UnauthorizedException { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "UnauthorizedException")?; if let Some(inner_7) = &self.message { write!(f, ": {}", inner_7)?; } Ok(()) } } impl std::error::Error for UnauthorizedException {} /// See [`UnauthorizedException`](crate::error::UnauthorizedException) pub mod unauthorized_exception { /// A builder for [`UnauthorizedException`](crate::error::UnauthorizedException) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) message: std::option::Option<std::string::String>, } impl Builder { pub fn message(mut self, input: impl Into<std::string::String>) -> Self { self.message = Some(input.into()); self } pub fn set_message(mut self, input: std::option::Option<std::string::String>) -> Self { self.message = input; self } /// Consumes the builder and constructs a [`UnauthorizedException`](crate::error::UnauthorizedException) pub fn build(self) -> crate::error::UnauthorizedException { crate::error::UnauthorizedException { message: self.message, } } } } impl UnauthorizedException { /// Creates a new builder-style object to manufacture [`UnauthorizedException`](crate::error::UnauthorizedException) pub fn builder() -> crate::error::unauthorized_exception::Builder { crate::error::unauthorized_exception::Builder::default() } }
Builder
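A minimal caller-side sketch of the error API above. It is not part of the generated SDK: the function name and the `Result<(), UntagError>` shape are illustrative assumptions, while the `is_*` predicates, `message()`, and `request_id()` are the accessors defined on `UntagError` above.

// Hypothetical helper; the Ok type of the Untag operation is assumed to be ().
fn report_untag_outcome(untag_result: Result<(), UntagError>) {
    match untag_result {
        Ok(()) => println!("tags removed"),
        // Untagging a missing resource can often be treated as a no-op.
        Err(err) if err.is_not_found_exception() => {
            println!("nothing to untag: {:?}", err.message());
        }
        // Throttled: surface the request id so the failure can be traced.
        Err(err) if err.is_too_many_requests_exception() => {
            eprintln!("throttled, request id: {:?}", err.request_id());
        }
        // Everything else, including UntagErrorKind::Unhandled.
        Err(err) => eprintln!("untag failed: {}", err),
    }
}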
webhdfs.py
#!/usr/bin/env python # Licensed to Cloudera, Inc. under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. Cloudera, Inc. licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Interfaces for Hadoop filesystem access via HttpFs/WebHDFS """ import errno import logging import posixpath import stat import threading import time from django.utils.encoding import smart_str from django.utils.translation import ugettext as _ from desktop.lib.rest import http_client, resource from hadoop.fs import normpath, SEEK_SET, SEEK_CUR, SEEK_END from hadoop.fs.hadoopfs import Hdfs from hadoop.fs.exceptions import WebHdfsException from hadoop.fs.webhdfs_types import WebHdfsStat, WebHdfsContentSummary from hadoop.conf import UPLOAD_CHUNK_SIZE from hadoop.hdfs_site import get_nn_sentry_prefixes import hadoop.conf import desktop.conf DEFAULT_HDFS_SUPERUSER = desktop.conf.DEFAULT_HDFS_SUPERUSER.get() # The number of bytes to read if not specified DEFAULT_READ_SIZE = 1024*1024 # 1MB LOG = logging.getLogger(__name__) class WebHdfs(Hdfs): """ WebHdfs implements the filesystem interface via the WebHDFS rest protocol. """ DEFAULT_USER = desktop.conf.DEFAULT_USER.get() # This should be the user running Hue TRASH_CURRENT = 'Current' def __init__(self, url, fs_defaultfs, logical_name=None, hdfs_superuser=None, security_enabled=False, temp_dir="/tmp", umask=01022): self._url = url self._superuser = hdfs_superuser self._security_enabled = security_enabled self._temp_dir = temp_dir self._umask = umask self._fs_defaultfs = fs_defaultfs self._logical_name = logical_name self._client = self._make_client(url, security_enabled) self._root = resource.Resource(self._client) # To store user info self._thread_local = threading.local() LOG.debug("Initializing Hadoop WebHdfs: %s (security: %s, superuser: %s)" % (self._url, self._security_enabled, self._superuser)) @classmethod def from_config(cls, hdfs_config): fs_defaultfs = hdfs_config.FS_DEFAULTFS.get() return cls(url=_get_service_url(hdfs_config), fs_defaultfs=fs_defaultfs, logical_name=hdfs_config.LOGICAL_NAME.get(), security_enabled=hdfs_config.SECURITY_ENABLED.get(), temp_dir=hdfs_config.TEMP_DIR.get(), umask=hdfs_config.UMASK.get()) def __str__(self): return "WebHdfs at %s" % self._url def _make_client(self, url, security_enabled): client = http_client.HttpClient( url, exc_class=WebHdfsException, logger=LOG) if security_enabled: client.set_kerberos_auth() return client @property def uri(self): return self._url @property def logical_name(self): return self._logical_name @classmethod def is_sentry_managed(cls, path): prefixes = get_nn_sentry_prefixes().split(',') return any([path.startswith(p) for p in prefixes if p]) @property def fs_defaultfs(self): return self._fs_defaultfs @property def umask(self): return self._umask @property def security_enabled(self): return self._security_enabled @property def superuser(self): if self._superuser is None: try: # The owner of '/' is 
usually the superuser sb = self.stats('/') self._superuser = sb.user except Exception, ex: LOG.exception('Failed to determine superuser of %s: %s' % (self, ex)) self._superuser = DEFAULT_HDFS_SUPERUSER return self._superuser @property def user(self): try: return self._thread_local.user except AttributeError: return WebHdfs.DEFAULT_USER @property def trash_path(self): return self.join(self.get_home_dir(), '.Trash') @property def current_trash_path(self): return self.join(self.trash_path, self.TRASH_CURRENT) def _getparams(self): return { "user.name" : WebHdfs.DEFAULT_USER, "doas" : self.user } def setuser(self, user): """Set a new user. Return the current user.""" curr = self.user self._thread_local.user = user return curr def listdir_stats(self, path, glob=None): """ listdir_stats(path, glob=None) -> [ WebHdfsStat ] Get directory listing with stats. """ path = Hdfs.normpath(path) params = self._getparams() if glob is not None: params['filter'] = glob params['op'] = 'LISTSTATUS' json = self._root.get(path, params) filestatus_list = json['FileStatuses']['FileStatus'] return [ WebHdfsStat(st, path) for st in filestatus_list ] def listdir(self, path, glob=None): """ listdir(path, glob=None) -> [ entry names ] Get directory entry names without stats. """ dirents = self.listdir_stats(path, glob) return [Hdfs.basename(x.path) for x in dirents] def get_content_summary(self, path): """ get_content_summary(path) -> WebHdfsContentSummary """ path = Hdfs.normpath(path) params = self._getparams() params['op'] = 'GETCONTENTSUMMARY' json = self._root.get(path, params) return WebHdfsContentSummary(json['ContentSummary']) def _stats(self, path): """This version of stats returns None if the entry is not found""" path = Hdfs.normpath(path) params = self._getparams() params['op'] = 'GETFILESTATUS' try: json = self._root.get(path, params) return WebHdfsStat(json['FileStatus'], path) except WebHdfsException, ex: if ex.server_exc == 'FileNotFoundException' or ex.code == 404: return None raise ex def stats(self, path): """ stats(path) -> WebHdfsStat """ res = self._stats(path) if res is not None: return res raise IOError(errno.ENOENT, _("File %s not found") % path) def exists(self, path): return self._stats(path) is not None def isdir(self, path): sb = self._stats(path) if sb is None: return False return sb.isDir def isfile(self, path): sb = self._stats(path) if sb is None: return False return not sb.isDir def _ensure_current_trash_directory(self): """Create trash directory for a user if it doesn't exist.""" if not self.exists(self.current_trash_path): self.mkdir(self.current_trash_path) return self.current_trash_path def _trash(self, path, recursive=False): """ _trash(path, recursive=False) Move a file or directory to trash. Will create a timestamped directory underneath /user/<username>/.Trash. Trash must be enabled for this to work. 
""" if not self.exists(path): raise IOError(errno.ENOENT, _("File %s not found") % path) if not recursive and self.isdir(path): raise IOError(errno.EISDIR, _("File %s is a directory") % path) if path.startswith(self.trash_path): raise IOError(errno.EPERM, _("File %s is already trashed") % path) # Make path (with timestamp suffix if necessary) base_trash_path = self.join(self._ensure_current_trash_directory(), path[1:]) trash_path = base_trash_path while self.exists(trash_path): trash_path = base_trash_path + str(time.time()) # Move path to trash path self.mkdir(self.dirname(trash_path)) self.rename(path, trash_path) def _delete(self, path, recursive=False): """ _delete(path, recursive=False) Delete a file or directory. """ path = Hdfs.normpath(path) params = self._getparams() params['op'] = 'DELETE' params['recursive'] = recursive and 'true' or 'false' result = self._root.delete(path, params) # This part of the API is nonsense. # The lack of exception should indicate success. if not result['boolean']: raise IOError(_('Delete failed: %s') % path) def remove(self, path, skip_trash=False): """Delete a file.""" if skip_trash: self._delete(path, recursive=False) else: self._trash(path, recursive=False) def rmdir(self, path, skip_trash=False): """Delete a directory.""" self.remove(path, skip_trash) def rmtree(self, path, skip_trash=False): """Delete a tree recursively.""" if skip_trash: self._delete(path, recursive=True) else: self._trash(path, recursive=True) def restore(self, path): """ restore(path) The root of ``path`` will be /users/<current user>/.Trash/<timestamp>. Removing the root from ``path`` will provide the original path. Ensure parent directories exist and rename path. """ if not path.startswith(self.trash_path): raise IOError(errno.EPERM, _("File %s is not in trash") % path) # Build original path original_path = [] split_path = self.split(path) while split_path[0] != self.trash_path: original_path.append(split_path[1]) split_path = self.split(split_path[0]) original_path.reverse() original_path = self.join(posixpath.sep, *original_path) # move to original path # the path could have been expunged. if self.exists(original_path): raise IOError(errno.EEXIST, _("Path %s already exists.") % str(smart_str(original_path))) self.rename(path, original_path) def purge_trash(self): """ purge_trash() Purge all trash in users ``trash_path`` """ for timestamped_directory in self.listdir(self.trash_path): self.rmtree(self.join(self.trash_path, timestamped_directory), True) def mkdir(self, path, mode=None): """ mkdir(path, mode=None) Creates a directory and any parent directory if necessary. 
""" path = Hdfs.normpath(path) params = self._getparams() params['op'] = 'MKDIRS' if mode is None: mode = self.getDefaultDirPerms() params['permission'] = safe_octal(mode) success = self._root.put(path, params) if not success: raise IOError(_("Mkdir failed: %s") % path) def rename(self, old, new): """rename(old, new)""" old = Hdfs.normpath(old) if not new.startswith('/'): new = Hdfs.join(Hdfs.dirname(old), new) new = Hdfs.normpath(new) params = self._getparams() params['op'] = 'RENAME' # Encode `new' because it's in the params params['destination'] = smart_str(new) result = self._root.put(old, params) if not result['boolean']: raise IOError(_("Rename failed: %s -> %s") % (str(smart_str(old)), str(smart_str(new)))) def rename_star(self, old_dir, new_dir): """Equivalent to `mv old_dir/* new""" if not self.isdir(old_dir): raise IOError(errno.ENOTDIR, _("'%s' is not a directory") % old_dir) if not self.exists(new_dir): self.mkdir(new_dir) elif not self.isdir(new_dir): raise IOError(errno.ENOTDIR, _("'%s' is not a directory") % new_dir) ls = self.listdir(old_dir) for dirent in ls: self.rename(Hdfs.join(old_dir, dirent), Hdfs.join(new_dir, dirent)) def chown(self, path, user=None, group=None, recursive=False): """chown(path, user=None, group=None, recursive=False)""" path = Hdfs.normpath(path) params = self._getparams() params['op'] = 'SETOWNER' if user is not None: params['owner'] = user if group is not None: params['group'] = group if recursive: for xpath in self.listdir_recursive(path): self._root.put(xpath, params) else: self._root.put(path, params) def chmod(self, path, mode, recursive=False): """ chmod(path, mode, recursive=False) `mode' should be an octal integer or string. """ path = Hdfs.normpath(path) params = self._getparams() params['op'] = 'SETPERMISSION' params['permission'] = safe_octal(mode) if recursive: for xpath in self.listdir_recursive(path): self._root.put(xpath, params) else: self._root.put(path, params) def get_home_dir(self): """get_home_dir() -> Home directory for the current user""" params = self._getparams() params['op'] = 'GETHOMEDIRECTORY' res = self._root.get(params=params) return res['Path'] def read(self, path, offset, length, bufsize=None): """ read(path, offset, length[, bufsize]) -> data Read data from a file. """ path = Hdfs.normpath(path) params = self._getparams() params['op'] = 'OPEN' params['offset'] = long(offset) params['length'] = long(length) if bufsize is not None: params['bufsize'] = bufsize try: return self._root.get(path, params) except WebHdfsException, ex: if "out of the range" in ex.message: return "" raise ex def open(self, path, mode='r'): """ DEPRECATED! open(path, mode='r') -> File object This exists for legacy support and backwards compatibility only. Please use read(). """ return File(self, path, mode) def getDefaultFilePerms(self): return 0666 & (01777 ^ self.umask) def getDefaultDirPerms(self): return 01777 & (01777 ^ self.umask) def create(self, path, overwrite=False, blocksize=None, replication=None, permission=None, data=None): """ create(path, overwrite=False, blocksize=None, replication=None, permission=None) Creates a file with the specified parameters. `permission' should be an octal integer or string. 
""" path = Hdfs.normpath(path) params = self._getparams() params['op'] = 'CREATE' params['overwrite'] = overwrite and 'true' or 'false' if blocksize is not None: params['blocksize'] = long(blocksize) if replication is not None: params['replication'] = int(replication) if permission is None: permission = self.getDefaultFilePerms() params['permission'] = safe_octal(permission) self._invoke_with_redirect('PUT', path, params, data) def append(self, path, data): """ append(path, data) Append data to a given file. """ path = Hdfs.normpath(path) params = self._getparams() params['op'] = 'APPEND' self._invoke_with_redirect('POST', path, params, data) # e.g. ACLSPEC = user:joe:rwx,user::rw- def modify_acl_entries(self, path, aclspec): path = Hdfs.normpath(path) params = self._getparams() params['op'] = 'MODIFYACLENTRIES' params['aclspec'] = aclspec return self._root.put(path, params) def remove_acl_entries(self, path, aclspec): path = Hdfs.normpath(path) params = self._getparams() params['op'] = 'REMOVEACLENTRIES' params['aclspec'] = aclspec return self._root.put(path, params) def remove_default_acl(self, path): path = Hdfs.normpath(path) params = self._getparams() params['op'] = 'REMOVEDEFAULTACL' return self._root.put(path, params) def remove_acl(self, path): path = Hdfs.normpath(path) params = self._getparams() params['op'] = 'REMOVEACL' return self._root.put(path, params) def set_acl(self, path, aclspec): path = Hdfs.normpath(path) params = self._getparams() params['op'] = 'SETACL' params['aclspec'] = aclspec return self._root.put(path, params) def get_acl_status(self, path): path = Hdfs.normpath(path) params = self._getparams() params['op'] = 'GETACLSTATUS' return self._root.get(path, params) def copyfile(self, src, dst, skip_header=False): sb = self._stats(src) if sb is None: raise IOError(errno.ENOENT, _("Copy src '%s' does not exist") % src) if sb.isDir: raise IOError(errno.INVAL, _("Copy src '%s' is a directory") % src) if self.isdir(dst): raise IOError(errno.INVAL, _("Copy dst '%s' is a directory") % dst) offset = 0 while True: data = self.read(src, offset, UPLOAD_CHUNK_SIZE.get()) if offset == 0: if skip_header: n = data.index('\n') if n > 0: data = data[n + 1:] self.create(dst, overwrite=True, blocksize=sb.blockSize, replication=sb.replication, permission=oct(stat.S_IMODE(sb.mode)), data=data) if offset != 0: self.append(dst, data) cnt = len(data) if cnt < UPLOAD_CHUNK_SIZE.get(): break offset += cnt def copy_remote_dir(self, source, destination, dir_mode=None, owner=None): if owner is None: owner = self.DEFAULT_USER if dir_mode is None: dir_mode = self.getDefaultDirPerms() self.do_as_user(owner, self.mkdir, destination, mode=dir_mode) for stat in self.listdir_stats(source): source_file = stat.path destination_file = posixpath.join(destination, stat.name) if stat.isDir: self.copy_remote_dir(source_file, destination_file, dir_mode, owner) else: self.do_as_user(owner, self.copyfile, source_file, destination_file) self.do_as_superuser(self.chown, destination_file, owner, owner) def copy(self, src, dest, recursive=False, dir_mode=None, owner=None): """ Copy file, or directory, in HDFS to another location in HDFS. ``src`` -- The directory, or file, to copy from. ``dest`` -- the directory, or file, to copy to. If 'dest' is a directory that exists, copy 'src' into dest. If 'dest' is a file that exists and 'src' is a file, overwrite dest. If 'dest' does not exist, create 'src' as 'dest'. ``recursive`` -- Recursively copy contents of 'src' to 'dest'. This is required for directories. 
``dir_mode`` and ``owner`` are used to define permissions on the newly copied files and directories. This method will overwrite any pre-existing files that collide with what is being copied. Copying a directory to a file is not allowed. """ if owner is None: owner = self.user # Hue was defauling permissions on copying files to the permissions # of the original file, but was not doing the same for directories # changed below for directories to remain consistent if dir_mode is None: sb = self._stats(src) dir_mode=oct(stat.S_IMODE(sb.mode)) src = self.abspath(src) dest = self.abspath(dest) if not self.exists(src): raise IOError(errno.ENOENT, _("File not found: %s") % src) if self.isdir(src): # 'src' is directory. # Skip if not recursive copy and 'src' is directory. if not recursive: LOG.debug("Skipping contents of %s" % src) return None # If 'dest' is a directory change 'dest' # to include 'src' basename. # create 'dest' if it doesn't already exist. if self.exists(dest): if self.isdir(dest): dest = self.join(dest, self.basename(src)) else: raise IOError(errno.EEXIST, _("Destination file %s exists and is not a directory.") % dest) self.do_as_user(owner, self.mkdir, dest, mode=dir_mode) # Copy files in 'src' directory to 'dest'. self.copy_remote_dir(src, dest, dir_mode, owner) else: # 'src' is a file. # If 'dest' is a directory, then copy 'src' into that directory. # Other wise, copy to 'dest'. if self.exists(dest) and self.isdir(dest): self.copyfile(src, self.join(dest, self.basename(src))) else: self.copyfile(src, dest) @staticmethod def urlsplit(url): return Hdfs.urlsplit(url) def get_hdfs_path(self, path): return posixpath.join(self.fs_defaultfs, path.lstrip('/')) def _invoke_with_redirect(self, method, path, params=None, data=None): """ Issue a request, and expect a redirect, and then submit the data to the redirected location. This is used for create, write, etc. Returns the response from the redirected request. """ next_url = None try: # Do not pass data in the first leg. self._root.invoke(method, path, params) except WebHdfsException, ex: # This is expected. We get a 307 redirect. # The following call may throw. next_url = self._get_redirect_url(ex) if next_url is None: raise WebHdfsException( _("Failed to create '%s'. HDFS did not return a redirect") % path) # Now talk to the real thing. The redirect url already includes the params. client = self._make_client(next_url, self.security_enabled) headers = {'Content-Type': 'application/octet-stream'} return resource.Resource(client).invoke(method, data=data, headers=headers) def _get_redirect_url(self, webhdfs_ex): """Retrieve the redirect url from an exception object""" try: # The actual HttpError (307) is wrapped inside http_error = webhdfs_ex.get_parent_ex() if http_error is None: raise webhdfs_ex if http_error.response.status_code not in (301, 302, 303, 307): LOG.error("Response is not a redirect: %s" % webhdfs_ex) raise webhdfs_ex return http_error.response.headers['location'] except Exception, ex: LOG.error("Failed to read redirect from response: %s (%s)" %
"""get_delegation_token(user) -> Delegation token""" # Workaround for HDFS-3988 if self._security_enabled: self.get_home_dir() params = self._getparams() params['op'] = 'GETDELEGATIONTOKEN' params['renewer'] = renewer res = self._root.get(params=params) return res['Token']['urlString'] def do_as_user(self, username, fn, *args, **kwargs): prev_user = self.user try: self.setuser(username) return fn(*args, **kwargs) finally: self.setuser(prev_user) def do_as_superuser(self, fn, *args, **kwargs): return self.do_as_user(self.superuser, fn, *args, **kwargs) def do_recursively(self, fn, path, *args, **kwargs): for stat in self.listdir_stats(path): try: if stat.isDir: self.do_recursively(fn, stat.path, *args, **kwargs) fn(stat.path, *args, **kwargs) except Exception: pass class File(object): """ DEPRECATED! Represent an open file on HDFS. This exists to mirror the old thriftfs interface, for backwards compatibility only. """ def __init__(self, fs, path, mode='r'): self._fs = fs self._path = normpath(path) self._pos = 0 self._mode = mode try: self._stat = fs.stats(path) if self._stat.isDir: raise IOError(errno.EISDIR, _("Is a directory: '%s'") % path) except IOError, ex: if ex.errno == errno.ENOENT and 'w' in self._mode: self._fs.create(self._path) self.stat() else: raise ex def seek(self, offset, whence=0): """Set the file pointer to the given spot. @see file.seek""" if whence == SEEK_SET: self._pos = offset elif whence == SEEK_CUR: self._pos += offset elif whence == SEEK_END: self.stat() self._pos = self._fs.stats(self._path).size + offset else: raise IOError(errno.EINVAL, _("Invalid argument to seek for whence")) def stat(self): self._stat = self._fs.stats(self._path) return self._stat def tell(self): return self._pos def read(self, length=DEFAULT_READ_SIZE): data = self._fs.read(self._path, self._pos, length) self._pos += len(data) return data def write(self, data): """Append the data to the end of the file""" self.append(data) def append(self, data): if 'w' not in self._mode: raise IOError(errno.EINVAL, _("File not open for writing")) self._fs.append(self._path, data=data) def flush(self): pass def close(self): pass def safe_octal(octal_value): """ safe_octal(octal_value) -> octal value in string This correctly handles octal values specified as a string or as a numeric. """ try: return oct(octal_value) except TypeError: return str(octal_value) def _get_service_url(hdfs_config): override = hdfs_config.WEBHDFS_URL.get() if override: return override fs_defaultfs = hdfs_config.FS_DEFAULTFS.get() netloc = Hdfs.urlsplit(fs_defaultfs)[1] host = netloc.split(':')[0] port = hadoop.conf.DEFAULT_NN_HTTP_PORT return "http://%s:%s/webhdfs/v1" % (host, port) def test_fs_configuration(fs_config): """ This is a config validation method. 
Returns a list of [ (config_variable, error_message) ] """ fs = WebHdfs.from_config(fs_config) fs.setuser(fs.superuser) # Access root try: statbuf = fs.stats('/') if statbuf.user != DEFAULT_HDFS_SUPERUSER: return [(fs_config.WEBHDFS_URL, _("Filesystem root '/' should be owned by 'hdfs'"))] except Exception, ex: LOG.info("%s -- Validation error: %s" % (fs, ex)) return [(fs_config.WEBHDFS_URL, _('Failed to access filesystem root'))] # Write a file tmpname = fs.mktemp(prefix='hue_config_validation') try: fs.create(tmpname) except Exception, ex: LOG.info("%s -- Validation error: %s" % (fs, ex)) return [(fs_config.WEBHDFS_URL, _('Failed to create temporary file "%s"') % tmpname)] # Check superuser has super power try: try: fs.chown(tmpname, fs.superuser) except Exception, ex: LOG.info("%s -- Validation error: %s" % (fs, ex)) return [(fs_config.WEBHDFS_URL, 'Failed to chown file. Please make sure that the filesystem root ' 'is owned by the cluster superuser ("hdfs" in most cases).')] finally: try: fs.remove(tmpname) except Exception, ex: LOG.error("Failed to remove '%s': %s" % (tmpname, ex)) return [(fs_config.WEBHDFS_URL, _('Failed to remove temporary file "%s"') % tmpname)] return [ ]
(webhdfs_ex, ex)) raise webhdfs_ex def get_delegation_token(self, renewer):
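A minimal usage sketch for the WebHdfs client above; the HttpFs URL, defaultfs, username, and paths are illustrative assumptions, and a reachable HttpFs/WebHDFS endpoint is required.

# Hypothetical endpoint and paths, shown in the file's Python 2 style.
fs = WebHdfs(url='http://namenode.example.com:14000/webhdfs/v1',
             fs_defaultfs='hdfs://namenode.example.com:8020')
fs.setuser('alice')                      # later requests are sent with doas=alice
if not fs.exists('/user/alice/demo'):
  fs.mkdir('/user/alice/demo')
fs.create('/user/alice/demo/hello.txt', overwrite=True, data='hello')
print fs.read('/user/alice/demo/hello.txt', 0, 5)   # prints 'hello'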
postgresql.go
package repository import ( "database/sql" "errors" "fmt" "strings" "time" "github.com/erizkiatama/rasalibrary-backend/models" "github.com/jmoiron/sqlx" ) type postgreSQLRepository struct { db *sqlx.DB } func NewPostgreSQLRepository(db *sqlx.DB) models.AuthRepository
type DBUserWithProfile struct { UserID uint `db:"user_id"` ProfileID uint `db:"profile_id"` Email string `db:"email"` Password string `db:"password"` IsAdmin bool `db:"is_admin"` Name string `db:"name"` DateOfBirth time.Time `db:"dob"` Address string `db:"address"` Sex string `db:"sex"` PhoneNumber string `db:"phone_number"` ProfilePhoto string `db:"profile_photo"` } func (ths *postgreSQLRepository) CreateUser(user models.User) (uint, uint, error) { dbUser := DBUserWithProfile{ UserID: user.ID, ProfileID: user.Profile.ID, Email: user.Email, Password: user.Password, IsAdmin: user.IsAdmin, Name: user.Profile.Name, DateOfBirth: user.Profile.DateOfBirth, Address: user.Profile.Address, Sex: user.Profile.Sex, PhoneNumber: user.Profile.PhoneNumber, ProfilePhoto: user.Profile.ProfilePhoto, } rows, err := ths.db.NamedQuery( `WITH new_user AS (INSERT INTO "auth".user (email, password, is_admin) VALUES (:email, :password, :is_admin) RETURNING id) INSERT INTO "auth".user_profile (name, dob, address, sex, phone_number, profile_photo, user_id) VALUES (:name, :dob, :address, :sex, :phone_number, :profile_photo, (SELECT id FROM new_user)) RETURNING user_id, id AS profile_id`, dbUser, ) if err != nil { if strings.Contains(err.Error(), "duplicate") { return 0, 0, models.NewClientError( "0100011", "user with email "+user.Email+" already exists", 400, ) } return 0, 0, models.NewServerError("0100011", 500, err) } var userID, profileID uint for rows.Next() { err = rows.Scan(&userID, &profileID) if err != nil { return 0, 0, models.NewServerError("0100012", 500, err) } } return userID, profileID, nil } func (ths *postgreSQLRepository) GetUserWithProfileByEmail(email string) (*models.User, error) { var dbUser DBUserWithProfile err := ths.db.Get(&dbUser, `SELECT u.id AS user_id, password, p.id AS profile_id FROM "auth".user AS u, "auth".user_profile AS p WHERE u.id = p.user_id AND email=$1`, email, ) if err != nil { if errors.Is(err, sql.ErrNoRows) { return nil, models.NewClientError( "0100013", "no user found for email "+email, 404, ) } return nil, models.NewServerError("0100013", 500, err) } return &models.User{ ID: dbUser.UserID, Password: dbUser.Password, Profile: models.UserProfile{ ID: dbUser.ProfileID, }, }, nil } func (ths *postgreSQLRepository) GetUserWithProfileByID(userID uint) (*models.User, error) { var dbUser DBUserWithProfile err := ths.db.Get(&dbUser, `SELECT u.id AS user_id, email, is_admin, p.id AS profile_id, name, dob, address, sex, phone_number, profile_photo FROM "auth".user AS u, "auth".user_profile AS p WHERE u.id = p.user_id AND u.id=$1`, userID, ) if err != nil { if errors.Is(err, sql.ErrNoRows) { return nil, models.NewClientError( "0100013", fmt.Sprintf("no user found for ID %d", userID), 404, ) } return nil, models.NewServerError("0100013", 500, err) } return &models.User{ ID: dbUser.UserID, Email: dbUser.Email, IsAdmin: dbUser.IsAdmin, Profile: models.UserProfile{ ID: dbUser.ProfileID, Name: dbUser.Name, DateOfBirth: dbUser.DateOfBirth, Address: dbUser.Address, Sex: dbUser.Sex, PhoneNumber: dbUser.PhoneNumber, ProfilePhoto: dbUser.ProfilePhoto, }, }, nil }
{ return &postgreSQLRepository{ db: db, } }
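A minimal wiring sketch for the constructor above; the DSN, the driver import, and the user ID are illustrative assumptions, and it presumes models.AuthRepository exposes the methods implemented in this file.

// Assumes a registered "postgres" driver (e.g. _ "github.com/lib/pq")
// plus "fmt" and "log" imports.
func exampleAuthRepositoryWiring() {
	db, err := sqlx.Connect("postgres",
		"postgres://user:pass@localhost:5432/rasalibrary?sslmode=disable") // illustrative DSN
	if err != nil {
		log.Fatal(err)
	}
	repo := NewPostgreSQLRepository(db) // satisfies models.AuthRepository
	user, err := repo.GetUserWithProfileByID(1)
	if err != nil {
		log.Fatal(err) // a client error with HTTP status 404 when the user is missing
	}
	fmt.Println(user.Email, user.Profile.Name)
}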
SerialStateMachine.py
############################################################################### # Author: CallMeCCLemon # Date: 2019 # Copyright: 2019 Thomas Littlejohn (@CallMeCCLemon) - Modified BSD License ############################################################################### from enum import Enum from PythonApp.pillar.MessageClient import MessageClient from PythonApp.pillar.PillarMessageTransformer import PillarMessageTransformer from PythonApp.qc_serial.SerialDao import SerialDao from PythonApp.qc_serial.SerialUtil import SerialUtil from PythonApp.qc_serial.model.HeaderMessage import HeaderMessage from PythonApp.qc_serial.model.OpCode import OpCode from PythonApp.qc_serial.model.PayloadMessage import PayloadMessage from PythonApp.util.Config import Config class
(Enum): DISCONNECTED = 0 CONNECTED = 1 class SerialStateMachine: def __init__(self, serial_dao: SerialDao): self.active_state = States.DISCONNECTED self.config = Config() self.states = { States.DISCONNECTED: self.disconnected, States.CONNECTED: self.connected, } self.serial_dao = serial_dao self.message_client = MessageClient() self.header_message_length = 11 self.done = False def run(self): while not self.done: self.states[self.active_state]() def disconnected(self): # Send HELO messages, waiting for an ACK. hello_message = HeaderMessage( OpCode.HELO, 0, int(self.config.get_master_config_value("PillarID")), 0) self.serial_dao.write(hello_message.to_serial_payload()) message = self.serial_dao.read(self.header_message_length) try: SerialUtil.validate_message_header(message) except TimeoutError: return except ValueError as ex: print(ex) return header_message = HeaderMessage.build_header_object(message[1:]) if header_message.opcode == OpCode.ACK: print("Received ACK! Now connected to badge {}!".format(header_message.from_id)) self.active_state = States.CONNECTED else: print("Received unknown message! Skipping...") def connected(self): # Send DUMPQ messages, waiting for a DUMPA. dump_q_message = HeaderMessage( OpCode.DUMPQ, 1, int(self.config.get_master_config_value("PillarID")), 0) dump_q_payload = PayloadMessage(int(self.config.get_master_config_value("PillarType"))) print("Sending dump Q message!") print("Dump Q Header: {}".format(dump_q_message.to_serial_payload(dump_q_payload))) self.serial_dao.write(dump_q_message.to_serial_payload(dump_q_payload)) print("Dump q payload: {}".format(dump_q_payload.to_serial_payload())) self.serial_dao.write_no_sync(dump_q_payload.to_serial_payload()) message = self.serial_dao.read(self.header_message_length) try: SerialUtil.validate_message_header(message) header_message = HeaderMessage.build_header_object(message[1:]) if header_message.opcode == OpCode.DUMPA: print("Received DUMPA! Sending update to cloud!") message = self.serial_dao.read(header_message.payload_len) payload_message = PayloadMessage.build_payload_object(message) pillar_message = PillarMessageTransformer\ .transform_serial_message_to_pillar_message(header_message, payload_message) self.message_client.send_message_to_queue(pillar_message) self.done = True else: print("Unexpected message type!") except TimeoutError as ex: print(ex) except ValueError as ex: print(ex) self.active_state = States.DISCONNECTED
States
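A minimal run sketch for the state machine above; the SerialDao constructor argument is an assumption, since its definition is not shown here.

# Hypothetical wiring: SerialDao's constructor signature is assumed.
serial_dao = SerialDao("/dev/ttyUSB0")
state_machine = SerialStateMachine(serial_dao)
# Loops DISCONNECTED -> CONNECTED, sending HELO then DUMPQ, until a DUMPA
# payload has been forwarded to the message queue and done is set.
state_machine.run()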
stringid_string.go
// Code generated by "stringer -type=StringID"; DO NOT EDIT. package resources import "strconv" func _() { // An "invalid array index" compiler error signifies that the constant values have changed. // Re-run the stringer command to generate them again. var x [1]struct{} _ = x[YourJournalIsNowOpen-0] _ = x[NewEntryDraftExists-1] _ = x[YouCanNowCreateYourEntry-2] _ = x[YouCanNowCreateYourEntry_succinct-3] _ = x[ForDate-4] _ = x[IRepeat-5] _ = x[NextPartPleaseReprompt-6] _ = x[YourEntryIsEmptyNoRepeat-7] _ = x[YourEntryIsEmptyNoCorrect-8] _ = x[OkayCorrectPart-9] _ = x[CorrectPartReprompt-10] _ = x[NewEntryAborted-11] _ = x[YourEntryIsEmptyNoSave-12] _ = x[NewEntryConfirmation-13] _ = x[NewEntryConfirmationReprompt-14] _ = x[OkaySaved-15] _ = x[OkayNotSaved-16] _ = x[SuccinctModeExplanation-17] _ = x[WhatDoYouWantToDoNext-18]
_ = x[CouldNotGetEntry-22] _ = x[CouldNotGetEntries-23] _ = x[NoEntriesInTimeRangeFound-24] _ = x[EntriesInTimeRange-25] _ = x[ReadEntry-26] _ = x[JournalIsEmpty-27] _ = x[NewEntryExample-28] _ = x[EntryForDateNotFound-29] _ = x[SearchError-30] _ = x[SearchNoResultsFound-31] _ = x[SearchResults-32] _ = x[DeleteEntryNotFound-33] _ = x[DeleteEntryCouldNotGetEntry-34] _ = x[DeleteEntryConfirmation-35] _ = x[DeleteEntryError-36] _ = x[OkayDeleted-37] _ = x[OkayNotDeleted-38] _ = x[LinkWithGoogleAccount-39] _ = x[OkayWillBeSuccinct-40] _ = x[OkayWillBeVerbose-41] _ = x[InvalidDate-42] _ = x[InternalError-43] _ = x[Help-44] _ = x[Done-45] _ = x[Correct1-46] _ = x[Correct2-47] _ = x[Repeat1-48] _ = x[Repeat2-49] _ = x[Abort-50] _ = x[ShortPause-51] _ = x[LongPause-52] _ = x[DriveCannotCreateFileError-53] _ = x[DriveMultipleFilesFoundError-54] _ = x[DriveSheetNotFoundError-55] _ = x[DriveUnknownError-56] _ = x[Journal-57] _ = x[EndMarker-58] } const _StringID_name = "YourJournalIsNowOpenNewEntryDraftExistsYouCanNowCreateYourEntryYouCanNowCreateYourEntry_succinctForDateIRepeatNextPartPleaseRepromptYourEntryIsEmptyNoRepeatYourEntryIsEmptyNoCorrectOkayCorrectPartCorrectPartRepromptNewEntryAbortedYourEntryIsEmptyNoSaveNewEntryConfirmationNewEntryConfirmationRepromptOkaySavedOkayNotSavedSuccinctModeExplanationWhatDoYouWantToDoNextDidNotUnderstandTryAgainExampleRelativeDateQueryExampleDateQueryCouldNotGetEntryCouldNotGetEntriesNoEntriesInTimeRangeFoundEntriesInTimeRangeReadEntryJournalIsEmptyNewEntryExampleEntryForDateNotFoundSearchErrorSearchNoResultsFoundSearchResultsDeleteEntryNotFoundDeleteEntryCouldNotGetEntryDeleteEntryConfirmationDeleteEntryErrorOkayDeletedOkayNotDeletedLinkWithGoogleAccountOkayWillBeSuccinctOkayWillBeVerboseInvalidDateInternalErrorHelpDoneCorrect1Correct2Repeat1Repeat2AbortShortPauseLongPauseDriveCannotCreateFileErrorDriveMultipleFilesFoundErrorDriveSheetNotFoundErrorDriveUnknownErrorJournalEndMarker" var _StringID_index = [...]uint16{0, 20, 39, 63, 96, 103, 110, 132, 156, 181, 196, 215, 230, 252, 272, 300, 309, 321, 344, 365, 389, 413, 429, 445, 463, 488, 506, 515, 529, 544, 564, 575, 595, 608, 627, 654, 677, 693, 704, 718, 739, 757, 774, 785, 798, 802, 806, 814, 822, 829, 836, 841, 851, 860, 886, 914, 937, 954, 961, 970} func (i StringID) String() string { if i < 0 || i >= StringID(len(_StringID_index)-1) { return "StringID(" + strconv.FormatInt(int64(i), 10) + ")" } return _StringID_name[_StringID_index[i]:_StringID_index[i+1]] }
_ = x[DidNotUnderstandTryAgain-19] _ = x[ExampleRelativeDateQuery-20] _ = x[ExampleDateQuery-21]
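A small usage sketch of the generated method above; it assumes the StringID constants are declared elsewhere in the resources package, as the index checks imply, and that "fmt" is imported. The generated form slices one shared name string by precomputed offsets instead of using a switch or a map.

func exampleStringIDNames() {
	fmt.Println(YourJournalIsNowOpen) // prints "YourJournalIsNowOpen" via String()
	fmt.Println(StringID(99))         // out of range: prints "StringID(99)"
}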
manager.go
package csrf import ( "sync" "time" "github.com/gofiber/fiber/v2" "github.com/gofiber/fiber/v2/internal/memory" ) // go:generate msgp // msgp -file="manager.go" -o="manager_msgp.go" -tests=false -unexported // don't forget to replace the msgp import path to: // "github.com/gofiber/fiber/v2/internal/msgp" type item struct { } //msgp:ignore manager type manager struct { pool sync.Pool memory *memory.Storage storage fiber.Storage } func newManager(storage fiber.Storage) *manager
// acquire returns an *entry from the sync.Pool func (m *manager) acquire() *item { return m.pool.Get().(*item) } // release and reset *entry to sync.Pool func (m *manager) release(e *item) { // items are only pooled when the in-memory store is used; with an external // storage they are marshaled to bytes, so there is nothing to release if m.storage != nil { return } m.pool.Put(e) } // get data from storage or memory func (m *manager) get(key string) (it *item) { if m.storage != nil { it = m.acquire() if raw, _ := m.storage.Get(key); raw != nil { if _, err := it.UnmarshalMsg(raw); err != nil { return } } return } if it, _ = m.memory.Get(key).(*item); it == nil { it = m.acquire() } return } // get raw data from storage or memory func (m *manager) getRaw(key string) (raw []byte) { if m.storage != nil { raw, _ = m.storage.Get(key) } else { raw, _ = m.memory.Get(key).([]byte) } return } // set data to storage or memory func (m *manager) set(key string, it *item, exp time.Duration) { if m.storage != nil { if raw, err := it.MarshalMsg(nil); err == nil { _ = m.storage.Set(key, raw, exp) } } else { m.memory.Set(key, it, exp) } } // set raw data to storage or memory func (m *manager) setRaw(key string, raw []byte, exp time.Duration) { if m.storage != nil { _ = m.storage.Set(key, raw, exp) } else { m.memory.Set(key, raw, exp) } } // delete data from storage or memory func (m *manager) delete(key string) { if m.storage != nil { _ = m.storage.Delete(key) } else { m.memory.Delete(key) } }
{ // Create new storage handler manager := &manager{ pool: sync.Pool{ New: func() interface{} { return new(item) }, }, } if storage != nil { // Use the provided storage if given manager.storage = storage } else { // Fall back to in-memory storage manager.memory = memory.New() } return manager }
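A minimal usage sketch of the manager above; the key, value, and TTL are illustrative, and "fmt" is assumed to be imported. With a nil fiber.Storage the constructor falls back to the in-memory store, so the raw helpers need no external backend.

func exampleManagerUsage() {
	m := newManager(nil) // nil storage selects the in-memory fallback
	m.setRaw("csrf:abc123", []byte("token-bytes"), time.Minute)
	if raw := m.getRaw("csrf:abc123"); raw != nil {
		fmt.Printf("found token: %s\n", raw)
	}
	m.delete("csrf:abc123") // getRaw now returns nil for this key
}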
navbar.tsx
import * as React from "react" import { BottomNavigation, BottomNavigationAction } from "@material-ui/core" import { User, Heart, Share2 } from "react-feather" export function
(props: any) { return ( <BottomNavigation value={props.value} showLabels> <BottomNavigationAction onClick={props.routes[0]} label="Profile" icon={<User />} /> <BottomNavigationAction onClick={props.routes[1]} label="Suggestions" icon={<Heart />} /> <BottomNavigationAction onClick={props.routes[2]} label="Social" icon={<Share2 />} /> </BottomNavigation> ) }
Navbar
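A hypothetical usage of the Navbar component above; the handlers and the selected index are made up. Note that routes is an array of click handlers, one per action:

import * as React from "react"
import { Navbar } from "./navbar"

export function App() {
  const routes = [
    () => console.log("profile"),
    () => console.log("suggestions"),
    () => console.log("social"),
  ]
  return <Navbar value={0} routes={routes} />
}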
SymbolFactory.py
#!/usr/bin/env python """ generated source for module SymbolFactory """ # package: org.ggp.base.util.symbol.factory import java.util.ArrayList import java.util.LinkedList import java.util.List import org.ggp.base.util.symbol.factory.exceptions.SymbolFormatException import org.ggp.base.util.symbol.grammar.Symbol import org.ggp.base.util.symbol.grammar.SymbolAtom import org.ggp.base.util.symbol.grammar.SymbolList import org.ggp.base.util.symbol.grammar.SymbolPool class SymbolFactory(object): """ generated source for class SymbolFactory """ @classmethod def create(cls, string): """ generated source for method create """ try: tokens = cls.lex(cls.preprocess(string)) return cls.convert(LinkedList(tokens)) except Exception as e: raise SymbolFormatException(string) # Private, implementation-specific methods below here @classmethod def convert(cls, tokens):
return cls.convertAtom(tokens) @classmethod def convertAtom(cls, tokens): """ generated source for method convertAtom """ return SymbolPool.getAtom(tokens.removeFirst()) @classmethod def convertList(cls, tokens): """ generated source for method convertList """ contents = ArrayList() tokens.removeFirst() while not tokens.getFirst() == ")": contents.add(cls.convert(tokens)) tokens.removeFirst() return SymbolPool.getList(contents) @classmethod def lex(cls, string): """ generated source for method lex """ tokens = ArrayList() for token in string.split(" "): tokens.add(token) return tokens @classmethod def preprocess(cls, string): """ generated source for method preprocess """ string = string.replaceAll("\\(", " ( ") string = string.replaceAll("\\)", " ) ") string = string.replaceAll("\\s+", " ") string = string.trim() return string
""" generated source for method convert """ if tokens.getFirst() == "(": return convertList(tokens) else:
query.rs
// Copyright 2018 Parity Technologies (UK) Ltd. // // Permission is hereby granted, free of charge, to any person obtaining a // copy of this software and associated documentation files (the "Software"), // to deal in the Software without restriction, including without limitation // the rights to use, copy, modify, merge, publish, distribute, sublicense, // and/or sell copies of the Software, and to permit persons to whom the // Software is furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. //! This module handles performing iterative queries about the network. use fnv::FnvHashSet; use futures::{future, Future, stream, Stream}; use kbucket::KBucketsPeerId; use libp2p_core::PeerId; use multiaddr::{Protocol, Multiaddr}; use protocol; use rand; use smallvec::SmallVec; use std::cmp::Ordering; use std::io::Error as IoError; use std::mem; /// Parameters of a query. Allows plugging the query-related code with the rest of the /// infrastructure. pub struct
<FBuckets, FFindNode> { /// Identifier of the local peer. pub local_id: PeerId, /// Called whenever we need to obtain the peers closest to a certain peer. pub kbuckets_find_closest: FBuckets, /// Level of parallelism for networking. If this is `N`, then we can dial `N` nodes at a time. pub parallelism: usize, /// Called whenever we want to send a `FIND_NODE` RPC query. pub find_node: FFindNode, } /// Event that happens during a query. #[derive(Debug, Clone)] pub enum QueryEvent<TOut> { /// Learned about new multiaddresses for the given peers. PeersReported(Vec<(PeerId, Vec<Multiaddr>)>), /// Finished the processing of the query. Contains the result. Finished(TOut), } /// Starts a query for an iterative `FIND_NODE` request. #[inline] pub fn find_node<'a, FBuckets, FFindNode>( query_params: QueryParams<FBuckets, FFindNode>, searched_key: PeerId, ) -> Box<Stream<Item = QueryEvent<Vec<PeerId>>, Error = IoError> + Send + 'a> where FBuckets: Fn(PeerId) -> Vec<PeerId> + 'a + Clone, FFindNode: Fn(Multiaddr, PeerId) -> Box<Future<Item = Vec<protocol::Peer>, Error = IoError> + Send> + 'a + Clone, { query(query_params, searched_key, 20) // TODO: constant } /// Refreshes a specific bucket by performing an iterative `FIND_NODE` on a random ID of this /// bucket. /// /// Returns a dummy no-op stream if `bucket_num` is out of range. pub fn refresh<'a, FBuckets, FFindNode>( query_params: QueryParams<FBuckets, FFindNode>, bucket_num: usize, ) -> Box<Stream<Item = QueryEvent<()>, Error = IoError> + Send + 'a> where FBuckets: Fn(PeerId) -> Vec<PeerId> + 'a + Clone, FFindNode: Fn(Multiaddr, PeerId) -> Box<Future<Item = Vec<protocol::Peer>, Error = IoError> + Send> + 'a + Clone, { let peer_id = match gen_random_id(&query_params.local_id, bucket_num) { Ok(p) => p, Err(()) => return Box::new(stream::once(Ok(QueryEvent::Finished(())))), }; let stream = find_node(query_params, peer_id).map(|event| { match event { QueryEvent::PeersReported(peers) => QueryEvent::PeersReported(peers), QueryEvent::Finished(_) => QueryEvent::Finished(()), } }); Box::new(stream) as Box<_> } // Generates a random `PeerId` that belongs to the given bucket. // // Returns an error if `bucket_num` is out of range. fn gen_random_id(my_id: &PeerId, bucket_num: usize) -> Result<PeerId, ()> { let my_id_len = my_id.as_bytes().len(); // TODO: this 2 is magic here ; it is the length of the hash of the multihash let bits_diff = bucket_num + 1; if bits_diff > 8 * (my_id_len - 2) { return Err(()); } let mut random_id = [0; 64]; for byte in 0..my_id_len { match byte.cmp(&(my_id_len - bits_diff / 8 - 1)) { Ordering::Less => { random_id[byte] = my_id.as_bytes()[byte]; } Ordering::Equal => { let mask: u8 = (1 << (bits_diff % 8)) - 1; random_id[byte] = (my_id.as_bytes()[byte] & !mask) | (rand::random::<u8>() & mask); } Ordering::Greater => { random_id[byte] = rand::random(); } } } let peer_id = PeerId::from_bytes(random_id[..my_id_len].to_owned()) .expect("randomly-generated peer ID should always be valid"); Ok(peer_id) } // Generic query-performing function. fn query<'a, FBuckets, FFindNode>( query_params: QueryParams<FBuckets, FFindNode>, searched_key: PeerId, num_results: usize, ) -> Box<Stream<Item = QueryEvent<Vec<PeerId>>, Error = IoError> + Send + 'a> where FBuckets: Fn(PeerId) -> Vec<PeerId> + 'a + Clone, FFindNode: Fn(Multiaddr, PeerId) -> Box<Future<Item = Vec<protocol::Peer>, Error = IoError> + Send> + 'a + Clone, { debug!("Start query for {:?} ; num results = {}", searched_key, num_results); // State of the current iterative process.
struct State<'a> { // At which stage we are. stage: Stage, // Final output of the iteration. result: Vec<PeerId>, // For each open connection, a future with the response of the remote. // Note that we don't use a `SmallVec` here because `select_all` produces a `Vec`. current_attempts_fut: Vec<Box<Future<Item = Vec<protocol::Peer>, Error = IoError> + Send + 'a>>, // For each open connection, the peer ID that we are connected to. // Must always have the same length as `current_attempts_fut`. current_attempts_addrs: SmallVec<[PeerId; 32]>, // Nodes that need to be attempted. pending_nodes: Vec<PeerId>, // Peers that we tried to contact but failed. failed_to_contact: FnvHashSet<PeerId>, } // General stage of the state. #[derive(Copy, Clone, PartialEq, Eq)] enum Stage { // We are still in the first step of the algorithm where we try to find the closest node. FirstStep, // We are contacting the k closest nodes in order to fill the list with enough results. SecondStep, // The results are complete, and the next stream iteration will produce the outcome. FinishingNextIter, // We are finished and the stream shouldn't return anything anymore. Finished, } let initial_state = State { stage: Stage::FirstStep, result: Vec::with_capacity(num_results), current_attempts_fut: Vec::new(), current_attempts_addrs: SmallVec::new(), pending_nodes: { let kbuckets_find_closest = query_params.kbuckets_find_closest.clone(); kbuckets_find_closest(searched_key.clone()) // TODO: suboptimal }, failed_to_contact: Default::default(), }; let parallelism = query_params.parallelism; // Start of the iterative process. let stream = stream::unfold(initial_state, move |mut state| -> Option<_> { match state.stage { Stage::FinishingNextIter => { let result = mem::replace(&mut state.result, Vec::new()); debug!("Query finished with {} results", result.len()); state.stage = Stage::Finished; let future = future::ok((Some(QueryEvent::Finished(result)), state)); return Some(future::Either::A(future)); }, Stage::Finished => { return None; }, _ => () }; let searched_key = searched_key.clone(); let find_node_rpc = query_params.find_node.clone(); // Find out which nodes to contact at this iteration. let to_contact = { let wanted_len = if state.stage == Stage::FirstStep { parallelism.saturating_sub(state.current_attempts_fut.len()) } else { num_results.saturating_sub(state.current_attempts_fut.len()) }; let mut to_contact = SmallVec::<[_; 16]>::new(); while to_contact.len() < wanted_len && !state.pending_nodes.is_empty() { // Move the first element of `pending_nodes` to `to_contact`, but ignore nodes that // are already part of the results or of a current attempt or if we failed to // contact it before. let peer = state.pending_nodes.remove(0); if state.result.iter().any(|p| p == &peer) { continue; } if state.current_attempts_addrs.iter().any(|p| p == &peer) { continue; } if state.failed_to_contact.iter().any(|p| p == &peer) { continue; } to_contact.push(peer); } to_contact }; debug!("New query round ; {} queries in progress ; contacting {} new peers", state.current_attempts_fut.len(), to_contact.len()); // For each node in `to_contact`, start an RPC query and a corresponding entry in the two // `state.current_attempts_*` fields.
for peer in to_contact { let multiaddr: Multiaddr = Protocol::P2p(peer.clone().into_bytes()).into(); let searched_key2 = searched_key.clone(); let current_attempt = find_node_rpc(multiaddr.clone(), searched_key2); // TODO: suboptimal state.current_attempts_addrs.push(peer.clone()); state .current_attempts_fut .push(Box::new(current_attempt) as Box<_>); } debug_assert_eq!( state.current_attempts_addrs.len(), state.current_attempts_fut.len() ); // Extract `current_attempts_fut` so that we can pass it to `select_all`. We will push the // values back when inside the loop. let current_attempts_fut = mem::replace(&mut state.current_attempts_fut, Vec::new()); if current_attempts_fut.is_empty() { // If `current_attempts_fut` is empty, then `select_all` would panic. It happens // when we have no additional node to query. debug!("Finishing query early because no additional node available"); state.stage = Stage::FinishingNextIter; let future = future::ok((None, state)); return Some(future::Either::A(future)); } // This is the future that continues or breaks the `loop_fn`. let future = future::select_all(current_attempts_fut.into_iter()).then(move |result| { let (message, trigger_idx, other_current_attempts) = match result { Err((err, trigger_idx, other_current_attempts)) => { (Err(err), trigger_idx, other_current_attempts) } Ok((message, trigger_idx, other_current_attempts)) => { (Ok(message), trigger_idx, other_current_attempts) } }; // Putting back the extracted elements in `state`. let remote_id = state.current_attempts_addrs.remove(trigger_idx); debug_assert!(state.current_attempts_fut.is_empty()); state.current_attempts_fut = other_current_attempts; // `message` contains the reason why the current future was woken up. let closer_peers = match message { Ok(msg) => msg, Err(err) => { trace!("RPC query failed for {:?}: {:?}", remote_id, err); state.failed_to_contact.insert(remote_id); return future::ok((None, state)); } }; // Inserting the node we received a response from into `state.result`. // The code is non-trivial because `state.result` is ordered by distance and is limited // by `num_results` elements. if let Some(insert_pos) = state.result.iter().position(|e| { e.distance_with(&searched_key) >= remote_id.distance_with(&searched_key) }) { if state.result[insert_pos] != remote_id { if state.result.len() >= num_results { state.result.pop(); } state.result.insert(insert_pos, remote_id); } } else if state.result.len() < num_results { state.result.push(remote_id); } // The loop below will set this variable to `true` if we find a new element to put at // the top of the result. This would mean that we have to continue looping. let mut local_nearest_node_updated = false; // Update `state` with the actual content of the message. let mut new_known_multiaddrs = Vec::with_capacity(closer_peers.len()); for mut peer in closer_peers { // Update the peerstore with the information sent by // the remote. { let multiaddrs = mem::replace(&mut peer.multiaddrs, Vec::new()); trace!("Reporting multiaddresses for {:?}: {:?}", peer.node_id, multiaddrs); new_known_multiaddrs.push((peer.node_id.clone(), multiaddrs)); } if peer.node_id.distance_with(&searched_key) <= state.result[0].distance_with(&searched_key) { local_nearest_node_updated = true; } if state.result.iter().any(|ma| ma == &peer.node_id) { continue; } // Insert the node into `pending_nodes` at the right position, or do not // insert it if it is already in there. 
if let Some(insert_pos) = state.pending_nodes.iter().position(|e| { e.distance_with(&searched_key) >= peer.node_id.distance_with(&searched_key) }) { if state.pending_nodes[insert_pos] != peer.node_id { state.pending_nodes.insert(insert_pos, peer.node_id.clone()); } } else { state.pending_nodes.push(peer.node_id.clone()); } } if state.result.len() >= num_results || (state.stage != Stage::FirstStep && state.current_attempts_fut.is_empty()) { state.stage = Stage::FinishingNextIter; } else { if !local_nearest_node_updated { trace!("Loop didn't update closer node ; jumping to step 2"); state.stage = Stage::SecondStep; } } future::ok((Some(QueryEvent::PeersReported(new_known_multiaddrs)), state)) }); Some(future::Either::B(future)) }).filter_map(|val| val); Box::new(stream) as Box<_> }
QueryParams
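A self-contained sketch of the bit twiddling gen_random_id uses above: copy every byte of the shared prefix, partially randomize the boundary byte, and fully randomize the tail. The values and the tiny xorshift generator (standing in for the rand crate) are illustrative:

fn main() {
    let my_id: [u8; 4] = [0xAC, 0xFF, 0x00, 0x42];
    let bits_diff: usize = 11; // randomize the low 11 bits
    let len = my_id.len();
    let mut seed: u32 = 0x9E37_79B9;
    let mut rand_u8 = || {
        seed ^= seed << 13;
        seed ^= seed >> 17;
        seed ^= seed << 5;
        (seed & 0xFF) as u8
    };
    let boundary = len - bits_diff / 8 - 1; // same index math as above
    let mut out = [0u8; 4];
    for byte in 0..len {
        if byte < boundary {
            out[byte] = my_id[byte]; // shared prefix, copied verbatim
        } else if byte == boundary {
            // keep the high bits, randomize the low `bits_diff % 8` bits
            let mask: u8 = (1 << (bits_diff % 8)) - 1;
            out[byte] = (my_id[byte] & !mask) | (rand_u8() & mask);
        } else {
            out[byte] = rand_u8(); // tail, fully random
        }
    }
    println!("{:02x?} -> {:02x?}", my_id, out);
}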
mirror_repos.js
import $ from 'jquery'; import { debounce } from 'lodash'; import { __ } from '~/locale'; import { deprecatedCreateFlash as Flash } from '~/flash'; import axios from '~/lib/utils/axios_utils'; import SSHMirror from './ssh_mirror'; import { hide } from '~/tooltips'; export default class
{ constructor(container) { this.$container = $(container); this.$password = null; this.$form = $('.js-mirror-form', this.$container); this.$urlInput = $('.js-mirror-url', this.$form); this.$protectedBranchesInput = $('.js-mirror-protected', this.$form); this.$table = $('.js-mirrors-table-body', this.$container); this.mirrorEndpoint = this.$form.data('projectMirrorEndpoint'); } init() { this.initMirrorPush(); this.registerUpdateListeners(); } initMirrorPush() { this.$keepDivergentRefsInput = $('.js-mirror-keep-divergent-refs', this.$form); this.$passwordGroup = $('.js-password-group', this.$container); this.$password = $('.js-password', this.$passwordGroup); this.$authMethod = $('.js-auth-method', this.$form); this.$keepDivergentRefsInput.on('change', () => this.updateKeepDivergentRefs()); this.$authMethod.on('change', () => this.togglePassword()); this.$password.on('input.updateUrl', () => this.debouncedUpdateUrl()); this.initMirrorSSH(); this.updateProtectedBranches(); this.updateKeepDivergentRefs(); } initMirrorSSH() { if (this.$password) { // eslint-disable-next-line @gitlab/no-global-event-off this.$password.off('input.updateUrl'); } this.$password = undefined; this.sshMirror = new SSHMirror('.js-mirror-form'); this.sshMirror.init(); } updateUrl() { let val = this.$urlInput.val(); if (this.$password) { const password = this.$password.val(); if (password) val = val.replace('@', `:${password}@`); } $('.js-mirror-url-hidden', this.$form).val(val); } updateProtectedBranches() { const val = this.$protectedBranchesInput.get(0).checked ? this.$protectedBranchesInput.val() : '0'; $('.js-mirror-protected-hidden', this.$form).val(val); } updateKeepDivergentRefs() { const field = this.$keepDivergentRefsInput.get(0); // This field only exists after the form is switched to 'Push' mode if (field) { const val = field.checked ? this.$keepDivergentRefsInput.val() : '0'; $('.js-mirror-keep-divergent-refs-hidden', this.$form).val(val); } } registerUpdateListeners() { this.debouncedUpdateUrl = debounce(() => this.updateUrl(), 200); this.$urlInput.on('input', () => this.debouncedUpdateUrl()); this.$protectedBranchesInput.on('change', () => this.updateProtectedBranches()); this.$table.on('click', '.js-delete-mirror', (event) => this.deleteMirror(event)); } togglePassword() { const isPassword = this.$authMethod.val() === 'password'; if (!isPassword) { this.$password.val(''); this.updateUrl(); } this.$passwordGroup.collapse(isPassword ? 'show' : 'hide'); } deleteMirror(event, existingPayload) { const $target = $(event.currentTarget); let payload = existingPayload; if (!payload) { payload = { project: { remote_mirrors_attributes: { id: $target.data('mirrorId'), _destroy: 1, }, }, }; } return axios .put(this.mirrorEndpoint, payload) .then(() => this.removeRow($target)) .catch(() => Flash(__('Failed to remove mirror.'))); } /* eslint-disable class-methods-use-this */ removeRow($target) { const row = $target.closest('tr'); hide($('.js-delete-mirror', row)); row.remove(); } /* eslint-enable class-methods-use-this */ }
MirrorRepos
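A standalone sketch of the URL rewriting updateUrl() performs above; the sample credentials are made up:

const url = 'https://gituser@example.com/group/project.git';
const password = 's3cret';
// Splice the password in after the username, as updateUrl() does.
const pushUrl = password ? url.replace('@', `:${password}@`) : url;
console.log(pushUrl); // https://gituser:s3cret@example.com/group/project.git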
ext.rs
//! Safe wrapper around externalities invokes. use pwasm_std::{ self, types::{H256, U256, Address} }; /// Generic wasm error #[derive(Debug)] pub struct Error; mod external { extern "C" { // Various call variants /// Direct/classic call. /// Corresponds to "CALL" opcode in EVM pub fn ccall( gas: i64, address: *const u8, val_ptr: *const u8, input_ptr: *const u8, input_len: u32, result_ptr: *mut u8, result_len: u32, ) -> i32; /// Delegate call. /// Corresponds to "CALLCODE" opcode in EVM pub fn dcall( gas: i64, address: *const u8, input_ptr: *const u8, input_len: u32, result_ptr: *mut u8, result_len: u32, ) -> i32; /// Static call. /// Corresponds to "STATICCALL" opcode in EVM pub fn scall( gas: i64, address: *const u8, input_ptr: *const u8, input_len: u32, result_ptr: *mut u8, result_len: u32, ) -> i32; // environmental blockchain functions (runtime might not provide all of these!) pub fn blockhash(number: i64, dest: *mut u8); pub fn balance(address: *const u8, dest: *mut u8); pub fn coinbase(dest: *mut u8); pub fn timestamp() -> i64; pub fn blocknumber() -> i64; pub fn difficulty(dest: *mut u8); pub fn gaslimit(dest: *mut u8); #[cfg(feature = "kip6")] pub fn gasleft() -> i64; pub fn sender(dest: *mut u8); pub fn address(dest: *mut u8); pub fn value(dest: *mut u8); pub fn origin(dest: *mut u8); pub fn elog( topic_ptr: *const u8, topic_count: u32, data_ptr: *const u8, data_len: u32 ); pub fn create( endowment: *const u8, code_ptr: *const u8, code_len: u32, result_ptr: *mut u8 ) -> i32; #[cfg(feature = "kip4")] pub fn create2( endowment: *const u8, salt: *const u8, code_ptr: *const u8, code_len: u32, result_ptr: *mut u8 ) -> i32; pub fn suicide(refund: *const u8) -> !; pub fn ret(ptr: *const u8, len: u32) -> !; pub fn input_length() -> u32; pub fn fetch_input(dst: *mut u8); } } /// Halt execution and register account for deletion. /// /// Value of the current account will be transferred to the `refund` address. pub fn suicide(refund: &Address) -> ! { unsafe { external::suicide(refund.as_ptr()); } } /// Get balance of the given account. /// /// If an account is not registered in the chain yet, /// it is considered as an account with `balance = 0`. pub fn balance(address: &Address) -> U256 { unsafe { fetch_u256(|x| external::balance(address.as_ptr(), x) ) } } /// Create a new account with the given code /// /// # Errors /// /// Returns [`Error`] in case the contract constructor failed. /// /// [`Error`]: struct.Error.html pub fn create(endowment: U256, code: &[u8]) -> Result<Address, Error> { let mut endowment_arr = [0u8; 32]; endowment.to_big_endian(&mut endowment_arr); let mut result = Address::zero(); unsafe { if external::create( endowment_arr.as_ptr(), code.as_ptr(), code.len() as u32, (&mut result).as_mut_ptr() ) == 0 { Ok(result) } else { Err(Error) } } } #[cfg(feature = "kip4")] /// Create a new account with the given code and salt, requires KIP-4. /// /// # Errors /// /// Returns [`Error`] in case the contract constructor failed. /// /// [`Error`]: struct.Error.html pub fn create2(endowment: U256, salt: H256, code: &[u8]) -> Result<Address, Error> { let mut endowment_arr = [0u8; 32]; endowment.to_big_endian(&mut endowment_arr); let mut result = Address::new(); unsafe { if external::create2( endowment_arr.as_ptr(), salt.as_ptr(), code.as_ptr(), code.len() as u32, (&mut result).as_mut_ptr() ) == 0 { Ok(result) } else { Err(Error) } } } /// Message-call into an account /// /// # Arguments: /// * `gas` - a gas limit for a call.
A call execution will halt if the call exceeds this amount /// * `address` - an address of the contract to send a call to /// * `value` - a value in Wei to send with the call /// * `input` - the data to send with the call /// * `result` - a mutable reference to be filled with the result data /// /// # Returns: /// /// The call succeeded if it returns `Result::Ok(())` /// If the call returns `Result::Err(Error)` it means the call failed due to execution halting pub fn call(gas: u64, address: &Address, value: U256, input: &[u8], result: &mut [u8]) -> Result<(), Error> { let mut value_arr = [0u8; 32]; value.to_big_endian(&mut value_arr); unsafe { if external::ccall( gas as i64, address.as_ptr(), value_arr.as_ptr(), input.as_ptr(), input.len() as u32, result.as_mut_ptr(), result.len() as u32 ) == 0 { Ok(()) } else { Err(Error) } } } /// Like [`call`], but with code at the given `address` /// /// Effectively this function is like calling the current account but with /// different code (i.e. like `DELEGATECALL` EVM instruction). /// /// [`call`]: fn.call.html pub fn call_code(gas: u64, address: &Address, input: &[u8], result: &mut [u8]) -> Result<(), Error> { unsafe { if external::dcall( gas as i64, address.as_ptr(), input.as_ptr(), input.len() as u32, result.as_mut_ptr(), result.len() as u32 ) == 0 { Ok(()) } else { Err(Error) } } } /// Like [`call`], but this call and any of its subcalls are disallowed to modify any storage. /// /// It will return an error in this case. /// /// [`call`]: fn.call.html pub fn static_call(gas: u64, address: &Address, input: &[u8], result: &mut [u8]) -> Result<(), Error> { unsafe { if external::scall( gas as i64, address.as_ptr(), input.as_ptr(), input.len() as u32, result.as_mut_ptr(), result.len() as u32 ) == 0 { Ok(()) } else { Err(Error) } } } /// Returns hash of the given block or H256::zero() /// /// Only works for the 256 most recent blocks, excluding the current one /// Returns H256::zero() in case of failure pub fn block_hash(block_number: u64) -> H256 { let mut res = H256::zero(); unsafe { external::blockhash(block_number as i64, res.as_mut_ptr()) } res } /// Get the current block’s beneficiary address (the current miner account address) pub fn coinbase() -> Address { unsafe { fetch_address(|x| external::coinbase(x) ) } } /// Get the block's timestamp /// /// It can be viewed as an output of Unix's `time()` function at /// current block's inception. pub fn timestamp() -> u64 { unsafe { external::timestamp() as u64 } } /// Get the block's number /// /// This value represents number of ancestor blocks. /// The genesis block has a number of zero. pub fn block_number() -> u64 { unsafe { external::blocknumber() as u64 } } /// Get the block's difficulty. pub fn difficulty() -> U256 { unsafe { fetch_u256(|x| external::difficulty(x) ) } } /// Get the block's gas limit. pub fn gas_limit() -> U256 { unsafe { fetch_u256(|x| external::gaslimit(x) ) } } #[cfg(feature = "kip6")] /// Get amount of gas left. pub fn gas_left() -> u64 { unsafe { external::gasleft() as u64 } } /// Get caller address /// /// This is the address of the account that is directly responsible for this execution. /// Use [`origin`] to get an address of external account - an original initiator of a transaction pub fn sender() -> Address { unsafe { fetch_address(|x| external::sender(x) ) } } /// Get execution origination address /// /// This is the sender of the original transaction.
/// It can only be an external account, never a contract pub fn origin() -> Address { unsafe { fetch_address(|x| external::origin(x) ) } } /// Get deposited value by the instruction/transaction responsible for this execution. pub fn value() -> U256 { unsafe { fetch_u256(|x| external::value(x) ) } } /// Get address of currently executing account pub fn address() -> Address { unsafe { fetch_address(|x| external::address(x) ) } } /// Creates log entry with given topics and data. /// /// There can be only up to 4 topics. /// /// # Panics /// /// If `topics` contains more than 4 elements then this function will trap. pub fn log(topics: &[H256], data: &[u8]) { unsafe { external::elog(topics.as_ptr() as *const u8, topics.len() as u32, data.as_ptr(), data.len() as u32); } } /// Allocates and requests [`call`] arguments (input) /// /// Input data comes either with an external transaction or from a [`call`] input value. pub fn in
-> pwasm_std::Vec<u8> { let len = unsafe { external::input_length() }; match len { 0 => pwasm_std::Vec::new(), non_zero => { let mut data = pwasm_std::Vec::with_capacity(non_zero as usize); unsafe { data.set_len(non_zero as usize); external::fetch_input(data.as_mut_ptr()); } data } } } /// Sets a [`call`] return value /// /// Pass return data to the runtime. Runtime SHOULD trap the execution. /// pub fn ret(data: &[u8]) -> ! { unsafe { external::ret(data.as_ptr(), data.len() as u32); } } unsafe fn fetch_address<F>(f: F) -> Address where F: Fn(*mut u8) { let mut res = Address::zero(); f(res.as_mut_ptr()); res } unsafe fn fetch_u256<F>(f: F) -> U256 where F: Fn(*mut u8) { let mut res = [0u8; 32]; f(res.as_mut_ptr()); U256::from_big_endian(&res) }
put()
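A hedged sketch of a contract entry point driving the wrappers above from a separate contract crate (the payload handling is illustrative, and it only runs inside a wasm runtime that supplies the declared externs):

#[no_mangle]
pub fn call() {
    let payload = pwasm_ethereum::input(); // data sent with the transaction/call
    let caller = pwasm_ethereum::sender();
    let _caller_balance = pwasm_ethereum::balance(&caller);
    pwasm_ethereum::ret(&payload); // echo the payload back and halt
}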
streambuffer.go
package renter // NOTE: This stream buffer is unfinished in a few ways. The first way is // that it's not possible to cancel fetches. The second way is that fetches are // not prioritized; there should be a higher priority on data that is closer to // the current stream offset. The third is that the amount of data which gets // fetched is not dynamically adjusted. The streamer really should be monitoring // the total amount of time it takes for a call to the data source to return // some data, and should buffer accordingly. If auto-adjusting the lookahead // size, care needs to be taken to ensure not to exceed the // bytesBufferedPerStream size, as exceeding that will cause issues with the // lru, and cause data fetches to be evicted before they become useful. import ( "context" "io" "sync" "time" "github.com/opentracing/opentracing-go" "gitlab.com/SkynetLabs/skyd/build" "gitlab.com/SkynetLabs/skyd/skymodules" "go.sia.tech/siad/modules" "go.sia.tech/siad/types" "gitlab.com/NebulousLabs/errors" "gitlab.com/NebulousLabs/threadgroup" ) const ( // minimumDataSections is set to two because the streamer always tries to // buffer at least the current data section and the next data section for // the current offset of a stream. // // Three as a number was considered so that in addition to buffering one // piece ahead, a previous piece could also be cached. This was considered // to be less valuable than keeping memory requirements low - // minimumDataSections is only at play if there is not enough room for // multiple cache nodes in the bytesBufferedPerStream. minimumDataSections = 2 // longDownloadThreshold specifies when a download is considered to be // taking long. This value might change in the future; it is based on the // p99 values for downloads, which are above 3s on some of our servers in // production currently. longDownloadThreshold = time.Second * 3 ) var ( // errTimeout is returned when the context cancels before the data is // available. errTimeout = errors.New("could not get data from data section, context timed out") // bytesBufferedPerStream is the total amount of data that gets allocated // per stream. If bytesBufferedPerStream is less than three times the // RequestSize of a stream buffer, three times the RequestSize will be // allocated instead. // // For example, if the RequestSize is 10kb and the bytesBufferedPerStream is // 100kb, then each stream is going to buffer 10 segments that are each 10kb // long in the LRU. // // But if the RequestSize is 50kb and the bytesBufferedPerStream is 100kb, // then each stream is going to buffer 3 segments that are each 50kb long in // the LRU, for a total of 150kb. bytesBufferedPerStream = build.Select(build.Var{ Dev: uint64(1 << 25), // 32 MiB Standard: uint64(1 << 25), // 32 MiB Testing: uint64(1 << 8), // 256 bytes }).(uint64) // keepOldBuffersDuration specifies how long a stream buffer will stay in // the buffer set after the final stream is closed. This gives some buffer // time for a new request to the same resource, without having the data // source fully cleared out. This optimization is particularly useful for // certain video players and web applications. keepOldBuffersDuration = build.Select(build.Var{ Dev: time.Second * 15, Standard: time.Second * 60, Testing: time.Second * 2, }).(time.Duration) // minimumLookahead defines the minimum amount that the stream will fetch // ahead of the current seek position in a stream. // // Note that there is a throughput vs. latency tradeoff here.
The maximum // speed of a stream has an upper bound of the lookahead / latency. So if it // takes 1 second to fetch data and the lookahead is 2 MB, the maximum speed // of a single stream is going to be 2 MB/s. When Sia is healthy, the // latency on a fetch should be under 200ms, which means with a 2 MB // lookahead a single stream should be able to do more than 10 MB/s. // // A smaller minimum lookahead means that less data is being buffered // simultaneously, so seek times should be lower. A smaller minimum // lookahead becomes less important if we get some way to ensure the earlier // parts are prioritized, but we don't have control over that at the moment. minimumLookahead = build.Select(build.Var{ Dev: uint64(1 << 21), // 2 MiB Standard: uint64(1 << 23), // 8 MiB Testing: uint64(1 << 6), // 64 bytes }).(uint64) ) // streamBufferDataSource is an interface that the stream buffer uses to fetch // data. This type is internal to the renter as there are plans to expand on the // type. type streamBufferDataSource interface { // DataSize should return the size of the data. When the streamBuffer is // reading from the data source, it will ensure that none of the read calls // go beyond the boundary of the data source. DataSize() uint64 // ID returns the ID of the data source. This should be unique to the data // source - that is, every data source that returns the same ID should have // identical data and be fully interchangeable. ID() skymodules.DataSourceID // Metadata returns the Skyfile metadata of a data source. Metadata() skymodules.SkyfileMetadata // RawMetadata returns the raw metadata of a data source. RawMetadata() []byte // Layout returns the Skyfile layout of a data source. Layout() skymodules.SkyfileLayout // RequestSize should return the request size that the dataSource expects // the streamBuffer to use. The streamBuffer will always make ReadAt calls // that are of the suggested request size and byte aligned. // // If the request size is small, many ReadAt calls will be made in parallel. // If the dataSource can handle high parallelism, a smaller request size // should be recommended to the streamBuffer, because that will reduce // latency. If the dataSource cannot handle high parallelism, a larger // request size should be used to optimize for total throughput. // // A general rule of thumb is that the streamer should be able to // comfortably handle 100 mbps (high end 4K video) if the user's local // connection has that much throughput. RequestSize() uint64 // SilentClose is an io.Closer that does not return an error. The data // source is expected to handle any logging or reporting that is necessary // if the closing fails. SilentClose() // Skylink returns the skylink of the datasource. Skylink() skymodules.Skylink // ReadStream allows the stream buffer to request specific data chunks from // the data source. It returns a channel containing a read response. ReadStream(context.Context, uint64, uint64, types.Currency) chan *readResponse } // readResponse is a helper struct that is returned when reading from the data // source. It contains the data being downloaded and an error in case of // failure. type readResponse struct { staticData []byte staticErr error } // dataSection represents a section of data from a data source. The data section // includes a refcount of how many different streams have the data in their LRU. // If the refCount is ever set to 0, the data section should be deleted. 
Because // the dataSection has no mutex, the refCount falls under the consistency domain // of the object holding it, which should always be a streamBuffer. type dataSection struct { // dataAvailable, externData, externDuration, and externErr work together. // The data and error are not allowed to be accessed by external threads // until the data available channel has been closed. Once the dataAvailable // channel has been closed, externData, externDuration and externErr are to // be treated like static fields. dataAvailable chan struct{} externDuration time.Duration externData []byte externErr error refCount uint64 } // stream is a single stream that uses a stream buffer. The stream implements // io.ReadSeeker and io.Closer, and must be closed when it is done being used. // The stream will cache data, both data that has been accessed recently as well // as data that is in front of the current read head. The stream buffer is a // common cache that is used between all streams that are using the same data // source, allowing each stream to depend on the other streams if data has // already been loaded. type stream struct { lru *leastRecentlyUsedCache offset uint64 mu sync.Mutex staticStreamBuffer *streamBuffer staticContext context.Context staticSpan opentracing.Span staticReadTimeout time.Duration } // streamBuffer is a buffer for a single dataSource. // // The streamBuffer uses a threadgroup to ensure that it does not call ReadAt // after calling SilentClose. type streamBuffer struct { dataSections map[uint64]*dataSection // externRefCount is in the same consistency domain as the streamBufferSet, // it needs to be incremented and decremented simultaneously with the // creation and deletion of the streamBuffer. externRefCount uint64 mu sync.Mutex staticTG threadgroup.ThreadGroup staticDataSize uint64 staticDataSource streamBufferDataSource staticDataSectionSize uint64 staticStreamBufferSet *streamBufferSet staticStreamID skymodules.DataSourceID staticPricePerMS types.Currency staticWallet modules.SiacoinSenderMulti staticSpan opentracing.Span } // streamBufferSet tracks all of the stream buffers that are currently active. // When a new stream is created, the stream buffer set is referenced to check // whether another stream using the same data source already exists. type streamBufferSet struct { streams map[skymodules.DataSourceID]*streamBuffer staticStatsCollector *skymodules.DistributionTracker staticTG *threadgroup.ThreadGroup mu sync.Mutex } // newStreamBufferSet initializes and returns a stream buffer set. func
(statsCollector *skymodules.DistributionTracker, tg *threadgroup.ThreadGroup) *streamBufferSet { return &streamBufferSet{ streams: make(map[skymodules.DataSourceID]*streamBuffer), staticStatsCollector: statsCollector, staticTG: tg, } } // callNewStream will create a stream that implements io.Closer and // io.ReadSeeker. A dataSource must be provided for the stream so that the // stream can fetch data in advance of calls to 'Read' and attempt to provide a // smooth streaming experience. // // The 'sourceID' is a unique identifier for the dataSource which allows // multiple streams fetching data from the same source to combine their cache. // This shared cache only comes into play if the streams are simultaneously // accessing the same data, allowing the buffer to save on memory and access // latency. // // Each stream has a separate LRU for determining what data to buffer. Because // the LRU is distinct to the stream, the shared cache feature will not result // in one stream evicting data from another stream's LRU. func (sbs *streamBufferSet) callNewStream(ctx context.Context, dataSource streamBufferDataSource, initialOffset uint64, timeout time.Duration, pricePerMS types.Currency) *stream { // Grab the streamBuffer for the provided sourceID. If no streamBuffer for // the sourceID exists, create a new one. sourceID := dataSource.ID() sbs.mu.Lock() streamBuf, exists := sbs.streams[sourceID] if !exists { streamBuf = &streamBuffer{ dataSections: make(map[uint64]*dataSection), staticDataSize: dataSource.DataSize(), staticDataSource: dataSource, staticDataSectionSize: dataSource.RequestSize(), staticPricePerMS: pricePerMS, staticStreamBufferSet: sbs, staticStreamID: sourceID, staticSpan: opentracing.SpanFromContext(ctx), } sbs.streams[sourceID] = streamBuf } else { // Another data source already exists for this content which will be // used instead of the input data source. Close the input source. dataSource.SilentClose() } streamBuf.externRefCount++ sbs.mu.Unlock() return streamBuf.managedPrepareNewStream(ctx, initialOffset, timeout) } // callNewStreamFromID will check the stream buffer set to see if a stream // buffer exists for the given data source id. If so, a new stream will be // created using the data source, and the bool will be set to 'true'. Otherwise, // the stream returned will be nil and the bool will be set to 'false'. func (sbs *streamBufferSet) callNewStreamFromID(ctx context.Context, id skymodules.DataSourceID, initialOffset uint64, timeout time.Duration) (*stream, bool) { sbs.mu.Lock() streamBuf, exists := sbs.streams[id] if !exists { sbs.mu.Unlock() return nil, false } streamBuf.externRefCount++ sbs.mu.Unlock() return streamBuf.managedPrepareNewStream(ctx, initialOffset, timeout), true } // managedData will block until the data for a data section is available, and // then return the data. The data is not safe to modify. func (ds *dataSection) managedData(ctx context.Context) (data []byte, err error) { start := time.Now() // Trace info. var duration time.Duration if span := opentracing.SpanFromContext(ctx); span != nil { defer func() { span.SetTag("success", err == nil) span.SetTag("duration", duration) if err != nil { span.LogKV("error", err) if errors.Contains(err, errTimeout) { span.SetTag("timeout", true) } } }() } select { case <-ds.dataAvailable: duration = time.Since(start) case <-ctx.Done(): return nil, errTimeout } data = ds.externData err = ds.externErr return } // Close will release all of the resources held by a stream.
// // Before removing the stream, this function will sleep for some time. This is // specifically to address the use case where an application may be using the // same file or resource continuously, but doing so by repeatedly opening new // connections to siad rather than keeping a single stable connection. Some // video players do this. On Skynet, most javascript applications do this, as // the javascript application does not realize that multiple files within the // app are all part of the same resource. This sleep, delaying the release // of a resource, substantially improves performance in practice, in many cases // causing a 4x reduction in response latency. func (s *stream) Close() error { // Finish the span s.staticSpan.Finish() s.staticStreamBuffer.staticStreamBufferSet.staticTG.Launch(func() { // Convenience variables. sb := s.staticStreamBuffer sbs := sb.staticStreamBufferSet // Keep the memory for a while after closing. sbs.staticTG.Sleep(keepOldBuffersDuration) // Drop all nodes from the lru. s.lru.callEvictAll() // Remove the stream from the streamBuffer. sbs.managedRemoveStream(sb) }) return nil } // Metadata returns the skyfile metadata associated with this stream. func (s *stream) Metadata() skymodules.SkyfileMetadata { return s.staticStreamBuffer.staticDataSource.Metadata() } // RawMetadata returns the raw skyfile metadata associated with this stream. func (s *stream) RawMetadata() []byte { return s.staticStreamBuffer.staticDataSource.RawMetadata() } // Layout returns the skyfile layout associated with this stream. func (s *stream) Layout() skymodules.SkyfileLayout { return s.staticStreamBuffer.staticDataSource.Layout() } // Skylink returns the skylink associated with this stream. func (s *stream) Skylink() skymodules.Skylink { return s.staticStreamBuffer.staticDataSource.Skylink() } // Read will read data into 'b', returning the number of bytes read and any // errors. Read will not fill 'b' up all the way if only part of the data is // available. func (s *stream) Read(b []byte) (int, error) { s.mu.Lock() defer s.mu.Unlock() // Create a context. ctx := s.staticContext if s.staticReadTimeout > 0 { var cancel context.CancelFunc ctx, cancel = context.WithTimeout(ctx, s.staticReadTimeout) defer cancel() } // Create a child span. spanRef := opentracing.ChildOf(s.staticSpan.Context()) span := opentracing.StartSpan("Read", spanRef) defer span.Finish() // Attach the span to the ctx. ctx = opentracing.ContextWithSpan(ctx, span) // Convenience variables. dataSize := s.staticStreamBuffer.staticDataSize dataSectionSize := s.staticStreamBuffer.staticDataSectionSize sb := s.staticStreamBuffer // Check for EOF. if s.offset == dataSize { return 0, io.EOF } // Get the index of the current section and the offset within the current // section. currentSection := s.offset / dataSectionSize offsetInSection := s.offset % dataSectionSize // Determine how many bytes are remaining within the current section; this // forms an upper bound on how many bytes can be read. var bytesRemaining uint64 lastSection := (currentSection+1)*dataSectionSize >= dataSize if !lastSection { bytesRemaining = dataSectionSize - offsetInSection } else { bytesRemaining = dataSize - s.offset } // Determine how many bytes should be read. var bytesToRead uint64 if bytesRemaining > uint64(len(b)) { bytesToRead = uint64(len(b)) } else { bytesToRead = bytesRemaining } // Fetch the dataSection that has the data we want to read.
sb.mu.Lock() dataSection, exists := sb.dataSections[currentSection] sb.mu.Unlock() if !exists { err := errors.New("data section should always be in the stream buffer for the current offset of a stream") build.Critical(err) return 0, err } // Block until the data is available. data, err := dataSection.managedData(ctx) if err != nil { return 0, errors.AddContext(err, "read call failed because data section fetch failed") } // Copy the data into the read request. n := copy(b, data[offsetInSection:offsetInSection+bytesToRead]) s.offset += uint64(n) // Send the call to prepare the next data section. s.prepareOffset() return n, nil } // Seek will move the read head of the stream to the provided offset. func (s *stream) Seek(offset int64, whence int) (int64, error) { // Input checking. if offset < 0 { return int64(s.offset), errors.New("offset cannot be negative in call to seek") } s.mu.Lock() defer s.mu.Unlock() // Update the offset of the stream according to the inputs. dataSize := s.staticStreamBuffer.staticDataSize switch whence { case io.SeekStart: s.offset = uint64(offset) case io.SeekCurrent: newOffset := s.offset + uint64(offset) if newOffset > dataSize { return int64(s.offset), errors.New("offset cannot seek beyond the bounds of the file") } s.offset = newOffset case io.SeekEnd: if uint64(offset) > dataSize { return int64(s.offset), errors.New("cannot seek before the front of the file") } s.offset = dataSize - uint64(offset) default: return int64(s.offset), errors.New("invalid value for 'whence' in call to seek") } // Prepare the fetch of the updated offset. s.prepareOffset() return int64(s.offset), nil } // prepareOffset will ensure that the dataSection containing the offset is made // available in the LRU, and that the following dataSection is also available. func (s *stream) prepareOffset() { // Convenience variables. dataSize := s.staticStreamBuffer.staticDataSize dataSectionSize := s.staticStreamBuffer.staticDataSectionSize // If the offset is already at the end of the data, there is nothing to do. if s.offset == dataSize { return } // Update the current data section. The update call will trigger the // streamBuffer to fetch the dataSection if the dataSection is not already // in the streamBuffer cache. index := s.offset / dataSectionSize s.lru.callUpdate(index) // If there is a following data section, update that as well. This update is // done regardless of the minimumLookahead; we always want to buffer at // least one more piece than the current piece. nextIndex := index + 1 if nextIndex*dataSectionSize < dataSize { s.lru.callUpdate(nextIndex) } // Keep adding more pieces to the buffer until we have buffered at least // minimumLookahead total data or have reached the end of the stream. nextIndex++ for i := dataSectionSize * 2; i < minimumLookahead && nextIndex*dataSectionSize < dataSize; i += dataSectionSize { s.lru.callUpdate(nextIndex) nextIndex++ } } // callFetchDataSection will increment the refcount of a dataSection in the // stream buffer. If the dataSection is not currently available in the stream // buffer, the data section will be fetched from the dataSource. func (sb *streamBuffer) callFetchDataSection(index uint64) { sb.mu.Lock() defer sb.mu.Unlock() // Fetch the relevant dataSection, creating a new one if necessary. dataSection, exists := sb.dataSections[index] if !exists { dataSection = sb.newDataSection(index) } // Increment the refcount of the dataSection.
dataSection.refCount++ } // callRemoveDataSection will decrement the refcount of a data section in the // stream buffer. If the refcount reaches zero, the data section will be deleted // from the stream buffer. func (sb *streamBuffer) callRemoveDataSection(index uint64) { sb.mu.Lock() defer sb.mu.Unlock() // Fetch the data section. dataSection, exists := sb.dataSections[index] if !exists { build.Critical("remove called on data section that does not exist") return } // Decrement the refcount. dataSection.refCount-- // Delete the data section if the refcount has fallen to zero. if dataSection.refCount == 0 { delete(sb.dataSections, index) } } // managedPrepareNewStream creates a new stream from an existing stream buffer. // The ref count for the buffer needs to be incremented under the // streamBufferSet lock, before this method is called. func (sb *streamBuffer) managedPrepareNewStream(ctx context.Context, initialOffset uint64, timeout time.Duration) *stream { // Determine how many data sections the stream should cache. dataSectionsToCache := bytesBufferedPerStream / sb.staticDataSectionSize if dataSectionsToCache < minimumDataSections { dataSectionsToCache = minimumDataSections } // Create a stream that points to the stream buffer. stream := &stream{ lru: newLeastRecentlyUsedCache(dataSectionsToCache, sb), offset: initialOffset, staticContext: sb.staticTG.StopCtx(), staticReadTimeout: timeout, staticStreamBuffer: sb, staticSpan: opentracing.SpanFromContext(ctx), } stream.prepareOffset() return stream } // newDataSection will create a new data section for the streamBuffer and spin // up a goroutine to pull the data from the data source. func (sb *streamBuffer) newDataSection(index uint64) *dataSection { // Convenience variables. dataSize := sb.staticDataSize dataSectionSize := sb.staticDataSectionSize // Determine the fetch size for the data section. The fetch size should be // equal to the dataSectionSize unless this is the final section, in which // case the section size should be exactly big enough to request all // remaining bytes. var fetchSize uint64 if (index+1)*dataSectionSize > dataSize { fetchSize = dataSize - (index * dataSectionSize) } else { fetchSize = dataSectionSize } // Create the data section, allocating the right number of bytes for the // ReadAt call to fill out. ds := &dataSection{ dataAvailable: make(chan struct{}), externData: make([]byte, fetchSize), } sb.dataSections[index] = ds // Perform the data fetch in a goroutine. The dataAvailable channel will be // closed when the data is available. go func() { defer close(ds.dataAvailable) // Create a child span for the data section spanRef := opentracing.ChildOf(sb.staticSpan.Context()) span := opentracing.StartSpan("newDataSection", spanRef) span.LogKV("index", index) defer func() { if ds.externErr != nil { span.LogKV("error", ds.externErr) } span.SetTag("success", ds.externErr == nil) span.SetTag("long", ds.externDuration >= longDownloadThreshold) span.Finish() }() // Ensure that the streambuffer has not closed. err := sb.staticTG.Add() if err != nil { ds.externErr = errors.AddContext(err, "stream buffer has been shut down") return } defer sb.staticTG.Done() // Create a context from our span ctx := opentracing.ContextWithSpan(sb.staticTG.StopCtx(), span) // Grab the data from the data source. 
start := time.Now() responseChan := sb.staticDataSource.ReadStream(ctx, index*dataSectionSize, fetchSize, sb.staticPricePerMS) select { case response := <-responseChan: ds.externErr = errors.AddContext(response.staticErr, "data section ReadStream failed") ds.externDuration = time.Since(start) ds.externData = response.staticData if ds.externErr == nil { sb.staticStreamBufferSet.staticStatsCollector.AddDataPoint(ds.externDuration) } case <-sb.staticTG.StopChan(): ds.externErr = errors.AddContext(errTimeout, "failed to read response from ReadStream") } }() return ds } // managedRemoveStream will remove a stream from a stream buffer. If the total // number of streams using that stream buffer reaches zero, the stream buffer // will be removed from the stream buffer set. // // The reference counter for a stream buffer needs to be in the domain of the // stream buffer set because the stream buffer needs to be deleted from the // stream buffer set simultaneously with the reference counter reaching zero. func (sbs *streamBufferSet) managedRemoveStream(sb *streamBuffer) { // Decrement the refcount of the streamBuffer. sbs.mu.Lock() sb.externRefCount-- if sb.externRefCount > 0 { // streamBuffer still in use, nothing to do. sbs.mu.Unlock() return } delete(sbs.streams, sb.staticStreamID) sbs.mu.Unlock() // Close out the streamBuffer and its data source. Calling Stop() will block // any new calls to ReadAt from executing, and will block until all existing // calls are completed. This prevents any issues that could be caused by the // data source being accessed after it has been closed. sb.staticTG.Stop() sb.staticDataSource.SilentClose() }
newStreamBufferSet
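A standalone sketch of the section arithmetic Read() uses above to map a stream offset onto a data section; the sizes are made-up illustrative values:

package main

import "fmt"

func main() {
	const dataSize = uint64(1000)
	const dataSectionSize = uint64(256) // the data source's RequestSize
	offset := uint64(700)

	currentSection := offset / dataSectionSize  // 2 (bytes 512..767)
	offsetInSection := offset % dataSectionSize // 188

	// Bytes readable before crossing into the next section (or EOF).
	var bytesRemaining uint64
	lastSection := (currentSection+1)*dataSectionSize >= dataSize
	if !lastSection {
		bytesRemaining = dataSectionSize - offsetInSection
	} else {
		bytesRemaining = dataSize - offset
	}
	fmt.Println(currentSection, offsetInSection, bytesRemaining) // 2 188 68
}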
modbus_test.go
package modbus import ( "github.com/crabmusket/gosunspec" "github.com/crabmusket/gosunspec/memory" //"github.com/crabmusket/gosunspec/typelabel" "testing" ) func TestModBusSimulator(t *testing.T) { if sim, err := OpenSimulator(memory.ComplexNonZeroSlab, 40000); err != nil { t.Fatal(err) } else { if arr, err := Open(sim); err != nil { t.Fatal(err) } else { arr.Do(func(d sunspec.Device) {}) } } } // TestComplexSlab iterates over all points and checks that they have the expected values. func TestComplexSlab(t *testing.T)
// TestComplexSlabStaggered iterates over all points, reading them in staggered batches, and checks that they have the expected values. func TestComplexSlabStaggered(t *testing.T) { if sim, err := OpenSimulator(memory.ComplexNonZeroSlab, 40000); err != nil { t.Fatal(err) } else { if arr, err := Open(sim); err != nil { t.Fatal(err) } else { count := 0 arr.Do(func(d sunspec.Device) { d.Do(func(m sunspec.Model) { m.Do(func(b sunspec.Block) { // read pairs of adjacent points p := []string{} q := []string{} c := 0 b.Do(func(pt sunspec.Point) { if c%4 < 2 { p = append(p, pt.Id()) } else { q = append(q, pt.Id()) } c++ }) if err := b.Read(p...); err != nil { t.Fatal(err) } if err := b.Read(q...); err != nil { t.Fatal(err) } // check all the values b.Do(func(p sunspec.Point) { if err := p.Error(); err != nil { t.Fatalf("p has error. model=%d, point=%s\n", m.Id(), p.Id()) } if v := p.Value(); v != memory.ExpectedValues[p.Type()] { t.Fatalf("unexpected value. model=%d, point=%s. actual=%#v, expected=%#v. type=%s", m.Id(), p.Id(), v, memory.ExpectedValues[p.Type()], p.Type()) } count++ }) }) }) }) expected := 216 if count != expected { t.Fatalf("unexpected number of points. actual: %d, expected: %d", count, expected) } } } }
{ if sim, err := OpenSimulator(memory.ComplexNonZeroSlab, 40000); err != nil { t.Fatal(err) } else { if arr, err := Open(sim); err != nil { t.Fatal(err) } else { arr.Do(func(d sunspec.Device) { count := 0 d.Do(func(m sunspec.Model) { m.Do(func(b sunspec.Block) { if err := b.Read(); err != nil { t.Fatal(err) } b.Do(func(p sunspec.Point) { if err := p.Error(); err != nil { t.Fatalf("p has error. model=%d, point=%s\n", m.Id(), p.Id()) } if v := p.Value(); v != memory.ExpectedValues[p.Type()] { t.Fatalf("unexpected value. model=%d, point=%s. actual=%#v, expected=%#v. type=%s", m.Id(), p.Id(), v, memory.ExpectedValues[p.Type()], p.Type()) } count++ }) }) }) expected := 216 if count != expected { t.Fatalf("unexpected number of points. actual: %d, expected: %d", count, expected) } }) } } }
sym_res_test_parser.rs
// SPDX-License-Identifier: MIT // Copyright (C) 2018-present iced project and contributors use crate::formatter::enums_shared::SymbolFlags; use crate::formatter::tests::enums::OptionsProps; use crate::formatter::tests::opt_value::OptionValue; use crate::formatter::tests::options_parser::parse_option; use crate::formatter::tests::sym_res_test_case::*; use crate::test_utils::from_str_conv::*; use crate::test_utils::get_default_ip; use crate::MemorySize; use alloc::string::String; use alloc::vec::Vec; use core::iter::IntoIterator; use std::collections::{HashMap, HashSet}; use std::fs::File; use std::io::prelude::*; use std::io::{BufReader, Lines}; use std::path::Path; pub(super) struct SymbolResolverTestParser<'a> { filename: String, lines: Lines<BufReader<File>>, ignored: &'a mut HashSet<u32>, } impl<'a> SymbolResolverTestParser<'a> { pub(super) fn new(filename: &Path, ignored: &'a mut HashSet<u32>) -> Self { let display_filename = filename.display().to_string(); let file = File::open(filename).unwrap_or_else(|_| panic!("Couldn't open file {}", display_filename)); let lines = BufReader::new(file).lines(); Self { filename: display_filename, lines, ignored } } } impl<'a> IntoIterator for SymbolResolverTestParser<'a> { type Item = SymbolResolverTestCase; type IntoIter = IntoIter<'a>; fn into_iter(self) -> Self::IntoIter { // GENERATOR-BEGIN: OptionsDict // ⚠️This was generated by GENERATOR!🦹‍♂️ let mut to_flags: HashMap<&'static str, u32> = HashMap::with_capacity(2); let _ = to_flags.insert("rel", SymbolFlags::RELATIVE); let _ = to_flags.insert("signed", SymbolFlags::SIGNED); // GENERATOR-END: OptionsDict IntoIter { filename: self.filename, lines: self.lines, ignored: self.ignored, test_case_number: 0, line_number: 0, to_flags } } } pub(super) struct IntoIter<'a> { filename: String, lines: Lines<BufReader<File>>, ignored: &'a mut HashSet<u32>, test_case_number: u32, line_number: u32, to_flags: HashMap<&'static str, u32>, } impl Iterator for IntoIter<'_> { type Item = SymbolResolverTestCase; fn next(&mut self) -> Option<Self::Item> { loop {
Iter<'_> { fn read_next_test_case(&self, line: String, line_number: u32) -> Result<Option<SymbolResolverTestCase>, String> { let elems: Vec<_> = line.split(',').collect(); const SYM_RES_INDEX: usize = 4; if elems.len() < SYM_RES_INDEX { return Err(format!("Invalid number of commas: {}", elems.len() - 1)); } let bitness = to_u32(elems[0])?; let ip = get_default_ip(bitness); let hex_bytes = String::from(elems[1].trim()); let _ = to_vec_u8(&hex_bytes)?; if is_ignored_code(elems[2]) { return Ok(None); } let code = to_code(elems[2])?; let mut options: Vec<(OptionsProps, OptionValue)> = Vec::new(); for value in elems[3].split_whitespace() { if value.is_empty() { continue; } options.push(parse_option(value)?); } let mut symbol_results: Vec<SymbolResultTestCase> = Vec::with_capacity(elems.len() - SYM_RES_INDEX); for elem in &elems[SYM_RES_INDEX..] { let sym_parts: Vec<_> = elem.split(';').collect(); if sym_parts.len() != 5 { return Err(format!("Invalid number of semicolons: {}", sym_parts.len() - 1)); } let address = to_u64(sym_parts[0])?; let symbol_address = to_u64(sym_parts[1])?; let address_size = to_u32(sym_parts[2])?; let symbol_parts: Vec<String> = sym_parts[3].split('|').map(String::from).collect(); let mut memory_size: Option<MemorySize> = None; let mut flags = SymbolFlags::NONE; for value in sym_parts[4].split_whitespace() { if value.is_empty() { continue; } if let Some(f) = self.to_flags.get(value) { flags |= *f; } else { memory_size = Some(to_memory_size(value)?); } } symbol_results.push(SymbolResultTestCase { address, symbol_address, address_size, flags, memory_size, symbol_parts }) } let decoder_options = OptionValue::get_decoder_options(&options); Ok(Some(SymbolResolverTestCase { bitness, hex_bytes, ip, decoder_options, line_number, code, options, symbol_results })) } }
let result = match self.lines.next()? { Ok(line) => { self.line_number += 1; if line.is_empty() || line.starts_with('#') { continue; } self.test_case_number += 1; self.read_next_test_case(line, self.line_number) } Err(err) => Err(err.to_string()), }; match result { Ok(tc) => { if let Some(tc) = tc { return Some(tc); } else { let _ = self.ignored.insert(self.test_case_number - 1); continue; } } Err(err) => panic!("Error parsing symbol resolver test case file '{}', line {}: {}", self.filename, self.line_number, err), } } } } impl Into
logger.js
"use strict"; Object.defineProperty(exports, "__esModule", { value: true }); const winston_1 = require("winston"); // Configure the Winston logger. For the complete documentation see https://github.com/winstonjs/winston const logger = winston_1.createLogger({ // To see more detailed errors, change this to 'debug' level: 'info', format: winston_1.format.combine(winston_1.format.splat(), winston_1.format.simple()), transports: [ new winston_1.transports.Console() ],
}); exports.default = logger;
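// A minimal usage sketch (hypothetical call site; the relative import path is an
// assumption, not part of this module):
//   const logger = require('./logger').default;
//   logger.info('listening on port %d', 3000); // format.splat() enables printf-style args
// To see more detailed output, change `level` above from 'info' to 'debug'.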
custom_skinned_mesh.rs
use std::f32::consts::PI; use bevy::{ pbr::AmbientLight, prelude::*, render::{ mesh::Indices, pipeline::{PrimitiveTopology, RenderPipeline}, }, }; /// Skinned mesh example with mesh and joints data defined in code. /// Example taken from https://github.com/KhronosGroup/glTF-Tutorials/blob/master/gltfTutorial/gltfTutorial_019_SimpleSkin.md fn main() { App::new() .add_plugins(DefaultPlugins) .insert_resource(AmbientLight { brightness: 1.0, ..Default::default() }) .add_startup_system(setup.system()) .add_system(joint_animation.system()) .run(); } /// Used to mark a joint to be animated in the [`joint_animation`] system. #[derive(Component)] struct AnimatedJoint; /// Construct a mesh and a skeleton with 2 joints for that mesh, /// and mark the second joint to be animated. /// It is similar to the scene defined in `models/SimpleSkin/SimpleSkin.gltf` fn setup( mut commands: Commands, mut meshes: ResMut<Assets<Mesh>>, mut materials: ResMut<Assets<StandardMaterial>>, mut skinned_mesh_inverse_bindposes_assets: ResMut<Assets<SkinnedMeshInverseBindposes>>, ) { // Create a camera let mut camera = OrthographicCameraBundle::new_2d(); camera.orthographic_projection.near = -1.0; camera.orthographic_projection.far = 1.0; camera.orthographic_projection.scale = 0.005; camera.transform = Transform::from_xyz(0.0, 1.0, 0.0); commands.spawn_bundle(camera); // Create inverse bindpose matrices for a skeleton consisting of 2 joints let inverse_bindposes = skinned_mesh_inverse_bindposes_assets.add(SkinnedMeshInverseBindposes(vec![ Mat4::from_translation(Vec3::new(-0.5, -1.0, 0.0)), Mat4::from_translation(Vec3::new(-0.5, -1.0, 0.0)), ])); // Create a mesh let mut mesh = Mesh::new(PrimitiveTopology::TriangleList); // Set mesh vertex positions mesh.set_attribute( Mesh::ATTRIBUTE_POSITION, vec![ [0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.5, 0.0], [1.0, 0.5, 0.0], [0.0, 1.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.5, 0.0], [1.0, 1.5, 0.0], [0.0, 2.0, 0.0], [1.0, 2.0, 0.0], ], ); // Set mesh vertex normals mesh.set_attribute(Mesh::ATTRIBUTE_NORMAL, vec![[0.0, 0.0, 1.0]; 10]); // Set mesh vertex UVs. Although the mesh doesn't have any texture applied, // UVs are still required by the render pipeline. So these UVs are zeroed out. mesh.set_attribute(Mesh::ATTRIBUTE_UV_0, vec![[0.0, 0.0]; 10]); // Set mesh vertex joint indices for mesh skinning. // Each vertex gets 4 indices used to address the `JointTransforms` array in the vertex shader // as well as `SkinnedMeshJoint` array in the `SkinnedMesh` component. // This means that a maximum of 4 joints can affect a single vertex. mesh.set_attribute( Mesh::ATTRIBUTE_JOINT_INDEX, vec![ [0, 0, 0, 0], [0, 0, 0, 0], [0, 1, 0, 0], [0, 1, 0, 0], [0, 1, 0, 0], [0, 1, 0, 0], [0, 1, 0, 0], [0, 1, 0, 0], [0, 1, 0, 0], [0, 1, 0, 0], ], ); // Set mesh vertex joint weights for mesh skinning. // Each vertex gets 4 joint weights corresponding to the 4 joint indices assigned to it. // The sum of these weights should equal 1. mesh.set_attribute( Mesh::ATTRIBUTE_JOINT_WEIGHT, vec![ [1.00, 0.00, 0.0, 0.0], [1.00, 0.00, 0.0, 0.0], [0.75, 0.25, 0.0, 0.0], [0.75, 0.25, 0.0, 0.0], [0.50, 0.50, 0.0, 0.0], [0.50, 0.50, 0.0, 0.0], [0.25, 0.75, 0.0, 0.0], [0.25, 0.75, 0.0, 0.0], [0.00, 1.00, 0.0, 0.0], [0.00, 1.00, 0.0, 0.0], ], ); // Tell bevy to construct triangles from a list of vertex indices, // where every 3 vertex indices form a triangle.
mesh.set_indices(Some(Indices::U16(vec![ 0, 1, 3, 0, 3, 2, 2, 3, 5, 2, 5, 4, 4, 5, 7, 4, 7, 6, 6, 7, 9, 6, 9, 8, ]))); // Create joint entities let joint_0 = commands .spawn_bundle(( Transform::from_xyz(0.0, 1.0, 0.0), GlobalTransform::identity(), )) .id(); let joint_1 = commands .spawn_bundle(( AnimatedJoint, Transform::identity(), GlobalTransform::identity(), )) .id(); // Set joint_1 as a child of joint_0. commands.entity(joint_0).push_children(&[joint_1]); // Each joint in this vector corresponds to each inverse bindpose matrix in `SkinnedMeshInverseBindposes`. let joint_entities = vec![joint_0, joint_1]; // Create skinned mesh renderer. Note that its transform doesn't affect the position of the mesh. commands .spawn_bundle(PbrBundle { mesh: meshes.add(mesh), material: materials.add(Color::WHITE.into()), render_pipelines: RenderPipelines::from_pipelines(vec![RenderPipeline::new( SKINNED_MESH_PIPELINE_HANDLE.typed(), )]), ..Default::default() }) .insert(SkinnedMesh::new(inverse_bindposes, joint_entities)); } /// Animate the joint marked with [`AnimatedJoint`] component.
0.5 * PI * time.time_since_startup().as_secs_f32().sin(), ); } }
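// A note on the rotation above (a sketch of the math, not part of the original example):
// `0.5 * PI * sin(t)` yields an angle that oscillates within [-PI/2, PI/2] around +Z, so
// the marked joint swings 90 degrees to either side of its rest pose, bending the mesh.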
fn joint_animation(time: Res<Time>, mut query: Query<&mut Transform, With<AnimatedJoint>>) { for mut transform in query.iter_mut() { transform.rotation = Quat::from_axis_angle( Vec3::Z,
funcs.py
import numpy as np import random import loggers as lg from game import Game, GameState from model import Residual_CNN from agent import Agent, User import config def playMatchesBetweenVersions(env, run_version, player1version, player2version, EPISODES, logger, turns_until_tau0, goes_first = 0): if player1version == -1: player1 = User('player1', env.state_size, env.action_size) else: player1_NN = Residual_CNN(config.REG_CONST, config.LEARNING_RATE, env.input_shape, env.action_size, config.HIDDEN_CNN_LAYERS) if player1version > 0: player1_network = player1_NN.read(env.name, run_version, player1version) player1_NN.model.set_weights(player1_network.get_weights()) player1 = Agent('player1', env.state_size, env.action_size, config.p1_MCTS_SIMS, config.CPUCT, player1_NN) if player2version == -1: player2 = User('player2', env.state_size, env.action_size) else: player2_NN = Residual_CNN(config.REG_CONST, config.LEARNING_RATE, env.input_shape, env.action_size, config.HIDDEN_CNN_LAYERS) if player2version > 0: player2_network = player2_NN.read(env.name, run_version, player2version) player2_NN.model.set_weights(player2_network.get_weights()) player2 = Agent('player2', env.state_size, env.action_size, config.p2_MCTS_SIMS, config.CPUCT, player2_NN) scores, memory, points, sp_scores = playMatches(player1, player2, EPISODES, logger, turns_until_tau0, None, goes_first) return (scores, memory, points, sp_scores) def playMatches(player1, player2, EPISODES, logger, turns_until_tau0, memory = None, goes_first = 0): env = Game() scores = {player1.name:0, "drawn": 0, player2.name:0} sp_scores = {'sp':0, "drawn": 0, 'nsp':0} points = {player1.name:[], player2.name:[]} for e in range(EPISODES): logger.info('====================') logger.info('EPISODE %d OF %d', e+1, EPISODES) logger.info('====================') print (str(e+1) + ' ', end='') state = env.reset() done = 0 turn = 0 player1.mcts = None player2.mcts = None if goes_first == 0: player1Starts = random.randint(0,1) * 2 - 1 else:
if player1Starts == 1: players = {1:{"agent": player1, "name":player1.name} , -1: {"agent": player2, "name":player2.name} } logger.info(player1.name + ' plays as X') else: players = {1:{"agent": player2, "name":player2.name} , -1: {"agent": player1, "name":player1.name} } logger.info(player2.name + ' plays as X') logger.info('--------------') env.gameState.render(logger) while done == 0: turn = turn + 1 #### Run the MCTS algo and return an action if turn < turns_until_tau0: action, pi, MCTS_value, NN_value = players[state.playerTurn]['agent'].act(state, 1) else: action, pi, MCTS_value, NN_value = players[state.playerTurn]['agent'].act(state, 0) if memory != None: ####Commit the move to memory memory.commit_stmemory(env.identities, state, pi) logger.info('action: %d', action) for r in range(env.grid_shape[0]): logger.info(['----' if x == 0 else '{0:.2f}'.format(np.round(x,2)) for x in pi[env.grid_shape[1]*r : (env.grid_shape[1]*r + env.grid_shape[1])]]) logger.info('MCTS perceived value for %s: %f', state.pieces[str(state.playerTurn)] ,np.round(MCTS_value,2)) logger.info('NN perceived value for %s: %f', state.pieces[str(state.playerTurn)] ,np.round(NN_value,2)) logger.info('====================') ### Do the action state, value, done, _ = env.step(action) #the value of the newState from the POV of the new playerTurn i.e. -1 if the previous player played a winning move env.gameState.render(logger) if done == 1: if memory != None: #### If the game is finished, assign the values correctly to the game moves for move in memory.stmemory: if move['playerTurn'] == state.playerTurn: move['value'] = value else: move['value'] = -value memory.commit_ltmemory() if value == 1: logger.info('%s WINS!', players[state.playerTurn]['name']) scores[players[state.playerTurn]['name']] = scores[players[state.playerTurn]['name']] + 1 if state.playerTurn == 1: sp_scores['sp'] = sp_scores['sp'] + 1 else: sp_scores['nsp'] = sp_scores['nsp'] + 1 elif value == -1: logger.info('%s WINS!', players[-state.playerTurn]['name']) scores[players[-state.playerTurn]['name']] = scores[players[-state.playerTurn]['name']] + 1 if state.playerTurn == 1: sp_scores['nsp'] = sp_scores['nsp'] + 1 else: sp_scores['sp'] = sp_scores['sp'] + 1 else: logger.info('DRAW...') scores['drawn'] = scores['drawn'] + 1 sp_scores['drawn'] = sp_scores['drawn'] + 1 pts = state.score points[players[state.playerTurn]['name']].append(pts[0]) points[players[-state.playerTurn]['name']].append(pts[1]) return (scores, memory, points, sp_scores)
player1Starts = goes_first
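# A minimal driver sketch (hypothetical; `env`, `run_version`, and `lg.logger_tourney`
# are assumptions about the surrounding project, not defined in this file):
#   scores, _, points, sp_scores = playMatchesBetweenVersions(
#       env, run_version=1, player1version=3, player2version=4,
#       EPISODES=10, logger=lg.logger_tourney, turns_until_tau0=0)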
zz_generated_register.go
/* Copyright 2020 Rancher Labs, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Code generated by main. DO NOT EDIT. // +k8s:deepcopy-gen=package // +groupName=clusterscan-operator.cattle.io package v1 import ( clusterscanoperator "github.com/prachidamle/clusterscan-operator/pkg/apis/clusterscan-operator.cattle.io" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" ) var ( ClusterScanResourceName = "clusterscans" ClusterScanProfileResourceName = "clusterscanprofiles" ) // SchemeGroupVersion is group version used to register these objects var SchemeGroupVersion = schema.GroupVersion{Group: clusterscanoperator.GroupName, Version: "v1"} // Kind takes an unqualified kind and returns back a Group qualified GroupKind func Kind(kind string) schema.GroupKind { return SchemeGroupVersion.WithKind(kind).GroupKind() } // Resource takes an unqualified resource and returns a Group qualified GroupResource func Resource(resource string) schema.GroupResource { return SchemeGroupVersion.WithResource(resource).GroupResource() } var ( SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) AddToScheme = SchemeBuilder.AddToScheme ) // Adds the list of known types to Scheme. func
(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &ClusterScan{}, &ClusterScanList{}, &ClusterScanProfile{}, &ClusterScanProfileList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil }
addKnownTypes
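// A minimal usage sketch (hypothetical caller; not part of the generated file):
//   scheme := runtime.NewScheme()
//   if err := v1.AddToScheme(scheme); err != nil {
//       panic(err)
//   }
// after which ClusterScan and ClusterScanProfile objects round-trip through the scheme.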
forms.py
# Model Imports from .models import PollVote, Poll, PollChoice, Post, post_content_types, PostContent # Util Imports from .utils import PollVoteUtil import base64 from django.core.files.base import ContentFile # Library Imports from datetime import datetime, timedelta from django.utils import timezone from django.forms import ( Form, ModelForm, BooleanField, CharField, ValidationError, MultipleChoiceField, ) class PollVoteForm(ModelForm): class Meta: model = PollVote fields = ("poll", "poll_choice") def __init__(self, *args, **kwargs): self.request = kwargs.pop("request") super(PollVoteForm, self).__init__(*args, **kwargs) self.fields["poll"].error_messages["required"] = "poll is required." self.fields["poll_choice"].error_messages[ "required" ] = "poll_choice is required." def clean(self): poll = self.cleaned_data.get("poll", None) poll_choice = self.cleaned_data.get("poll_choice", None) if poll_choice is not None and poll_choice.poll != poll: raise ValidationError("Invalid poll choice.") if poll is not None and poll.post.user == self.request.user: raise ValidationError("You can't vote to poll.") if PollVoteUtil.is_poll_vote_exist(poll, self.request.user): raise ValidationError("Already voted to poll.") def save(self, commit=True): poll = self.cleaned_data.get("poll", None) poll_choice = self.cleaned_data.get("poll_choice", None) poll_vote = PollVote.objects.create( user=self.request.user, poll=poll, poll_choice=poll_choice ) return poll_vote class PostForm(ModelForm): post_content_type = CharField(required=True) class Meta: model = Post fields = ("team", "caption", "post_content_type") def __init__(self, *args, **kwargs): super(PostForm, self).__init__(*args, **kwargs) self.fields["team"].error_messages["required"] = "team is required." self.fields["team"].error_messages["invalid_choice"] = "Provide valid team." self.fields["post_content_type"].error_messages[ "required" ] = "post_content_type is required." def clean(self): post_content_type = self.cleaned_data.get("post_content_type", None) caption = self.cleaned_data.get("caption", None) if ( post_content_type is not None and post_content_type not in post_content_types ): raise ValidationError("Invalid post_content_type.") if ( post_content_type is not None and post_content_type == "text" and (caption is None or caption == "") ): raise ValidationError("caption is required.") def save(self, commit=True): post = super(PostForm, self).save(commit=commit) return post class PostContentForm(ModelForm): cover = CharField(required=False) class Meta: model = PostContent fields = ("body", "cover", "post_content_type", "photo_original") def clean(self): cover = self.cleaned_data.get("cover", None) filename = self.cleaned_data.get("filename", None) post_content_type = self.cleaned_data.get("post_content_type", None) def save(self, commit=True): post_content = super(PostContentForm, self).save(commit=commit) cover = self.cleaned_data.get("cover", None) if cover is not None and cover != "": format, imgstr = cover.split(";base64,") ext = format.split("/")[-1] cover = ContentFile(base64.b64decode(imgstr), name="post_cover." + ext) post_content.cover = cover post_content.cover_original = cover return post_content class
(ModelForm): class Meta: model = Poll fields = ("question", "end_at") def __init__(self, *args, **kwargs): self.choices = kwargs.pop("choices") super(PollForm, self).__init__(*args, **kwargs) self.fields["question"].error_messages["required"] = "question is required." self.fields["end_at"].error_messages["required"] = "end_at is required." def clean(self): end_at = self.cleaned_data.get("end_at", None) if end_at is not None and end_at < (timezone.now() - timedelta(minutes=1)): raise ValidationError("end_at must be a future date time.") if self.choices is None: raise ValidationError("Choices are required.") def save(self, commit=True): poll = super(PollForm, self).save(commit=commit) return poll
PollForm
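# A minimal usage sketch (hypothetical view code; the field values are assumptions):
#   form = PollForm(
#       {"question": "Best language?", "end_at": timezone.now() + timedelta(days=1)},
#       choices=["Rust", "Go"],
#   )
#   if form.is_valid():
#       poll = form.save()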
preprocess.go
// Copyright 2015 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package core import ( "context" "fmt" "math" "strings" "github.com/pingcap/errors" "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/ddl" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/meta/autoid" "github.com/pingcap/tidb/parser" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/format" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/parser/terror" "github.com/pingcap/tidb/privilege" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/sessiontxn" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/table/temptable" "github.com/pingcap/tidb/types" driver "github.com/pingcap/tidb/types/parser_driver" "github.com/pingcap/tidb/util" "github.com/pingcap/tidb/util/domainutil" utilparser "github.com/pingcap/tidb/util/parser" ) // PreprocessOpt provides optional parameters to the `Preprocess` method. type PreprocessOpt func(*preprocessor) // InPrepare is a PreprocessOpt that indicates preprocess is executing under a prepare statement. func InPrepare(p *preprocessor) { p.flag |= inPrepare } // InTxnRetry is a PreprocessOpt that indicates preprocess is executing under a transaction retry. func InTxnRetry(p *preprocessor) { p.flag |= inTxnRetry } // InitTxnContextProvider is a PreprocessOpt that indicates preprocess should init the transaction's context func InitTxnContextProvider(p *preprocessor) { p.flag |= initTxnContextProvider } // WithPreprocessorReturn returns a PreprocessOpt to initialize the PreprocessorReturn. func WithPreprocessorReturn(ret *PreprocessorReturn) PreprocessOpt { return func(p *preprocessor) { p.PreprocessorReturn = ret } } // WithExecuteInfoSchemaUpdate returns a PreprocessOpt to update the `Execute` infoSchema under some conditions. func WithExecuteInfoSchemaUpdate(pe *PreprocessExecuteISUpdate) PreprocessOpt { return func(p *preprocessor) { p.PreprocessExecuteISUpdate = pe } } // TryAddExtraLimit tries to add an extra limit for SELECT or UNION statements when sql_select_limit is set.
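// For example (a sketch of the effect, with a hypothetical limit value): with
// sql_select_limit = 100, `SELECT * FROM t` becomes `SELECT * FROM t LIMIT 100`,
// while statements that already carry a LIMIT or use SELECT ... INTO are left unchanged.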
func TryAddExtraLimit(ctx sessionctx.Context, node ast.StmtNode) ast.StmtNode { if ctx.GetSessionVars().SelectLimit == math.MaxUint64 || ctx.GetSessionVars().InRestrictedSQL { return node } if explain, ok := node.(*ast.ExplainStmt); ok { explain.Stmt = TryAddExtraLimit(ctx, explain.Stmt) return explain } else if sel, ok := node.(*ast.SelectStmt); ok { if sel.Limit != nil || sel.SelectIntoOpt != nil { return node } newSel := *sel newSel.Limit = &ast.Limit{ Count: ast.NewValueExpr(ctx.GetSessionVars().SelectLimit, "", ""), } return &newSel } else if setOprStmt, ok := node.(*ast.SetOprStmt); ok { if setOprStmt.Limit != nil { return node } newSetOpr := *setOprStmt newSetOpr.Limit = &ast.Limit{ Count: ast.NewValueExpr(ctx.GetSessionVars().SelectLimit, "", ""), } return &newSetOpr } return node } // Preprocess resolves table names of the node, and checks some statements' validation. // preprocessReturn used to extract the infoschema for the tableName and the timestamp from the asof clause. func Preprocess(ctx sessionctx.Context, node ast.Node, preprocessOpt ...PreprocessOpt) error { v := preprocessor{ctx: ctx, tableAliasInJoin: make([]map[string]interface{}, 0), withName: make(map[string]interface{})} for _, optFn := range preprocessOpt { optFn(&v) } // PreprocessorReturn must be non-nil before preprocessing if v.PreprocessorReturn == nil { v.PreprocessorReturn = &PreprocessorReturn{} } node.Accept(&v) // InfoSchema must be non-nil after preprocessing v.ensureInfoSchema() v.initTxnContextProviderIfNecessary(node) return errors.Trace(v.err) } type preprocessorFlag uint8 const ( // inPrepare is set when visiting in prepare statement. inPrepare preprocessorFlag = 1 << iota // inTxnRetry is set when visiting in transaction retry. inTxnRetry // inCreateOrDropTable is set when visiting create/drop table statement. inCreateOrDropTable // parentIsJoin is set when visiting node's parent is join. parentIsJoin // inRepairTable is set when visiting a repair table statement. inRepairTable // inSequenceFunction is set when visiting a sequence function. // This flag indicates the tableName in these function should be checked as sequence object. inSequenceFunction // initTxnContextProvider is set when we should init txn context in preprocess initTxnContextProvider ) // Make linter happy. var _ = PreprocessorReturn{}.initedLastSnapshotTS // PreprocessorReturn is used to retain information obtained in the preprocessor. type PreprocessorReturn struct { initedLastSnapshotTS bool IsStaleness bool SnapshotTSEvaluator func(sessionctx.Context) (uint64, error) // LastSnapshotTS is the last evaluated snapshotTS if any // otherwise it defaults to zero LastSnapshotTS uint64 InfoSchema infoschema.InfoSchema ReadReplicaScope string } // PreprocessExecuteISUpdate is used to update information schema for special Execute statement in the preprocessor. type PreprocessExecuteISUpdate struct { ExecuteInfoSchemaUpdate func(node ast.Node, sctx sessionctx.Context) infoschema.InfoSchema Node ast.Node } // preprocessor is an ast.Visitor that preprocess // ast Nodes parsed from parser. type preprocessor struct { ctx sessionctx.Context flag preprocessorFlag stmtTp byte showTp ast.ShowStmtType // tableAliasInJoin is a stack that keeps the table alias names for joins. 
// len(tableAliasInJoin) may be bigger than 1 because the left/right child of a join may be a subquery that contains `JOIN` tableAliasInJoin []map[string]interface{} withName map[string]interface{} // values that may be returned *PreprocessorReturn *PreprocessExecuteISUpdate err error } func (p *preprocessor) Enter(in ast.Node) (out ast.Node, skipChildren bool) { switch node := in.(type) { case *ast.AdminStmt: p.checkAdminCheckTableGrammar(node) case *ast.DeleteStmt: p.stmtTp = TypeDelete case *ast.SelectStmt: p.stmtTp = TypeSelect case *ast.UpdateStmt: p.stmtTp = TypeUpdate case *ast.InsertStmt: p.stmtTp = TypeInsert // handle the insert table name immediately // for `insert into t with t ...`, the insert cannot see t here. We should handle it before the CTE statement p.handleTableName(node.Table.TableRefs.Left.(*ast.TableSource).Source.(*ast.TableName)) case *ast.ExecuteStmt: p.stmtTp = TypeExecute p.resolveExecuteStmt(node) case *ast.CreateTableStmt: p.stmtTp = TypeCreate p.flag |= inCreateOrDropTable p.resolveCreateTableStmt(node) p.checkCreateTableGrammar(node) case *ast.CreateViewStmt: p.stmtTp = TypeCreate p.flag |= inCreateOrDropTable p.checkCreateViewGrammar(node) p.checkCreateViewWithSelectGrammar(node) case *ast.DropTableStmt: p.flag |= inCreateOrDropTable p.stmtTp = TypeDrop p.checkDropTableGrammar(node) case *ast.RenameTableStmt: p.stmtTp = TypeRename p.flag |= inCreateOrDropTable p.checkRenameTableGrammar(node) case *ast.CreateIndexStmt: p.stmtTp = TypeCreate p.checkCreateIndexGrammar(node) case *ast.AlterTableStmt: p.stmtTp = TypeAlter p.resolveAlterTableStmt(node) p.checkAlterTableGrammar(node) case *ast.CreateDatabaseStmt: p.stmtTp = TypeCreate p.checkCreateDatabaseGrammar(node) case *ast.AlterDatabaseStmt: p.stmtTp = TypeAlter p.checkAlterDatabaseGrammar(node) case *ast.DropDatabaseStmt: p.stmtTp = TypeDrop p.checkDropDatabaseGrammar(node) case *ast.ShowStmt: p.stmtTp = TypeShow p.showTp = node.Tp p.resolveShowStmt(node) case *ast.SetOprSelectList: p.checkSetOprSelectList(node) case *ast.DeleteTableList: p.stmtTp = TypeDelete return in, true case *ast.Join: p.checkNonUniqTableAlias(node) case *ast.CreateBindingStmt: p.stmtTp = TypeCreate EraseLastSemicolon(node.OriginNode) EraseLastSemicolon(node.HintedNode) p.checkBindGrammar(node.OriginNode, node.HintedNode, p.ctx.GetSessionVars().CurrentDB) return in, true case *ast.DropBindingStmt: p.stmtTp = TypeDrop EraseLastSemicolon(node.OriginNode) if node.HintedNode != nil { EraseLastSemicolon(node.HintedNode) p.checkBindGrammar(node.OriginNode, node.HintedNode, p.ctx.GetSessionVars().CurrentDB) } return in, true case *ast.RecoverTableStmt, *ast.FlashBackTableStmt: // The specified table in a recover table statement may already have been dropped. // So skip checking the table name here; otherwise, the recover table [table_name] syntax would return a // table-not-exists error. But the recover table statement is used to recover the dropped table, so skip children here. return in, true case *ast.RepairTableStmt: p.stmtTp = TypeRepair // The RepairTable should consist of the logic for creating tables and renaming tables.
p.flag |= inRepairTable p.checkRepairTableGrammar(node) case *ast.CreateSequenceStmt: p.stmtTp = TypeCreate p.flag |= inCreateOrDropTable p.resolveCreateSequenceStmt(node) case *ast.DropSequenceStmt: p.stmtTp = TypeDrop p.flag |= inCreateOrDropTable p.checkDropSequenceGrammar(node) case *ast.FuncCastExpr: p.checkFuncCastExpr(node) case *ast.FuncCallExpr: if node.FnName.L == ast.NextVal || node.FnName.L == ast.LastVal || node.FnName.L == ast.SetVal { p.flag |= inSequenceFunction } case *ast.BRIEStmt: if node.Kind == ast.BRIEKindRestore { p.flag |= inCreateOrDropTable } case *ast.TableSource: isModeOracle := p.ctx.GetSessionVars().SQLMode&mysql.ModeOracle != 0 if _, ok := node.Source.(*ast.SelectStmt); ok && !isModeOracle && len(node.AsName.L) == 0 { p.err = ddl.ErrDerivedMustHaveAlias.GenWithStackByArgs() } if v, ok := node.Source.(*ast.TableName); ok && v.TableSample != nil { switch v.TableSample.SampleMethod { case ast.SampleMethodTypeTiDBRegion: default: p.err = expression.ErrInvalidTableSample.GenWithStackByArgs("Only supports REGIONS sampling method") } } case *ast.GroupByClause: p.checkGroupBy(node) case *ast.WithClause: for _, cte := range node.CTEs { p.withName[cte.Name.L] = struct{}{} } case *ast.BeginStmt: // If the begin statement was like the following: // start transaction read only as of timestamp .... // then we need to set StmtCtx.IsStaleness to true in order to avoid taking a TSO in PrepareTSFuture. if node.AsOf != nil { p.ctx.GetSessionVars().StmtCtx.IsStaleness = true p.IsStaleness = true } else if p.ctx.GetSessionVars().TxnReadTS.PeakTxnReadTS() > 0 { // If the begin statement was like the following: // set transaction read only as of timestamp ... // begin // then we need to set StmtCtx.IsStaleness to true in order to avoid taking a TSO in PrepareTSFuture. p.ctx.GetSessionVars().StmtCtx.IsStaleness = true p.IsStaleness = true } default: p.flag &= ^parentIsJoin } return in, p.err != nil } // EraseLastSemicolon removes the last semicolon of the sql. func EraseLastSemicolon(stmt ast.StmtNode) { sql := stmt.Text() if len(sql) > 0 && sql[len(sql)-1] == ';' { stmt.SetText(nil, sql[:len(sql)-1]) } } // EraseLastSemicolonInSQL removes the last semicolon of the SQL. func EraseLastSemicolonInSQL(sql string) string { if len(sql) > 0 && sql[len(sql)-1] == ';' { return sql[:len(sql)-1] } return sql } const ( // TypeInvalid for unexpected types. TypeInvalid byte = iota // TypeSelect for SelectStmt. TypeSelect // TypeSetOpr for SetOprStmt. TypeSetOpr // TypeDelete for DeleteStmt. TypeDelete // TypeUpdate for UpdateStmt. TypeUpdate // TypeInsert for InsertStmt. TypeInsert // TypeDrop for DropStmt TypeDrop // TypeCreate for CreateStmt TypeCreate // TypeAlter for AlterStmt TypeAlter // TypeRename for RenameStmt TypeRename // TypeRepair for RepairStmt TypeRepair // TypeShow for ShowStmt TypeShow // TypeExecute for ExecuteStmt TypeExecute ) func bindableStmtType(node ast.StmtNode) byte { switch node.(type) { case *ast.SelectStmt: return TypeSelect case *ast.SetOprStmt: return TypeSetOpr case *ast.DeleteStmt: return TypeDelete case *ast.UpdateStmt: return TypeUpdate case *ast.InsertStmt: return TypeInsert } return TypeInvalid } func (p *preprocessor) tableByName(tn *ast.TableName) (table.Table, error) { currentDB := p.ctx.GetSessionVars().CurrentDB if tn.Schema.String() != "" { currentDB = tn.Schema.L } if currentDB == "" { return nil, errors.Trace(ErrNoDB) } sName := model.NewCIStr(currentDB) is := p.ensureInfoSchema() // for 'SHOW CREATE VIEW/SEQUENCE ...' statements, ignore local temporary tables.
if p.stmtTp == TypeShow && (p.showTp == ast.ShowCreateView || p.showTp == ast.ShowCreateSequence) { is = temptable.DetachLocalTemporaryTableInfoSchema(is) } tbl, err := is.TableByName(sName, tn.Name) if err != nil { // We should never leak that the table doesn't exist (i.e. attach ErrTableNotExists) // unless we know that the user has permissions to it, should it exist. // By checking here, this makes all SELECT/SHOW/INSERT/UPDATE/DELETE statements safe. currentUser, activeRoles := p.ctx.GetSessionVars().User, p.ctx.GetSessionVars().ActiveRoles if pm := privilege.GetPrivilegeManager(p.ctx); pm != nil { if !pm.RequestVerification(activeRoles, sName.L, tn.Name.O, "", mysql.AllPrivMask) { u := currentUser.Username h := currentUser.Hostname if currentUser.AuthHostname != "" { u = currentUser.AuthUsername h = currentUser.AuthHostname } return nil, ErrTableaccessDenied.GenWithStackByArgs(p.stmtType(), u, h, tn.Name.O) } } return nil, err } return tbl, err } func (p *preprocessor) checkBindGrammar(originNode, hintedNode ast.StmtNode, defaultDB string) { origTp := bindableStmtType(originNode) hintedTp := bindableStmtType(hintedNode) if origTp == TypeInvalid || hintedTp == TypeInvalid { p.err = errors.Errorf("create binding doesn't support this type of query") return } if origTp != hintedTp { p.err = errors.Errorf("hinted sql and original sql have different query types") return } if origTp == TypeInsert { origInsert, hintedInsert := originNode.(*ast.InsertStmt), hintedNode.(*ast.InsertStmt) if origInsert.Select == nil || hintedInsert.Select == nil { p.err = errors.Errorf("create binding only supports INSERT / REPLACE INTO SELECT") return } } // Check that the bind operation is not on any temporary table. tblNames := extractTableList(originNode, nil, false) for _, tn := range tblNames { tbl, err := p.tableByName(tn) if err != nil { // If the operation order is: drop table -> drop binding, // the table doesn't exist, and that is not an error.
if terror.ErrorEqual(err, infoschema.ErrTableNotExists) { continue } p.err = err return } if tbl.Meta().TempTableType != model.TempTableNone { p.err = ddl.ErrOptOnTemporaryTable.GenWithStackByArgs("create binding") return } tableInfo := tbl.Meta() dbInfo, _ := p.ensureInfoSchema().SchemaByTable(tableInfo) tn.TableInfo = tableInfo tn.DBInfo = dbInfo } originSQL := parser.Normalize(utilparser.RestoreWithDefaultDB(originNode, defaultDB, originNode.Text())) hintedSQL := parser.Normalize(utilparser.RestoreWithDefaultDB(hintedNode, defaultDB, hintedNode.Text())) if originSQL != hintedSQL { p.err = errors.Errorf("hinted sql and origin sql don't match when hinted sql erase the hint info, after erase hint info, originSQL:%s, hintedSQL:%s", originSQL, hintedSQL) } } func (p *preprocessor) Leave(in ast.Node) (out ast.Node, ok bool) { switch x := in.(type) { case *ast.CreateTableStmt: p.flag &= ^inCreateOrDropTable p.checkAutoIncrement(x) p.checkContainDotColumn(x) case *ast.CreateViewStmt: p.flag &= ^inCreateOrDropTable case *ast.DropTableStmt, *ast.AlterTableStmt, *ast.RenameTableStmt: p.flag &= ^inCreateOrDropTable case *driver.ParamMarkerExpr: if p.flag&inPrepare == 0 { p.err = parser.ErrSyntax.GenWithStack("syntax error, unexpected '?'") return } case *ast.ExplainStmt: if _, ok := x.Stmt.(*ast.ShowStmt); ok { break } valid := false for i, length := 0, len(types.ExplainFormats); i < length; i++ { if strings.ToLower(x.Format) == types.ExplainFormats[i] { valid = true break } } if !valid { p.err = ErrUnknownExplainFormat.GenWithStackByArgs(x.Format) } case *ast.TableName: p.handleTableName(x) case *ast.Join: if len(p.tableAliasInJoin) > 0 { p.tableAliasInJoin = p.tableAliasInJoin[:len(p.tableAliasInJoin)-1] } case *ast.FuncCallExpr: // The arguments for builtin NAME_CONST should be constants // See https://dev.mysql.com/doc/refman/5.7/en/miscellaneous-functions.html#function_name-const for details if x.FnName.L == ast.NameConst { if len(x.Args) != 2 { p.err = expression.ErrIncorrectParameterCount.GenWithStackByArgs(x.FnName.L) } else { _, isValueExpr1 := x.Args[0].(*driver.ValueExpr) isValueExpr2 := false switch x.Args[1].(type) { case *driver.ValueExpr, *ast.UnaryOperationExpr: isValueExpr2 = true } if !isValueExpr1 || !isValueExpr2 { p.err = ErrWrongArguments.GenWithStackByArgs("NAME_CONST") } } break } // No need to sleep when retrying a transaction; avoid the unexpected sleep a retry would otherwise cause.
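// For example (a sketch of the effect): a replayed `SELECT SLEEP(5)` is rewritten
// below to the equivalent of `SELECT SLEEP(0)` so the retry does not pause again.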
if p.flag&inTxnRetry > 0 && x.FnName.L == ast.Sleep { if len(x.Args) == 1 { x.Args[0] = ast.NewValueExpr(0, "", "") } } if x.FnName.L == ast.NextVal || x.FnName.L == ast.LastVal || x.FnName.L == ast.SetVal { p.flag &= ^inSequenceFunction } case *ast.RepairTableStmt: p.flag &= ^inRepairTable case *ast.CreateSequenceStmt: p.flag &= ^inCreateOrDropTable case *ast.BRIEStmt: if x.Kind == ast.BRIEKindRestore { p.flag &= ^inCreateOrDropTable } } return in, p.err == nil } func checkAutoIncrementOp(colDef *ast.ColumnDef, index int) (bool, error) { var hasAutoIncrement bool if colDef.Options[index].Tp == ast.ColumnOptionAutoIncrement { hasAutoIncrement = true if len(colDef.Options) == index+1 { return hasAutoIncrement, nil } for _, op := range colDef.Options[index+1:] { if op.Tp == ast.ColumnOptionDefaultValue { if tmp, ok := op.Expr.(*driver.ValueExpr); ok { if !tmp.Datum.IsNull() { return hasAutoIncrement, errors.Errorf("Invalid default value for '%s'", colDef.Name.Name.O) } } } } } if colDef.Options[index].Tp == ast.ColumnOptionDefaultValue && len(colDef.Options) != index+1 { if tmp, ok := colDef.Options[index].Expr.(*driver.ValueExpr); ok { if tmp.Datum.IsNull() { return hasAutoIncrement, nil } } for _, op := range colDef.Options[index+1:] { if op.Tp == ast.ColumnOptionAutoIncrement { return hasAutoIncrement, errors.Errorf("Invalid default value for '%s'", colDef.Name.Name.O) } } } return hasAutoIncrement, nil } func isConstraintKeyTp(constraints []*ast.Constraint, colDef *ast.ColumnDef) bool { for _, c := range constraints { if c.Keys[0].Expr != nil { continue } // If the constraint is as follows: primary key(c1, c2), // only the c1 column is allowed to be auto_increment. if colDef.Name.Name.L != c.Keys[0].Column.Name.L { continue } switch c.Tp { case ast.ConstraintPrimaryKey, ast.ConstraintKey, ast.ConstraintIndex, ast.ConstraintUniq, ast.ConstraintUniqIndex, ast.ConstraintUniqKey: return true } } return false } func (p *preprocessor) checkAutoIncrement(stmt *ast.CreateTableStmt) { autoIncrementCols := make(map[*ast.ColumnDef]bool) for _, colDef := range stmt.Cols { var hasAutoIncrement bool var isKey bool for i, op := range colDef.Options { ok, err := checkAutoIncrementOp(colDef, i) if err != nil { p.err = err return } if ok { hasAutoIncrement = true } switch op.Tp { case ast.ColumnOptionPrimaryKey, ast.ColumnOptionUniqKey: isKey = true } } if hasAutoIncrement { autoIncrementCols[colDef] = isKey } } if len(autoIncrementCols) < 1 { return } if len(autoIncrementCols) > 1 { p.err = autoid.ErrWrongAutoKey.GenWithStackByArgs() return } // At this point there is exactly one auto_increment col. for col, isKey := range autoIncrementCols { if !isKey { isKey = isConstraintKeyTp(stmt.Constraints, col) } autoIncrementMustBeKey := true for _, opt := range stmt.Options { if opt.Tp == ast.TableOptionEngine && strings.EqualFold(opt.StrValue, "MyISAM") { autoIncrementMustBeKey = false } } if autoIncrementMustBeKey && !isKey { p.err = autoid.ErrWrongAutoKey.GenWithStackByArgs() } switch col.Tp.Tp { case mysql.TypeTiny, mysql.TypeShort, mysql.TypeLong, mysql.TypeFloat, mysql.TypeDouble, mysql.TypeLonglong, mysql.TypeInt24: default: p.err = errors.Errorf("Incorrect column specifier for column '%s'", col.Name.Name.O) } } } // checkSetOprSelectList checks union's selectList. // refer: https://dev.mysql.com/doc/refman/5.7/en/union.html // https://mariadb.com/kb/en/intersect/ // https://mariadb.com/kb/en/except/ // "To apply ORDER BY or LIMIT to an individual SELECT, place the clause inside the parentheses that enclose the SELECT."
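// For example (hypothetical statements): `SELECT a FROM t ORDER BY a UNION SELECT b FROM s`
// is rejected with ErrWrongUsage, while `(SELECT a FROM t ORDER BY a) UNION SELECT b FROM s`
// is accepted because the ORDER BY sits inside braces.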
func (p *preprocessor) checkSetOprSelectList(stmt *ast.SetOprSelectList) { for _, sel := range stmt.Selects[:len(stmt.Selects)-1] { switch s := sel.(type) { case *ast.SelectStmt: if s.SelectIntoOpt != nil { p.err = ErrWrongUsage.GenWithStackByArgs("UNION", "INTO") return } if s.IsInBraces { continue } if s.Limit != nil { p.err = ErrWrongUsage.GenWithStackByArgs("UNION", "LIMIT") return } if s.OrderBy != nil { p.err = ErrWrongUsage.GenWithStackByArgs("UNION", "ORDER BY") return } case *ast.SetOprSelectList: p.checkSetOprSelectList(s) } } } func (p *preprocessor) checkCreateDatabaseGrammar(stmt *ast.CreateDatabaseStmt) { if isIncorrectName(stmt.Name) { p.err = ddl.ErrWrongDBName.GenWithStackByArgs(stmt.Name) } } func (p *preprocessor) checkAlterDatabaseGrammar(stmt *ast.AlterDatabaseStmt) { // for 'ALTER DATABASE' statement, database name can be empty to alter default database. if isIncorrectName(stmt.Name) && !stmt.AlterDefaultDatabase { p.err = ddl.ErrWrongDBName.GenWithStackByArgs(stmt.Name) } } func (p *preprocessor) checkDropDatabaseGrammar(stmt *ast.DropDatabaseStmt) { if isIncorrectName(stmt.Name) { p.err = ddl.ErrWrongDBName.GenWithStackByArgs(stmt.Name) } } func (p *preprocessor) checkAdminCheckTableGrammar(stmt *ast.AdminStmt) { for _, table := range stmt.Tables { tableInfo, err := p.tableByName(table) if err != nil { p.err = err return } tempTableType := tableInfo.Meta().TempTableType if (stmt.Tp == ast.AdminCheckTable || stmt.Tp == ast.AdminChecksumTable || stmt.Tp == ast.AdminCheckIndex) && tempTableType != model.TempTableNone { if stmt.Tp == ast.AdminChecksumTable { p.err = ErrOptOnTemporaryTable.GenWithStackByArgs("admin checksum table") } else if stmt.Tp == ast.AdminCheckTable { p.err = ErrOptOnTemporaryTable.GenWithStackByArgs("admin check table") } else { p.err = ErrOptOnTemporaryTable.GenWithStackByArgs("admin check index") } return } } } func (p *preprocessor) checkCreateTableGrammar(stmt *ast.CreateTableStmt) { if stmt.ReferTable != nil { schema := model.NewCIStr(p.ctx.GetSessionVars().CurrentDB) if stmt.ReferTable.Schema.String() != "" { schema = stmt.ReferTable.Schema } // get the infoschema from the context. 
tableInfo, err := p.ensureInfoSchema().TableByName(schema, stmt.ReferTable.Name) if err != nil { p.err = err return } tableMetaInfo := tableInfo.Meta() if tableMetaInfo.TempTableType != model.TempTableNone { p.err = ErrOptOnTemporaryTable.GenWithStackByArgs("create table like") return } if stmt.TemporaryKeyword != ast.TemporaryNone { err := checkReferInfoForTemporaryTable(tableMetaInfo) if err != nil { p.err = err return } } } if stmt.TemporaryKeyword != ast.TemporaryNone { for _, opt := range stmt.Options { switch opt.Tp { case ast.TableOptionShardRowID: p.err = ErrOptOnTemporaryTable.GenWithStackByArgs("shard_row_id_bits") return case ast.TableOptionPlacementPolicy: p.err = ErrOptOnTemporaryTable.GenWithStackByArgs("PLACEMENT") return } } } tName := stmt.Table.Name.String() if isIncorrectName(tName) { p.err = ddl.ErrWrongTableName.GenWithStackByArgs(tName) return } countPrimaryKey := 0 for _, colDef := range stmt.Cols { if err := checkColumn(colDef); err != nil { p.err = err return } isPrimary, err := checkColumnOptions(stmt.TemporaryKeyword != ast.TemporaryNone, colDef.Options) if err != nil { p.err = err return } countPrimaryKey += isPrimary if countPrimaryKey > 1 { p.err = infoschema.ErrMultiplePriKey return } } for _, constraint := range stmt.Constraints { switch tp := constraint.Tp; tp { case ast.ConstraintKey, ast.ConstraintIndex, ast.ConstraintUniq, ast.ConstraintUniqKey, ast.ConstraintUniqIndex: err := checkIndexInfo(constraint.Name, constraint.Keys) if err != nil { p.err = err return } if constraint.IsEmptyIndex { p.err = ddl.ErrWrongNameForIndex.GenWithStackByArgs(constraint.Name) return } case ast.ConstraintPrimaryKey: if countPrimaryKey > 0 { p.err = infoschema.ErrMultiplePriKey return } countPrimaryKey++ err := checkIndexInfo(constraint.Name, constraint.Keys) if err != nil { p.err = err return } } } if p.err = checkUnsupportedTableOptions(stmt.Options); p.err != nil { return } if stmt.Select != nil { // FIXME: a temp error noticing 'not implemented' (issue 4754) p.err = errors.New("'CREATE TABLE ... 
SELECT' is not implemented yet") return } else if len(stmt.Cols) == 0 && stmt.ReferTable == nil { p.err = ddl.ErrTableMustHaveColumns return } if stmt.Partition != nil { for _, def := range stmt.Partition.Definitions { pName := def.Name.String() if isIncorrectName(pName) { p.err = ddl.ErrWrongPartitionName.GenWithStackByArgs(pName) return } } } } func (p *preprocessor) checkCreateViewGrammar(stmt *ast.CreateViewStmt) { vName := stmt.ViewName.Name.String() if isIncorrectName(vName) { p.err = ddl.ErrWrongTableName.GenWithStackByArgs(vName) return } for _, col := range stmt.Cols { if isIncorrectName(col.String()) { p.err = ddl.ErrWrongColumnName.GenWithStackByArgs(col) return } } } func (p *preprocessor) checkCreateViewWithSelect(stmt ast.Node) { switch s := stmt.(type) { case *ast.SelectStmt: if s.SelectIntoOpt != nil { p.err = ddl.ErrViewSelectClause.GenWithStackByArgs("INFO") return } if s.LockInfo != nil && s.LockInfo.LockType != ast.SelectLockNone { s.LockInfo.LockType = ast.SelectLockNone return } case *ast.SetOprSelectList: for _, sel := range s.Selects { p.checkCreateViewWithSelect(sel) } } } func (p *preprocessor) checkCreateViewWithSelectGrammar(stmt *ast.CreateViewStmt) { switch stmt := stmt.Select.(type) { case *ast.SelectStmt: p.checkCreateViewWithSelect(stmt) case *ast.SetOprStmt: for _, selectStmt := range stmt.SelectList.Selects { p.checkCreateViewWithSelect(selectStmt) if p.err != nil { return } } } } func (p *preprocessor) checkDropSequenceGrammar(stmt *ast.DropSequenceStmt) { p.checkDropTableNames(stmt.Sequences) } func (p *preprocessor) checkDropTableGrammar(stmt *ast.DropTableStmt) { p.checkDropTableNames(stmt.Tables) if stmt.TemporaryKeyword != ast.TemporaryNone { p.checkDropTemporaryTableGrammar(stmt) } } func (p *preprocessor) checkDropTemporaryTableGrammar(stmt *ast.DropTableStmt) { currentDB := model.NewCIStr(p.ctx.GetSessionVars().CurrentDB) for _, t := range stmt.Tables { if isIncorrectName(t.Name.String()) { p.err = ddl.ErrWrongTableName.GenWithStackByArgs(t.Name.String()) return } schema := t.Schema if schema.L == "" { schema = currentDB } tbl, err := p.ensureInfoSchema().TableByName(schema, t.Name) if infoschema.ErrTableNotExists.Equal(err) { // Non-exist table will be checked in ddl executor continue } if err != nil { p.err = err return } tblInfo := tbl.Meta() if stmt.TemporaryKeyword == ast.TemporaryGlobal && tblInfo.TempTableType != model.TempTableGlobal { p.err = ErrDropTableOnTemporaryTable return } } } func (p *preprocessor) checkDropTableNames(tables []*ast.TableName) { for _, t := range tables { if isIncorrectName(t.Name.String()) { p.err = ddl.ErrWrongTableName.GenWithStackByArgs(t.Name.String()) return } } } func (p *preprocessor) checkNonUniqTableAlias(stmt *ast.Join) { if p.flag&parentIsJoin == 0 { p.tableAliasInJoin = append(p.tableAliasInJoin, make(map[string]interface{})) } tableAliases := p.tableAliasInJoin[len(p.tableAliasInJoin)-1] isOracleMode := p.ctx.GetSessionVars().SQLMode&mysql.ModeOracle != 0 if !isOracleMode { if err := isTableAliasDuplicate(stmt.Left, tableAliases); err != nil { p.err = err return } if err := isTableAliasDuplicate(stmt.Right, tableAliases); err != nil { p.err = err return } } p.flag |= parentIsJoin } func isTableAliasDuplicate(node ast.ResultSetNode, tableAliases map[string]interface{}) error { if ts, ok := node.(*ast.TableSource); ok { tabName := ts.AsName if tabName.L == "" { if tableNode, ok := ts.Source.(*ast.TableName); ok { if tableNode.Schema.L != "" { tabName = model.NewCIStr(fmt.Sprintf("%s.%s", 
tableNode.Schema.L, tableNode.Name.L)) } else { tabName = tableNode.Name } } } _, exists := tableAliases[tabName.L] if len(tabName.L) != 0 && exists { return ErrNonUniqTable.GenWithStackByArgs(tabName) } tableAliases[tabName.L] = nil } return nil } func checkColumnOptions(isTempTable bool, ops []*ast.ColumnOption) (int, error) { isPrimary, isGenerated, isStored := 0, 0, false for _, op := range ops { switch op.Tp { case ast.ColumnOptionPrimaryKey: isPrimary = 1 case ast.ColumnOptionGenerated: isGenerated = 1 isStored = op.Stored case ast.ColumnOptionAutoRandom: if isTempTable { return isPrimary, ErrOptOnTemporaryTable.GenWithStackByArgs("auto_random") } } } if isPrimary > 0 && isGenerated > 0 && !isStored { return isPrimary, ErrUnsupportedOnGeneratedColumn.GenWithStackByArgs("Defining a virtual generated column as primary key") } return isPrimary, nil } func (p *preprocessor) checkCreateIndexGrammar(stmt *ast.CreateIndexStmt) { tName := stmt.Table.Name.String() if isIncorrectName(tName) { p.err = ddl.ErrWrongTableName.GenWithStackByArgs(tName) return } if stmt.IndexName == "" { p.err = ddl.ErrWrongNameForIndex.GenWithStackByArgs(stmt.IndexName) return } p.err = checkIndexInfo(stmt.IndexName, stmt.IndexPartSpecifications) } func (p *preprocessor) checkGroupBy(stmt *ast.GroupByClause) { noopFuncsMode := p.ctx.GetSessionVars().NoopFuncsMode for _, item := range stmt.Items { if !item.NullOrder && noopFuncsMode != variable.OnInt { err := expression.ErrFunctionsNoopImpl.GenWithStackByArgs("GROUP BY expr ASC|DESC") if noopFuncsMode == variable.OffInt { p.err = err return } // NoopFuncsMode is Warn; append the error as a warning p.ctx.GetSessionVars().StmtCtx.AppendWarning(err) } } } func (p *preprocessor) checkRenameTableGrammar(stmt *ast.RenameTableStmt) { oldTable := stmt.TableToTables[0].OldTable.Name.String() newTable := stmt.TableToTables[0].NewTable.Name.String() p.checkRenameTable(oldTable, newTable) } func (p *preprocessor) checkRenameTable(oldTable, newTable string) { if isIncorrectName(oldTable) { p.err = ddl.ErrWrongTableName.GenWithStackByArgs(oldTable) return } if isIncorrectName(newTable) { p.err = ddl.ErrWrongTableName.GenWithStackByArgs(newTable) return } } func (p *preprocessor) checkRepairTableGrammar(stmt *ast.RepairTableStmt) { // Check whether TiDB is in REPAIR MODE before handling the repair statement. if !domainutil.RepairInfo.InRepairMode() { p.err = ddl.ErrRepairTableFail.GenWithStackByArgs("TiDB is not in REPAIR MODE") return } if len(domainutil.RepairInfo.GetRepairTableList()) == 0 { p.err = ddl.ErrRepairTableFail.GenWithStackByArgs("repair list is empty") return } // Check the rename action as the rename statement does.
oldTable := stmt.Table.Name.String() newTable := stmt.CreateStmt.Table.Name.String() p.checkRenameTable(oldTable, newTable) } func (p *preprocessor) checkAlterTableGrammar(stmt *ast.AlterTableStmt) { tName := stmt.Table.Name.String() if isIncorrectName(tName) { p.err = ddl.ErrWrongTableName.GenWithStackByArgs(tName) return } specs := stmt.Specs for _, spec := range specs { if spec.NewTable != nil { ntName := spec.NewTable.Name.String() if isIncorrectName(ntName) { p.err = ddl.ErrWrongTableName.GenWithStackByArgs(ntName) return } } for _, colDef := range spec.NewColumns { if p.err = checkColumn(colDef); p.err != nil { return } } if p.err = checkUnsupportedTableOptions(spec.Options); p.err != nil { return } switch spec.Tp { case ast.AlterTableAddConstraint: switch spec.Constraint.Tp { case ast.ConstraintKey, ast.ConstraintIndex, ast.ConstraintUniq, ast.ConstraintUniqIndex, ast.ConstraintUniqKey, ast.ConstraintPrimaryKey: p.err = checkIndexInfo(spec.Constraint.Name, spec.Constraint.Keys) if p.err != nil { return } default: // Nothing to do now. } case ast.AlterTableAddStatistics, ast.AlterTableDropStatistics: statsName := spec.Statistics.StatsName if isIncorrectName(statsName) { msg := fmt.Sprintf("Incorrect statistics name: %s", statsName) p.err = ErrInternal.GenWithStack(msg) return } case ast.AlterTableAddPartitions: for _, def := range spec.PartDefinitions { pName := def.Name.String() if isIncorrectName(pName) { p.err = ddl.ErrWrongPartitionName.GenWithStackByArgs(pName) return } } default: // Nothing to do now. } } } // checkDuplicateColumnName checks if index exists duplicated columns. func checkDuplicateColumnName(IndexPartSpecifications []*ast.IndexPartSpecification) error { colNames := make(map[string]struct{}, len(IndexPartSpecifications)) for _, IndexColNameWithExpr := range IndexPartSpecifications { if IndexColNameWithExpr.Column != nil { name := IndexColNameWithExpr.Column.Name if _, ok := colNames[name.L]; ok { return infoschema.ErrColumnExists.GenWithStackByArgs(name) } colNames[name.L] = struct{}{} } } return nil } // checkIndexInfo checks index name, index column names and prefix lengths. 
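// For example (hypothetical DDL): an index explicitly named PRIMARY is rejected because
// the name is reserved for the primary key, and `INDEX idx (a(0))` fails with ErrKeyPart0
// because a prefix length of zero is meaningless.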
func checkIndexInfo(indexName string, IndexPartSpecifications []*ast.IndexPartSpecification) error { if strings.EqualFold(indexName, mysql.PrimaryKeyName) { return ddl.ErrWrongNameForIndex.GenWithStackByArgs(indexName) } if len(IndexPartSpecifications) > mysql.MaxKeyParts { return infoschema.ErrTooManyKeyParts.GenWithStackByArgs(mysql.MaxKeyParts) } for _, idxSpec := range IndexPartSpecifications { // -1 => unspecified/full, > 0 OK, 0 => error if idxSpec.Expr == nil && idxSpec.Length == 0 { return ErrKeyPart0.GenWithStackByArgs(idxSpec.Column.Name.O) } } return checkDuplicateColumnName(IndexPartSpecifications) } // checkUnsupportedTableOptions checks if there exists unsupported table options func checkUnsupportedTableOptions(options []*ast.TableOption) error { var err error = nil for _, option := range options { switch option.Tp { case ast.TableOptionUnion: err = ddl.ErrTableOptionUnionUnsupported case ast.TableOptionInsertMethod: err = ddl.ErrTableOptionInsertMethodUnsupported case ast.TableOptionEngine: err = checkTableEngine(option.StrValue) } if err != nil { return err } } return nil } var mysqlValidTableEngineNames = map[string]struct{}{ "archive": {}, "blackhole": {}, "csv": {}, "example": {}, "federated": {}, "innodb": {}, "memory": {}, "merge": {}, "mgr_myisam": {}, "myisam": {}, "ndb": {}, "heap": {}, } func checkTableEngine(engineName string) error { if _, have := mysqlValidTableEngineNames[strings.ToLower(engineName)]; !have { return ddl.ErrUnknownEngine.GenWithStackByArgs(engineName) } return nil } func checkReferInfoForTemporaryTable(tableMetaInfo *model.TableInfo) error { if tableMetaInfo.AutoRandomBits != 0 { return ErrOptOnTemporaryTable.GenWithStackByArgs("auto_random") } if tableMetaInfo.PreSplitRegions != 0 { return ErrOptOnTemporaryTable.GenWithStackByArgs("pre split regions") } if tableMetaInfo.Partition != nil { return ErrPartitionNoTemporary } if tableMetaInfo.ShardRowIDBits != 0 { return ErrOptOnTemporaryTable.GenWithStackByArgs("shard_row_id_bits") } if tableMetaInfo.PlacementPolicyRef != nil { return ErrOptOnTemporaryTable.GenWithStackByArgs("placement") } return nil } // checkColumn checks if the column definition is valid. // See https://dev.mysql.com/doc/refman/5.7/en/storage-requirements.html func checkColumn(colDef *ast.ColumnDef) error { // Check column name. cName := colDef.Name.Name.String() if isIncorrectName(cName) { return ddl.ErrWrongColumnName.GenWithStackByArgs(cName) } if isInvalidDefaultValue(colDef) { return types.ErrInvalidDefault.GenWithStackByArgs(colDef.Name.Name.O) } // Check column type. tp := colDef.Tp if tp == nil { return nil
} if tp.Flen > math.MaxUint32 { return types.ErrTooBigDisplayWidth.GenWithStack("Display width out of range for column '%s' (max = %d)", colDef.Name.Name.O, math.MaxUint32) } switch tp.Tp { case mysql.TypeString: if tp.Flen != types.UnspecifiedLength && tp.Flen > mysql.MaxFieldCharLength { return types.ErrTooBigFieldLength.GenWithStack("Column length too big for column '%s' (max = %d); use BLOB or TEXT instead", colDef.Name.Name.O, mysql.MaxFieldCharLength) } case mysql.TypeVarchar: if len(tp.Charset) == 0 { // It's not easy to get the schema charset and table charset here. // The charset is determined by the order ColumnDefaultCharset --> TableDefaultCharset-->DatabaseDefaultCharset-->SystemDefaultCharset. // return nil, to make the check in the ddl.CreateTable. return nil } err := ddl.IsTooBigFieldLength(colDef.Tp.Flen, colDef.Name.Name.O, tp.Charset) if err != nil { return err } case mysql.TypeFloat, mysql.TypeDouble: // For FLOAT, the SQL standard permits an optional specification of the precision. // https://dev.mysql.com/doc/refman/8.0/en/floating-point-types.html if tp.Decimal == -1 { switch tp.Tp { case mysql.TypeDouble: // For Double type Flen and Decimal check is moved to parser component default: if tp.Flen > mysql.MaxDoublePrecisionLength { return types.ErrWrongFieldSpec.GenWithStackByArgs(colDef.Name.Name.O) } } } else { if tp.Decimal > mysql.MaxFloatingTypeScale { return types.ErrTooBigScale.GenWithStackByArgs(tp.Decimal, colDef.Name.Name.O, mysql.MaxFloatingTypeScale) } if tp.Flen > mysql.MaxFloatingTypeWidth || tp.Flen == 0 { return types.ErrTooBigDisplayWidth.GenWithStackByArgs(colDef.Name.Name.O, mysql.MaxFloatingTypeWidth) } if tp.Flen < tp.Decimal { return types.ErrMBiggerThanD.GenWithStackByArgs(colDef.Name.Name.O) } } case mysql.TypeSet: if len(tp.Elems) > mysql.MaxTypeSetMembers { return types.ErrTooBigSet.GenWithStack("Too many strings for column %s and SET", colDef.Name.Name.O) } // Check set elements. See https://dev.mysql.com/doc/refman/5.7/en/set.html. for _, str := range colDef.Tp.Elems { if strings.Contains(str, ",") { return types.ErrIllegalValueForType.GenWithStackByArgs(types.TypeStr(tp.Tp), str) } } case mysql.TypeNewDecimal: if tp.Decimal > mysql.MaxDecimalScale { return types.ErrTooBigScale.GenWithStackByArgs(tp.Decimal, colDef.Name.Name.O, mysql.MaxDecimalScale) } if tp.Flen > mysql.MaxDecimalWidth { return types.ErrTooBigPrecision.GenWithStackByArgs(tp.Flen, colDef.Name.Name.O, mysql.MaxDecimalWidth) } if tp.Flen < tp.Decimal { return types.ErrMBiggerThanD.GenWithStackByArgs(colDef.Name.Name.O) } // If decimal and flen all equals 0, just set flen to default value. if tp.Decimal == 0 && tp.Flen == 0 { defaultFlen, _ := mysql.GetDefaultFieldLengthAndDecimal(mysql.TypeNewDecimal) tp.Flen = defaultFlen } case mysql.TypeBit: if tp.Flen <= 0 { return types.ErrInvalidFieldSize.GenWithStackByArgs(colDef.Name.Name.O) } if tp.Flen > mysql.MaxBitDisplayWidth { return types.ErrTooBigDisplayWidth.GenWithStackByArgs(colDef.Name.Name.O, mysql.MaxBitDisplayWidth) } default: // TODO: Add more types. } return nil } // isDefaultValNowSymFunc checks whether default value is a NOW() builtin function. func isDefaultValNowSymFunc(expr ast.ExprNode) bool { if funcCall, ok := expr.(*ast.FuncCallExpr); ok { // Default value NOW() is transformed to CURRENT_TIMESTAMP() in parser. if funcCall.FnName.L == ast.CurrentTimestamp { return true } } return false } func isInvalidDefaultValue(colDef *ast.ColumnDef) bool { tp := colDef.Tp // Check the last default value. 
for i := len(colDef.Options) - 1; i >= 0; i-- { columnOpt := colDef.Options[i] if columnOpt.Tp == ast.ColumnOptionDefaultValue { if !(tp.Tp == mysql.TypeTimestamp || tp.Tp == mysql.TypeDatetime) && isDefaultValNowSymFunc(columnOpt.Expr) { return true } break } } return false } // isIncorrectName checks if the identifier is incorrect. // See https://dev.mysql.com/doc/refman/5.7/en/identifiers.html func isIncorrectName(name string) bool { if len(name) == 0 { return true } if name[len(name)-1] == ' ' { return true } return false } // checkContainDotColumn checks whether a column definition contains a schema or table qualifier. // For example: create table t (c1.c2 int default null). func (p *preprocessor) checkContainDotColumn(stmt *ast.CreateTableStmt) { tName := stmt.Table.Name.String() sName := stmt.Table.Schema.String() for _, colDef := range stmt.Cols { // check schema and table names. if colDef.Name.Schema.O != sName && len(colDef.Name.Schema.O) != 0 { p.err = ddl.ErrWrongDBName.GenWithStackByArgs(colDef.Name.Schema.O) return } if colDef.Name.Table.O != tName && len(colDef.Name.Table.O) != 0 { p.err = ddl.ErrWrongTableName.GenWithStackByArgs(colDef.Name.Table.O) return } } } func (p *preprocessor) stmtType() string { switch p.stmtTp { case TypeDelete: return "DELETE" case TypeUpdate: return "UPDATE" case TypeInsert: return "INSERT" case TypeDrop: return "DROP" case TypeCreate: return "CREATE" case TypeAlter: return "ALTER" case TypeRename: return "DROP, ALTER" case TypeRepair: return "SELECT, INSERT" case TypeShow: return "SHOW" default: return "SELECT" // matches Select and uncaught cases. } } func (p *preprocessor) handleTableName(tn *ast.TableName) { if tn.Schema.L == "" { if _, ok := p.withName[tn.Name.L]; ok { return } currentDB := p.ctx.GetSessionVars().CurrentDB if currentDB == "" { p.err = errors.Trace(ErrNoDB) return } tn.Schema = model.NewCIStr(currentDB) } if p.flag&inCreateOrDropTable > 0 { // The table may not exist in a create table or drop table statement. if p.flag&inRepairTable > 0 { // Create stmt is in repair stmt, skip resolving the table to avoid error. return } // Create stmt is not in repair stmt, check the table not in repair list. if domainutil.RepairInfo.InRepairMode() { p.checkNotInRepair(tn) } return } // repairStmt: admin repair table A create table B ... // a repairStmt's tableName is visited with either the `inCreateOrDropTable` or the `inRepairTable` flag set. if p.flag&inRepairTable > 0 { p.handleRepairName(tn) return } p.handleAsOfAndReadTS(tn.AsOf) if p.err != nil { return } table, err := p.tableByName(tn) if err != nil { p.err = err return } tableInfo := table.Meta() dbInfo, _ := p.ensureInfoSchema().SchemaByTable(tableInfo) // the tableName should be checked as a sequence object. if p.flag&inSequenceFunction > 0 { if !tableInfo.IsSequence() { p.err = infoschema.ErrWrongObject.GenWithStackByArgs(dbInfo.Name.O, tableInfo.Name.O, "SEQUENCE") return } } tn.TableInfo = tableInfo tn.DBInfo = dbInfo } func (p *preprocessor) checkNotInRepair(tn *ast.TableName) { tableInfo, dbInfo := domainutil.RepairInfo.GetRepairedTableInfoByTableName(tn.Schema.L, tn.Name.L) if dbInfo == nil { return } if tableInfo != nil { p.err = ddl.ErrWrongTableName.GenWithStackByArgs(tn.Name.L, "this table is in repair") } } func (p *preprocessor) handleRepairName(tn *ast.TableName) { // Check whether the repaired table is a system table.
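// For example (a sketch): `ADMIN REPAIR TABLE mysql.user CREATE TABLE ...` is refused
// here, since memory and system databases are never candidates for repair.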
if util.IsMemOrSysDB(tn.Schema.L) { p.err = ddl.ErrRepairTableFail.GenWithStackByArgs("memory or system database is not for repair") return } tableInfo, dbInfo := domainutil.RepairInfo.GetRepairedTableInfoByTableName(tn.Schema.L, tn.Name.L) // tableName here only has the schema rather than DBInfo. if dbInfo == nil { p.err = ddl.ErrRepairTableFail.GenWithStackByArgs("database " + tn.Schema.L + " is not in repair") return } if tableInfo == nil { p.err = ddl.ErrRepairTableFail.GenWithStackByArgs("table " + tn.Name.L + " is not in repair") return } p.ctx.SetValue(domainutil.RepairedTable, tableInfo) p.ctx.SetValue(domainutil.RepairedDatabase, dbInfo) } func (p *preprocessor) resolveShowStmt(node *ast.ShowStmt) { if node.DBName == "" { if node.Table != nil && node.Table.Schema.L != "" { node.DBName = node.Table.Schema.O } else { node.DBName = p.ctx.GetSessionVars().CurrentDB } } else if node.Table != nil && node.Table.Schema.L == "" { node.Table.Schema = model.NewCIStr(node.DBName) } if node.User != nil && node.User.CurrentUser { // Fill the Username and Hostname with the current user. currentUser := p.ctx.GetSessionVars().User if currentUser != nil { node.User.Username = currentUser.Username node.User.Hostname = currentUser.Hostname node.User.AuthUsername = currentUser.AuthUsername node.User.AuthHostname = currentUser.AuthHostname } } } func (p *preprocessor) resolveExecuteStmt(node *ast.ExecuteStmt) { prepared, err := GetPreparedStmt(node, p.ctx.GetSessionVars()) if err != nil { p.err = err return } if prepared.SnapshotTSEvaluator != nil { snapshotTS, err := prepared.SnapshotTSEvaluator(p.ctx) if err != nil { p.err = err return } is, err := domain.GetDomain(p.ctx).GetSnapshotInfoSchema(snapshotTS) if err != nil { p.err = err return } p.LastSnapshotTS = snapshotTS p.initedLastSnapshotTS = true p.InfoSchema = temptable.AttachLocalTemporaryTableInfoSchema(p.ctx, is) } } func (p *preprocessor) resolveCreateTableStmt(node *ast.CreateTableStmt) { for _, val := range node.Constraints { if val.Refer != nil && val.Refer.Table.Schema.String() == "" { val.Refer.Table.Schema = node.Table.Schema } } } func (p *preprocessor) resolveAlterTableStmt(node *ast.AlterTableStmt) { for _, spec := range node.Specs { if spec.Tp == ast.AlterTableRenameTable { p.flag |= inCreateOrDropTable break } if spec.Tp == ast.AlterTableAddConstraint && spec.Constraint.Refer != nil { table := spec.Constraint.Refer.Table if table.Schema.L == "" && node.Table.Schema.L != "" { table.Schema = model.NewCIStr(node.Table.Schema.L) } } } } func (p *preprocessor) resolveCreateSequenceStmt(stmt *ast.CreateSequenceStmt) { sName := stmt.Name.Name.String() if isIncorrectName(sName) { p.err = ddl.ErrWrongTableName.GenWithStackByArgs(sName) return } } func (p *preprocessor) checkFuncCastExpr(node *ast.FuncCastExpr) { if node.Tp.EvalType() == types.ETDecimal { if node.Tp.Flen >= node.Tp.Decimal && node.Tp.Flen <= mysql.MaxDecimalWidth && node.Tp.Decimal <= mysql.MaxDecimalScale { // valid return } var buf strings.Builder restoreCtx := format.NewRestoreCtx(format.DefaultRestoreFlags, &buf) if err := node.Expr.Restore(restoreCtx); err != nil { p.err = err return } if node.Tp.Flen < node.Tp.Decimal { p.err = types.ErrMBiggerThanD.GenWithStackByArgs(buf.String()) return } if node.Tp.Flen > mysql.MaxDecimalWidth { p.err = types.ErrTooBigPrecision.GenWithStackByArgs(node.Tp.Flen, buf.String(), mysql.MaxDecimalWidth) return } if node.Tp.Decimal > mysql.MaxDecimalScale { p.err = types.ErrTooBigScale.GenWithStackByArgs(node.Tp.Decimal, buf.String(), 
mysql.MaxDecimalScale) return } } } // handleAsOfAndReadTS tries to handle the AS OF clause, or possibly read_ts. func (p *preprocessor) handleAsOfAndReadTS(node *ast.AsOfClause) { if p.stmtTp != TypeSelect { return } defer func() { // If the select statement was like 'select * from t as of timestamp ...', or is in a stale read transaction, // or is affected by the tidb_read_staleness session variable, then the statement will be marked as isStaleness // in stmtCtx. if p.flag&inPrepare == 0 && p.IsStaleness { p.ctx.GetSessionVars().StmtCtx.IsStaleness = true } }() // When the statement is inside a transaction, we check whether an AsOfClause exists. If it does, we return an error; // otherwise we directly set the return params from TxnCtx. p.ReadReplicaScope = kv.GlobalReplicaScope if p.ctx.GetSessionVars().InTxn() { if node != nil { p.err = ErrAsOf.FastGenWithCause("as of timestamp can't be set in transaction.") return } txnCtx := p.ctx.GetSessionVars().TxnCtx p.ReadReplicaScope = txnCtx.TxnScope // It means we meet the following case: // 1. start transaction read only as of timestamp ts // 2. select statement if txnCtx.IsStaleness { p.LastSnapshotTS = txnCtx.StartTS p.IsStaleness = txnCtx.IsStaleness p.initedLastSnapshotTS = true return } } scope := config.GetTxnScopeFromConfig() if p.ctx.GetSessionVars().GetReplicaRead().IsClosestRead() && scope != kv.GlobalReplicaScope { p.ReadReplicaScope = scope } // If the statement is in auto-commit mode, we check whether read_ts exists; if it does, we use it directly. // The txnScope is defined by the zone label; if it is not set, we use the global txnScope directly. readTS := p.ctx.GetSessionVars().TxnReadTS.UseTxnReadTS() readStaleness := p.ctx.GetSessionVars().ReadStaleness var ts uint64 switch { case readTS > 0: ts = readTS if node != nil { p.err = ErrAsOf.FastGenWithCause("can't use select as of while already set transaction as of") return } if !p.initedLastSnapshotTS { p.SnapshotTSEvaluator = func(sessionctx.Context) (uint64, error) { return ts, nil } p.LastSnapshotTS = ts p.IsStaleness = true } case readTS == 0 && node != nil: // If we didn't use read_ts and node isn't nil, it means we use 'select ... as of timestamp ...' // for a stale read. // It means we meet the following case: // select statement with as of timestamp ts, p.err = calculateTsExpr(p.ctx, node) if p.err != nil { return } if err := sessionctx.ValidateStaleReadTS(context.Background(), p.ctx, ts); err != nil { p.err = errors.Trace(err) return } if !p.initedLastSnapshotTS { p.SnapshotTSEvaluator = func(ctx sessionctx.Context) (uint64, error) { return calculateTsExpr(ctx, node) } p.LastSnapshotTS = ts p.IsStaleness = true } case readTS == 0 && node == nil && readStaleness != 0: // If both readTS and node are empty while readStaleness isn't, it means we meet the following situation: // set @@tidb_read_staleness='-5'; // select * from t; // Then the following select statement should be affected by the tidb_read_staleness in the session.
ts, p.err = calculateTsWithReadStaleness(p.ctx, readStaleness) if p.err != nil { return } if err := sessionctx.ValidateStaleReadTS(context.Background(), p.ctx, ts); err != nil { p.err = errors.Trace(err) return } if !p.initedLastSnapshotTS { p.SnapshotTSEvaluator = func(ctx sessionctx.Context) (uint64, error) { return calculateTsWithReadStaleness(p.ctx, readStaleness) } p.LastSnapshotTS = ts p.IsStaleness = true } case readTS == 0 && node == nil && readStaleness == 0: // If readTS, node, and readStaleness are all empty, setting p.ReadReplicaScope is still necessary // to verify the txn scope later, because we may be in a local txn without using Stale Read. p.ReadReplicaScope = scope } // If the select statement involves multiple tables, we must guarantee that all tables use the same timestamp. if p.LastSnapshotTS != ts { p.err = ErrAsOf.GenWithStack("can not set different time in the as of") return } if p.LastSnapshotTS != 0 { dom := domain.GetDomain(p.ctx) is, err := dom.GetSnapshotInfoSchema(p.LastSnapshotTS) // If the infoschema is empty, LastSnapshotTS initialization failed. if err != nil { p.err = err return } if is == nil { p.err = fmt.Errorf("can not get any information schema based on snapshotTS: %d", p.LastSnapshotTS) return } p.InfoSchema = temptable.AttachLocalTemporaryTableInfoSchema(p.ctx, is) } p.initedLastSnapshotTS = true } // ensureInfoSchema gets the infoschema from the preprocessor. // There are several situations: // - the stmt specifies the schema version. // - session variable // - transaction context func (p *preprocessor) ensureInfoSchema() infoschema.InfoSchema { if p.InfoSchema != nil { return p.InfoSchema } // `Execute` under some conditions needs to see the latest information schema. if p.PreprocessExecuteISUpdate != nil { if newInfoSchema := p.ExecuteInfoSchemaUpdate(p.Node, p.ctx); newInfoSchema != nil { p.InfoSchema = newInfoSchema return p.InfoSchema } } p.InfoSchema = p.ctx.GetInfoSchema().(infoschema.InfoSchema) return p.InfoSchema } func (p *preprocessor) initTxnContextProviderIfNecessary(node ast.Node) { if p.err != nil || p.flag&initTxnContextProvider == 0 { return } p.err = sessiontxn.GetTxnManager(p.ctx).SetContextProvider(&sessiontxn.SimpleTxnContextProvider{ InfoSchema: p.ensureInfoSchema(), }) }
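The handleAsOfAndReadTS logic above derives one snapshot timestamp from three mutually exclusive sources, in priority order: a transaction-level read TS (START TRANSACTION READ ONLY AS OF ...), a per-statement AS OF TIMESTAMP clause, and the tidb_read_staleness session variable. A minimal sketch of that precedence, using hypothetical names (resolve_snapshot_ts, StaleReadError) rather than TiDB's actual API, and plain unix seconds rather than TiDB's TSO encoding:

class StaleReadError(Exception):
    pass

def resolve_snapshot_ts(txn_read_ts, as_of_ts, read_staleness, now_ts):
    # Mirrors the switch above: the three sources may not be combined.
    if txn_read_ts:
        if as_of_ts:
            raise StaleReadError("transaction read TS and AS OF cannot be mixed")
        return txn_read_ts
    if as_of_ts:
        return as_of_ts
    if read_staleness:
        # e.g. @@tidb_read_staleness = '-5' reads about 5 seconds in the past
        return now_ts + read_staleness
    return None  # not a stale read: use the current snapshot

assert resolve_snapshot_ts(0, 0, -5, 1700000000) == 1699999995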
explore_completeness.py
from __future__ import division from six.moves import range import libtbx import sys class stats_manager(libtbx.slots_getstate_setstate): __slots__ = [ "i_calc", "use_symmetry", "n_indices", "completeness_history", "min_count_history", "counts", "currently_zero", "new_0"] def __init__(O, n_reserve, i_calc, use_symmetry): from cctbx.array_family import flex O.i_calc = i_calc O.use_symmetry = use_symmetry if (use_symmetry): O.n_indices = O.i_calc.asu.indices().size() else: O.n_indices = O.i_calc.p1_anom.indices().size() O.completeness_history = flex.double() O.completeness_history.reserve(n_reserve) O.completeness_history.append(0) O.min_count_history = flex.size_t() O.min_count_history.reserve(n_reserve) O.min_count_history.append(0) O.counts = flex.size_t(O.n_indices, 0) O.currently_zero = O.n_indices O.new_0 = None def update(O, miller_index_i_seqs): from cctbx.array_family import flex if (O.use_symmetry): isel = O.i_calc.asu_iselection.select(miller_index_i_seqs) else: isel = miller_index_i_seqs previously_zero = O.counts.increment_and_track_up_from_zero( iselection=isel) O.new_0 = O.currently_zero - previously_zero O.completeness_history.append(1-O.new_0/O.n_indices) O.min_count_history.append(flex.min(O.counts)) assert O.new_0 >= 0 if (O.new_0 == 0 and O.currently_zero != 0): print "Complete with %d images." % (len(O.completeness_history)-1) print O.currently_zero = O.new_0 def report(O, plot=None, xy_prefix=None): from cctbx.array_family import flex print "Number of shots:", O.completeness_history.size()-1 print print "Histogram of counts per reflection:" flex.histogram(O.counts.as_double(), n_slots=8).show( prefix=" ", format_cutoffs="%7.0f") print print "Observations per reflection:" flex.show_count_stats(counts=O.counts, prefix=" ") print " Median:", int(flex.median(O.counts.as_double())+0.5) print sys.stdout.flush() if (xy_prefix is None): xy_prefix = "" elif (len(xy_prefix) != 0): xy_prefix = xy_prefix + "_" def dump_xy(name, array): f = open(xy_prefix + "%s.xy" % name, "w") for i,c in enumerate(array): print >> f, i, c dump_xy("completeness_history", O.completeness_history) dump_xy("min_count_history", O.min_count_history) if (O.use_symmetry): _ = O.i_calc.asu else: _ = O.i_calc.p1_anom _ = _.customized_copy(data=O.counts).sort(by_value="resolution") sym_factors = _.space_group().order_p() if (not O.i_calc.asu.anomalous_flag()): sym_factors *= 2 sym_factors /= _.multiplicities().data() counts_sorted_by_resolution = _.data().as_int() * sym_factors dump_xy("counts_sorted_by_resolution", counts_sorted_by_resolution) dump_xy("d_spacings_sorted_by_resolution", _.d_spacings().data()) if (plot == "completeness"): from libtbx import pyplot fig = pyplot.figure() ax = fig.add_subplot(1, 1, 1) _ = O.completeness_history nx = _.size() ax.plot(range(nx), _, "r-") ax.axis([0, nx, 0, 1]) pyplot.show() elif (plot == "redundancy"): from libtbx import pyplot fig = pyplot.figure() ax = fig.add_subplot(1, 1, 1) _ = counts_sorted_by_resolution ax.plot(range(len(_)), _, "r-") ax.axis([-_.size()*0.05, _.size()*1.05, 0, None]) pyplot.show() elif (plot is not None): raise RuntimeError('Unknown plot type: "%s"' % plot) def kirian_delta_vs_ewald_proximity( unit_cell, miller_indices, crystal_rotation_matrix, ewald_radius, d_min, detector_distance, detector_size, detector_pixels): from scitbx import matrix from libtbx.math_utils import nearest_integer cr = matrix.sqr(crystal_rotation_matrix) a_matrix = cr * matrix.sqr(unit_cell.fractionalization_matrix()).transpose() a_inv = a_matrix.inverse() dsx, dsy = 
detector_size dpx, dpy = detector_pixels deltas = [[] for _ in range(len(miller_indices))] h_lookup = {} for i,h in enumerate(miller_indices): h_lookup[h] = i for pi in range(dpx): for pj in range(dpy): cx = ((pi + 0.5) / dpx - 0.5) * dsx cy = ((pj + 0.5) / dpy - 0.5) * dsy lo = matrix.col((cx, cy, -detector_distance)) ko = lo.normalize() * ewald_radius ki = matrix.col((0,0,-ewald_radius)) dk = ki - ko h_frac = a_inv * dk h = matrix.col([nearest_integer(_) for _ in h_frac]) if (h.elems == (0,0,0)): continue g_hkl = a_matrix * h delta = (dk - g_hkl).length() i = h_lookup.get(h.elems) if (i is None): assert unit_cell.d(h) < d_min else: deltas[i].append(delta) def ewald_proximity(h): # compare with code in image_simple.hpp rv = matrix.col(unit_cell.reciprocal_space_vector(h)) rvr = cr * rv rvre = matrix.col((rvr[0], rvr[1], rvr[2]+ewald_radius)) rvre_len = rvre.length() return abs(1 - rvre_len / ewald_radius) def
(): fn_xy = "kirian_delta_vs_ewald_proximity.xy" print "Writing file:", fn_xy f = open(fn_xy, "w") print >> f, """\ @with g0 @ s0 symbol 1 @ s0 symbol size 0.1 @ s0 line type 0""" for h, ds in zip(miller_indices, deltas): if (len(ds) != 0): print >> f, min(ds), ewald_proximity(h) print >> f, "&" print write_xy() STOP() def simulate(work_params, i_calc): from rstbx.simage import image_simple from cctbx.array_family import flex n_shots = work_params.number_of_shots stats = stats_manager( n_reserve=max(n_shots, 1000000), i_calc=i_calc, use_symmetry=work_params.use_symmetry) mc_target = work_params.min_count_target def update_stats(miller_index_i_seqs): stats.update(miller_index_i_seqs) if (n_shots is not None and stats.min_count_history.size()-1 < n_shots): return False if (mc_target is not None and stats.min_count_history[-1] < mc_target): return False if (stats.new_0 != 0 and n_shots is None and mc_target is None): return False return True def get_miller_index_i_seqs(i_img, parallel=True): mt = flex.mersenne_twister(seed=work_params.noise.random_seed+i_img) crystal_rotation = mt.random_double_r3_rotation_matrix_arvo_1992() if (work_params.kirian_delta_vs_ewald_proximity): kirian_delta_vs_ewald_proximity( unit_cell=i_calc.p1_anom.unit_cell(), miller_indices=i_calc.p1_anom.indices(), crystal_rotation_matrix=crystal_rotation, ewald_radius=1/work_params.wavelength, d_min=work_params.d_min, detector_distance=work_params.detector.distance, detector_size=work_params.detector.size, detector_pixels=work_params.detector.pixels) img = image_simple( store_miller_index_i_seqs=True, store_signals=True).compute( unit_cell=i_calc.p1_anom.unit_cell(), miller_indices=i_calc.p1_anom.indices(), spot_intensity_factors=None, crystal_rotation_matrix=crystal_rotation, ewald_radius=1/work_params.wavelength, ewald_proximity=work_params.ewald_proximity, signal_max=1, detector_distance=work_params.detector.distance, detector_size=work_params.detector.size, detector_pixels=work_params.detector.pixels, point_spread=work_params.point_spread, gaussian_falloff_scale=work_params.gaussian_falloff_scale) result = img.miller_index_i_seqs if (work_params.usable_partiality_threshold is not None): result = result.select( img.signals > work_params.usable_partiality_threshold) if (parallel): return result.copy_to_byte_str() return result i_img = 0 stop = False if (not work_params.multiprocessing): while (not stop): try: miller_index_i_seqs = get_miller_index_i_seqs(i_img, parallel=False) except KeyboardInterrupt: print print "KeyboardInterrupt" print stop = True else: i_img += 1 stop = update_stats(miller_index_i_seqs) else: from libtbx import easy_mp pool = easy_mp.Pool(fixed_func=get_miller_index_i_seqs) try: print "multiprocessing pool size:", pool.processes print sys.stdout.flush() while (not stop): next_i_img = i_img + pool.processes args = range(i_img, next_i_img) mp_results = pool.map_fixed_func(iterable=args) i_img = next_i_img for miller_index_i_seqs in mp_results: assert miller_index_i_seqs is not None miller_index_i_seqs = flex.size_t_from_byte_str( byte_str=miller_index_i_seqs) stop = update_stats(miller_index_i_seqs) if (stop): break finally: pool.close() pool.join() return stats def run(args): from libtbx.utils import show_times_at_exit show_times_at_exit() from rstbx.simage import create work_params = create.process_args( args=args, extra_phil_str="""\ use_symmetry = False .type = bool number_of_shots = None .type = int min_count_target = None .type = int usable_partiality_threshold = 0.1 .type = float 
kirian_delta_vs_ewald_proximity = False .type = bool multiprocessing = False .type = bool xy_prefix = None .type = str plot = completeness redundancy .type = choice """) i_calc = create.build_i_calc(work_params) i_calc.p1_anom.show_comprehensive_summary() print sys.stdout.flush() stats = simulate(work_params, i_calc) stats.report(plot=work_params.plot, xy_prefix=work_params.xy_prefix)
write_xy
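The completeness bookkeeping in stats_manager rests on flex's increment_and_track_up_from_zero: each shot contributes a set of reflection indices, and completeness after a shot is 1 minus the fraction of reflections still unobserved. A plain-Python sketch of the same accounting, without the cctbx flex types (update_counts is a hypothetical stand-in that loosely mirrors the update method above):

def update_counts(counts, currently_zero, observed):
    # Count indices whose tally transitions up from zero, then increment.
    previously_zero = sum(1 for i in set(observed) if counts[i] == 0)
    for i in observed:
        counts[i] += 1
    return currently_zero - previously_zero

counts = [0] * 5                            # five reflections, none seen yet
zero = update_counts(counts, 5, [0, 2, 2])  # one shot hits reflections 0 and 2
print(1 - zero / len(counts))               # completeness = 0.4 (Python 3 division)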
prepare.go
// Copyright (c) 2019 NEC Laboratories Europe GmbH. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package protobuf import ( "github.com/hyperledger-labs/minbft/messages" "github.com/hyperledger-labs/minbft/messages/protobuf/pb" ) type prepare struct { pbMsg *pb.Prepare } func newPrepare(r uint32, v uint64, req messages.Request) *prepare { return &prepare{pbMsg: &pb.Prepare{ ReplicaId: r, View: v, Request: pbRequestFromAPI(req), }} } func newPrepareFromPb(pbMsg *pb.Prepare) *prepare { return &prepare{pbMsg: pbMsg} } func (m *prepare) MarshalBinary() ([]byte, error) { return marshalMessage(m.pbMsg) } func (m *prepare) ReplicaID() uint32 { return m.pbMsg.GetReplicaId() } func (m *prepare) View() uint64 { return m.pbMsg.GetView() } func (m *prepare) Request() messages.Request { return newRequestFromPb(m.pbMsg.GetRequest()) } func (m *prepare) UIBytes() []byte { return m.pbMsg.Ui } func (m *prepare) SetUIBytes(uiBytes []byte) { m.pbMsg.Ui = uiBytes } func (prepare) ImplementsReplicaMessage() {} func (prepare) ImplementsPeerMessage() {} func (prepare) ImplementsPrepare() {} func pbPrepareFromAPI(m messages.Prepare) *pb.Prepare { if m, ok := m.(*prepare); ok
return pb.PrepareFromAPI(m) }
{ return m.pbMsg }
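prepare keeps the wire-format pb.Prepare as its only state, so pbPrepareFromAPI can unwrap an API message with a type assertion and skip any conversion when the concrete type is already *prepare. A sketch of the same fast-path idea in Python, with hypothetical names (this is not the minbft API):

class Prepare:
    # Thin wrapper: accessors read through to the underlying message.
    def __init__(self, pb_msg):
        self._pb = pb_msg

    def replica_id(self):
        return self._pb["replica_id"]

def pb_from_api(msg):
    if isinstance(msg, Prepare):
        return msg._pb                       # fast path: reuse the wrapped message
    return {"replica_id": msg.replica_id()}  # slow path: rebuild via the interface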
data_loaders.py
import os import glob import math import hydra import cv2 import numpy as np from shapely.geometry import Polygon import torch from torch.utils.data import Dataset, DataLoader import imgaug.augmenters as iaa import pyclipper import db_transforms from utils import dict_to_device, minmax_scaler_img class BaseDatasetIter(Dataset): def __init__(self, train_dir, train_gt_dir, ignore_tags, is_training=True, image_size=640, min_text_size=8, shrink_ratio=0.4, thresh_min=0.3, thresh_max=0.7, augment=None, mean=[103.939, 116.779, 123.68], debug=False): self.train_dir = train_dir self.train_gt_dir = train_gt_dir self.ignore_tags = ignore_tags self.is_training = is_training self.image_size = image_size self.min_text_size = min_text_size self.shrink_ratio = shrink_ratio self.thresh_min = thresh_min self.thresh_max = thresh_max self.augment = augment if self.augment is None: self.augment = self._get_default_augment() self.mean = mean self.debug = debug # load metadata self.image_paths, self.gt_paths = self.load_metadata( train_dir, train_gt_dir) # load annotation self.all_anns = self.load_all_anns(self.gt_paths) assert len(self.image_paths) == len(self.all_anns) def _get_default_augment(self): augment_seq = iaa.Sequential([ iaa.Fliplr(0.5), iaa.Affine(rotate=(-10, 10)), iaa.Resize((0.5, 3.0)) ]) return augment_seq def __len__(self): return len(self.image_paths) def __getitem__(self, index): image_path = self.image_paths[index] anns = self.all_anns[index] if self.debug: print(image_path) print(len(anns)) img = cv2.imread(image_path)[:, :, ::-1] if self.is_training and self.augment is not None: augment_seq = self.augment.to_deterministic() img, anns = db_transforms.transform(augment_seq, img, anns) img, anns = db_transforms.crop(img, anns) img, anns = db_transforms.resize(self.image_size, img, anns) anns = [ann for ann in anns if Polygon(ann['poly']).buffer(0).is_valid] gt = np.zeros((self.image_size, self.image_size), dtype=np.float32) # batch_gts mask = np.ones((self.image_size, self.image_size), dtype=np.float32) thresh_map = np.zeros((self.image_size, self.image_size), dtype=np.float32) # batch_thresh_maps # batch_thresh_masks thresh_mask = np.zeros((self.image_size, self.image_size), dtype=np.float32) if self.debug: print(type(anns), len(anns)) ignore_tags = [] for ann in anns: # i.e shape = (4, 2) / (6, 2) / ... 
poly = np.array(ann['poly']) height = max(poly[:, 1]) - min(poly[:, 1]) width = max(poly[:, 0]) - min(poly[:, 0]) polygon = Polygon(poly) # generate gt and mask if polygon.area < 1 or \ min(height, width) < self.min_text_size or \ ann['text'] in self.ignore_tags: ignore_tags.append(True) cv2.fillPoly(mask, poly.astype(np.int32)[np.newaxis, :, :], 0) continue else: # 6th equation distance = polygon.area * \ (1 - np.power(self.shrink_ratio, 2)) / polygon.length subject = [tuple(_l) for _l in ann['poly']] padding = pyclipper.PyclipperOffset() padding.AddPath(subject, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON) shrinked = padding.Execute(-distance) if len(shrinked) == 0: ignore_tags.append(True) cv2.fillPoly(mask, poly.astype(np.int32)[np.newaxis, :, :], 0) continue else: shrinked = np.array(shrinked[0]).reshape(-1, 2) if shrinked.shape[0] > 2 and \ Polygon(shrinked).buffer(0).is_valid: ignore_tags.append(False) cv2.fillPoly(gt, [shrinked.astype(np.int32)], 1) else: ignore_tags.append(True) cv2.fillPoly(mask, poly.astype(np.int32)[np.newaxis, :, :], 0) continue # generate thresh map and thresh mask db_transforms.draw_thresh_map(ann['poly'], thresh_map, thresh_mask, shrink_ratio=self.shrink_ratio) thresh_map = thresh_map * \ (self.thresh_max - self.thresh_min) + self.thresh_min img = img.astype(np.float32) img[..., 0] -= self.mean[0] img[..., 1] -= self.mean[1] img[..., 2] -= self.mean[2]
img = np.transpose(img, (2, 0, 1)) data_return = { "image_path": image_path, "img": img, "prob_map": gt, "supervision_mask": mask, "thresh_map": thresh_map, "text_area_map": thresh_mask, } # for batch_size = 1 if not self.is_training: data_return["anns"] = [ann['poly'] for ann in anns] data_return["ignore_tags"] = ignore_tags # return image_path, img, gt, mask, thresh_map, thresh_mask return data_return class TotalTextDatasetIter(BaseDatasetIter): def __init__(self, train_dir, train_gt_dir, ignore_tags, **kwargs): super().__init__(train_dir, train_gt_dir, ignore_tags, **kwargs) def load_metadata(self, img_dir, gt_dir): img_fps = sorted(glob.glob(os.path.join(img_dir, "*"))) gt_fps = [] for img_fp in img_fps: img_id = img_fp.split("/")[-1].replace("img", "").split(".")[0] gt_fn = "gt_img{}.txt".format(img_id) gt_fp = os.path.join(gt_dir, gt_fn) assert os.path.exists(img_fp) gt_fps.append(gt_fp) assert len(img_fps) == len(gt_fps) return img_fps, gt_fps def load_all_anns(self, gt_paths): res = [] for gt in gt_paths: lines = [] reader = open(gt, 'r').readlines() for line in reader: item = {} parts = line.strip().split(',') label = parts[-1] line = [i.strip('\ufeff').strip('\xef\xbb\xbf') for i in parts] num_points = math.floor((len(line) - 1) / 2) * 2 poly = np.array(list(map(float, line[:num_points]))).reshape( (-1, 2)).tolist() if len(poly) < 3: continue item['poly'] = poly item['text'] = label lines.append(item) res.append(lines) return res class CTW1500DatasetIter(BaseDatasetIter): def __init__(self, train_dir, train_gt_dir, ignore_tags, **kwargs): super().__init__(train_dir, train_gt_dir, ignore_tags, **kwargs) def load_metadata(self, img_dir, gt_dir): img_fps = sorted(glob.glob(os.path.join(img_dir, "*"))) gt_fps = [] for img_fp in img_fps: img_id = img_fp.split("/")[-1][:-4] gt_fn = "{}.txt".format(img_id) gt_fp = os.path.join(gt_dir, gt_fn) assert os.path.exists(img_fp) gt_fps.append(gt_fp) assert len(img_fps) == len(gt_fps) return img_fps, gt_fps def load_all_anns(self, gt_fps): """ Reference: https://github.com/whai362/PSENet/blob/master/dataset/ctw1500_loader.py """ res = [] for gt_fp in gt_fps: lines = [] with open(gt_fp, 'r') as f: for line in f: item = {} gt = line.strip().strip('\ufeff').strip('\xef\xbb\xbf') gt = list(map(int, gt.split(','))) x1 = np.int(gt[0]) y1 = np.int(gt[1]) bbox = [np.int(gt[i]) for i in range(4, 32)] bbox = np.asarray(bbox) + ([x1, y1] * 14) bbox = bbox.reshape(-1, 2).tolist() item['poly'] = bbox item['text'] = 'True' lines.append(item) res.append(lines) return res class ICDAR2015DatasetIter(BaseDatasetIter): def __init__(self, train_dir, train_gt_dir, ignore_tags, **kwargs): super().__init__(train_dir, train_gt_dir, ignore_tags, **kwargs) def load_metadata(self, img_dir, gt_dir): img_fps = glob.glob(os.path.join(img_dir, "*")) gt_fps = [] for img_fp in img_fps: img_id = img_fp.split("/")[-1].split(".")[0] gt_fn = "gt_{}.txt".format(img_id) gt_fp = os.path.join(gt_dir, gt_fn) assert os.path.exists(img_fp) gt_fps.append(gt_fp) assert len(img_fps) == len(gt_fps) return img_fps, gt_fps def load_all_anns(self, gt_fps): res = [] for gt_fp in gt_fps: lines = [] with open(gt_fp, 'r') as f: for line in f: item = {} gt = line.strip().strip('\ufeff').strip( '\xef\xbb\xbf').split(",") label = ",".join(gt[8:]) poly = list(map(int, gt[:8])) poly = np.asarray(poly).reshape(-1, 2).tolist() item['poly'] = poly item['text'] = label lines.append(item) res.append(lines) return res class MSRATD500DatasetIter(BaseDatasetIter): def __init__(self, train_dir, train_gt_dir, 
ignore_tags, **kwargs): super().__init__(train_dir, train_gt_dir, ignore_tags, **kwargs) def transform_four_points(self, points, center_point, theta): """Reference: https://stackoverflow.com/questions/622140 """ theta = -theta new_coords = [] x_center, y_center = center_point for point in points: x, y = point x_new = x_center + (x - x_center) * np.cos(theta) + \ (y - y_center) * np.sin(theta) y_new = y_center - (x - x_center) * np.sin(theta) + \ (y - y_center) * np.cos(theta) x_new = int(x_new) y_new = int(y_new) new_coords.append((x_new, y_new)) return new_coords def load_metadata(self, img_dir, gt_dir=None): # ignore gt_dir img_fps = sorted(glob.glob(os.path.join(img_dir, "*.JPG"))) gt_fps = sorted(glob.glob(os.path.join(img_dir, "*.gt"))) assert len(img_fps) == len(gt_fps) return img_fps, gt_fps def load_all_anns(self, gt_fps): res = [] for gt_fp in gt_fps: lines = [] with open(gt_fp, 'r') as f: for line in f: item = {} line = list(map(float, line.strip().split())) index, dif, x_min, y_min, w, h, theta = line if int(dif) == 1: # difficult label continue c1 = (x_min, y_min) c2 = (x_min + w, y_min) c3 = (x_min + w, y_min + h) c4 = (x_min, y_min + h) center = (x_min + w / 2, y_min + h / 2) rot_box = self.transform_four_points([c1, c2, c3, c4], center, theta) rot_box = np.array(rot_box).tolist() item['poly'] = rot_box item['text'] = 'True' lines.append(item) res.append(lines) return res @hydra.main(config_path="../config.yaml", strict=False) def run(cfg): dataset_name = cfg.dataset.name ignore_tags = cfg.data[dataset_name].ignore_tags train_dir = cfg.data[dataset_name].train_dir train_gt_dir = cfg.data[dataset_name].train_gt_dir if dataset_name == 'totaltext': TextDatasetIter = TotalTextDatasetIter elif dataset_name == 'ctw1500': TextDatasetIter = CTW1500DatasetIter elif dataset_name == 'icdar2015': TextDatasetIter = ICDAR2015DatasetIter elif dataset_name == 'msra_td500': TextDatasetIter = MSRATD500DatasetIter else: raise NotImplementedError("Pls provide valid dataset name!") train_iter = TextDatasetIter(train_dir, train_gt_dir, ignore_tags, is_training=True, debug=False) train_loader = DataLoader(dataset=train_iter, batch_size=1, shuffle=True, num_workers=1) samples = next(iter(train_loader)) samples = dict_to_device(samples, device='cpu') for k, v in samples.items(): if isinstance(v, torch.Tensor): print(samples[k].device) import matplotlib.pyplot as plt plt.figure() plt.imshow(minmax_scaler_img(samples['img'][0].numpy().transpose(1, 2, 0))) plt.imshow(samples['prob_map'][0], cmap='jet', alpha=0.35) plt.imshow(samples['thresh_map'][0], cmap='jet', alpha=0.5) # plt.imshow(samples['text_area_map'][0], cmap='jet', alpha=0.5) # plt.imshow(samples['supervision_mask'][0], cmap='jet', alpha=0.5) plt.savefig(os.path.join(cfg.meta.root_dir, 'tmp/foo.jpg'), bbox_inches='tight') if __name__ == '__main__': run()
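The offset computed in __getitem__ is the shrink distance from the DBNet formulation (the "6th equation" comment): D = A(1 - r^2)/L, where A is the polygon area, L its perimeter, and r the shrink_ratio. A worked check with plain numbers, no pyclipper required (shrink_distance is a hypothetical helper, not part of the loader above):

def shrink_distance(area, perimeter, shrink_ratio=0.4):
    # D = A * (1 - r^2) / L
    return area * (1 - shrink_ratio ** 2) / perimeter

# 100x100 axis-aligned square: area 10000, perimeter 400
print(shrink_distance(100 * 100, 4 * 100))  # 21.0 pixels inward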
pcr15.rs
#[doc = "Reader of register PCR15"] pub type R = crate::R<u32, super::PCR15>; #[doc = "Writer for register PCR15"] pub type W = crate::W<u32, super::PCR15>; #[doc = "Register PCR15 `reset()`'s with value 0"] impl crate::ResetValue for super::PCR15 { type Type = u32; #[inline(always)] fn reset_value() -> Self::Type { 0 } } #[doc = "Pull Select\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum PS_A { #[doc = "0: Internal pulldown resistor is enabled on the corresponding pin, if the corresponding Port Pull Enable field is set."] _0 = 0, #[doc = "1: Internal pullup resistor is enabled on the corresponding pin, if the corresponding Port Pull Enable field is set."] _1 = 1, } impl From<PS_A> for bool { #[inline(always)] fn from(variant: PS_A) -> Self { variant as u8 != 0 } } #[doc = "Reader of field `PS`"] pub type PS_R = crate::R<bool, PS_A>; impl PS_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> PS_A { match self.bits { false => PS_A::_0, true => PS_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { *self == PS_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { *self == PS_A::_1 } } #[doc = "Pull Enable\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum PE_A { #[doc = "0: Internal pullup or pulldown resistor is not enabled on the corresponding pin."] _0 = 0, #[doc = "1: Internal pullup or pulldown resistor is enabled on the corresponding pin, if the pin is configured as a digital input."] _1 = 1, } impl From<PE_A> for bool { #[inline(always)] fn from(variant: PE_A) -> Self { variant as u8 != 0 } } #[doc = "Reader of field `PE`"] pub type PE_R = crate::R<bool, PE_A>; impl PE_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> PE_A { match self.bits { false => PE_A::_0, true => PE_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { *self == PE_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { *self == PE_A::_1 } } #[doc = "Slew Rate Enable\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum SRE_A { #[doc = "0: Fast slew rate is configured on the corresponding pin, if the pin is configured as a digital output."] _0 = 0, #[doc = "1: Slow slew rate is configured on the corresponding pin, if the pin is configured as a digital output."] _1 = 1, } impl From<SRE_A> for bool { #[inline(always)] fn from(variant: SRE_A) -> Self { variant as u8 != 0 } } #[doc = "Reader of field `SRE`"] pub type SRE_R = crate::R<bool, SRE_A>; impl SRE_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> SRE_A { match self.bits { false => SRE_A::_0, true => SRE_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { *self == SRE_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { *self == SRE_A::_1 } } #[doc = "Passive Filter Enable\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum PFE_A { #[doc = "0: Passive input filter is disabled on the corresponding pin."] _0 = 0, #[doc = "1: Passive input filter is enabled on the corresponding pin, if the pin is configured as a digital input. 
Refer to the device data sheet for filter characteristics."] _1 = 1, } impl From<PFE_A> for bool { #[inline(always)] fn from(variant: PFE_A) -> Self { variant as u8 != 0 } } #[doc = "Reader of field `PFE`"] pub type PFE_R = crate::R<bool, PFE_A>; impl PFE_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> PFE_A { match self.bits { false => PFE_A::_0, true => PFE_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { *self == PFE_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { *self == PFE_A::_1 } } #[doc = "Open Drain Enable\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum ODE_A { #[doc = "0: Open drain output is disabled on the corresponding pin."] _0 = 0, #[doc = "1: Open drain output is enabled on the corresponding pin, if the pin is configured as a digital output."] _1 = 1, } impl From<ODE_A> for bool { #[inline(always)] fn from(variant: ODE_A) -> Self { variant as u8 != 0 } } #[doc = "Reader of field `ODE`"] pub type ODE_R = crate::R<bool, ODE_A>; impl ODE_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> ODE_A { match self.bits { false => ODE_A::_0, true => ODE_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { *self == ODE_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { *self == ODE_A::_1 } } #[doc = "Drive Strength Enable\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum DSE_A { #[doc = "0: Low drive strength is configured on the corresponding pin, if pin is configured as a digital output."] _0 = 0, #[doc = "1: High drive strength is configured on the corresponding pin, if pin is configured as a digital output."] _1 = 1, } impl From<DSE_A> for bool { #[inline(always)] fn from(variant: DSE_A) -> Self { variant as u8 != 0 } } #[doc = "Reader of field `DSE`"] pub type DSE_R = crate::R<bool, DSE_A>; impl DSE_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> DSE_A { match self.bits { false => DSE_A::_0, true => DSE_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { *self == DSE_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { *self == DSE_A::_1 } } #[doc = "Pin Mux Control\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] #[repr(u8)] pub enum MUX_A { #[doc = "0: Pin disabled (analog)."] _000 = 0, #[doc = "1: Alternative 1 (GPIO)."] _001 = 1, #[doc = "2: Alternative 2 (chip-specific)."] _010 = 2, #[doc = "3: Alternative 3 (chip-specific)."] _011 = 3, #[doc = "4: Alternative 4 (chip-specific)."] _100 = 4, #[doc = "5: Alternative 5 (chip-specific)."] _101 = 5, #[doc = "6: Alternative 6 (chip-specific)."] _110 = 6, #[doc = "7: Alternative 7 (chip-specific)."] _111 = 7, } impl From<MUX_A> for u8 { #[inline(always)] fn from(variant: MUX_A) -> Self { variant as _ } } #[doc = "Reader of field `MUX`"] pub type MUX_R = crate::R<u8, MUX_A>; impl MUX_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> MUX_A { match self.bits { 0 => MUX_A::_000, 1 => MUX_A::_001, 2 => MUX_A::_010, 3 => MUX_A::_011, 4 => MUX_A::_100, 5 => MUX_A::_101, 6 => MUX_A::_110, 7 => MUX_A::_111, _ => unreachable!(), } } #[doc = "Checks if the value of the field is `_000`"] 
#[inline(always)] pub fn is_000(&self) -> bool { *self == MUX_A::_000 } #[doc = "Checks if the value of the field is `_001`"] #[inline(always)] pub fn is_001(&self) -> bool { *self == MUX_A::_001 } #[doc = "Checks if the value of the field is `_010`"] #[inline(always)] pub fn is_010(&self) -> bool { *self == MUX_A::_010 } #[doc = "Checks if the value of the field is `_011`"] #[inline(always)] pub fn is_011(&self) -> bool { *self == MUX_A::_011 } #[doc = "Checks if the value of the field is `_100`"] #[inline(always)] pub fn is_100(&self) -> bool { *self == MUX_A::_100 } #[doc = "Checks if the value of the field is `_101`"] #[inline(always)] pub fn is_101(&self) -> bool { *self == MUX_A::_101 } #[doc = "Checks if the value of the field is `_110`"] #[inline(always)] pub fn is_110(&self) -> bool { *self == MUX_A::_110 } #[doc = "Checks if the value of the field is `_111`"] #[inline(always)] pub fn is_111(&self) -> bool { *self == MUX_A::_111 } } #[doc = "Write proxy for field `MUX`"] pub struct MUX_W<'a> { w: &'a mut W, } impl<'a> MUX_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: MUX_A) -> &'a mut W { { self.bits(variant.into()) } } #[doc = "Pin disabled (analog)."] #[inline(always)] pub fn _000(self) -> &'a mut W { self.variant(MUX_A::_000) } #[doc = "Alternative 1 (GPIO)."] #[inline(always)] pub fn _001(self) -> &'a mut W { self.variant(MUX_A::_001) } #[doc = "Alternative 2 (chip-specific)."] #[inline(always)] pub fn _010(self) -> &'a mut W { self.variant(MUX_A::_010) } #[doc = "Alternative 3 (chip-specific)."] #[inline(always)] pub fn _011(self) -> &'a mut W { self.variant(MUX_A::_011) } #[doc = "Alternative 4 (chip-specific)."] #[inline(always)] pub fn _100(self) -> &'a mut W { self.variant(MUX_A::_100) } #[doc = "Alternative 5 (chip-specific)."] #[inline(always)] pub fn _101(self) -> &'a mut W { self.variant(MUX_A::_101) } #[doc = "Alternative 6 (chip-specific)."] #[inline(always)] pub fn _110(self) -> &'a mut W { self.variant(MUX_A::_110) } #[doc = "Alternative 7 (chip-specific)."] #[inline(always)] pub fn _111(self) -> &'a mut W { self.variant(MUX_A::_111) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x07 << 8)) | (((value as u32) & 0x07) << 8); self.w } } #[doc = "Lock Register\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum LK_A { #[doc = "0: Pin Control Register fields \\[15:0\\] are not locked."] _0 = 0, #[doc = "1: Pin Control Register fields \\[15:0\\] are locked and cannot be updated until the next system reset."] _1 = 1, } impl From<LK_A> for bool { #[inline(always)] fn from(variant: LK_A) -> Self { variant as u8 != 0 } } #[doc = "Reader of field `LK`"] pub type LK_R = crate::R<bool, LK_A>; impl LK_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> LK_A { match self.bits { false => LK_A::_0, true => LK_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { *self == LK_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { *self == LK_A::_1 } } #[doc = "Write proxy for field `LK`"] pub struct LK_W<'a> { w: &'a mut W, } impl<'a> LK_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: LK_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "Pin Control Register fields \\[15:0\\] are not locked."] #[inline(always)] pub fn 
_0(self) -> &'a mut W { self.variant(LK_A::_0) } #[doc = "Pin Control Register fields \\[15:0\\] are locked and cannot be updated until the next system reset."] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(LK_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 15)) | (((value as u32) & 0x01) << 15); self.w } } #[doc = "Interrupt Configuration\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] #[repr(u8)] pub enum IRQC_A { #[doc = "0: Interrupt/DMA request disabled."] _0000 = 0, #[doc = "1: DMA request on rising edge."] _0001 = 1, #[doc = "2: DMA request on falling edge."] _0010 = 2, #[doc = "3: DMA request on either edge."] _0011 = 3, #[doc = "8: Interrupt when logic zero."] _1000 = 8, #[doc = "9: Interrupt on rising edge."] _1001 = 9, #[doc = "10: Interrupt on falling edge."] _1010 = 10, #[doc = "11: Interrupt on either edge."] _1011 = 11, #[doc = "12: Interrupt when logic one."] _1100 = 12, } impl From<IRQC_A> for u8 { #[inline(always)] fn from(variant: IRQC_A) -> Self { variant as _ } } #[doc = "Reader of field `IRQC`"] pub type IRQC_R = crate::R<u8, IRQC_A>; impl IRQC_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> crate::Variant<u8, IRQC_A> { use crate::Variant::*; match self.bits { 0 => Val(IRQC_A::_0000), 1 => Val(IRQC_A::_0001), 2 => Val(IRQC_A::_0010), 3 => Val(IRQC_A::_0011), 8 => Val(IRQC_A::_1000), 9 => Val(IRQC_A::_1001), 10 => Val(IRQC_A::_1010), 11 => Val(IRQC_A::_1011), 12 => Val(IRQC_A::_1100), i => Res(i), } } #[doc = "Checks if the value of the field is `_0000`"] #[inline(always)] pub fn is_0000(&self) -> bool { *self == IRQC_A::_0000 } #[doc = "Checks if the value of the field is `_0001`"] #[inline(always)] pub fn is_0001(&self) -> bool { *self == IRQC_A::_0001 } #[doc = "Checks if the value of the field is `_0010`"] #[inline(always)] pub fn is_0010(&self) -> bool { *self == IRQC_A::_0010 } #[doc = "Checks if the value of the field is `_0011`"] #[inline(always)] pub fn is_0011(&self) -> bool { *self == IRQC_A::_0011 } #[doc = "Checks if the value of the field is `_1000`"] #[inline(always)] pub fn is_1000(&self) -> bool { *self == IRQC_A::_1000 } #[doc = "Checks if the value of the field is `_1001`"] #[inline(always)] pub fn is_1001(&self) -> bool { *self == IRQC_A::_1001 } #[doc = "Checks if the value of the field is `_1010`"] #[inline(always)] pub fn is_1010(&self) -> bool { *self == IRQC_A::_1010 } #[doc = "Checks if the value of the field is `_1011`"] #[inline(always)] pub fn is_1011(&self) -> bool { *self == IRQC_A::_1011 } #[doc = "Checks if the value of the field is `_1100`"] #[inline(always)] pub fn is_1100(&self) -> bool { *self == IRQC_A::_1100 } } #[doc = "Write proxy for field `IRQC`"] pub struct IRQC_W<'a> { w: &'a mut W, } impl<'a> IRQC_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: IRQC_A) -> &'a mut W { unsafe { self.bits(variant.into()) } } #[doc = "Interrupt/DMA request disabled."] #[inline(always)] pub fn _0000(self) -> &'a mut W { self.variant(IRQC_A::_0000) } #[doc = "DMA request on rising edge."] #[inline(always)] pub fn _0001(self) -> &'a mut W { self.variant(IRQC_A::_0001) } #[doc = "DMA request on falling edge."] 
#[inline(always)] pub fn _0010(self) -> &'a mut W { self.variant(IRQC_A::_0010) } #[doc = "DMA request on either edge."] #[inline(always)] pub fn _0011(self) -> &'a mut W { self.variant(IRQC_A::_0011) } #[doc = "Interrupt when logic zero."] #[inline(always)] pub fn _1000(self) -> &'a mut W { self.variant(IRQC_A::_1000) } #[doc = "Interrupt on rising edge."] #[inline(always)] pub fn _1001(self) -> &'a mut W { self.variant(IRQC_A::_1001) } #[doc = "Interrupt on falling edge."] #[inline(always)] pub fn _1010(self) -> &'a mut W { self.variant(IRQC_A::_1010) } #[doc = "Interrupt on either edge."] #[inline(always)] pub fn _1011(self) -> &'a mut W { self.variant(IRQC_A::_1011) } #[doc = "Interrupt when logic one."] #[inline(always)] pub fn _1100(self) -> &'a mut W { self.variant(IRQC_A::_1100) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x0f << 16)) | (((value as u32) & 0x0f) << 16); self.w } } #[doc = "Interrupt Status Flag\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum ISF_A { #[doc = "0: Configured interrupt is not detected."] _0 = 0, #[doc = "1: Configured interrupt is detected. If the pin is configured to generate a DMA request, then the corresponding flag will be cleared automatically at the completion of the requested DMA transfer. Otherwise, the flag remains set until a logic one is written to the flag. If the pin is configured for a level sensitive interrupt and the pin remains asserted, then the flag is set again immediately after it is cleared."] _1 = 1, } impl From<ISF_A> for bool { #[inline(always)] fn from(variant: ISF_A) -> Self { variant as u8 != 0 } } #[doc = "Reader of field `ISF`"] pub type ISF_R = crate::R<bool, ISF_A>; impl ISF_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> ISF_A { match self.bits { false => ISF_A::_0, true => ISF_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { *self == ISF_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { *self == ISF_A::_1 } } #[doc = "Write proxy for field `ISF`"] pub struct ISF_W<'a> { w: &'a mut W, } impl<'a> ISF_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: ISF_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "Configured interrupt is not detected."] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(ISF_A::_0) } #[doc = "Configured interrupt is detected. If the pin is configured to generate a DMA request, then the corresponding flag will be cleared automatically at the completion of the requested DMA transfer. Otherwise, the flag remains set until a logic one is written to the flag. 
If the pin is configured for a level sensitive interrupt and the pin remains asserted, then the flag is set again immediately after it is cleared."] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(ISF_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 24)) | (((value as u32) & 0x01) << 24); self.w } } impl R { #[doc = "Bit 0 - Pull Select"] #[inline(always)] pub fn ps(&self) -> PS_R { PS_R::new((self.bits & 0x01) != 0) } #[doc = "Bit 1 - Pull Enable"] #[inline(always)] pub fn pe(&self) -> PE_R { PE_R::new(((self.bits >> 1) & 0x01) != 0) } #[doc = "Bit 2 - Slew Rate Enable"] #[inline(always)] pub fn sre(&self) -> SRE_R { SRE_R::new(((self.bits >> 2) & 0x01) != 0) } #[doc = "Bit 4 - Passive Filter Enable"] #[inline(always)] pub fn pfe(&self) -> PFE_R { PFE_R::new(((self.bits >> 4) & 0x01) != 0) } #[doc = "Bit 5 - Open Drain Enable"] #[inline(always)] pub fn ode(&self) -> ODE_R { ODE_R::new(((self.bits >> 5) & 0x01) != 0) } #[doc = "Bit 6 - Drive Strength Enable"] #[inline(always)] pub fn dse(&self) -> DSE_R { DSE_R::new(((self.bits >> 6) & 0x01) != 0) } #[doc = "Bits 8:10 - Pin Mux Control"] #[inline(always)] pub fn mux(&self) -> MUX_R
#[doc = "Bit 15 - Lock Register"] #[inline(always)] pub fn lk(&self) -> LK_R { LK_R::new(((self.bits >> 15) & 0x01) != 0) } #[doc = "Bits 16:19 - Interrupt Configuration"] #[inline(always)] pub fn irqc(&self) -> IRQC_R { IRQC_R::new(((self.bits >> 16) & 0x0f) as u8) } #[doc = "Bit 24 - Interrupt Status Flag"] #[inline(always)] pub fn isf(&self) -> ISF_R { ISF_R::new(((self.bits >> 24) & 0x01) != 0) } } impl W { #[doc = "Bits 8:10 - Pin Mux Control"] #[inline(always)] pub fn mux(&mut self) -> MUX_W { MUX_W { w: self } } #[doc = "Bit 15 - Lock Register"] #[inline(always)] pub fn lk(&mut self) -> LK_W { LK_W { w: self } } #[doc = "Bits 16:19 - Interrupt Configuration"] #[inline(always)] pub fn irqc(&mut self) -> IRQC_W { IRQC_W { w: self } } #[doc = "Bit 24 - Interrupt Status Flag"] #[inline(always)] pub fn isf(&mut self) -> ISF_W { ISF_W { w: self } } }
{ MUX_R::new(((self.bits >> 8) & 0x07) as u8) }
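Every accessor in the generated register code above reduces to the same read-modify-write arithmetic on one 32-bit value: shift and mask to read; clear the field, then OR in the shifted value to write. For the 3-bit MUX field at bits 8..10 (whose reader is the completed middle above), the arithmetic looks like this language-neutral sketch (not the svd2rust API):

MUX_SHIFT, MUX_MASK = 8, 0x07

def read_mux(reg):
    return (reg >> MUX_SHIFT) & MUX_MASK

def write_mux(reg, value):
    return (reg & ~(MUX_MASK << MUX_SHIFT)) | ((value & MUX_MASK) << MUX_SHIFT)

reg = 0x0500                 # MUX = 0b101, alternative 5
assert read_mux(reg) == 5
reg = write_mux(reg, 0b001)  # switch the pin to GPIO (alternative 1)
assert read_mux(reg) == 1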
test.py
from multiprocessing import Process,Queue import os import time q = Queue() def _write(q): print('Process(%s) is writing...' % os.getpid()) while 1: time.sleep(2) url = 100 q.put(url) print('Put %s to queue...' % url) if __name__ == "__main__": p = Process(target=_write,args=(q,))
p.join() # https://www.cnblogs.com/itogo/p/5635629.html
p.start()
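Reassembled in prefix + middle + suffix order, the script calls p.start() and then p.join(); the ordering is right, but _write loops forever, so the join never returns. A minimal runnable variant with a bounded producer (names chosen for this sketch, not taken from the linked post):

from multiprocessing import Process, Queue

def write(q, n):
    for i in range(n):
        q.put(i)

if __name__ == "__main__":
    q = Queue()
    p = Process(target=write, args=(q, 3))
    p.start()                            # start() must precede join()
    print([q.get() for _ in range(3)])   # drain the queue before joining
    p.join()                             # returns because write() exits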
protogen.go
// Copyright 2017 Google Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package ygen import ( "bytes" "fmt" "hash/fnv" "path/filepath" "regexp" "sort" "strings" "github.com/openconfig/goyang/pkg/yang" "github.com/openconfig/ygot/genutil" "github.com/openconfig/ygot/util" ) // Constants defining the defaults for Protobuf package generation. These constants // can be referred to by calling applications as defaults that are presented to // a user. const ( // DefaultBasePackageName defines the default base package that is // generated when generating proto3 code. DefaultBasePackageName = "openconfig" // DefaultEnumPackageName defines the default package name that is // used for the package that defines enumerated types that are // used throughout the schema. DefaultEnumPackageName = "enums" // DefaultYwrapperPath defines the default import path for the ywrapper.proto file, // excluding the filename. DefaultYwrapperPath = "github.com/openconfig/ygot/proto/ywrapper" // DefaultYextPath defines the default import path for the yext.proto file, excluding // the filename. DefaultYextPath = "github.com/openconfig/ygot/proto/yext" ) const ( // protoEnumZeroName is the name given to the value 0 in each generated protobuf enum. protoEnumZeroName string = "UNSET" // protoAnyType is the name of the type to use for a google.protobuf.Any field. protoAnyType = "google.protobuf.Any" // protoAnyPackage is the name of the import to be used when a google.protobuf.Any field // is included in the output data. protoAnyPackage = "google/protobuf/any.proto" // protoListKeyMessageSuffix specifies the suffix that should be added to a list's name // to specify the repeated message that makes up the list's key. The repeated message is // called <ListNameInCamelCase><protoListKeyMessageSuffix>. protoListKeyMessageSuffix = "Key" // protoSchemaAnnotationOption specifies the name of the FieldOption used to annotate // schemapaths into a protobuf message. protoSchemaAnnotationOption = "(yext.schemapath)" // protoMatchingListNameKeySuffix defines the suffix that should be added to a list // key's name in the case that it matches the name of the list itself. This is required // since in the case that we have YANG whereby there is a list that has a key // with the same name as the list, i.e.,: // // list foo { // key "foo"; // leaf foo { type string; } // } // // Then we need to ensure that we do not generate a message that has the // same field name used twice, i.e.: // // message FooParent { // message Foo { // ywrapper.StringValue foo = NN; // } // message FooKey { // string foo = 1; // Foo foo = 2; // } // repeated FooKey foo = NN; // } // // which may otherwise occur. In these cases, rather than rely on // genutil.MakeNameUnique which would append "_" to the name of the key we explicitly // append _ plus the string defined in protoMatchingListNameKeySuffix to the list name. protoMatchingListNameKeySuffix = "key" ) // protoMsgField describes a field of a protobuf message. 
// Note, throughout this package private structs that have public fields are used // in text/template which cannot refer to unexported fields. type protoMsgField struct { Tag uint32 // Tag is the field number that should be used in the protobuf message. Name string // Name is the field's name. Type string // Type is the protobuf type for the field. IsRepeated bool // IsRepeated indicates whether the field is repeated. Options []*protoOption // Extensions is the set of field extensions that should be specified for the field. IsOneOf bool // IsOneOf indicates that the field is a oneof and hence consists of multiple subfields. OneOfFields []*protoMsgField // OneOfFields contains the set of fields within the oneof } // protoOption describes a protobuf (message or field) option. type protoOption struct { // Name is the protobuf option's name. Name string // Value is the protobuf option's value. Value string } // protoMsg describes a protobuf message. type protoMsg struct { Name string // Name is the name of the protobuf message to be output. YANGPath string // YANGPath stores the path that the message corresponds to within the YANG schema. Fields []*protoMsgField // Fields is a slice of the fields that are within the message. Imports []string // Imports is a slice of strings that contains the relative import paths that are required by this message. Enums map[string]*protoMsgEnum // Enums lists the embedded enumerations within the message. ChildMsgs []*generatedProto3Message // ChildMsgs is the set of messages that should be embedded within the message. PathComment bool // PathComment - when set - indicates that comments that specify the path to a message should be included in the output protobuf. } // protoMsgEnum represents an embedded enumeration within a protobuf message. type protoMsgEnum struct { Values map[int64]protoEnumValue // Values that the enumerated type can take. } // protoEnumValue describes a value within a Protobuf enumeration. type protoEnumValue struct { ProtoLabel string // ProtoLabel is the label that should be used for the value in the protobuf. YANGLabel string // YANGLabel is the label that was originally specified in the YANG schema. } // protoEnum represents an enumeration that is defined at the root of a protobuf // package. type protoEnum struct { Name string // Name is the enumeration's name within the protobuf package. Description string // Description is a string description of the enumerated type within the YANG schema, used in comments. Values map[int64]protoEnumValue // Values contains the string names, keyed by enum value, that the enumerated type can take. ValuePrefix string // ValuePrefix contains the string prefix that should be prepended to each value within the enumerated type. } // proto3Header describes the header of a Protobuf3 package. type proto3Header struct { PackageName string // PackageName is the name of the package that is to be output. Imports []string // Imports is the set of packages that should be imported by the package whose header is being output. SourceYANGFiles []string // SourceYANGFiles specifies the list of the input YANG files that the protobuf is being generated based on. SourceYANGIncludePaths []string // SourceYANGIncludePaths specifies the list of the paths that were used to search for YANG imports. CompressPaths bool // CompressPaths indicates whether path compression was enabled or disabled for this generated protobuf. CallerName string // CallerName indicates the name of the entity initiating code generation. 
YwrapperPath string // YwrapperPath is the path to the ywrapper.proto file, excluding the filename. YextPath string // YextPath is the path to the yext.proto file, excluding the filename. } var disallowedInProtoIDRegexp = regexp.MustCompile(`[^a-zA-Z0-9_]`) var ( // protoHeaderTemplate is populated and output at the top of the protobuf code output. protoHeaderTemplate = mustMakeTemplate("header", ` {{- /**/ -}} // {{ .PackageName }} is generated by {{ .CallerName }} as a protobuf // representation of a YANG schema. // // Input schema modules: {{- range $inputFile := .SourceYANGFiles }} // - {{ $inputFile }} {{- end }} {{- if .SourceYANGIncludePaths }} // Include paths: {{- range $importPath := .SourceYANGIncludePaths }} // - {{ $importPath }} {{- end -}} {{- end }} syntax = "proto3"; package {{ .PackageName }}; import "{{ .YwrapperPath }}/ywrapper.proto"; import "{{ .YextPath }}/yext.proto"; {{- range $importedProto := .Imports }} import "{{ $importedProto }}"; {{- end }} `) // protoMessageTemplate is populated for each entity that is mapped to a message // within the output protobuf. protoMessageTemplate = mustMakeTemplate("msg", ` {{ if .PathComment -}} // {{ .Name }} represents the {{ .YANGPath }} YANG schema element. {{ end -}} message {{ .Name }} { {{- range $idx, $msg := .ChildMsgs -}} {{- indentLines $msg.MessageCode -}} {{- end -}} {{- range $ename, $enum := .Enums }} enum {{ $ename }} { {{- range $i, $val := $enum.Values }} {{ toUpper $ename }}_{{ $val.ProtoLabel }} = {{ $i }} {{- if ne $val.YANGLabel "" }} [(yext.yang_name) = "{{ $val.YANGLabel }}"]{{ end -}} ; {{- end }} } {{- end -}} {{- range $idx, $field := .Fields }} {{ if $field.IsOneOf -}} oneof {{ $field.Name }} { {{- range $ooField := .OneOfFields }} {{ $ooField.Type }} {{ $ooField.Name }} = {{ $ooField.Tag }}; {{- end }} } {{- else -}} {{ if $field.IsRepeated }}repeated {{ end -}} {{ $field.Type }} {{ $field.Name }} = {{ $field.Tag }} {{- $noOptions := len .Options -}} {{- if ne $noOptions 0 }} [ {{- range $i, $opt := $field.Options -}} {{- $opt.Name }} = {{ $opt.Value -}} {{- if ne (inc $i) $noOptions -}}, {{- end }} {{- end -}} ] {{- end -}} ; {{- end -}} {{- end }} }`) // protoEnumTemplate is the template used to generate enumerations that are // not within a message. Such enums are used where there are referenced YANG // identity nodes, and where there are typedefs which include an enumeration. protoEnumTemplate = mustMakeTemplate("enum", ` // {{ .Name }} represents an enumerated type generated for the {{ .Description }}. enum {{ .Name }} { {{- range $i, $val := .Values }} {{ toUpper $.ValuePrefix }}_{{ $val.ProtoLabel }} = {{ $i }} {{- if ne $val.YANGLabel "" }} [(yext.yang_name) = "{{ $val.YANGLabel }}"]{{ end -}} ; {{- end }} } `) ) // writeProto3Header outputs the header for a proto3 generated file. It takes // an input proto3Header struct specifying the input arguments describing the // generated package, and returns a string containing the generated package's // header. func writeProto3Header(in proto3Header) (string, error) { if in.CallerName == "" { in.CallerName = genutil.CallerName() } // Sort the list of imports such that they are output in alphabetical // order, minimising diffs. sort.Strings(in.Imports) var b bytes.Buffer if err := protoHeaderTemplate.Execute(&b, in); err != nil { return "", err } return b.String(), nil } // generatedProto3Message contains the code for a proto3 message. 
type generatedProto3Message struct {
    PackageName     string   // PackageName is the name of the package that the proto3 message is within.
    MessageCode     string   // MessageCode contains the proto3 definition of the message.
    RequiredImports []string // RequiredImports contains the imports that are required by the generated message.
}

// protoMsgConfig defines the set of configuration options required to generate a Protobuf message.
type protoMsgConfig struct {
    compressPaths       bool   // compressPaths indicates whether path compression should be enabled.
    basePackageName     string // basePackageName specifies the package name that is the base for all child packages.
    enumPackageName     string // enumPackageName specifies the package in which global enum definitions are specified.
    baseImportPath      string // baseImportPath specifies the path that should be used for importing the generated files.
    annotateSchemaPaths bool   // annotateSchemaPaths uses the yext protobuf field extensions to annotate the paths from the schema into the output protobuf.
    annotateEnumNames   bool   // annotateEnumNames uses the yext protobuf enum value extensions to annotate the original YANG name for an enum into the output protobuf.
    nestedMessages      bool   // nestedMessages indicates whether nested messages should be output for the protobuf schema.
}

// writeProto3Msg outputs the generated Protobuf3 code for a particular protobuf message. It takes:
//  - msg: The Directory struct that describes a particular protobuf3 message.
//  - msgs: The set of other Directory structs, keyed by schema path, that represent the other proto3
//    messages to be generated.
//  - protogen: The current generator state.
//  - cfg: The configuration for the message creation as defined in a protoMsgConfig struct.
// It returns a generatedProto3Message pointer which includes the definition of the proto3 message, particularly the
// name of the package it is within, the code for the message, and any imports for packages that are referenced by
// the message.
func writeProto3Msg(msg *Directory, msgs map[string]*Directory, protogen *protoGenState, cfg *protoMsgConfig, useDefiningModuleForTypedefEnumNames, useConsistentNamesForProtoUnionEnums bool) (*generatedProto3Message, util.Errors) {
    if cfg.nestedMessages {
        if !outputNestedMessage(msg, cfg.compressPaths) {
            return nil, nil
        }
        return writeProto3MsgNested(msg, msgs, protogen, cfg, useDefiningModuleForTypedefEnumNames, useConsistentNamesForProtoUnionEnums)
    }

    return writeProto3MsgSingleMsg(msg, msgs, protogen, cfg, useDefiningModuleForTypedefEnumNames, useConsistentNamesForProtoUnionEnums)
}

// outputNestedMessage determines whether the message represented by the supplied
// Directory is a message that should be output when nested messages are being
// created. The compressPaths argument specifies whether path compression is enabled.
// Valid messages are those that are direct children of a module, or become a direct
// child when path compression is enabled (i.e., lists that have their parent
// surrounding container removed).
func outputNestedMessage(msg *Directory, compressPaths bool) bool {
    // If path compression is enabled, and this entry is a list, then its top-level
    // parent will have been removed, therefore this is a valid message. The path
    // is 4 elements long since it is of the form
    // []string{"", module-name, surrounding-container, list-name}.
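    // For example (illustrative path, not taken from this codebase): a list at
    // /openconfig-interfaces/interfaces/interface would have
    // Path = []string{"", "openconfig-interfaces", "interfaces", "interface"}.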
    if compressPaths && msg.Entry.IsList() && len(msg.Path) == 4 {
        return true
    }
    return msg.isChildOfModule()
}

// writeProto3MsgNested returns a nested set of protobuf messages for the message
// supplied, which is expected to be a top-level message that code generation is
// being performed for. It takes:
//  - msg: the top-level directory definition
//  - msgs: the set of message definitions (keyed by path) that are to be output
//  - protogen: the current code generation state.
//  - cfg: the configuration for the current code generation.
// It returns a generated protobuf3 message.
func writeProto3MsgNested(msg *Directory, msgs map[string]*Directory, protogen *protoGenState, cfg *protoMsgConfig, useDefiningModuleForTypedefEnumNames, useConsistentNamesForProtoUnionEnums bool) (*generatedProto3Message, util.Errors) {
    var gerrs util.Errors
    var childMsgs []*generatedProto3Message
    // Find all the children of the current message that should be output.
    for _, n := range msgs {
        if util.IsDirectEntryChild(msg.Entry, n.Entry, cfg.compressPaths) {
            cmsg, errs := writeProto3MsgNested(n, msgs, protogen, cfg, useDefiningModuleForTypedefEnumNames, useConsistentNamesForProtoUnionEnums)
            if errs != nil {
                gerrs = append(gerrs, errs...)
                continue
            }
            childMsgs = append(childMsgs, cmsg)
        }
    }

    pkg, err := protobufPackageForMsg(msg, protogen, cfg.compressPaths, cfg.nestedMessages)
    if err != nil {
        return nil, append(gerrs, err)
    }

    // Generate this message, and its associated messages.
    msgDefs, errs := genProto3Msg(msg, msgs, protogen, cfg, pkg, childMsgs, useDefiningModuleForTypedefEnumNames, useConsistentNamesForProtoUnionEnums)
    if errs != nil {
        return nil, append(gerrs, errs...)
    }

    gmsg, errs := genProto3MsgCode(pkg, msgDefs, false)
    if errs != nil {
        return nil, append(gerrs, errs...)
    }

    if gerrs != nil {
        return nil, gerrs
    }

    // Inherit the set of imports that are required for this child. We
    // skip any that are relative imports as these are only needed for
    // the case that we have different files per hierarchy level and
    // are not nesting messages.
    var imports []string
    if msg.IsFakeRoot {
        imports = gmsg.RequiredImports
    } else {
        allImports := map[string]bool{}
        for _, ch := range childMsgs {
            for _, i := range ch.RequiredImports {
                allImports[i] = true
            }
        }
        for _, i := range gmsg.RequiredImports {
            allImports[i] = true
        }

        epk := filepath.Join(cfg.baseImportPath, cfg.basePackageName, cfg.enumPackageName, fmt.Sprintf("%s.proto", cfg.enumPackageName))
        for i := range allImports {
            if !strings.HasPrefix(i, cfg.baseImportPath) {
                imports = append(imports, i)
            }
        }
        // Append the enum package import once, outside of the loop above, so
        // that it is not duplicated once per entry in allImports.
        if allImports[epk] {
            imports = append(imports, epk)
        }
    }
    gmsg.RequiredImports = imports

    return gmsg, nil
}

// protobufPackageForMsg takes a YANG directory definition, the current generator
// state, whether path compression is currently enabled, and whether nested messages
// are to be output and determines the package name for the output protobuf. In the
// case that nested messages are being output, the package name is derived based
// on the top-level module that the message is within.
func protobufPackageForMsg(msg *Directory, protogen *protoGenState, compressPaths, nestedMessages bool) (string, error) {
    switch {
    case msg.IsFakeRoot:
        // In this case, we explicitly leave the package name empty, which is interpreted
        // as meaning that the base package is used throughout the handling code.
        return "", nil
    case msg.Entry.Parent == nil:
        return "", fmt.Errorf("YANG schema element %s does not have a parent, protobuf messages are not generated for modules", msg.Entry.Path())
    }

    e := msg.Entry
    // If we have nested messages enabled, the protobuf package name is defined
    // based on the top-level message within the schema tree that is created -
    // we therefore need to derive the name of this message.
    if nestedMessages {
        if compressPaths {
            if e.Parent.Parent == nil {
                // In the special case that the grandparent of this entry is nil, and
                // compress paths is enabled, then we are a top-level schema element - so
                // this message should be in the root package.
                return "", nil
            }
            if e.IsList() && e.Parent.Parent.Parent == nil {
                // If this is a list, and our great-grandparent is a module, then
                // since the level above this node has been compressed out, then it
                // is at the root.
                return "", nil
            }
        }
        if e.Parent != nil && e.Parent.Parent != nil {
            var n *yang.Entry
            for n = e.Parent; n.Parent.Parent != nil; n = n.Parent {
            }
            e = n
        }
    }

    return protogen.protobufPackage(e, compressPaths), nil
}

// writeProto3MsgSingleMsg generates a protobuf message definition. It takes the
// arguments of writeProto3Msg, and outputs a package definition together with a
// single protobuf message.
func writeProto3MsgSingleMsg(msg *Directory, msgs map[string]*Directory, protogen *protoGenState, cfg *protoMsgConfig, useDefiningModuleForTypedefEnumNames, useConsistentNamesForProtoUnionEnums bool) (*generatedProto3Message, util.Errors) {
    pkg, err := protobufPackageForMsg(msg, protogen, cfg.compressPaths, cfg.nestedMessages)
    if err != nil {
        return nil, []error{err}
    }

    msgDefs, errs := genProto3Msg(msg, msgs, protogen, cfg, pkg, nil, useDefiningModuleForTypedefEnumNames, useConsistentNamesForProtoUnionEnums)
    if errs != nil {
        return nil, errs
    }

    return genProto3MsgCode(pkg, msgDefs, true)
}

// genProto3MsgCode takes an input package name, and set of protobuf message
// definitions, and outputs the generated code for the messages. If the
// pathComment argument is set, each message is output with a comment
// indicating its path in the YANG schema; otherwise the comment is omitted.
func genProto3MsgCode(pkg string, msgDefs []*protoMsg, pathComment bool) (*generatedProto3Message, util.Errors) {
    var b bytes.Buffer
    var errs util.Errors
    imports := map[string]interface{}{}
    for i, msgDef := range msgDefs {
        // Sort the child messages into a deterministic order. We cannot use the
        // package name as a key as it may be the same for multiple packages, therefore
        // use the code.
        cmsgs := map[string]*generatedProto3Message{}
        var cstrs []string
        for _, m := range msgDef.ChildMsgs {
            if m == nil {
                errs = append(errs, fmt.Errorf("received nil message in %s", pkg))
                continue
            }
            cmsgs[m.MessageCode] = m
            cstrs = append(cstrs, m.MessageCode)
        }
        sort.Strings(cstrs)

        var nm []*generatedProto3Message
        for _, c := range cstrs {
            nm = append(nm, cmsgs[c])
        }
        msgDef.ChildMsgs = nm

        msgDef.PathComment = pathComment
        if err := protoMessageTemplate.Execute(&b, msgDef); err != nil {
            return nil, []error{err}
        }
        addNewKeys(imports, msgDef.Imports)
        if i != len(msgDefs)-1 {
            b.WriteRune('\n')
        }
    }
    if errs != nil {
        return nil, errs
    }

    return &generatedProto3Message{
        PackageName:     pkg,
        MessageCode:     b.String(),
        RequiredImports: stringKeys(imports),
    }, nil
}

// modifyFieldTag assigns sequential tag numbers, starting at tag+1, to the
// supplied fields, recursing into oneof subfields. It returns the last tag
// number that was assigned.
func modifyFieldTag(fields []*protoMsgField, tag uint32) uint32 {
    for _, f := range fields {
        f.Tag = tag + 1
        tag++
        if f.IsOneOf {
            tag = modifyFieldTag(f.OneOfFields, tag)
        }
    }
    return tag
}

// genProto3Msg takes an input Directory which describes a container or list entry
// within the YANG schema and returns a protoMsg which can be mapped to the protobuf
// code representing it. It uses the set of messages that have been extracted and the
// current generator state to map to other messages and ensure uniqueness of names.
// The configuration parameters for the current code generation required are supplied
// as a protoMsgConfig struct. The parentPkg argument specifies the name of the parent
// package for the protobuf message(s) that are being generated, such that relative
// paths can be used in the messages.
func genProto3Msg(msg *Directory, msgs map[string]*Directory, protogen *protoGenState, cfg *protoMsgConfig, parentPkg string, childMsgs []*generatedProto3Message, useDefiningModuleForTypedefEnumNames, useConsistentNamesForProtoUnionEnums bool) ([]*protoMsg, util.Errors) {
    var errs util.Errors

    var msgDefs []*protoMsg

    msgDef := &protoMsg{
        // msg.name is already specified to be CamelCase in the form we expect it
        // to be for the protobuf message name.
        Name:      msg.Name,
        YANGPath:  util.SlicePathToString(msg.Path),
        Enums:     map[string]*protoMsgEnum{},
        ChildMsgs: childMsgs,
    }

    definedFieldNames := map[string]bool{}
    imports := map[string]interface{}{}

    var fNames []string
    for name := range msg.Fields {
        fNames = append(fNames, name)
    }
    sort.Strings(fNames)

    skipFields := map[string]bool{}
    if util.IsKeyedList(msg.Entry) {
        skipFields = util.ListKeyFieldsMap(msg.Entry)
    }

    var ctags uint32 = 1
    for _, name := range fNames {
        // Skip fields that we are explicitly not asked to include.
        if _, ok := skipFields[name]; ok {
            continue
        }

        var err error
        field := msg.Fields[name]

        fieldDef := &protoMsgField{
            Name: genutil.MakeNameUnique(safeProtoIdentifierName(name), definedFieldNames),
        }
        fieldDef.Tag = ctags

        defArgs := &protoDefinitionArgs{
            field:              field,
            directory:          msg,
            definedDirectories: msgs,
            definedFieldNames:  definedFieldNames,
            protogen:           protogen,
            cfg:                cfg,
            parentPkg:          parentPkg,
        }

        switch {
        case field.IsList():
            keyMsg, listImports, listErrs := addProtoListField(fieldDef, msgDef, defArgs, useDefiningModuleForTypedefEnumNames, useConsistentNamesForProtoUnionEnums)
            if listErrs != nil {
                errs = append(errs, listErrs...)
                continue
            }
            addNewKeys(imports, listImports)
            if keyMsg != nil {
                msgDefs = append(msgDefs, keyMsg)
            }
        case field.IsContainer():
            cImports, err := addProtoContainerField(fieldDef, defArgs)
            if err != nil {
                errs = append(errs, err)
                continue
            }
            addNewKeys(imports, cImports)
        case field.IsLeaf() || field.IsLeafList():
            repeatedMsg, lImports, lErrs := addProtoLeafOrLeafListField(fieldDef, msgDef, defArgs, useDefiningModuleForTypedefEnumNames, useConsistentNamesForProtoUnionEnums)
            if lErrs != nil {
                errs = append(errs, lErrs...)
continue } addNewKeys(imports, lImports) if fieldDef.IsOneOf { ctags = modifyFieldTag(fieldDef.OneOfFields, ctags) } if repeatedMsg != nil { msgDefs = append(msgDefs, repeatedMsg) } case util.IsAnydata(field): fieldDef.Type = protoAnyType imports[protoAnyPackage] = true default: err = fmt.Errorf("proto: unknown field type in message %s, field %s", msg.Name, field.Name) } if cfg.annotateSchemaPaths { o, err := protoSchemaPathAnnotation(msg, name, cfg.compressPaths) if err != nil { errs = append(errs, err) continue } fieldDef.Options = append(fieldDef.Options, o) } if err != nil { errs = append(errs, err) continue } msgDef.Fields = append(msgDef.Fields, fieldDef) ctags++ } msgDef.Imports = stringKeys(imports) return append(msgDefs, msgDef), errs } // protoDefinitionArgs is used as the input argument when YANG is being mapped to protobuf. type protoDefinitionArgs struct { field *yang.Entry // field is the yang.Entry for which the proto output is being defined, in the case that the definition is for an individual entry. directory *Directory // directory is the Directory for which the proto output is being defined, in the case that the definition is for an directory entry. definedDirectories map[string]*Directory // definedDirectories specifies the set of Directories that have been defined in the current code generation context. definedFieldNames map[string]bool // definedFieldNames specifies the field names that have been defined in the context. protogen *protoGenState // protogen is the current generator state. cfg *protoMsgConfig parentPkg string // parentPackage stores the name of the protobuf package that the field's parent is within. } // addProtoListField modifies the field definition in fieldDef (which must correspond to a list field of a // YANG schema) to contain the definition of the field described by the args. In the case that the list is keyed // and nested messages are being output, the generated protobuf message for the key is appended to the supplied // message definition (msgDef). If nested messages are not being output, a definition of the key message is returned. // Along with the optional key message, it returns a list of the imports being used for the list. func addProtoListField(fieldDef *protoMsgField, msgDef *protoMsg, args *protoDefinitionArgs, useDefiningModuleForTypedefEnumNames, useConsistentNamesForProtoUnionEnums bool) (*protoMsg, []string, util.Errors) { listDef, keyMsg, err := protoListDefinition(args, useDefiningModuleForTypedefEnumNames, useConsistentNamesForProtoUnionEnums) if err != nil { return nil, nil, []error{fmt.Errorf("could not define list %s: %v", args.field.Path(), err)} } var nKeyMsg *protoMsg if keyMsg != nil { if args.cfg.nestedMessages { // If nested messages are being output, we must ensure that the // generated key message is output within the parent message - hence // it is generated directly here and appended to the child messages. kc, cerrs := genProto3MsgCode(args.parentPkg, []*protoMsg{keyMsg}, false) if cerrs != nil { return nil, nil, cerrs } msgDef.ChildMsgs = append(msgDef.ChildMsgs, kc) } else { nKeyMsg = keyMsg } } fieldDef.Type = listDef.listType // Lists are always repeated fields. fieldDef.IsRepeated = true return nKeyMsg, listDef.imports, nil } // addProtoContainerField modifies the field definition in fieldDef (which must correspond to a container field of // a YANG schema) to contain the definition of the field described by the args. 
It returns a slice of strings containing
// the protobuf package imports that are required for the container definition.
func addProtoContainerField(fieldDef *protoMsgField, args *protoDefinitionArgs) ([]string, error) {
    childmsg, ok := args.definedDirectories[args.field.Path()]
    if !ok {
        return nil, fmt.Errorf("proto: could not resolve %s into a defined struct", args.field.Path())
    }

    imports := map[string]interface{}{}

    var pfx string
    if !(args.cfg.compressPaths && args.directory.IsFakeRoot) {
        childpkg := args.protogen.protobufPackage(childmsg.Entry, args.cfg.compressPaths)

        // Add the import to the slice of imports if it is not already
        // there. This allows the message file to import the required
        // child packages.
        childpath := importPath(args.cfg.baseImportPath, args.cfg.basePackageName, childpkg)
        if imports[childpath] == nil {
            if !args.cfg.nestedMessages || args.directory.IsFakeRoot {
                imports[childpath] = true
            }
        }

        p, _ := stripPackagePrefix(args.parentPkg, childpkg)
        if !args.cfg.nestedMessages || args.directory.IsFakeRoot {
            pfx = fmt.Sprintf("%s.", p)
        }
    }
    fieldDef.Type = fmt.Sprintf("%s%s", pfx, childmsg.Name)
    return stringKeys(imports), nil
}

// addProtoLeafOrLeafListField modifies the field definition in fieldDef to contain a definition of the field that is
// described in the args. If the field corresponds to a leaf-list of unions and hence requires another message to be
// generated for it, it is appended to the message definition supplied (msgDef) when nested messages are being output,
// otherwise it is returned. In addition, it returns a slice of strings describing the imports that are required for
// the message.
func addProtoLeafOrLeafListField(fieldDef *protoMsgField, msgDef *protoMsg, args *protoDefinitionArgs, useDefiningModuleForTypedefEnumNames, useConsistentNamesForProtoUnionEnums bool) (*protoMsg, []string, util.Errors)
// writeProtoEnums takes a map of enumerated types within the YANG schema and // returns the mapped Protobuf enum definition corresponding to each type. If // the annotateEnumNames bool is set, then the original enum value label is // stored in the definition. Since leaves that are of type enumeration are // output directly within a Protobuf message, these are skipped. func writeProtoEnums(enums map[string]*yangEnum, annotateEnumNames bool) ([]string, util.Errors) { var errs util.Errors var genEnums []string for _, enum := range enums { if skip, ok := enum.entry.Annotation["skipGlobalProtoGeneration"].(bool); util.IsSimpleEnumerationType(enum.entry.Type) || enum.entry.Type.Kind == yang.Yunion || (ok && skip) { // Skip simple enumerations and those within unions. // Furthermore, under the consistent naming scheme, non-typedef enumerations // within non-typedef unions are not generated in the global // file, only in the messages. continue } // Make the name of the enum upper case to follow Protobuf enum convention. p := &protoEnum{Name: enum.name} switch { case util.IsIdentityrefLeaf(enum.entry): // For an identityref the values are based on // the name of the identities that correspond with the base, and the value // is gleaned from the YANG schema. values := map[int64]protoEnumValue{ 0: {ProtoLabel: protoEnumZeroName}, } // Ensure that we output the identity values in a determinstic order. nameMap := map[string]*yang.Identity{} var names []string for _, v := range enum.entry.Type.IdentityBase.Values { names = append(names, v.Name) nameMap[v.Name] = v } for _, n := range names { v := nameMap[n] // Calculate a tag value for the identity values, since otherwise when another // module augments this module then the enum values may be subject to change. tag, err := fieldTag(fmt.Sprintf("%s%s", enum.entry.Type.IdentityBase.Name, v.Name)) if err != nil { errs = append(errs, fmt.Errorf("cannot calculate tag for %s: %v", v.Name, err)) } values[int64(tag)] = toProtoEnumValue(strings.ToUpper(safeProtoIdentifierName(v.Name)), v.Name, annotateEnumNames) } p.Values = values p.ValuePrefix = strings.ToUpper(enum.name) p.Description = fmt.Sprintf("YANG identity %s", enum.entry.Type.IdentityBase.Name) case enum.entry.Type.Kind == yang.Yenum: ge, err := genProtoEnum(enum.entry, annotateEnumNames) if err != nil { errs = append(errs, err) continue } p.Values = ge.Values // Capitalize name per proto style. p.ValuePrefix = strings.ToUpper(enum.name) // If the supplied enum entry has the valuePrefix annotation then use it to // calculate the enum value names. 
if e, ok := enum.entry.Annotation["valuePrefix"]; ok { t, ok := e.([]string) if ok { var pp []string for _, pe := range t { pp = append(pp, strings.ToUpper(safeProtoIdentifierName(yang.CamelCase(pe)))) } p.ValuePrefix = strings.Join(pp, "_") } } p.Description = fmt.Sprintf("YANG enumerated type %s", enum.entry.Type.Name) case len(enum.entry.Type.Type) != 0: errs = append(errs, fmt.Errorf("unimplemented: support for multiple enumerations within a union for %v", enum.name)) continue default: errs = append(errs, fmt.Errorf("unknown type of enumerated value in writeProtoEnums for %s, got: %v, type: %v", enum.name, enum, enum.entry.Type)) } var b bytes.Buffer if err := protoEnumTemplate.Execute(&b, p); err != nil { errs = append(errs, fmt.Errorf("cannot generate enumeration for %s: %v", enum.name, err)) continue } genEnums = append(genEnums, b.String()) } if len(errs) != 0 { return nil, errs } return genEnums, nil } // genProtoEnum takes an input yang.Entry that contains an enumerated type // and returns a protoMsgEnum that contains its definition within the proto // schema. If the annotateEnumNames bool is set, then the original YANG name // is stored with each enum value. func genProtoEnum(field *yang.Entry, annotateEnumNames bool) (*protoMsgEnum, error) { eval := map[int64]protoEnumValue{} names := field.Type.Enum.NameMap() eval[0] = protoEnumValue{ProtoLabel: protoEnumZeroName} if d := field.DefaultValue(); d != "" { if _, ok := names[d]; !ok { return nil, fmt.Errorf("enumeration %s specified a default - %s - that was not a valid value", field.Path(), d) } eval[0] = toProtoEnumValue(safeProtoIdentifierName(d), d, annotateEnumNames) } for n := range names { if n == field.DefaultValue() { // Can't happen if there was not a default, since "" is not // a valid enumeration name in YANG. continue } // Names are converted to upper case to follow the protobuf style guide, // adding one to ensure that the 0 value can represent unused values. eval[field.Type.Enum.Value(n)+1] = toProtoEnumValue(safeProtoIdentifierName(n), n, annotateEnumNames) } return &protoMsgEnum{Values: eval}, nil } // protoMsgListField describes a list field within a protobuf mesage. type protoMsgListField struct { listType string // listType is the name of the message that represents a list member. imports []string // imports is the set of modules that are required by this list message. } // protoListDefinition takes an input field described by a yang.Entry, the generator context (the set of proto messages, and the generator // state), along with whether path compression is enabled and generates the proto message definition for the list. It returns the definition // of the field representing the list as a protoMsgListField and an optional message which stores the key of a keyed list. 
func protoListDefinition(args *protoDefinitionArgs, useDefiningModuleForTypedefEnumNames, useConsistentNamesForProtoUnionEnums bool) (*protoMsgListField, *protoMsg, error) { listMsg, ok := args.definedDirectories[args.field.Path()] if !ok { return nil, nil, fmt.Errorf("proto: could not resolve list %s into a defined message", args.field.Path()) } listMsgName, ok := args.protogen.uniqueDirectoryNames[args.field.Path()] if !ok { return nil, nil, fmt.Errorf("proto: could not find unique message name for %s", args.field.Path()) } childPkg := args.protogen.protobufPackage(listMsg.Entry, args.cfg.compressPaths) var listKeyMsg *protoMsg var listDef *protoMsgListField if !util.IsKeyedList(listMsg.Entry) { // In proto3 we represent unkeyed lists as a // repeated field of the list message. listDef = &protoMsgListField{ listType: listMsgName, } if !args.cfg.nestedMessages { p := fmt.Sprintf("%s.%s.%s", args.cfg.basePackageName, childPkg, listMsgName) p, _ = stripPackagePrefix(fmt.Sprintf("%s.%s", args.cfg.basePackageName, args.parentPkg), p) listDef = &protoMsgListField{ listType: p, } listDef.imports = []string{importPath(args.cfg.baseImportPath, args.cfg.basePackageName, childPkg)} } } else { // YANG lists are mapped to a repeated message structure as described // in the YANG to Protobuf transformation specification. var err error listKeyMsg, err = genListKeyProto(childPkg, listMsgName, &protoDefinitionArgs{ field: args.field, directory: listMsg, protogen: args.protogen, cfg: args.cfg, parentPkg: args.parentPkg, }, useDefiningModuleForTypedefEnumNames, useConsistentNamesForProtoUnionEnums) if err != nil { return nil, nil, fmt.Errorf("proto: could not build mapping for list entry %s: %v", args.field.Path(), err) } // The type of this field is just the key message's name, since it // will be in the same package as the field's parent. listDef = &protoMsgListField{ listType: listKeyMsg.Name, } } return listDef, listKeyMsg, nil } // protoDefinedLeaf defines a YANG leaf within a protobuf message. type protoDefinedLeaf struct { protoType string // protoType is the protobuf type that the leaf should be mapped to. globalEnum bool // globalEnum indicates whether the leaf's type is a global scope enumeration (identityref, or typedef defining an enumeration) enums map[string]*protoMsgEnum // enums defines the set of enumerated values that are required for this leaf within the parent message. oneofs []*protoMsgField // oneofs defines the set of types within the leaf, if the returned leaf type is a protobuf oneof. repeatedMsg *protoMsg // repeatedMsgs returns a message that should be repeated for this leaf, used in the case of a leaf-list of unions. } // protoLeafDefinition takes an input leafName, and a set of protoDefinitionArgs specifying the context // for the leaf definition, and returns a protoDefinedLeaf describing how it is to be mapped within the // protobuf parent message. 
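// (Illustrative sketch, not from the source schema: a YANG leaf "mtu" of
// "type union { type string; type uint32; }" would be rendered as a oneof
// whose members follow the fieldName_type convention used by unionFieldToOneOf
// below - e.g. mtu_string and mtu_uint64, assuming unsigned integers map to
// the uint64 scalar type.)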
func protoLeafDefinition(leafName string, args *protoDefinitionArgs, useDefiningModuleForTypedefEnumNames, useConsistentNamesForProtoUnionEnums bool) (*protoDefinedLeaf, error) { protoType, err := args.protogen.yangTypeToProtoType(resolveTypeArgs{ yangType: args.field.Type, contextEntry: args.field, }, resolveProtoTypeArgs{ basePackageName: args.cfg.basePackageName, enumPackageName: args.cfg.enumPackageName, }, useDefiningModuleForTypedefEnumNames, useConsistentNamesForProtoUnionEnums) if err != nil { return nil, err } d := &protoDefinedLeaf{ protoType: protoType.NativeType, enums: map[string]*protoMsgEnum{}, } switch { case util.IsSimpleEnumerationType(args.field.Type): // For fields that are simple enumerations within a message, then we embed an enumeration // within the Protobuf message. e, err := genProtoEnum(args.field, args.cfg.annotateEnumNames) if err != nil { return nil, err } d.protoType = genutil.MakeNameUnique(protoType.NativeType, args.definedFieldNames) d.enums = map[string]*protoMsgEnum{} d.enums[d.protoType] = e case util.IsEnumeratedType(args.field.Type): d.globalEnum = true case protoType.UnionTypes != nil: u, err := unionFieldToOneOf(leafName, args.field, protoType, args.cfg.annotateEnumNames, useDefiningModuleForTypedefEnumNames, useConsistentNamesForProtoUnionEnums) if err != nil { return nil, err } // Append any enumerations that are within the union. for n, e := range u.enums { d.enums[n] = e } d.globalEnum = u.hadGlobalEnums // Append the oneof that was in the union. d.oneofs = append(d.oneofs, u.oneOfFields...) if u.repeatedMsg != nil { d.repeatedMsg = u.repeatedMsg d.protoType = u.repeatedMsg.Name } } return d, nil } // toProtoEnumValue takes an input enum definition - with a protobuf and YANG label, and returns // a protoEnumValue. The YANGLabel is only stored if annotateEnumValues is set. func toProtoEnumValue(protoName, yangName string, annotateEnumValues bool) protoEnumValue { ev := protoEnumValue{ ProtoLabel: protoName, } if annotateEnumValues { ev.YANGLabel = yangName } return ev } // safeProtoIdentifierName takes an input string which represents the name of a YANG schema // element and sanitises for use as a protobuf field name. func safeProtoIdentifierName(name string) string { // YANG identifiers must match the definition: // ;; An identifier MUST NOT start with (('X'|'x') ('M'|'m') ('L'|'l')) // identifier = (ALPHA / "_") // *(ALPHA / DIGIT / "_" / "-" / ".") // For Protobuf they must match: // ident = letter { letter | decimalDigit | "_" } // // Therefore we need to replace all characters in the YANG identifier that are not a // letter, digit, or underscore. return disallowedInProtoIDRegexp.ReplaceAllLiteralString(name, "_") } // protoTagForEntry returns a protobuf tag value for the entry e. func protoTagForEntry(e *yang.Entry) (uint32, error) { return fieldTag(e.Path()) } // fieldTag takes an input string and calculates a FNV hash for the value. If the // hash is in the range 19,000-19,999 or 1-1,000, the input string has _ appended to // it and the hash is calculated. func fieldTag(s string) (uint32, error) { h := fnv.New32() if _, err := h.Write([]byte(s)); err != nil { return 0, fmt.Errorf("could not write field path to hash: %v", err) } v := h.Sum32() & 0x1fffffff // 2^29-1 if (v >= 19000 && v <= 19999) || (v >= 1 && v <= 1000) { return fieldTag(fmt.Sprintf("%s_", s)) } return v, nil } // genListKeyProto generates a protoMsg that describes the proto3 message that represents // the key of a list for YANG lists. 
It takes a Directory pointer to the list being // described, the name of the list, the package name that the list is within, and the // current generator state. It returns the definition of the list key proto. func genListKeyProto(listPackage string, listName string, args *protoDefinitionArgs, useDefiningModuleForTypedefEnumNames, useConsistentNamesForProtoUnionEnums bool) (*protoMsg, error) { n := fmt.Sprintf("%s%s", listName, protoListKeyMessageSuffix) km := &protoMsg{ Name: n, YANGPath: args.field.Path(), Enums: map[string]*protoMsgEnum{}, } if listPackage != "" { km.Imports = []string{importPath(args.cfg.baseImportPath, args.cfg.basePackageName, listPackage)} } definedFieldNames := map[string]bool{} ctag := uint32(1) for _, k := range strings.Fields(args.field.Key) { kf, ok := args.directory.Fields[k] if !ok { return nil, fmt.Errorf("list %s included a key %s that did not exist", args.field.Path(), k) } scalarType, err := args.protogen.yangTypeToProtoScalarType(resolveTypeArgs{ yangType: kf.Type, contextEntry: kf, }, resolveProtoTypeArgs{ basePackageName: args.cfg.basePackageName, enumPackageName: args.cfg.enumPackageName, // When there is a union within a list key that has a single type within it // e.g.,: // list foo { // key "bar"; // leaf bar { // type union { // type string { pattern "a.*" } // type string { pattern "b.*" } // } // } // } // Then we want to use the scalar type rather than the wrapper type in // this message since all keys must be set. We therefore signal this in // the call to the type resolution. scalarTypeInSingleTypeUnion: true, }, useDefiningModuleForTypedefEnumNames, useConsistentNamesForProtoUnionEnums) if err != nil { return nil, fmt.Errorf("list %s included a key %s that did not have a valid proto type: %v", args.field.Path(), k, kf.Type) } var enumEntry *yang.Entry var unionEntry *yang.Entry switch { case kf.Type.Kind == yang.Yleafref: target, err := args.protogen.schematree.resolveLeafrefTarget(kf.Type.Path, kf) if err != nil { return nil, fmt.Errorf("error generating type for list %s key %s: type %v", args.field.Path(), k, kf.Type) } if util.IsSimpleEnumerationType(target.Type) { enumEntry = target } if scalarType.UnionTypes != nil { unionEntry = target } if util.IsIdentityrefLeaf(target) { km.Imports = append(km.Imports, importPath(args.cfg.baseImportPath, args.cfg.basePackageName, args.cfg.enumPackageName)) } case util.IsSimpleEnumerationType(kf.Type): enumEntry = kf case util.IsUnionType(kf.Type) && scalarType.UnionTypes != nil: unionEntry = kf } // Make the name of the key unique. We handle the case that the list name // matches the key field name by appending the protoMatchingListNameKeySuffix // to the field name, as described in the definition of protoMatchingListNameKeySuffix. 
fName := genutil.MakeNameUnique(safeProtoIdentifierName(k), definedFieldNames) if args.field.Name == k { fName = fmt.Sprintf("%s_%s", fName, protoMatchingListNameKeySuffix) } fd := &protoMsgField{ Name: fName, Tag: ctag, } switch { case enumEntry != nil: enum, err := genProtoEnum(enumEntry, args.cfg.annotateEnumNames) if err != nil { return nil, fmt.Errorf("error generating type for list %s key %s, type %v", args.field.Path(), k, enumEntry.Type) } tn := genutil.MakeNameUnique(scalarType.NativeType, definedFieldNames) fd.Type = tn km.Enums[tn] = enum case unionEntry != nil: fd.IsOneOf = true u, err := unionFieldToOneOf(fd.Name, unionEntry, scalarType, args.cfg.annotateEnumNames, useDefiningModuleForTypedefEnumNames, useConsistentNamesForProtoUnionEnums) if err != nil { return nil, fmt.Errorf("error generating type for union list key %s in list %s", k, args.field.Path()) } if u.repeatedMsg != nil { for _, f := range u.repeatedMsg.Fields { f.Tag = ctag + 1 ctag++ } } else { for _, f := range u.oneOfFields { f.Tag = ctag + 1 ctag++ } } fd.OneOfFields = append(fd.OneOfFields, u.oneOfFields...) for n, e := range u.enums { km.Enums[n] = e } if u.hadGlobalEnums { km.Imports = append(km.Imports, importPath(args.cfg.baseImportPath, args.cfg.basePackageName, args.cfg.enumPackageName)) } default: fd.Type = scalarType.NativeType } if args.cfg.annotateSchemaPaths { o, err := protoSchemaPathAnnotation(args.directory, k, args.cfg.compressPaths) if err != nil { return nil, err } fd.Options = append(fd.Options, o) } km.Fields = append(km.Fields, fd) ctag++ } // When using nested messages since the protobuf resolution rules mean that // the parent scope is searched, then we do not need to qualify the name of // the list message, even though it is in the parent's namespace. ltype := listName if !args.cfg.nestedMessages { p, _ := stripPackagePrefix(args.parentPkg, listPackage) ltype = fmt.Sprintf("%s.%s", p, listName) if listPackage == "" { // Handle the case that the context of the list is already the base package. ltype = listName } } km.Fields = append(km.Fields, &protoMsgField{ Name: safeProtoIdentifierName(args.field.Name), Type: ltype, Tag: ctag, }) return km, nil } // enumInProtoUnionField parses an enum that is within a union and returns the generated // enumeration that should be included within a protobuf message for it. If annotateEnumNames // is set to true, the enumerated value's original names are stored. func enumInProtoUnionField(name string, args resolveTypeArgs, annotateEnumNames, useDefiningModuleForTypedefEnumNames, useConsistentNamesForProtoUnionEnums bool) (map[string]*protoMsgEnum, error) { enums := map[string]*protoMsgEnum{} for _, t := range args.yangType.Type { if util.IsSimpleEnumerationType(t) { definingType, err := util.DefiningType(t, args.contextEntry.Type) if err != nil { return nil, err } if useDefiningModuleForTypedefEnumNames && useConsistentNamesForProtoUnionEnums && !util.IsYANGBaseType(definingType) { // If the enumeration is within a typedef // union, then we don't generate a nested // version, and instead use the global version. 
continue } mappedType, err := yangEnumTypeToProtoType(resolveTypeArgs{yangType: t, contextEntry: args.contextEntry}, useDefiningModuleForTypedefEnumNames, useConsistentNamesForProtoUnionEnums) if err != nil { return nil, err } enum, err := genProtoEnum(&yang.Entry{ Name: mappedType.NativeType, Type: t, }, annotateEnumNames) if err != nil { return nil, err } enums[mappedType.NativeType] = enum } if util.IsUnionType(t) { es, err := enumInProtoUnionField(name, resolveTypeArgs{yangType: t, contextEntry: args.contextEntry}, annotateEnumNames, useDefiningModuleForTypedefEnumNames, useConsistentNamesForProtoUnionEnums) if err != nil { return nil, err } for name, enum := range es { enums[name] = enum } } } return enums, nil } // protoUnionField stores information relating to a oneof field within a protobuf // message. type protoUnionField struct { oneOfFields []*protoMsgField // oneOfFields contains a set of fields that are within a oneof. enums map[string]*protoMsgEnum // enums stores a definition of any simple enumeration types within the YANG union. repeatedMsg *protoMsg // repeatedMsg stores a message that contains fields that should be repeated, and is used to store a YANG leaf-list of union leaves. hadGlobalEnums bool // hadGlobalEnums determines whether there was a global scope enum (typedef, identityref) in the message. } // unionFieldToOneOf takes an input name, a yang.Entry containing a field definition and a MappedType // containing the proto type that the entry has been mapped to, and returns a definition of a union // field within the protobuf message. If the annotateEnumNames boolean is set, then any enumerated types // within the union have their original names within the YANG schema appended. func unionFieldToOneOf(fieldName string, e *yang.Entry, mtype *MappedType, annotateEnumNames, useDefiningModuleForTypedefEnumNames, useConsistentNamesForProtoUnionEnums bool) (*protoUnionField, error) { enums, err := enumInProtoUnionField(fieldName, resolveTypeArgs{yangType: e.Type, contextEntry: e}, annotateEnumNames, useDefiningModuleForTypedefEnumNames, useConsistentNamesForProtoUnionEnums) if err != nil { return nil, err } var typeNames []string for tn := range mtype.UnionTypes { typeNames = append(typeNames, tn) } sort.Strings(typeNames) var importGlobalEnums bool var oofs []*protoMsgField for _, t := range typeNames { // Split the type name on "." to ensure that we don't have oneof options // that reference some other package in the type name. If there was a "." // in the field name, then this means that we had a global enumeration // present and hence should import this path. tp := strings.Split(t, ".") if len(tp) > 1 { importGlobalEnums = true } tn := tp[len(tp)-1] // Calculate the tag by having the path, with the type name appended to it // such that we have unique inputs for each option. We make the name lower-case // as it is conventional that protobuf field names are lowercase separated by // underscores. //ft, err := fieldTag(fmt.Sprintf("%s_%s", e.Path(), strings.ToLower(tn))) //if err != nil { // return nil, fmt.Errorf("could not calculate tag number for %s, type %s in oneof", e.Path(), tn) //} st := &protoMsgField{ Name: fmt.Sprintf("%s_%s", fieldName, strings.ToLower(tn)), Type: t, Tag: 0, } oofs = append(oofs, st) } if e.IsLeafList() { // In this case, we cannot return a oneof, since it is not possible to have a repeated // oneof, therefore we return a message that contains the protoMsgFields that are defined // above. 
p := &protoMsg{ Name: fmt.Sprintf("%sUnion", yang.CamelCase(fieldName)), YANGPath: fmt.Sprintf("%s union field %s", e.Path(), e.Name), Fields: oofs, } return &protoUnionField{ enums: enums, repeatedMsg: p, hadGlobalEnums: importGlobalEnums, }, nil } return &protoUnionField{ oneOfFields: oofs, enums: enums, hadGlobalEnums: importGlobalEnums, }, nil } // protoPackageToFilePath takes an input string containing a period separated protobuf package // name in the form parent.child and returns a path to the file that it should be written to // assuming a hierarchical directory structure is used. If the package supplied is // openconfig.interfaces.interface, it is returned as []string{"openconfig", "interfaces", // "interface.proto"} such that filepath.Join can create the relevant file system path // for the input package. func protoPackageToFilePath(pkg string) []string { pp := strings.Split(pkg, ".") return append(pp, fmt.Sprintf("%s.proto", pp[len(pp)-1])) } // protoSchemaPathAnnotation takes a protobuf message and field, and returns the protobuf // field option definitions required to annotate it with its schema path(s). func protoSchemaPathAnnotation(msg *Directory, fieldName string, compressPaths bool) (*protoOption, error) { // protobuf paths are always absolute. smapp, err := findMapPaths(msg, fieldName, compressPaths, false, true) if err != nil { return nil, err } var b bytes.Buffer b.WriteRune('"') for i, p := range smapp { b.WriteString(util.SlicePathToString(p)) if i != len(smapp)-1 { b.WriteString("|") } } b.WriteRune('"') return &protoOption{Name: protoSchemaAnnotationOption, Value: b.String()}, nil } // stripPackagePrefix removes the prefix of pfx from the path supplied. If pfx // is not a prefix of path the entire path is returned. If the prefix was // stripped, the returned bool is set. func stripPackagePrefix(pfx, path string) (string, bool) { pfxP := strings.Split(pfx, ".") pathP := strings.Split(path, ".") var i int for i = range pfxP { if pfxP[i] != pathP[i] { return path, false } } return strings.Join(pathP[i+1:], "."), true } // importPath returns a string indicating the import path for a particular // child package - considering the base import path, and base package name // for the generated set of protobuf messages. func importPath(baseImportPath, basePkgName, childPkg string) string { return filepath.Join(append([]string{baseImportPath}, protoPackageToFilePath(fmt.Sprintf("%s.%s", basePkgName, childPkg))...)...) }
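// The tag derivation used by fieldTag above can be hard to picture from prose
// alone. The following standalone sketch (an illustration under stated
// assumptions, not part of this package) mirrors the logic: hash the input
// with FNV-32, mask into proto's 2^29-1 field-number space, and re-hash with a
// trailing "_" whenever the result lands in the reserved 19000-19999 range or
// the low 1-1000 range:
//
//	package main
//
//	import (
//		"fmt"
//		"hash/fnv"
//	)
//
//	func tag(s string) uint32 {
//		h := fnv.New32()
//		h.Write([]byte(s))
//		v := h.Sum32() & 0x1fffffff // 2^29-1, the proto3 field number limit
//		if (v >= 19000 && v <= 19999) || (v >= 1 && v <= 1000) {
//			return tag(s + "_") // avoid reserved and low-numbered tags
//		}
//		return v
//	}
//
//	func main() {
//		// Prints a stable, schema-derived tag for a hypothetical path.
//		fmt.Println(tag("/interfaces/interface/config/mtu"))
//	}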
{
    var imports []string
    var repeatedMsg *protoMsg

    d, err := protoLeafDefinition(fieldDef.Name, args, useDefiningModuleForTypedefEnumNames, useConsistentNamesForProtoUnionEnums)
    if err != nil {
        return nil, nil, []error{fmt.Errorf("could not define field %s: %v", args.field.Path(), err)}
    }

    fieldDef.Type = d.protoType

    // For any enumerations that were within the field definition, glean them into the
    // message definition.
    for n, e := range d.enums {
        msgDef.Enums[n] = e
    }

    // For any oneof that is within the field definition, glean them into the message
    // definitions.
    if d.oneofs != nil {
        fieldDef.OneOfFields = append(fieldDef.OneOfFields, d.oneofs...)
        fieldDef.IsOneOf = true
    }

    if d.repeatedMsg != nil {
        if args.cfg.nestedMessages {
            gm, errs := genProto3MsgCode(args.parentPkg, []*protoMsg{d.repeatedMsg}, false)
            // Check the errors returned by code generation, rather than the
            // earlier (already handled) err value.
            if errs != nil {
                return nil, nil, errs
            }
            msgDef.ChildMsgs = append(msgDef.ChildMsgs, gm)
        } else {
            repeatedMsg = d.repeatedMsg
        }
    }

    // Add the global enumeration package if it is referenced by this field.
    if d.globalEnum {
        imports = append(imports, importPath(args.cfg.baseImportPath, args.cfg.basePackageName, args.cfg.enumPackageName))
    }

    if args.field.ListAttr != nil {
        fieldDef.IsRepeated = true
    }
    return repeatedMsg, imports, nil
}
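// To make the templates above concrete, a hedged sketch of the kind of proto
// output they produce (the message name, leaf, and schema path here are
// invented for illustration, not taken from a real generation run):
//
//	// Interface represents the /interfaces/interface YANG schema element.
//	message Interface {
//	  ywrapper.UintValue mtu = 1 [(yext.schemapath) = "/interfaces/interface/mtu"];
//	}
//
// Field tags are assigned sequentially by genProto3Msg (ctags), and the
// schemapath option is only emitted when annotateSchemaPaths is set.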
publisher.go
package libratopublisher

import (
    "fmt"
    "os"

    "github.com/tnorris/canary/pkg/libratoaggregator"
    "github.com/tnorris/canary/pkg/sampler"
    "github.com/tnorris/canary/pkg/sensor"
)

// Publisher implements the canary.Publisher interface and
// is our means of ingesting canary.Measurements and converting
// them to Librato metrics.
type Publisher struct {
    aggregator *libratoaggregator.Aggregator
}

// New takes a user, token and source and returns a pointer
// to a Publisher.
func New(user, token, source string) (p *Publisher) {
    p = &Publisher{
        aggregator: libratoaggregator.New(user, token, source),
    }
    return
}

// NewFromEnv is a convenience func that wraps New,
// and populates the required arguments via environment variables.
// If required variables cannot be found, errors are returned.
func
() (*Publisher, error) {
    user := os.Getenv("LIBRATO_USER")
    if user == "" {
        return nil, fmt.Errorf("LIBRATO_USER not set in ENV")
    }

    token := os.Getenv("LIBRATO_TOKEN")
    if token == "" {
        return nil, fmt.Errorf("LIBRATO_TOKEN not set in ENV")
    }

    var err error
    source := os.Getenv("SOURCE")
    if source == "" {
        source, err = os.Hostname()
        if err != nil {
            return nil, err
        }
    }
    return New(user, token, source), nil
}

// Publish takes a canary.Measurement and delivers it to the aggregator.
func (p *Publisher) Publish(m sensor.Measurement) (err error) {
    // convert our measurement into a map of metrics
    // send the map on to the librato aggregator
    p.aggregator.C <- mapMeasurement(m)
    return
}

// mapMeasurement takes a canary.Measurement and returns a map with all of the appropriate metrics.
func mapMeasurement(m sensor.Measurement) map[string]float64 {
    metrics := make(map[string]float64)

    // latency
    latency := m.Sample.TimeEnd.Sub(m.Sample.TimeStart).Seconds() * 1000
    metrics["canary."+m.Target.Name+".latency"] = latency

    if m.Error != nil {
        // increment a general error metric
        metrics["canary."+m.Target.Name+".errors"] = 1

        // increment a specific error metric
        switch m.Error.(type) {
        case sampler.StatusCodeError:
            metrics["canary."+m.Target.Name+".errors.http"] = 1
        default:
            metrics["canary."+m.Target.Name+".errors.sampler"] = 1
        }
    }
    return metrics
}
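// As a concrete illustration of mapMeasurement (the target name "example" is
// hypothetical): a 25ms sample that failed with a sampler.StatusCodeError
// would yield
//
//	map[string]float64{
//		"canary.example.latency":     25.0,
//		"canary.example.errors":      1,
//		"canary.example.errors.http": 1,
//	}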
NewFromEnv
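// Hedged usage sketch for the publisher above (environment values are
// placeholders; error handling style is an assumption):
//
//	// LIBRATO_USER=me@example.com LIBRATO_TOKEN=secret ./canary
//	pub, err := libratopublisher.NewFromEnv()
//	if err != nil {
//		log.Fatal(err) // missing LIBRATO_USER / LIBRATO_TOKEN
//	}
//	_ = pub // pass wherever a canary.Publisher is consumed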
approx_attention.py
# coding=utf-8 # Copyright 2022 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # pylint: skip-file import tensorflow.compat.v1 as tf import math from combiner.tf import attention from combiner.tf import ops import functools def shift_right(x, axis): """Shift input x to the right along given axis.""" pad_widths = [(0, 0)] * len(x.shape) pad_widths[axis] = (1, 0) padded = tf.pad(x, pad_widths) return tf.slice(padded, begin=[0]*len(x.shape), size=x.shape) def shift_left(x, axis): """Shift input x to the left along given axis.""" pad_widths = [(0, 0)] * len(x.shape) pad_widths[axis] = (0, 1) padded = tf.pad(x, pad_widths) begin = [0]*len(x.shape) begin[axis] = 1 return tf.slice(padded, begin=begin, size=x.shape) def approx_cummax(x, axis, exclusive=False, reverse=False): """Approximate the cummax operation in jax.""" sum_x = tf.math.cumsum(x, axis, exclusive=exclusive, reverse=reverse) # return tf.math.cumsum(tf.nn.relu(x), axis, reverse=reverse) return sum_x def get_causal_mask(x, axis, is_strict, upper=False): """Get attention mask bias (keep a lower triangle). Args: x: input tensor axis: across which dim to make mask is_strict: if True, the diagonal will be masked out as well. upper: upper or lower triangle Returns: mask: tensor of {0, -1e9} ^ (x.shape[axis], x.shape[axis]) """ seq_len = tf.shape(x)[axis] if is_strict: if upper: mask = tf.linalg.band_part( tf.ones([seq_len, seq_len], dtype=x.dtype), num_lower=-1, num_upper=0) else: mask = tf.linalg.band_part( tf.ones([seq_len, seq_len], dtype=x.dtype), num_lower=0, num_upper=-1) else: if upper: mask = 1.0 - tf.linalg.band_part( tf.ones([seq_len, seq_len], dtype=x.dtype), num_lower=0, num_upper=-1) else: mask = 1.0 - tf.linalg.band_part( tf.ones([seq_len, seq_len], dtype=x.dtype), num_lower=-1, num_upper=0) mask = -1e9 * mask return mask def pooling_summary(x, axis, local_summary, keepdims=False): """Perform a cheap pooling summary of a span. Args: x: input tensor axis: over which axis to summarize local_summary: str of format activation-pooling, choose from {relu, identity}-{max, sum, mean} keepdims: whether to keep the summarized singleton axis. 
Returns: y: the same shape as x for other axis, except y.shape[axis] = 1 if keepdims=True, otherwise y.rank = x.rank + 1 """ act, pool = local_summary.split('-') if act == 'relu': x = tf.nn.relu(x) elif act == 'identity': pass elif act == 'deepset': x = ops.trail_dense(x, x.shape.as_list()[-1], bias=False) x = tf.nn.relu(x) else: raise ValueError('Unsupported activation: %s' % act) if pool == 'mean': x = tf.math.reduce_mean(x, axis=axis, keepdims=keepdims) elif pool == 'max': x = tf.math.reduce_max(x, axis=axis, keepdims=keepdims) elif pool == 'sum': x = tf.math.reduce_sum(x, axis=axis, keepdims=keepdims) else: raise ValueError('Unsupported pooling: %s' % pool) return x def axial_mixture_unidir(x, config, is_training=True, causal=True): """Full attention matrix with axial pattern as local and mixture for global summary.""" del is_training assert causal bsize = x.shape[0] query, key, value = attention.get_qkv(x, x, x, hidden_size=config.model_size, num_heads=config.num_heads, bias=config.dense_use_bias) head_dim = config.model_size // config.num_heads assert config.max_seq_len % config.max_seg_len == 0 num_seg = config.max_seq_len // config.max_seg_len cur_query = tf.reshape(query, [bsize, num_seg, config.max_seg_len, config.num_heads, head_dim]) cur_key = tf.reshape(key, cur_query.shape) cur_val = tf.reshape(value, cur_query.shape) col_logit_expr = 'BSUNK,BTUNK->BUNST' col_attn_expr = 'BUNST,BTUNK->BSUNK' col_strict_mask = get_causal_mask(cur_query, axis=1, is_strict=True)[tf.newaxis, tf.newaxis, tf.newaxis, :, :] row_logit_expr = 'BUSNK,BUTNK->BUNST' row_attn_expr = 'BUNST,BUTNK->BUSNK' row_mask = get_causal_mask(cur_query, axis=2, is_strict=False)[tf.newaxis, tf.newaxis, tf.newaxis, :, :] col_logits = tf.einsum(col_logit_expr, cur_query, cur_key) + col_strict_mask row_logits = tf.einsum(row_logit_expr, cur_query, cur_key) + row_mask ################### col_up2down_query = approx_cummax(cur_query, axis=1) col_up2down_key = shift_right(approx_cummax(cur_key, axis=1), axis=1) col_mask = get_causal_mask( cur_query, axis=1, is_strict=False)[tf.newaxis, tf.newaxis, tf.newaxis, :, :] col_up2down_logits = tf.einsum(col_logit_expr, col_up2down_query, cur_key) + col_mask col_up2down_attn_weights = attention.float32_softmax( col_up2down_logits, axis=-1) col_up2down_summary = tf.einsum(col_attn_expr, col_up2down_attn_weights, cur_val) col_up2down_summary = shift_right(col_up2down_summary, axis=1) row_only_myself_mask = tf.eye(tf.shape(cur_query)[2], dtype=cur_query.dtype)[tf.newaxis, tf.newaxis, tf.newaxis, :, :] row_without_myself_mask = -1e9 * row_only_myself_mask all_maskout = tf.cast(tf.fill(row_without_myself_mask.shape, -1e9), cur_query.dtype) row_without_myself_mask = tf.concat([all_maskout] + [row_without_myself_mask] * (cur_query.shape[1] - 1), axis=1) previous_row_logits = tf.einsum(row_logit_expr, cur_query, col_up2down_key) + row_without_myself_mask ################### row_left2right_query = approx_cummax(cur_query, axis=2) row_left2right_key = shift_right(approx_cummax(cur_key, axis=2), axis=2) row_left2right_logits = tf.einsum(row_logit_expr, row_left2right_query, cur_key) + row_mask row_left2right_attn_weights = attention.float32_softmax( row_left2right_logits, axis=-1) row_left2right_summary = tf.einsum(row_attn_expr, row_left2right_attn_weights, cur_val) row_left2right_summary = shift_right(row_left2right_summary, axis=2) all_maskout = tf.cast(tf.fill(col_strict_mask.shape, -1e9), cur_query.dtype) col_strict_without_first_mask = tf.concat( [all_maskout] + [col_strict_mask] * 
(cur_query.shape[2] - 1), axis=1) top_left_col_logits = tf.einsum( col_logit_expr, cur_query, row_left2right_key) + col_strict_without_first_mask ################### row_right2left_query = approx_cummax(cur_query, axis=2, reverse=True) row_right2left_key = shift_left( approx_cummax(cur_key, axis=2, reverse=True), axis=2) row_upper_mask = get_causal_mask( cur_query, axis=2, is_strict=False, upper=True)[tf.newaxis, tf.newaxis, tf.newaxis, :, :] row_right2left_logits = tf.einsum(row_logit_expr, row_right2left_query, cur_key) + row_upper_mask row_right2left_attn_weights = attention.float32_softmax( row_right2left_logits, axis=-1) row_right2left_summary = tf.einsum(row_attn_expr, row_right2left_attn_weights, cur_val) row_right2left_summary = shift_left(row_right2left_summary, axis=2) col_strict_without_last_mask = tf.concat( [col_strict_mask] * (cur_query.shape[2] - 1) + [all_maskout], axis=1) top_right_col_logits = tf.einsum( col_logit_expr, cur_query, row_right2left_key) + col_strict_without_last_mask ################### joint_logits = tf.concat([ tf.transpose(col_logits, perm=[0, 3, 2, 1, 4]), row_logits, previous_row_logits, tf.transpose(top_left_col_logits, perm=[0, 3, 2, 1, 4]), tf.transpose(top_right_col_logits, perm=[0, 3, 2, 1, 4]) ], axis=-1) attn_weights = attention.float32_softmax(joint_logits, axis=-1) col_att, row_att, previous_row_att, top_left_col_att, top_right_col_att = tf.split(attn_weights, [num_seg, config.max_seg_len, config.max_seg_len, num_seg, num_seg], axis=-1) col_att = tf.transpose(col_att, [0, 3, 2, 1, 4]) top_left_col_att = tf.transpose(top_left_col_att, [0, 3, 2, 1, 4]) top_right_col_att = tf.transpose(top_right_col_att, [0, 3, 2, 1, 4]) col_merged = tf.einsum(col_attn_expr, col_att, cur_val) row_merged = tf.einsum(row_attn_expr, row_att, cur_val) previous_row_merged = tf.einsum(row_attn_expr, previous_row_att, col_up2down_summary) top_left_merged = tf.einsum(col_attn_expr, top_left_col_att, row_left2right_summary) top_right_merged = tf.einsum(col_attn_expr, top_right_col_att, row_right2left_summary) joint_merged = tf.reshape( col_merged + row_merged + previous_row_merged + top_left_merged + top_right_merged, [bsize, num_seg * config.max_seg_len, config.num_heads, head_dim]) output = ops.trail_dense(joint_merged, config.model_size, begin_axis=-2) return output def sqrt_fixed_full(x, config, is_training=True, causal=True): """Full attention matrix with sqrt decomposition.""" bsize = x.shape[0] query, key, value = attention.get_qkv(x, x, x, hidden_size=config.model_size, num_heads=config.num_heads, bias=config.dense_use_bias) head_dim = config.model_size // config.num_heads assert config.max_seq_len % config.max_seg_len == 0 num_seg = config.max_seq_len // config.max_seg_len cur_query = tf.reshape(query, [-1, num_seg, config.max_seg_len, config.num_heads, head_dim]) with tf.variable_scope('pooling_query'): merged_query = pooling_summary(cur_query, axis=2, local_summary=config.local_summary, keepdims=True) cur_key = tf.reshape(key, cur_query.shape) cur_val = tf.reshape(value, cur_query.shape) span_val = attention.dot_product_attention(merged_query, cur_key, cur_val, is_training=is_training, attn_axis=1, dropatt=config.dropatt) span_val = tf.squeeze(span_val, axis=2) with tf.variable_scope('pooling_key'): span_key = pooling_summary(cur_key, axis=2, local_summary=config.local_summary, keepdims=False) local_logits = tf.einsum('bsqhd,bskhd->bsqhk', cur_query, cur_key) if causal: local_mask = get_causal_mask(cur_query, axis=2, is_strict=False) local_mask = 
tf.expand_dims(local_mask, axis=-2) local_logits += local_mask prev_logits = tf.einsum('bqhd,bkhd->bqhk', query, span_key) if causal: prev_mask = get_causal_mask(cur_query, axis=1, is_strict=True) prev_mask = tf.repeat(prev_mask, [config.max_seg_len] * num_seg, axis=0) prev_logits += tf.expand_dims(prev_mask, axis=1) joint_logits = tf.concat([tf.reshape(local_logits, [bsize, config.max_seq_len, config.num_heads, -1]), prev_logits], axis=-1) attn_weights = attention.float32_softmax(joint_logits, axis=-1) local_att, prev_att = tf.split(attn_weights, [config.max_seg_len, num_seg], axis=-1) if is_training: local_att = tf.nn.dropout(local_att, rate=config.dropatt) local_att = tf.reshape(local_att, [bsize, num_seg, config.max_seg_len, config.num_heads, config.max_seg_len]) local_merged = tf.einsum('bsqhk,bskhd->bsqhd', local_att, cur_val) prev_merged = tf.einsum('bqhk,bkhd->bqhd', prev_att, span_val) joint_merged = prev_merged + tf.reshape(local_merged, prev_merged.shape) output = ops.trail_dense(joint_merged, config.model_size, begin_axis=-2) return output def axial_rowmajor(x, config, is_training=True, causal=True): """Full attention matrix with sqrt decomposition."""
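  # Shape bookkeeping sketch (illustrative numbers, not from a real config):
  # with max_seq_len L = 16 and max_seg_len s = 4, the [B, 16, D] input is
  # viewed as a 4 x 4 grid [B, 4, 4, D]; "row" attention is local within each
  # segment, and per-row summaries provide the sqrt(L)-sized global context.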
num_seg = seq_len // config.max_seg_len x_sqr = tf.reshape(x, [bsize, num_seg, config.max_seg_len, config.model_size]) q_row_local, key_row_local, value_row_local = attention.get_qkv( x_sqr, x_sqr, x_sqr, hidden_size=config.model_size, num_heads=config.num_heads, bias=config.dense_use_bias) local_logits = tf.einsum('bsqhd,bskhd->bsqhk', q_row_local, key_row_local) row_probs = attention.float32_softmax(local_logits, axis=-1) if is_training: row_probs = tf.nn.dropout(row_probs, rate=config.dropatt) row_attn_out = tf.einsum('bsqhk,bskhd->bsqhd', row_probs, value_row_local) if config.row_summary == 'none': key_row = key_row_local elif config.row_summary in ['wsum', 'proj', 'wsum_proj']: if 'wsum' in config.row_summary: pre_summary = tf.einsum('bsqhk,bskhd->bsqhd', row_probs, key_row_local) else: pre_summary = row_attn_out if 'proj' in config.row_summary: with tf.variable_scope('rowmajor_param_post'): key_row = ops.trail_dense(pre_summary, config.model_size, begin_axis=-2, bias=config.dense_use_bias) key_row = ops.postprocess(x_sqr, key_row, config, is_training) _, key_row = ops.preprocess(key_row, config) key_row = ops.trail_dense(key_row, [config.num_heads, head_dim], bias=config.dense_use_bias) else: key_row = pre_summary else: raise ValueError('Unknown row summary %s' % config.row_summary) if causal: local_mask = get_causal_mask(q_row_local, axis=2, is_strict=False) local_logits += local_mask[:, tf.newaxis, :] global_logits = tf.einsum('bqlhd,bklhd->bqlhk', q_row_local, key_row) if causal: global_mask = get_causal_mask(q_row_local, axis=1, is_strict=True) global_logits += global_mask[:, tf.newaxis, tf.newaxis, :] # (bsize, num_seg, seg_len, n_head, seg_len + num_seg) joint_logits = tf.concat([local_logits, global_logits], axis=-1) attn_probs = attention.float32_softmax(joint_logits, axis=-1) local_att, global_att = tf.split(attn_probs, [config.max_seg_len, num_seg], axis=-1) if is_training: local_att = tf.nn.dropout(local_att, rate=config.dropatt) local_merged = tf.einsum('bsqhk,bskhd->bsqhd', local_att, value_row_local) global_merged = tf.einsum('bqlhv,bvlhd->bqlhd', global_att, row_attn_out) joint_merged = tf.reshape(local_merged + global_merged, [bsize, seq_len, config.num_heads, head_dim]) output = ops.trail_dense(joint_merged, config.model_size, begin_axis=-2, bias=config.dense_use_bias) return output def axial_mixture_bidir(x, config, is_training=True, causal=False): """Full attention matrix with axial mixture decomposition.""" assert not causal bsize = x.shape[0] seq_len = x.shape.as_list()[1] head_dim = config.model_size // config.num_heads assert seq_len % config.max_seg_len == 0 num_seg = seq_len // config.max_seg_len x_sqr = tf.reshape(x, [bsize, num_seg, config.max_seg_len, config.model_size]) query, key, value = attention.get_qkv( x_sqr, x_sqr, x_sqr, hidden_size=config.model_size, num_heads=config.num_heads, bias=config.dense_use_bias) local_row_logits = tf.einsum('bushd,buthd->bhust', query, key) local_col_logits = tf.einsum('bsuhd,btuhd->bhsut', query, key) # TODO: add self-mask for local_col_logits span_attn_fn = functools.partial(attention.dot_product_attention, key_heads=key, value_heads=value, is_training=is_training, dropatt=config.dropatt) # === top-down summary === col_query_topdown = approx_cummax(query, 1, exclusive=True) col_key_topdown = approx_cummax(key, 1, exclusive=True) col_t2d_mask = get_causal_mask(x_sqr, axis=1, is_strict=True) col_t2d_val = span_attn_fn(query_heads=col_query_topdown, attn_axis=0, attn_bias=col_t2d_mask) # === bottom-up summary === 
col_query_bottomup = approx_cummax(query, 1, exclusive=True, reverse=True) col_key_bottomup = approx_cummax(key, 1, exclusive=True, reverse=True) col_b2t_mask = get_causal_mask(x_sqr, axis=1, is_strict=True, upper=True) col_b2t_val = span_attn_fn(query_heads=col_query_bottomup, attn_axis=0, attn_bias=col_b2t_mask) # === left2right summary === row_query_left2right = approx_cummax(query, 2, exclusive=True) row_key_left2right = approx_cummax(key, 2, exclusive=True) row_l2r_mask = get_causal_mask(x_sqr, axis=2, is_strict=True) row_l2r_val = span_attn_fn(query_heads=row_query_left2right, attn_axis=1, attn_bias=row_l2r_mask) # === right2left summary === row_query_right2left = approx_cummax(query, 2, exclusive=True, reverse=True) row_key_right2left = approx_cummax(key, 2, exclusive=True, reverse=True) row_r2l_mask = get_causal_mask(x_sqr, axis=2, is_strict=True, upper=True) row_r2l_val = span_attn_fn(query_heads=row_query_right2left, attn_axis=1, attn_bias=row_r2l_mask) global_t2d_logits = tf.einsum('bushd,buthd->bhust', query, col_key_topdown) global_b2t_logits = tf.einsum('bushd,buthd->bhust', query, col_key_bottomup) global_l2r_logits = tf.einsum('bsuhd,btuhd->bhsut', query, row_key_left2right) global_r2l_logits = tf.einsum('bsuhd,btuhd->bhsut', query, row_key_right2left) joint_logits = tf.concat([local_row_logits, local_col_logits, global_t2d_logits, global_b2t_logits, global_l2r_logits, global_r2l_logits], axis=-1) attn_probs = attention.float32_softmax(joint_logits, axis=-1) prow, pcol, pt2d, pb2t, pl2r, pr2l = tf.split( attn_probs, [config.max_seg_len, num_seg, config.max_seg_len, config.max_seg_len, num_seg, num_seg], axis=-1) mrow = tf.einsum('bhust,buthd->bushd', prow, value) mcol = tf.einsum('bhsut,btuhd->bsuhd', pcol, value) mt2d = tf.einsum('bhust,buthd->bushd', pt2d, col_t2d_val) mb2t = tf.einsum('bhust,buthd->bushd', pb2t, col_b2t_val) ml2r = tf.einsum('bhsut,btuhd->bsuhd', pl2r, row_l2r_val) mr2l = tf.einsum('bhsut,btuhd->bsuhd', pr2l, row_r2l_val) joint_merged = mrow + mcol + mt2d + mb2t + ml2r + mr2l joint_merged = tf.reshape(joint_merged, [bsize, seq_len, config.num_heads, head_dim]) output = ops.trail_dense(joint_merged, config.model_size, begin_axis=-2, bias=config.dense_use_bias) return output
  bsize = x.shape[0]
  seq_len = x.shape.as_list()[1]
  head_dim = config.model_size // config.num_heads
  assert seq_len % config.max_seg_len == 0
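The decomposition above trades the full O(L^2) attention matrix for a local within-row term plus a per-row summary term, so cost scales roughly with L*(seg_len + num_seg) instead of L^2. Below is a minimal NumPy sketch of just the local half of that idea, assuming a single head, no masking, and no dropout; every name in it is illustrative and nothing is taken from the code above.

import numpy as np

def softmax(x, axis=-1):
    x = x - x.max(axis=axis, keepdims=True)
    e = np.exp(x)
    return e / e.sum(axis=axis, keepdims=True)

def local_row_attention(x, seg_len):
    # Reshape (seq_len, dim) into (num_seg, seg_len, dim) and attend
    # only within each row segment, mirroring the 'bsqhd,bskhd' einsums.
    seq_len, dim = x.shape
    assert seq_len % seg_len == 0
    xs = x.reshape(seq_len // seg_len, seg_len, dim)
    logits = np.einsum('sqd,skd->sqk', xs, xs) / np.sqrt(dim)
    probs = softmax(logits, axis=-1)
    return np.einsum('sqk,skd->sqd', probs, xs).reshape(seq_len, dim)

x = np.random.randn(16, 8)
print(local_row_attention(x, seg_len=4).shape)  # (16, 8)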
ent.go
// Code generated by entc, DO NOT EDIT. package ent import ( "errors" "fmt" "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/go-kratos/kratos/examples/transaction/ent/internal/data/ent/card" "github.com/go-kratos/kratos/examples/transaction/ent/internal/data/ent/user" ) // ent aliases to avoid import conflicts in user's code. type ( Op = ent.Op Hook = ent.Hook Value = ent.Value Query = ent.Query Policy = ent.Policy Mutator = ent.Mutator Mutation = ent.Mutation MutateFunc = ent.MutateFunc ) // OrderFunc applies an ordering on the sql selector. type OrderFunc func(*sql.Selector) // columnChecker returns a function indicates if the column exists in the given column. func columnChecker(table string) func(string) error { checks := map[string]func(string) bool{ card.Table: card.ValidColumn, user.Table: user.ValidColumn, } check, ok := checks[table] if !ok { return func(string) error { return fmt.Errorf("unknown table %q", table) } } return func(column string) error { if !check(column) { return fmt.Errorf("unknown column %q for table %q", column, table) } return nil } } // Asc applies the given fields in ASC order. func Asc(fields ...string) OrderFunc { return func(s *sql.Selector) { check := columnChecker(s.TableName()) for _, f := range fields { if err := check(f); err != nil { s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)}) } s.OrderBy(sql.Asc(s.C(f))) } } } // Desc applies the given fields in DESC order. func Desc(fields ...string) OrderFunc { return func(s *sql.Selector) { check := columnChecker(s.TableName()) for _, f := range fields { if err := check(f); err != nil { s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)}) } s.OrderBy(sql.Desc(s.C(f))) } } } // AggregateFunc applies an aggregation step on the group-by traversal/selector. type AggregateFunc func(*sql.Selector) string // As is a pseudo aggregation function for renaming another other functions with custom names. For example: // // GroupBy(field1, field2). // Aggregate(ent.As(ent.Sum(field1), "sum_field1"), (ent.As(ent.Sum(field2), "sum_field2")). // Scan(ctx, &v) // func As(fn AggregateFunc, end string) AggregateFunc { return func(s *sql.Selector) string { return sql.As(fn(s), end) } } // Count applies the "count" aggregation function on each group. func Count() AggregateFunc { return func(s *sql.Selector) string { return sql.Count("*") } } // Max applies the "max" aggregation function on the given field of each group. func Max(field string) AggregateFunc { return func(s *sql.Selector) string { check := columnChecker(s.TableName()) if err := check(field); err != nil { s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) return "" } return sql.Max(s.C(field)) } } // Mean applies the "mean" aggregation function on the given field of each group. func Mean(field string) AggregateFunc { return func(s *sql.Selector) string { check := columnChecker(s.TableName()) if err := check(field); err != nil { s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) return "" } return sql.Avg(s.C(field)) } } // Min applies the "min" aggregation function on the given field of each group. func Min(field string) AggregateFunc { return func(s *sql.Selector) string { check := columnChecker(s.TableName()) if err := check(field); err != nil { s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) return "" } return sql.Min(s.C(field)) } } // Sum applies the "sum" aggregation function on the given field of each group. 
func Sum(field string) AggregateFunc { return func(s *sql.Selector) string { check := columnChecker(s.TableName()) if err := check(field); err != nil { s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) return "" } return sql.Sum(s.C(field)) } } // ValidationError returns when validating a field fails. type ValidationError struct { Name string // Field or edge name. err error } // Error implements the error interface. func (e *ValidationError) Error() string { return e.err.Error() } // Unwrap implements the errors.Wrapper interface. func (e *ValidationError) Unwrap() error { return e.err } // IsValidationError returns a boolean indicating whether the error is a validation error. func IsValidationError(err error) bool { if err == nil { return false } var e *ValidationError return errors.As(err, &e) } // NotFoundError returns when trying to fetch a specific entity and it was not found in the database. type NotFoundError struct { label string } // Error implements the error interface. func (e *NotFoundError) Error() string { return "ent: " + e.label + " not found" } // IsNotFound returns a boolean indicating whether the error is a not found error. func IsNotFound(err error) bool { if err == nil { return false } var e *NotFoundError return errors.As(err, &e) } // MaskNotFound masks not found error. func MaskNotFound(err error) error { if IsNotFound(err) { return nil } return err } // NotSingularError returns when trying to fetch a singular entity and more then one was found in the database. type NotSingularError struct { label string } // Error implements the error interface. func (e *NotSingularError) Error() string { return "ent: " + e.label + " not singular" } // IsNotSingular returns a boolean indicating whether the error is a not singular error. func IsNotSingular(err error) bool { if err == nil { return false } var e *NotSingularError return errors.As(err, &e) } // NotLoadedError returns when trying to get a node that was not loaded by the query. type NotLoadedError struct { edge string } // Error implements the error interface. func (e *NotLoadedError) Error() string { return "ent: " + e.edge + " edge was not loaded" } // IsNotLoaded returns a boolean indicating whether the error is a not loaded error. func IsNotLoaded(err error) bool
// ConstraintError returns when trying to create/update one or more entities and // one or more of their constraints failed. For example, violation of edge or // field uniqueness. type ConstraintError struct { msg string wrap error } // Error implements the error interface. func (e ConstraintError) Error() string { return "ent: constraint failed: " + e.msg } // Unwrap implements the errors.Wrapper interface. func (e *ConstraintError) Unwrap() error { return e.wrap } // IsConstraintError returns a boolean indicating whether the error is a constraint failure. func IsConstraintError(err error) bool { if err == nil { return false } var e *ConstraintError return errors.As(err, &e) }
{
	if err == nil {
		return false
	}
	var e *NotLoadedError
	return errors.As(err, &e)
}
day6.py
from collections import defaultdict

from aoc.util import load_input


def turn(d, fun, sxy, exy):
    sx, sy = map(int, sxy.split(","))
    ex, ey = map(int, exy.split(","))
    for x in range(sx, ex + 1):
        for y in range(sy, ey + 1):
            d[(x, y)] = fun(d[(x, y)])


def run(data, toggle, turn_on, turn_off):
        token = line.split()
        if line.startswith("toggle"):
            turn(grid, toggle, token[1], token[3])
        elif line.startswith("turn on"):
            turn(grid, turn_on, token[2], token[4])
        elif line.startswith("turn off"):
            turn(grid, turn_off, token[2], token[4])
        else:
            raise ValueError(f"unknown instruction: {line!r}")
    return sum(grid.values())


def part1(lines):
    return run(lines, lambda v: not v, lambda _: True, lambda _: False)


def part2(lines):
    return run(lines, lambda x: x + 2, lambda x: x + 1, lambda x: max(0, x - 1))


if __name__ == "__main__":
    data = load_input(__file__, 2015, "6")
    print(part1(data))
    print(part2(data))
    grid = defaultdict(lambda: 0)
    for line in data:
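A quick hand-checked sanity test for the two rule sets above; the instruction strings follow the AoC 2015 day 6 format, and the expected values are worked out by hand in the comments.

sample = [
    "turn on 0,0 through 1,1",  # part 1: 4 lights on; part 2: 4 cells at brightness 1
    "toggle 0,0 through 0,1",   # part 1: 2 of them flip off; part 2: 2 cells gain +2
]
assert part1(sample) == 2       # 4 on, minus the 2 toggled off
assert part2(sample) == 8       # 4*1 + 2*2
print("ok")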
urls.py
from django.urls import path

from . import views

urlpatterns = [
    path('signup/', views.signup, name='signup'),
    path('login/', views.login_func, name='login'),
    path('logout/', views.logout_func, name='logout'),
    path('profile/', views.profile, name='profile'),
]
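For context, a hypothetical sketch of the view callables these routes expect; the real implementations live in the app's views.py, which is not part of this file.

from django.contrib.auth import logout
from django.shortcuts import redirect, render

def profile(request):
    # Placeholder body; the actual template and context are app-specific.
    return render(request, "profile.html", {"user": request.user})

def logout_func(request):
    logout(request)
    return redirect("login")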
index.tsx
import * as React from "react"; import styled from "styled-components"; import { Colors } from "../../util/constants"; import PublicationsIcon from "../icons/publications"; import VectorShapeIcon from "../ui/icons/vector-shape"; import FloppyDiskIcon from "../ui/icons/floppy-disk"; import DownloadIcon from "../ui/icons/download"; import CopyIcon from "../ui/icons/copy"; import CutIcon from "../ui/icons/cut"; import PasteIcon from "../ui/icons/paste"; import ZoomInIcon from "../ui/icons/zoom-in"; import ZoomOutIcon from "../ui/icons/zoom-out"; import Spacer from "../ui/spacer"; import TitleBarButton from "./title-bar-button"; import downloadPdfAction from "../../util/download-pdf"; import { ClipboardAction } from "../../types/data"; import Menu, { MenuDivider, MenuItem } from "../ui/menu"; import { Shapes } from "../../util/new-shapes"; import ArrowDownIcon from "../ui/icons/arrow-down"; import { navigate } from "@reach/router"; import PencilRulerIcon from "../ui/icons/pencil-ruler"; import GarbageIcon from "../ui/icons/garbage"; import { useAppStateContext } from "../../contexts/app-state-provider"; const Container = styled.header` background: ${Colors.TitleBar.Background}; border-bottom: 1px solid hsla(0, 0%, 0%, 0.15); display: flex; align-items: center; justify-content: space-between; width: 100vw; height: 24px; z-index: 1; `; const ControlGroup = styled.div` padding: 0 0.5em; display: flex; align-items: center; justify-content: flex-start; font-size: 13px; color: ${Colors.TitleBar.Text}; `; const ZoomLabel = styled.span<{ disabled: boolean }>` font-size: 10px; font-weight: 600; line-height: 25px; width: 35px; text-align: center; color: ${({ disabled }) => disabled ? Colors.TitleBar.DisabledText : Colors.TitleBar.Text}; `; const LeftControlGroup = styled(ControlGroup)` font-weight: bold; svg { margin-right: 0.25em; } `; const TitleBar: React.FC = () => { const { actions, currentDocument, clipboardContents, selectedObject, zoom, user, userFetching, selectedDocumentItem, } = useAppStateContext(); const hasValidUserAuthenticated = !userFetching && !!user; const hasNoUserAuthenticated = !userFetching && !user; const saveDocument = React.useCallback(() => actions.saveDocument(), [ actions, ]); const downloadPdf = React.useCallback( () => downloadPdfAction(currentDocument), [currentDocument] ); const zoomIn = React.useCallback( () => actions.setZoom(Math.min(4.0, zoom + 0.25)), [actions, zoom] ); const zoomOut = React.useCallback( () => actions.setZoom(Math.max(0.25, zoom - 0.25)), [actions, zoom] ); const handleDocumentItemDoubleClick = React.useCallback(() => { navigate(`edit/${selectedDocumentItem.id}`); actions.setSelectedDocumentItem(null); }, [selectedDocumentItem, actions]); const handleSaveChangesButtonSelected = React.useCallback(async () => { await actions.saveDocument(); actions.setSaveDialogVisible(false); navigate("/"); actions.setCurrentDocument(null); }, [actions]); return ( <Container> <LeftControlGroup> <Menu renderButton={(setMenuActive, menuActive) => ( <TitleBarButton active={menuActive} onClick={() => setMenuActive(prevState => !prevState)} > <PublicationsIcon stroke={ menuActive ? 
Colors.Button.ActiveBackground : Colors.TitleBar.Background } size={20} /> Publications&nbsp; <ArrowDownIcon /> </TitleBarButton> )} renderMenu={ <> {currentDocument && hasValidUserAuthenticated && ( <> <MenuItem onClick={handleSaveChangesButtonSelected}> View All Documents </MenuItem> <MenuDivider /> </> )} {hasValidUserAuthenticated && ( <MenuItem onClick={actions.logout}>Log Out</MenuItem> )} {hasNoUserAuthenticated && ( <> <MenuItem onClick={() => actions.setLoginModalVisible(true)}> Log In… </MenuItem> <MenuItem onClick={() => actions.setNewAccountModalVisible(true)} > Create Account… </MenuItem> </> )} <MenuDivider /> <MenuItem onClick={() => actions.setAboutModalVisible(true)}> About Publications… </MenuItem> </> } /> </LeftControlGroup> {!currentDocument && ( <> <ControlGroup> <TitleBarButton disabled={!selectedDocumentItem} onClick={handleDocumentItemDoubleClick} > <PencilRulerIcon /> Edit </TitleBarButton> <TitleBarButton disabled={!selectedDocumentItem} onClick={() => actions.setDeleteDocumentDialogVisible(true)} > <GarbageIcon /> Delete </TitleBarButton> </ControlGroup> </> )} {currentDocument && ( <> <ControlGroup> <TitleBarButton disabled={!currentDocument || !user} onClick={saveDocument} > <FloppyDiskIcon /> Save </TitleBarButton> <TitleBarButton disabled={!currentDocument} onClick={downloadPdf}> <DownloadIcon /> PDF </TitleBarButton> <Menu renderButton={(setMenuActive, active) => ( <TitleBarButton disabled={!currentDocument} active={active}
> <VectorShapeIcon /> Objects&nbsp; <ArrowDownIcon /> </TitleBarButton> )} renderMenu={ <> <MenuItem onClick={() => actions.addObject(Shapes.Rectangle)}> Rectangle </MenuItem> <MenuItem onClick={() => actions.addObject(Shapes.Ellipse)}> Ellipse </MenuItem> <MenuItem onClick={() => actions.addObject(Shapes.Text)}> Text Box </MenuItem> </> } /> <Spacer width="1em" /> <TitleBarButton disabled={!currentDocument || !selectedObject} onClick={() => actions.handleClipboardAction(ClipboardAction.Cut)} > <CutIcon /> Cut </TitleBarButton> <TitleBarButton disabled={!currentDocument || !selectedObject} onClick={() => actions.handleClipboardAction(ClipboardAction.Copy) } > <CopyIcon /> Copy </TitleBarButton> <TitleBarButton disabled={!currentDocument || !clipboardContents} onClick={() => actions.handleClipboardAction(ClipboardAction.Paste) } > <PasteIcon /> Paste </TitleBarButton> <Spacer width="1em" /> <TitleBarButton disabled={!currentDocument} noLabel onClick={zoomIn} > <ZoomInIcon /> </TitleBarButton> <ZoomLabel disabled={!currentDocument}>{zoom * 100}%</ZoomLabel> <TitleBarButton disabled={!currentDocument} noLabel onClick={zoomOut} > <ZoomOutIcon /> </TitleBarButton> </ControlGroup> </> )} </Container> ); }; export default TitleBar;
onClick={() => setMenuActive(prevState => !prevState)}
oauth2-gen.go
// Copyright 2019 Google LLC. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Code generated file. DO NOT EDIT. // Package oauth2 provides access to the Google OAuth2 API. // // For product documentation, see: https://developers.google.com/accounts/docs/OAuth2 // // Creating a client // // Usage example: // // import "google.golang.org/api/oauth2/v2" // ... // ctx := context.Background() // oauth2Service, err := oauth2.NewService(ctx) // // In this example, Google Application Default Credentials are used for authentication. // // For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. // // Other authentication options // // By default, all available scopes (see "Constants") are used to authenticate. To restrict scopes, use option.WithScopes: // // oauth2Service, err := oauth2.NewService(ctx, option.WithScopes(oauth2.UserinfoProfileScope)) // // To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey: // // oauth2Service, err := oauth2.NewService(ctx, option.WithAPIKey("AIza...")) // // To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource: // // config := &oauth2.Config{...} // // ... // token, err := config.Exchange(ctx, ...) // oauth2Service, err := oauth2.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) // // See https://godoc.org/google.golang.org/api/option/ for details on options. package oauth2 // import "google.golang.org/api/oauth2/v2" import ( "bytes" "context" "encoding/json" "errors" "fmt" "io" "net/http" "net/url" "strconv" "strings" googleapi "google.golang.org/api/googleapi" gensupport "google.golang.org/api/internal/gensupport" option "google.golang.org/api/option" htransport "google.golang.org/api/transport/http" ) // Always reference these packages, just in case the auto-generated code // below doesn't. var _ = bytes.NewBuffer var _ = strconv.Itoa var _ = fmt.Sprintf var _ = json.NewDecoder var _ = io.Copy var _ = url.Parse var _ = gensupport.MarshalJSON var _ = googleapi.Version var _ = errors.New var _ = strings.Replace var _ = context.Canceled const apiId = "oauth2:v2" const apiName = "oauth2" const apiVersion = "v2" const basePath = "https://www.googleapis.com/" // OAuth2 scopes used by this API. const ( // Associate you with your personal info on Google PlusMeScope = "https://www.googleapis.com/auth/plus.me" // View your email address UserinfoEmailScope = "https://www.googleapis.com/auth/userinfo.email" // See your personal info, including any personal info you've made // publicly available UserinfoProfileScope = "https://www.googleapis.com/auth/userinfo.profile" ) // NewService creates a new Service. func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, error) { scopesOption := option.WithScopes( "https://www.googleapis.com/auth/plus.me", "https://www.googleapis.com/auth/userinfo.email", "https://www.googleapis.com/auth/userinfo.profile", ) // NOTE: prepend, so we don't override user-specified scopes. opts = append([]option.ClientOption{scopesOption}, opts...) client, endpoint, err := htransport.NewClient(ctx, opts...) if err != nil { return nil, err } s, err := New(client) if err != nil { return nil, err } if endpoint != ""
return s, nil } // New creates a new Service. It uses the provided http.Client for requests. // // Deprecated: please use NewService instead. // To provide a custom HTTP client, use option.WithHTTPClient. // If you are using google.golang.org/api/googleapis/transport.APIKey, use option.WithAPIKey with NewService instead. func New(client *http.Client) (*Service, error) { if client == nil { return nil, errors.New("client is nil") } s := &Service{client: client, BasePath: basePath} s.Userinfo = NewUserinfoService(s) return s, nil } type Service struct { client *http.Client BasePath string // API endpoint base URL UserAgent string // optional additional User-Agent fragment Userinfo *UserinfoService } func (s *Service) userAgent() string { if s.UserAgent == "" { return googleapi.UserAgent } return googleapi.UserAgent + " " + s.UserAgent } func NewUserinfoService(s *Service) *UserinfoService { rs := &UserinfoService{s: s} rs.V2 = NewUserinfoV2Service(s) return rs } type UserinfoService struct { s *Service V2 *UserinfoV2Service } func NewUserinfoV2Service(s *Service) *UserinfoV2Service { rs := &UserinfoV2Service{s: s} rs.Me = NewUserinfoV2MeService(s) return rs } type UserinfoV2Service struct { s *Service Me *UserinfoV2MeService } func NewUserinfoV2MeService(s *Service) *UserinfoV2MeService { rs := &UserinfoV2MeService{s: s} return rs } type UserinfoV2MeService struct { s *Service } type Jwk struct { Keys []*JwkKeys `json:"keys,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` // ForceSendFields is a list of field names (e.g. "Keys") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Keys") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *Jwk) MarshalJSON() ([]byte, error) { type NoMethod Jwk raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type JwkKeys struct { Alg string `json:"alg,omitempty"` E string `json:"e,omitempty"` Kid string `json:"kid,omitempty"` Kty string `json:"kty,omitempty"` N string `json:"n,omitempty"` Use string `json:"use,omitempty"` // ForceSendFields is a list of field names (e.g. "Alg") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Alg") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. 
It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *JwkKeys) MarshalJSON() ([]byte, error) { type NoMethod JwkKeys raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type Tokeninfo struct { // AccessType: The access type granted with this token. It can be // offline or online. AccessType string `json:"access_type,omitempty"` // Audience: Who is the intended audience for this token. In general the // same as issued_to. Audience string `json:"audience,omitempty"` // Email: The email address of the user. Present only if the email scope // is present in the request. Email string `json:"email,omitempty"` // ExpiresIn: The expiry time of the token, as number of seconds left // until expiry. ExpiresIn int64 `json:"expires_in,omitempty"` // IssuedTo: To whom was the token issued to. In general the same as // audience. IssuedTo string `json:"issued_to,omitempty"` // Scope: The space separated list of scopes granted to this token. Scope string `json:"scope,omitempty"` // TokenHandle: The token handle associated with this token. TokenHandle string `json:"token_handle,omitempty"` // UserId: The obfuscated user id. UserId string `json:"user_id,omitempty"` // VerifiedEmail: Boolean flag which is true if the email address is // verified. Present only if the email scope is present in the request. VerifiedEmail bool `json:"verified_email,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` // ForceSendFields is a list of field names (e.g. "AccessType") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "AccessType") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *Tokeninfo) MarshalJSON() ([]byte, error) { type NoMethod Tokeninfo raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type Userinfoplus struct { // Email: The user's email address. Email string `json:"email,omitempty"` // FamilyName: The user's last name. FamilyName string `json:"family_name,omitempty"` // Gender: The user's gender. Gender string `json:"gender,omitempty"` // GivenName: The user's first name. GivenName string `json:"given_name,omitempty"` // Hd: The hosted domain e.g. example.com if the user is Google apps // user. Hd string `json:"hd,omitempty"` // Id: The obfuscated ID of the user. Id string `json:"id,omitempty"` // Link: URL of the profile page. Link string `json:"link,omitempty"` // Locale: The user's preferred locale. Locale string `json:"locale,omitempty"` // Name: The user's full name. Name string `json:"name,omitempty"` // Picture: URL of the user's picture image. 
Picture string `json:"picture,omitempty"` // VerifiedEmail: Boolean flag which is true if the email address is // verified. Always verified because we only return the user's primary // email address. // // Default: true VerifiedEmail *bool `json:"verified_email,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` // ForceSendFields is a list of field names (e.g. "Email") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Email") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *Userinfoplus) MarshalJSON() ([]byte, error) { type NoMethod Userinfoplus raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // method id "oauth2.getCertForOpenIdConnect": type GetCertForOpenIdConnectCall struct { s *Service urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } // GetCertForOpenIdConnect: func (s *Service) GetCertForOpenIdConnect() *GetCertForOpenIdConnectCall { c := &GetCertForOpenIdConnectCall{s: s, urlParams_: make(gensupport.URLParams)} return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *GetCertForOpenIdConnectCall) Fields(s ...googleapi.Field) *GetCertForOpenIdConnectCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // IfNoneMatch sets the optional parameter which makes the operation // fail if the object's ETag matches the given value. This is useful for // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. func (c *GetCertForOpenIdConnectCall) IfNoneMatch(entityTag string) *GetCertForOpenIdConnectCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *GetCertForOpenIdConnectCall) Context(ctx context.Context) *GetCertForOpenIdConnectCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
func (c *GetCertForOpenIdConnectCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *GetCertForOpenIdConnectCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191221") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "oauth2/v2/certs") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "oauth2.getCertForOpenIdConnect" call. // Exactly one of *Jwk or error will be non-nil. Any non-2xx status code // is an error. Response headers are in either // *Jwk.ServerResponse.Header or (if a response was returned at all) in // error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check // whether the returned error was because http.StatusNotModified was // returned. func (c *GetCertForOpenIdConnectCall) Do(opts ...googleapi.CallOption) (*Jwk, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &Jwk{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "httpMethod": "GET", // "id": "oauth2.getCertForOpenIdConnect", // "path": "oauth2/v2/certs", // "response": { // "$ref": "Jwk" // } // } } // method id "oauth2.tokeninfo": type TokeninfoCall struct { s *Service urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // Tokeninfo: func (s *Service) Tokeninfo() *TokeninfoCall { c := &TokeninfoCall{s: s, urlParams_: make(gensupport.URLParams)} return c } // AccessToken sets the optional parameter "access_token": func (c *TokeninfoCall) AccessToken(accessToken string) *TokeninfoCall { c.urlParams_.Set("access_token", accessToken) return c } // IdToken sets the optional parameter "id_token": func (c *TokeninfoCall) IdToken(idToken string) *TokeninfoCall { c.urlParams_.Set("id_token", idToken) return c } // TokenHandle sets the optional parameter "token_handle": func (c *TokeninfoCall) TokenHandle(tokenHandle string) *TokeninfoCall { c.urlParams_.Set("token_handle", tokenHandle) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *TokeninfoCall) Fields(s ...googleapi.Field) *TokeninfoCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
func (c *TokeninfoCall) Context(ctx context.Context) *TokeninfoCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *TokeninfoCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *TokeninfoCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191221") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "oauth2/v2/tokeninfo") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "oauth2.tokeninfo" call. // Exactly one of *Tokeninfo or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Tokeninfo.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. func (c *TokeninfoCall) Do(opts ...googleapi.CallOption) (*Tokeninfo, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &Tokeninfo{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "httpMethod": "POST", // "id": "oauth2.tokeninfo", // "parameters": { // "access_token": { // "location": "query", // "type": "string" // }, // "id_token": { // "location": "query", // "type": "string" // }, // "token_handle": { // "location": "query", // "type": "string" // } // }, // "path": "oauth2/v2/tokeninfo", // "response": { // "$ref": "Tokeninfo" // } // } } // method id "oauth2.userinfo.get": type UserinfoGetCall struct { s *Service urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } // Get: func (r *UserinfoService) Get() *UserinfoGetCall { c := &UserinfoGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *UserinfoGetCall) Fields(s ...googleapi.Field) *UserinfoGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // IfNoneMatch sets the optional parameter which makes the operation // fail if the object's ETag matches the given value. This is useful for // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. func (c *UserinfoGetCall) IfNoneMatch(entityTag string) *UserinfoGetCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *UserinfoGetCall) Context(ctx context.Context) *UserinfoGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *UserinfoGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *UserinfoGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191221") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "oauth2/v2/userinfo") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "oauth2.userinfo.get" call. // Exactly one of *Userinfoplus or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Userinfoplus.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. func (c *UserinfoGetCall) Do(opts ...googleapi.CallOption) (*Userinfoplus, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &Userinfoplus{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "httpMethod": "GET", // "id": "oauth2.userinfo.get", // "path": "oauth2/v2/userinfo", // "response": { // "$ref": "Userinfoplus" // }, // "scopes": [ // "https://www.googleapis.com/auth/plus.me", // "https://www.googleapis.com/auth/userinfo.email", // "https://www.googleapis.com/auth/userinfo.profile" // ] // } } // method id "oauth2.userinfo.v2.me.get": type UserinfoV2MeGetCall struct { s *Service urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } // Get: func (r *UserinfoV2MeService) Get() *UserinfoV2MeGetCall { c := &UserinfoV2MeGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *UserinfoV2MeGetCall) Fields(s ...googleapi.Field) *UserinfoV2MeGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // IfNoneMatch sets the optional parameter which makes the operation // fail if the object's ETag matches the given value. This is useful for // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
func (c *UserinfoV2MeGetCall) IfNoneMatch(entityTag string) *UserinfoV2MeGetCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *UserinfoV2MeGetCall) Context(ctx context.Context) *UserinfoV2MeGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *UserinfoV2MeGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *UserinfoV2MeGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191221") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "userinfo/v2/me") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "oauth2.userinfo.v2.me.get" call. // Exactly one of *Userinfoplus or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Userinfoplus.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. func (c *UserinfoV2MeGetCall) Do(opts ...googleapi.CallOption) (*Userinfoplus, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &Userinfoplus{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "httpMethod": "GET", // "id": "oauth2.userinfo.v2.me.get", // "path": "userinfo/v2/me", // "response": { // "$ref": "Userinfoplus" // }, // "scopes": [ // "https://www.googleapis.com/auth/plus.me", // "https://www.googleapis.com/auth/userinfo.email", // "https://www.googleapis.com/auth/userinfo.profile" // ] // } }
{
	s.BasePath = endpoint
}
jest.config.js
module.exports = {
  name: 'angular-app',
  preset: '../../jest.config.js',
  coverageDirectory: '../../coverage/apps/angular-app/',
  snapshotSerializers: [
    'jest-preset-angular/AngularSnapshotSerializer.js',
    'jest-preset-angular/HTMLCommentSerializer.js'
  ]
};
qtum_block_header.py
#!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * from test_framework.blocktools import * from test_framework.mininode import * from test_framework.address import * from test_framework.qtum import * import time from test_framework.key import ECKey from test_framework.script import * import struct import io def find_unspent(node, amount): for unspent in node.listunspent(): if unspent['amount'] == amount and unspent['spendable']: return CTxIn(COutPoint(int(unspent['txid'], 16), unspent['vout']), nSequence=0) assert(False) class
(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 self.setup_clean_chain = True self.extra_args = [[]] def skip_test_if_missing_module(self): self.skip_if_no_wallet() def run_test(self): self.nodes[0].add_p2p_connection(P2PDataStore()) self.nodes[0].p2p.wait_for_getheaders(timeout=5) node = self.nodes[0] #mocktime = 1490247077 #node.setmocktime(mocktime) node.generate(10) self.block_time = int(time.time())+20 for i in range(500): self.tip = create_block(int(node.getbestblockhash(), 16), create_coinbase(node.getblockcount()+1), self.block_time+i) self.tip.solve() self.sync_blocks([self.tip]) #node.generate(COINBASE_MATURITY+50) mocktime = COINBASE_MATURITY+50 spendable_addresses = [] # store some addresses to use later for unspent in node.listunspent(): spendable_addresses.append(unspent['address']) # first make sure that what is a valid block is accepted coinbase = create_coinbase(node.getblockcount()+1) coinbase.rehash() self.tip = create_block(int(node.getbestblockhash(), 16), coinbase, int(time.time()+mocktime+100)) self.tip.hashMerkleRoot = self.tip.calc_merkle_root() self.tip.solve() self.sync_blocks([self.tip]) coinbase = create_coinbase(node.getblockcount()+1) coinbase.rehash() # A block that has an OP_CREATE tx, butwith an incorrect state root """ pragma solidity ^0.4.11; contract Test { function() payable {} } """ tx_hex = node.createcontract("60606040523415600b57fe5b5b60398060196000396000f30060606040525b600b5b5b565b0000a165627a7a72305820693c4900c412f72a51f8c01a36d38d9038d822d953faf5a5b28e40ec6e1a25020029", 1000000, QTUM_MIN_GAS_PRICE_STR, spendable_addresses.pop(-1), False)['raw transaction'] f = io.BytesIO(hex_str_to_bytes(tx_hex)) tx = CTransaction() tx.deserialize(f) coinbase = create_coinbase(node.getblockcount()+1) coinbase.rehash() self.tip = create_block(int(node.getbestblockhash(), 16), coinbase, int(mocktime+200)) self.tip.vtx.append(tx) self.tip.hashMerkleRoot = self.tip.calc_merkle_root() self.tip.solve() self.sync_blocks([self.tip], success=False, reconnect=True) # Create a contract for use later. 
""" pragma solidity ^0.4.11; contract Test { function() payable {} } """ contract_address = node.createcontract("60606040523415600b57fe5b5b60398060196000396000f30060606040525b600b5b5b565b0000a165627a7a72305820693c4900c412f72a51f8c01a36d38d9038d822d953faf5a5b28e40ec6e1a25020029")['address'] node.generate(1) realHashUTXORoot = int(node.getblock(node.getbestblockhash())['hashUTXORoot'], 16) realHashStateRoot = int(node.getblock(node.getbestblockhash())['hashStateRoot'], 16) # A block with both an invalid hashStateRoot and hashUTXORoot coinbase = create_coinbase(node.getblockcount()+1) coinbase.rehash() self.tip = create_block(int(node.getbestblockhash(), 16), coinbase, int(mocktime+300)) self.tip.hashUTXORoot = 0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa self.tip.hashStateRoot = 0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa self.tip.hashMerkleRoot = self.tip.calc_merkle_root() self.tip.solve() self.sync_blocks([self.tip], success=False, reconnect=True) # A block with a tx, but without updated state hashes tx_hex = node.sendtocontract(contract_address, "00", 1, 100000, QTUM_MIN_GAS_PRICE_STR, spendable_addresses.pop(-1), False)['raw transaction'] f = io.BytesIO(hex_str_to_bytes(tx_hex)) tx = CTransaction() tx.deserialize(f) coinbase = create_coinbase(node.getblockcount()+1) coinbase.rehash() self.tip = create_block(int(node.getbestblockhash(), 16), coinbase, int(mocktime+400)) self.tip.hashUTXORoot = realHashUTXORoot self.tip.hashStateRoot = realHashStateRoot self.tip.vtx.append(tx) self.tip.hashMerkleRoot = self.tip.calc_merkle_root() self.tip.solve() self.sync_blocks([self.tip], success=False, reconnect=True) # A block with an invalid hashUTXORoot coinbase = create_coinbase(node.getblockcount()+1) coinbase.rehash() self.tip = create_block(int(node.getbestblockhash(), 16), coinbase, int(mocktime+500)) self.tip.hashStateRoot = realHashStateRoot self.tip.hashUTXORoot = 0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa self.tip.hashMerkleRoot = self.tip.calc_merkle_root() self.tip.solve() self.sync_blocks([self.tip], success=False, reconnect=True) # A block with an invalid hashStateRoot coinbase = create_coinbase(node.getblockcount()+1) coinbase.rehash() self.tip = create_block(int(node.getbestblockhash(), 16), coinbase, int(mocktime+600)) self.tip.hashUTXORoot = realHashUTXORoot self.tip.hashStateRoot = 0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa self.tip.hashMerkleRoot = self.tip.calc_merkle_root() self.tip.solve() self.sync_blocks([self.tip], success=False, reconnect=True) # Verify that blocks with a correct hashStateRoot and hashUTXORoot are accepted. coinbase = create_coinbase(node.getblockcount()+1) coinbase.rehash() self.tip = create_block(int(node.getbestblockhash(), 16), coinbase, int(mocktime+700)) self.tip.hashUTXORoot = realHashUTXORoot self.tip.hashStateRoot = realHashStateRoot self.tip.hashMerkleRoot = self.tip.calc_merkle_root() self.tip.solve() self.sync_blocks([self.tip]) def reconnect_p2p(self): """Tear down and bootstrap the P2P connection to the node. The node gets disconnected several times in this test. This helper method reconnects the p2p and restarts the network thread.""" self.nodes[0].disconnect_p2ps() self.nodes[0].add_p2p_connection(P2PDataStore()) self.nodes[0].p2p.wait_for_getheaders(timeout=5) def sync_blocks(self, blocks, success=True, reject_code=None, reject_reason=None, force_send=False, reconnect=False, timeout=5): """Sends blocks to test node. 
Syncs and verifies that tip has advanced to most recent block. Call with success = False if the tip shouldn't advance to the most recent block.""" self.nodes[0].p2p.send_blocks_and_test(blocks, self.nodes[0], success=success, reject_reason=reject_reason, force_send=force_send, timeout=timeout, expect_disconnect=reconnect) if reconnect: self.reconnect_p2p() if __name__ == '__main__': QtumBlockHeaderTest().main()
QtumBlockHeaderTest
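Every negative case above repeats the same boilerplate: build a block on the current tip, corrupt one or both of the extra Qtum header roots (hashStateRoot, hashUTXORoot), and assert the node rejects it. A sketch of that pattern factored into a helper; create_block, create_coinbase, and sync_blocks are the framework pieces already used in the test, while the helper itself is hypothetical.

BAD_ROOT = 0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa

def submit_with_roots(test, node, block_time, state_root=None, utxo_root=None, expect_accept=False):
    coinbase = create_coinbase(node.getblockcount() + 1)
    coinbase.rehash()
    tip = create_block(int(node.getbestblockhash(), 16), coinbase, block_time)
    if state_root is not None:
        tip.hashStateRoot = state_root   # e.g. BAD_ROOT to force rejection
    if utxo_root is not None:
        tip.hashUTXORoot = utxo_root
    tip.hashMerkleRoot = tip.calc_merkle_root()
    tip.solve()
    test.sync_blocks([tip], success=expect_accept, reconnect=not expect_accept)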
mod.rs
// Copyright 2016 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Core types pub mod block; pub mod build; pub mod hash; pub mod pmmr; pub mod target; pub mod transaction; // pub mod txoset; #[allow(dead_code)] use std::fmt; use std::cmp::Ordering; use std::num::ParseFloatError; use consensus::GRIN_BASE; use util::{secp, static_secp_instance}; use util::secp::pedersen::*; pub use self::block::*; pub use self::transaction::*; use self::hash::Hashed; use ser::{Error, Readable, Reader, Writeable, Writer}; use global; /// Implemented by types that hold inputs and outputs including Pedersen /// commitments. Handles the collection of the commitments as well as their /// summing, taking potential explicit overages of fees into account. pub trait Committed { /// Gathers commitments and sum them. fn sum_commitments(&self) -> Result<Commitment, secp::Error> { // first, verify each range proof let ref outputs = self.outputs_committed(); for output in *outputs { try!(output.verify_proof()) } // then gather the commitments let mut input_commits = map_vec!(self.inputs_committed(), |inp| inp.commitment()); let mut output_commits = map_vec!(self.outputs_committed(), |out| out.commitment()); // add the overage as output commitment if positive, as an input commitment if // negative let overage = self.overage(); if overage != 0 { let over_commit = { let secp = static_secp_instance(); let secp = secp.lock().unwrap(); secp.commit_value(overage.abs() as u64).unwrap() }; if overage < 0 { input_commits.push(over_commit); } else { output_commits.push(over_commit); } } // sum all that stuff { let secp = static_secp_instance(); let secp = secp.lock().unwrap(); secp.commit_sum(output_commits, input_commits) } } /// Vector of committed inputs to verify fn inputs_committed(&self) -> &Vec<Input>; /// Vector of committed inputs to verify fn outputs_committed(&self) -> &Vec<Output>; /// The overage amount expected over the commitments. Can be negative (a /// fee) or positive (a reward). fn overage(&self) -> i64; } /// Proof of work pub struct Proof { /// The nonces pub nonces: Vec<u32>, /// The proof size pub proof_size: usize, } impl fmt::Debug for Proof { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { try!(write!(f, "Cuckoo(")); for (i, val) in self.nonces[..].iter().enumerate() { try!(write!(f, "{:x}", val)); if i < self.nonces.len() - 1 { try!(write!(f, " ")); } } write!(f, ")") } } impl PartialOrd for Proof { fn partial_cmp(&self, other: &Proof) -> Option<Ordering> { self.nonces.partial_cmp(&other.nonces) } } impl PartialEq for Proof { fn eq(&self, other: &Proof) -> bool { self.nonces[..] == other.nonces[..] 
} } impl Eq for Proof {} impl Clone for Proof { fn clone(&self) -> Proof { let mut out_nonces = Vec::new(); for n in self.nonces.iter() { out_nonces.push(*n as u32); } Proof { proof_size: out_nonces.len(), nonces: out_nonces, } } } impl Proof { /// Builds a proof with all bytes zeroed out pub fn new(in_nonces: Vec<u32>) -> Proof { Proof { proof_size: in_nonces.len(), nonces: in_nonces, } } /// Builds a proof with all bytes zeroed out pub fn zero(proof_size: usize) -> Proof { Proof { proof_size: proof_size, nonces: vec![0; proof_size], } } /// Converts the proof to a vector of u64s pub fn to_u64s(&self) -> Vec<u64> { let mut out_nonces = Vec::with_capacity(self.proof_size); for n in self.nonces.iter() { out_nonces.push(*n as u64); } out_nonces } /// Converts the proof to a vector of u32s pub fn to_u32s(&self) -> Vec<u32> { self.clone().nonces } /// Converts the proof to a proof-of-work Target so they can be compared. /// Hashes the Cuckoo Proof data. pub fn to_difficulty(self) -> target::Difficulty { target::Difficulty::from_hash(&self.hash()) } } impl Readable for Proof { fn read(reader: &mut Reader) -> Result<Proof, Error> { let proof_size = global::proofsize(); let mut pow = vec![0u32; proof_size]; for n in 0..proof_size { pow[n] = try!(reader.read_u32()); } Ok(Proof::new(pow)) } } impl Writeable for Proof { fn write<W: Writer>(&self, writer: &mut W) -> Result<(), Error> { for n in 0..self.proof_size { try!(writer.write_u32(self.nonces[n])); } Ok(()) } } /// Common method for parsing an amount from human-readable, and converting /// to internally-compatible u64 pub fn amount_from_hr_string(amount: &str) -> Result<u64, ParseFloatError> { let amount = amount.parse::<f64>()?; Ok((amount * GRIN_BASE as f64) as u64) } /// Common method for converting an amount to a human-readable string pub fn amount_to_hr_string(amount: u64) -> String { let amount = (amount as f64 / GRIN_BASE as f64) as f64; let places = (GRIN_BASE as f64).log(10.0) as usize + 1; String::from(format!("{:.*}", places, amount)) } #[cfg(test)] mod test { use super::*; use core::hash::ZERO_HASH; use core::build::{initial_tx, input, output, with_excess, with_fee, with_lock_height}; use core::block::Error::KernelLockHeight; use ser; use keychain; use keychain::{BlindingFactor, Keychain}; #[test] pub fn test_amount_to_hr() { assert!(50123456789 == amount_from_hr_string("50.123456789").unwrap()); assert!(50 == amount_from_hr_string(".000000050").unwrap()); assert!(1 == amount_from_hr_string(".000000001").unwrap()); assert!(0 == amount_from_hr_string(".0000000009").unwrap()); assert!(500_000_000_000 == amount_from_hr_string("500").unwrap()); assert!( 5_000_000_000_000_000_000 == amount_from_hr_string("5000000000.00000000000").unwrap() ); } #[test] pub fn test_hr_to_amount() { assert!("50.123456789" == amount_to_hr_string(50123456789)); assert!("0.000000050" == amount_to_hr_string(50)); assert!("0.000000001" == amount_to_hr_string(1)); assert!("500.000000000" == amount_to_hr_string(500_000_000_000)); assert!("5000000000.000000000" == amount_to_hr_string(5_000_000_000_000_000_000)); } #[test] #[should_panic(expected = "InvalidSecretKey")] fn test_zero_commit_fails() { let keychain = Keychain::from_random_seed().unwrap(); let key_id1 = keychain.derive_key_id(1).unwrap(); // blinding should fail as signing with a zero r*G shouldn't work build::transaction( vec![ input(10, key_id1.clone()), output(9, key_id1.clone()), with_fee(1), ], &keychain, ).unwrap(); } #[test] fn simple_tx_ser() { let tx = tx2i1o(); let mut vec = Vec::new(); 
ser::serialize(&mut vec, &tx).expect("serialized failed"); assert!(vec.len() > 5360); assert!(vec.len() < 5380); } #[test] fn simple_tx_ser_deser() { let tx = tx2i1o(); let mut vec = Vec::new(); ser::serialize(&mut vec, &tx).expect("serialization failed"); let dtx: Transaction = ser::deserialize(&mut &vec[..]).unwrap(); assert_eq!(dtx.fee, 2); assert_eq!(dtx.inputs.len(), 2); assert_eq!(dtx.outputs.len(), 1); assert_eq!(tx.hash(), dtx.hash()); } #[test] fn tx_double_ser_deser() { // checks serializing doesn't mess up the tx and produces consistent results let btx = tx2i1o(); let mut vec = Vec::new(); assert!(ser::serialize(&mut vec, &btx).is_ok()); let dtx: Transaction = ser::deserialize(&mut &vec[..]).unwrap(); let mut vec2 = Vec::new(); assert!(ser::serialize(&mut vec2, &btx).is_ok()); let dtx2: Transaction = ser::deserialize(&mut &vec2[..]).unwrap(); assert_eq!(btx.hash(), dtx.hash()); assert_eq!(dtx.hash(), dtx2.hash()); } #[test] fn hash_output() { let keychain = Keychain::from_random_seed().unwrap(); let key_id1 = keychain.derive_key_id(1).unwrap(); let key_id2 = keychain.derive_key_id(2).unwrap(); let key_id3 = keychain.derive_key_id(3).unwrap(); let (tx, _) = build::transaction( vec![ input(75, key_id1), output(42, key_id2), output(32, key_id3), with_fee(1), ], &keychain, ).unwrap(); let h = tx.outputs[0].hash(); assert!(h != ZERO_HASH); let h2 = tx.outputs[1].hash(); assert!(h != h2); } #[test] fn blind_tx() { let btx = tx2i1o(); btx.verify_sig().unwrap(); // unwrap will panic if invalid // checks that the range proof on our blind output is sufficiently hiding let Output { proof, .. } = btx.outputs[0]; let secp = static_secp_instance(); let secp = secp.lock().unwrap(); let info = secp.range_proof_info(proof); assert!(info.min == 0); assert!(info.max == u64::max_value()); }
#[test] fn tx_hash_diff() { let btx1 = tx2i1o(); let btx2 = tx1i1o(); if btx1.hash() == btx2.hash() { panic!("diff txs have same hash") } } /// Simulate the standard exchange between 2 parties when creating a basic /// 2 inputs, 2 outputs transaction. #[test] fn tx_build_exchange() { let keychain = Keychain::from_random_seed().unwrap(); let key_id1 = keychain.derive_key_id(1).unwrap(); let key_id2 = keychain.derive_key_id(2).unwrap(); let key_id3 = keychain.derive_key_id(3).unwrap(); let key_id4 = keychain.derive_key_id(4).unwrap(); let tx_alice: Transaction; let blind_sum: BlindingFactor; { // Alice gets 2 of her pre-existing outputs to send 5 coins to Bob, they // become inputs in the new transaction let (in1, in2) = (input(4, key_id1), input(3, key_id2)); // Alice builds her transaction, with change, which also produces the sum // of blinding factors before they're obscured. let (tx, sum) = build::transaction(vec![in1, in2, output(1, key_id3), with_fee(2)], &keychain) .unwrap(); tx_alice = tx; blind_sum = sum; } // From now on, Bob only has the obscured transaction and the sum of // blinding factors. He adds his output, finalizes the transaction so it's // ready for broadcast. let (tx_final, _) = build::transaction( vec![ initial_tx(tx_alice), with_excess(blind_sum), output(4, key_id4), ], &keychain, ).unwrap(); tx_final.validate().unwrap(); } #[test] fn reward_empty_block() { let keychain = keychain::Keychain::from_random_seed().unwrap(); let key_id = keychain.derive_key_id(1).unwrap(); let b = Block::new(&BlockHeader::default(), vec![], &keychain, &key_id).unwrap(); b.compact().validate().unwrap(); } #[test] fn reward_with_tx_block() { let keychain = keychain::Keychain::from_random_seed().unwrap(); let key_id = keychain.derive_key_id(1).unwrap(); let mut tx1 = tx2i1o(); tx1.verify_sig().unwrap(); let b = Block::new(&BlockHeader::default(), vec![&mut tx1], &keychain, &key_id).unwrap(); b.compact().validate().unwrap(); } #[test] fn simple_block() { let keychain = keychain::Keychain::from_random_seed().unwrap(); let key_id = keychain.derive_key_id(1).unwrap(); let mut tx1 = tx2i1o(); let mut tx2 = tx1i1o(); let b = Block::new( &BlockHeader::default(), vec![&mut tx1, &mut tx2], &keychain, &key_id, ).unwrap(); b.validate().unwrap(); } #[test] fn test_block_with_timelocked_tx() { let keychain = keychain::Keychain::from_random_seed().unwrap(); let key_id1 = keychain.derive_key_id(1).unwrap(); let key_id2 = keychain.derive_key_id(2).unwrap(); let key_id3 = keychain.derive_key_id(3).unwrap(); // first check we can add a timelocked tx where lock height matches current block height // and that the resulting block is valid let tx1 = build::transaction( vec![ input(5, key_id1.clone()), output(3, key_id2.clone()), with_fee(2), with_lock_height(1), ], &keychain, ).map(|(tx, _)| tx) .unwrap(); let b = Block::new( &BlockHeader::default(), vec![&tx1], &keychain, &key_id3.clone(), ).unwrap(); b.validate().unwrap(); // now try adding a timelocked tx where lock height is greater than current block height let tx1 = build::transaction( vec![ input(5, key_id1.clone()), output(3, key_id2.clone()), with_fee(2), with_lock_height(2), ], &keychain, ).map(|(tx, _)| tx) .unwrap(); let b = Block::new( &BlockHeader::default(), vec![&tx1], &keychain, &key_id3.clone(), ).unwrap(); match b.validate() { Err(KernelLockHeight { lock_height: height }) => { assert_eq!(height, 2); } _ => panic!("expecting KernelLockHeight error here"), } } #[test] pub fn test_verify_1i1o_sig() { let tx = tx1i1o(); tx.verify_sig().unwrap(); } 
#[test]
pub fn test_verify_2i1o_sig() {
	let tx = tx2i1o();
	tx.verify_sig().unwrap();
}

// utility producing a transaction with 2 inputs and a single output
pub fn tx2i1o() -> Transaction {
	let keychain = keychain::Keychain::from_random_seed().unwrap();
	let key_id1 = keychain.derive_key_id(1).unwrap();
	let key_id2 = keychain.derive_key_id(2).unwrap();
	let key_id3 = keychain.derive_key_id(3).unwrap();

	build::transaction(
		vec![
			input(10, key_id1),
			input(11, key_id2),
			output(19, key_id3),
			with_fee(2),
		],
		&keychain,
	).map(|(tx, _)| tx)
		.unwrap()
}

// utility producing a transaction with a single input and output
pub fn tx1i1o() -> Transaction {
	let keychain = keychain::Keychain::from_random_seed().unwrap();
	let key_id1 = keychain.derive_key_id(1).unwrap();
	let key_id2 = keychain.derive_key_id(2).unwrap();

	build::transaction(
		vec![input(5, key_id1), output(3, key_id2), with_fee(2)],
		&keychain,
	).map(|(tx, _)| tx)
		.unwrap()
}
}
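// A standalone sketch (not part of the original file) of the human-readable
// amount helpers above. Parsing goes through f64, so precision is limited to
// the 9 decimal places implied by GRIN_BASE; sub-nanogrin values truncate to
// zero, as test_amount_to_hr already asserts.
#[cfg(test)]
mod hr_amount_sketch {
	use super::*;

	#[test]
	fn round_trip() {
		let nanogrin = amount_from_hr_string("50.123456789").unwrap();
		assert_eq!(nanogrin, 50123456789);
		assert_eq!(amount_to_hr_string(nanogrin), "50.123456789");
	}
}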
test_qbs_submission_data.py
from tests.integration.integration_test_case import IntegrationTestCase class TestQbsSubmissionData(IntegrationTestCase): def test_submission_data_2_0001(self): self.submission_data('2', '0001') def submission_data(self, eq_id, form_type_id):
self.launchSurvey(eq_id, form_type_id, roles=['dumper']) # We are on the introduction page self.assertInPage('>Start survey<') self.assertInPage('Quarterly Business Survey') # We proceed to the questionnaire self.post(action='start_questionnaire') # We are in the Questionnaire self.assertInPage('>Quarterly Business Survey</') self.assertInPage('what was the number of employees for Integration Tests?') self.assertInPage('>Save and continue<') # When I submit answers self.post(post_data={'number-of-employees-total': '10'}) self.post(post_data={'number-of-employees-male-more-30-hours': '1', 'number-of-employees-male-less-30-hours': '2', 'number-of-employees-female-more-30-hours': '3', 'number-of-employees-female-less-30-hours': '4'}) # There are no validation errors (we're on the summary screen) self.assertInUrl('summary') self.assertInPage('>Quarterly Business Survey</') self.assertInPage('>Check your answers and submit<') self.assertInPage('You can check your answers below') self.assertInPage('>Submit answers<') # And the JSON response contains the data I submitted actual = self.dumpSubmission() expected = { 'submission': { 'origin': 'uk.gov.ons.edc.eq', 'started_at': actual['submission']['started_at'], 'submitted_at': actual['submission']['submitted_at'], 'case_id': actual['submission']['case_id'], 'collection': { 'exercise_sid': '789', 'period': '201604', 'instrument_id': '0001' }, 'survey_id': '139', 'flushed': False, 'tx_id': actual['submission']['tx_id'], 'data': { '50': '10', '51': '1', '52': '2', '53': '3', '54': '4' }, 'type': 'uk.gov.ons.edc.eq:surveyresponse', 'version': '0.0.1', 'metadata': { 'ref_period_end_date': '2016-04-30', 'ref_period_start_date': '2016-04-01', 'ru_ref': '123456789012A', 'user_id': 'integration-test' } } } # Enable full dictionary diffs on test failure self.maxDiff = None self.assertDictEqual(actual, expected)
pub_utils.py
import datetime
import re


class PublicationUtils:
    @staticmethod
    def get_month(bibtex_entry):
        month = bibtex_entry.get("month")
        m = None
        try:
            m = int(month)
        except Exception:
            pass
        try:
            m = datetime.datetime.strptime(month, "%b").month
        except Exception:
            pass
        try:
            m = datetime.datetime.strptime(month, "%B").month
        except Exception:
            pass
        return m

    @staticmethod
    def get_forum(bibtex_entry):
        forum = []
        if "journal" in bibtex_entry:
            forum.append(bibtex_entry["journal"])
        if "booktitle" in bibtex_entry:
            forum.append(bibtex_entry["booktitle"])
        if "series" in bibtex_entry:
            forum.append(bibtex_entry["series"])
        if "publisher" in bibtex_entry:
            forum.append(bibtex_entry["publisher"])
        if "school" in bibtex_entry:
            forum.append(bibtex_entry["school"])
        if "institution" in bibtex_entry:
            forum.append(bibtex_entry["institution"])
        if "address" in bibtex_entry:
            forum.append(bibtex_entry["address"])
        return ",".join(forum)

    @staticmethod
    def get_link(bibtex_entry):
        if "note" in bibtex_entry:
            m = re.search("^\\\\url{(.+?)}$", bibtex_entry["note"])
            if m:
                return m.group(1)
        if "howpublished" in bibtex_entry:
            m = re.search("^\\\\url{(.+?)}$", bibtex_entry["howpublished"])
            if m:
                return m.group(1)
        return None
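# A hypothetical usage sketch (not part of the module): the bibtex_entry
# dict below is invented for illustration.
if __name__ == "__main__":
    entry = {
        "month": "Jun",
        "journal": "Journal of Examples",
        "note": "\\url{https://example.org/paper.pdf}",
    }
    assert PublicationUtils.get_month(entry) == 6
    assert PublicationUtils.get_forum(entry) == "Journal of Examples"
    assert PublicationUtils.get_link(entry) == "https://example.org/paper.pdf"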
form-xeditable.init.js
"use strict";$(function(){$.fn.editableform.buttons='<button type="submit" class="btn btn-primary editable-submit btn-sm waves-effect waves-light"><i class="mdi mdi-check"></i></button><button type="button" class="btn btn-danger editable-cancel btn-sm waves-effect"><i class="mdi mdi-close"></i></button>',$("#inline-username").editable({type:"text",pk:1,name:"username",title:"Enter username",mode:"inline",inputclass:"form-control-sm form-control"}),$("#inline-firstname").editable({validate:function(e){if(""==$.trim(e))return"This field is required"},mode:"inline",inputclass:"form-control-sm form-control"}),$("#inline-sex").editable({prepend:"not selected",mode:"inline",inputclass:"form-select-sm form-select",source:[{value:1,text:"Male"},{value:2,text:"Female"}],display:function(t,e){e=$.grep(e,function(e){return e.value==t});e.length?$(this).text(e[0].text).css("color",{"":"gray",1:"green",2:"blue"}[t]):$(this).empty()}}),$("#inline-group").editable({showbuttons:!1,mode:"inline",inputclass:"form-select-sm form-select"}),$("#inline-status").editable({mode:"inline",inputclass:"form-select-sm form-select"}),$("#inline-dob").editable({mode:"inline",inputclass:"form-select-sm form-select"}),$("#inline-event").editable({placement:"bottom",showbuttons:"bottom",mode:"inline",combodate:{firstItem:"name"},inputclass:"form-select-sm form-select"}),$("#inline-comments").editable({showbuttons:"bottom",mode:"inline",inputclass:"form-control-sm form-control"}),$("#inline-fruits").editable({pk:1,limit:3,mode:"inline",inputclass:"form-check-input",source:[{value:1,text:"Banana"},{value:2,text:"Peach"},{value:3,text:"Apple"},{value:4,text:"Watermelon"},{value:5,text:"Orange"}]})});
utils.py
import re import pandas as pd from ....core import flatten from ....utils import natural_sort_key class Engine: """ The API necessary to provide a new Parquet reader/writer """ @classmethod def read_metadata( cls, fs, paths, categories=None, index=None, gather_statistics=None, filters=None, **kwargs ): """Gather metadata about a Parquet Dataset to prepare for a read This function is called once in the user's Python session to gather important metadata about the parquet dataset. Parameters ---------- fs: FileSystem paths: List[str] A list of paths to files (or their equivalents) categories: list, dict or None Column(s) containing categorical data. index: str, List[str], or False The column name(s) to be used as the index. If set to ``None``, pandas metadata (if available) can be used to reset the value in this function gather_statistics: bool Whether or not to gather statistics data. If ``None``, we only gather statistics data if there is a _metadata file available to query (cheaply) filters: list List of filters to apply, like ``[('x', '>', 0), ...]``. **kwargs: dict (of dicts) User-specified arguments to pass on to backend. Top level key can be used by engine to select appropriate dict. Returns ------- meta: pandas.DataFrame An empty DataFrame object to use for metadata. Should have appropriate column names and dtypes but need not have any actual data statistics: Optional[List[Dict]] Either None, if no statistics were found, or a list of dictionaries of statistics data, one dict for every partition (see the next return value). The statistics should look like the following: [ {'num-rows': 1000, 'columns': [ {'name': 'id', 'min': 0, 'max': 100}, {'name': 'x', 'min': 0.0, 'max': 1.0}, ]}, ... ] parts: List[object] A list of objects to be passed to ``Engine.read_partition``. Each object should represent a piece of data (usually a row-group). The type of each object can be anything, as long as the engine's read_partition function knows how to interpret it. """ raise NotImplementedError() @classmethod def read_partition(cls, fs, piece, columns, index, **kwargs): """Read a single piece of a Parquet dataset into a Pandas DataFrame This function is called many times in individual tasks Parameters ---------- fs: FileSystem piece: object This is some token that is returned by Engine.read_metadata. Typically it represents a row group in a Parquet dataset columns: List[str] List of column names to pull out of that row group index: str, List[str], or False The index name(s). **kwargs: Includes `"kwargs"` values stored within the `parts` output of `engine.read_metadata`. May also include arguments to be passed to the backend (if stored under a top-level `"read"` key). Returns ------- A Pandas DataFrame """ raise NotImplementedError() @classmethod def initialize_write( cls, df, fs, path, append=False, partition_on=None, ignore_divisions=False, division_info=None, **kwargs ): """Perform engine-specific initialization steps for this dataset Parameters ---------- df: dask.dataframe.DataFrame fs: FileSystem path: str Destination directory for data. Prepend with protocol like ``s3://`` or ``hdfs://`` for remote data. append: bool If True, may use existing metadata (if any) and perform checks against the new data being stored. partition_on: List(str) Column(s) to use for dataset partitioning in parquet. ignore_divisions: bool Whether or not to ignore old divisions when appending. Otherwise, overlapping divisions will lead to an error being raised. 
division_info: dict Dictionary containing the divisions and corresponding column name. **kwargs: dict Other keyword arguments (including `index_cols`) Returns ------- tuple: engine-specific instance list of filenames, one per partition """ raise NotImplementedError @classmethod def write_partition( cls, df, path, fs, filename, partition_on, return_metadata, **kwargs ): """ Output a partition of a dask.DataFrame. This will correspond to one output file, unless partition_on is set, in which case, it will correspond to up to one file in each sub-directory. Parameters ---------- df: dask.dataframe.DataFrame path: str Destination directory for data. Prepend with protocol like ``s3://`` or ``hdfs://`` for remote data. fs: FileSystem filename: str partition_on: List(str) Column(s) to use for dataset partitioning in parquet. return_metadata : bool Whether to return list of instances from this write, one for each output file. These will be passed to write_metadata if an output metadata file is requested. **kwargs: dict Other keyword arguments (including `fmd` and `index_cols`) Returns ------- List of metadata-containing instances (if `return_metadata` is `True`) or empty list """ raise NotImplementedError @classmethod def write_metadata(cls, parts, meta, fs, path, append=False, **kwargs): """ Write the shared metadata file for a parquet dataset. Parameters ---------- parts: List Contains metadata objects to write, of the type undrestood by the specific implementation meta: non-chunk metadata Details that do not depend on the specifics of each chunk write, typically the schema and pandas metadata, in a format the writer can use. fs: FileSystem path: str Output file to write to, usually ``"_metadata"`` in the root of the output dataset append: boolean Whether or not to consolidate new metadata with existing (True) or start from scratch (False) **kwargs: dict Other keyword arguments (including `compression`) """ raise NotImplementedError() @classmethod def collect_file_metadata(cls, path, fs, file_path): """ Collect parquet metadata from a file and set the file_path. Parameters ---------- path: str Parquet-file path to extract metadata from. fs: FileSystem file_path: str Relative path to set as `file_path` in the metadata. Returns ------- A metadata object. The specific type should be recognized by the aggregate_metadata method. """ raise NotImplementedError() @classmethod def aggregate_metadata(cls, meta_list, fs, out_path):
        """
        Aggregate a list of metadata objects and optionally write out the
        final result as a _metadata file.

        Parameters
        ----------
        meta_list: list
            List of metadata objects to be aggregated into a single
            metadata object, and optionally written to disk. The specific
            element type can be engine specific.
        fs: FileSystem
        out_path: str or None
            Directory to write the final _metadata file. If None is
            specified, the aggregated metadata will be returned, and
            nothing will be written to disk.

        Returns
        -------
        If out_path is None, an aggregate metadata object is returned.
        Otherwise, None is returned.
        """
        raise NotImplementedError()
def _parse_pandas_metadata(pandas_metadata): """Get the set of names from the pandas metadata section Parameters ---------- pandas_metadata : dict Should conform to the pandas parquet metadata spec Returns ------- index_names : list List of strings indicating the actual index names column_names : list List of strings indicating the actual column names storage_name_mapping : dict Pairs of storage names (e.g. the field names for PyArrow) and actual names. The storage and field names will differ for index names for certain writers (pyarrow > 0.8). column_indexes_names : list The names for ``df.columns.name`` or ``df.columns.names`` for a MultiIndex in the columns Notes ----- This should support metadata written by at least * fastparquet>=0.1.3 * pyarrow>=0.7.0 """ index_storage_names = [ n["name"] if isinstance(n, dict) else n for n in pandas_metadata["index_columns"] ] index_name_xpr = re.compile(r"__index_level_\d+__") # older metadatas will not have a 'field_name' field so we fall back # to the 'name' field pairs = [ (x.get("field_name", x["name"]), x["name"]) for x in pandas_metadata["columns"] ] # Need to reconcile storage and real names. These will differ for # pyarrow, which uses __index_leveL_d__ for the storage name of indexes. # The real name may be None (e.g. `df.index.name` is None). pairs2 = [] for storage_name, real_name in pairs: if real_name and index_name_xpr.match(real_name): real_name = None pairs2.append((storage_name, real_name)) index_names = [name for (storage_name, name) in pairs2 if name != storage_name] # column_indexes represents df.columns.name # It was added to the spec after pandas 0.21.0+, and implemented # in PyArrow 0.8. It was added to fastparquet in 0.3.1. column_index_names = pandas_metadata.get("column_indexes", [{"name": None}]) column_index_names = [x["name"] for x in column_index_names] # Now we need to disambiguate between columns and index names. PyArrow # 0.8.0+ allows for duplicates between df.index.names and df.columns if not index_names: # For PyArrow < 0.8, Any fastparquet. This relies on the facts that # 1. Those versions used the real index name as the index storage name # 2. Those versions did not allow for duplicate index / column names # So we know that if a name is in index_storage_names, it must be an # index name if index_storage_names and isinstance(index_storage_names[0], dict): # Cannot handle dictionary case index_storage_names = [] index_names = list(index_storage_names) # make a copy index_storage_names2 = set(index_storage_names) column_names = [ name for (storage_name, name) in pairs if name not in index_storage_names2 ] else: # For newer PyArrows the storage names differ from the index names # iff it's an index level. Though this is a fragile assumption for # other systems... 
column_names = [name for (storage_name, name) in pairs2 if name == storage_name] storage_name_mapping = dict(pairs2) # TODO: handle duplicates gracefully return index_names, column_names, storage_name_mapping, column_index_names def _normalize_index_columns(user_columns, data_columns, user_index, data_index): """Normalize user and file-provided column and index names Parameters ---------- user_columns : None, str or list of str data_columns : list of str user_index : None, str, or list of str data_index : list of str Returns ------- column_names : list of str index_names : list of str """ specified_columns = user_columns is not None specified_index = user_index is not None if user_columns is None: user_columns = list(data_columns) elif isinstance(user_columns, str): user_columns = [user_columns] else: user_columns = list(user_columns) if user_index is None: user_index = data_index elif user_index is False: # When index is False, use no index and all fields should be treated as # columns (unless `columns` provided). user_index = [] data_columns = data_index + data_columns elif isinstance(user_index, str): user_index = [user_index] else: user_index = list(user_index) if specified_index and not specified_columns: # Only `index` provided. Use specified index, and all column fields # that weren't specified as indices index_names = user_index column_names = [x for x in data_columns if x not in index_names] elif specified_columns and not specified_index: # Only `columns` provided. Use specified columns, and all index fields # that weren't specified as columns column_names = user_columns index_names = [x for x in data_index if x not in column_names] elif specified_index and specified_columns: # Both `index` and `columns` provided. Use as specified, but error if # they intersect. column_names = user_columns index_names = user_index if set(column_names).intersection(index_names): raise ValueError("Specified index and column names must not intersect") else: # Use default columns and index from the metadata column_names = data_columns index_names = data_index return column_names, index_names def _sort_and_analyze_paths(file_list, fs, root=False): file_list = sorted(file_list, key=natural_sort_key) base, fns = _analyze_paths(file_list, fs, root=root) return file_list, base, fns def _analyze_paths(file_list, fs, root=False): """Consolidate list of file-paths into parquet relative paths Note: This function was mostly copied from dask/fastparquet to use in both `FastParquetEngine` and `ArrowEngine`.""" def _join_path(*path): def _scrub(i, p): # Convert path to standard form # this means windows path separators are converted to linux p = p.replace(fs.sep, "/") if p == "": # empty path is assumed to be a relative path return "." 
if p[-1] == "/": # trailing slashes are not allowed p = p[:-1] if i > 0 and p[0] == "/": # only the first path can start with / p = p[1:] return p abs_prefix = "" if path and path[0]: if path[0][0] == "/": abs_prefix = "/" path = list(path) path[0] = path[0][1:] elif fs.sep == "\\" and path[0][1:].startswith(":/"): # If windows, then look for the "c:/" prefix abs_prefix = path[0][0:3] path = list(path) path[0] = path[0][3:] _scrubbed = [] for i, p in enumerate(path): _scrubbed.extend(_scrub(i, p).split("/")) simpler = [] for s in _scrubbed: if s == ".": pass elif s == "..": if simpler: if simpler[-1] == "..": simpler.append(s) else: simpler.pop() elif abs_prefix: raise Exception("can not get parent of root") else: simpler.append(s) else: simpler.append(s) if not simpler: if abs_prefix: joined = abs_prefix else: joined = "." else: joined = abs_prefix + ("/".join(simpler)) return joined path_parts_list = [_join_path(fn).split("/") for fn in file_list] if root is False: basepath = path_parts_list[0][:-1] for i, path_parts in enumerate(path_parts_list): j = len(path_parts) - 1 for k, (base_part, path_part) in enumerate(zip(basepath, path_parts)): if base_part != path_part: j = k break basepath = basepath[:j] l = len(basepath) else: basepath = _join_path(root).split("/") l = len(basepath) assert all( p[:l] == basepath for p in path_parts_list ), "All paths must begin with the given root" out_list = [] for path_parts in path_parts_list: out_list.append( "/".join(path_parts[l:]) ) # use '/'.join() instead of _join_path to be consistent with split('/') return ( "/".join(basepath), out_list, ) # use '/'.join() instead of _join_path to be consistent with split('/') def _flatten_filters(filters): """Flatten DNF-formatted filters (list of tuples)""" return ( set(flatten(tuple(flatten(filters, container=list)), container=tuple)) if filters else [] ) def _aggregate_stats( file_path, file_row_group_stats, file_row_group_column_stats, stat_col_indices, ): """Utility to aggregate the statistics for N row-groups into a single dictionary. Used by `Engine._construct_parts` """ if len(file_row_group_stats) < 1: # Empty statistics return {} elif len(file_row_group_column_stats) == 0: assert len(file_row_group_stats) == 1 return file_row_group_stats[0] else: # Note: It would be better to avoid df_rgs and df_cols # construction altogether. It makes it fast to aggregate # the statistics for many row groups, but isn't # worthwhile for a small number of row groups. 
if len(file_row_group_stats) > 1: df_rgs = pd.DataFrame(file_row_group_stats) s = { "file_path_0": file_path, "num-rows": df_rgs["num-rows"].sum(), "total_byte_size": df_rgs["total_byte_size"].sum(), "columns": [], } else: s = { "file_path_0": file_path, "num-rows": file_row_group_stats[0]["num-rows"], "total_byte_size": file_row_group_stats[0]["total_byte_size"], "columns": [], } df_cols = None if len(file_row_group_column_stats) > 1: df_cols = pd.DataFrame(file_row_group_column_stats) for ind, name in enumerate(stat_col_indices): i = ind * 2 if df_cols is None: s["columns"].append( { "name": name, "min": file_row_group_column_stats[0][i], "max": file_row_group_column_stats[0][i + 1], } ) else: s["columns"].append( { "name": name, "min": df_cols.iloc[:, i].min(), "max": df_cols.iloc[:, i + 1].max(), } ) return s def _row_groups_to_parts( gather_statistics, split_row_groups, file_row_groups, file_row_group_stats, file_row_group_column_stats, stat_col_indices, make_part_func, make_part_kwargs, ): # Construct `parts` and `stats` parts = [] stats = [] if split_row_groups: # Create parts from each file, # limiting the number of row_groups in each piece split_row_groups = int(split_row_groups) for filename, row_groups in file_row_groups.items(): row_group_count = len(row_groups) for i in range(0, row_group_count, split_row_groups): i_end = i + split_row_groups rg_list = row_groups[i:i_end] part = make_part_func( filename, rg_list, **make_part_kwargs, ) if part is None: continue parts.append(part) if gather_statistics: stat = _aggregate_stats( filename, file_row_group_stats[filename][i:i_end], file_row_group_column_stats[filename][i:i_end], stat_col_indices, ) stats.append(stat) else: for filename, row_groups in file_row_groups.items(): part = make_part_func( filename, row_groups, **make_part_kwargs, ) if part is None: continue parts.append(part) if gather_statistics: stat = _aggregate_stats( filename, file_row_group_stats[filename], file_row_group_column_stats[filename], stat_col_indices, ) stats.append(stat) return parts, stats
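# A minimal sketch (not part of the module) of how _normalize_index_columns,
# defined above, resolves user-specified vs. file-provided names; the column
# and index names below are invented for illustration.
if __name__ == "__main__":
    # Explicit `columns` plus a pandas-metadata index: keep both, disjoint.
    cols, idx = _normalize_index_columns("x", ["x", "y"], None, ["idx"])
    assert (cols, idx) == (["x"], ["idx"])

    # index=False folds the index fields back into ordinary columns.
    cols, idx = _normalize_index_columns(None, ["x", "y"], False, ["idx"])
    assert (cols, idx) == (["idx", "x", "y"], [])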
""" Aggregate a list of metadata objects and optionally write out the final result as a _metadata file. Parameters ---------- meta_list: list List of metadata objects to be aggregated into a single metadata object, and optionally written to disk. The specific element type can be engine specific. fs: FileSystem out_path: str or None Directory to write the final _metadata file. If None is specified, the aggregated metadata will be returned, and nothing will be written to disk. Returns ------- If out_path is None, an aggregate metadata object is returned. Otherwise, None is returned. """ raise NotImplementedError()
test6.py
#!/usr/bin/env python3
import time
import Adafruit_GPIO.SPI as SPI
import Adafruit_SSD1306
import RPi.GPIO as GPIO  # used by setup()/loop() below
from signal import pause
#import dht11
import math
from PIL import Image  # used by Image.new() below
from PIL import ImageDraw
from PIL import ImageFont
#import subprocess
#import sys
import board
import pigpio
import DHT
import datetime

# Raspberry Pi pin configuration:
RST = None  # on the PiOLED this pin isn't used
# Note the following are only used with SPI:
DC = 23
SPI_PORT = 0
SPI_DEVICE = 0

# 128x32 display with hardware I2C:
disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST)

# Initialize library.
disp.begin()

# Clear display.
disp.clear()
disp.display()

# Create blank image for drawing.
# Make sure to create image with mode '1' for 1-bit color.
width = disp.width
height = disp.height
image = Image.new('1', (width, height))

# Get drawing object to draw on image.
draw = ImageDraw.Draw(image)

# Draw a black filled box to clear the image.
draw.rectangle((0, 0, width, height), outline=0, fill=0)

# Draw some shapes.
# First define some constants to allow easy resizing of shapes.
padding = -2
top = padding
bottom = height - padding
# Move left to right keeping track of the current x position for drawing shapes.
x = 0

# Load default font.
font = ImageFont.load_default()

LED_red = 20
num_readings = 0
time_even = 0.0
time_odd = 0.0
time_difference = 0.0
temperature = 0.0
humidity = 0.0

sensor = DHT.DHT11
pi = pigpio.pi()
pin = 4  # Data - Pin 7 (BCM 4)
s = DHT.sensor(pi, pin, model=sensor)

# Magnet sensor
GPIO_PIN = 16


def setup():
    GPIO.setmode(GPIO.BCM)  # Numbers GPIOs by physical location
    GPIO.setwarnings(False)
    GPIO.setup(LED_red, GPIO.OUT)
    GPIO.setup(GPIO_PIN, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
    GPIO.add_event_detect(GPIO_PIN, GPIO.RISING, callback=outputFunction, bouncetime=100)


# function called when sensor is tripped
def outputFunction(null):
    global num_readings
    print("Sensor is blocked")
    GPIO.output(LED_red, GPIO.HIGH)
    num_readings += 1


def loop():
    while True:
        global temperature
        global humidity
        global GPIO_PIN
        GPIO.output(LED_red, GPIO.LOW)
        time.sleep(2)
        timestamp, gpio, status, temperature, humidity = s.read()  # read DHT device

        # Draw a black filled box to clear the image.
        draw.rectangle((0, 0, width, height), outline=0, fill=0)

        # Write two lines of text.
        draw.text((x, top), "Weather Station ", font=font, fill=255)
        draw.text((x, top + 8), "Temperature: C " + str(temperature), font=font, fill=255)
        draw.text((x, top + 16), "Wind Speed: kts ", font=font, fill=255)
        draw.text((x, top + 25), "Humidity: % " + str(humidity), font=font, fill=255)

        # Display image.
        disp.image(image)
        disp.display()


def destroy():
    GPIO.cleanup()  # Release resource


if __name__ == '__main__':  # Program start from here
    setup()
    try:
        loop()
    except KeyboardInterrupt:
        # When 'Ctrl+C' is pressed, the child program destroy() will be executed.
        destroy()
gen_assets.go
// Code generated by "esc -pkg frontend -o examples/hotrod/services/frontend/gen_assets.go -prefix examples/hotrod/services/frontend/web_assets examples/hotrod/services/frontend/web_assets"; DO NOT EDIT. package frontend import ( "bytes" "compress/gzip" "encoding/base64" "fmt" "io" "io/ioutil" "net/http" "os" "path" "sync" "time" ) type _escLocalFS struct{} var _escLocal _escLocalFS type _escStaticFS struct{} var _escStatic _escStaticFS type _escDirectory struct { fs http.FileSystem name string } type _escFile struct { compressed string size int64 modtime int64 local string isDir bool once sync.Once data []byte name string } func (_escLocalFS) Open(name string) (http.File, error) { f, present := _escData[path.Clean(name)] if !present { return nil, os.ErrNotExist } return os.Open(f.local) } func (_escStaticFS) prepare(name string) (*_escFile, error) { f, present := _escData[path.Clean(name)] if !present { return nil, os.ErrNotExist } var err error f.once.Do(func() { f.name = path.Base(name) if f.size == 0 { return } var gr *gzip.Reader b64 := base64.NewDecoder(base64.StdEncoding, bytes.NewBufferString(f.compressed)) gr, err = gzip.NewReader(b64) if err != nil { return } f.data, err = ioutil.ReadAll(gr) }) if err != nil { return nil, err } return f, nil } func (fs _escStaticFS) Open(name string) (http.File, error) { f, err := fs.prepare(name) if err != nil { return nil, err } return f.File() } func (dir _escDirectory) Open(name string) (http.File, error) { return dir.fs.Open(dir.name + name) } func (f *_escFile) File() (http.File, error) { type httpFile struct { *bytes.Reader *_escFile } return &httpFile{ Reader: bytes.NewReader(f.data), _escFile: f, }, nil } func (f *_escFile) Close() error { return nil } func (f *_escFile) Readdir(count int) ([]os.FileInfo, error) { if !f.isDir { return nil, fmt.Errorf(" escFile.Readdir: '%s' is not directory", f.name) } fis, ok := _escDirs[f.local] if !ok { return nil, fmt.Errorf(" escFile.Readdir: '%s' is directory, but we have no info about content of this dir, local=%s", f.name, f.local) } limit := count if count <= 0 || limit > len(fis) { limit = len(fis) } if len(fis) == 0 && count > 0 { return nil, io.EOF } return fis[0:limit], nil } func (f *_escFile) Stat() (os.FileInfo, error) { return f, nil } func (f *_escFile) Name() string { return f.name } func (f *_escFile) Size() int64 { return f.size } func (f *_escFile) Mode() os.FileMode { return 0 } func (f *_escFile) ModTime() time.Time { return time.Unix(f.modtime, 0) } func (f *_escFile) IsDir() bool { return f.isDir } func (f *_escFile) Sys() interface{} { return f } // FS returns a http.Filesystem for the embedded assets. If useLocal is true, // the filesystem's contents are instead used. func FS(useLocal bool) http.FileSystem { if useLocal { return _escLocal } return _escStatic } // Dir returns a http.Filesystem for the embedded assets on a given prefix dir. // If useLocal is true, the filesystem's contents are instead used. func Dir(useLocal bool, name string) http.FileSystem { if useLocal { return _escDirectory{fs: _escLocal, name: name} } return _escDirectory{fs: _escStatic, name: name} } // FSByte returns the named file from the embedded assets. If useLocal is // true, the filesystem's contents are instead used. func FSByte(useLocal bool, name string) ([]byte, error) { if useLocal { f, err := _escLocal.Open(name) if err != nil
{ return nil, err }
b, err := ioutil.ReadAll(f) _ = f.Close() return b, err } f, err := _escStatic.prepare(name) if err != nil { return nil, err } return f.data, nil } // FSMustByte is the same as FSByte, but panics if name is not present. func FSMustByte(useLocal bool, name string) []byte { b, err := FSByte(useLocal, name) if err != nil { panic(err) } return b } // FSString is the string version of FSByte. func FSString(useLocal bool, name string) (string, error) { b, err := FSByte(useLocal, name) return string(b), err } // FSMustString is the string version of FSMustByte. func FSMustString(useLocal bool, name string) string { return string(FSMustByte(useLocal, name)) } var _escData = map[string]*_escFile{ "/index.html": { name: "index.html", local: "examples/hotrod/services/frontend/web_assets/index.html", size: 3384, modtime: 1551364059, compressed: ` H4sIAAAAAAAC/9RX/1PbOBb/PX/FG23v7FywnRAoNMS54UiX0r0uvUC709vpD7L0YivYkivJgSzD/34j 2wkJlJv9cZsZQHrfP++LXhhntsgnHYBxgZYCy6g2aGNycXUZHB8fvgkG5JEraYExWQq8LZW2BJiSFqWN ya3gNos5LgXDoL7sgZDCCpoHhtEc40HY34OC3omiKrZJlUFd32mSY9xvnGVIuTsAjK2wOU7eKTu7nEIA M8HRwKWEKRZU8nHU8BtZw7QoLRjNYpJZW5pRFDHFMVx8q1CvQqaKqDkGw3AQDsJCyHBhyGQcNaqtnVzI G9CYx8TYVY4mQ7QEMo3zR7sFvWNcholS1lhNS3dx9jeEaBgOw6OIGfNIqx0yYwgIaTHVwq5iYjI6PD4I /vX5ixBXFz/jLwN+Xryfnd6sWPXu9N0sHe5fFp/Y7e2RksPZF54efKa9j8XVtfkj+uX18TLhbxfZQUWA aWWM0iIVMiZUKrkqVGXI/0nOnwWxeIph8V0I1+zw4j8i6e8ffVuuFlcf5u8Wlx/ov2/m1W+f7/579+mj PHt/epTvF2e//XpRnr8pzs+mx7fnv16wj9Oj6zv6MoTHArVgXF0mnbCqBId7KKhOhQysKkcwOCzvTuCh E2bKasWDpLJWSbiHknIuZDqC/b6TYJU2So+gVA6IPtk10v+ekVGmlqjh/rnuXOQW9QgSLdLMSjTGPz78 W9eZ+Kk1kav0hUh/sqJ8gVWDjVq0bjKi9WiME8VXbWm5WALLqTExcRNJhUTdln2XW6eL5qht8zsQcq5c drlYbuQZOkzrq5vGgZs/mIWX4TQcR9lgm3cwGWMxeTaWWEzGUXawJbkVhla35JHzHEIeFDwYgjuYInj9 RLZpgJLKZ1T3aY0kVkJiZQ2wPiS5YjewU07yXQOcWhqwylhVoI7JYH9IJjPKMsw9Az/nStMcpmhEKs04 cmE8QbKdy786uOGbfTK51qqAs0wxlVMrUP/wqI6GAzJ5T0sq0aCrFWr749fq8PURmZwW9A8hUzhT8zki zBQ1FvWfAff06nAKHhMrSjI5ywW7ASVh7a7e9EATtUSwCpTmqIECozp8ydDjM0fW2HOk/OnrEm0/LxvW OGqes85mUU06nXklmRVKwlzpgtpppam7+rw9dOG+A7CkGjjEsKZCBP6gX3/gHzBo/rzud09a2UoKayAG rxDSc0SNttISPlCbhVpVkvu8C71G7qTz0Ok4LZYLlPbTp4spxNuizZFKrgq/2/pzvpxOTo2d4bcKja3V +iedziuf1EuLdEP3xcsnX1Sl4RaT1oNnQPCRW3BayXRCoLftugfEbYOG1W3N7bZSN2SumP46eT4ubZOo nXB6vXU+9FaEu64C531Ha60z12iyM6ohhlf+K59s7TjSDUuNJUrue9vDVKsEjGpSL4ypMCW1LHPN3PRV GP6u8dsIvN4mop73td0krk28bsgykXON0u/+3v+6qeimaWPApQ0t1Sna0I2PQRuuuU7aLU/Urvr3bT96 C4op6iChaUpT9EbgGTRGKBl7T3Pv7a2TVfM2YXYAHpx1pqRROYa5Sv3W0ybGBOdKI8QwpRZDqW79mvUq pAt653sRb/Pxz83A1+7XyHrg/V0qabAm7zTdXoul9ThaH/ZqaoE2U3wE3vnba68hmYoxNGYEmxZxqdoD i3f2ylJbme4mPS50Ord1bncjr5+4LcDOxoZRz+R6HuNn80stDd9en27E1+3UjITXftkfJxMHtpaearFs 0jCOkglQrcXStY6QUMusffXAg7aNtivUFC+nFiVbNTy/hhU0dXHz7hXmq9eG9OAy9dA96bif+nnaPErj qPmH6X8BAAD//7STX3s4DQAA `, }, "/": { name: "/", local: `examples/hotrod/services/frontend/web_assets`, isDir: true, }, } var _escDirs = map[string][]os.FileInfo{ "examples/hotrod/services/frontend/web_assets": { _escData["/index.html"], }, }
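// A hypothetical usage sketch (not part of the generated file): FS(false)
// serves the compiled-in assets, FS(true) reads the files from the local
// tree instead, and FSMustString fetches one embedded file directly.
func exampleServeAssets() {
	mux := http.NewServeMux()
	mux.Handle("/", http.FileServer(FS(false)))
	_ = FSMustString(false, "/index.html") // panics if the asset is missing
	_ = http.ListenAndServe(":8080", mux)  // :8080 is an arbitrary choice
}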
reader_test.go
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package zlib import ( "bytes" "io" "testing" ) type zlibTest struct { desc string raw string compressed []byte dict []byte err error } // Compare-to-golden test data was generated by the ZLIB example program at // http://www.zlib.net/zpipe.c var zlibTests = []zlibTest{ { "truncated empty", "", []byte{}, nil, io.ErrUnexpectedEOF, }, { "truncated dict", "", []byte{0x78, 0xbb}, []byte{0x00}, io.ErrUnexpectedEOF, }, { "truncated checksum", "", []byte{0x78, 0xbb, 0x00, 0x01, 0x00, 0x01, 0xca, 0x48, 0xcd, 0xc9, 0xc9, 0xd7, 0x51, 0x28, 0xcf, 0x2f, 0xca, 0x49, 0x01, 0x04, 0x00, 0x00, 0xff, 0xff, }, []byte{0x00}, io.ErrUnexpectedEOF, }, { "empty", "", []byte{0x78, 0x9c, 0x03, 0x00, 0x00, 0x00, 0x00, 0x01}, nil, nil, }, { "goodbye", "goodbye, world", []byte{ 0x78, 0x9c, 0x4b, 0xcf, 0xcf, 0x4f, 0x49, 0xaa, 0x4c, 0xd5, 0x51, 0x28, 0xcf, 0x2f, 0xca, 0x49, 0x01, 0x00, 0x28, 0xa5, 0x05, 0x5e, }, nil, nil, }, { "bad header", "", []byte{0x78, 0x9f, 0x03, 0x00, 0x00, 0x00, 0x00, 0x01}, nil, ErrHeader, }, { "bad checksum", "", []byte{0x78, 0x9c, 0x03, 0x00, 0x00, 0x00, 0x00, 0xff}, nil, ErrChecksum, }, { "not enough data", "", []byte{0x78, 0x9c, 0x03, 0x00, 0x00, 0x00}, nil, io.ErrUnexpectedEOF, }, { "excess data is silently ignored", "", []byte{ 0x78, 0x9c, 0x03, 0x00, 0x00, 0x00, 0x00, 0x01, 0x78, 0x9c, 0xff, }, nil, nil, }, { "dictionary", "Hello, World!\n", []byte{ 0x78, 0xbb, 0x1c, 0x32, 0x04, 0x27, 0xf3, 0x00, 0xb1, 0x75, 0x20, 0x1c, 0x45, 0x2e, 0x00, 0x24, 0x12, 0x04, 0x74, }, []byte{ 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x57, 0x6f, 0x72, 0x6c, 0x64, 0x0a, }, nil, }, { "wrong dictionary", "", []byte{ 0x78, 0xbb, 0x1c, 0x32, 0x04, 0x27, 0xf3, 0x00, 0xb1, 0x75, 0x20, 0x1c, 0x45, 0x2e, 0x00, 0x24, 0x12, 0x04, 0x74, }, []byte{ 0x48, 0x65, 0x6c, 0x6c, }, ErrDictionary, }, } func TestDecompressor(t *testing.T)
{ b := new(bytes.Buffer) for _, tt := range zlibTests { in := bytes.NewReader(tt.compressed) zr, err := NewReaderDict(in, tt.dict) if err != nil { if err != tt.err { t.Errorf("%s: NewReader: %s", tt.desc, err) } continue } defer zr.Close() // Read and verify correctness of data. b.Reset() n, err := io.Copy(b, zr) if err != nil { if err != tt.err { t.Errorf("%s: io.Copy: %v want %v", tt.desc, err, tt.err) } continue } s := b.String() if s != tt.raw { t.Errorf("%s: got %d-byte %q want %d-byte %q", tt.desc, n, s, len(tt.raw), tt.raw) } // Check for sticky errors. if n, err := zr.Read([]byte{0}); n != 0 || err != io.EOF { t.Errorf("%s: Read() = (%d, %v), want (0, io.EOF)", tt.desc, n, err) } if err := zr.Close(); err != nil { t.Errorf("%s: Close() = %v, want nil", tt.desc, err) } } }
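// A redundant sanity-check sketch (not part of the original file):
// decompress the "goodbye" vector from zlibTests directly with NewReader,
// without the table-driven harness.
func TestGoodbyeVectorSketch(t *testing.T) {
	compressed := []byte{
		0x78, 0x9c, 0x4b, 0xcf, 0xcf, 0x4f, 0x49, 0xaa,
		0x4c, 0xd5, 0x51, 0x28, 0xcf, 0x2f, 0xca, 0x49,
		0x01, 0x00, 0x28, 0xa5, 0x05, 0x5e,
	}
	zr, err := NewReader(bytes.NewReader(compressed))
	if err != nil {
		t.Fatal(err)
	}
	defer zr.Close()
	var out bytes.Buffer
	if _, err := io.Copy(&out, zr); err != nil {
		t.Fatal(err)
	}
	if got := out.String(); got != "goodbye, world" {
		t.Fatalf("got %q, want %q", got, "goodbye, world")
	}
}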
availability_domain.go
// Copyright (c) 2016, 2018, 2021, Oracle and/or its affiliates. All rights reserved. // This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. // Code generated. DO NOT EDIT. // Blockchain Platform Control Plane API // // Blockchain Platform Control Plane API // package blockchain import ( "github.com/oracle/oci-go-sdk/v36/common" ) // AvailabilityDomain Availability Domains type AvailabilityDomain struct { // Availability Domain Identifiers Ads AvailabilityDomainAdsEnum `mandatory:"false" json:"ads,omitempty"` } func (m AvailabilityDomain) String() string { return common.PointerString(m) } // AvailabilityDomainAdsEnum Enum with underlying type: string type AvailabilityDomainAdsEnum string // Set of constants representing the allowable values for AvailabilityDomainAdsEnum const ( AvailabilityDomainAdsAd1 AvailabilityDomainAdsEnum = "AD1" AvailabilityDomainAdsAd2 AvailabilityDomainAdsEnum = "AD2" AvailabilityDomainAdsAd3 AvailabilityDomainAdsEnum = "AD3" ) var mappingAvailabilityDomainAds = map[string]AvailabilityDomainAdsEnum{ "AD1": AvailabilityDomainAdsAd1, "AD2": AvailabilityDomainAdsAd2, "AD3": AvailabilityDomainAdsAd3, } // GetAvailabilityDomainAdsEnumValues Enumerates the set of values for AvailabilityDomainAdsEnum func
GetAvailabilityDomainAdsEnumValues() []AvailabilityDomainAdsEnum {
	values := make([]AvailabilityDomainAdsEnum, 0)
	for _, v := range mappingAvailabilityDomainAds {
		values = append(values, v)
	}
	return values
}
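// A hypothetical helper (not part of the generated file) showing how the
// mapping above can validate a raw string before treating it as an enum.
func isValidAvailabilityDomainAd(raw string) bool {
	_, ok := mappingAvailabilityDomainAds[raw]
	return ok
}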
publishzipandmod.go
package _go import ( "net/http" "net/url" "strings" rthttpclient "github.com/cobalt77/jfrog-client-go/artifactory/httpclient" "github.com/cobalt77/jfrog-client-go/artifactory/services/utils" "github.com/cobalt77/jfrog-client-go/auth" "github.com/cobalt77/jfrog-client-go/utils/errorutils" "github.com/cobalt77/jfrog-client-go/utils/io/fileutils" "github.com/cobalt77/jfrog-client-go/utils/io/httputils" "github.com/cobalt77/jfrog-client-go/utils/version" ) func
init
() { register(&publishZipAndModApi{}) } const ArtifactoryMinSupportedVersionForInfoFile = "6.10.0" // Support for Artifactory 6.6.1 and above API type publishZipAndModApi struct { artifactoryVersion string clientDetails httputils.HttpClientDetails client *rthttpclient.ArtifactoryHttpClient } func (pwa *publishZipAndModApi) isCompatible(artifactoryVersion string) bool { propertiesApi := "6.6.1" version := version.NewVersion(artifactoryVersion) pwa.artifactoryVersion = artifactoryVersion return version.AtLeast(propertiesApi) } func (pwa *publishZipAndModApi) PublishPackage(params GoParams, client *rthttpclient.ArtifactoryHttpClient, ArtDetails auth.ServiceDetails) error { url, err := utils.BuildArtifactoryUrl(ArtDetails.GetUrl(), "api/go/"+params.GetTargetRepo(), make(map[string]string)) if err != nil { return err } pwa.clientDetails = ArtDetails.CreateHttpClientDetails() pwa.client = client moduleId := strings.Split(params.GetModuleId(), ":") // Upload zip file err = pwa.upload(params.GetZipPath(), moduleId[0], params.GetVersion(), params.GetProps(), ".zip", url) if err != nil { return err } // Upload mod file err = pwa.upload(params.GetModPath(), moduleId[0], params.GetVersion(), params.GetProps(), ".mod", url) if err != nil { return err } if version.NewVersion(pwa.artifactoryVersion).AtLeast(ArtifactoryMinSupportedVersionForInfoFile) && params.GetInfoPath() != "" { // Upload info file. This supported from Artifactory version 6.10.0 and above return pwa.upload(params.GetInfoPath(), moduleId[0], params.GetVersion(), params.GetProps(), ".info", url) } return nil } func addGoVersion(version string, urlPath *string) { *urlPath += ";go.version=" + url.QueryEscape(version) } // localPath - The location of the file on the file system. // moduleId - The name of the module for example github.com/cobalt77/jfrog-client-go. // version - The version of the project that being uploaded. // props - The properties to be assigned for each artifact // ext - The extension of the file: zip, mod, info. This extension will be joined with the version for the path. For example v1.2.3.info or v1.2.3.zip // urlPath - The url including the repository. For example: http://127.0.0.1/artifactory/api/go/go-local func (pwa *publishZipAndModApi) upload(localPath, moduleId, version, props, ext, urlPath string) error { err := CreateUrlPath(moduleId, version, props, ext, &urlPath) if err != nil { return err } addGoVersion(version, &urlPath) details, err := fileutils.GetFileDetails(localPath) if err != nil { return err } utils.AddChecksumHeaders(pwa.clientDetails.Headers, details) resp, _, err := pwa.client.UploadFile(localPath, urlPath, "", &pwa.clientDetails, GoUploadRetries, nil) if err != nil { return err } return errorutils.CheckResponseStatus(resp, http.StatusCreated) }
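// A small sketch (not part of the original file) of addGoVersion: it appends
// the URL-escaped go.version matrix parameter to an upload path. The path
// below is hypothetical.
func addGoVersionSketch() string {
	p := "api/go/go-local/github.com/org/repo/@v/v1.2.3.zip"
	addGoVersion("v1.2.3", &p)
	return p // "api/go/go-local/github.com/org/repo/@v/v1.2.3.zip;go.version=v1.2.3"
}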
manifest_test.go
package config import ( "os" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestLoadManifest(t *testing.T)
{
	c := NewTestConfig(t)
	c.SetupPorterHome()
	c.TestContext.AddTestFile("testdata/simple.porter.yaml", Name)

	require.NoError(t, c.LoadManifest())
	assert.NotNil(t, c.Manifest)
	assert.Equal(t, []string{"exec"}, c.Manifest.Mixins)
	assert.Len(t, c.Manifest.Install, 1)

	installStep := c.Manifest.Install[0]
	assert.NotNil(t, installStep.Description)

	mixin := installStep.GetMixinName()
	assert.Equal(t, "exec", mixin)
}
func TestLoadManifestWithDependencies(t *testing.T) { c := NewTestConfig(t) c.SetupPorterHome() c.TestContext.AddTestFile("testdata/porter.yaml", Name) c.TestContext.AddTestDirectory("testdata/bundles", "bundles") require.NoError(t, c.LoadManifest()) assert.NotNil(t, c.Manifest) assert.Equal(t, []string{"helm", "exec"}, c.Manifest.Mixins) assert.Len(t, c.Manifest.Install, 2) installStep := c.Manifest.Install[0] assert.NotNil(t, installStep.Description) mixin := installStep.GetMixinName() assert.Equal(t, "helm", mixin) } func TestConfig_LoadManifest_BundleDependencyNotInstalled(t *testing.T) { c := NewTestConfig(t) c.TestContext.AddTestFile("testdata/missingdep.porter.yaml", Name) err := c.LoadManifest() require.Errorf(t, err, "bundle missingdep not installed in PORTER_HOME") } func TestAction_Validate_RequireMixinDeclaration(t *testing.T) { c := NewTestConfig(t) c.SetupPorterHome() c.TestContext.AddTestFile("testdata/simple.porter.yaml", Name) err := c.LoadManifest() require.NoError(t, err) // Sabotage! c.Manifest.Mixins = []string{} err = c.Manifest.Install.Validate(c.Manifest) assert.EqualError(t, err, "mixin (exec) was not declared") } func TestAction_Validate_RequireMixinData(t *testing.T) { c := NewTestConfig(t) c.SetupPorterHome() c.TestContext.AddTestFile("testdata/simple.porter.yaml", Name) err := c.LoadManifest() require.NoError(t, err) // Sabotage! c.Manifest.Install[0].Data = nil err = c.Manifest.Install.Validate(c.Manifest) assert.EqualError(t, err, "no mixin specified") } func TestAction_Validate_RequireSingleMixinData(t *testing.T) { c := NewTestConfig(t) c.SetupPorterHome() c.TestContext.AddTestFile("testdata/simple.porter.yaml", Name) err := c.LoadManifest() require.NoError(t, err) // Sabotage! c.Manifest.Install[0].Data["rando-mixin"] = "" err = c.Manifest.Install.Validate(c.Manifest) assert.EqualError(t, err, "more than one mixin specified") } func TestResolveMapParam(t *testing.T) { m := &Manifest{ Parameters: []ParameterDefinition{ ParameterDefinition{ Name: "person", }, }, } os.Setenv("PERSON", "Ralpha") s := &Step{ Description: "a test step", Data: map[string]interface{}{ "Parameters": map[string]interface{}{ "Thing": map[string]interface{}{ "source": "bundle.parameters.person", }, }, }, } err := m.ResolveStep(s) require.NoError(t, err) pms, ok := s.Data["Parameters"].(map[string]interface{}) assert.True(t, ok) val, ok := pms["Thing"].(string) assert.True(t, ok) assert.Equal(t, "Ralpha", val) } func TestResolveMapParamUnknown(t *testing.T) { m := &Manifest{ Parameters: []ParameterDefinition{}, } s := &Step{ Description: "a test step", Data: map[string]interface{}{ "Parameters": map[string]interface{}{ "Thing": map[string]interface{}{ "source": "bundle.parameters.person", }, }, }, } err := m.ResolveStep(s) require.Error(t, err) assert.Equal(t, "unable to set value for Thing: no value found for source specification: bundle.parameters.person", err.Error()) } func TestResolveArrayUnknown(t *testing.T) { m := &Manifest{ Parameters: []ParameterDefinition{ ParameterDefinition{ Name: "name", }, }, } s := &Step{ Description: "a test step", Data: map[string]interface{}{ "Arguments": []string{ "source: bundle.parameters.person", }, }, } err := m.ResolveStep(s) require.Error(t, err) assert.Equal(t, "unable to source value: no value found for source specification: bundle.parameters.person", err.Error()) } func TestResolveArray(t *testing.T) { m := &Manifest{ Parameters: []ParameterDefinition{ ParameterDefinition{ Name: "person", }, }, } os.Setenv("PERSON", "Ralpha") s := &Step{ 
Description: "a test step", Data: map[string]interface{}{ "Arguments": []string{ "source: bundle.parameters.person", }, }, } err := m.ResolveStep(s) require.NoError(t, err) args, ok := s.Data["Arguments"].([]string) assert.True(t, ok) assert.Equal(t, "Ralpha", args[0]) } func TestDependency_Validate_NameRequired(t *testing.T) { c := NewTestConfig(t) c.SetupPorterHome() c.TestContext.AddTestFile("testdata/porter.yaml", Name) c.TestContext.AddTestDirectory("testdata/bundles", "bundles") err := c.LoadManifest() require.NoError(t, err) // Sabotage! c.Manifest.Dependencies[0].Name = "" err = c.Manifest.Dependencies[0].Validate() assert.EqualError(t, err, "dependency name is required") } func TestManifest_MergeDependency(t *testing.T) { m := &Manifest{ Mixins: []string{"helm"}, Install: Steps{ &Step{Description: "install wordpress"}, }, Uninstall: Steps{ &Step{Description: "uninstall wordpress"}, }, } dep := &Dependency{ m: &Manifest{ Mixins: []string{"exec", "helm"}, Install: Steps{ &Step{Description: "install mysql"}, }, Uninstall: Steps{ &Step{Description: "uninstall mysql"}, }, Credentials: []CredentialDefinition{ {Name: "kubeconfig", Path: "/root/.kube/config"}, }, }, } err := m.MergeDependency(dep) require.NoError(t, err) assert.Equal(t, []string{"exec", "helm"}, m.Mixins) assert.Len(t, m.Install, 2) assert.Equal(t, "install mysql", m.Install[0].Description) assert.Equal(t, "install wordpress", m.Install[1].Description) assert.Len(t, m.Uninstall, 2) assert.Equal(t, "uninstall wordpress", m.Uninstall[0].Description) assert.Equal(t, "uninstall mysql", m.Uninstall[1].Description) assert.Len(t, m.Credentials, 1) } func TestMergeCredentials(t *testing.T) { testcases := []struct { name string c1, c2, wantResult CredentialDefinition wantError string }{ { name: "combine path and environment variable", c1: CredentialDefinition{Name: "foo", Path: "p1"}, c2: CredentialDefinition{Name: "foo", EnvironmentVariable: "v2"}, wantResult: CredentialDefinition{Name: "foo", Path: "p1", EnvironmentVariable: "v2"}, }, { name: "same path", c1: CredentialDefinition{Name: "foo", Path: "p"}, c2: CredentialDefinition{Name: "foo", Path: "p"}, wantResult: CredentialDefinition{Name: "foo", Path: "p"}, }, { name: "conflicting path", c1: CredentialDefinition{Name: "foo", Path: "p1"}, c2: CredentialDefinition{Name: "foo", Path: "p2"}, wantError: "cannot merge credential foo: conflict on path", }, { name: "same environment variable", c1: CredentialDefinition{Name: "foo", EnvironmentVariable: "v"}, c2: CredentialDefinition{Name: "foo", EnvironmentVariable: "v"}, wantResult: CredentialDefinition{Name: "foo", EnvironmentVariable: "v"}, }, { name: "conflicting environment variable", c1: CredentialDefinition{Name: "foo", EnvironmentVariable: "v1"}, c2: CredentialDefinition{Name: "foo", EnvironmentVariable: "v2"}, wantError: "cannot merge credential foo: conflict on environment variable", }, } for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { result, err := mergeCredentials(tc.c1, tc.c2) if tc.wantError == "" { require.NoError(t, err) assert.Equal(t, tc.wantResult, result) } else { require.Contains(t, err.Error(), tc.wantError) } }) } } func TestManifest_ApplyBundleOutputs(t *testing.T) { c := NewTestConfig(t) c.SetupPorterHome() c.TestContext.AddTestFile("testdata/simple.porter.yaml", Name) require.NoError(t, c.LoadManifest()) depStep := c.Manifest.Install[0] err := c.Manifest.ApplyOutputs(depStep, []string{"foo=bar"}) require.NoError(t, err) assert.Contains(t, c.Manifest.outputs, "foo") assert.Equal(t, "bar", 
c.Manifest.outputs["foo"]) } func TestManifest_ApplyDependencyOutputs(t *testing.T) { testcases := []struct { name string rawOutputs []string wantOutputs map[string]string wantError string }{ { name: "happy path", rawOutputs: []string{"host=localhost"}, wantOutputs: map[string]string{"host": "localhost"}, }, { name: "value with equals sign", rawOutputs: []string{"cert=abc123==="}, wantOutputs: map[string]string{"cert": "abc123==="}, }, { name: "missing equals sign", rawOutputs: []string{"foo"}, wantError: "invalid output assignment", }, } for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { c := NewTestConfig(t) c.SetupPorterHome() c.TestContext.AddTestFile("testdata/porter.yaml", Name) c.TestContext.AddTestDirectory("testdata/bundles", "bundles") require.NoError(t, c.LoadManifest()) depStep := c.Manifest.Install[0] err := c.Manifest.ApplyOutputs(depStep, tc.rawOutputs) if tc.wantError == "" { require.NoError(t, err) } else { require.Contains(t, err.Error(), tc.wantError) return } depM := c.Manifest.Dependencies[0].m for wantKey, wantValue := range tc.wantOutputs { assert.Contains(t, depM.outputs, wantKey) assert.Equal(t, wantValue, depM.outputs[wantKey]) } }) } } func TestManifest_resolveSource(t *testing.T) { testcases := []struct { name string outputs map[string]string source string wantResult interface{} wantError string }{ { name: "happy path", outputs: map[string]string{"foo": "bar"}, source: "bundle.outputs.foo", wantResult: "bar", }, { name: "missing output", outputs: map[string]string{"foo": "bar"}, source: "bundle.outputs.missing", wantError: "no value found for source specification: bundle.outputs.missing", }, } for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { m := &Manifest{ outputs: tc.outputs, } result, err := m.resolveValue(tc.source) if tc.wantError == "" { require.NoError(t, err) } else { require.Contains(t, err.Error(), tc.wantError) return } assert.Equal(t, tc.wantResult, result) }) } } func TestManifest_MergeParameters(t *testing.T) { dep := &Dependency{ Name: "mysql", Parameters: map[string]string{"database": "wordpress"}, m: &Manifest{ Name: "mysql", Parameters: []ParameterDefinition{ {Name: "database"}, }, }, } m := &Manifest{ Name: "wordpress", Dependencies: []*Dependency{dep}, } err := m.MergeParameters(dep) require.NoError(t, err) require.Len(t, m.Parameters, 1) assert.Equal(t, "wordpress", m.Parameters[0].DefaultValue) }
test_linked_list.py
import pytest from my_python_algorithms.data_structures.linked_list import LinkedList, Node def test_node(): n = Node(1) assert 1 == n.value assert None is n.next def test_empty_ll(): ll = LinkedList() assert None is ll.head def test_ll_with_head(): ll = LinkedList(1) assert 1 == ll.head.value def test_append_with_no_head(): ll = LinkedList() ll.append(1) assert 1 == ll.head.value def test_append(): ll = LinkedList(1) ll.append(2) assert 2 == ll.head.next.value def test_indexing_1(): ll = LinkedList(1) assert 1 == ll[0] def test_indexing_2(): ll = LinkedList(1) ll.append(2) assert 2 == ll[1] def
test_indexing_error_1
(): ll = LinkedList() with pytest.raises(IndexError): ll[0] def test_indexing_error_2(): ll = LinkedList(1) ll.append(2) with pytest.raises(IndexError): ll[2] def test_index(): ll = LinkedList(1) assert 0 == ll.index(1) def test_index_error_1(): ll = LinkedList() with pytest.raises(ValueError): ll.index(1) def test_index_error_1(): ll = LinkedList(1) with pytest.raises(ValueError): ll.index(2) def test_insert_head(): ll = LinkedList(1) ll.insert(0, "hello") assert "hello" == ll[0] def test_insert_1(): ll = LinkedList(1) ll.append(2) ll.append(3) ll.insert(1, "hello") assert 1 == ll[0] assert "hello" == ll[1] assert 2 == ll[2] assert 3 == ll[3] def test_insert_2(): ll = LinkedList(1) ll.insert(1, 'hey') assert 'hey' == ll[1] def test_insert_error_1(): ll = LinkedList() with pytest.raises(IndexError): ll.insert(1, 1) def test_insert_error_2(): ll = LinkedList(1) with pytest.raises(IndexError): ll.insert(2, 1) def test_insert_error_3(): ll = LinkedList(1) ll.append(2) ll.append(3) with pytest.raises(IndexError): ll.insert(4, "hey") def test_delete_head(): ll = LinkedList(1) ll.delete(0) assert None is ll.head def test_delete_1(): ll = LinkedList(1) ll.append(2) ll.delete(0) assert 2 == ll[0] with pytest.raises(IndexError): ll[1] def test_delete_error_1(): ll = LinkedList() with pytest.raises(IndexError): ll.delete(0) def test_delete_error_2(): ll = LinkedList(1) ll.append(2) with pytest.raises(IndexError): ll.delete(3)
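# A quick usage sketch (not part of the test module), mirroring the
# behaviors the tests above assert: append, positional insert, item access,
# and value lookup.
if __name__ == "__main__":
    ll = LinkedList(1)
    ll.append(2)
    ll.insert(1, "hello")
    assert [ll[0], ll[1], ll[2]] == [1, "hello", 2]
    assert ll.index(2) == 2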
decode.py
from functools import partial from pyais.exceptions import UnknownMessageException import typing import bitarray from pyais.constants import ( NavigationStatus, ManeuverIndicator, TransmitMode, EpfdType, ShipType, StationType, StationIntervals, NavAid ) from pyais.util import get_int, encode_bin_as_ascii6 def decode_msg_1(bit_arr: bitarray.bitarray) -> typing.Dict: """ AIS Vessel position report using SOTDMA (Self-Organizing Time Division Multiple Access) Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_types_1_2_and_3_position_report_class_a """ get_int_from_data = partial(get_int, bit_arr) return { 'type': get_int_from_data(0, 6), 'repeat': get_int_from_data(6, 8), 'mmsi': get_int_from_data(8, 38), 'status': NavigationStatus(get_int_from_data(38, 42)), 'turn': get_int_from_data(42, 50, signed=True), 'speed': get_int_from_data(50, 60) / 10.0, 'accuracy': bit_arr[60], 'lon': get_int_from_data(61, 89, signed=True) / 600000.0, 'lat': get_int_from_data(89, 116, signed=True) / 600000.0, 'course': get_int_from_data(116, 128) * 0.1, 'heading': get_int_from_data(128, 137), 'second': get_int_from_data(137, 143), 'maneuver': ManeuverIndicator(get_int_from_data(143, 145)), 'raim': bit_arr[148], 'radio': get_int_from_data(149, bit_arr.length()), } def decode_msg_2(bit_arr: bitarray.bitarray) -> typing.Dict: """AIS Vessel position report using SOTDMA (Self-Organizing Time Division Multiple Access) Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_types_1_2_and_3_position_report_class_a """ return decode_msg_1(bit_arr) def decode_msg_3(bit_arr: bitarray.bitarray) -> typing.Dict: """ AIS Vessel position report using ITDMA (Incremental Time Division Multiple Access) Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_types_1_2_and_3_position_report_class_a """ return decode_msg_1(bit_arr) def decode_msg_4(bit_arr: bitarray.bitarray) -> typing.Dict: """ AIS Vessel position report using SOTDMA (Self-Organizing Time Division Multiple Access) Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_4_base_station_report """ get_int_from_data = partial(get_int, bit_arr) return { 'type': get_int_from_data(0, 6), 'repeat': get_int_from_data(6, 8), 'mmsi': get_int_from_data(8, 38), 'year': get_int_from_data(38, 52), 'month': get_int_from_data(52, 56), 'day': get_int_from_data(56, 61), 'hour': get_int_from_data(61, 66), 'minute': get_int_from_data(66, 72), 'second': get_int_from_data(72, 78), 'accuracy': bit_arr[78], 'lon': get_int_from_data(79, 107, signed=True) / 600000.0, 'lat': get_int_from_data(107, 134, signed=True) / 600000.0, 'epfd': EpfdType(get_int_from_data(134, 138)), 'raim': bit_arr[148], 'radio': get_int_from_data(148, len(bit_arr)), } def decode_msg_5(bit_arr: bitarray.bitarray) -> typing.Dict: """ Static and Voyage Related Data Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_5_static_and_voyage_related_data """ get_int_from_data = partial(get_int, bit_arr) return { 'type': get_int_from_data(0, 6), 'repeat': get_int_from_data(6, 8), 'mmsi': get_int_from_data(8, 38), 'ais_version': get_int_from_data(38, 40), 'imo': get_int_from_data(40, 70), 'callsign': encode_bin_as_ascii6(bit_arr[70:112]), 'shipname': encode_bin_as_ascii6(bit_arr[112:232]), 'shiptype': ShipType(get_int_from_data(232, 240)), 'to_bow': get_int_from_data(240, 249), 'to_stern': get_int_from_data(249, 258), 'to_port': get_int_from_data(258, 264), 'to_starboard': get_int_from_data(264, 270), 'epfd': EpfdType(get_int_from_data(270, 274)), 'month': get_int_from_data(274, 278), 'day': get_int_from_data(278, 283), 'hour': get_int_from_data(283, 288), 'minute': 

def decode_msg_6(bit_arr: bitarray.bitarray) -> typing.Dict:
    """
    Binary Addressed Message
    Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_6_binary_addressed_message
    """
    get_int_from_data = partial(get_int, bit_arr)
    return {
        'type': get_int_from_data(0, 6),
        'repeat': get_int_from_data(6, 8),
        'mmsi': get_int_from_data(8, 38),
        'seqno': get_int_from_data(38, 40),
        'dest_mmsi': get_int_from_data(40, 70),
        'retransmit': bit_arr[70],
        'dac': get_int_from_data(72, 82),
        'fid': get_int_from_data(82, 88),
        'data': bit_arr[88:].to01(),
    }


def decode_msg_7(bit_arr: bitarray.bitarray) -> typing.Dict:
    """
    Binary Acknowledge
    Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_7_binary_acknowledge
    """
    get_int_from_data = partial(get_int, bit_arr)
    return {
        'type': get_int_from_data(0, 6),
        'repeat': get_int_from_data(6, 8),
        'mmsi': get_int_from_data(8, 38),
        'mmsi1': get_int_from_data(40, 70),
        'mmsiseq1': get_int_from_data(70, 72),
        'mmsi2': get_int_from_data(72, 102),
        'mmsiseq2': get_int_from_data(102, 104),
        'mmsi3': get_int_from_data(104, 134),
        'mmsiseq3': get_int_from_data(134, 136),
        'mmsi4': get_int_from_data(136, 166),
        'mmsiseq4': get_int_from_data(166, 168),
    }


def decode_msg_8(bit_arr: bitarray.bitarray) -> typing.Dict:
    """
    Binary Broadcast Message
    Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_8_binary_broadcast_message
    """
    get_int_from_data = partial(get_int, bit_arr)
    return {
        'type': get_int_from_data(0, 6),
        'repeat': get_int_from_data(6, 8),
        'mmsi': get_int_from_data(8, 38),
        'dac': get_int_from_data(40, 50),
        'fid': get_int_from_data(50, 56),
        'data': bit_arr[56:].to01(),
    }


def decode_msg_9(bit_arr: bitarray.bitarray) -> typing.Dict:
    """
    Standard SAR Aircraft Position Report
    Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_9_standard_sar_aircraft_position_report
    """
    get_int_from_data = partial(get_int, bit_arr)
    return {
        'type': get_int_from_data(0, 6),
        'repeat': get_int_from_data(6, 8),
        'mmsi': get_int_from_data(8, 38),
        'alt': get_int_from_data(38, 50),
        'speed': get_int_from_data(50, 60),
        'accuracy': bit_arr[60],
        'lon': get_int_from_data(61, 89, signed=True) / 600000.0,
        'lat': get_int_from_data(89, 116, signed=True) / 600000.0,
        'course': get_int_from_data(116, 128) * 0.1,
        'second': get_int_from_data(128, 134),
        'dte': bit_arr[142],
        'assigned': bit_arr[146],
        'raim': bit_arr[147],
        'radio': get_int_from_data(148, 168),
    }


def decode_msg_10(bit_arr: bitarray.bitarray) -> typing.Dict:
    """
    UTC/Date Inquiry
    Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_10_utc_date_inquiry
    """
    get_int_from_data = partial(get_int, bit_arr)
    return {
        'type': get_int_from_data(0, 6),
        'repeat': get_int_from_data(6, 8),
        'mmsi': get_int_from_data(8, 38),
        'dest_mmsi': get_int_from_data(40, 70),
    }


def decode_msg_11(bit_arr: bitarray.bitarray) -> typing.Dict:
    """
    UTC/Date Response
    Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_11_utc_date_response
    """
    return decode_msg_4(bit_arr)


def decode_msg_12(bit_arr: bitarray.bitarray) -> typing.Dict:
    """
    Addressed Safety-Related Message
    Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_12_addressed_safety_related_message
    """
    get_int_from_data = partial(get_int, bit_arr)
    return {
        'type': get_int_from_data(0, 6),
        'repeat': get_int_from_data(6, 8),
        'mmsi': get_int_from_data(8, 38),
        'seqno': get_int_from_data(38, 40),
        'dest_mmsi': get_int_from_data(40, 70),
        'retransmit': bit_arr[70],
        'text': encode_bin_as_ascii6(bit_arr[72:]),
    }

def decode_msg_13(bit_arr: bitarray.bitarray) -> typing.Dict:
    """
    Identical to type 7
    """
    return decode_msg_7(bit_arr)


def decode_msg_14(bit_arr: bitarray.bitarray) -> typing.Dict:
    """
    Safety-Related Broadcast Message
    Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_14_safety_related_broadcast_message
    """
    get_int_from_data = partial(get_int, bit_arr)
    return {
        'type': get_int_from_data(0, 6),
        'repeat': get_int_from_data(6, 8),
        'mmsi': get_int_from_data(8, 38),
        'text': encode_bin_as_ascii6(bit_arr[40:]),
    }


def decode_msg_15(bit_arr: bitarray.bitarray) -> typing.Dict:
    """
    Interrogation
    Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_15_interrogation
    """
    get_int_from_data = partial(get_int, bit_arr)
    return {
        'type': get_int_from_data(0, 6),
        'repeat': get_int_from_data(6, 8),
        'mmsi': get_int_from_data(8, 38),
        'mmsi1': get_int_from_data(40, 70),
        'type1_1': get_int_from_data(70, 76),
        'offset1_1': get_int_from_data(76, 88),
        'type1_2': get_int_from_data(90, 96),
        'offset1_2': get_int_from_data(96, 108),
        'mmsi2': get_int_from_data(110, 140),
        'type2_1': get_int_from_data(140, 146),
        'offset2_1': get_int_from_data(146, 157),
    }


def decode_msg_16(bit_arr: bitarray.bitarray) -> typing.Dict:
    """
    Assignment Mode Command
    Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_16_assignment_mode_command
    """
    get_int_from_data = partial(get_int, bit_arr)
    return {
        'type': get_int_from_data(0, 6),
        'repeat': get_int_from_data(6, 8),
        'mmsi': get_int_from_data(8, 38),
        'mmsi1': get_int_from_data(40, 70),
        'offset1': get_int_from_data(70, 82),
        'increment1': get_int_from_data(82, 92),
        'mmsi2': get_int_from_data(92, 122),
        'offset2': get_int_from_data(122, 134),
        'increment2': get_int_from_data(134, 144),
    }


def decode_msg_17(bit_arr: bitarray.bitarray) -> typing.Dict:
    """
    DGNSS Broadcast Binary Message
    Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_17_dgnss_broadcast_binary_message
    """
    get_int_from_data = partial(get_int, bit_arr)
    return {
        'type': get_int_from_data(0, 6),
        'repeat': get_int_from_data(6, 8),
        'mmsi': get_int_from_data(8, 38),
        'lon': get_int_from_data(40, 58, signed=True),
        'lat': get_int_from_data(58, 75, signed=True),
        'data': get_int_from_data(80, 816),
    }


def decode_msg_18(bit_arr: bitarray.bitarray) -> typing.Dict:
    """
    Standard Class B CS Position Report
    Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_18_standard_class_b_cs_position_report
    """
    get_int_from_data = partial(get_int, bit_arr)
    return {
        'type': get_int_from_data(0, 6),
        'repeat': get_int_from_data(6, 8),
        'mmsi': get_int_from_data(8, 38),
        'speed': get_int_from_data(46, 56) * 0.1,
        'accuracy': bit_arr[56],
        'lon': get_int_from_data(57, 85, signed=True) / 600000.0,
        'lat': get_int_from_data(85, 112, signed=True) / 600000.0,
        'course': get_int_from_data(112, 124) * 0.1,
        'heading': get_int_from_data(124, 133),
        'second': get_int_from_data(133, 139),
        'regional': get_int_from_data(139, 141),
        'cs': bit_arr[141],
        'display': bit_arr[142],
        'dsc': bit_arr[143],
        'band': bit_arr[144],
        'msg22': bit_arr[145],
        'assigned': bit_arr[146],
        'raim': bit_arr[147],
        'radio': get_int_from_data(148, len(bit_arr)),
    }

def decode_msg_19(bit_arr: bitarray.bitarray) -> typing.Dict:
    """
    Extended Class B CS Position Report
    Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_19_extended_class_b_cs_position_report
    """
    get_int_from_data = partial(get_int, bit_arr)
    return {
        'type': get_int_from_data(0, 6),
        'repeat': get_int_from_data(6, 8),
        'mmsi': get_int_from_data(8, 38),
        'speed': get_int_from_data(46, 56) * 0.1,
        'accuracy': bit_arr[56],
        'lon': get_int_from_data(57, 85, signed=True) / 600000.0,
        'lat': get_int_from_data(85, 112, signed=True) / 600000.0,
        'course': get_int_from_data(112, 124) * 0.1,
        'heading': get_int_from_data(124, 133),
        'second': get_int_from_data(133, 139),
        'regional': get_int_from_data(139, 143),
        'shipname': encode_bin_as_ascii6(bit_arr[143:263]),
        'shiptype': ShipType(get_int_from_data(263, 271)),
        'to_bow': get_int_from_data(271, 280),
        'to_stern': get_int_from_data(280, 289),
        'to_port': get_int_from_data(289, 295),
        'to_starboard': get_int_from_data(295, 301),
        'epfd': EpfdType(get_int_from_data(301, 305)),
        'raim': bit_arr[305],
        'dte': bit_arr[306],
        'assigned': bit_arr[307],
    }


def decode_msg_20(bit_arr: bitarray.bitarray) -> typing.Dict:
    """
    Data Link Management Message
    Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_20_data_link_management_message
    """
    get_int_from_data = partial(get_int, bit_arr)
    return {
        'type': get_int_from_data(0, 6),
        'repeat': get_int_from_data(6, 8),
        'mmsi': get_int_from_data(8, 38),
        'offset1': get_int_from_data(40, 52),
        'number1': get_int_from_data(52, 56),
        'timeout1': get_int_from_data(56, 59),
        'increment1': get_int_from_data(59, 70),
        'offset2': get_int_from_data(70, 82),
        'number2': get_int_from_data(82, 86),
        'timeout2': get_int_from_data(86, 89),
        'increment2': get_int_from_data(89, 100),
        'offset3': get_int_from_data(100, 112),
        'number3': get_int_from_data(112, 116),
        'timeout3': get_int_from_data(116, 119),
        'increment3': get_int_from_data(119, 130),
        'offset4': get_int_from_data(130, 142),
        'number4': get_int_from_data(142, 146),
        'timeout4': get_int_from_data(146, 149),
        'increment4': get_int_from_data(149, 160),
    }


def decode_msg_21(bit_arr: bitarray.bitarray) -> typing.Dict:
    """
    Aid-to-Navigation Report
    Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_21_aid_to_navigation_report
    """
    get_int_from_data = partial(get_int, bit_arr)
    return {
        'type': get_int_from_data(0, 6),
        'repeat': get_int_from_data(6, 8),
        'mmsi': get_int_from_data(8, 38),
        'aid_type': NavAid(get_int_from_data(38, 43)),
        'name': encode_bin_as_ascii6(bit_arr[43:163]),
        'accuracy': bit_arr[163],
        'lon': get_int_from_data(164, 192, signed=True) / 600000.0,
        'lat': get_int_from_data(192, 219, signed=True) / 600000.0,
        'to_bow': get_int_from_data(219, 228),
        'to_stern': get_int_from_data(228, 237),
        'to_port': get_int_from_data(237, 243),
        'to_starboard': get_int_from_data(243, 249),
        'epfd': EpfdType(get_int_from_data(249, 253)),
        'second': get_int_from_data(253, 259),
        'off_position': bit_arr[259],
        'regional': get_int_from_data(260, 268),
        'raim': bit_arr[268],
        'virtual_aid': bit_arr[269],
        'assigned': bit_arr[270],
        'name_extension': encode_bin_as_ascii6(bit_arr[272:]),
    }


def decode_msg_22(bit_arr: bitarray.bitarray) -> typing.Dict:
    """
    Channel Management
    Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_22_channel_management
    """
    get_int_from_data = partial(get_int, bit_arr)
    data = {
        'type': get_int_from_data(0, 6),
        'repeat': get_int_from_data(6, 8),
        'mmsi': get_int_from_data(8, 38),
        'channel_a': get_int_from_data(40, 52),
        'channel_b': get_int_from_data(52, 64),
        'txrx': get_int_from_data(64, 68),
        'power': bit_arr[68],
        'addressed': bit_arr[139],
        'band_a': bit_arr[140],
        'band_b': bit_arr[141],
        'zonesize': get_int_from_data(142, 145),
    }

    # Addressed
    if data['addressed']:
        d = {
            'dest1': get_int_from_data(69, 99),
            'dest2': get_int_from_data(104, 134),
        }
    # Broadcast
    else:
        d = {
            'ne_lon': get_int_from_data(69, 87, signed=True) * 0.1,
            'ne_lat': get_int_from_data(87, 104, signed=True) * 0.1,
            'sw_lon': get_int_from_data(104, 122, signed=True) * 0.1,
            'sw_lat': get_int_from_data(122, 139, signed=True) * 0.1,
        }

    data.update(d)
    return data

def decode_msg_23(bit_arr: bitarray.bitarray) -> typing.Dict:
    """
    Group Assignment Command
    Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_23_group_assignment_command
    """
    get_int_from_data = partial(get_int, bit_arr)
    return {
        'type': get_int_from_data(0, 6),
        'repeat': get_int_from_data(6, 8),
        'mmsi': get_int_from_data(8, 38),
        'ne_lon': get_int_from_data(40, 58, signed=True) * 0.1,
        'ne_lat': get_int_from_data(58, 75, signed=True) * 0.1,
        'sw_lon': get_int_from_data(75, 93, signed=True) * 0.1,
        'sw_lat': get_int_from_data(93, 110, signed=True) * 0.1,
        'station_type': StationType(get_int_from_data(110, 114)),
        'ship_type': ShipType(get_int_from_data(114, 122)),
        'txrx': TransmitMode(get_int_from_data(144, 146)),
        'interval': StationIntervals(get_int_from_data(146, 150)),
        'quiet': get_int_from_data(150, 154),
    }


def decode_msg_24(bit_arr: bitarray.bitarray) -> typing.Dict:
    """
    Static Data Report
    Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_24_static_data_report
    """
    get_int_from_data = partial(get_int, bit_arr)
    data: typing.Dict = {
        'type': get_int_from_data(0, 6),
        'repeat': get_int_from_data(6, 8),
        'mmsi': get_int_from_data(8, 38),
        'partno': get_int_from_data(38, 40),
    }

    if not data['partno']:
        # Part A
        d: typing.Dict = {
            'shipname': encode_bin_as_ascii6(bit_arr[40:160]),
        }
    else:
        # Part B
        d = {
            'shiptype': ShipType(get_int_from_data(40, 48)),
            'vendorid': encode_bin_as_ascii6(bit_arr[48:66]),
            'model': get_int_from_data(66, 70),
            'serial': get_int_from_data(70, 90),
            'callsign': encode_bin_as_ascii6(bit_arr[90:132]),
            'to_bow': get_int_from_data(132, 141),
            'to_stern': get_int_from_data(141, 150),
            'to_port': get_int_from_data(150, 156),
            'to_starboard': get_int_from_data(156, 162),
            # For auxiliary craft, the dimension fields double as the mothership MMSI
            'mothership_mmsi': get_int_from_data(132, 162),
        }

    data.update(d)
    return data


def decode_msg_25(bit_arr: bitarray.bitarray) -> typing.Dict:
    """
    Single Slot Binary Message
    Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_25_single_slot_binary_message

    NOTE: This message type is quite uncommon and I was not able to find any
    real-world occurrence of the type. Also documentation seems to vary.
    Use with caution.
    """
    get_int_from_data = partial(get_int, bit_arr)
    data = {
        'type': get_int_from_data(0, 6),
        'repeat': get_int_from_data(6, 8),
        'mmsi': get_int_from_data(8, 38),
        'addressed': bit_arr[38],
        'structured': bit_arr[39],
    }

    if data['addressed']:
        d = {
            'dest_mmsi': get_int_from_data(40, 70),
        }
        data.update(d)

    # The 30-bit destination MMSI (if addressed) precedes the application data
    lo_ix = 70 if data['addressed'] else 40
    hi_ix = lo_ix + 16

    if data['structured']:
        d = {
            'app_id': get_int_from_data(lo_ix, hi_ix),
            'data': bit_arr[hi_ix:].to01(),
        }
    else:
        d = {
            'data': bit_arr[lo_ix:].to01(),
        }
    data.update(d)

    return data
""" get_int_from_data = partial(get_int, bit_arr) radio_status_offset = len(bit_arr) - 20 data = { 'type': get_int_from_data(0, 6), 'repeat': get_int_from_data(8, 8), 'mmsi': get_int_from_data(8, 38), 'addressed': bit_arr[38], 'structured': bit_arr[39], 'radio': get_int_from_data(radio_status_offset, len(bit_arr)) } if data['addressed']: d = { 'dest_mmsi': get_int_from_data(40, 70), } data.update(d) lo_ix = 40 if data['addressed'] else 70 hi_ix = lo_ix + 16 if data['structured']: d = { 'app_id': get_int_from_data(lo_ix, hi_ix), 'data': bit_arr[hi_ix:radio_status_offset].to01() } else: d = { 'data': bit_arr[lo_ix:radio_status_offset].to01() } data.update(d) return data def decode_msg_27(bit_arr: bitarray.bitarray) -> typing.Dict: """ Long Range AIS Broadcast message Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_27_long_range_ais_broadcast_message """ get_int_from_data = partial(get_int, bit_arr) return { 'type': get_int_from_data(0, 6), 'repeat': get_int_from_data(8, 8), 'mmsi': get_int_from_data(8, 38), 'accuracy': bit_arr[38], 'raim': bit_arr[39], 'status': NavigationStatus(get_int_from_data(40, 44)), 'lon': get_int_from_data(44, 62, signed=True) / 600.0, 'lat': get_int_from_data(62, 79, signed=True) / 600.0, 'speed': get_int_from_data(79, 85), 'course': get_int_from_data(85, 94), 'gnss': bit_arr[94], } # Decoding Lookup Table DECODE_MSG = [ decode_msg_1, # there are messages with a zero (0) as an id. these seem to be the same as type 1 messages decode_msg_1, decode_msg_2, decode_msg_3, decode_msg_4, decode_msg_5, decode_msg_6, decode_msg_7, decode_msg_8, decode_msg_9, decode_msg_10, decode_msg_11, decode_msg_12, decode_msg_13, decode_msg_14, decode_msg_15, decode_msg_16, decode_msg_17, decode_msg_18, decode_msg_19, decode_msg_20, decode_msg_21, decode_msg_22, decode_msg_23, decode_msg_24, decode_msg_25, decode_msg_26, decode_msg_27, ] def _decode(msg) -> typing.Dict: """ Decodes a given NMEA message. """ try: return DECODE_MSG[msg.ais_id](msg.bit_array) except IndexError as e: raise UnknownMessageException(f"The message {msg} is not currently supported!") from e def decode(msg) -> typing.Dict: """ Decodes a given message. @param msg: A object of type NMEAMessage to decode """ return _decode(msg)
base_node_grpc_server.rs
// Copyright 2021. The Tari Project // // Redistribution and use in source and binary forms, with or without modification, are permitted provided that the // following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following // disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the // following disclaimer in the documentation and/or other materials provided with the distribution. // // 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote // products derived from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, // INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. use std::{ cmp, convert::{TryFrom, TryInto}, }; use either::Either; use futures::{channel::mpsc, SinkExt}; use log::*; use tari_app_grpc::{ tari_rpc, tari_rpc::{CalcType, Sorting}, }; use tari_app_utilities::consts; use tari_common_types::types::{Commitment, PublicKey, Signature}; use tari_comms::{Bytes, CommsNode}; use tari_core::{ base_node::{ comms_interface::CommsInterfaceError, state_machine_service::states::StateInfo, LocalNodeCommsInterface, StateMachineHandle, }, blocks::{Block, BlockHeader, NewBlockTemplate}, chain_storage::{ChainStorageError, PrunedOutput}, consensus::{emission::Emission, ConsensusDecoding, ConsensusEncoding, ConsensusManager, NetworkConsensus}, iterators::NonOverlappingIntegerPairIter, mempool::{service::LocalMempoolService, TxStorageResponse}, proof_of_work::PowAlgorithm, transactions::{aggregated_body::AggregateBody, transaction_components::Transaction}, }; use tari_p2p::{auto_update::SoftwareUpdaterHandle, services::liveness::LivenessHandle}; use tari_utilities::{hex::Hex, message_format::MessageFormat, ByteArray, Hashable}; use tokio::task; use tonic::{Request, Response, Status}; use crate::{ builder::BaseNodeContext, grpc::{ blocks::{block_fees, block_heights, block_size, GET_BLOCKS_MAX_HEIGHTS, GET_BLOCKS_PAGE_SIZE}, hash_rate::HashRateMovingAverage, helpers::{mean, median}, }, }; const LOG_TARGET: &str = "tari::base_node::grpc"; const GET_TOKENS_IN_CIRCULATION_MAX_HEIGHTS: usize = 1_000_000; const GET_TOKENS_IN_CIRCULATION_PAGE_SIZE: usize = 1_000; // The maximum number of difficulty ints that can be requested at a time. These will be streamed to the // client, so memory is not really a concern here, but a malicious client could request a large // number here to keep the node busy const GET_DIFFICULTY_MAX_HEIGHTS: u64 = 10_000; const GET_DIFFICULTY_PAGE_SIZE: usize = 1_000; // The maximum number of headers a client can request at a time. If the client requests more than // this, this is the maximum that will be returned. 
const LIST_HEADERS_MAX_NUM_HEADERS: u64 = 10_000; // The number of headers to request via the local interface at a time. These are then streamed to // client. const LIST_HEADERS_PAGE_SIZE: usize = 10; // The `num_headers` value if none is provided. const LIST_HEADERS_DEFAULT_NUM_HEADERS: u64 = 10; const BLOCK_TIMING_MAX_BLOCKS: u64 = 10_000; pub struct BaseNodeGrpcServer { node_service: LocalNodeCommsInterface, mempool_service: LocalMempoolService, network: NetworkConsensus, state_machine_handle: StateMachineHandle, consensus_rules: ConsensusManager, software_updater: SoftwareUpdaterHandle, comms: CommsNode, liveness: LivenessHandle, } impl BaseNodeGrpcServer { pub fn from_base_node_context(ctx: &BaseNodeContext) -> Self { Self { node_service: ctx.local_node(), mempool_service: ctx.local_mempool(), network: ctx.network().into(), state_machine_handle: ctx.state_machine(), consensus_rules: ctx.consensus_rules().clone(), software_updater: ctx.software_updater(), comms: ctx.base_node_comms().clone(), liveness: ctx.liveness(), } } } pub async fn get_heights( request: &tari_rpc::HeightRequest, handler: LocalNodeCommsInterface, ) -> Result<(u64, u64), Status> { block_heights(handler, request.start_height, request.end_height, request.from_tip).await } #[tonic::async_trait] impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { type FetchMatchingUtxosStream = mpsc::Receiver<Result<tari_rpc::FetchMatchingUtxosResponse, Status>>; type GetBlocksStream = mpsc::Receiver<Result<tari_rpc::HistoricalBlock, Status>>; type GetMempoolTransactionsStream = mpsc::Receiver<Result<tari_rpc::GetMempoolTransactionsResponse, Status>>; type GetNetworkDifficultyStream = mpsc::Receiver<Result<tari_rpc::NetworkDifficultyResponse, Status>>; type GetPeersStream = mpsc::Receiver<Result<tari_rpc::GetPeersResponse, Status>>; type GetTokensInCirculationStream = mpsc::Receiver<Result<tari_rpc::ValueAtHeightResponse, Status>>; type GetTokensStream = mpsc::Receiver<Result<tari_rpc::GetTokensResponse, Status>>; type ListAssetRegistrationsStream = mpsc::Receiver<Result<tari_rpc::ListAssetRegistrationsResponse, Status>>; type ListHeadersStream = mpsc::Receiver<Result<tari_rpc::BlockHeader, Status>>; type SearchKernelsStream = mpsc::Receiver<Result<tari_rpc::HistoricalBlock, Status>>; type SearchUtxosStream = mpsc::Receiver<Result<tari_rpc::HistoricalBlock, Status>>; async fn get_network_difficulty( &self, request: Request<tari_rpc::HeightRequest>, ) -> Result<Response<Self::GetNetworkDifficultyStream>, Status> { let request = request.into_inner(); debug!( target: LOG_TARGET, "Incoming GRPC request for GetNetworkDifficulty: from_tip: {:?} start_height: {:?} end_height: {:?}", request.from_tip, request.start_height, request.end_height ); let mut handler = self.node_service.clone(); let (start_height, end_height) = get_heights(&request, handler.clone()).await?; // Overflow safety: checked in get_heights let num_requested = end_height - start_height; if num_requested > GET_DIFFICULTY_MAX_HEIGHTS { return Err(Status::invalid_argument(format!( "Number of headers requested exceeds maximum. 
Expected less than {} but got {}", GET_DIFFICULTY_MAX_HEIGHTS, num_requested ))); } let (mut tx, rx) = mpsc::channel(cmp::min(num_requested as usize, GET_DIFFICULTY_PAGE_SIZE)); let mut sha3_hash_rate_moving_average = HashRateMovingAverage::new(PowAlgorithm::Sha3, self.consensus_rules.clone()); let mut monero_hash_rate_moving_average = HashRateMovingAverage::new(PowAlgorithm::Monero, self.consensus_rules.clone()); task::spawn(async move { let page_iter = NonOverlappingIntegerPairIter::new(start_height, end_height + 1, GET_DIFFICULTY_PAGE_SIZE); for (start, end) in page_iter { // headers are returned by height let headers = match handler.get_headers(start..=end).await { Ok(headers) => headers, Err(err) => { warn!(target: LOG_TARGET, "Base node service error: {:?}", err,); let _ = tx .send(Err(Status::internal("Internal error when fetching blocks"))) .await; return; }, }; if headers.is_empty() { let _network_difficulty_response = tx.send(Err(Status::invalid_argument(format!( "No blocks found within range {} - {}", start, end )))); return; } for chain_header in &headers { let current_difficulty = chain_header.accumulated_data().target_difficulty; let current_timestamp = chain_header.header().timestamp; let current_height = chain_header.header().height; let pow_algo = chain_header.header().pow.pow_algo; // update the moving average calculation with the header data let current_hash_rate_moving_average = match pow_algo { PowAlgorithm::Monero => &mut monero_hash_rate_moving_average, PowAlgorithm::Sha3 => &mut sha3_hash_rate_moving_average, }; current_hash_rate_moving_average.add(current_height, current_difficulty); let sha3_estimated_hash_rate = sha3_hash_rate_moving_average.average(); let monero_estimated_hash_rate = monero_hash_rate_moving_average.average(); let estimated_hash_rate = sha3_estimated_hash_rate + monero_estimated_hash_rate; let difficulty = tari_rpc::NetworkDifficultyResponse { difficulty: current_difficulty.as_u64(), estimated_hash_rate, sha3_estimated_hash_rate, monero_estimated_hash_rate, height: current_height, timestamp: current_timestamp.as_u64(), pow_algo: pow_algo.as_u64(), }; if let Err(err) = tx.send(Ok(difficulty)).await { warn!(target: LOG_TARGET, "Error sending difficulties via GRPC: {}", err); return; } } } }); debug!( target: LOG_TARGET, "Sending GetNetworkDifficulty response stream to client" ); Ok(Response::new(rx)) } async fn get_mempool_transactions( &self, request: Request<tari_rpc::GetMempoolTransactionsRequest>, ) -> Result<Response<Self::GetMempoolTransactionsStream>, Status> { let _request = request.into_inner(); debug!(target: LOG_TARGET, "Incoming GRPC request for GetMempoolTransactions",); let mut mempool = self.mempool_service.clone(); let (mut tx, rx) = mpsc::channel(1000); task::spawn(async move { let transactions = match mempool.get_mempool_state().await { Err(err) => { warn!(target: LOG_TARGET, "Error communicating with base node: {}", err,); return; }, Ok(data) => data, }; for transaction in transactions.unconfirmed_pool { let transaction = match tari_rpc::Transaction::try_from(transaction) { Ok(t) => t, Err(e) => { warn!( target: LOG_TARGET, "Error sending converting transaction for GRPC: {}", e ); match tx.send(Err(Status::internal("Error converting transaction"))).await { Ok(_) => (), Err(send_err) => { warn!(target: LOG_TARGET, "Error sending error to GRPC client: {}", send_err) }, } return; }, }; match tx .send(Ok(tari_rpc::GetMempoolTransactionsResponse { transaction: Some(transaction), })) .await { Ok(_) => (), Err(err) => { warn!( target: 
LOG_TARGET, "Error sending mempool transaction via GRPC: {}", err ); match tx.send(Err(Status::unknown("Error sending data"))).await { Ok(_) => (), Err(send_err) => { warn!(target: LOG_TARGET, "Error sending error to GRPC client: {}", send_err) }, } return; }, } } }); debug!(target: LOG_TARGET, "Sending GetMempool response stream to client"); Ok(Response::new(rx)) } async fn list_headers( &self, request: Request<tari_rpc::ListHeadersRequest>, ) -> Result<Response<Self::ListHeadersStream>, Status> { let request = request.into_inner(); debug!( target: LOG_TARGET, "Incoming GRPC request for ListHeaders: from_height: {}, num_headers:{}, sorting:{}", request.from_height, request.num_headers, request.sorting ); let mut handler = self.node_service.clone(); let tip = match handler.get_metadata().await { Err(err) => { warn!(target: LOG_TARGET, "Error communicating with base node: {}", err,); return Err(Status::internal(err.to_string())); }, Ok(data) => data.height_of_longest_chain(), }; let sorting: Sorting = request.sorting(); let num_headers = match request.num_headers { 0 => LIST_HEADERS_DEFAULT_NUM_HEADERS, _ => request.num_headers, }; let num_headers = cmp::min(num_headers, LIST_HEADERS_MAX_NUM_HEADERS); let (mut tx, rx) = mpsc::channel(LIST_HEADERS_PAGE_SIZE); let from_height = cmp::min(request.from_height, tip); let (header_range, is_reversed) = if from_height == 0 { match sorting { Sorting::Desc => { let from = match tip.overflowing_sub(num_headers) { (_, true) => 0, (res, false) => res + 1, }; (from..=tip, true) }, Sorting::Asc => (0..=num_headers.saturating_sub(1), false), } } else { match sorting { Sorting::Desc => { let from = match from_height.overflowing_sub(num_headers) { (_, true) => 0, (res, false) => res + 1, }; (from..=from_height, true) }, Sorting::Asc => { let to = from_height.saturating_add(num_headers).saturating_sub(1); (from_height..=to, false) }, } }; task::spawn(async move { debug!( target: LOG_TARGET, "Starting base node request {}-{}", header_range.start(), header_range.end() ); let page_iter = NonOverlappingIntegerPairIter::new( *header_range.start(), *header_range.end() + 1, LIST_HEADERS_PAGE_SIZE, ); let page_iter = if is_reversed { Either::Left(page_iter.rev())
debug!(target: LOG_TARGET, "Page: {}-{}", start, end); // TODO: Better error handling let result_headers = match handler.get_headers(start..=end).await { Err(err) => { warn!(target: LOG_TARGET, "Internal base node service error: {}", err); return; }, Ok(data) => { if is_reversed { data.into_iter().rev().collect::<Vec<_>>() } else { data } }, }; let result_size = result_headers.len(); debug!(target: LOG_TARGET, "Result headers: {}", result_size); for header in result_headers { debug!(target: LOG_TARGET, "Sending block header: {}", header.height()); match tx.send(Ok(header.into_header().into())).await { Ok(_) => (), Err(err) => { warn!(target: LOG_TARGET, "Error sending block header via GRPC: {}", err); match tx.send(Err(Status::unknown("Error sending data"))).await { Ok(_) => (), Err(send_err) => { warn!(target: LOG_TARGET, "Error sending error to GRPC client: {}", send_err) }, } return; }, } } } }); debug!(target: LOG_TARGET, "Sending ListHeaders response stream to client"); Ok(Response::new(rx)) } async fn get_tokens( &self, request: Request<tari_rpc::GetTokensRequest>, ) -> Result<Response<Self::GetTokensStream>, Status> { let request = request.into_inner(); debug!( target: LOG_TARGET, "Incoming GRPC request for GetTokens: asset_pub_key: {}, unique_ids: [{}]", request.asset_public_key.to_hex(), request .unique_ids .iter() .map(|s| s.to_hex()) .collect::<Vec<_>>() .join(",") ); let pub_key = PublicKey::from_bytes(&request.asset_public_key) .map_err(|err| Status::invalid_argument(format!("Asset public Key is not a valid public key:{}", err)))?; let mut handler = self.node_service.clone(); let (mut tx, rx) = mpsc::channel(50); task::spawn(async move { let asset_pub_key_hex = request.asset_public_key.to_hex(); debug!( target: LOG_TARGET, "Starting thread to process GetTokens: asset_pub_key: {}", asset_pub_key_hex, ); let tokens = match handler.get_tokens(pub_key, request.unique_ids).await { Ok(tokens) => tokens, Err(err) => { warn!(target: LOG_TARGET, "Error communicating with base node: {:?}", err,); let _get_token_response = tx.send(Err(Status::internal("Internal error"))); return; }, }; debug!( target: LOG_TARGET, "Found {} tokens for {}", tokens.len(), asset_pub_key_hex ); for (token, mined_height) in tokens { let features = match token.features.clone().try_into() { Ok(f) => f, Err(err) => { warn!(target: LOG_TARGET, "Could not convert features: {}", err,); let _get_token_response = tx.send(Err(Status::internal(format!("Could not convert features:{}", err)))); break; }, }; match tx .send(Ok(tari_rpc::GetTokensResponse { asset_public_key: token .features .parent_public_key .map(|pk| pk.to_vec()) .unwrap_or_default(), unique_id: token.features.unique_id.unwrap_or_default(), owner_commitment: token.commitment.to_vec(), mined_in_block: vec![], mined_height, script: token.script.as_bytes(), features: Some(features), })) .await { Ok(_) => (), Err(err) => { warn!(target: LOG_TARGET, "Error sending token via GRPC: {}", err); match tx.send(Err(Status::unknown("Error sending data"))).await { Ok(_) => (), Err(send_err) => { warn!(target: LOG_TARGET, "Error sending error to GRPC client: {}", send_err) }, } return; }, } } }); Ok(Response::new(rx)) } async fn get_asset_metadata( &self, request: Request<tari_rpc::GetAssetMetadataRequest>, ) -> Result<Response<tari_rpc::GetAssetMetadataResponse>, Status> { let request = request.into_inner(); let mut handler = self.node_service.clone(); let metadata = handler .get_asset_metadata( PublicKey::from_bytes(&request.asset_public_key) .map_err(|_e| 
Status::invalid_argument("Not a valid asset public key"))?, ) .await .map_err(|e| Status::internal(e.to_string()))?; if let Some(m) = metadata { let mined_height = m.mined_height; let mined_in_block = m.header_hash.clone(); match m.output { PrunedOutput::Pruned { output_hash: _, witness_hash: _, } => return Err(Status::not_found("Output has been pruned")), PrunedOutput::NotPruned { output } => { if let Some(ref asset) = output.features.asset { const ASSET_METADATA_TEMPLATE_ID: u32 = 1; if asset.template_ids_implemented.contains(&ASSET_METADATA_TEMPLATE_ID) { // TODO: move to a better location, or better yet, have the grpc caller split the metadata let m = String::from_utf8(Vec::from(&output.features.metadata[1..])).unwrap(); let mut m = m .as_str() .split('|') .map(|s| s.to_string()) .collect::<Vec<String>>() .into_iter(); let name = m.next(); let description = m.next(); let image = m.next(); // TODO Perhaps this should just return metadata and have the client read the metadata in a // pattern described by the template return Ok(Response::new(tari_rpc::GetAssetMetadataResponse { name: name.unwrap_or_default(), description: description.unwrap_or_default(), image: image.unwrap_or_default(), owner_commitment: Vec::from(output.commitment.as_bytes()), features: Some(output.features.clone().into()), mined_height, mined_in_block, })); } } return Ok(Response::new(tari_rpc::GetAssetMetadataResponse { name: "".into(), description: "".into(), image: "".into(), owner_commitment: Vec::from(output.commitment.as_bytes()), features: Some(output.features.into()), mined_height, mined_in_block, })); }, }; // Err(Status::unknown("Could not find a matching arm")) } else { Err(Status::not_found("Could not find any utxo")) } } async fn list_asset_registrations( &self, request: Request<tari_rpc::ListAssetRegistrationsRequest>, ) -> Result<Response<Self::ListAssetRegistrationsStream>, Status> { let request = request.into_inner(); let mut handler = self.node_service.clone(); let (mut tx, rx) = mpsc::channel(50); task::spawn(async move { debug!( target: LOG_TARGET, "Starting thread to process ListAssetRegistrationsStream: {:?}", request, ); let start = request.offset as usize; let end = (request.offset + request.count) as usize; let outputs = match handler.get_asset_registrations(start..=end).await { Ok(outputs) => outputs, Err(err) => { warn!(target: LOG_TARGET, "Error communicating with base node: {:?}", err,); let _list_assest_registrations_response = tx.send(Err(Status::internal("Internal error"))); return; }, }; debug!(target: LOG_TARGET, "Found {} tokens", outputs.len(),); for output in outputs { let mined_height = output.mined_height; let header_hash = output.header_hash; let output = match output.output.into_unpruned_output() { Some(output) => output, None => { continue; }, }; let features = match output.features.clone().try_into() { Ok(f) => f, Err(err) => { warn!(target: LOG_TARGET, "Could not convert features: {}", err,); let _list_assest_registrations_response = tx.send(Err(Status::internal(format!("Could not convert features:{}", err)))); break; }, }; let response = tari_rpc::ListAssetRegistrationsResponse { asset_public_key: output .features .asset .map(|asset| asset.public_key.to_vec()) .unwrap_or_default(), unique_id: output.features.unique_id.unwrap_or_default(), owner_commitment: output.commitment.to_vec(), mined_in_block: header_hash, mined_height, script: output.script.as_bytes(), features: Some(features), }; if let Err(err) = tx.send(Ok(response)).await { // This error can only happen if the 
Receiver has dropped, meaning the request was // cancelled/disconnected warn!(target: LOG_TARGET, "Error sending error to GRPC client: {}", err); return; } } }); Ok(Response::new(rx)) } async fn get_new_block_template( &self, request: Request<tari_rpc::NewBlockTemplateRequest>, ) -> Result<Response<tari_rpc::NewBlockTemplateResponse>, Status> { let request = request.into_inner(); debug!(target: LOG_TARGET, "Incoming GRPC request for get new block template"); trace!(target: LOG_TARGET, "Request {:?}", request); let algo: PowAlgorithm = (u64::try_from( (request.algo) .ok_or_else(|| Status::invalid_argument("No valid pow algo selected".to_string()))? .pow_algo, ) .unwrap()) .try_into() .map_err(|_| Status::invalid_argument("No valid pow algo selected".to_string()))?; let mut handler = self.node_service.clone(); let new_template = handler .get_new_block_template(algo, request.max_weight) .await .map_err(|e| { warn!( target: LOG_TARGET, "Could not get new block template: {}", e.to_string() ); Status::internal(e.to_string()) })?; let status_watch = self.state_machine_handle.get_status_info_watch(); let pow = algo as i32; let response = tari_rpc::NewBlockTemplateResponse { miner_data: Some(tari_rpc::MinerData { reward: new_template.reward.into(), target_difficulty: new_template.target_difficulty.as_u64(), total_fees: new_template.total_fees.into(), algo: Some(tari_rpc::PowAlgo { pow_algo: pow }), }), new_block_template: Some(new_template.try_into().map_err(Status::internal)?), initial_sync_achieved: (*status_watch.borrow()).bootstrapped, }; debug!(target: LOG_TARGET, "Sending GetNewBlockTemplate response to client"); Ok(Response::new(response)) } async fn get_new_block( &self, request: Request<tari_rpc::NewBlockTemplate>, ) -> Result<Response<tari_rpc::GetNewBlockResult>, Status> { let request = request.into_inner(); debug!(target: LOG_TARGET, "Incoming GRPC request for get new block"); let block_template: NewBlockTemplate = request .try_into() .map_err(|s| Status::invalid_argument(format!("Invalid block template: {}", s)))?; let mut handler = self.node_service.clone(); let new_block = match handler.get_new_block(block_template).await { Ok(b) => b, Err(CommsInterfaceError::ChainStorageError(ChainStorageError::InvalidArguments { message, .. 
})) => { return Err(Status::invalid_argument(message)); }, Err(CommsInterfaceError::ChainStorageError(ChainStorageError::CannotCalculateNonTipMmr(msg))) => { let status = Status::with_details( tonic::Code::FailedPrecondition, msg, Bytes::from_static(b"CannotCalculateNonTipMmr"), ); return Err(status); }, Err(e) => return Err(Status::internal(e.to_string())), }; // construct response let block_hash = new_block.hash(); let mining_hash = new_block.header.merged_mining_hash(); let block: Option<tari_rpc::Block> = Some(new_block.try_into().map_err(Status::internal)?); let response = tari_rpc::GetNewBlockResult { block_hash, block, merge_mining_hash: mining_hash, }; debug!(target: LOG_TARGET, "Sending GetNewBlock response to client"); Ok(Response::new(response)) } async fn get_new_block_blob( &self, request: Request<tari_rpc::NewBlockTemplate>, ) -> Result<Response<tari_rpc::GetNewBlockBlobResult>, Status> { let request = request.into_inner(); debug!(target: LOG_TARGET, "Incoming GRPC request for get new block blob"); let block_template: NewBlockTemplate = request .try_into() .map_err(|s| Status::invalid_argument(format!("Invalid block template: {}", s)))?; let mut handler = self.node_service.clone(); let new_block = match handler.get_new_block(block_template).await { Ok(b) => b, Err(CommsInterfaceError::ChainStorageError(ChainStorageError::InvalidArguments { message, .. })) => { return Err(Status::invalid_argument(message)); }, Err(CommsInterfaceError::ChainStorageError(ChainStorageError::CannotCalculateNonTipMmr(msg))) => { let status = Status::with_details( tonic::Code::FailedPrecondition, msg, Bytes::from_static(b"CannotCalculateNonTipMmr"), ); return Err(status); }, Err(e) => return Err(Status::internal(e.to_string())), }; // construct response let block_hash = new_block.hash(); let mining_hash = new_block.header.merged_mining_hash(); let (header, block_body) = new_block.into_header_body(); let mut header_bytes = Vec::new(); let _ = header.consensus_encode(&mut header_bytes)?; let mut block_body_bytes = Vec::new(); let _ = block_body.consensus_encode(&mut block_body_bytes)?; let response = tari_rpc::GetNewBlockBlobResult { block_hash, header: header_bytes, block_body: block_body_bytes, merge_mining_hash: mining_hash, utxo_mr: header.output_mr, }; debug!(target: LOG_TARGET, "Sending GetNewBlockBlob response to client"); Ok(Response::new(response)) } async fn submit_block( &self, request: Request<tari_rpc::Block>, ) -> Result<Response<tari_rpc::SubmitBlockResponse>, Status> { let request = request.into_inner(); let block = Block::try_from(request) .map_err(|e| Status::invalid_argument(format!("Failed to convert arguments. 
Invalid block: {:?}", e)))?; let block_height = block.header.height; debug!(target: LOG_TARGET, "Miner submitted block: {}", block); info!( target: LOG_TARGET, "Received SubmitBlock #{} request from client", block_height ); let mut handler = self.node_service.clone(); let block_hash = handler .submit_block(block) .await .map_err(|e| Status::internal(e.to_string()))?; debug!( target: LOG_TARGET, "Sending SubmitBlock #{} response to client", block_height ); Ok(Response::new(tari_rpc::SubmitBlockResponse { block_hash })) } async fn submit_block_blob( &self, request: Request<tari_rpc::BlockBlobRequest>, ) -> Result<Response<tari_rpc::SubmitBlockResponse>, Status> { debug!(target: LOG_TARGET, "Received block blob from miner: {:?}", request); let request = request.into_inner(); debug!(target: LOG_TARGET, "request: {:?}", request); let mut header_bytes = request.header_blob.as_slice(); let mut body_bytes = request.body_blob.as_slice(); debug!(target: LOG_TARGET, "doing header"); let header = BlockHeader::consensus_decode(&mut header_bytes).map_err(|e| Status::internal(e.to_string()))?; debug!(target: LOG_TARGET, "doing body"); let body = AggregateBody::consensus_decode(&mut body_bytes).map_err(|e| Status::internal(e.to_string()))?; // let body = request.body_blob.try_into().unwrap(); let block = Block::new(header, body); let block_height = block.header.height; debug!(target: LOG_TARGET, "Miner submitted block: {}", block); info!( target: LOG_TARGET, "Received SubmitBlock #{} request from client", block_height ); let mut handler = self.node_service.clone(); let block_hash = handler .submit_block(block) .await .map_err(|e| Status::internal(e.to_string()))?; debug!( target: LOG_TARGET, "Sending SubmitBlock #{} response to client", block_height ); Ok(Response::new(tari_rpc::SubmitBlockResponse { block_hash })) } async fn submit_transaction( &self, request: Request<tari_rpc::SubmitTransactionRequest>, ) -> Result<Response<tari_rpc::SubmitTransactionResponse>, Status> { let request = request.into_inner(); let txn: Transaction = request .transaction .ok_or_else(|| Status::invalid_argument("Transaction is empty"))? .try_into() .map_err(|e| Status::invalid_argument(format!("Failed to convert arguments. 
Invalid transaction.{}", e)))?; debug!( target: LOG_TARGET, "Received SubmitTransaction request from client ({} kernels, {} outputs, {} inputs)", txn.body.kernels().len(), txn.body.outputs().len(), txn.body.inputs().len() ); let mut handler = self.mempool_service.clone(); let res = handler.submit_transaction(txn).await.map_err(|e| { error!(target: LOG_TARGET, "Error submitting:{}", e); Status::internal(e.to_string()) })?; let response = match res { TxStorageResponse::UnconfirmedPool => tari_rpc::SubmitTransactionResponse { result: tari_rpc::SubmitTransactionResult::Accepted.into(), }, TxStorageResponse::ReorgPool | TxStorageResponse::NotStoredAlreadySpent => { tari_rpc::SubmitTransactionResponse { result: tari_rpc::SubmitTransactionResult::AlreadyMined.into(), } }, TxStorageResponse::NotStored | TxStorageResponse::NotStoredOrphan | TxStorageResponse::NotStoredConsensus | TxStorageResponse::NotStoredTimeLocked => tari_rpc::SubmitTransactionResponse { result: tari_rpc::SubmitTransactionResult::Rejected.into(), }, }; debug!(target: LOG_TARGET, "Sending SubmitTransaction response to client"); Ok(Response::new(response)) } async fn transaction_state( &self, request: Request<tari_rpc::TransactionStateRequest>, ) -> Result<Response<tari_rpc::TransactionStateResponse>, Status> { let request = request.into_inner(); let excess_sig: Signature = request .excess_sig .ok_or_else(|| Status::invalid_argument("excess_sig not provided".to_string()))? .try_into() .map_err(|_| Status::invalid_argument("excess_sig could not be converted".to_string()))?; debug!( target: LOG_TARGET, "Received TransactionState request from client ({} excess_sig)", excess_sig .to_json() .unwrap_or_else(|_| "Failed to serialize signature".into()), ); let mut node_handler = self.node_service.clone(); let mut mem_handler = self.mempool_service.clone(); let base_node_response = node_handler .get_kernel_by_excess_sig(excess_sig.clone()) .await .map_err(|e| { error!(target: LOG_TARGET, "Error submitting query:{}", e); Status::internal(e.to_string()) })?; if !base_node_response.is_empty() { let response = tari_rpc::TransactionStateResponse { result: tari_rpc::TransactionLocation::Mined.into(), }; debug!( target: LOG_TARGET, "Sending Transaction state response to client {:?}", response ); return Ok(Response::new(response)); } // Base node does not yet know of kernel excess sig, lets ask the mempool let res = mem_handler .get_transaction_state_by_excess_sig(excess_sig.clone()) .await .map_err(|e| { error!(target: LOG_TARGET, "Error submitting query:{}", e); Status::internal(e.to_string()) })?; let response = match res { TxStorageResponse::UnconfirmedPool => tari_rpc::TransactionStateResponse { result: tari_rpc::TransactionLocation::Mempool.into(), }, TxStorageResponse::ReorgPool | TxStorageResponse::NotStoredAlreadySpent => { tari_rpc::TransactionStateResponse { result: tari_rpc::TransactionLocation::Unknown.into(), /* We return Unknown here as the mempool * should not think its mined, but the * node does not think it is. 
*/ } }, TxStorageResponse::NotStored | TxStorageResponse::NotStoredConsensus | TxStorageResponse::NotStoredOrphan | TxStorageResponse::NotStoredTimeLocked => tari_rpc::TransactionStateResponse { result: tari_rpc::TransactionLocation::NotStored.into(), }, }; debug!( target: LOG_TARGET, "Sending Transaction state response to client {:?}", response ); Ok(Response::new(response)) } async fn get_peers( &self, _request: Request<tari_rpc::GetPeersRequest>, ) -> Result<Response<Self::GetPeersStream>, Status> { debug!(target: LOG_TARGET, "Incoming GRPC request for get all peers"); let peers = self .comms .peer_manager() .all() .await .map_err(|e| Status::unknown(e.to_string()))?; let peers: Vec<tari_rpc::Peer> = peers.into_iter().map(|p| p.into()).collect(); let (mut tx, rx) = mpsc::channel(peers.len()); task::spawn(async move { for peer in peers { let response = tari_rpc::GetPeersResponse { peer: Some(peer) }; match tx.send(Ok(response)).await { Ok(_) => (), Err(err) => { warn!(target: LOG_TARGET, "Error sending peer via GRPC: {}", err); match tx.send(Err(Status::unknown("Error sending data"))).await { Ok(_) => (), Err(send_err) => { warn!(target: LOG_TARGET, "Error sending error to GRPC client: {}", send_err) }, } return; }, } } }); debug!(target: LOG_TARGET, "Sending peers response to client"); Ok(Response::new(rx)) } async fn get_blocks( &self, request: Request<tari_rpc::GetBlocksRequest>, ) -> Result<Response<Self::GetBlocksStream>, Status> { let request = request.into_inner(); debug!( target: LOG_TARGET, "Incoming GRPC request for GetBlocks: {:?}", request.heights ); let mut heights = request.heights; if heights.is_empty() { return Err(Status::invalid_argument("heights cannot be empty")); } heights.truncate(GET_BLOCKS_MAX_HEIGHTS); heights.sort_unstable(); // unreachable panic: `heights` is not empty let start = *heights.first().expect("unreachable"); let end = *heights.last().expect("unreachable"); let mut handler = self.node_service.clone(); let (mut tx, rx) = mpsc::channel(GET_BLOCKS_PAGE_SIZE); task::spawn(async move { let page_iter = NonOverlappingIntegerPairIter::new(start, end + 1, GET_BLOCKS_PAGE_SIZE); for (start, end) in page_iter { let blocks = match handler.get_blocks(start..=end).await { Err(err) => { warn!( target: LOG_TARGET, "Error communicating with local base node: {:?}", err, ); return; }, Ok(data) => { // TODO: Change this interface to a start-end ranged one (clients like the block explorer // convert start end ranges to integer lists anyway) data.into_iter().filter(|b| heights.contains(&b.header().height)) }, }; for block in blocks { debug!( target: LOG_TARGET, "GetBlock GRPC sending block #{}", block.header().height ); match tx .send( block .try_into() .map_err(|err| Status::internal(format!("Could not provide block: {}", err))), ) .await { Ok(_) => (), Err(err) => { warn!(target: LOG_TARGET, "Error sending header via GRPC: {}", err); match tx.send(Err(Status::unknown("Error sending data"))).await { Ok(_) => (), Err(send_err) => { warn!(target: LOG_TARGET, "Error sending error to GRPC client: {}", send_err) }, } return; }, } } } }); debug!(target: LOG_TARGET, "Sending GetBlocks response stream to client"); Ok(Response::new(rx)) } async fn get_tip_info( &self, _request: Request<tari_rpc::Empty>, ) -> Result<Response<tari_rpc::TipInfoResponse>, Status> { debug!(target: LOG_TARGET, "Incoming GRPC request for BN tip data"); let mut handler = self.node_service.clone(); let meta = handler .get_metadata() .await .map_err(|e| Status::internal(e.to_string()))?; // Determine if we 
are bootstrapped let status_watch = self.state_machine_handle.get_status_info_watch(); let state: tari_rpc::BaseNodeState = (&(*status_watch.borrow()).state_info).into(); let response = tari_rpc::TipInfoResponse { metadata: Some(meta.into()), initial_sync_achieved: (*status_watch.borrow()).bootstrapped, base_node_state: state.into(), }; debug!(target: LOG_TARGET, "Sending MetaData response to client"); Ok(Response::new(response)) } async fn search_kernels( &self, request: Request<tari_rpc::SearchKernelsRequest>, ) -> Result<Response<Self::SearchKernelsStream>, Status> { debug!(target: LOG_TARGET, "Incoming GRPC request for SearchKernels"); let request = request.into_inner(); let converted: Result<Vec<_>, _> = request.signatures.into_iter().map(|s| s.try_into()).collect(); let kernels = converted.map_err(|_| Status::internal("Failed to convert one or more arguments."))?; let mut handler = self.node_service.clone(); let (mut tx, rx) = mpsc::channel(GET_BLOCKS_PAGE_SIZE); task::spawn(async move { let blocks = match handler.get_blocks_with_kernels(kernels).await { Err(err) => { warn!( target: LOG_TARGET, "Error communicating with local base node: {:?}", err, ); return; }, Ok(data) => data, }; for block in blocks { match tx .send( block .try_into() .map_err(|err| Status::internal(format!("Could not provide block:{}", err))), ) .await { Ok(_) => (), Err(err) => { warn!(target: LOG_TARGET, "Error sending header via GRPC: {}", err); match tx.send(Err(Status::unknown("Error sending data"))).await { Ok(_) => (), Err(send_err) => { warn!(target: LOG_TARGET, "Error sending error to GRPC client: {}", send_err) }, } return; }, } } }); debug!(target: LOG_TARGET, "Sending SearchKernels response stream to client"); Ok(Response::new(rx)) } async fn search_utxos( &self, request: Request<tari_rpc::SearchUtxosRequest>, ) -> Result<Response<Self::SearchUtxosStream>, Status> { debug!(target: LOG_TARGET, "Incoming GRPC request for SearchUtxos"); let request = request.into_inner(); let converted: Result<Vec<_>, _> = request .commitments .into_iter() .map(|s| Commitment::from_bytes(&s)) .collect(); let outputs = converted.map_err(|_| Status::internal("Failed to convert one or more arguments."))?; let mut handler = self.node_service.clone(); let (mut tx, rx) = mpsc::channel(GET_BLOCKS_PAGE_SIZE); task::spawn(async move { let blocks = match handler.fetch_blocks_with_utxos(outputs).await { Err(err) => { warn!( target: LOG_TARGET, "Error communicating with local base node: {:?}", err, ); return; }, Ok(data) => data, }; for block in blocks { match tx .send( block .try_into() .map_err(|err| Status::internal(format!("Could not provide block:{}", err))), ) .await { Ok(_) => (), Err(err) => { warn!(target: LOG_TARGET, "Error sending header via GRPC: {}", err); match tx.send(Err(Status::unknown("Error sending data"))).await { Ok(_) => (), Err(send_err) => { warn!(target: LOG_TARGET, "Error sending error to GRPC client: {}", send_err) }, } return; }, } } }); debug!(target: LOG_TARGET, "Sending SearchUtxos response stream to client"); Ok(Response::new(rx)) } #[allow(clippy::useless_conversion)] async fn fetch_matching_utxos( &self, request: Request<tari_rpc::FetchMatchingUtxosRequest>, ) -> Result<Response<Self::FetchMatchingUtxosStream>, Status> { debug!(target: LOG_TARGET, "Incoming GRPC request for FetchMatchingUtxos"); let request = request.into_inner(); let converted: Result<Vec<_>, _> = request.hashes.into_iter().map(|s| s.try_into()).collect(); let hashes = converted.map_err(|_| Status::internal("Failed to convert one 
or more arguments."))?; let mut handler = self.node_service.clone(); let (mut tx, rx) = mpsc::channel(GET_BLOCKS_PAGE_SIZE); task::spawn(async move { let outputs = match handler.fetch_matching_utxos(hashes).await { Err(err) => { warn!( target: LOG_TARGET, "Error communicating with local base node: {:?}", err, ); return; }, Ok(data) => data, }; for output in outputs { match tx .send(Ok(tari_rpc::FetchMatchingUtxosResponse { output: Some(output.into()), })) .await { Ok(_) => (), Err(err) => { warn!(target: LOG_TARGET, "Error sending output via GRPC: {}", err); match tx.send(Err(Status::unknown("Error sending data"))).await { Ok(_) => (), Err(send_err) => { warn!(target: LOG_TARGET, "Error sending error to GRPC client: {}", send_err) }, } return; }, } } }); debug!( target: LOG_TARGET, "Sending FindMatchingUtxos response stream to client" ); Ok(Response::new(rx)) } async fn get_block_timing( &self, request: Request<tari_rpc::HeightRequest>, ) -> Result<Response<tari_rpc::BlockTimingResponse>, Status> { let request = request.into_inner(); debug!( target: LOG_TARGET, "Incoming GRPC request for GetBlockTiming: from_tip: {:?} start_height: {:?} end_height: {:?}", request.from_tip, request.start_height, request.end_height ); let mut handler = self.node_service.clone(); let (start, end) = get_heights(&request, handler.clone()).await?; let num_requested = end.saturating_sub(start); if num_requested > BLOCK_TIMING_MAX_BLOCKS { warn!( target: LOG_TARGET, "GetBlockTiming request for too many blocks. Requested: {}. Max: {}.", num_requested, BLOCK_TIMING_MAX_BLOCKS ); return Err(Status::invalid_argument("Max request size exceeded.")); } let headers = match handler.get_headers(start..=end).await { Ok(headers) => headers.into_iter().map(|h| h.into_header()).rev().collect::<Vec<_>>(), Err(err) => { warn!(target: LOG_TARGET, "Error getting headers for GRPC client: {}", err); Vec::new() }, }; let (max, min, avg) = BlockHeader::timing_stats(&headers); let response = tari_rpc::BlockTimingResponse { max, min, avg }; debug!(target: LOG_TARGET, "Sending GetBlockTiming response to client"); Ok(Response::new(response)) } async fn get_constants( &self, _request: Request<tari_rpc::Empty>, ) -> Result<Response<tari_rpc::ConsensusConstants>, Status> { debug!(target: LOG_TARGET, "Incoming GRPC request for GetConstants",); debug!(target: LOG_TARGET, "Sending GetConstants response to client"); // TODO: Switch to request height Ok(Response::new( self.network.create_consensus_constants().pop().unwrap().into(), )) } async fn get_block_size( &self, request: Request<tari_rpc::BlockGroupRequest>, ) -> Result<Response<tari_rpc::BlockGroupResponse>, Status> { get_block_group(self.node_service.clone(), request, BlockGroupType::BlockSize).await } async fn get_block_fees( &self, request: Request<tari_rpc::BlockGroupRequest>, ) -> Result<Response<tari_rpc::BlockGroupResponse>, Status> { get_block_group(self.node_service.clone(), request, BlockGroupType::BlockFees).await } async fn get_version(&self, _request: Request<tari_rpc::Empty>) -> Result<Response<tari_rpc::StringValue>, Status> { Ok(Response::new(consts::APP_VERSION.to_string().into())) } async fn check_for_updates( &self, _request: Request<tari_rpc::Empty>, ) -> Result<Response<tari_rpc::SoftwareUpdate>, Status> { let mut resp = tari_rpc::SoftwareUpdate::default(); if let Some(ref update) = *self.software_updater.new_update_notifier().borrow() { resp.has_update = true; resp.version = update.version().to_string(); resp.sha = update.to_hash_hex(); resp.download_url = 
update.download_url().to_string(); } Ok(Response::new(resp)) } async fn get_tokens_in_circulation( &self, request: Request<tari_rpc::GetBlocksRequest>, ) -> Result<Response<Self::GetTokensInCirculationStream>, Status> { debug!(target: LOG_TARGET, "Incoming GRPC request for GetTokensInCirculation",); let request = request.into_inner(); let mut heights = request.heights; heights = heights .drain(..cmp::min(heights.len(), GET_TOKENS_IN_CIRCULATION_MAX_HEIGHTS)) .collect(); let consensus_manager = ConsensusManager::builder(self.network.as_network()).build(); let (mut tx, rx) = mpsc::channel(GET_TOKENS_IN_CIRCULATION_PAGE_SIZE); task::spawn(async move { let mut page: Vec<u64> = heights .drain(..cmp::min(heights.len(), GET_TOKENS_IN_CIRCULATION_PAGE_SIZE)) .collect(); while !page.is_empty() { // TODO: This is not ideal. The main issue here is the interface to get_tokens_in_circulation includes // blocks at any height to be selected instead of a more coherent start - end range. This means we // cannot use the Emission iterator as intended and instead, must query the supply at a // given height for each block (the docs mention to use the iterator instead of supply_at_block in a // loop, however the Iterator was not exposed at the time this handler was written). let values: Vec<tari_rpc::ValueAtHeightResponse> = page .clone() .into_iter() .map(|height| tari_rpc::ValueAtHeightResponse { height, value: consensus_manager.emission_schedule().supply_at_block(height).into(), }) .collect(); let result_size = values.len(); for value in values { match tx.send(Ok(value)).await { Ok(_) => (), Err(err) => { warn!(target: LOG_TARGET, "Error sending value via GRPC: {}", err); match tx.send(Err(Status::unknown("Error sending data"))).await { Ok(_) => (), Err(send_err) => { warn!(target: LOG_TARGET, "Error sending error to GRPC client: {}", send_err) }, } return; }, } } if result_size < GET_TOKENS_IN_CIRCULATION_PAGE_SIZE { break; } page = heights .drain(..cmp::min(heights.len(), GET_TOKENS_IN_CIRCULATION_PAGE_SIZE)) .collect(); } }); debug!(target: LOG_TARGET, "Sending GetTokensInCirculation response to client"); Ok(Response::new(rx)) } async fn get_sync_progress( &self, _request: Request<tari_rpc::Empty>, ) -> Result<Response<tari_rpc::SyncProgressResponse>, Status> { let state = self .state_machine_handle .get_status_info_watch() .borrow() .state_info .clone(); let response = match state { StateInfo::HeaderSync(None) => tari_rpc::SyncProgressResponse { tip_height: 0, local_height: 0, state: tari_rpc::SyncState::HeaderStarting.into(), }, StateInfo::HeaderSync(Some(info)) => tari_rpc::SyncProgressResponse { tip_height: info.tip_height, local_height: info.local_height, state: tari_rpc::SyncState::Header.into(), }, StateInfo::BlockSyncStarting => tari_rpc::SyncProgressResponse { tip_height: 0, local_height: 0, state: tari_rpc::SyncState::BlockStarting.into(), }, StateInfo::BlockSync(info) => tari_rpc::SyncProgressResponse { tip_height: info.tip_height, local_height: info.local_height, state: tari_rpc::SyncState::Block.into(), }, _ => tari_rpc::SyncProgressResponse { tip_height: 0, local_height: 0, state: if state.is_synced() { tari_rpc::SyncState::Done.into() } else { tari_rpc::SyncState::Startup.into() }, }, }; Ok(Response::new(response)) } async fn get_sync_info( &self, _request: Request<tari_rpc::Empty>, ) -> Result<Response<tari_rpc::SyncInfoResponse>, Status> { debug!(target: LOG_TARGET, "Incoming GRPC request for BN sync data"); let response = self .state_machine_handle .get_status_info_watch() .borrow() 
.state_info .get_block_sync_info() .map(|info| { let node_ids = info.sync_peer.node_id().to_string().into_bytes(); tari_rpc::SyncInfoResponse { tip_height: info.tip_height, local_height: info.local_height, peer_node_id: vec![node_ids], } }) .unwrap_or_default(); debug!(target: LOG_TARGET, "Sending SyncData response to client"); Ok(Response::new(response)) } async fn get_header_by_hash( &self, request: Request<tari_rpc::GetHeaderByHashRequest>, ) -> Result<Response<tari_rpc::BlockHeaderResponse>, Status> { let tari_rpc::GetHeaderByHashRequest { hash } = request.into_inner(); let mut node_service = self.node_service.clone(); let hash_hex = hash.to_hex(); let block = node_service .get_block_by_hash(hash) .await .map_err(|err| Status::internal(err.to_string()))?; match block { Some(block) => { let (block, acc_data, confirmations, _) = block.dissolve(); let total_block_reward = self .consensus_rules .calculate_coinbase_and_fees(block.header.height, block.body.kernels()); let resp = tari_rpc::BlockHeaderResponse { difficulty: acc_data.achieved_difficulty.into(), num_transactions: block.body.kernels().len() as u32, confirmations, header: Some(block.header.into()), reward: total_block_reward.into(), }; Ok(Response::new(resp)) }, None => Err(Status::not_found(format!("Header not found with hash `{}`", hash_hex))), } } async fn get_header_by_height( &self, request: Request<tari_rpc::GetHeaderByHeightRequest>, ) -> Result<Response<tari_rpc::BlockHeaderResponse>, Status> { let tari_rpc::GetHeaderByHeightRequest { height } = request.into_inner(); let mut node_service = self.node_service.clone(); let block = node_service .get_block(height) .await .map_err(|err| Status::internal(err.to_string()))?; match block { Some(block) => { let (block, acc_data, confirmations, _) = block.dissolve(); let total_block_reward = self .consensus_rules .calculate_coinbase_and_fees(block.header.height, block.body.kernels()); let resp = tari_rpc::BlockHeaderResponse { difficulty: acc_data.achieved_difficulty.into(), num_transactions: block.body.kernels().len() as u32, confirmations, header: Some(block.header.into()), reward: total_block_reward.into(), }; Ok(Response::new(resp)) }, None => Err(Status::not_found(format!("Header not found with height `{}`", height))), } } async fn identify(&self, _: Request<tari_rpc::Empty>) -> Result<Response<tari_rpc::NodeIdentity>, Status> { let identity = self.comms.node_identity_ref(); Ok(Response::new(tari_rpc::NodeIdentity { public_key: identity.public_key().to_vec(), public_address: identity.public_address().to_string(), node_id: identity.node_id().to_vec(), })) } async fn get_network_status( &self, _: Request<tari_rpc::Empty>, ) -> Result<Response<tari_rpc::NetworkStatusResponse>, Status> { let status = self .comms .connectivity() .get_connectivity_status() .await .map_err(|err| Status::internal(err.to_string()))?; let latency = self .liveness .clone() .get_network_avg_latency() .await .map_err(|err| Status::internal(err.to_string()))?; let resp = tari_rpc::NetworkStatusResponse { status: tari_rpc::ConnectivityStatus::from(status) as i32, avg_latency_ms: latency .map(|l| u32::try_from(l.as_millis()).unwrap_or(u32::MAX)) .unwrap_or(0), num_node_connections: status.num_connected_nodes() as u32, }; Ok(Response::new(resp)) } async fn list_connected_peers( &self, _: Request<tari_rpc::Empty>, ) -> Result<Response<tari_rpc::ListConnectedPeersResponse>, Status> { let mut connectivity = self.comms.connectivity(); let peer_manager = self.comms.peer_manager(); let connected_peers = connectivity 
.get_active_connections() .await .map_err(|err| Status::internal(err.to_string()))?; let mut peers = Vec::with_capacity(connected_peers.len()); for peer in connected_peers { peers.push( peer_manager .find_by_node_id(peer.peer_node_id()) .await .map_err(|err| Status::internal(err.to_string()))? .ok_or_else(|| Status::not_found(format!("Peer {} not found", peer.peer_node_id())))?, ); } let resp = tari_rpc::ListConnectedPeersResponse { connected_peers: peers.into_iter().map(Into::into).collect(), }; Ok(Response::new(resp)) } async fn get_mempool_stats( &self, _: Request<tari_rpc::Empty>, ) -> Result<Response<tari_rpc::MempoolStatsResponse>, Status> { let mut mempool_handle = self.mempool_service.clone(); let mempool_stats = mempool_handle.get_mempool_stats().await.map_err(|e| { error!(target: LOG_TARGET, "Error submitting query:{}", e); Status::internal(e.to_string()) })?; let response = tari_rpc::MempoolStatsResponse { total_txs: mempool_stats.total_txs as u64, unconfirmed_txs: mempool_stats.unconfirmed_txs as u64, reorg_txs: mempool_stats.reorg_txs as u64, total_weight: mempool_stats.total_weight, }; Ok(Response::new(response)) } } enum BlockGroupType { BlockFees, BlockSize, } async fn get_block_group( mut handler: LocalNodeCommsInterface, request: Request<tari_rpc::BlockGroupRequest>, block_group_type: BlockGroupType, ) -> Result<Response<tari_rpc::BlockGroupResponse>, Status> { let request = request.into_inner(); let calc_type_response = request.calc_type; let calc_type: CalcType = request.calc_type(); let height_request: tari_rpc::HeightRequest = request.into(); debug!( target: LOG_TARGET, "Incoming GRPC request for GetBlockSize: from_tip: {:?} start_height: {:?} end_height: {:?}", height_request.from_tip, height_request.start_height, height_request.end_height ); let (start, end) = get_heights(&height_request, handler.clone()).await?; let blocks = match handler.get_blocks(start..=end).await { Err(err) => { warn!( target: LOG_TARGET, "Error communicating with local base node: {:?}", err, ); vec![] }, Ok(data) => data, }; let extractor = match block_group_type { BlockGroupType::BlockFees => block_fees, BlockGroupType::BlockSize => block_size, }; let values = blocks.iter().map(extractor).collect::<Vec<u64>>(); let value = match calc_type { CalcType::Median => median(values).map(|v| vec![v]), CalcType::Mean => mean(values).map(|v| vec![v]), CalcType::Quantile => return Err(Status::unimplemented("Quantile has not been implemented")), CalcType::Quartile => return Err(Status::unimplemented("Quartile has not been implemented")), } .unwrap_or_default(); debug!( target: LOG_TARGET, "Sending GetBlockSize response to client: {:?}", value ); Ok(Response::new(tari_rpc::BlockGroupResponse { value, calc_type: calc_type_response, })) }
} else { Either::Right(page_iter) }; for (start, end) in page_iter {
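All of the streaming handlers above follow one pattern: create a bounded mpsc channel, spawn a task that pushes pages of Ok(item) values into the sender (logging and sending a single Err(Status) if a send fails), and hand the receiver back as the gRPC response stream, so an empty page ends the stream and a send error means the client went away. Below is a minimal, self-contained sketch of that pattern, assuming only the futures and tokio crates; fetch_page, PAGE_SIZE, and the u64 item type are hypothetical stand-ins for the node-service calls and protobuf types used above.

use futures::channel::mpsc;
use futures::{SinkExt, StreamExt};

const PAGE_SIZE: usize = 10;

// Stand-in for a node-service query; an empty page signals the end.
async fn fetch_page(start: u64) -> Vec<u64> {
    if start < 30 {
        (start..start + PAGE_SIZE as u64).collect()
    } else {
        Vec::new()
    }
}

#[tokio::main]
async fn main() {
    let (mut tx, mut rx) = mpsc::channel::<Result<u64, String>>(PAGE_SIZE);
    tokio::spawn(async move {
        let mut start = 0u64;
        loop {
            let page = fetch_page(start).await;
            if page.is_empty() {
                break; // no more data; dropping tx ends the stream
            }
            for item in page {
                // A send error means the receiver was dropped (client gone).
                if tx.send(Ok(item)).await.is_err() {
                    return;
                }
            }
            start += PAGE_SIZE as u64;
        }
    });
    // In the real handlers the receiver is returned as Response::new(rx);
    // here we simply drain it.
    while let Some(item) = rx.next().await {
        println!("{:?}", item);
    }
}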
ng-loading-indicator.module.ts
import { NgModule } from '@angular/core'; import { ModalManagerModule } from '@browninglogic/ng-modal'; import { LoadingIndicatorComponent } from './components/loading-indicator.component'; @NgModule({ imports: [ ModalManagerModule ], declarations: [ LoadingIndicatorComponent ], exports: [ LoadingIndicatorComponent ] }) export class
{ }
NgLoadingIndicatorModule
stats.rs
use std::collections::HashMap; use std::iter::FromIterator; use std::path::PathBuf; use crate::diff; use crate::diff::Event; use crate::entry::Entry; pub struct Stats<'a> { pub added: Vec<&'a Entry>, pub removed: Vec<&'a Entry>, pub updated: Vec<&'a Entry>, pub updated_bitrot: Vec<&'a Entry>, pub moved: HashMap<PathBuf, &'a Entry>, pub unchanged: Vec<&'a Entry>, pub total: u64, } impl<'a> Stats<'a> { pub fn modified(&self) -> bool { !self.added.is_empty() || !self.removed.is_empty() || !self.updated.is_empty() || !self.updated_bitrot.is_empty() || !self.moved.is_empty() } pub fn iter_new(&self) -> impl Iterator<Item=&'a Entry> { let moved_entries: Vec<&'a Entry> = self.moved.values().copied().collect(); self.added.clone().into_iter(). chain(self.unchanged.clone().into_iter()). chain(self.updated.clone().into_iter()). chain(self.updated_bitrot.clone().into_iter()). chain(moved_entries) } fn compute_moved(&mut self) { let mut removed = self.removed.iter(). map(|&e| (e.hash.clone(), e)). collect::<HashMap<String, &'a Entry>>(); let mut added = Vec::new(); for &a in &self.added { match removed.remove_entry(&a.hash) { Some((_, val)) => { self.moved.insert(val.path.clone(), a); } None => { added.push(a); } } } let mut removed = Vec::new(); for &r in &self.removed { if self.moved.get(&r.path).is_none() { removed.push(r); } } self.added = added; self.removed = removed } } impl<'a> FromIterator<Event<'a, Entry>> for Stats<'a> { fn from_iter<T: IntoIterator<Item=Event<'a, Entry>>>(iter: T) -> Self { let mut stats = Stats { added: Vec::new(), removed: Vec::new(), updated: Vec::new(), updated_bitrot: Vec::new(), unchanged: Vec::new(), moved: HashMap::new(), total: 0, }; for event in iter { match event {
stats.added.push(new); stats.total += 1; } diff::Event::REMOVED { old } => { stats.removed.push(old); } diff::Event::UPDATED { old, new } if old.modified == new.modified && !Entry::compare_hash(old, new) => { stats.updated_bitrot.push(new); stats.total += 1; } diff::Event::UPDATED { old: _, new } => { stats.updated.push(new); stats.total += 1; } diff::Event::UNCHANGED { old, new: _ } => { stats.unchanged.push(old); stats.total += 1; } } } stats.compute_moved(); stats } } #[cfg(test)] mod tests { use anyhow::Result; use diff::Event; use super::*; fn given_entry(name: &str) -> Entry { let hash = format!("{} hash", name); return Entry { path: PathBuf::from(name), hash, len: 123, modified: 123, }; } #[test] fn test_modified_empty() -> Result<()> { // When let stats = Stats { added: vec![], removed: vec![], updated: vec![], updated_bitrot: vec![], moved: Default::default(), unchanged: vec![], total: 0, }; // Then assert_eq!(stats.modified(), false); Ok(()) } #[test] fn test_modified_unchanged() -> Result<()> { // Given let entry = given_entry("file.txt"); // When let stats = Stats { added: vec![], removed: vec![], updated: vec![], updated_bitrot: vec![], moved: Default::default(), unchanged: vec![&entry], total: 1, }; // Then assert_eq!(stats.modified(), false); Ok(()) } #[test] fn test_modified_added() -> Result<()> { // Given let entry = given_entry("file.txt"); // When let stats = Stats { added: vec![&entry], removed: vec![], updated: vec![], updated_bitrot: vec![], moved: Default::default(), unchanged: vec![], total: 1, }; // Then assert_eq!(stats.modified(), true); Ok(()) } #[test] fn test_modified_removed() -> Result<()> { // Given let entry = given_entry("file.txt"); // When let stats = Stats { added: vec![], removed: vec![&entry], updated: vec![], updated_bitrot: vec![], moved: Default::default(), unchanged: vec![], total: 1, }; // Then assert_eq!(stats.modified(), true); Ok(()) } #[test] fn test_modified_updated() -> Result<()> { // Given let entry = given_entry("file.txt"); // When let stats = Stats { added: vec![], removed: vec![], updated: vec![&entry], updated_bitrot: vec![], moved: Default::default(), unchanged: vec![], total: 1, }; // Then assert_eq!(stats.modified(), true); Ok(()) } #[test] fn test_modified_bitrot() -> Result<()> { // Given let entry = given_entry("file.txt"); // When let stats = Stats { added: vec![], removed: vec![], updated: vec![], updated_bitrot: vec![&entry], moved: Default::default(), unchanged: vec![], total: 1, }; // Then assert_eq!(stats.modified(), true); Ok(()) } #[test] fn test_modified_moved() -> Result<()> { // Given let entry = given_entry("file.txt"); let mut moved_files: HashMap<PathBuf, &Entry> = HashMap::new(); moved_files.insert(PathBuf::from("old-file.txt"), &entry); // When let stats = Stats { added: vec![], removed: vec![], updated: vec![], updated_bitrot: vec![], moved: moved_files, unchanged: vec![], total: 1, }; // Then assert_eq!(stats.modified(), true); Ok(()) } #[test] fn test_iter_new() -> Result<()> { // Given let added_entry = given_entry("added.txt"); let removed_entry = given_entry("removed.txt"); let updated_entry = given_entry("updated.txt"); let bitrot_entry = given_entry("bitrot.txt"); let moved_entry = given_entry("moved.txt"); let unchanged_entry = given_entry("unchanged.txt"); let mut moved_files: HashMap<PathBuf, &Entry> = HashMap::new(); moved_files.insert(PathBuf::from("old-file.txt"), &moved_entry); // When let stats = Stats { added: vec![&added_entry], removed: vec![&removed_entry], updated: vec![&updated_entry], 
updated_bitrot: vec![&bitrot_entry], moved: moved_files, unchanged: vec![&unchanged_entry], total: 6, }; let entries: Vec<&Entry> = stats.iter_new().collect(); // Then assert_eq!(entries, vec![&added_entry, &unchanged_entry, &updated_entry, &bitrot_entry, &moved_entry]); Ok(()) } #[test] fn test_from_iter_added() -> Result<()> { // Given let added_entry_1 = given_entry("new1.txt"); let added_entry_2 = given_entry("new2.txt"); let events = vec![ Event::ADDED { new: &added_entry_1 }, Event::ADDED { new: &added_entry_2 }, ]; // When let stats = Stats::from_iter(events); // Then assert_eq!(stats.total, 2); assert_eq!(stats.added, vec![&added_entry_1, &added_entry_2]); Ok(()) } #[test] fn test_from_iter_removed() -> Result<()> { // Given let removed_entry_1 = given_entry("removed1.txt"); let removed_entry_2 = given_entry("removed2.txt"); let events = vec![ Event::REMOVED { old: &removed_entry_1 }, Event::REMOVED { old: &removed_entry_2 }, ]; // When let stats = Stats::from_iter(events); // Then assert_eq!(stats.total, 0); assert_eq!(stats.removed, vec![&removed_entry_1, &removed_entry_2]); Ok(()) } #[test] fn test_from_iter_updated() -> Result<()> { // Given let updated_entry_old = given_entry("updated.txt"); let updated_entry_new = Entry { path: PathBuf::from("updated.txt"), hash: String::from("updated.txt new hash"), len: 456, modified: 234, }; let updated_entry_with_bitrot_old = given_entry("bitrot.txt"); let updated_entry_with_bitrot_new = Entry { path: PathBuf::from("bitrot.txt"), hash: String::from("bitrot new hash"), len: 123, modified: 123, }; let events = vec![ Event::UPDATED { old: &updated_entry_old, new: &updated_entry_new }, Event::UPDATED { old: &updated_entry_with_bitrot_old, new: &updated_entry_with_bitrot_new }, ]; // When let stats = Stats::from_iter(events); // Then assert_eq!(stats.total, 2); assert_eq!(stats.updated, vec![&updated_entry_new]); assert_eq!(stats.updated_bitrot, vec![&updated_entry_with_bitrot_new]); Ok(()) } #[test] fn test_from_iter_unchanged() -> Result<()> { // Given let unchanged_entry_1 = given_entry("unchanged_1.txt"); let unchanged_entry_2 = given_entry("unchanged_2.txt"); let events = vec![ Event::UNCHANGED { old: &unchanged_entry_1, new: &unchanged_entry_1, }, Event::UNCHANGED { old: &unchanged_entry_2, new: &unchanged_entry_2, }, ]; // When let stats = Stats::from_iter(events); // Then assert_eq!(stats.total, 2); assert_eq!(stats.unchanged, vec![&unchanged_entry_1, &unchanged_entry_2]); Ok(()) } #[test] fn test_from_iter_moved() -> Result<()> { // When let moved_entry_1_from = Entry { path: PathBuf::from("moved_1_from.txt"), hash: String::from("moved file 1 hash"), len: 123, modified: 123, }; let moved_entry_1a_to = Entry { path: PathBuf::from("moved_1a_to.txt"), hash: String::from("moved file 1 hash"), len: 123, modified: 123, }; let moved_entry_1b_to = Entry { path: PathBuf::from("moved_1b_to.txt"), hash: String::from("moved file 1 hash"), len: 123, modified: 123, }; let moved_entry_2a_from = Entry { path: PathBuf::from("moved_2a_from.txt"), hash: String::from("moved file 2 hash"), len: 123, modified: 123, }; let moved_entry_2b_from = Entry { path: PathBuf::from("moved_2b_from.txt"), hash: String::from("moved file 2 hash"), len: 123, modified: 123, }; let moved_entry_2_to = Entry { path: PathBuf::from("moved_2_to.txt"), hash: String::from("moved file 2 hash"), len: 123, modified: 123, }; let events = vec![ Event::REMOVED { old: &moved_entry_2b_from }, Event::REMOVED { old: &moved_entry_2a_from }, Event::ADDED { new: &moved_entry_1a_to }, 
Event::ADDED { new: &moved_entry_1b_to }, Event::ADDED { new: &moved_entry_2_to }, Event::REMOVED { old: &moved_entry_1_from }, ]; // When let stats = Stats::from_iter(events); // Then assert_eq!(stats.total, 3); assert_eq!(stats.moved.len(), 2); assert_eq!(stats.moved.get(moved_entry_1_from.path.as_path()), Some(&&moved_entry_1a_to)); assert_eq!(stats.moved.get(moved_entry_2a_from.path.as_path()), Some(&&moved_entry_2_to)); assert_eq!(stats.added, vec![&moved_entry_1b_to]); assert_eq!(stats.removed, vec![&moved_entry_2b_from]); Ok(()) } }
diff::Event::ADDED { new } => {
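The compute_moved step above reclassifies add/remove pairs as moves by content hash: removed entries are indexed into a hash-keyed map, every added entry that finds a partner is recorded as a move from the old path, and unmatched entries stay added or removed. A stand-alone sketch of that idea under simplified assumptions follows; the (path, hash) tuples and detect_moves are hypothetical stand-ins for Entry and the real method.

use std::collections::HashMap;

/// (path, hash) pairs stand in for Entry; returns (still_added, moved),
/// where moved maps the old path to the entry at its new path.
fn detect_moves(
    added: Vec<(String, String)>,
    removed: Vec<(String, String)>,
) -> (Vec<(String, String)>, HashMap<String, (String, String)>) {
    // Index removed entries by hash; as in compute_moved, duplicate hashes
    // keep only one candidate and the rest stay added/removed.
    let mut by_hash: HashMap<String, (String, String)> = removed
        .into_iter()
        .map(|e| (e.1.clone(), e))
        .collect();
    let mut still_added = Vec::new();
    let mut moved = HashMap::new();
    for a in added {
        match by_hash.remove(&a.1) {
            // A removed entry with the same hash exists: treat it as a move.
            Some(old) => {
                moved.insert(old.0, a);
            }
            None => still_added.push(a),
        }
    }
    (still_added, moved)
}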
bitcoin_ja_JP.ts
<TS language="ja_JP" version="2.1"> <context> <name>AddressBookPage</name> <message> <source>Right-click to edit address or label</source> <translation>編集するためにアドレスもしくはラベルを右クリックします</translation> </message> <message> <source>Create a new address</source> <translation>新しいアドレスを作成します</translation> </message> <message> <source>&amp;New</source> <translation>新規</translation> </message> <message> <source>Copy the currently selected address to the system clipboard</source> <translation>クリップボードに現在選択されているアドレスをコピーします</translation> </message> <message> <source>&amp;Copy</source> <translation>&amp;コピー</translation> </message> <message> <source>C&amp;lose</source> <translation>C&amp;失敗</translation> </message> <message> <source>Delete the currently selected address from the list</source> <translation>リストから現在選択中のアドレスを削除します</translation> </message> <message> <source>Export the data in the current tab to a file</source> <translation>ファイルの現在のタブを出力します</translation> </message> <message> <source>&amp;Export</source> <translation>&amp;出力</translation> </message> <message> <source>&amp;Delete</source> <translation>&amp;削除</translation> </message> <message> <source>Choose the address to send coins to</source> <translation>送付するコインのアドレスを選択</translation> </message> <message> <source>Choose the address to receive coins with</source> <translation>受け取るコインのアドレスを選択</translation> </message> <message> <source>C&amp;hoose</source> <translation>C&amp;選択</translation> </message> <message> <source>Sending addresses</source> <translation>アドレス送信</translation> </message> <message> <source>Receiving addresses</source> <translation>アドレス受信</translation> </message> <message> <source>These are your Dancoin addresses for sending payments. Always check the amount and the receiving address before sending coins.</source> <translation>支払送信するためのビットコインアドレスです。コインを送付する前に、いつも残高と受信アドレスの確認をしてください。</translation> </message> <message> <source>These are your Dancoin addresses for receiving payments. It is recommended to use a new receiving address for each transaction.</source> <translation>支払の受信をするためのビットコインアドレスです。それぞれの処理に新規に受信アドレスを使用することを推奨します。</translation> </message> <message> <source>&amp;Copy Address</source> <translation>&amp;アドレスのコピー</translation> </message> <message> <source>Copy &amp;Label</source> <translation>コピー&amp;ラベル</translation> </message> <message> <source>&amp;Edit</source> <translation>&amp;編集</translation> </message> <message> <source>Export Address List</source> <translation>アドレス一覧の出力</translation> </message> <message> <source>Comma separated file (*.csv)</source> <translation>カンマ区切りのファイル(*.csv)</translation> </message> <message> <source>Exporting Failed</source> <translation>出力の失敗</translation> </message> <message> <source>There was an error trying to save the address list to %1. 
Please try again.</source> <translation>アドレス一覧を%1へ保存する際にエラーが発生しました。もう一度実行してください。</translation> </message> </context> <context> <name>AddressTableModel</name> <message> <source>Label</source> <translation>ラベル</translation> </message> <message> <source>Address</source> <translation>アドレス</translation> </message> <message> <source>(no label)</source> <translation>(ラベルなし)</translation> </message> </context> <context> <name>AskPassphraseDialog</name> <message> <source>Passphrase Dialog</source> <translation>パスフレーズダイヤログ</translation> </message> <message> <source>Enter passphrase</source> <translation>パスフレーズの入力</translation> </message> <message> <source>New passphrase</source> <translation>新規のパスフレーズ</translation> </message> <message> <source>Repeat new passphrase</source> <translation>新規パスフレーズの繰り返し</translation> </message> <message> <source>Enter the new passphrase to the wallet.&lt;br/&gt;Please use a passphrase of &lt;b&gt;ten or more random characters&lt;/b&gt;, or &lt;b&gt;eight or more words&lt;/b&gt;.</source> <translation>財布に新しいパスフレーズを入力します。&lt;br/&gt;&lt;b&gt;10文字以上のランダムな文字列&lt;/b&gt;、もしくは&lt;b&gt;8単語以上&lt;/b&gt;のパスフレーズを使用してください。</translation> </message> <message> <source>Encrypt wallet</source> <translation>財布を暗号化します</translation> </message> <message> <source>This operation needs your wallet passphrase to unlock the wallet.</source> <translation>この操作は、財布のロックを解除するために財布のパスフレーズが必要です。</translation> </message> <message> <source>Unlock wallet</source> <translation>財布のロック解除</translation> </message> <message> <source>This operation needs your wallet passphrase to decrypt the wallet.</source> <translation>この操作は、財布を復号化するために財布のパスフレーズが必要です。</translation> </message> <message> <source>Decrypt wallet</source> <translation>財布の復号化</translation> </message> <message> <source>Change passphrase</source> <translation>パスフレーズの変更</translation> </message> <message> <source>Enter the old passphrase and new passphrase to the wallet.</source> <translation>財布に古いパスフレーズと新規パスフレーズを入力します。</translation> </message> <message> <source>Confirm wallet encryption</source> <translation>財布の暗号化を確認します</translation> </message> <message> <source>Warning: If you encrypt your wallet and lose your passphrase, you will &lt;b&gt;LOSE ALL OF YOUR DANCOINS&lt;/b&gt;!</source> <translation>注意: 財布を暗号化してパスフレーズを忘れた場合、&lt;b&gt;あなたのビットコインはすべて失われます&lt;/b&gt;!</translation> </message> <message> <source>Are you sure you wish to encrypt your wallet?</source> <translation>財布を暗号化してもよろしいですか?</translation> </message> <message> <source>Wallet encrypted</source> <translation>暗号化された財布</translation> </message> <message> <source>%1 will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your dancoins from being stolen by malware infecting your computer.</source> <translation>暗号化処理を完了するため、%1はこれから終了します。財布を暗号化しても、コンピュータに感染したマルウェアによるビットコインの盗難を完全に防ぐことはできないことを覚えておいてください。</translation> </message> <message> <source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file.
For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source> <translation>重要: 以前の財布ファイルのバックアップは、新しく作成された暗号化された財布ファイルに置き換えられるべきです。セキュリティの観点から、暗号化されていない以前の財布ファイルは、新しく暗号化された財布が利用開始になり次第、間もなく使用できなくなります。</translation> </message> <message> <source>Wallet encryption failed</source> <translation>財布の暗号化に失敗しました。</translation> </message> <message> <source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source> <translation>内部エラーにより財布の暗号化に失敗しました。財布は暗号化されていません。</translation> </message> <message> <source>The supplied passphrases do not match.</source> <translation>提供されたパスフレーズは一致しません。</translation> </message> <message> <source>Wallet unlock failed</source> <translation>財布のロック解除に失敗しました。</translation> </message> <message> <source>The passphrase entered for the wallet decryption was incorrect.</source> <translation>財布の復号化のために入力されたパスフレーズが間違っています。</translation> </message> <message> <source>Wallet decryption failed</source> <translation>財布の復号化に失敗しました。</translation> </message> <message> <source>Wallet passphrase was successfully changed.</source> <translation>財布パスフレーズの変更に成功しました。</translation> </message> <message> <source>Warning: The Caps Lock key is on!</source> <translation>注意: CapsLockキーが有効になっています!</translation> </message> </context> <context> <name>BanTableModel</name> <message> <source>IP/Netmask</source> <translation>IPアドレス/ネットマスク</translation> </message> <message> <source>Banned Until</source> <translation>まで禁止</translation> </message> </context> <context> <name>BitcoinGUI</name> <message> <source>Sign &amp;message...</source> <translation>サイン &amp;メッセージ...</translation> </message> <message> <source>Synchronizing with network...</source> <translation>ネットワークで同期中...</translation> </message> <message> <source>&amp;Overview</source> <translation>&amp;概要</translation> </message> <message> <source>Node</source> <translation>ノード</translation> </message> <message> <source>Show general overview of wallet</source> <translation>財布の一般概要を表示</translation> </message> <message> <source>&amp;Transactions</source> <translation>&amp;処理</translation> </message> <message> <source>Browse transaction history</source> <translation>処理の履歴を見る</translation> </message> <message> <source>E&amp;xit</source> <translation>退出</translation> </message> <message> <source>Quit application</source> <translation>アプリケーションを終了</translation> </message> <message> <source>&amp;About %1</source> <translation>&amp;%1について</translation> </message> <message> <source>Show information about %1</source> <translation>%1についての情報を見る</translation> </message> <message> <source>About &amp;Qt</source> <translation>&amp;Qtについて</translation> </message> <message> <source>Show information about Qt</source> <translation>Qtについての情報を見る</translation> </message> <message> <source>&amp;Options...</source> <translation>&amp;オプション...</translation> </message> <message> <source>Modify configuration options for %1</source> <translation>%1のオプション設定を変更する</translation> </message> <message> <source>&amp;Encrypt Wallet...</source> <translation>&amp;財布の暗号化...</translation> </message> <message> <source>&amp;Backup Wallet...</source> <translation>&amp;財布のバックアップ...</translation> </message> <message> <source>&amp;Change Passphrase...</source> <translation>&amp;パスフレーズの変更...</translation> </message> <message> <source>&amp;Sending addresses...</source> <translation>&amp;アドレスの送信...</translation> </message> <message> <source>&amp;Receiving addresses...</source> 
<translation>&amp;アドレスの受信...</translation> </message> <message> <source>Open &amp;URI...</source> <translation>オープン&amp;URI...</translation> </message> <message> <source>Click to disable network activity.</source> <translation>ネットワーク処理を無効にするためにクリックする</translation> </message> <message> <source>Network activity disabled.</source> <translation>無効化されたネットワーク</translation> </message> <message> <source>Click to enable network activity again.</source> <translation>再度、ネットワーク処理を有効化するために、クリック</translation> </message> <message> <source>Syncing Headers (%1%)...</source> <translation>ヘッダーを同期中 (%1%)...</translation> </message> <message> <source>Reindexing blocks on disk...</source> <translation>ディスクのブロックの再インデックス中...</translation> </message> <message> <source>Send coins to a Dancoin address</source> <translation>ビットコインアドレスにコインを送信</translation> </message> <message> <source>Backup wallet to another location</source> <translation>他の場所に財布をバックアップ</translation> </message> <message> <source>Change the passphrase used for wallet encryption</source> <translation>財布の暗号化に使用するパスフレーズを変更</translation> </message> <message> <source>&amp;Debug window</source> <translation>&amp;デバッグ用ウィンドウ</translation> </message> <message> <source>Open debugging and diagnostic console</source> <translation>デバッグと診断のコンソールを開く</translation> </message> <message> <source>&amp;Verify message...</source> <translation>&amp;メッセージの確認...</translation> </message> <message> <source>Dancoin</source> <translation>ビットコイン</translation> </message> <message> <source>Wallet</source> <translation>財布</translation> </message> <message> <source>&amp;Send</source> <translation>&amp;送信</translation> </message> <message> <source>&amp;Receive</source> <translation>&amp;受信</translation> </message> <message> <source>&amp;Show / Hide</source> <translation>&amp;表示/ 非表示</translation> </message> <message> <source>Show or hide the main Window</source> <translation>メインウィンドウの表示もしくは非表示</translation> </message> <message> <source>Encrypt the private keys that belong to your wallet</source> <translation>財布に属するプライベートキーの暗号化</translation> </message> <message> <source>Sign messages with your Dancoin addresses to prove you own them</source> <translation>所有者であることを証明するためにビットコインアドレスのメッセージにサイン</translation> </message> <message> <source>Verify messages to ensure they were signed with specified Dancoin addresses</source> <translation>署名された特定のビットコインアドレスを確認するために、メッセージを確認</translation> </message> <message> <source>&amp;File</source> <translation>&amp;ファイル</translation> </message> <message> <source>&amp;Settings</source> <translation>&amp;設定</translation> </message> <message> <source>&amp;Help</source> <translation>&amp;ヘルプ</translation> </message> <message> <source>Tabs toolbar</source> <translation>ツールバータブ</translation> </message> <message> <source>Request payments (generates QR codes and dancoin: URIs)</source> <translation>支払の要求 (QRコードとビットコインのURIを作成)</translation> </message> <message> <source>Show the list of used sending addresses and labels</source> <translation>送信に使用したアドレスとラベルの一覧を表示</translation> </message> <message> <source>Show the list of used receiving addresses and labels</source> <translation>受信に使用したアドレスとラベルの一覧を表示</translation> </message> <message> <source>Open a dancoin: URI or payment request</source> <translation>ビットコインのURIまたは支払要求を開く</translation> </message> <message> <source>&amp;Command-line options</source> <translation>&amp;コマンドラインのオプション</translation> </message> <message> <source>Indexing blocks on disk...</source> <translation>ディスクのブロックのインデックス化中...</translation> </message> 
<message> <source>Processing blocks on disk...</source> <translation>ディスクのブロックの処理中...</translation> </message> <message> <source>%1 behind</source> <translation>%1 遅れ</translation> </message> <message> <source>Last received block was generated %1 ago.</source> <translation>最後の受信ブロックは%1前に生成されました。</translation> </message> <message> <source>Transactions after this will not yet be visible.</source> <translation>これ以降のトランザクションは、まだ表示されません。</translation> </message> <message> <source>Error</source> <translation>エラー</translation> </message> <message> <source>Warning</source> <translation>警告</translation> </message> <message> <source>Information</source> <translation>情報</translation> </message> <message> <source>Up to date</source> <translation>最新の状態</translation> </message> <message> <source>Show the %1 help message to get a list with possible Dancoin command-line options</source> <translation>利用可能なビットコインのコマンドラインオプションの一覧を見るには、%1のヘルプメッセージを表示</translation> </message> <message> <source>%1 client</source> <translation>クライアント%1</translation> </message> <message> <source>Connecting to peers...</source> <translation>ピアに接続中...</translation> </message> <message> <source>Catching up...</source> <translation>追いつき中...</translation> </message> <message> <source>Date: %1 </source> <translation>日付: %1 </translation> </message> <message> <source>Amount: %1 </source> <translation>残高: %1 </translation> </message> <message> <source>Type: %1 </source> <translation>タイプ: %1 </translation> </message> <message> <source>Label: %1 </source> <translation>ラベル: %1 </translation> </message> <message> <source>Address: %1 </source> <translation>アドレス: %1 </translation> </message> <message> <source>Sent transaction</source> <translation>処理送信</translation> </message> <message> <source>Incoming transaction</source> <translation>処理受信</translation> </message> <message> <source>HD key generation is &lt;b&gt;enabled&lt;/b&gt;</source> <translation>HDキー生成は&lt;b&gt;有効化&lt;/b&gt;されています</translation> </message> <message> <source>HD key generation is &lt;b&gt;disabled&lt;/b&gt;</source> <translation>HDキー生成は&lt;b&gt;無効化&lt;/b&gt;されています</translation> </message> <message> <source>Wallet is &lt;b&gt;encrypted&lt;/b&gt; and currently &lt;b&gt;unlocked&lt;/b&gt;</source> <translation>財布は&lt;b&gt;暗号化され&lt;/b&gt;、現在&lt;b&gt;ロック解除されています&lt;/b&gt;</translation> </message> <message> <source>Wallet is &lt;b&gt;encrypted&lt;/b&gt; and currently &lt;b&gt;locked&lt;/b&gt;</source> <translation>財布は&lt;b&gt;暗号化され&lt;/b&gt;、現在&lt;b&gt;ロックされています&lt;/b&gt;</translation> </message> <message> <source>A fatal error occurred.
Dancoin can no longer continue safely and will quit.</source> <translation>致命的なエラーが発生しました。ビットコインは安全に継続できなくなったため、終了します。</translation> </message> </context> <context> <name>CoinControlDialog</name> <message> <source>Coin Selection</source> <translation>コイン選択</translation> </message> <message> <source>Quantity:</source> <translation>量:</translation> </message> <message> <source>Bytes:</source> <translation>バイト:</translation> </message> <message> <source>Amount:</source> <translation>残高:</translation> </message> <message> <source>Fee:</source> <translation>手数料:</translation> </message> <message> <source>Dust:</source> <translation>ダスト:</translation> </message> <message> <source>After Fee:</source> <translation>手数料差引後:</translation> </message> <message> <source>Change:</source> <translation>お釣り:</translation> </message> <message> <source>(un)select all</source> <translation>全て選択する(しない)</translation> </message> <message> <source>Tree mode</source> <translation>ツリー型</translation> </message> <message> <source>List mode</source> <translation>リストモード</translation> </message> <message> <source>Amount</source> <translation>残高</translation> </message> <message> <source>Received with label</source> <translation>受信されたラベル</translation> </message> <message> <source>Received with address</source> <translation>受信されたアドレス</translation> </message> <message> <source>Date</source> <translation>日付</translation> </message> <message> <source>Confirmations</source> <translation>確認</translation> </message> <message> <source>Confirmed</source> <translation>確認済み</translation> </message> <message> <source>Copy address</source> <translation>アドレスのコピー</translation> </message> <message> <source>Copy label</source> <translation>ラベルのコピー</translation> </message> <message> <source>Copy amount</source> <translation>残高のコピー</translation> </message> <message> <source>Copy transaction ID</source> <translation>トランザクションIDのコピー</translation> </message> <message> <source>Lock unspent</source> <translation>未消費のロック</translation> </message> <message> <source>Unlock unspent</source> <translation>未消費のロック解除</translation> </message> <message> <source>Copy quantity</source> <translation>量をコピー</translation> </message> <message> <source>Copy fee</source> <translation>料金をコピー</translation> </message> <message> <source>Copy after fee</source> <translation>手数料差引後をコピー</translation> </message> <message> <source>Copy bytes</source> <translation>バイトをコピー</translation> </message> <message> <source>Copy dust</source> <translation>ダストをコピー</translation> </message> <message> <source>Copy change</source> <translation>お釣りをコピー</translation> </message> <message> <source>(%1 locked)</source> <translation>(%1 ロック済み)</translation> </message> <message> <source>yes</source> <translation>はい</translation> </message> <message> <source>no</source> <translation>いいえ</translation> </message> <message> <source>This label turns red if any recipient receives an amount smaller than the current dust threshold.</source> <translation>いずれかの受取人が現在のダスト閾値より少ない金額を受け取る場合、このラベルは赤くなります。</translation> </message> <message> <source>Can vary +/- %1 satoshi(s) per input.</source> <translation>入力ごとに +/- %1 satoshi 変動する可能性があります。</translation> </message> <message> <source>(no label)</source> <translation>(ラベルなし)</translation> </message> <message> <source>change from %1 (%2)</source> <translation>%1 (%2) からのお釣り</translation> </message> <message> <source>(change)</source> <translation>(お釣り)</translation> </message> </context> <context> <name>EditAddressDialog</name> <message> <source>Edit Address</source>
<translation>アドレス編集</translation> </message> <message> <source>&amp;Label</source> <translation>&amp;ラベル</translation> </message> <message> <source>The label associated with this address list entry</source> <translation>このアドレスリストの項目に関連付けられたラベル</translation> </message> <message> <source>The address associated with this address list entry. This can only be modified for sending addresses.</source> <translation>このアドレスリストの項目に関連付けられたアドレスです。これは送信アドレスの場合のみ変更できます。</translation> </message> <message> <source>&amp;Address</source> <translation>&amp;アドレス</translation> </message> <message> <source>New receiving address</source> <translation>新規の受信アドレス</translation> </message> <message> <source>New sending address</source> <translation>新規の送信アドレス</translation> </message> <message> <source>Edit receiving address</source> <translation>受信アドレスを編集</translation> </message> <message> <source>Edit sending address</source> <translation>送信アドレスの編集</translation> </message> <message> <source>The entered address "%1" is not a valid Dancoin address.</source> <translation>入力されたアドレス "%1" は正当なビットコインアドレスではありません。</translation> </message> <message> <source>The entered address "%1" is already in the address book.</source> <translation>入力されたアドレス "%1" はすでにアドレス帳にあります。</translation> </message> <message> <source>Could not unlock wallet.</source> <translation>財布をロック解除できませんでした。</translation> </message> <message> <source>New key generation failed.</source> <translation>新規のキー生成に失敗しました。</translation> </message> </context> <context> <name>FreespaceChecker</name> <message> <source>A new data directory will be created.</source> <translation>新しいデータディレクトリが作成されます。</translation> </message> <message> <source>name</source> <translation>名前</translation> </message> <message> <source>Directory already exists. Add %1 if you intend to create a new directory here.</source> <translation>ディレクトリはすでに存在しています。ここに新しいディレクトリを作成する場合は、%1を追加してください。</translation> </message> <message> <source>Path already exists, and is not a directory.</source> <translation>パスはすでに存在しており、ディレクトリではありません。</translation> </message> <message> <source>Cannot create data directory here.</source> <translation>ここにデータディレクトリを作成することはできません。</translation> </message> </context> <context> <name>HelpMessageDialog</name> <message> <source>version</source> <translation>バージョン</translation> </message> <message> <source>(%1-bit)</source> <translation>(%1-ビット)</translation> </message> <message> <source>About %1</source> <translation>%1について</translation> </message> <message> <source>Command-line options</source> <translation>コマンドラインオプション</translation> </message> <message> <source>Usage:</source> <translation>使用方法:</translation> </message> <message> <source>command-line options</source> <translation>コマンドラインオプション</translation> </message> <message> <source>UI Options:</source> <translation>UIオプション:</translation> </message> <message> <source>Choose data directory on startup (default: %u)</source> <translation>起動時のデータディレクトリの選択 (デフォルト: %u)</translation> </message> <message> <source>Set language, for example "de_DE" (default: system locale)</source> <translation>言語設定, 例 "de_DE" (デフォルト: システムロケール)</translation> </message> <message> <source>Start minimized</source> <translation>最小化起動</translation> </message> <message> <source>Set SSL root certificates for payment request (default: -system-)</source> <translation>支払要求のSSLルート証明の設定 (デフォルト: -システム-)</translation> </message> <message> <source>Show splash screen on startup (default: %u)</source> <translation>起動スクリーンを表示 (デフォルト: %u)</translation> </message> <message> <source>Reset all settings changed
in the GUI</source> <translation>GUIで変更したすべての設定をリセット</translation> </message> </context> <context> <name>Intro</name> <message> <source>Welcome</source> <translation>ようこそ</translation> </message> <message> <source>Welcome to %1.</source> <translation>%1へようこそ。</translation> </message> <message> <source>As this is the first time the program is launched, you can choose where %1 will store its data.</source> <translation>プログラムの初回起動時なので、%1がデータを保存する場所を選択できます。</translation> </message> <message> <source>Use the default data directory</source> <translation>デフォルトのデータディレクトリを使用</translation> </message> <message> <source>Use a custom data directory:</source> <translation>カスタムデータディレクトリを使用:</translation> </message> <message> <source>Error: Specified data directory "%1" cannot be created.</source> <translation>エラー: 指定されたデータディレクトリ "%1" を作成できません。</translation> </message> <message> <source>Error</source> <translation>エラー</translation> </message> </context> <context> <name>ModalOverlay</name> <message> <source>Form</source> <translation>フォーム</translation> </message> <message> <source>Recent transactions may not yet be visible, and therefore your wallet's balance might be incorrect. This information will be correct once your wallet has finished synchronizing with the dancoin network, as detailed below.</source> <translation>最近のトランザクションが表示できない可能性があります。従って、ウォレットの残高が正しくない可能性があります。この情報はビットコインネットワークに接続し、同期処理を完了させると正しくなります。詳細は以下になります。</translation> </message> <message> <source>Attempting to spend dancoins that are affected by not-yet-displayed transactions will not be accepted by the network.</source> <translation>まだ表示されていないトランザクションの影響を受けるビットコインを使用しようとしても、ネットワークには受け入れられません。</translation> </message> <message> <source>Number of blocks left</source> <translation>残りのブロック数</translation> </message> <message> <source>Unknown...</source> <translation>不明...</translation> </message> <message> <source>Last block time</source> <translation>最後のブロック時間</translation> </message> <message> <source>Progress</source> <translation>進捗</translation> </message> <message> <source>Progress increase per hour</source> <translation>1時間あたりの進捗増加</translation> </message> <message> <source>calculating...</source> <translation>計算中...</translation> </message> <message> <source>Estimated time left until synced</source> <translation>同期化完了までの予測時間</translation> </message> <message> <source>Hide</source> <translation>隠す</translation> </message> <message> <source>Unknown. Syncing Headers (%1)...</source> <translation>不明. 
ヘッダーの同期中 (%1)...</translation> </message> </context> <context> <name>OpenURIDialog</name> <message> <source>Open URI</source> <translation>オープンURI</translation> </message> <message> <source>Open payment request from URI or file</source> <translation>URIもしくはファイルからの支払い要求を開く</translation> </message> <message> <source>URI:</source> <translation>URI:</translation> </message> <message> <source>Select payment request file</source> <translation>支払い要求ファイルの選択</translation> </message> <message> <source>Select payment request file to open</source> <translation>開く支払い要求ファイルを選択</translation> </message> </context> <context> <name>OptionsDialog</name> <message> <source>Options</source> <translation>オプション</translation> </message> <message> <source>&amp;Main</source> <translation>&amp;メイン</translation> </message> <message> <source>Automatically start %1 after logging in to the system.</source> <translation>システムログイン時に%1を自動的に開始</translation> </message> <message> <source>&amp;Start %1 on system login</source> <translation>&amp;システムログインで%1を開始</translation> </message> <message> <source>Size of &amp;database cache</source> <translation>&amp;データベースキャッシュのサイズ</translation> </message> <message> <source>MB</source> <translation>メガバイト</translation> </message> <message> <source>Number of script &amp;verification threads</source> <translation>スクリプト&amp;検証スレッドの数</translation> </message> <message> <source>Accept connections from outside</source> <translation>外部からの接続承認</translation> </message> <message> <source>Allow incoming connections</source> <translation>着信接続を許可</translation> </message> <message> <source>IP address of the proxy (e.g. IPv4: 127.0.0.1 / IPv6: ::1)</source> <translation>プロキシのIPアドレス (例 IPv4: 127.0.0.1 / IPv6: ::1)</translation> </message> <message> <source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Exit in the menu.</source> <translation>ウィンドウが閉じられた時、アプリケーションを終了せずに最小化します。このオプションが有効な時は、アプリケーションはメニューの終了が選択された時のみ終了します。</translation> </message> <message> <source>Third party URLs (e.g. a block explorer) that appear in the transactions tab as context menu items. %s in the URL is replaced by transaction hash. 
Multiple URLs are separated by vertical bar |.</source> <translation>サードパーティのURL(例: ブロックエクスプローラ)は、処理タブのコンテキストメニュー項目として表示されます。URLの%sは処理ハッシュによって置き換えられます。複数のURLは縦線 | で区切られます。</translation> </message> <message> <source>Third party transaction URLs</source> <translation>サードパーティ処理URL</translation> </message> <message> <source>Active command-line options that override above options:</source> <translation>上記のオプションを上書きする有効なコマンドラインオプション:</translation> </message> <message> <source>Reset all client options to default.</source> <translation>全てのクライアントオプションをデフォルトにリセット</translation> </message> <message> <source>&amp;Reset Options</source> <translation>&amp;リセットオプション</translation> </message> <message> <source>&amp;Network</source> <translation>&amp;ネットワーク</translation> </message> <message> <source>(0 = auto, &lt;0 = leave that many cores free)</source> <translation>(0 = 自動, &lt;0 = 使用しないコア数)</translation> </message> <message> <source>W&amp;allet</source> <translation>ウォレット</translation> </message> <message> <source>Expert</source> <translation>専門家</translation> </message> <message> <source>Enable coin &amp;control features</source> <translation>コイン&amp;制御機能の有効化</translation> </message> <message> <source>If you disable the spending of unconfirmed change, the change from a transaction cannot be used until that transaction has at least one confirmation. This also affects how your balance is computed.</source> <translation>未承認のお釣りの使用を無効化した場合、そのお釣りは処理が少なくとも1回承認されるまで使用できません。また、これは残高の計算方法にも影響します。</translation> </message> <message> <source>&amp;Spend unconfirmed change</source> <translation>&amp;未承認のお釣りを使用する</translation> </message> <message> <source>Automatically open the Dancoin client port on the router. This only works when your router supports UPnP and it is enabled.</source> <translation>自動的にルータ上のビットコインのクライアントポートを開きます。この機能はルータがUPnPをサポートし、有効であるときに動作します。</translation> </message> <message> <source>Map port using &amp;UPnP</source> <translation>&amp;UPnPを使用してポートを割り当てます。</translation> </message> <message> <source>Connect to the Dancoin network through a SOCKS5 proxy.</source> <translation>SOCKS5プロキシを使用してビットコインネットワークへ接続</translation> </message> <message> <source>&amp;Connect through SOCKS5 proxy (default proxy):</source> <translation>&amp;SOCKS5プロキシを使用して接続 (デフォルト プロキシ):</translation> </message> <message> <source>Proxy &amp;IP:</source> <translation>プロキシ &amp;IP:</translation> </message> <message> <source>&amp;Port:</source> <translation>&amp;ポート:</translation> </message> <message> <source>Port of the proxy (e.g. 
9050)</source> <translation>プロキシのポート (例 9050)</translation> </message> <message> <source>Used for reaching peers via:</source> <translation>ピア経由での使用:</translation> </message> <message> <source>IPv4</source> <translation>IPv4</translation> </message> <message> <source>IPv6</source> <translation>IPv6</translation> </message> <message> <source>Tor</source> <translation>Tor</translation> </message> <message> <source>Connect to the Dancoin network through a separate SOCKS5 proxy for Tor hidden services.</source> <translation>Tor秘匿ネットワークのための区切られたSOCKS5を経由して、ビットコインネットワークに接続</translation> </message> <message> <source>Use separate SOCKS5 proxy to reach peers via Tor hidden services:</source> <translation>Tor匿名サービスを経由して到達ピアへ、区切られたSOCKS5 プロキシを使用:</translation> </message> <message> <source>&amp;Window</source> <translation>&amp;画面</translation> </message> <message> <source>&amp;Hide the icon from the system tray.</source> <translation>&amp;システムトレイのアイコンを非表示.</translation> </message> <message> <source>Hide tray icon</source> <translation>トレイアイコンを非表示</translation> </message> <message> <source>Show only a tray icon after minimizing the window.</source> <translation>画面最小化時にトレイアイコンを表示する。</translation> </message> <message> <source>&amp;Minimize to the tray instead of the taskbar</source> <translation>&amp;ツールバーの代わりにトレイへ最小化</translation> </message> <message> <source>M&amp;inimize on close</source> <translation>クローズで最小化</translation> </message> <message> <source>&amp;Display</source> <translation>&amp;画面</translation> </message> <message> <source>User Interface &amp;language:</source> <translation>ユーザインターフェース &amp;言語:</translation> </message> <message> <source>The user interface language can be set here. This setting will take effect after restarting %1.</source> <translation>ユーザインタフェースの言語はここで設定されます。設定は%1を再起動後、有効になります。</translation> </message> <message> <source>&amp;Unit to show amounts in:</source> <translation>&amp;残高表示の単位:</translation>
<message> <source>Choose the default subdivision unit to show in the interface and when sending coins.</source> <translation>コイン送付時のインターフェースに表示するデフォルトの除算単位の選択</translation> </message> <message> <source>Whether to show coin control features or not.</source> <translation>コイン制御機能を表示するかどうか。</translation> </message> <message> <source>&amp;OK</source> <translation>&amp;OK</translation> </message> <message> <source>&amp;Cancel</source> <translation>&amp;キャンセル</translation> </message> <message> <source>default</source> <translation>デフォルト</translation> </message> <message> <source>none</source> <translation>なし</translation> </message> <message> <source>Confirm options reset</source> <translation>オプションリセットの確認</translation> </message> <message> <source>Client restart required to activate changes.</source> <translation>変更を有効にするため、クライアントの再起動が必要です。</translation> </message> <message> <source>Client will be shut down. Do you want to proceed?</source> <translation>クライアントは停止します。継続してもよろしいでしょうか?</translation> </message> <message> <source>This change would require a client restart.</source> <translation>変更は、クライアントの再起動が必要になる場合があります。</translation> </message> <message> <source>The supplied proxy address is invalid.</source> <translation>提供されたプロキシアドレスは無効です。</translation> </message> </context> <context> <name>OverviewPage</name> <message> <source>Form</source> <translation>フォーム</translation> </message> <message> <source>The displayed information may be out of date. Your wallet automatically synchronizes with the Dancoin network after a connection is established, but this process has not completed yet.</source> <translation>表示されている情報は、期限切れの可能性があります。ウォレットは自動的にビットコインネットワークへの接続が確立した後に同期化されます。しかし、処理はまだ、完了していません。</translation> </message> <message> <source>Watch-only:</source> <translation>読み取り専用:</translation> </message> <message> <source>Available:</source> <translation>有効:</translation> </message> <message> <source>Your current spendable balance</source> <translation>現在の支払い可能な残高</translation> </message> <message> <source>Pending:</source> <translation>未決定:</translation> </message> <message> <source>Total of transactions that have yet to be confirmed, and do not yet count toward the spendable balance</source> <translation>全てのトランザクションはまだ、確認されておらず、支払い可能な残高も数えられていません。</translation> </message> <message> <source>Immature:</source> <translation>未完成:</translation> </message> <message> <source>Mined balance that has not yet matured</source> <translation>まだ、完成していない、採掘された残高</translation> </message> <message> <source>Balances</source> <translation>残高</translation> </message> <message> <source>Total:</source> <translation>合計:</translation> </message> <message> <source>Your current total balance</source> <translation>現在の合計残高</translation> </message> <message> <source>Your current balance in watch-only addresses</source> <translation>読み取り専用アドレス内の現在の残高</translation> </message> <message> <source>Spendable:</source> <translation>支払い可能:</translation> </message> <message> <source>Recent transactions</source> <translation>最近の処理</translation> </message> <message> <source>Unconfirmed transactions to watch-only addresses</source> <translation>読み取り専用アドレスへの未確認の処理</translation> </message> <message> <source>Mined balance in watch-only addresses that has not yet matured</source> <translation>まだ、完成していない、読み取り専用アドレスの中の発掘された残高</translation> </message> <message> <source>Current total balance in watch-only addresses</source> <translation>読み取り専用アドレス内の現在の合計残高</translation> </message> </context> <context> <name>PaymentServer</name> <message> 
<source>Payment request error</source> <translation>支払い要求エラー</translation> </message> <message> <source>Cannot start dancoin: click-to-pay handler</source> <translation>ビットコインを開始できません: click-to-pay handler</translation> </message> <message> <source>URI handling</source> <translation>URI ハンドリング</translation> </message> <message> <source>Payment request fetch URL is invalid: %1</source> <translation>取得した支払要求は無効です: %1</translation> </message> <message> <source>Invalid payment address %1</source> <translation>無効な支払アドレス %1</translation> </message> <message> <source>URI cannot be parsed! This can be caused by an invalid Dancoin address or malformed URI parameters.</source> <translation>URLはパースできませんでした! 原因はビットコインアドレスが無効であるか、URIパラメータの形式が間違っている可能性があります。</translation> </message> <message> <source>Payment request file handling</source> <translation>支払要求ファイル操作</translation> </message> <message> <source>Payment request file cannot be read! This can be caused by an invalid payment request file.</source> <translation>支払要求ファイルが読み込めませんでした! 支払要求ファイルが無効である可能性があります。</translation> </message> <message> <source>Payment request rejected</source> <translation>支払要求却下</translation> </message> <message> <source>Payment request network doesn't match client network.</source> <translation>支払要求ネットワークはクライアントネットワークと一致しませんでした。</translation> </message> <message> <source>Payment request expired.</source> <translation>支払要求期限切れ</translation> </message> <message> <source>Payment request is not initialized.</source> <translation>支払要求は初期化されていません。</translation> </message> <message> <source>Unverified payment requests to custom payment scripts are unsupported.</source> <translation>カスタム支払スクリプトへの未検証の支払要求は、サポートされていません。</translation> </message> <message> <source>Invalid payment request.</source> <translation>無効な支払要求:</translation> </message> <message> <source>Requested payment amount of %1 is too small (considered dust).</source> <translation>要求支払額%1は小さすぎます (ダストを検討してください).</translation> </message> <message> <source>Refund from %1</source> <translation>%1からの払い戻し</translation> </message> <message> <source>Payment request %1 is too large (%2 bytes, allowed %3 bytes).</source> <translation>支払要求 %1 は大きすぎます (%2 バイト, %3 バイトまで許可).</translation> </message> <message> <source>Error communicating with %1: %2</source> <translation>通信エラー:%1 :%2</translation> </message> <message> <source>Payment request cannot be parsed!</source> <translation>支払要求は解析できませんでした!</translation> </message> <message> <source>Bad response from server %1</source> <translation>サーバ%1からの不正なレスポンス</translation> </message> <message> <source>Network request error</source> <translation>ネットワーク要求エラー</translation> </message> <message> <source>Payment acknowledged</source> <translation>認証済み支払</translation> </message> </context> <context> <name>PeerTableModel</name> <message> <source>User Agent</source> <translation>ユーザエージェント</translation> </message> <message> <source>Node/Service</source> <translation>ノード/サービス</translation> </message> <message> <source>NodeId</source> <translation>ノードID</translation> </message> <message> <source>Ping</source> <translation>Ping</translation> </message> </context> <context> <name>QObject</name> <message> <source>Amount</source> <translation>残高</translation> </message> <message> <source>Enter a Dancoin address (e.g. 
%1)</source> <translation>ビットコインアドレスの入力 (例 %1)</translation> </message> <message> <source>%1 d</source> <translation>%1 d</translation> </message> <message> <source>%1 h</source> <translation>%1 h</translation> </message> <message> <source>%1 m</source> <translation>%1 m</translation> </message> <message> <source>%1 s</source> <translation>%1 s</translation> </message> <message> <source>None</source> <translation>なし</translation> </message> <message> <source>N/A</source> <translation>該当なし</translation> </message> <message> <source>%1 ms</source> <translation>%1 ms</translation> </message> <message> <source>%1 and %2</source> <translation>%1 と %2</translation> </message> <message> <source>%1 didn't yet exit safely...</source> <translation>%1 はまだ安全に終了していません...</translation> </message> </context> <context> <name>QObject::QObject</name> <message> <source>Error: Specified data directory "%1" does not exist.</source> <translation>エラー: 指定されたデータディレクトリ "%1" は存在しません。</translation> </message> <message> <source>Error: Cannot parse configuration file: %1. Only use key=value syntax.</source> <translation>エラー: 構成ファイルが解析できませんでした: %1. キー=値の構文のみが使用できます。</translation> </message> <message> <source>Error: %1</source> <translation>エラー: %1</translation> </message> </context> <context> <name>QRImageWidget</name> <message> <source>&amp;Save Image...</source> <translation>&amp;イメージ保存...</translation> </message> <message> <source>&amp;Copy Image</source> <translation>&amp;イメージコピー</translation> </message> <message> <source>Save QR Code</source> <translation>QRコード保存</translation> </message> <message> <source>PNG Image (*.png)</source> <translation>PNGイメージ(*.png)</translation> </message> </context> <context> <name>RPCConsole</name> <message> <source>N/A</source> <translation>該当なし</translation> </message> <message> <source>Client version</source> <translation>クライアントバージョン</translation> </message> <message> <source>&amp;Information</source> <translation>&amp;情報</translation> </message> <message> <source>Debug window</source> <translation>デバッグ画面</translation> </message> <message> <source>General</source> <translation>一般</translation> </message> <message> <source>Using BerkeleyDB version</source> <translation>使用中のBerkeleyDBバージョン</translation> </message> <message> <source>Datadir</source> <translation>データディレクトリ</translation> </message> <message> <source>Startup time</source> <translation>開始時間</translation> </message> <message> <source>Network</source> <translation>ネットワーク</translation> </message> <message> <source>Name</source> <translation>名前</translation> </message> <message> <source>Number of connections</source> <translation>接続数</translation> </message> <message> <source>Block chain</source> <translation>ブロックチェーン</translation> </message> <message> <source>Current number of blocks</source> <translation>現在のブロック数</translation> </message> <message> <source>Memory Pool</source> <translation>メモリプール</translation> </message> <message> <source>Current number of transactions</source> <translation>現在の処理数</translation> </message> <message> <source>Memory usage</source> <translation>メモリ使用量</translation> </message> <message> <source>Received</source> <translation>受信済み</translation> </message> <message> <source>Sent</source> <translation>送信済み</translation> </message> <message> <source>&amp;Peers</source> <translation>&amp;ピア</translation> </message> <message> <source>Banned peers</source> <translation>禁止ピア</translation> </message> <message> <source>Select a peer to view detailed information.</source> <translation>詳細情報を表示するピアの選択</translation> 
</message> <message> <source>Whitelisted</source> <translation>ホワイトリスト済み</translation> </message> <message> <source>Direction</source> <translation>方向</translation> </message> <message> <source>Version</source> <translation>バージョン</translation> </message> <message> <source>Starting Block</source> <translation>ブロック開始中</translation> </message> <message> <source>Synced Headers</source> <translation>同期化されたヘッダー</translation> </message> <message> <source>Synced Blocks</source> <translation>同期化されたブロック</translation> </message> <message> <source>User Agent</source> <translation>ユーザエージェント</translation> </message> <message> <source>Open the %1 debug log file from the current data directory. This can take a few seconds for large log files.</source> <translation>現在のデータ辞書から%1デバッグログファイルを開きます。ログファイルが大きいため、数秒かかります。</translation> </message> <message> <source>Decrease font size</source> <translation>フォントサイズの縮小</translation> </message> <message> <source>Increase font size</source> <translation>フォントサイズの拡大</translation> </message> <message> <source>Services</source> <translation>サービス</translation> </message> <message> <source>Ban Score</source> <translation>禁止スコア</translation> </message> <message> <source>Connection Time</source> <translation>接続時間</translation> </message> <message> <source>Last Send</source> <translation>最後の送信</translation> </message> <message> <source>Last Receive</source> <translation>最終受信</translation> </message> <message> <source>Ping Time</source> <translation>Ping時間</translation> </message> <message> <source>The duration of a currently outstanding ping.</source> <translation>現在の異常なpingの間隔</translation> </message> <message> <source>Ping Wait</source> <translation>ping待ち</translation> </message> <message> <source>Min Ping</source> <translation>ping最小時間</translation> </message> <message> <source>Time Offset</source> <translation>時間オフセット</translation> </message> <message> <source>Last block time</source> <translation>最後のブロック時間</translation> </message> <message> <source>&amp;Open</source> <translation>&amp;開く</translation> </message> <message> <source>&amp;Console</source> <translation>&amp;コンソール</translation> </message> <message> <source>&amp;Network Traffic</source> <translation>&amp;ネットワークトラヒック</translation> </message> <message> <source>Totals</source> <translation>合計</translation> </message> <message> <source>In:</source> <translation>入力:</translation> </message> <message> <source>Out:</source> <translation>出力:</translation> </message> <message> <source>Debug log file</source> <translation>デバッグログファイル</translation> </message> <message> <source>Clear console</source> <translation>コンソールのクリア</translation> </message> <message> <source>1 &amp;hour</source> <translation>1 &amp;時</translation> </message> <message> <source>1 &amp;day</source> <translation>1 &amp;日</translation> </message> <message> <source>1 &amp;week</source> <translation>1 &amp;週</translation> </message> <message> <source>1 &amp;year</source> <translation>1 &amp;年</translation> </message> <message> <source>&amp;Disconnect</source> <translation>&amp;切断</translation> </message> <message> <source>Ban for</source> <translation>禁止の</translation> </message> <message> <source>&amp;Unban</source> <translation>&amp;禁止しない</translation> </message> <message> <source>Welcome to the %1 RPC console.</source> <translation>%1 RPCコンソールへようこそ</translation> </message> <message> <source>Type &lt;b&gt;help&lt;/b&gt; for an overview of available commands.</source> <translation>&lt;b&gt;help&lt;/b&gt; と入力すると有効なコマンドが表示されます。</translation> </message> 
<message> <source>Network activity disabled</source> <translation>無効化されたネットワーク利用</translation> </message> <message> <source>%1 B</source> <translation>%1 バイト</translation> </message> <message> <source>%1 KB</source> <translation>%1 キロバイト</translation> </message> <message> <source>%1 MB</source> <translation>%1 メガバイト</translation> </message> <message> <source>%1 GB</source> <translation>%1 Gギガバイト</translation> </message> <message> <source>(node id: %1)</source> <translation>(ノードid: %1)</translation> </message> <message> <source>via %1</source> <translation> %1経由</translation> </message> <message> <source>never</source> <translation>いままでない</translation> </message> <message> <source>Inbound</source> <translation>流入</translation> </message> <message> <source>Outbound</source> <translation>流出</translation> </message> <message> <source>Yes</source> <translation>はい</translation> </message> <message> <source>No</source> <translation>いいえ</translation> </message> <message> <source>Unknown</source> <translation>未知</translation> </message> </context> <context> <name>ReceiveCoinsDialog</name> <message> <source>&amp;Amount:</source> <translation>&amp;残高:</translation> </message> <message> <source>&amp;Label:</source> <translation>&amp;ラベル:</translation> </message> <message> <source>&amp;Message:</source> <translation>&amp;メッセージ:</translation> </message> <message> <source>Reuse one of the previously used receiving addresses. Reusing addresses has security and privacy issues. Do not use this unless re-generating a payment request made before.</source> <translation>前回使用された受信アドレスを再利用します。再度アドレスを使用することは、セキュリティとプライバシーの問題になります。再度支払要求を事前に再生成しない限り、これを使用しないようにしてください。</translation> </message> <message> <source>R&amp;euse an existing receiving address (not recommended)</source> <translation>&amp;再度存在している受信アドレスを死闘する(非推奨)</translation> </message> <message> <source>An optional message to attach to the payment request, which will be displayed when the request is opened. Note: The message will not be sent with the payment over the Dancoin network.</source> <translation>支払要求に添付されたオプションメッセージは、要求を開いたときに表示されます。メモ: メッセージは、ビットコインネットワーク上で支払と一緒に送信されません。</translation> </message> <message> <source>An optional label to associate with the new receiving address.</source> <translation>新規受信アドレスと関連するオプションラベル</translation> </message> <message> <source>Use this form to request payments. All fields are &lt;b&gt;optional&lt;/b&gt;.</source> <translation>支払要求のための形式を使用してください。すべてのフィールドは&lt;b&gt;おプション&lt;/b&gt; です。</translation> </message> <message> <source>An optional amount to request. 
Leave this empty or zero to not request a specific amount.</source> <translation>要求のためのオプション残高。特定の金額の要求をしないときは空か0を入力します。</translation> </message> <message> <source>Clear all fields of the form.</source> <translation>入力形式の全てのフィールドをクリア</translation> </message> <message> <source>Clear</source> <translation>クリア</translation> </message> <message> <source>Requested payments history</source> <translation>支払要求履歴</translation> </message> <message> <source>&amp;Request payment</source> <translation>&amp;支払要求</translation> </message> <message> <source>Show the selected request (does the same as double clicking an entry)</source> <translation>選択された要求の表示。(空をダブルクリックしたときと同じ)</translation> </message> <message> <source>Show</source> <translation>表示</translation> </message> <message> <source>Remove the selected entries from the list</source> <translation>リストから選択されたエントリの削除</translation> </message> <message> <source>Remove</source> <translation>削除</translation> </message> <message> <source>Copy URI</source> <translation>コピーURI</translation> </message> <message> <source>Copy label</source> <translation>ラベルのコピー</translation> </message> <message> <source>Copy message</source> <translation>メッセージコピー</translation> </message> <message> <source>Copy amount</source> <translation>残高のコピー</translation> </message> </context> <context> <name>ReceiveRequestDialog</name> <message> <source>QR Code</source> <translation>QRコード</translation> </message> <message> <source>Copy &amp;URI</source> <translation>&amp;URIコピー</translation> </message> <message> <source>Copy &amp;Address</source> <translation>&amp;アドレスコピー</translation> </message> <message> <source>&amp;Save Image...</source> <translation>&amp;イメージ保存...</translation> </message> <message> <source>Request payment to %1</source> <translation>%1のための支払要求</translation> </message> <message> <source>Payment information</source> <translation>支払情報</translation> </message> <message> <source>URI</source> <translation>URI</translation> </message> <message> <source>Address</source> <translation>アドレス</translation> </message> <message> <source>Amount</source> <translation>残高</translation> </message> <message> <source>Label</source> <translation>ラベル</translation> </message> <message> <source>Message</source> <translation>メッセージ</translation> </message> <message> <source>Resulting URI too long, try to reduce the text for label / message.</source> <translation>URIが長すぎるので、ラベルまたはメッセージを短くしてください。</translation> </message> <message> <source>Error encoding URI into QR Code.</source> <translation>QRコードのURIのコード化エラー</translation> </message> </context> <context> <name>RecentRequestsTableModel</name> <message> <source>Date</source> <translation>日付</translation> </message> <message> <source>Label</source> <translation>ラベル</translation> </message> <message> <source>Message</source> <translation>メッセージ</translation> </message> <message> <source>(no label)</source> <translation>(ラベルなし)</translation> </message> <message> <source>(no message)</source> <translation>(メッセージなし)</translation> </message> <message> <source>(no amount requested)</source> <translation>(料金要求なし)</translation> </message> <message> <source>Requested</source> <translation>要求済み</translation> </message> </context> <context> <name>SendCoinsDialog</name> <message> <source>Send Coins</source> <translation>コイン送付</translation> </message> <message> <source>Coin Control Features</source> <translation>コイン制御機能</translation> </message> <message> <source>Inputs...</source> <translation>入力...</translation> </message> <message> <source>automatically 
selected</source> <translation>自動的に選択される</translation> </message> <message> <source>Insufficient funds!</source> <translation>残高不十分!</translation> </message> <message> <source>Quantity:</source> <translation>量:</translation> </message> <message> <source>Bytes:</source> <translation>バイト:</translation> </message> <message> <source>Amount:</source> <translation>残高:</translation> </message> <message> <source>Fee:</source> <translation>手数料:</translation> </message> <message> <source>After Fee:</source> <translation>後の料金:</translation> </message> <message> <source>Change:</source> <translation>変更:</translation> </message> <message> <source>If this is activated, but the change address is empty or invalid, change will be sent to a newly generated address.</source> <translation>これが有効化されていて変更アドレスが空または無効でない時、変更は新しく生成されたアドレスへ送信されます。</translation> </message> <message> <source>Custom change address</source> <translation>カスタム変更アドレス</translation> </message> <message> <source>Transaction Fee:</source> <translation>処理費用:</translation> </message> <message> <source>Choose...</source> <translation>選択...</translation> </message> <message> <source>collapse fee-settings</source> <translation>料金設定を折りたたむ</translation> </message> <message> <source>per kilobyte</source> <translation>キロバイトごと</translation> </message> <message> <source>If the custom fee is set to 1000 satoshis and the transaction is only 250 bytes, then "per kilobyte" only pays 250 satoshis in fee, while "total at least" pays 1000 satoshis. For transactions bigger than a kilobyte both pay by kilobyte.</source> <translation>カスタム料金が1000 satoshiで処理が250バイトのみの場合、料金はキロバイトあたりは250 satoshiのみとなり、最新の合計は1000 satoshiまで支払います。処理が1キロバイトよりも大きい場合、キロバイトごとに支払いが行われます。</translation> </message> <message> <source>Hide</source> <translation>隠す</translation> </message> <message> <source>Paying only the minimum fee is just fine as long as there is less transaction volume than space in the blocks. But be aware that this can end up in a never confirming transaction once there is more demand for dancoin transactions than the network can process.</source> <translation>最小の料金のみ支払いは、ブロック内の処理容量よりも小さい場合に限り、問題ありません。しかし、ネットワークの処理できる量よりも、ビットコイン処理のほうが需要があり、最終的にその確認処理が実行されないことに注意する必要があります。</translation> </message> <message> <source>(read the tooltip)</source> <translation>(ツールチップを読む)</translation> </message> <message> <source>Recommended:</source> <translation>推奨:</translation> </message> <message> <source>Custom:</source> <translation>カスタム:</translation> </message> <message> <source>(Smart fee not initialized yet. 
This usually takes a few blocks...)</source> <translation>(スマート料金は初期化されていません。通常であれば数ブロックかかります...)</translation> </message> <message> <source>Send to multiple recipients at once</source> <translation>一度に複数の受信者に送信します</translation> </message> <message> <source>Add &amp;Recipient</source> <translation>追加&amp;受信者</translation> </message> <message> <source>Clear all fields of the form.</source> <translation>入力形式の全てのフィールドをクリア</translation> </message> <message> <source>Dust:</source> <translation>ごみ:</translation> </message> <message> <source>Confirmation time target:</source> <translation>確認時間の目標:</translation> </message> <message> <source>Clear &amp;All</source> <translation>クリア&amp;全て</translation> </message> <message> <source>Balance:</source> <translation>残高:</translation> </message> <message> <source>Confirm the send action</source> <translation>送信実行の確認</translation> </message> <message> <source>S&amp;end</source> <translation>&amp;送信</translation> </message> <message> <source>Copy quantity</source> <translation>量をコピー</translation> </message> <message> <source>Copy amount</source> <translation>残高のコピー</translation> </message> <message> <source>Copy fee</source> <translation>料金をコピー</translation> </message> <message> <source>Copy after fee</source> <translation>後の料金をコピー</translation> </message> <message> <source>Copy bytes</source> <translation>バイトをコピー</translation> </message> <message> <source>Copy dust</source> <translation>ゴミをコピー</translation> </message> <message> <source>Copy change</source> <translation>変更をコピー</translation> </message> <message> <source>%1 to %2</source> <translation>%1 から %2 へ</translation> </message> <message> <source>Are you sure you want to send?</source> <translation>送信してもよろしいですか?</translation> </message> <message> <source>added as transaction fee</source> <translation>処理料金として追加済み</translation> </message> <message> <source>Total Amount %1</source> <translation>合計金額 %1</translation> </message> <message> <source>or</source> <translation>もしくは</translation> </message> <message> <source>Confirm send coins</source> <translation>送信コイン確認</translation> </message> <message> <source>The recipient address is not valid. 
Please recheck.</source> <translation>受信者アドレスは無効です。再度確認してください。</translation> </message> <message> <source>The amount to pay must be larger than 0.</source> <translation>支払料金は0より大きい必要があります。</translation> </message> <message> <source>The amount exceeds your balance.</source> <translation>料金が残高を超えています。</translation> </message> <message> <source>The total exceeds your balance when the %1 transaction fee is included.</source> <translation>%1の処理料金を含めると、合計が残高を超えます。</translation> </message> <message> <source>Duplicate address found: addresses should only be used once each.</source> <translation>重複アドレスの発見: アドレスはそれぞれ一つずつ使用される必要があります。</translation> </message> <message> <source>Transaction creation failed!</source> <translation>処理の作成に失敗しました!</translation> </message> <message> <source>The transaction was rejected with the following reason: %1</source> <translation>処理は次の理由により却下されました: %1</translation> </message> <message> <source>A fee higher than %1 is considered an absurdly high fee.</source> <translation>料金が%1より高い場合、非常識に高い料金として考えられます。</translation> </message> <message> <source>Payment request expired.</source> <translation>支払要求期限切れ</translation> </message> <message> <source>Pay only the required fee of %1</source> <translation>%1の必須料金のみ支払う</translation> </message> <message> <source>Warning: Invalid Dancoin address</source> <translation>注意: 無効なビットコインアドレス</translation> </message> <message> <source>Warning: Unknown change address</source> <translation>注意: 未知の変更アドレス</translation> </message> <message> <source>Confirm custom change address</source> <translation>カスタム変更アドレスの確認</translation> </message> <message> <source>The address you selected for change is not part of this wallet. Any or all funds in your wallet may be sent to this address. Are you sure?</source> <translation>変更のために選択したアドレスは、ウォレットの一部ではありません。ウォレット内の一部またはすべて料金はこのアドレスで送付される必要があります。実行してもよろしいでしょうか?</translation> </message> <message> <source>(no label)</source> <translation>(ラベルなし)</translation> </message> </context> <context> <name>SendCoinsEntry</name> <message> <source>A&amp;mount:</source> <translation>&amp;残高:</translation> </message> <message> <source>Pay &amp;To:</source> <translation>支払&amp;宛先:</translation> </message> <message> <source>&amp;Label:</source> <translation>&amp;ラベル:</translation> </message> <message> <source>Choose previously used address</source> <translation>前回使用したアドレスの選択</translation> </message> <message> <source>This is a normal payment.</source> <translation>これは通常の支払です。</translation> </message> <message> <source>The Dancoin address to send the payment to</source> <translation>支払に送信するビットコインアドレス</translation> </message> <message> <source>Alt+A</source> <translation>Alt+A</translation> </message> <message> <source>Paste address from clipboard</source> <translation>クリップボードからアドレスを貼り付ける</translation> </message> <message> <source>Alt+P</source> <translation>Alt+P</translation> </message> <message> <source>Remove this entry</source> <translation>入力の削除</translation> </message> <message> <source>The fee will be deducted from the amount being sent. The recipient will receive less dancoins than you enter in the amount field. 
If multiple recipients are selected, the fee is split equally.</source> <translation>料金は送信料金から控除されます。受信者は入力された料金よりも低い金額を受け取ります。複数の受信者を選択した場合、この料金は同額に分けられます。</translation> </message> <message> <source>S&amp;ubtract fee from amount</source> <translation>&amp;残高から料金を差し引く</translation> </message> <message> <source>Message:</source> <translation>メッセージ:</translation> </message> <message> <source>This is an unauthenticated payment request.</source> <translation>これは未証明の支払要求です。</translation> </message> <message> <source>This is an authenticated payment request.</source> <translation>これは証明済みの支払要求です。</translation> </message> <message> <source>Enter a label for this address to add it to the list of used addresses</source> <translation>使用されたアドレス一覧のために、このアドレスのラベルを入力します。</translation> </message> <message> <source>A message that was attached to the dancoin: URI which will be stored with the transaction for your reference. Note: This message will not be sent over the Dancoin network.</source> <translation>参考資料に保存された処理のビットコインURIは、メッセージに添付されます。メモ: このメッセージはビットコイン上で送信されません。</translation> </message> <message> <source>Pay To:</source> <translation>支払先:</translation> </message> <message> <source>Memo:</source> <translation>メモ:</translation> </message> <message> <source>Enter a label for this address to add it to your address book</source> <translation>アドレス帳にアドレスを追加すためのラベルの入力</translation> </message> </context> <context> <name>SendConfirmationDialog</name> <message> <source>Yes</source> <translation>はい</translation> </message> </context> <context> <name>ShutdownWindow</name> <message> <source>%1 is shutting down...</source> <translation>%1は停止中です...</translation> </message> <message> <source>Do not shut down the computer until this window disappears.</source> <translation>この画面が消えるまで、コンピュータを停止しないでください。</translation> </message> </context> <context> <name>SignVerifyMessageDialog</name> <message> <source>Signatures - Sign / Verify a Message</source> <translation>署名 - サイン / メッセージの確認</translation> </message> <message> <source>&amp;Sign Message</source> <translation>&amp;署名メッセージ</translation> </message> <message> <source>You can sign messages/agreements with your addresses to prove you can receive dancoins sent to them. Be careful not to sign anything vague or random, as phishing attacks may try to trick you into signing your identity over to them. 
Only sign fully-detailed statements you agree to.</source> <translation>送信されたビットコインを受信できることを証明するために、アドレスのメッセージや同意事項に署名できます。フィッシング攻撃が署名を利用する可能性があるので、署名が曖昧やランダムでないことに注意してください。詳細な明細のみに署名されます。</translation> </message> <message> <source>The Dancoin address to sign the message with</source> <translation>メッセージ署名するためのビットコインアドレス</translation> </message> <message> <source>Choose previously used address</source> <translation>前回使用したアドレスの選択</translation> </message> <message> <source>Alt+A</source> <translation>Alt+A</translation> </message> <message> <source>Paste address from clipboard</source> <translation>クリップボードからアドレスを貼り付ける</translation> </message> <message> <source>Alt+P</source> <translation>Alt+P</translation> </message> <message> <source>Enter the message you want to sign here</source> <translation>ここに署名をするメッセージを入力</translation> </message> <message> <source>Signature</source> <translation>署名</translation> </message> <message> <source>Copy the current signature to the system clipboard</source> <translation>システムのクリップボードへ現在の署名をコピー</translation> </message> <message> <source>Sign the message to prove you own this Dancoin address</source> <translation>ビットコインアドレスを証明するために、メッセージに署名</translation> </message> <message> <source>Sign &amp;Message</source> <translation>署名&amp;メッセージ</translation> </message> <message> <source>Reset all sign message fields</source> <translation>全ての署名フィールドをリセット</translation> </message> <message> <source>Clear &amp;All</source> <translation>クリア&amp;全て</translation> </message> <message> <source>&amp;Verify Message</source> <translation>&amp;メッセージ確認</translation> </message> <message> <source>Enter the receiver's address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack. 
Note that this only proves the signing party receives with the address, it cannot prove sendership of any transaction!</source> <translation>メッセージを検証するには、受信者のアドレス、メッセージ(改行、スペース、タブなども正確にコピーしてください)、署名を以下に入力してください。中間者攻撃に騙されるのを避けるため、署名されたメッセージ本文に書かれている以上の内容を署名から読み取らないよう注意してください。これは署名者がそのアドレスで受け取れることを証明するだけであり、いかなる処理の送信者であることも証明できない点に注意してください!</translation> </message> <message> <source>The Dancoin address the message was signed with</source> <translation>メッセージの署名に使用されたDancoinアドレス</translation> </message> <message> <source>Verify the message to ensure it was signed with the specified Dancoin address</source> <translation>指定されたDancoinアドレスで署名されたことを確認するため、メッセージを検証します。</translation> </message> <message> <source>Verify &amp;Message</source> <translation>確認&amp;メッセージ</translation> </message> <message> <source>Reset all verify message fields</source> <translation>全ての確認フィールドをリセット</translation> </message> <message> <source>Click "Sign Message" to generate signature</source> <translation>署名を生成するため、"署名メッセージ"をクリック</translation> </message> <message> <source>The entered address is invalid.</source> <translation>入力されたアドレスは無効です。</translation> </message> <message> <source>Please check the address and try again.</source> <translation>アドレスを確認して、再実行してください。</translation> </message> <message> <source>The entered address does not refer to a key.</source> <translation>入力されたアドレスは鍵と関連がありません。</translation> </message> <message> <source>Wallet unlock was cancelled.</source> <translation>ウォレットのロック解除は中断されました。</translation> </message> <message> <source>Private key for the entered address is not available.</source> <translation>入力されたアドレスの秘密鍵は利用できません。</translation> </message> <message> <source>Message signing failed.</source> <translation>メッセージの署名は失敗しました。</translation> </message> <message> <source>Message signed.</source> <translation>メッセージに署名しました。</translation> </message> <message> <source>The signature could not be decoded.</source> <translation>署名は復号化できませんでした。</translation> </message> <message> <source>Please check the signature and try again.</source> <translation>署名を確認して、再実行してください。</translation> </message> <message> <source>The signature did not match the message digest.</source> <translation>署名は、メッセージダイジェストと一致しませんでした。</translation> </message> <message> <source>Message verification failed.</source> <translation>メッセージ確認は失敗しました。</translation> </message> <message> <source>Message verified.</source> <translation>メッセージは確認されました。</translation> </message> </context> <context> <name>SplashScreen</name> <message> <source>[testnet]</source> <translation>[testnet]</translation> </message> </context> <context> <name>TrafficGraphWidget</name> <message> <source>KB/s</source> <translation>キロバイト/秒</translation> </message> </context> <context> <name>TransactionDesc</name> <message> <source>Open until %1</source> <translation>%1まで開く</translation> </message> <message> <source>conflicted with a transaction with %1 confirmations</source> <translation>%1確認の処理と競合しました</translation> </message> <message> <source>%1/offline</source> <translation>%1/オフライン</translation> </message> <message> <source>0/unconfirmed, %1</source> <translation>0/未確認, %1</translation> </message> <message> <source>in memory pool</source> <translation>メモリプール中</translation> </message> <message> <source>not in memory pool</source> <translation>メモリプール外</translation> </message> <message> <source>abandoned</source> <translation>放棄</translation> </message> <message> <source>%1/unconfirmed</source> <translation>%1/未確認</translation> </message> <message> <source>%1 confirmations</source> <translation>%1 確認</translation> 
</message> <message> <source>Status</source> <translation>状態</translation> </message> <message> <source>, has not been successfully broadcast yet</source> <translation>まだ、散布に成功していません</translation> </message> <message> <source>Date</source> <translation>日付</translation> </message> <message> <source>Source</source> <translation>ソース</translation> </message> <message> <source>Generated</source> <translation>生成済み</translation> </message> <message> <source>From</source> <translation>送信元</translation> </message> <message> <source>unknown</source> <translation>不明</translation> </message> <message> <source>To</source> <translation>送信先</translation> </message> <message> <source>own address</source> <translation>自アドレス</translation> </message> <message> <source>watch-only</source> <translation>読み取り専用</translation> </message> <message> <source>label</source> <translation>ラベル</translation> </message> <message> <source>Credit</source> <translation>信用</translation> </message> <message> <source>not accepted</source> <translation>承認拒否</translation> </message> <message> <source>Debit</source> <translation>デビット</translation> </message> <message> <source>Total debit</source> <translation>デビットの合計</translation> </message> <message> <source>Total credit</source> <translation>合計クレジット</translation> </message> <message> <source>Transaction fee</source> <translation>処理料金</translation> </message> <message> <source>Net amount</source> <translation>ネット残高</translation> </message> <message> <source>Message</source> <translation>メッセージ</translation> </message> <message> <source>Comment</source> <translation>コメント</translation> </message> <message> <source>Transaction ID</source> <translation>処理ID</translation> </message> <message> <source>Transaction total size</source> <translation>処理合計サイズ</translation> </message> <message> <source>Output index</source> <translation>出力インデックス</translation> </message> <message> <source>Merchant</source> <translation>商人</translation> </message> <message> <source>Amount</source> <translation>残高</translation> </message> </context> <context> <name>TransactionDescDialog</name> </context> <context> <name>TransactionTableModel</name> <message> <source>Date</source> <translation>日付</translation> </message> <message> <source>Label</source> <translation>ラベル</translation> </message> <message> <source>Open until %1</source> <translation>%1まで開く</translation> </message> <message> <source>watch-only</source> <translation>読み取り専用</translation> </message> <message> <source>(no label)</source> <translation>(ラベルなし)</translation> </message> </context> <context> <name>TransactionView</name> <message> <source>Copy address</source> <translation>アドレスのコピー</translation> </message> <message> <source>Copy label</source> <translation>ラベルのコピー</translation> </message> <message> <source>Copy amount</source> <translation>残高のコピー</translation> </message> <message> <source>Copy transaction ID</source> <translation>トランザクションIDのコピー</translation> </message> <message> <source>Comma separated file (*.csv)</source> <translation>カンマ区切りのファイル(*.csv)</translation> </message> <message> <source>Confirmed</source> <translation>確認済み</translation> </message> <message> <source>Date</source> <translation>日付</translation> </message> <message> <source>Label</source> <translation>ラベル</translation> </message> <message> <source>Address</source> <translation>アドレス</translation> </message> <message> <source>Exporting Failed</source> <translation>出力の失敗</translation> </message> </context> <context> <name>UnitDisplayStatusBarControl</name> </context> 
<context> <name>WalletFrame</name> </context> <context> <name>WalletModel</name> <message> <source>Send Coins</source> <translation>コイン送付</translation> </message> </context> <context> <name>WalletView</name> <message> <source>&amp;Export</source> <translation>&amp;出力</translation> </message> <message> <source>Export the data in the current tab to a file</source> <translation>現在のタブのデータをファイルに出力します</translation> </message> </context> <context> <name>bitcoin-core</name> <message> <source>Information</source> <translation>情報</translation> </message> <message> <source>Warning</source> <translation>警告</translation> </message> <message> <source>Error</source> <translation>エラー</translation> </message> </context> </TS>
bins_share.js
import React, { Component } from 'react';
import { Meteor } from 'meteor/meteor';

class BinsShare extends Component {
  // Share this bin with the email address typed into the input box.
  onShareClick() {
    const email = this.refs.email.value;

    Meteor.call('bins.share', this.props.bin, email);
  }

  // Render one button per email address the bin is already shared with.
  renderShareList() {
    return this.props.bin.sharedWith.map(email => {
      return (
        <button key={email} className="btn btn-default">
          {email}
        </button>
      );
    });
  }

  render() {
    return (
      <footer className="bins-share">
        <div className="input-group">
          <input ref="email" className="form-control" />
          <div className="input-group-btn">
            <button
              onClick={this.onShareClick.bind(this)}
              className="btn btn-default"
            >
              Share Bin
            </button>
          </div>
        </div>
        <div>Shared With:</div>
        <div className="btn-group">{this.renderShareList()}</div>
      </footer>
    );
  }
}

export default BinsShare;
INT.go
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package values

import (
	"encoding/xml"
	"math"
	"strconv"
)

type PlcINT struct {
	value int16
	PlcSimpleNumericValueAdapter
}

func NewPlcINT(value int16) PlcINT {
	return PlcINT{
		value: value,
	}
}

func (m PlcINT) GetBoolean() bool {
	if m.value == 0 {
		return false
	}
	return true
}

func (m PlcINT) IsUint8() bool {
	return m.value >= 0 && m.value <= math.MaxUint8
}

func (m PlcINT) GetUint8() uint8 {
	if m.IsUint8() {
		return uint8(m.GetInt16())
	}
	return 0
}

func (m PlcINT) IsUint16() bool {
	return m.value >= 0
}

func (m PlcINT) GetUint16() uint16 {
	if m.IsUint16() {
		return uint16(m.GetInt16())
	}
	return 0
}

func (m PlcINT) IsUint32() bool {
	return m.value >= 0
}

func (m PlcINT) GetUint32() uint32 {
	if m.IsUint32() {
		return uint32(m.GetInt16())
	}
	return 0
}

func (m PlcINT) IsUint64() bool {
	return m.value >= 0
}

func (m PlcINT) GetUint64() uint64 {
	if m.IsUint64() {
		return uint64(m.GetInt16())
	}
	return 0
}

func (m PlcINT) IsInt8() bool {
	return m.value >= math.MinInt8 && m.value <= math.MaxInt8
}

func (m PlcINT) GetInt8() int8 {
	if m.IsInt8() {
		return int8(m.GetInt16())
	}
	return 0
}

func (m PlcINT) GetInt16() int16 {
	return m.value
}

func (m PlcINT) GetInt32() int32 {
	return int32(m.GetInt16())
}

func (m PlcINT) GetInt64() int64 {
	return int64(m.GetInt16())
}

func (m PlcINT) GetFloat32() float32 {
	//TODO: Check if this is ok
	return float32(m.GetInt16())
}

func (m PlcINT) GetFloat64() float64 {
	//TODO: Check if this is ok
	return float64(m.GetInt16())
}

func (m PlcINT) GetString() string {
	return strconv.Itoa(int(m.GetInt64()))
}

func (m PlcINT) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	if err := e.EncodeElement(m.value, xml.StartElement{Name: xml.Name{Local: "PlcINT"}}); err != nil {
		return err
	}
	return nil
}
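The Is*/Get* pairs above follow one pattern: the predicate reports whether the int16 value fits the target type, and the matching getter falls back to the zero value when it does not. A minimal in-package usage sketch of that behavior follows; this is an editorial addition, not part of the original file, and the test file name is hypothetical.

// int_example_test.go (hypothetical file, same package as above)
package values

import "fmt"

// ExamplePlcINT exercises the range-checked conversions of PlcINT.
func ExamplePlcINT() {
	v := NewPlcINT(-42)
	fmt.Println(v.GetBoolean()) // true: any non-zero value
	fmt.Println(v.IsUint8())    // false: a negative value never fits an unsigned type
	fmt.Println(v.GetUint8())   // 0: out-of-range conversions fall back to the zero value
	fmt.Println(v.GetInt32())   // -42: widening signed conversions always succeed
	fmt.Println(v.GetString())  // "-42"
	// Output:
	// true
	// false
	// 0
	// -42
	// -42
}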
error-reporting.js
!function(e){if("object"==typeof exports&&"undefined"!=typeof module)module.exports=e();else if("function"==typeof define&&define.amd)define([],e);else{var t;t="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:this,t.StackTrace=e()}}(function(){var e;return function t(e,n,r){function o(a,s){if(!n[a]){if(!e[a]){var u="function"==typeof require&&require;if(!s&&u)return u(a,!0);if(i)return i(a,!0);var c=new Error("Cannot find module '"+a+"'");throw c.code="MODULE_NOT_FOUND",c}var l=n[a]={exports:{}};e[a][0].call(l.exports,function(t){var n=e[a][1][t];return o(n?n:t)},l,l.exports,t,e,n,r)}return n[a].exports}for(var i="function"==typeof require&&require,a=0;a<r.length;a++)o(r[a]);return o}({1:[function(t,n,r){!function(o,i){"use strict";"function"==typeof e&&e.amd?e("error-stack-parser",["stackframe"],i):"object"==typeof r?n.exports=i(t("stackframe")):o.ErrorStackParser=i(o.StackFrame)}(this,function(e){"use strict";var t=/(^|@)\S+\:\d+/,n=/^\s*at .*(\S+\:\d+|\(native\))/m,r=/^(eval@)?(\[native code\])?$/;return{parse:function(e){if("undefined"!=typeof e.stacktrace||"undefined"!=typeof e["opera#sourceloc"])return this.parseOpera(e);if(e.stack&&e.stack.match(n))return this.parseV8OrIE(e);if(e.stack)return this.parseFFOrSafari(e);throw new Error("Cannot parse given Error object")},extractLocation:function(e){if(e.indexOf(":")===-1)return[e];var t=/(.+?)(?:\:(\d+))?(?:\:(\d+))?$/,n=t.exec(e.replace(/[\(\)]/g,""));return[n[1],n[2]||void 0,n[3]||void 0]},parseV8OrIE:function(t){var r=t.stack.split("\n").filter(function(e){return!!e.match(n)},this);return r.map(function(t){t.indexOf("(eval ")>-1&&(t=t.replace(/eval code/g,"eval").replace(/(\(eval at [^\()]*)|(\)\,.*$)/g,""));var n=t.replace(/^\s+/,"").replace(/\(eval code/g,"(").split(/\s+/).slice(1),r=this.extractLocation(n.pop()),o=n.join(" ")||void 0,i=["eval","<anonymous>"].indexOf(r[0])>-1?void 0:r[0];return new e({functionName:o,fileName:i,lineNumber:r[1],columnNumber:r[2],source:t})},this)},parseFFOrSafari:function(t){var n=t.stack.split("\n").filter(function(e){return!e.match(r)},this);return n.map(function(t){if(t.indexOf(" > eval")>-1&&(t=t.replace(/ line (\d+)(?: > eval line \d+)* > eval\:\d+\:\d+/g,":$1")),t.indexOf("@")===-1&&t.indexOf(":")===-1)return new e({functionName:t});var n=t.split("@"),r=this.extractLocation(n.pop()),o=n.join("@")||void 0;return new e({functionName:o,fileName:r[0],lineNumber:r[1],columnNumber:r[2],source:t})},this)},parseOpera:function(e){return!e.stacktrace||e.message.indexOf("\n")>-1&&e.message.split("\n").length>e.stacktrace.split("\n").length?this.parseOpera9(e):e.stack?this.parseOpera11(e):this.parseOpera10(e)},parseOpera9:function(t){for(var n=/Line (\d+).*script (?:in )?(\S+)/i,r=t.message.split("\n"),o=[],i=2,a=r.length;i<a;i+=2){var s=n.exec(r[i]);s&&o.push(new e({fileName:s[2],lineNumber:s[1],source:r[i]}))}return o},parseOpera10:function(t){for(var n=/Line (\d+).*script (?:in )?(\S+)(?:: In function (\S+))?$/i,r=t.stacktrace.split("\n"),o=[],i=0,a=r.length;i<a;i+=2){var s=n.exec(r[i]);s&&o.push(new e({functionName:s[3]||void 0,fileName:s[2],lineNumber:s[1],source:r[i]}))}return o},parseOpera11:function(n){var r=n.stack.split("\n").filter(function(e){return!!e.match(t)&&!e.match(/^Error created at/)},this);return r.map(function(t){var n,r=t.split("@"),o=this.extractLocation(r.pop()),i=r.shift()||"",a=i.replace(/<anonymous function(: (\w+))?>/,"$2").replace(/\([^\)]*\)/g,"")||void 0;i.match(/\(([^\)]*)\)/)&&(n=i.replace(/^[^\(]+\(([^\)]*)\)$/,"$1"));var 
s=void 0===n||"[arguments not available]"===n?void 0:n.split(",");return new e({functionName:a,args:s,fileName:o[0],lineNumber:o[1],columnNumber:o[2],source:t})},this)}}})},{stackframe:13}],2:[function(t,n,r){(function(o,i){!function(t,o){"object"==typeof r&&"undefined"!=typeof n?n.exports=o():"function"==typeof e&&e.amd?e(o):t.ES6Promise=o()}(this,function(){"use strict";function e(e){return"function"==typeof e||"object"==typeof e&&null!==e}function n(e){return"function"==typeof e}function r(e){W=e}function a(e){K=e}function s(){return function(){return o.nextTick(p)}}function u(){return function(){Y(p)}}function c(){var e=0,t=new Q(p),n=document.createTextNode("");return t.observe(n,{characterData:!0}),function(){n.data=e=++e%2}}function l(){var e=new MessageChannel;return e.port1.onmessage=p,function(){return e.port2.postMessage(0)}}function f(){var e=setTimeout;return function(){return e(p,1)}}function p(){for(var e=0;e<H;e+=2){var t=ne[e],n=ne[e+1];t(n),ne[e]=void 0,ne[e+1]=void 0}H=0}function h(){try{var e=t,n=e("vertx");return Y=n.runOnLoop||n.runOnContext,u()}catch(r){return f()}}function g(e,t){var n=arguments,r=this,o=new this.constructor(m);void 0===o[oe]&&R(o);var i=r._state;return i?!function(){var e=n[i-1];K(function(){return M(i,o,e,r._result)})}():T(r,o,e,t),o}function d(e){var t=this;if(e&&"object"==typeof e&&e.constructor===t)return e;var n=new t(m);return O(n,e),n}function m(){}function v(){return new TypeError("You cannot resolve a promise with itself")}function y(){return new TypeError("A promises callback cannot return that same promise.")}function _(e){try{return e.then}catch(t){return ue.error=t,ue}}function w(e,t,n,r){try{e.call(t,n,r)}catch(o){return o}}function b(e,t,n){K(function(e){var r=!1,o=w(n,t,function(n){r||(r=!0,t!==n?O(e,n):E(e,n))},function(t){r||(r=!0,N(e,t))},"Settle: "+(e._label||" unknown promise"));!r&&o&&(r=!0,N(e,o))},e)}function C(e,t){t._state===ae?E(e,t._result):t._state===se?N(e,t._result):T(t,void 0,function(t){return O(e,t)},function(t){return N(e,t)})}function A(e,t,r){t.constructor===e.constructor&&r===g&&t.constructor.resolve===d?C(e,t):r===ue?N(e,ue.error):void 0===r?E(e,t):n(r)?b(e,t,r):E(e,t)}function O(t,n){t===n?N(t,v()):e(n)?A(t,n,_(n)):E(t,n)}function S(e){e._onerror&&e._onerror(e._result),j(e)}function E(e,t){e._state===ie&&(e._result=t,e._state=ae,0!==e._subscribers.length&&K(j,e))}function N(e,t){e._state===ie&&(e._state=se,e._result=t,K(S,e))}function T(e,t,n,r){var o=e._subscribers,i=o.length;e._onerror=null,o[i]=t,o[i+ae]=n,o[i+se]=r,0===i&&e._state&&K(j,e)}function j(e){var t=e._subscribers,n=e._state;if(0!==t.length){for(var r=void 0,o=void 0,i=e._result,a=0;a<t.length;a+=3)r=t[a],o=t[a+n],r?M(n,r,o,i):o(i);e._subscribers.length=0}}function L(){this.error=null}function k(e,t){try{return e(t)}catch(n){return ce.error=n,ce}}function M(e,t,r,o){var i=n(r),a=void 0,s=void 0,u=void 0,c=void 0;if(i){if(a=k(r,o),a===ce?(c=!0,s=a.error,a=null):u=!0,t===a)return void N(t,y())}else a=o,u=!0;t._state!==ie||(i&&u?O(t,a):c?N(t,s):e===ae?E(t,a):e===se&&N(t,a))}function x(e,t){try{t(function(t){O(e,t)},function(t){N(e,t)})}catch(n){N(e,n)}}function P(){return le++}function R(e){e[oe]=le++,e._state=void 0,e._result=void 0,e._subscribers=[]}function U(e,t){this._instanceConstructor=e,this.promise=new e(m),this.promise[oe]||R(this.promise),Z(t)?(this._input=t,this.length=t.length,this._remaining=t.length,this._result=new 
Array(this.length),0===this.length?E(this.promise,this._result):(this.length=this.length||0,this._enumerate(),0===this._remaining&&E(this.promise,this._result))):N(this.promise,F())}function F(){return new Error("Array Methods must be provided an Array")}function $(e){return new U(this,e).promise}function G(e){var t=this;return new t(Z(e)?function(n,r){for(var o=e.length,i=0;i<o;i++)t.resolve(e[i]).then(n,r)}:function(e,t){return t(new TypeError("You must pass an array to race."))})}function D(e){var t=this,n=new t(m);return N(n,e),n}function B(){throw new TypeError("You must pass a resolver function as the first argument to the promise constructor")}function I(){throw new TypeError("Failed to construct 'Promise': Please use the 'new' operator, this object constructor cannot be called as a function.")}function J(e){this[oe]=P(),this._result=this._state=void 0,this._subscribers=[],m!==e&&("function"!=typeof e&&B(),this instanceof J?x(this,e):I())}function q(){var e=void 0;if("undefined"!=typeof i)e=i;else if("undefined"!=typeof self)e=self;else try{e=Function("return this")()}catch(t){throw new Error("polyfill failed because global object is unavailable in this environment")}var n=e.Promise;if(n){var r=null;try{r=Object.prototype.toString.call(n.resolve())}catch(t){}if("[object Promise]"===r&&!n.cast)return}e.Promise=J}var z=void 0;z=Array.isArray?Array.isArray:function(e){return"[object Array]"===Object.prototype.toString.call(e)};var Z=z,H=0,Y=void 0,W=void 0,K=function(e,t){ne[H]=e,ne[H+1]=t,H+=2,2===H&&(W?W(p):re())},V="undefined"!=typeof window?window:void 0,X=V||{},Q=X.MutationObserver||X.WebKitMutationObserver,ee="undefined"==typeof self&&"undefined"!=typeof o&&"[object process]"==={}.toString.call(o),te="undefined"!=typeof Uint8ClampedArray&&"undefined"!=typeof importScripts&&"undefined"!=typeof MessageChannel,ne=new Array(1e3),re=void 0;re=ee?s():Q?c():te?l():void 0===V&&"function"==typeof t?h():f();var oe=Math.random().toString(36).substring(16),ie=void 0,ae=1,se=2,ue=new L,ce=new L,le=0;return U.prototype._enumerate=function(){for(var e=this.length,t=this._input,n=0;this._state===ie&&n<e;n++)this._eachEntry(t[n],n)},U.prototype._eachEntry=function(e,t){var n=this._instanceConstructor,r=n.resolve;if(r===d){var o=_(e);if(o===g&&e._state!==ie)this._settledAt(e._state,t,e._result);else if("function"!=typeof o)this._remaining--,this._result[t]=e;else if(n===J){var i=new n(m);A(i,e,o),this._willSettleAt(i,t)}else this._willSettleAt(new n(function(t){return t(e)}),t)}else this._willSettleAt(r(e),t)},U.prototype._settledAt=function(e,t,n){var r=this.promise;r._state===ie&&(this._remaining--,e===se?N(r,n):this._result[t]=n),0===this._remaining&&E(r,this._result)},U.prototype._willSettleAt=function(e,t){var n=this;T(e,void 0,function(e){return n._settledAt(ae,t,e)},function(e){return n._settledAt(se,t,e)})},J.all=$,J.race=G,J.resolve=d,J.reject=D,J._setScheduler=r,J._setAsap=a,J._asap=K,J.prototype={constructor:J,then:g,"catch":function(e){return this.then(null,e)}},q(),J.polyfill=q,J.Promise=J,J})}).call(this,t("_process"),"undefined"!=typeof global?global:"undefined"!=typeof self?self:"undefined"!=typeof window?window:{})},{_process:4}],3:[function(t,n,r){(function(t){(function(){function o(e,t){function n(e){if(n[e]!==m)return n[e];var o;if("bug-string-char-index"==e)o="a"!="a"[0];else if("json"==e)o=n("json-stringify")&&n("json-parse");else{var a,s='{"a":[1,true,false,null,"\\u0000\\b\\n\\f\\r\\t"]}';if("json-stringify"==e){var u=t.stringify,l="function"==typeof 
u&&_;if(l){(a=function(){return 1}).toJSON=a;try{l="0"===u(0)&&"0"===u(new r)&&'""'==u(new i)&&u(y)===m&&u(m)===m&&u()===m&&"1"===u(a)&&"[1]"==u([a])&&"[null]"==u([m])&&"null"==u(null)&&"[null,null,null]"==u([m,y,null])&&u({a:[a,!0,!1,null,"\0\b\n\f\r\t"]})==s&&"1"===u(null,a)&&"[\n 1,\n 2\n]"==u([1,2],null,1)&&'"-271821-04-20T00:00:00.000Z"'==u(new c((-864e13)))&&'"+275760-09-13T00:00:00.000Z"'==u(new c(864e13))&&'"-000001-01-01T00:00:00.000Z"'==u(new c((-621987552e5)))&&'"1969-12-31T23:59:59.999Z"'==u(new c((-1)))}catch(f){l=!1}}o=l}if("json-parse"==e){var p=t.parse;if("function"==typeof p)try{if(0===p("0")&&!p(!1)){a=p(s);var h=5==a.a.length&&1===a.a[0];if(h){try{h=!p('"\t"')}catch(f){}if(h)try{h=1!==p("01")}catch(f){}if(h)try{h=1!==p("1.")}catch(f){}}}}catch(f){h=!1}o=h}}return n[e]=!!o}e||(e=u.Object()),t||(t=u.Object());var r=e.Number||u.Number,i=e.String||u.String,s=e.Object||u.Object,c=e.Date||u.Date,l=e.SyntaxError||u.SyntaxError,f=e.TypeError||u.TypeError,p=e.Math||u.Math,h=e.JSON||u.JSON;"object"==typeof h&&h&&(t.stringify=h.stringify,t.parse=h.parse);var g,d,m,v=s.prototype,y=v.toString,_=new c((-0xc782b5b800cec));try{_=_.getUTCFullYear()==-109252&&0===_.getUTCMonth()&&1===_.getUTCDate()&&10==_.getUTCHours()&&37==_.getUTCMinutes()&&6==_.getUTCSeconds()&&708==_.getUTCMilliseconds()}catch(w){}if(!n("json")){var b="[object Function]",C="[object Date]",A="[object Number]",O="[object String]",S="[object Array]",E="[object Boolean]",N=n("bug-string-char-index");if(!_)var T=p.floor,j=[0,31,59,90,120,151,181,212,243,273,304,334],L=function(e,t){return j[t]+365*(e-1970)+T((e-1969+(t=+(t>1)))/4)-T((e-1901+t)/100)+T((e-1601+t)/400)};if((g=v.hasOwnProperty)||(g=function(e){var t,n={};return(n.__proto__=null,n.__proto__={toString:1},n).toString!=y?g=function(e){var t=this.__proto__,n=e in(this.__proto__=null,this);return this.__proto__=t,n}:(t=n.constructor,g=function(e){var n=(this.constructor||t).prototype;return e in this&&!(e in n&&this[e]===n[e])}),n=null,g.call(this,e)}),d=function(e,t){var n,r,o,i=0;(n=function(){this.valueOf=0}).prototype.valueOf=0,r=new n;for(o in r)g.call(r,o)&&i++;return n=r=null,i?d=2==i?function(e,t){var n,r={},o=y.call(e)==b;for(n in e)o&&"prototype"==n||g.call(r,n)||!(r[n]=1)||!g.call(e,n)||t(n)}:function(e,t){var n,r,o=y.call(e)==b;for(n in e)o&&"prototype"==n||!g.call(e,n)||(r="constructor"===n)||t(n);(r||g.call(e,n="constructor"))&&t(n)}:(r=["valueOf","toString","toLocaleString","propertyIsEnumerable","isPrototypeOf","hasOwnProperty","constructor"],d=function(e,t){var n,o,i=y.call(e)==b,s=!i&&"function"!=typeof e.constructor&&a[typeof e.hasOwnProperty]&&e.hasOwnProperty||g;for(n in e)i&&"prototype"==n||!s.call(e,n)||t(n);for(o=r.length;n=r[--o];s.call(e,n)&&t(n));}),d(e,t)},!n("json-stringify")){var k={92:"\\\\",34:'\\"',8:"\\b",12:"\\f",10:"\\n",13:"\\r",9:"\\t"},M="000000",x=function(e,t){return(M+(t||0)).slice(-e)},P="\\u00",R=function(e){for(var t='"',n=0,r=e.length,o=!N||r>10,i=o&&(N?e.split(""):e);n<r;n++){var a=e.charCodeAt(n);switch(a){case 8:case 9:case 10:case 12:case 13:case 34:case 92:t+=k[a];break;default:if(a<32){t+=P+x(2,a.toString(16));break}t+=o?i[n]:e.charAt(n)}}return t+'"'},U=function(e,t,n,r,o,i,a){var s,u,c,l,p,h,v,_,w,b,N,j,k,M,P,F;try{s=t[e]}catch($){}if("object"==typeof s&&s)if(u=y.call(s),u!=C||g.call(s,"toJSON"))"function"==typeof s.toJSON&&(u!=A&&u!=O&&u!=S||g.call(s,"toJSON"))&&(s=s.toJSON(e));else 
if(s>-1/0&&s<1/0){if(L){for(p=T(s/864e5),c=T(p/365.2425)+1970-1;L(c+1,0)<=p;c++);for(l=T((p-L(c,0))/30.42);L(c,l+1)<=p;l++);p=1+p-L(c,l),h=(s%864e5+864e5)%864e5,v=T(h/36e5)%24,_=T(h/6e4)%60,w=T(h/1e3)%60,b=h%1e3}else c=s.getUTCFullYear(),l=s.getUTCMonth(),p=s.getUTCDate(),v=s.getUTCHours(),_=s.getUTCMinutes(),w=s.getUTCSeconds(),b=s.getUTCMilliseconds();s=(c<=0||c>=1e4?(c<0?"-":"+")+x(6,c<0?-c:c):x(4,c))+"-"+x(2,l+1)+"-"+x(2,p)+"T"+x(2,v)+":"+x(2,_)+":"+x(2,w)+"."+x(3,b)+"Z"}else s=null;if(n&&(s=n.call(t,e,s)),null===s)return"null";if(u=y.call(s),u==E)return""+s;if(u==A)return s>-1/0&&s<1/0?""+s:"null";if(u==O)return R(""+s);if("object"==typeof s){for(M=a.length;M--;)if(a[M]===s)throw f();if(a.push(s),N=[],P=i,i+=o,u==S){for(k=0,M=s.length;k<M;k++)j=U(k,s,n,r,o,i,a),N.push(j===m?"null":j);F=N.length?o?"[\n"+i+N.join(",\n"+i)+"\n"+P+"]":"["+N.join(",")+"]":"[]"}else d(r||s,function(e){var t=U(e,s,n,r,o,i,a);t!==m&&N.push(R(e)+":"+(o?" ":"")+t)}),F=N.length?o?"{\n"+i+N.join(",\n"+i)+"\n"+P+"}":"{"+N.join(",")+"}":"{}";return a.pop(),F}};t.stringify=function(e,t,n){var r,o,i,s;if(a[typeof t]&&t)if((s=y.call(t))==b)o=t;else if(s==S){i={};for(var u,c=0,l=t.length;c<l;u=t[c++],s=y.call(u),(s==O||s==A)&&(i[u]=1));}if(n)if((s=y.call(n))==A){if((n-=n%1)>0)for(r="",n>10&&(n=10);r.length<n;r+=" ");}else s==O&&(r=n.length<=10?n:n.slice(0,10));return U("",(u={},u[""]=e,u),o,i,r,"",[])}}if(!n("json-parse")){var F,$,G=i.fromCharCode,D={92:"\\",34:'"',47:"/",98:"\b",116:"\t",110:"\n",102:"\f",114:"\r"},B=function(){throw F=$=null,l()},I=function(){for(var e,t,n,r,o,i=$,a=i.length;F<a;)switch(o=i.charCodeAt(F)){case 9:case 10:case 13:case 32:F++;break;case 123:case 125:case 91:case 93:case 58:case 44:return e=N?i.charAt(F):i[F],F++,e;case 34:for(e="@",F++;F<a;)if(o=i.charCodeAt(F),o<32)B();else if(92==o)switch(o=i.charCodeAt(++F)){case 92:case 34:case 47:case 98:case 116:case 110:case 102:case 114:e+=D[o],F++;break;case 117:for(t=++F,n=F+4;F<n;F++)o=i.charCodeAt(F),o>=48&&o<=57||o>=97&&o<=102||o>=65&&o<=70||B();e+=G("0x"+i.slice(t,F));break;default:B()}else{if(34==o)break;for(o=i.charCodeAt(F),t=F;o>=32&&92!=o&&34!=o;)o=i.charCodeAt(++F);e+=i.slice(t,F)}if(34==i.charCodeAt(F))return F++,e;B();default:if(t=F,45==o&&(r=!0,o=i.charCodeAt(++F)),o>=48&&o<=57){for(48==o&&(o=i.charCodeAt(F+1),o>=48&&o<=57)&&B(),r=!1;F<a&&(o=i.charCodeAt(F),o>=48&&o<=57);F++);if(46==i.charCodeAt(F)){for(n=++F;n<a&&(o=i.charCodeAt(n),o>=48&&o<=57);n++);n==F&&B(),F=n}if(o=i.charCodeAt(F),101==o||69==o){for(o=i.charCodeAt(++F),43!=o&&45!=o||F++,n=F;n<a&&(o=i.charCodeAt(n),o>=48&&o<=57);n++);n==F&&B(),F=n}return+i.slice(t,F)}if(r&&B(),"true"==i.slice(F,F+4))return F+=4,!0;if("false"==i.slice(F,F+5))return F+=5,!1;if("null"==i.slice(F,F+4))return F+=4,null;B()}return"$"},J=function(e){var t,n;if("$"==e&&B(),"string"==typeof e){if("@"==(N?e.charAt(0):e[0]))return e.slice(1);if("["==e){for(t=[];e=I(),"]"!=e;n||(n=!0))n&&(","==e?(e=I(),"]"==e&&B()):B()),","==e&&B(),t.push(J(e));return t}if("{"==e){for(t={};e=I(),"}"!=e;n||(n=!0))n&&(","==e?(e=I(),"}"==e&&B()):B()),","!=e&&"string"==typeof e&&"@"==(N?e.charAt(0):e[0])&&":"==I()||B(),t[e.slice(1)]=J(I());return t}B()}return e},q=function(e,t,n){var r=z(e,t,n);r===m?delete e[t]:e[t]=r},z=function(e,t,n){var r,o=e[t];if("object"==typeof o&&o)if(y.call(o)==S)for(r=o.length;r--;)q(o,r,n);else d(o,function(e){q(o,e,n)});return n.call(e,t,o)};t.parse=function(e,t){var n,r;return F=0,$=""+e,n=J(I()),"$"!=I()&&B(),F=$=null,t&&y.call(t)==b?z((r={},r[""]=n,r),"",t):n}}}return t.runInContext=o,t}var 
i="function"==typeof e&&e.amd,a={"function":!0,object:!0},s=a[typeof r]&&r&&!r.nodeType&&r,u=a[typeof window]&&window||this,c=s&&a[typeof n]&&n&&!n.nodeType&&"object"==typeof t&&t;if(!c||c.global!==c&&c.window!==c&&c.self!==c||(u=c),s&&!i)o(u,s);else{var l=u.JSON,f=u.JSON3,p=!1,h=o(u,u.JSON3={noConflict:function(){return p||(p=!0,u.JSON=l,u.JSON3=f,l=f=null),h}});u.JSON={parse:h.parse,stringify:h.stringify}}i&&e(function(){return h})}).call(this)}).call(this,"undefined"!=typeof global?global:"undefined"!=typeof self?self:"undefined"!=typeof window?window:{})},{}],4:[function(e,t,n){function r(){throw new Error("setTimeout has not been defined")}function o(){throw new Error("clearTimeout has not been defined")}function i(e){if(f===setTimeout)return setTimeout(e,0);if((f===r||!f)&&setTimeout)return f=setTimeout,setTimeout(e,0);try{return f(e,0)}catch(t){try{return f.call(null,e,0)}catch(t){return f.call(this,e,0)}}}function a(e){if(p===clearTimeout)return clearTimeout(e);if((p===o||!p)&&clearTimeout)return p=clearTimeout,clearTimeout(e);try{return p(e)}catch(t){try{return p.call(null,e)}catch(t){return p.call(this,e)}}}function s(){m&&g&&(m=!1,g.length?d=g.concat(d):v=-1,d.length&&u())}function u(){if(!m){var e=i(s);m=!0;for(var t=d.length;t;){for(g=d,d=[];++v<t;)g&&g[v].run();v=-1,t=d.length}g=null,m=!1,a(e)}}function c(e,t){this.fun=e,this.array=t}function l(){}var f,p,h=t.exports={};!function(){try{f="function"==typeof setTimeout?setTimeout:r}catch(e){f=r}try{p="function"==typeof clearTimeout?clearTimeout:o}catch(e){p=o}}();var g,d=[],m=!1,v=-1;h.nextTick=function(e){var t=new Array(arguments.length-1);if(arguments.length>1)for(var n=1;n<arguments.length;n++)t[n-1]=arguments[n];d.push(new c(e,t)),1!==d.length||m||i(u)},c.prototype.run=function(){this.fun.apply(null,this.array)},h.title="browser",h.browser=!0,h.env={},h.argv=[],h.version="",h.versions={},h.on=l,h.addListener=l,h.once=l,h.off=l,h.removeListener=l,h.removeAllListeners=l,h.emit=l,h.binding=function(e){throw new Error("process.binding is not supported")},h.cwd=function(){return"/"},h.chdir=function(e){throw new Error("process.chdir is not supported")},h.umask=function(){return 0}},{}],5:[function(e,t,n){function r(){this._array=[],this._set=Object.create(null)}var o=e("./util"),i=Object.prototype.hasOwnProperty;r.fromArray=function(e,t){for(var n=new r,o=0,i=e.length;o<i;o++)n.add(e[o],t);return n},r.prototype.size=function(){return Object.getOwnPropertyNames(this._set).length},r.prototype.add=function(e,t){var n=o.toSetString(e),r=i.call(this._set,n),a=this._array.length;r&&!t||this._array.push(e),r||(this._set[n]=a)},r.prototype.has=function(e){var t=o.toSetString(e);return i.call(this._set,t)},r.prototype.indexOf=function(e){var t=o.toSetString(e);if(i.call(this._set,t))return this._set[t];throw new Error('"'+e+'" is not in the set.')},r.prototype.at=function(e){if(e>=0&&e<this._array.length)return this._array[e];throw new Error("No element indexed by "+e)},r.prototype.toArray=function(){return this._array.slice()},n.ArraySet=r},{"./util":11}],6:[function(e,t,n){function r(e){return e<0?(-e<<1)+1:(e<<1)+0}function o(e){var t=1===(1&e),n=e>>1;return t?-n:n}var i=e("./base64"),a=5,s=1<<a,u=s-1,c=s;n.encode=function(e){var t,n="",o=r(e);do t=o&u,o>>>=a,o>0&&(t|=c),n+=i.encode(t);while(o>0);return n},n.decode=function(e,t,n){var r,s,l=e.length,f=0,p=0;do{if(t>=l)throw new Error("Expected more digits in base 64 VLQ value.");if(s=i.decode(e.charCodeAt(t++)),s===-1)throw new Error("Invalid base64 digit: 
"+e.charAt(t-1));r=!!(s&c),s&=u,f+=s<<p,p+=a}while(r);n.value=o(f),n.rest=t}},{"./base64":7}],7:[function(e,t,n){var r="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".split("");n.encode=function(e){if(0<=e&&e<r.length)return r[e];throw new TypeError("Must be between 0 and 63: "+e)},n.decode=function(e){var t=65,n=90,r=97,o=122,i=48,a=57,s=43,u=47,c=26,l=52;return t<=e&&e<=n?e-t:r<=e&&e<=o?e-r+c:i<=e&&e<=a?e-i+l:e==s?62:e==u?63:-1}},{}],8:[function(e,t,n){function r(e,t,o,i,a,s){var u=Math.floor((t-e)/2)+e,c=a(o,i[u],!0);return 0===c?u:c>0?t-u>1?r(u,t,o,i,a,s):s==n.LEAST_UPPER_BOUND?t<i.length?t:-1:u:u-e>1?r(e,u,o,i,a,s):s==n.LEAST_UPPER_BOUND?u:e<0?-1:e}n.GREATEST_LOWER_BOUND=1,n.LEAST_UPPER_BOUND=2,n.search=function(e,t,o,i){if(0===t.length)return-1;var a=r(-1,t.length,e,t,o,i||n.GREATEST_LOWER_BOUND);if(a<0)return-1;for(;a-1>=0&&0===o(t[a],t[a-1],!0);)--a;return a}},{}],9:[function(e,t,n){function r(e,t,n){var r=e[t];e[t]=e[n],e[n]=r}function o(e,t){return Math.round(e+Math.random()*(t-e))}function i(e,t,n,a){if(n<a){var s=o(n,a),u=n-1;r(e,s,a);for(var c=e[a],l=n;l<a;l++)t(e[l],c)<=0&&(u+=1,r(e,u,l));r(e,u+1,l);var f=u+1;i(e,t,n,f-1),i(e,t,f+1,a)}}n.quickSort=function(e,t){i(e,t,0,e.length-1)}},{}],10:[function(e,t,n){function r(e){var t=e;return"string"==typeof e&&(t=JSON.parse(e.replace(/^\)\]\}'/,""))),null!=t.sections?new a(t):new o(t)}function o(e){var t=e;"string"==typeof e&&(t=JSON.parse(e.replace(/^\)\]\}'/,"")));var n=s.getArg(t,"version"),r=s.getArg(t,"sources"),o=s.getArg(t,"names",[]),i=s.getArg(t,"sourceRoot",null),a=s.getArg(t,"sourcesContent",null),u=s.getArg(t,"mappings"),l=s.getArg(t,"file",null);if(n!=this._version)throw new Error("Unsupported version: "+n);r=r.map(String).map(s.normalize).map(function(e){return i&&s.isAbsolute(i)&&s.isAbsolute(e)?s.relative(i,e):e}),this._names=c.fromArray(o.map(String),!0),this._sources=c.fromArray(r,!0),this.sourceRoot=i,this.sourcesContent=a,this._mappings=u,this.file=l}function i(){this.generatedLine=0,this.generatedColumn=0,this.source=null,this.originalLine=null,this.originalColumn=null,this.name=null}function a(e){var t=e;"string"==typeof e&&(t=JSON.parse(e.replace(/^\)\]\}'/,"")));var n=s.getArg(t,"version"),o=s.getArg(t,"sections");if(n!=this._version)throw new Error("Unsupported version: "+n);this._sources=new c,this._names=new c;var i={line:-1,column:0};this._sections=o.map(function(e){if(e.url)throw new Error("Support for url field in sections not implemented.");var t=s.getArg(e,"offset"),n=s.getArg(t,"line"),o=s.getArg(t,"column");if(n<i.line||n===i.line&&o<i.column)throw new Error("Section offsets must be ordered and non-overlapping.");return i=t,{generatedOffset:{generatedLine:n+1,generatedColumn:o+1},consumer:new r(s.getArg(e,"map"))}})}var s=e("./util"),u=e("./binary-search"),c=e("./array-set").ArraySet,l=e("./base64-vlq"),f=e("./quick-sort").quickSort;r.fromSourceMap=function(e){return o.fromSourceMap(e)},r.prototype._version=3,r.prototype.__generatedMappings=null,Object.defineProperty(r.prototype,"_generatedMappings",{get:function(){return this.__generatedMappings||this._parseMappings(this._mappings,this.sourceRoot),this.__generatedMappings}}),r.prototype.__originalMappings=null,Object.defineProperty(r.prototype,"_originalMappings",{get:function(){return this.__originalMappings||this._parseMappings(this._mappings,this.sourceRoot),this.__originalMappings}}),r.prototype._charIsMappingSeparator=function(e,t){var n=e.charAt(t);return";"===n||","===n},r.prototype._parseMappings=function(e,t){throw new 
Error("Subclasses must implement _parseMappings")},r.GENERATED_ORDER=1,r.ORIGINAL_ORDER=2,r.GREATEST_LOWER_BOUND=1,r.LEAST_UPPER_BOUND=2,r.prototype.eachMapping=function(e,t,n){var o,i=t||null,a=n||r.GENERATED_ORDER;switch(a){case r.GENERATED_ORDER:o=this._generatedMappings;break;case r.ORIGINAL_ORDER:o=this._originalMappings;break;default:throw new Error("Unknown order of iteration.")}var u=this.sourceRoot;o.map(function(e){var t=null===e.source?null:this._sources.at(e.source);return null!=t&&null!=u&&(t=s.join(u,t)),{source:t,generatedLine:e.generatedLine,generatedColumn:e.generatedColumn,originalLine:e.originalLine,originalColumn:e.originalColumn,name:null===e.name?null:this._names.at(e.name)}},this).forEach(e,i)},r.prototype.allGeneratedPositionsFor=function(e){var t=s.getArg(e,"line"),n={source:s.getArg(e,"source"),originalLine:t,originalColumn:s.getArg(e,"column",0)};if(null!=this.sourceRoot&&(n.source=s.relative(this.sourceRoot,n.source)),!this._sources.has(n.source))return[];n.source=this._sources.indexOf(n.source);var r=[],o=this._findMapping(n,this._originalMappings,"originalLine","originalColumn",s.compareByOriginalPositions,u.LEAST_UPPER_BOUND);if(o>=0){var i=this._originalMappings[o];if(void 0===e.column)for(var a=i.originalLine;i&&i.originalLine===a;)r.push({line:s.getArg(i,"generatedLine",null),column:s.getArg(i,"generatedColumn",null),lastColumn:s.getArg(i,"lastGeneratedColumn",null)}),i=this._originalMappings[++o];else for(var c=i.originalColumn;i&&i.originalLine===t&&i.originalColumn==c;)r.push({line:s.getArg(i,"generatedLine",null),column:s.getArg(i,"generatedColumn",null),lastColumn:s.getArg(i,"lastGeneratedColumn",null)}),i=this._originalMappings[++o]}return r},n.SourceMapConsumer=r,o.prototype=Object.create(r.prototype),o.prototype.consumer=r,o.fromSourceMap=function(e){var t=Object.create(o.prototype),n=t._names=c.fromArray(e._names.toArray(),!0),r=t._sources=c.fromArray(e._sources.toArray(),!0);t.sourceRoot=e._sourceRoot,t.sourcesContent=e._generateSourcesContent(t._sources.toArray(),t.sourceRoot),t.file=e._file;for(var a=e._mappings.toArray().slice(),u=t.__generatedMappings=[],l=t.__originalMappings=[],p=0,h=a.length;p<h;p++){var g=a[p],d=new i;d.generatedLine=g.generatedLine,d.generatedColumn=g.generatedColumn,g.source&&(d.source=r.indexOf(g.source),d.originalLine=g.originalLine,d.originalColumn=g.originalColumn,g.name&&(d.name=n.indexOf(g.name)),l.push(d)),u.push(d)}return f(t.__originalMappings,s.compareByOriginalPositions),t},o.prototype._version=3,Object.defineProperty(o.prototype,"sources",{get:function(){return this._sources.toArray().map(function(e){return null!=this.sourceRoot?s.join(this.sourceRoot,e):e},this)}}),o.prototype._parseMappings=function(e,t){for(var n,r,o,a,u,c=1,p=0,h=0,g=0,d=0,m=0,v=e.length,y=0,_={},w={},b=[],C=[];y<v;)if(";"===e.charAt(y))c++,y++,p=0;else if(","===e.charAt(y))y++;else{for(n=new i,n.generatedLine=c,a=y;a<v&&!this._charIsMappingSeparator(e,a);a++);if(r=e.slice(y,a),o=_[r])y+=r.length;else{for(o=[];y<a;)l.decode(e,y,w),u=w.value,y=w.rest,o.push(u);if(2===o.length)throw new Error("Found a source, but no line and column");if(3===o.length)throw new Error("Found a source and line, but no column");_[r]=o}n.generatedColumn=p+o[0],p=n.generatedColumn,o.length>1&&(n.source=d+o[1],d+=o[1],n.originalLine=h+o[2],h=n.originalLine,n.originalLine+=1,n.originalColumn=g+o[3],g=n.originalColumn,o.length>4&&(n.name=m+o[4],m+=o[4])),C.push(n),"number"==typeof 
n.originalLine&&b.push(n)}f(C,s.compareByGeneratedPositionsDeflated),this.__generatedMappings=C,f(b,s.compareByOriginalPositions),this.__originalMappings=b},o.prototype._findMapping=function(e,t,n,r,o,i){if(e[n]<=0)throw new TypeError("Line must be greater than or equal to 1, got "+e[n]);if(e[r]<0)throw new TypeError("Column must be greater than or equal to 0, got "+e[r]);return u.search(e,t,o,i)},o.prototype.computeColumnSpans=function(){for(var e=0;e<this._generatedMappings.length;++e){var t=this._generatedMappings[e];if(e+1<this._generatedMappings.length){var n=this._generatedMappings[e+1];if(t.generatedLine===n.generatedLine){t.lastGeneratedColumn=n.generatedColumn-1;continue}}t.lastGeneratedColumn=1/0}},o.prototype.originalPositionFor=function(e){var t={generatedLine:s.getArg(e,"line"),generatedColumn:s.getArg(e,"column")},n=this._findMapping(t,this._generatedMappings,"generatedLine","generatedColumn",s.compareByGeneratedPositionsDeflated,s.getArg(e,"bias",r.GREATEST_LOWER_BOUND));if(n>=0){var o=this._generatedMappings[n];if(o.generatedLine===t.generatedLine){var i=s.getArg(o,"source",null);null!==i&&(i=this._sources.at(i),null!=this.sourceRoot&&(i=s.join(this.sourceRoot,i)));var a=s.getArg(o,"name",null);return null!==a&&(a=this._names.at(a)),{source:i,line:s.getArg(o,"originalLine",null),column:s.getArg(o,"originalColumn",null),name:a}}}return{source:null,line:null,column:null,name:null}},o.prototype.hasContentsOfAllSources=function(){return!!this.sourcesContent&&(this.sourcesContent.length>=this._sources.size()&&!this.sourcesContent.some(function(e){return null==e}))},o.prototype.sourceContentFor=function(e,t){if(!this.sourcesContent)return null;if(null!=this.sourceRoot&&(e=s.relative(this.sourceRoot,e)),this._sources.has(e))return this.sourcesContent[this._sources.indexOf(e)];var n;if(null!=this.sourceRoot&&(n=s.urlParse(this.sourceRoot))){var r=e.replace(/^file:\/\//,"");if("file"==n.scheme&&this._sources.has(r))return this.sourcesContent[this._sources.indexOf(r)];if((!n.path||"/"==n.path)&&this._sources.has("/"+e))return this.sourcesContent[this._sources.indexOf("/"+e)]}if(t)return null;throw new Error('"'+e+'" is not in the SourceMap.')},o.prototype.generatedPositionFor=function(e){var t=s.getArg(e,"source");if(null!=this.sourceRoot&&(t=s.relative(this.sourceRoot,t)),!this._sources.has(t))return{line:null,column:null,lastColumn:null};t=this._sources.indexOf(t);var n={source:t,originalLine:s.getArg(e,"line"),originalColumn:s.getArg(e,"column")},o=this._findMapping(n,this._originalMappings,"originalLine","originalColumn",s.compareByOriginalPositions,s.getArg(e,"bias",r.GREATEST_LOWER_BOUND));if(o>=0){var i=this._originalMappings[o];if(i.source===n.source)return{line:s.getArg(i,"generatedLine",null),column:s.getArg(i,"generatedColumn",null),lastColumn:s.getArg(i,"lastGeneratedColumn",null)}}return{line:null,column:null,lastColumn:null}},n.BasicSourceMapConsumer=o,a.prototype=Object.create(r.prototype),a.prototype.constructor=r,a.prototype._version=3,Object.defineProperty(a.prototype,"sources",{get:function(){for(var e=[],t=0;t<this._sections.length;t++)for(var n=0;n<this._sections[t].consumer.sources.length;n++)e.push(this._sections[t].consumer.sources[n]);return e}}),a.prototype.originalPositionFor=function(e){var t={generatedLine:s.getArg(e,"line"),generatedColumn:s.getArg(e,"column")},n=u.search(t,this._sections,function(e,t){var n=e.generatedLine-t.generatedOffset.generatedLine;return n?n:e.generatedColumn-t.generatedOffset.generatedColumn}),r=this._sections[n];return 
r?r.consumer.originalPositionFor({line:t.generatedLine-(r.generatedOffset.generatedLine-1),column:t.generatedColumn-(r.generatedOffset.generatedLine===t.generatedLine?r.generatedOffset.generatedColumn-1:0),bias:e.bias}):{source:null,line:null,column:null,name:null}},a.prototype.hasContentsOfAllSources=function(){return this._sections.every(function(e){return e.consumer.hasContentsOfAllSources()})},a.prototype.sourceContentFor=function(e,t){
for(var n=0;n<this._sections.length;n++){var r=this._sections[n],o=r.consumer.sourceContentFor(e,!0);if(o)return o}if(t)return null;throw new Error('"'+e+'" is not in the SourceMap.')},a.prototype.generatedPositionFor=function(e){for(var t=0;t<this._sections.length;t++){var n=this._sections[t];if(n.consumer.sources.indexOf(s.getArg(e,"source"))!==-1){var r=n.consumer.generatedPositionFor(e);if(r){var o={line:r.line+(n.generatedOffset.generatedLine-1),column:r.column+(n.generatedOffset.generatedLine===r.line?n.generatedOffset.generatedColumn-1:0)};return o}}}return{line:null,column:null}},a.prototype._parseMappings=function(e,t){this.__generatedMappings=[],this.__originalMappings=[];for(var n=0;n<this._sections.length;n++)for(var r=this._sections[n],o=r.consumer._generatedMappings,i=0;i<o.length;i++){var a=o[i],u=r.consumer._sources.at(a.source);null!==r.consumer.sourceRoot&&(u=s.join(r.consumer.sourceRoot,u)),this._sources.add(u),u=this._sources.indexOf(u);var c=r.consumer._names.at(a.name);this._names.add(c),c=this._names.indexOf(c);var l={source:u,generatedLine:a.generatedLine+(r.generatedOffset.generatedLine-1),generatedColumn:a.generatedColumn+(r.generatedOffset.generatedLine===a.generatedLine?r.generatedOffset.generatedColumn-1:0),originalLine:a.originalLine,originalColumn:a.originalColumn,name:c};this.__generatedMappings.push(l),"number"==typeof l.originalLine&&this.__originalMappings.push(l)}f(this.__generatedMappings,s.compareByGeneratedPositionsDeflated),f(this.__originalMappings,s.compareByOriginalPositions)},n.IndexedSourceMapConsumer=a},{"./array-set":5,"./base64-vlq":6,"./binary-search":8,"./quick-sort":9,"./util":11}],11:[function(e,t,n){function r(e,t,n){if(t in e)return e[t];if(3===arguments.length)return n;throw new Error('"'+t+'" is a required argument.')}function o(e){var t=e.match(v);return t?{scheme:t[1],auth:t[2],host:t[3],port:t[4],path:t[5]}:null}function i(e){var t="";return e.scheme&&(t+=e.scheme+":"),t+="//",e.auth&&(t+=e.auth+"@"),e.host&&(t+=e.host),e.port&&(t+=":"+e.port),e.path&&(t+=e.path),t}function a(e){var t=e,r=o(e);if(r){if(!r.path)return e;t=r.path}for(var a,s=n.isAbsolute(t),u=t.split(/\/+/),c=0,l=u.length-1;l>=0;l--)a=u[l],"."===a?u.splice(l,1):".."===a?c++:c>0&&(""===a?(u.splice(l+1,c),c=0):(u.splice(l,2),c--));return t=u.join("/"),""===t&&(t=s?"/":"."),r?(r.path=t,i(r)):t}function s(e,t){""===e&&(e="."),""===t&&(t=".");var n=o(t),r=o(e);if(r&&(e=r.path||"/"),n&&!n.scheme)return r&&(n.scheme=r.scheme),i(n);if(n||t.match(y))return t;if(r&&!r.host&&!r.path)return r.host=t,i(r);var s="/"===t.charAt(0)?t:a(e.replace(/\/+$/,"")+"/"+t);return r?(r.path=s,i(r)):s}function u(e,t){""===e&&(e="."),e=e.replace(/\/$/,"");for(var n=0;0!==t.indexOf(e+"/");){var r=e.lastIndexOf("/");if(r<0)return t;if(e=e.slice(0,r),e.match(/^([^\/]+:\/)?\/*$/))return t;++n}return Array(n+1).join("../")+t.substr(e.length+1)}function c(e){return e}function l(e){return p(e)?"$"+e:e}function f(e){return p(e)?e.slice(1):e}function p(e){if(!e)return!1;var t=e.length;if(t<9)return!1;if(95!==e.charCodeAt(t-1)||95!==e.charCodeAt(t-2)||111!==e.charCodeAt(t-3)||116!==e.charCodeAt(t-4)||111!==e.charCodeAt(t-5)||114!==e.charCodeAt(t-6)||112!==e.charCodeAt(t-7)||95!==e.charCodeAt(t-8)||95!==e.charCodeAt(t-9))return!1;for(var n=t-10;n>=0;n--)if(36!==e.charCodeAt(n))return!1;return!0}function h(e,t,n){var r=e.source-t.source;return 
0!==r?r:(r=e.originalLine-t.originalLine,0!==r?r:(r=e.originalColumn-t.originalColumn,0!==r||n?r:(r=e.generatedColumn-t.generatedColumn,0!==r?r:(r=e.generatedLine-t.generatedLine,0!==r?r:e.name-t.name))))}function g(e,t,n){var r=e.generatedLine-t.generatedLine;return 0!==r?r:(r=e.generatedColumn-t.generatedColumn,0!==r||n?r:(r=e.source-t.source,0!==r?r:(r=e.originalLine-t.originalLine,0!==r?r:(r=e.originalColumn-t.originalColumn,0!==r?r:e.name-t.name))))}function d(e,t){return e===t?0:e>t?1:-1}function m(e,t){var n=e.generatedLine-t.generatedLine;return 0!==n?n:(n=e.generatedColumn-t.generatedColumn,0!==n?n:(n=d(e.source,t.source),0!==n?n:(n=e.originalLine-t.originalLine,0!==n?n:(n=e.originalColumn-t.originalColumn,0!==n?n:d(e.name,t.name)))))}n.getArg=r;var v=/^(?:([\w+\-.]+):)?\/\/(?:(\w+:\w+)@)?([\w.]*)(?::(\d+))?(\S*)$/,y=/^data:.+\,.+$/;n.urlParse=o,n.urlGenerate=i,n.normalize=a,n.join=s,n.isAbsolute=function(e){return"/"===e.charAt(0)||!!e.match(v)},n.relative=u;var _=function(){var e=Object.create(null);return!("__proto__"in e)}();n.toSetString=_?c:l,n.fromSetString=_?c:f,n.compareByOriginalPositions=h,n.compareByGeneratedPositionsDeflated=g,n.compareByGeneratedPositionsInflated=m},{}],12:[function(t,n,r){!function(o,i){"use strict";"function"==typeof e&&e.amd?e("stack-generator",["stackframe"],i):"object"==typeof r?n.exports=i(t("stackframe")):o.StackGenerator=i(o.StackFrame)}(this,function(e){return{backtrace:function(t){var n=[],r=10;"object"==typeof t&&"number"==typeof t.maxStackSize&&(r=t.maxStackSize);for(var o=arguments.callee;o&&n.length<r;){for(var i=new Array(o.arguments.length),a=0;a<i.length;++a)i[a]=o.arguments[a];/function(?:\s+([\w$]+))+\s*\(/.test(o.toString())?n.push(new e({functionName:RegExp.$1||void 0,args:i})):n.push(new e({args:i}));try{o=o.caller}catch(s){break}}return n}}})},{stackframe:13}],13:[function(t,n,r){!function(t,o){"use strict";"function"==typeof e&&e.amd?e("stackframe",[],o):"object"==typeof r?n.exports=o():t.StackFrame=o()}(this,function(){"use strict";function e(e){return!isNaN(parseFloat(e))&&isFinite(e)}function t(e){return e[0].toUpperCase()+e.substring(1)}function n(e){return function(){return this[e]}}function r(e){if(e instanceof Object)for(var n=o.concat(i.concat(a.concat(s))),r=0;r<n.length;r++)e.hasOwnProperty(n[r])&&void 0!==e[n[r]]&&this["set"+t(n[r])](e[n[r]])}var o=["isConstructor","isEval","isNative","isToplevel"],i=["columnNumber","lineNumber"],a=["fileName","functionName","source"],s=["args"];r.prototype={getArgs:function(){return this.args},setArgs:function(e){if("[object Array]"!==Object.prototype.toString.call(e))throw new TypeError("Args must be an Array");this.args=e},getEvalOrigin:function(){return this.evalOrigin},setEvalOrigin:function(e){if(e instanceof r)this.evalOrigin=e;else{if(!(e instanceof Object))throw new TypeError("Eval Origin must be an Object or StackFrame");this.evalOrigin=new r(e)}},toString:function(){var t=this.getFunctionName()||"{anonymous}",n="("+(this.getArgs()||[]).join(",")+")",r=this.getFileName()?"@"+this.getFileName():"",o=e(this.getLineNumber())?":"+this.getLineNumber():"",i=e(this.getColumnNumber())?":"+this.getColumnNumber():"";return t+n+r+o+i}};for(var u=0;u<o.length;u++)r.prototype["get"+t(o[u])]=n(o[u]),r.prototype["set"+t(o[u])]=function(e){return function(t){this[e]=Boolean(t)}}(o[u]);for(var c=0;c<i.length;c++)r.prototype["get"+t(i[c])]=n(i[c]),r.prototype["set"+t(i[c])]=function(t){return function(n){if(!e(n))throw new TypeError(t+" must be a Number");this[t]=Number(n)}}(i[c]);for(var 
l=0;l<a.length;l++)r.prototype["get"+t(a[l])]=n(a[l]),r.prototype["set"+t(a[l])]=function(e){return function(t){this[e]=String(t)}}(a[l]);return r})},{}],14:[function(t,n,r){!function(o,i){"use strict";"function"==typeof e&&e.amd?e("stacktrace-gps",["source-map","stackframe"],i):"object"==typeof r?n.exports=i(t("source-map/lib/source-map-consumer"),t("stackframe")):o.StackTraceGPS=i(o.SourceMap||o.sourceMap,o.StackFrame)}(this,function(e,t){"use strict";function n(e){return new Promise(function(t,n){var r=new XMLHttpRequest;r.open("get",e),r.onerror=n,r.onreadystatechange=function(){4===r.readyState&&(r.status>=200&&r.status<300||"file://"===e.substr(0,7)&&r.responseText?t(r.responseText):n(new Error("HTTP status: "+r.status+" retrieving "+e)))},r.send()})}function r(e){if("undefined"!=typeof window&&window.atob)return window.atob(e);throw new Error("You must supply a polyfill for window.atob in this environment")}function o(e){if("undefined"!=typeof JSON&&JSON.parse)return JSON.parse(e);throw new Error("You must supply a polyfill for JSON.parse in this environment")}function i(e,t){for(var n=[/['"]?([$_A-Za-z][$_A-Za-z0-9]*)['"]?\s*[:=]\s*function\b/,/function\s+([^('"`]*?)\s*\(([^)]*)\)/,/['"]?([$_A-Za-z][$_A-Za-z0-9]*)['"]?\s*[:=]\s*(?:eval|new Function)\b/,/\b(?!(?:if|for|switch|while|with|catch)\b)(?:(?:static)\s+)?(\S+)\s*\(.*?\)\s*\{/,/['"]?([$_A-Za-z][$_A-Za-z0-9]*)['"]?\s*[:=]\s*\(.*?\)\s*=>/],r=e.split("\n"),o="",i=Math.min(t,20),a=0;a<i;++a){var s=r[t-a-1],u=s.indexOf("//");if(u>=0&&(s=s.substr(0,u)),s){o=s+o;for(var c=n.length,l=0;l<c;l++){var f=n[l].exec(o);if(f&&f[1])return f[1]}}}}function a(){if("function"!=typeof Object.defineProperty||"function"!=typeof Object.create)throw new Error("Unable to consume source maps in older browsers")}function s(e){if("object"!=typeof e)throw new TypeError("Given StackFrame is not an object");if("string"!=typeof e.fileName)throw new TypeError("Given file name is not a String");if("number"!=typeof e.lineNumber||e.lineNumber%1!==0||e.lineNumber<1)throw new TypeError("Given line number must be a positive integer");if("number"!=typeof e.columnNumber||e.columnNumber%1!==0||e.columnNumber<0)throw new TypeError("Given column number must be a non-negative integer");return!0}function u(e){var t=/\/\/[#@] ?sourceMappingURL=([^\s'"]+)\s*$/m.exec(e);if(t&&t[1])return t[1];throw new Error("sourceMappingURL not found")}function c(e,n,r){return new Promise(function(o,i){var a=n.originalPositionFor({line:e.lineNumber,column:e.columnNumber});if(a.source){var s=n.sourceContentFor(a.source);s&&(r[a.source]=s),o(new t({functionName:a.name||e.functionName,args:e.args,fileName:a.source,lineNumber:a.line,columnNumber:a.column}))}else i(new Error("Could not get original source for given stackframe and source map"))})}return function l(f){return this instanceof l?(f=f||{},this.sourceCache=f.sourceCache||{},this.sourceMapConsumerCache=f.sourceMapConsumerCache||{},this.ajax=f.ajax||n,this._atob=f.atob||r,this._get=function(e){return new Promise(function(t,n){var r="data:"===e.substr(0,5);if(this.sourceCache[e])t(this.sourceCache[e]);else if(f.offline&&!r)n(new Error("Cannot make network requests in offline mode"));else if(r){var o=/^data:application\/json;([\w=:"-]+;)*base64,/,i=e.match(o);if(i){var a=i[0].length,s=e.substr(a),u=this._atob(s);this.sourceCache[e]=u,t(u)}else n(new Error("The encoding of the inline sourcemap is not supported"))}else{var 
c=this.ajax(e,{method:"get"});this.sourceCache[e]=c,c.then(t,n)}}.bind(this))},this._getSourceMapConsumer=function(t,n){return new Promise(function(r,i){if(this.sourceMapConsumerCache[t])r(this.sourceMapConsumerCache[t]);else{var a=new Promise(function(r,i){return this._get(t).then(function(t){"string"==typeof t&&(t=o(t.replace(/^\)\]\}'/,""))),"undefined"==typeof t.sourceRoot&&(t.sourceRoot=n),r(new e.SourceMapConsumer(t))},i)}.bind(this));this.sourceMapConsumerCache[t]=a,r(a)}}.bind(this))},this.pinpoint=function(e){return new Promise(function(t,n){this.getMappedLocation(e).then(function(e){function n(){t(e)}this.findFunctionName(e).then(t,n)["catch"](n)}.bind(this),n)}.bind(this))},this.findFunctionName=function(e){return new Promise(function(n,r){s(e),this._get(e.fileName).then(function(r){var o=e.lineNumber,a=e.columnNumber,s=i(r,o,a);n(s?new t({functionName:s,args:e.args,fileName:e.fileName,lineNumber:o,columnNumber:a}):e)},r)["catch"](r)}.bind(this))},void(this.getMappedLocation=function(e){return new Promise(function(t,n){a(),s(e);var r=this.sourceCache,o=e.fileName;this._get(o).then(function(n){var i=u(n),a="data:"===i.substr(0,5),s=o.substring(0,o.lastIndexOf("/")+1);return"/"===i[0]||a||/^https?:\/\/|^\/\//i.test(i)||(i=s+i),this._getSourceMapConsumer(i,s).then(function(n){return c(e,n,r).then(t)["catch"](function(){t(e)})})}.bind(this),n)["catch"](n)}.bind(this))})):new l(f)}})},{"source-map/lib/source-map-consumer":10,stackframe:13}],15:[function(e,t,n){Array.isArray||(Array.isArray=function(e){return"[object Array]"===Object.prototype.toString.call(e)}),"undefined"==typeof Promise&&ES6Promise.polyfill(),Function.prototype.bind||(Function.prototype.bind=function(e){if("function"!=typeof this)throw new TypeError("Function.prototype.bind - what is trying to be bound is not callable");var t=Array.prototype.slice.call(arguments,1),n=this,r=function(){},o=function(){return n.apply(this instanceof r&&e?this:e,t.concat(Array.prototype.slice.call(arguments)))};return r.prototype=this.prototype,o.prototype=new r,o}),Array.prototype.map||(Array.prototype.map=function(e,t){if(void 0===this||null===this)throw new TypeError("this is null or not defined");var n,r=Object(this),o=r.length>>>0;if("function"!=typeof e)throw new TypeError(e+" is not a function");arguments.length>1&&(n=t);for(var i=new Array(o),a=0;a<o;){var s,u;a in r&&(s=r[a],u=e.call(n,s,a,r),i[a]=u),a++}return i}),Array.prototype.filter||(Array.prototype.filter=function(e){if(void 0===this||null===this)throw new TypeError("this is null or not defined");var t=Object(this),n=t.length>>>0;if("function"!=typeof e)throw new TypeError(e+" is not a function");for(var r=[],o=arguments.length>=2?arguments[1]:void 0,i=0;i<n;i++)if(i in t){var a=t[i];e.call(o,a,i,t)&&r.push(a)}return r}),Array.prototype.forEach||(Array.prototype.forEach=function(e,t){var n,r;if(null===this||void 0===this)throw new TypeError(" this is null or not defined");var o=Object(this),i=o.length>>>0;if("function"!=typeof e)throw new TypeError(e+" is not a function");for(arguments.length>1&&(n=t),r=0;r<i;){var a;r in o&&(a=o[r],e.call(n,a,r,o)),r++}})},{}],16:[function(t,n,r){!function(o,i){"use strict";"function"==typeof e&&e.amd?e("stacktrace",["error-stack-parser","stack-generator","stacktrace-gps"],i):"object"==typeof r?n.exports=i(t("error-stack-parser"),t("stack-generator"),t("stacktrace-gps")):o.StackTrace=i(o.ErrorStackParser,o.StackGenerator,o.StackTraceGPS)}(this,function(e,t,n){function r(e,t){var n={};return[e,t].forEach(function(e){for(var t in 
e)e.hasOwnProperty(t)&&(n[t]=e[t]);return n}),n}function o(e){return e.stack||e["opera#sourceloc"]}function i(e,t){return"function"==typeof t?e.filter(t):e}var a={filter:function(e){return(e.functionName||"").indexOf("StackTrace$$")===-1&&(e.functionName||"").indexOf("ErrorStackParser$$")===-1&&(e.functionName||"").indexOf("StackTraceGPS$$")===-1&&(e.functionName||"").indexOf("StackGenerator$$")===-1},sourceCache:{}},s=function(){try{throw new Error}catch(e){return e}};return{get:function(e){var t=s();return o(t)?this.fromError(t,e):this.generateArtificially(e)},getSync:function(n){n=r(a,n);var u=s(),c=o(u)?e.parse(u):t.backtrace(n);return i(c,n.filter)},fromError:function(t,o){o=r(a,o);var s=new n(o);return new Promise(function(n){var r=i(e.parse(t),o.filter);n(Promise.all(r.map(function(e){return new Promise(function(t){function n(){t(e)}s.pinpoint(e).then(t,n)["catch"](n)})})))}.bind(this))},generateArtificially:function(e){e=r(a,e);var n=t.backtrace(e);return"function"==typeof e.filter&&(n=n.filter(e.filter)),Promise.resolve(n)},instrument:function(e,t,n,r){if("function"!=typeof e)throw new Error("Cannot instrument non-function object");if("function"==typeof e.__stacktraceOriginalFn)return e;var i=function(){try{return this.get().then(t,n)["catch"](n),e.apply(r||this,arguments)}catch(i){throw o(i)&&this.fromError(i).then(t,n)["catch"](n),i}}.bind(this);return i.__stacktraceOriginalFn=e,i},deinstrument:function(e){if("function"!=typeof e)throw new Error("Cannot de-instrument non-function object");return"function"==typeof e.__stacktraceOriginalFn?e.__stacktraceOriginalFn:e},report:function(e,t,n,r){return new Promise(function(o,i){var a=new XMLHttpRequest;if(a.onerror=i,a.onreadystatechange=function(){4===a.readyState&&(a.status>=200&&a.status<400?o(a.responseText):i(new Error("POST to "+t+" failed with status: "+a.status)))},a.open("post",t),a.setRequestHeader("Content-Type","application/json"),r&&"object"==typeof r.headers){var s=r.headers;for(var u in s)s.hasOwnProperty(u)&&a.setRequestHeader(u,s[u])}var c={stack:e};void 0!==n&&null!==n&&(c.message=n),a.send(JSON.stringify(c))})}}})},{"error-stack-parser":1,"stack-generator":12,"stacktrace-gps":14}]},{},[2,3,15,16])(16)}),function(e){"use strict";var t="https://clouderrorreporting.googleapis.com/v1beta1/projects/",n=function(){};e.StackdriverErrorReporter=n,n.prototype.start=function(e){if(!e.key&&!e.targetUrl)throw new Error("Cannot initialize: No API key or target url provided.");if(!e.projectId&&!e.targetUrl)throw new Error("Cannot initialize: No project ID or target url provided.");if("undefined"==typeof StackTrace)throw new Error("make sure you loaded “dist/stackdriver-errors-concat.js” or “dist/stackdriver-errors-concat.min.js”, or that you imported the “stacktrace-js” module");this.apiKey=e.key,this.projectId=e.projectId,this.targetUrl=e.targetUrl,this.context=e.context||{},this.serviceContext={service:e.service||"web"},e.version&&(this.serviceContext.version=e.version),this.reportUncaughtExceptions=e.reportUncaughtExceptions!==!1,this.disabled=e.disabled||!1;var t=this;if(this.reportUncaughtExceptions){var n=window.onerror||function(){};window.onerror=function(e,r,o,i,a){return a&&t.report(a),n(e,r,o,i,a),!0}}},n.prototype.report=function(e,t){if(this.disabled)return"function"==typeof t&&t();if(!e)return"function"==typeof t&&t("no error to report");var n={};n.serviceContext=this.serviceContext,n.context=this.context,n.context.httpRequest={userAgent:window.navigator.userAgent,url:window.location.href};var 
r=0;if("string"==typeof e||e instanceof String){try{throw new Error(e)}catch(o){e=o}r=1}var i=this;StackTrace.fromError(e).then(function(o){n.message=e.toString();for(var a=r;a<o.length;a++)n.message+="\n",n.message+=[" at ",o[a].getFunctionName()||"<anonymous>"," (",o[a].getFileName(),":",o[a].getLineNumber(),":",o[a].getColumnNumber(),")"].join("");i.sendErrorPayload(n,t)},function(r){n.message=["Error extracting stack trace: ",r,"\n",e.toString(),"\n"," (",e.file,":",e.line,":",e.column,")"].join(""),i.sendErrorPayload(n,t)})},n.prototype.sendErrorPayload=function(e,n){var r=t+this.projectId+"/events:report?key="+this.apiKey,o=this.targetUrl||r,i=new XMLHttpRequest;i.open("POST",o,!0),i.setRequestHeader("Content-Type","application/json; charset=UTF-8"),i.onloadend=function(){return"function"==typeof n&&n()},i.onerror=function(e){return"function"==typeof n&&n(e)},i.send(JSON.stringify(e))},n.prototype.setUser=function(e){this.context.user=e}}(this);
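// The minified `StackdriverErrorReporter` above POSTs a JSON payload shaped as
// `{ serviceContext, context: { httpRequest: { userAgent, url }, user? }, message }`
// to `https://clouderrorreporting.googleapis.com/v1beta1/projects/<projectId>/events:report?key=<apiKey>`.
// Below is a minimal Rust/serde sketch of that payload shape, for readability
// only: the struct names are assumptions, while the field names and nesting
// are read directly off the JavaScript above.
#[derive(serde::Serialize)]
struct ServiceContext {
    service: String, // defaults to "web" in the reporter above
    #[serde(skip_serializing_if = "Option::is_none")]
    version: Option<String>,
}

#[derive(serde::Serialize)]
struct HttpRequest {
    #[serde(rename = "userAgent")]
    user_agent: String,
    url: String,
}

#[derive(serde::Serialize)]
struct ErrorContext {
    #[serde(rename = "httpRequest")]
    http_request: HttpRequest,
    #[serde(skip_serializing_if = "Option::is_none")]
    user: Option<String>, // set via setUser() in the reporter above
}

#[derive(serde::Serialize)]
struct ErrorEvent {
    #[serde(rename = "serviceContext")]
    service_context: ServiceContext,
    context: ErrorContext,
    message: String, // error text plus one " at fn (file:line:col)" entry per frame
}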
enums.rs
use gdnative::prelude::*;

#[derive(FromVariant, ToVariant, PartialEq, Eq, Debug, Copy, Clone)]
pub enum State {
    IDLE,
    MOVE,
    JUMP,
    FALL,
    DEAD,
    HURT,
    ATTACK,
}
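// A minimal usage sketch, assuming the gdnative 0.9-style prelude API: the
// derived `ToVariant`/`FromVariant` impls above let `State` cross the
// Rust/GDScript boundary as a Godot `Variant`. The `round_trip` helper and
// the `IDLE` fallback are illustrative assumptions.
fn round_trip(state: State) -> State {
    // Enum -> Variant via the derived `ToVariant` impl.
    let v: Variant = state.to_variant();
    // Variant -> enum via the derived `FromVariant` impl; fall back to
    // `IDLE` when the variant does not encode a valid `State`.
    State::from_variant(&v).unwrap_or(State::IDLE)
}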
AppFooter.js
/* eslint-disable prettier/prettier */
import React from 'react'
import { CFooter } from '@coreui/react'

const AppFooter = () => {
  return (
    <CFooter>
      <div>
        <a href="https://coreui.io" target="_blank" rel="noopener noreferrer">
          CoreUI
        </a>
        <span className="ms-1">&copy; 2021 creativeLabs.</span>
      </div>
      <div className="ms-auto">
        <span className="me-1">Powered by</span>
        <a href="https://coreui.io/react" target="_blank" rel="noopener noreferrer">
          CoreUI for React
        </a>
      </div>
    </CFooter>
  )
}

export default React.memo(AppFooter)
item.rs
use super::diagnostics::{dummy_arg, ConsumeClosingDelim, Error}; use super::ty::{AllowPlus, RecoverQPath, RecoverReturnSign}; use super::{AttrWrapper, FollowedByType, ForceCollect, Parser, PathStyle, TrailingToken}; use rustc_ast::ast::*; use rustc_ast::ptr::P; use rustc_ast::token::{self, TokenKind}; use rustc_ast::tokenstream::{DelimSpan, TokenStream, TokenTree}; use rustc_ast::{self as ast, AttrVec, Attribute, DUMMY_NODE_ID}; use rustc_ast::{Async, Const, Defaultness, IsAuto, Mutability, Unsafe, UseTree, UseTreeKind}; use rustc_ast::{BindingMode, Block, FnDecl, FnSig, Param, SelfKind}; use rustc_ast::{EnumDef, FieldDef, Generics, TraitRef, Ty, TyKind, Variant, VariantData}; use rustc_ast::{FnHeader, ForeignItem, Path, PathSegment, Visibility, VisibilityKind}; use rustc_ast::{MacArgs, MacCall, MacDelimiter}; use rustc_ast_pretty::pprust; use rustc_errors::{struct_span_err, Applicability, PResult, StashKey}; use rustc_span::edition::{Edition, LATEST_STABLE_EDITION}; use rustc_span::source_map::{self, Span}; use rustc_span::symbol::{kw, sym, Ident, Symbol}; use std::convert::TryFrom; use std::mem; use tracing::debug; impl<'a> Parser<'a> { /// Parses a source module as a crate. This is the main entry point for the parser. pub fn parse_crate_mod(&mut self) -> PResult<'a, ast::Crate> { let (attrs, items, span) = self.parse_mod(&token::Eof)?; let proc_macros = Vec::new(); // Filled in by `proc_macro_harness::inject()`. Ok(ast::Crate { attrs, items, span, proc_macros }) } /// Parses a `mod <foo> { ... }` or `mod <foo>;` item. fn parse_item_mod(&mut self, attrs: &mut Vec<Attribute>) -> PResult<'a, ItemInfo> { let unsafety = self.parse_unsafety(); self.expect_keyword(kw::Mod)?; let id = self.parse_ident()?; let mod_kind = if self.eat(&token::Semi) { ModKind::Unloaded } else { self.expect(&token::OpenDelim(token::Brace))?; let (mut inner_attrs, items, inner_span) = self.parse_mod(&token::CloseDelim(token::Brace))?; attrs.append(&mut inner_attrs); ModKind::Loaded(items, Inline::Yes, inner_span) }; Ok((id, ItemKind::Mod(unsafety, mod_kind))) } /// Parses the contents of a module (inner attributes followed by module items). pub fn parse_mod( &mut self, term: &TokenKind, ) -> PResult<'a, (Vec<Attribute>, Vec<P<Item>>, Span)> { let lo = self.token.span; let attrs = self.parse_inner_attributes()?; let mut items = vec![]; while let Some(item) = self.parse_item(ForceCollect::No)? 
{ items.push(item); self.maybe_consume_incorrect_semicolon(&items); } if !self.eat(term) { let token_str = super::token_descr(&self.token); if !self.maybe_consume_incorrect_semicolon(&items) { let msg = &format!("expected item, found {}", token_str); let mut err = self.struct_span_err(self.token.span, msg); err.span_label(self.token.span, "expected item"); return Err(err); } } Ok((attrs, items, lo.to(self.prev_token.span))) } } pub(super) type ItemInfo = (Ident, ItemKind); impl<'a> Parser<'a> { pub fn parse_item(&mut self, force_collect: ForceCollect) -> PResult<'a, Option<P<Item>>> { self.parse_item_(|_| true, force_collect).map(|i| i.map(P)) } fn parse_item_( &mut self, req_name: ReqName, force_collect: ForceCollect, ) -> PResult<'a, Option<Item>> { let attrs = self.parse_outer_attributes()?; self.parse_item_common(attrs, true, false, req_name, force_collect) } pub(super) fn parse_item_common( &mut self, attrs: AttrWrapper, mac_allowed: bool, attrs_allowed: bool, req_name: ReqName, force_collect: ForceCollect, ) -> PResult<'a, Option<Item>> { // Don't use `maybe_whole` so that we have precise control // over when we bump the parser if let token::Interpolated(nt) = &self.token.kind { if let token::NtItem(item) = &**nt { let mut item = item.clone(); self.bump(); attrs.prepend_to_nt_inner(&mut item.attrs); return Ok(Some(item.into_inner())); } }; let mut unclosed_delims = vec![]; let item = self.collect_tokens_trailing_token(attrs, force_collect, |this: &mut Self, attrs| { let item = this.parse_item_common_(attrs, mac_allowed, attrs_allowed, req_name); unclosed_delims.append(&mut this.unclosed_delims); Ok((item?, TrailingToken::None)) })?; self.unclosed_delims.append(&mut unclosed_delims); Ok(item) } fn parse_item_common_( &mut self, mut attrs: Vec<Attribute>, mac_allowed: bool, attrs_allowed: bool, req_name: ReqName, ) -> PResult<'a, Option<Item>> { let lo = self.token.span; let vis = self.parse_visibility(FollowedByType::No)?; let mut def = self.parse_defaultness(); let kind = self.parse_item_kind(&mut attrs, mac_allowed, lo, &vis, &mut def, req_name)?; if let Some((ident, kind)) = kind { self.error_on_unconsumed_default(def, &kind); let span = lo.to(self.prev_token.span); let id = DUMMY_NODE_ID; let item = Item { ident, attrs, id, kind, vis, span, tokens: None }; return Ok(Some(item)); } // At this point, we have failed to parse an item. self.error_on_unmatched_vis(&vis); self.error_on_unmatched_defaultness(def); if !attrs_allowed { self.recover_attrs_no_item(&attrs)?; } Ok(None) } /// Error in-case a non-inherited visibility was parsed but no item followed. fn error_on_unmatched_vis(&self, vis: &Visibility) { if let VisibilityKind::Inherited = vis.kind { return; } let vs = pprust::vis_to_string(&vis); let vs = vs.trim_end(); self.struct_span_err(vis.span, &format!("visibility `{}` is not followed by an item", vs)) .span_label(vis.span, "the visibility") .help(&format!("you likely meant to define an item, e.g., `{} fn foo() {{}}`", vs)) .emit(); } /// Error in-case a `default` was parsed but no item followed. fn error_on_unmatched_defaultness(&self, def: Defaultness) { if let Defaultness::Default(sp) = def { self.struct_span_err(sp, "`default` is not followed by an item") .span_label(sp, "the `default` qualifier") .note("only `fn`, `const`, `type`, or `impl` items may be prefixed by `default`") .emit(); } } /// Error in-case `default` was parsed in an in-appropriate context. 
fn error_on_unconsumed_default(&self, def: Defaultness, kind: &ItemKind) { if let Defaultness::Default(span) = def { let msg = format!("{} {} cannot be `default`", kind.article(), kind.descr()); self.struct_span_err(span, &msg) .span_label(span, "`default` because of this") .note("only associated `fn`, `const`, and `type` items can be `default`") .emit(); } } /// Parses one of the items allowed by the flags. fn parse_item_kind( &mut self, attrs: &mut Vec<Attribute>, macros_allowed: bool, lo: Span, vis: &Visibility, def: &mut Defaultness, req_name: ReqName, ) -> PResult<'a, Option<ItemInfo>> { let def_final = def == &Defaultness::Final; let mut def = || mem::replace(def, Defaultness::Final); let info = if self.eat_keyword(kw::Use) { // USE ITEM let tree = self.parse_use_tree()?; // If wildcard or glob-like brace syntax doesn't have `;`, // the user may not know `*` or `{}` should be the last. if let Err(mut e) = self.expect_semi() { match tree.kind { UseTreeKind::Glob => { e.note("the wildcard token must be last on the path").emit(); } UseTreeKind::Nested(..) => { e.note("glob-like brace syntax must be last on the path").emit(); } _ => (), } return Err(e); } (Ident::invalid(), ItemKind::Use(tree)) } else if self.check_fn_front_matter(def_final) { // FUNCTION ITEM let (ident, sig, generics, body) = self.parse_fn(attrs, req_name, lo)?; (ident, ItemKind::Fn(box FnKind(def(), sig, generics, body))) } else if self.eat_keyword(kw::Extern) { if self.eat_keyword(kw::Crate) { // EXTERN CRATE self.parse_item_extern_crate()? } else { // EXTERN BLOCK self.parse_item_foreign_mod(attrs, Unsafe::No)? } } else if self.is_unsafe_foreign_mod() { // EXTERN BLOCK let unsafety = self.parse_unsafety(); self.expect_keyword(kw::Extern)?; self.parse_item_foreign_mod(attrs, unsafety)? } else if self.is_static_global() { // STATIC ITEM self.bump(); // `static` let m = self.parse_mutability(); let (ident, ty, expr) = self.parse_item_global(Some(m))?; (ident, ItemKind::Static(ty, m, expr)) } else if let Const::Yes(const_span) = self.parse_constness() { // CONST ITEM if self.token.is_keyword(kw::Impl) { // recover from `const impl`, suggest `impl const` self.recover_const_impl(const_span, attrs, def())? } else { self.recover_const_mut(const_span); let (ident, ty, expr) = self.parse_item_global(None)?; (ident, ItemKind::Const(def(), ty, expr)) } } else if self.check_keyword(kw::Trait) || self.check_auto_or_unsafe_trait_item() { // TRAIT ITEM self.parse_item_trait(attrs, lo)? } else if self.check_keyword(kw::Impl) || self.check_keyword(kw::Unsafe) && self.is_keyword_ahead(1, &[kw::Impl]) { // IMPL ITEM self.parse_item_impl(attrs, def())? } else if self.check_keyword(kw::Mod) || self.check_keyword(kw::Unsafe) && self.is_keyword_ahead(1, &[kw::Mod]) { // MODULE ITEM self.parse_item_mod(attrs)? } else if self.eat_keyword(kw::Type) { // TYPE ITEM self.parse_type_alias(def())? } else if self.eat_keyword(kw::Enum) { // ENUM ITEM self.parse_item_enum()? } else if self.eat_keyword(kw::Struct) { // STRUCT ITEM self.parse_item_struct()? } else if self.is_kw_followed_by_ident(kw::Union) { // UNION ITEM self.bump(); // `union` self.parse_item_union()? } else if self.eat_keyword(kw::Macro) { // MACROS 2.0 ITEM self.parse_item_decl_macro(lo)? } else if self.is_macro_rules_item() { // MACRO_RULES ITEM self.parse_item_macro_rules(vis)? 
} else if vis.kind.is_pub() && self.isnt_macro_invocation() { self.recover_missing_kw_before_item()?; return Ok(None); } else if macros_allowed && self.check_path() { // MACRO INVOCATION ITEM (Ident::invalid(), ItemKind::MacCall(self.parse_item_macro(vis)?)) } else { return Ok(None); }; Ok(Some(info)) } /// When parsing a statement, would the start of a path be an item? pub(super) fn is_path_start_item(&mut self) -> bool { self.is_crate_vis() // no: `crate::b`, yes: `crate $item` || self.is_kw_followed_by_ident(kw::Union) // no: `union::b`, yes: `union U { .. }` || self.check_auto_or_unsafe_trait_item() // no: `auto::b`, yes: `auto trait X { .. }` || self.is_async_fn() // no(2015): `async::b`, yes: `async fn` || self.is_macro_rules_item() // no: `macro_rules::b`, yes: `macro_rules! mac` } /// Are we sure this could not possibly be a macro invocation? fn isnt_macro_invocation(&mut self) -> bool { self.check_ident() && self.look_ahead(1, |t| *t != token::Not && *t != token::ModSep) } /// Recover on encountering a struct or method definition where the user /// forgot to add the `struct` or `fn` keyword after writing `pub`: `pub S {}`. fn recover_missing_kw_before_item(&mut self) -> PResult<'a, ()> { // Space between `pub` keyword and the identifier // // pub S {} // ^^^ `sp` points here let sp = self.prev_token.span.between(self.token.span); let full_sp = self.prev_token.span.to(self.token.span); let ident_sp = self.token.span; if self.look_ahead(1, |t| *t == token::OpenDelim(token::Brace)) { // possible public struct definition where `struct` was forgotten let ident = self.parse_ident().unwrap(); let msg = format!("add `struct` here to parse `{}` as a public struct", ident); let mut err = self.struct_span_err(sp, "missing `struct` for struct definition"); err.span_suggestion_short( sp, &msg, " struct ".into(), Applicability::MaybeIncorrect, // speculative ); Err(err) } else if self.look_ahead(1, |t| *t == token::OpenDelim(token::Paren)) { let ident = self.parse_ident().unwrap(); self.bump(); // `(` let kw_name = self.recover_first_param(); self.consume_block(token::Paren, ConsumeClosingDelim::Yes); let (kw, kw_name, ambiguous) = if self.check(&token::RArrow) { self.eat_to_tokens(&[&token::OpenDelim(token::Brace)]); self.bump(); // `{` ("fn", kw_name, false) } else if self.check(&token::OpenDelim(token::Brace)) { self.bump(); // `{` ("fn", kw_name, false) } else if self.check(&token::Colon) { let kw = "struct"; (kw, kw, false) } else { ("fn` or `struct", "function or struct", true) }; let msg = format!("missing `{}` for {} definition", kw, kw_name); let mut err = self.struct_span_err(sp, &msg); if !ambiguous { self.consume_block(token::Brace, ConsumeClosingDelim::Yes); let suggestion = format!("add `{}` here to parse `{}` as a public {}", kw, ident, kw_name); err.span_suggestion_short( sp, &suggestion, format!(" {} ", kw), Applicability::MachineApplicable, ); } else if let Ok(snippet) = self.span_to_snippet(ident_sp) { err.span_suggestion( full_sp, "if you meant to call a macro, try", format!("{}!", snippet), // this is the `ambiguous` conditional branch Applicability::MaybeIncorrect, ); } else { err.help( "if you meant to call a macro, remove the `pub` \ and add a trailing `!` after the identifier", ); } Err(err) } else if self.look_ahead(1, |t| *t == token::Lt) { let ident = self.parse_ident().unwrap(); self.eat_to_tokens(&[&token::Gt]); self.bump(); // `>` let (kw, kw_name, ambiguous) = if self.eat(&token::OpenDelim(token::Paren)) { ("fn", self.recover_first_param(), false) } else if 
self.check(&token::OpenDelim(token::Brace)) { ("struct", "struct", false) } else { ("fn` or `struct", "function or struct", true) }; let msg = format!("missing `{}` for {} definition", kw, kw_name); let mut err = self.struct_span_err(sp, &msg); if !ambiguous { err.span_suggestion_short( sp, &format!("add `{}` here to parse `{}` as a public {}", kw, ident, kw_name), format!(" {} ", kw), Applicability::MachineApplicable, ); } Err(err) } else { Ok(()) } } /// Parses an item macro, e.g., `item!();`. fn parse_item_macro(&mut self, vis: &Visibility) -> PResult<'a, MacCall> { let path = self.parse_path(PathStyle::Mod)?; // `foo::bar` self.expect(&token::Not)?; // `!` let args = self.parse_mac_args()?; // `( .. )` or `[ .. ]` (followed by `;`), or `{ .. }`. self.eat_semi_for_macro_if_needed(&args); self.complain_if_pub_macro(vis, false); Ok(MacCall { path, args, prior_type_ascription: self.last_type_ascription }) } /// Recover if we parsed attributes and expected an item but there was none. fn recover_attrs_no_item(&mut self, attrs: &[Attribute]) -> PResult<'a, ()> { let (start, end) = match attrs { [] => return Ok(()), [x0 @ xn] | [x0, .., xn] => (x0, xn), }; let msg = if end.is_doc_comment() { "expected item after doc comment" } else { "expected item after attributes" }; let mut err = self.struct_span_err(end.span, msg); if end.is_doc_comment() { err.span_label(end.span, "this doc comment doesn't document anything"); } if let [.., penultimate, _] = attrs { err.span_label(start.span.to(penultimate.span), "other attributes here"); } Err(err) } fn is_async_fn(&self) -> bool { self.token.is_keyword(kw::Async) && self.is_keyword_ahead(1, &[kw::Fn]) } fn parse_polarity(&mut self) -> ast::ImplPolarity { // Disambiguate `impl !Trait for Type { ... }` and `impl ! { ... }` for the never type. if self.check(&token::Not) && self.look_ahead(1, |t| t.can_begin_type()) { self.bump(); // `!` ast::ImplPolarity::Negative(self.prev_token.span) } else { ast::ImplPolarity::Positive } } /// Parses an implementation item. /// /// ``` /// impl<'a, T> TYPE { /* impl items */ } /// impl<'a, T> TRAIT for TYPE { /* impl items */ } /// impl<'a, T> !TRAIT for TYPE { /* impl items */ } /// impl<'a, T> const TRAIT for TYPE { /* impl items */ } /// ``` /// /// We actually parse slightly more relaxed grammar for better error reporting and recovery. /// ``` /// "impl" GENERICS "const"? "!"? TYPE "for"? (TYPE | "..") ("where" PREDICATES)? "{" BODY "}" /// "impl" GENERICS "const"? "!"? TYPE ("where" PREDICATES)? "{" BODY "}" /// ``` fn parse_item_impl( &mut self, attrs: &mut Vec<Attribute>, defaultness: Defaultness, ) -> PResult<'a, ItemInfo> { let unsafety = self.parse_unsafety(); self.expect_keyword(kw::Impl)?; // First, parse generic parameters if necessary. let mut generics = if self.choose_generics_over_qpath(0) { self.parse_generics()? } else { let mut generics = Generics::default(); // impl A for B {} // /\ this is where `generics.span` should point when there are no type params. generics.span = self.prev_token.span.shrink_to_hi(); generics }; let constness = self.parse_constness(); if let Const::Yes(span) = constness { self.sess.gated_spans.gate(sym::const_trait_impl, span); } let polarity = self.parse_polarity(); // Parse both types and traits as a type, then reinterpret if necessary. 
let err_path = |span| ast::Path::from_ident(Ident::new(kw::Empty, span)); let ty_first = if self.token.is_keyword(kw::For) && self.look_ahead(1, |t| t != &token::Lt) { let span = self.prev_token.span.between(self.token.span); self.struct_span_err(span, "missing trait in a trait impl").emit(); P(Ty { kind: TyKind::Path(None, err_path(span)), span, id: DUMMY_NODE_ID, tokens: None, }) } else { self.parse_ty()? }; // If `for` is missing we try to recover. let has_for = self.eat_keyword(kw::For); let missing_for_span = self.prev_token.span.between(self.token.span); let ty_second = if self.token == token::DotDot { // We need to report this error after `cfg` expansion for compatibility reasons self.bump(); // `..`, do not add it to expected tokens Some(self.mk_ty(self.prev_token.span, TyKind::Err)) } else if has_for || self.token.can_begin_type() { Some(self.parse_ty()?) } else { None }; generics.where_clause = self.parse_where_clause()?; let impl_items = self.parse_item_list(attrs, |p| p.parse_impl_item(ForceCollect::No))?; let item_kind = match ty_second { Some(ty_second) => { // impl Trait for Type if !has_for { self.struct_span_err(missing_for_span, "missing `for` in a trait impl") .span_suggestion_short( missing_for_span, "add `for` here", " for ".to_string(), Applicability::MachineApplicable, ) .emit(); } let ty_first = ty_first.into_inner(); let path = match ty_first.kind { // This notably includes paths passed through `ty` macro fragments (#46438). TyKind::Path(None, path) => path, _ => { self.struct_span_err(ty_first.span, "expected a trait, found type").emit(); err_path(ty_first.span) } }; let trait_ref = TraitRef { path, ref_id: ty_first.id }; ItemKind::Impl(box ImplKind { unsafety, polarity, defaultness, constness, generics, of_trait: Some(trait_ref), self_ty: ty_second, items: impl_items, }) } None => { // impl Type ItemKind::Impl(box ImplKind { unsafety, polarity, defaultness, constness, generics, of_trait: None, self_ty: ty_first, items: impl_items, }) } }; Ok((Ident::invalid(), item_kind)) } fn parse_item_list<T>( &mut self, attrs: &mut Vec<Attribute>, mut parse_item: impl FnMut(&mut Parser<'a>) -> PResult<'a, Option<Option<T>>>, ) -> PResult<'a, Vec<T>> { let open_brace_span = self.token.span; self.expect(&token::OpenDelim(token::Brace))?; attrs.append(&mut self.parse_inner_attributes()?); let mut items = Vec::new(); while !self.eat(&token::CloseDelim(token::Brace)) { if self.recover_doc_comment_before_brace() { continue; } match parse_item(self) { Ok(None) => { // We have to bail or we'll potentially never make progress. let non_item_span = self.token.span; self.consume_block(token::Brace, ConsumeClosingDelim::Yes); self.struct_span_err(non_item_span, "non-item in item list") .span_label(open_brace_span, "item list starts here") .span_label(non_item_span, "non-item starts here") .span_label(self.prev_token.span, "item list ends here") .emit(); break; } Ok(Some(item)) => items.extend(item), Err(mut err) => { self.consume_block(token::Brace, ConsumeClosingDelim::Yes); err.span_label(open_brace_span, "while parsing this item list starting here") .span_label(self.prev_token.span, "the item list ends here") .emit(); break; } } } Ok(items) } /// Recover on a doc comment before `}`. fn recover_doc_comment_before_brace(&mut self) -> bool { if let token::DocComment(..) 
= self.token.kind { if self.look_ahead(1, |tok| tok == &token::CloseDelim(token::Brace)) { struct_span_err!( self.diagnostic(), self.token.span, E0584, "found a documentation comment that doesn't document anything", ) .span_label(self.token.span, "this doc comment doesn't document anything") .help( "doc comments must come before what they document, maybe a \ comment was intended with `//`?", ) .emit(); self.bump(); return true; } } false } /// Parses defaultness (i.e., `default` or nothing). fn parse_defaultness(&mut self) -> Defaultness { // We are interested in `default` followed by another identifier. // However, we must avoid keywords that occur as binary operators. // Currently, the only applicable keyword is `as` (`default as Ty`). if self.check_keyword(kw::Default) && self.look_ahead(1, |t| t.is_non_raw_ident_where(|i| i.name != kw::As)) { self.bump(); // `default` Defaultness::Default(self.prev_token.uninterpolated_span()) } else { Defaultness::Final } } /// Is this an `(unsafe auto? | auto) trait` item? fn check_auto_or_unsafe_trait_item(&mut self) -> bool { // auto trait self.check_keyword(kw::Auto) && self.is_keyword_ahead(1, &[kw::Trait]) // unsafe auto trait || self.check_keyword(kw::Unsafe) && self.is_keyword_ahead(1, &[kw::Trait, kw::Auto]) } /// Parses `unsafe? auto? trait Foo { ... }` or `trait Foo = Bar;`. fn parse_item_trait(&mut self, attrs: &mut Vec<Attribute>, lo: Span) -> PResult<'a, ItemInfo> { let unsafety = self.parse_unsafety(); // Parse optional `auto` prefix. let is_auto = if self.eat_keyword(kw::Auto) { IsAuto::Yes } else { IsAuto::No }; self.expect_keyword(kw::Trait)?; let ident = self.parse_ident()?; let mut tps = self.parse_generics()?; // Parse optional colon and supertrait bounds. let had_colon = self.eat(&token::Colon); let span_at_colon = self.prev_token.span; let bounds = if had_colon { self.parse_generic_bounds(Some(self.prev_token.span))? } else { Vec::new() }; let span_before_eq = self.prev_token.span; if self.eat(&token::Eq) { // It's a trait alias. if had_colon { let span = span_at_colon.to(span_before_eq); self.struct_span_err(span, "bounds are not allowed on trait aliases").emit(); } let bounds = self.parse_generic_bounds(None)?; tps.where_clause = self.parse_where_clause()?; self.expect_semi()?; let whole_span = lo.to(self.prev_token.span); if is_auto == IsAuto::Yes { let msg = "trait aliases cannot be `auto`"; self.struct_span_err(whole_span, msg).span_label(whole_span, msg).emit(); } if let Unsafe::Yes(_) = unsafety { let msg = "trait aliases cannot be `unsafe`"; self.struct_span_err(whole_span, msg).span_label(whole_span, msg).emit(); } self.sess.gated_spans.gate(sym::trait_alias, whole_span); Ok((ident, ItemKind::TraitAlias(tps, bounds))) } else { // It's a normal trait. tps.where_clause = self.parse_where_clause()?; let items = self.parse_item_list(attrs, |p| p.parse_trait_item(ForceCollect::No))?; Ok((ident, ItemKind::Trait(box TraitKind(is_auto, unsafety, tps, bounds, items)))) } } pub fn parse_impl_item( &mut self, force_collect: ForceCollect, ) -> PResult<'a, Option<Option<P<AssocItem>>>> { self.parse_assoc_item(|_| true, force_collect) } pub fn parse_trait_item( &mut self, force_collect: ForceCollect, ) -> PResult<'a, Option<Option<P<AssocItem>>>> { self.parse_assoc_item(|edition| edition >= Edition::Edition2018, force_collect) } /// Parses associated items. 
fn parse_assoc_item( &mut self, req_name: ReqName, force_collect: ForceCollect, ) -> PResult<'a, Option<Option<P<AssocItem>>>> { Ok(self.parse_item_(req_name, force_collect)?.map( |Item { attrs, id, span, vis, ident, kind, tokens }| { let kind = match AssocItemKind::try_from(kind) { Ok(kind) => kind, Err(kind) => match kind { ItemKind::Static(a, _, b) => { self.struct_span_err(span, "associated `static` items are not allowed") .emit(); AssocItemKind::Const(Defaultness::Final, a, b) } _ => return self.error_bad_item_kind(span, &kind, "`trait`s or `impl`s"), }, }; Some(P(Item { attrs, id, span, vis, ident, kind, tokens })) }, )) } /// Parses a `type` alias with the following grammar: /// ``` /// TypeAlias = "type" Ident Generics {":" GenericBounds}? {"=" Ty}? ";" ; /// ``` /// The `"type"` has already been eaten. fn parse_type_alias(&mut self, def: Defaultness) -> PResult<'a, ItemInfo> { let ident = self.parse_ident()?; let mut generics = self.parse_generics()?; // Parse optional colon and param bounds. let bounds = if self.eat(&token::Colon) { self.parse_generic_bounds(None)? } else { Vec::new() }; generics.where_clause = self.parse_where_clause()?; let default = if self.eat(&token::Eq) { Some(self.parse_ty()?) } else { None }; self.expect_semi()?; Ok((ident, ItemKind::TyAlias(box TyAliasKind(def, generics, bounds, default)))) } /// Parses a `UseTree`. /// /// ```text /// USE_TREE = [`::`] `*` | /// [`::`] `{` USE_TREE_LIST `}` | /// PATH `::` `*` | /// PATH `::` `{` USE_TREE_LIST `}` | /// PATH [`as` IDENT] /// ``` fn parse_use_tree(&mut self) -> PResult<'a, UseTree> { let lo = self.token.span; let mut prefix = ast::Path { segments: Vec::new(), span: lo.shrink_to_lo(), tokens: None }; let kind = if self.check(&token::OpenDelim(token::Brace)) || self.check(&token::BinOp(token::Star)) || self.is_import_coupler() { // `use *;` or `use ::*;` or `use {...};` or `use ::{...};` let mod_sep_ctxt = self.token.span.ctxt(); if self.eat(&token::ModSep) { prefix .segments .push(PathSegment::path_root(lo.shrink_to_lo().with_ctxt(mod_sep_ctxt))); } self.parse_use_tree_glob_or_nested()? } else { // `use path::*;` or `use path::{...};` or `use path;` or `use path as bar;` prefix = self.parse_path(PathStyle::Mod)?; if self.eat(&token::ModSep) { self.parse_use_tree_glob_or_nested()? } else { UseTreeKind::Simple(self.parse_rename()?, DUMMY_NODE_ID, DUMMY_NODE_ID) } }; Ok(UseTree { prefix, kind, span: lo.to(self.prev_token.span) }) } /// Parses `*` or `{...}`. fn parse_use_tree_glob_or_nested(&mut self) -> PResult<'a, UseTreeKind> { Ok(if self.eat(&token::BinOp(token::Star)) { UseTreeKind::Glob } else { UseTreeKind::Nested(self.parse_use_tree_list()?) }) } /// Parses a `UseTreeKind::Nested(list)`. /// /// ```text /// USE_TREE_LIST = Ø | (USE_TREE `,`)* USE_TREE [`,`] /// ``` fn parse_use_tree_list(&mut self) -> PResult<'a, Vec<(UseTree, ast::NodeId)>> { self.parse_delim_comma_seq(token::Brace, |p| Ok((p.parse_use_tree()?, DUMMY_NODE_ID))) .map(|(r, _)| r) } fn parse_rename(&mut self) -> PResult<'a, Option<Ident>> { if self.eat_keyword(kw::As) { self.parse_ident_or_underscore().map(Some) } else { Ok(None) } } fn parse_ident_or_underscore(&mut self) -> PResult<'a, Ident> { match self.token.ident() { Some((ident @ Ident { name: kw::Underscore, .. }, false)) => { self.bump(); Ok(ident) } _ => self.parse_ident(), } } /// Parses `extern crate` links. 
/// /// # Examples /// /// ``` /// extern crate foo; /// extern crate bar as foo; /// ``` fn parse_item_extern_crate(&mut self) -> PResult<'a, ItemInfo> { // Accept `extern crate name-like-this` for better diagnostics let orig_name = self.parse_crate_name_with_dashes()?; let (item_name, orig_name) = if let Some(rename) = self.parse_rename()? { (rename, Some(orig_name.name)) } else { (orig_name, None) }; self.expect_semi()?; Ok((item_name, ItemKind::ExternCrate(orig_name))) } fn parse_crate_name_with_dashes(&mut self) -> PResult<'a, Ident> { let error_msg = "crate name using dashes are not valid in `extern crate` statements"; let suggestion_msg = "if the original crate name uses dashes you need to use underscores \ in the code"; let mut ident = if self.token.is_keyword(kw::SelfLower) { self.parse_path_segment_ident() } else { self.parse_ident() }?; let mut idents = vec![]; let mut replacement = vec![]; let mut fixed_crate_name = false; // Accept `extern crate name-like-this` for better diagnostics. let dash = token::BinOp(token::BinOpToken::Minus); if self.token == dash { // Do not include `-` as part of the expected tokens list. while self.eat(&dash) { fixed_crate_name = true; replacement.push((self.prev_token.span, "_".to_string())); idents.push(self.parse_ident()?); } } if fixed_crate_name { let fixed_name_sp = ident.span.to(idents.last().unwrap().span); let mut fixed_name = format!("{}", ident.name); for part in idents { fixed_name.push_str(&format!("_{}", part.name)); } ident = Ident::from_str_and_span(&fixed_name, fixed_name_sp); self.struct_span_err(fixed_name_sp, error_msg) .span_label(fixed_name_sp, "dash-separated idents are not valid") .multipart_suggestion(suggestion_msg, replacement, Applicability::MachineApplicable) .emit(); } Ok(ident) } /// Parses `extern` for foreign ABIs modules. /// /// `extern` is expected to have been consumed before calling this method. /// /// # Examples /// /// ```ignore (only-for-syntax-highlight) /// extern "C" {} /// extern {} /// ``` fn parse_item_foreign_mod( &mut self, attrs: &mut Vec<Attribute>, unsafety: Unsafe, ) -> PResult<'a, ItemInfo> { let abi = self.parse_abi(); // ABI? let items = self.parse_item_list(attrs, |p| p.parse_foreign_item(ForceCollect::No))?; let module = ast::ForeignMod { unsafety, abi, items }; Ok((Ident::invalid(), ItemKind::ForeignMod(module))) } /// Parses a foreign item (one in an `extern { ... }` block). 
pub fn parse_foreign_item( &mut self, force_collect: ForceCollect, ) -> PResult<'a, Option<Option<P<ForeignItem>>>> { Ok(self.parse_item_(|_| true, force_collect)?.map( |Item { attrs, id, span, vis, ident, kind, tokens }| { let kind = match ForeignItemKind::try_from(kind) { Ok(kind) => kind, Err(kind) => match kind { ItemKind::Const(_, a, b) => { self.error_on_foreign_const(span, ident); ForeignItemKind::Static(a, Mutability::Not, b) } _ => return self.error_bad_item_kind(span, &kind, "`extern` blocks"), }, }; Some(P(Item { attrs, id, span, vis, ident, kind, tokens })) }, )) } fn error_bad_item_kind<T>(&self, span: Span, kind: &ItemKind, ctx: &str) -> Option<T> { let span = self.sess.source_map().guess_head_span(span); let descr = kind.descr(); self.struct_span_err(span, &format!("{} is not supported in {}", descr, ctx)) .help(&format!("consider moving the {} out to a nearby module scope", descr)) .emit(); None } fn error_on_foreign_const(&self, span: Span, ident: Ident) { self.struct_span_err(ident.span, "extern items cannot be `const`") .span_suggestion( span.with_hi(ident.span.lo()), "try using a static value", "static ".to_string(), Applicability::MachineApplicable, ) .note("for more information, visit https://doc.rust-lang.org/std/keyword.extern.html") .emit(); } fn i
&self) -> bool { self.token.is_keyword(kw::Unsafe) && self.is_keyword_ahead(1, &[kw::Extern]) && self.look_ahead( 2 + self.look_ahead(2, |t| t.can_begin_literal_maybe_minus() as usize), |t| t.kind == token::OpenDelim(token::Brace), ) } fn is_static_global(&mut self) -> bool { if self.check_keyword(kw::Static) { // Check if this could be a closure. !self.look_ahead(1, |token| { if token.is_keyword(kw::Move) { return true; } matches!(token.kind, token::BinOp(token::Or) | token::OrOr) }) } else { false } } /// Recover on `const mut` with `const` already eaten. fn recover_const_mut(&mut self, const_span: Span) { if self.eat_keyword(kw::Mut) { let span = self.prev_token.span; self.struct_span_err(span, "const globals cannot be mutable") .span_label(span, "cannot be mutable") .span_suggestion( const_span, "you might want to declare a static instead", "static".to_owned(), Applicability::MaybeIncorrect, ) .emit(); } } /// Recover on `const impl` with `const` already eaten. fn recover_const_impl( &mut self, const_span: Span, attrs: &mut Vec<Attribute>, defaultness: Defaultness, ) -> PResult<'a, ItemInfo> { let impl_span = self.token.span; let mut err = self.expected_ident_found(); // Only try to recover if this is implementing a trait for a type let mut impl_info = match self.parse_item_impl(attrs, defaultness) { Ok(impl_info) => impl_info, Err(mut recovery_error) => { // Recovery failed, raise the "expected identifier" error recovery_error.cancel(); return Err(err); } }; match impl_info.1 { ItemKind::Impl(box ImplKind { of_trait: Some(ref trai), ref mut constness, .. }) => { *constness = Const::Yes(const_span); let before_trait = trai.path.span.shrink_to_lo(); let const_up_to_impl = const_span.with_hi(impl_span.lo()); err.multipart_suggestion( "you might have meant to write a const trait impl", vec![(const_up_to_impl, "".to_owned()), (before_trait, "const ".to_owned())], Applicability::MaybeIncorrect, ) .emit(); } ItemKind::Impl { .. } => return Err(err), _ => unreachable!(), } Ok(impl_info) } /// Parse `["const" | ("static" "mut"?)] $ident ":" $ty (= $expr)?` with /// `["const" | ("static" "mut"?)]` already parsed and stored in `m`. /// /// When `m` is `"const"`, `$ident` may also be `"_"`. fn parse_item_global( &mut self, m: Option<Mutability>, ) -> PResult<'a, (Ident, P<Ty>, Option<P<ast::Expr>>)> { let id = if m.is_none() { self.parse_ident_or_underscore() } else { self.parse_ident() }?; // Parse the type of a `const` or `static mut?` item. // That is, the `":" $ty` fragment. let ty = if self.eat(&token::Colon) { self.parse_ty()? } else { self.recover_missing_const_type(id, m) }; let expr = if self.eat(&token::Eq) { Some(self.parse_expr()?) } else { None }; self.expect_semi()?; Ok((id, ty, expr)) } /// We were supposed to parse `:` but the `:` was missing. /// This means that the type is missing. fn recover_missing_const_type(&mut self, id: Ident, m: Option<Mutability>) -> P<Ty> { // Construct the error and stash it away with the hope // that typeck will later enrich the error with a type. let kind = match m { Some(Mutability::Mut) => "static mut", Some(Mutability::Not) => "static", None => "const", }; let mut err = self.struct_span_err(id.span, &format!("missing type for `{}` item", kind)); err.span_suggestion( id.span, "provide a type for the item", format!("{}: <type>", id), Applicability::HasPlaceholders, ); err.stash(id.span, StashKey::ItemNoType); // The user intended that the type be inferred, // so treat this as if the user wrote e.g. `const A: _ = expr;`. 
P(Ty { kind: TyKind::Infer, span: id.span, id: ast::DUMMY_NODE_ID, tokens: None }) } /// Parses an enum declaration. fn parse_item_enum(&mut self) -> PResult<'a, ItemInfo> { let id = self.parse_ident()?; let mut generics = self.parse_generics()?; generics.where_clause = self.parse_where_clause()?; let (variants, _) = self.parse_delim_comma_seq(token::Brace, |p| p.parse_enum_variant()).map_err(|e| { self.recover_stmt(); e })?; let enum_definition = EnumDef { variants: variants.into_iter().flatten().collect() }; Ok((id, ItemKind::Enum(enum_definition, generics))) } fn parse_enum_variant(&mut self) -> PResult<'a, Option<Variant>> { let variant_attrs = self.parse_outer_attributes()?; self.collect_tokens_trailing_token( variant_attrs, ForceCollect::No, |this, variant_attrs| { let vlo = this.token.span; let vis = this.parse_visibility(FollowedByType::No)?; if !this.recover_nested_adt_item(kw::Enum)? { return Ok((None, TrailingToken::None)); } let ident = this.parse_field_ident("enum", vlo)?; let struct_def = if this.check(&token::OpenDelim(token::Brace)) { // Parse a struct variant. let (fields, recovered) = this.parse_record_struct_body("struct")?; VariantData::Struct(fields, recovered) } else if this.check(&token::OpenDelim(token::Paren)) { VariantData::Tuple(this.parse_tuple_struct_body()?, DUMMY_NODE_ID) } else { VariantData::Unit(DUMMY_NODE_ID) }; let disr_expr = if this.eat(&token::Eq) { Some(this.parse_anon_const_expr()?) } else { None }; let vr = ast::Variant { ident, vis, id: DUMMY_NODE_ID, attrs: variant_attrs.into(), data: struct_def, disr_expr, span: vlo.to(this.prev_token.span), is_placeholder: false, }; Ok((Some(vr), TrailingToken::MaybeComma)) }, ) } /// Parses `struct Foo { ... }`. fn parse_item_struct(&mut self) -> PResult<'a, ItemInfo> { let class_name = self.parse_ident()?; let mut generics = self.parse_generics()?; // There is a special case worth noting here, as reported in issue #17904. // If we are parsing a tuple struct it is the case that the where clause // should follow the field list. Like so: // // struct Foo<T>(T) where T: Copy; // // If we are parsing a normal record-style struct it is the case // that the where clause comes before the body, and after the generics. // So if we look ahead and see a brace or a where-clause we begin // parsing a record style struct. // // Otherwise if we look ahead and see a paren we parse a tuple-style // struct. let vdata = if self.token.is_keyword(kw::Where) { generics.where_clause = self.parse_where_clause()?; if self.eat(&token::Semi) { // If we see a: `struct Foo<T> where T: Copy;` style decl. VariantData::Unit(DUMMY_NODE_ID) } else { // If we see: `struct Foo<T> where T: Copy { ... }` let (fields, recovered) = self.parse_record_struct_body("struct")?; VariantData::Struct(fields, recovered) } // No `where` so: `struct Foo<T>;` } else if self.eat(&token::Semi) { VariantData::Unit(DUMMY_NODE_ID) // Record-style struct definition } else if self.token == token::OpenDelim(token::Brace) { let (fields, recovered) = self.parse_record_struct_body("struct")?; VariantData::Struct(fields, recovered) // Tuple-style struct definition with optional where-clause. 
} else if self.token == token::OpenDelim(token::Paren) { let body = VariantData::Tuple(self.parse_tuple_struct_body()?, DUMMY_NODE_ID); generics.where_clause = self.parse_where_clause()?; self.expect_semi()?; body } else { let token_str = super::token_descr(&self.token); let msg = &format!( "expected `where`, `{{`, `(`, or `;` after struct name, found {}", token_str ); let mut err = self.struct_span_err(self.token.span, msg); err.span_label(self.token.span, "expected `where`, `{`, `(`, or `;` after struct name"); return Err(err); }; Ok((class_name, ItemKind::Struct(vdata, generics))) } /// Parses `union Foo { ... }`. fn parse_item_union(&mut self) -> PResult<'a, ItemInfo> { let class_name = self.parse_ident()?; let mut generics = self.parse_generics()?; let vdata = if self.token.is_keyword(kw::Where) { generics.where_clause = self.parse_where_clause()?; let (fields, recovered) = self.parse_record_struct_body("union")?; VariantData::Struct(fields, recovered) } else if self.token == token::OpenDelim(token::Brace) { let (fields, recovered) = self.parse_record_struct_body("union")?; VariantData::Struct(fields, recovered) } else { let token_str = super::token_descr(&self.token); let msg = &format!("expected `where` or `{{` after union name, found {}", token_str); let mut err = self.struct_span_err(self.token.span, msg); err.span_label(self.token.span, "expected `where` or `{` after union name"); return Err(err); }; Ok((class_name, ItemKind::Union(vdata, generics))) } pub(super) fn parse_record_struct_body( &mut self, adt_ty: &str, ) -> PResult<'a, (Vec<FieldDef>, /* recovered */ bool)> { let mut fields = Vec::new(); let mut recovered = false; if self.eat(&token::OpenDelim(token::Brace)) { while self.token != token::CloseDelim(token::Brace) { let field = self.parse_field_def(adt_ty).map_err(|e| { self.consume_block(token::Brace, ConsumeClosingDelim::No); recovered = true; e }); match field { Ok(field) => fields.push(field), Err(mut err) => { err.emit(); break; } } } self.eat(&token::CloseDelim(token::Brace)); } else { let token_str = super::token_descr(&self.token); let msg = &format!("expected `where`, or `{{` after struct name, found {}", token_str); let mut err = self.struct_span_err(self.token.span, msg); err.span_label(self.token.span, "expected `where`, or `{` after struct name"); return Err(err); } Ok((fields, recovered)) } fn parse_tuple_struct_body(&mut self) -> PResult<'a, Vec<FieldDef>> { // This is the case where we find `struct Foo<T>(T) where T: Copy;` // Unit like structs are handled in parse_item_struct function self.parse_paren_comma_seq(|p| { let attrs = p.parse_outer_attributes()?; p.collect_tokens_trailing_token(attrs, ForceCollect::No, |p, attrs| { let lo = p.token.span; let vis = p.parse_visibility(FollowedByType::Yes)?; let ty = p.parse_ty()?; Ok(( FieldDef { span: lo.to(ty.span), vis, ident: None, id: DUMMY_NODE_ID, ty, attrs: attrs.into(), is_placeholder: false, }, TrailingToken::MaybeComma, )) }) }) .map(|(r, _)| r) } /// Parses an element of a struct declaration. fn parse_field_def(&mut self, adt_ty: &str) -> PResult<'a, FieldDef> { let attrs = self.parse_outer_attributes()?; self.collect_tokens_trailing_token(attrs, ForceCollect::No, |this, attrs| { let lo = this.token.span; let vis = this.parse_visibility(FollowedByType::No)?; Ok((this.parse_single_struct_field(adt_ty, lo, vis, attrs)?, TrailingToken::None)) }) } /// Parses a structure field declaration. 
fn parse_single_struct_field( &mut self, adt_ty: &str, lo: Span, vis: Visibility, attrs: Vec<Attribute>, ) -> PResult<'a, FieldDef> { let mut seen_comma: bool = false; let a_var = self.parse_name_and_ty(adt_ty, lo, vis, attrs)?; if self.token == token::Comma { seen_comma = true; } match self.token.kind { token::Comma => { self.bump(); } token::CloseDelim(token::Brace) => {} token::DocComment(..) => { let previous_span = self.prev_token.span; let mut err = self.span_err(self.token.span, Error::UselessDocComment); self.bump(); // consume the doc comment let comma_after_doc_seen = self.eat(&token::Comma); // `seen_comma` is always false, because we are inside doc block // condition is here to make code more readable if !seen_comma && comma_after_doc_seen { seen_comma = true; } if comma_after_doc_seen || self.token == token::CloseDelim(token::Brace) { err.emit(); } else { if !seen_comma { let sp = self.sess.source_map().next_point(previous_span); err.span_suggestion( sp, "missing comma here", ",".into(), Applicability::MachineApplicable, ); } return Err(err); } } _ => { let sp = self.prev_token.span.shrink_to_hi(); let mut err = self.struct_span_err( sp, &format!("expected `,`, or `}}`, found {}", super::token_descr(&self.token)), ); // Try to recover extra trailing angle brackets let mut recovered = false; if let TyKind::Path(_, Path { segments, .. }) = &a_var.ty.kind { if let Some(last_segment) = segments.last() { recovered = self.check_trailing_angle_brackets( last_segment, &[&token::Comma, &token::CloseDelim(token::Brace)], ); if recovered { // Handle a case like `Vec<u8>>,` where we can continue parsing fields // after the comma self.eat(&token::Comma); // `check_trailing_angle_brackets` already emitted a nicer error err.cancel(); } } } if self.token.is_ident() { // This is likely another field; emit the diagnostic and keep going err.span_suggestion( sp, "try adding a comma", ",".into(), Applicability::MachineApplicable, ); err.emit(); recovered = true; } if recovered { // Make sure an error was emitted (either by recovering an angle bracket, // or by finding an identifier as the next token), since we're // going to continue parsing assert!(self.sess.span_diagnostic.has_errors()); } else { return Err(err); } } } Ok(a_var) } fn expect_field_ty_separator(&mut self) -> PResult<'a, ()> { if let Err(mut err) = self.expect(&token::Colon) { let sm = self.sess.source_map(); let eq_typo = self.token.kind == token::Eq && self.look_ahead(1, |t| t.is_path_start()); let semi_typo = self.token.kind == token::Semi && self.look_ahead(1, |t| { t.is_path_start() // We check that we are in a situation like `foo; bar` to avoid bad suggestions // when there's no type and `;` was used instead of a comma. && match (sm.lookup_line(self.token.span.hi()), sm.lookup_line(t.span.lo())) { (Ok(l), Ok(r)) => l.line == r.line, _ => true, } }); if eq_typo || semi_typo { self.bump(); // Gracefully handle small typos. err.span_suggestion_short( self.prev_token.span, "field names and their types are separated with `:`", ":".to_string(), Applicability::MachineApplicable, ); err.emit(); } else { return Err(err); } } Ok(()) } /// Parses a structure field. 
fn parse_name_and_ty( &mut self, adt_ty: &str, lo: Span, vis: Visibility, attrs: Vec<Attribute>, ) -> PResult<'a, FieldDef> { let name = self.parse_field_ident(adt_ty, lo)?; self.expect_field_ty_separator()?; let ty = self.parse_ty()?; if self.token.kind == token::Eq { self.bump(); let const_expr = self.parse_anon_const_expr()?; let sp = ty.span.shrink_to_hi().to(const_expr.value.span); self.struct_span_err(sp, "default values on `struct` fields aren't supported") .span_suggestion( sp, "remove this unsupported default value", String::new(), Applicability::MachineApplicable, ) .emit(); } Ok(FieldDef { span: lo.to(self.prev_token.span), ident: Some(name), vis, id: DUMMY_NODE_ID, ty, attrs: attrs.into(), is_placeholder: false, }) } /// Parses a field identifier. Specialized version of `parse_ident_common` /// for better diagnostics and suggestions. fn parse_field_ident(&mut self, adt_ty: &str, lo: Span) -> PResult<'a, Ident> { let (ident, is_raw) = self.ident_or_err()?; if !is_raw && ident.is_reserved() { if ident.name == kw::Underscore { self.sess.gated_spans.gate(sym::unnamed_fields, lo); } else { let err = if self.check_fn_front_matter(false) { // We use `parse_fn` to get a span for the function if let Err(mut db) = self.parse_fn(&mut Vec::new(), |_| true, lo) { db.delay_as_bug(); } let mut err = self.struct_span_err( lo.to(self.prev_token.span), &format!("functions are not allowed in {} definitions", adt_ty), ); err.help( "unlike in C++, Java, and C#, functions are declared in `impl` blocks", ); err.help("see https://doc.rust-lang.org/book/ch05-03-method-syntax.html for more information"); err } else { self.expected_ident_found() }; return Err(err); } } self.bump(); Ok(ident) } /// Parses a declarative macro 2.0 definition. /// The `macro` keyword has already been parsed. /// ``` /// MacBody = "{" TOKEN_STREAM "}" ; /// MacParams = "(" TOKEN_STREAM ")" ; /// DeclMac = "macro" Ident MacParams? MacBody ; /// ``` fn parse_item_decl_macro(&mut self, lo: Span) -> PResult<'a, ItemInfo> { let ident = self.parse_ident()?; let body = if self.check(&token::OpenDelim(token::Brace)) { self.parse_mac_args()? // `MacBody` } else if self.check(&token::OpenDelim(token::Paren)) { let params = self.parse_token_tree(); // `MacParams` let pspan = params.span(); if !self.check(&token::OpenDelim(token::Brace)) { return self.unexpected(); } let body = self.parse_token_tree(); // `MacBody` // Convert `MacParams MacBody` into `{ MacParams => MacBody }`. let bspan = body.span(); let arrow = TokenTree::token(token::FatArrow, pspan.between(bspan)); // `=>` let tokens = TokenStream::new(vec![params.into(), arrow.into(), body.into()]); let dspan = DelimSpan::from_pair(pspan.shrink_to_lo(), bspan.shrink_to_hi()); P(MacArgs::Delimited(dspan, MacDelimiter::Brace, tokens)) } else { return self.unexpected(); }; self.sess.gated_spans.gate(sym::decl_macro, lo.to(self.prev_token.span)); Ok((ident, ItemKind::MacroDef(ast::MacroDef { body, macro_rules: false }))) } /// Is this unambiguously the start of a `macro_rules! foo` item definition? fn is_macro_rules_item(&mut self) -> bool { self.check_keyword(kw::MacroRules) && self.look_ahead(1, |t| *t == token::Not) && self.look_ahead(2, |t| t.is_ident()) } /// Parses a `macro_rules! foo { ... }` declarative macro. 
fn parse_item_macro_rules(&mut self, vis: &Visibility) -> PResult<'a, ItemInfo> { self.expect_keyword(kw::MacroRules)?; // `macro_rules` self.expect(&token::Not)?; // `!` let ident = self.parse_ident()?; let body = self.parse_mac_args()?; self.eat_semi_for_macro_if_needed(&body); self.complain_if_pub_macro(vis, true); Ok((ident, ItemKind::MacroDef(ast::MacroDef { body, macro_rules: true }))) } /// Item macro invocations or `macro_rules!` definitions need inherited visibility. /// If that's not the case, emit an error. fn complain_if_pub_macro(&self, vis: &Visibility, macro_rules: bool) { if let VisibilityKind::Inherited = vis.kind { return; } let vstr = pprust::vis_to_string(vis); let vstr = vstr.trim_end(); if macro_rules { let msg = format!("can't qualify macro_rules invocation with `{}`", vstr); self.struct_span_err(vis.span, &msg) .span_suggestion( vis.span, "try exporting the macro", "#[macro_export]".to_owned(), Applicability::MaybeIncorrect, // speculative ) .emit(); } else { self.struct_span_err(vis.span, "can't qualify macro invocation with `pub`") .span_suggestion( vis.span, "remove the visibility", String::new(), Applicability::MachineApplicable, ) .help(&format!("try adjusting the macro to put `{}` inside the invocation", vstr)) .emit(); } } fn eat_semi_for_macro_if_needed(&mut self, args: &MacArgs) { if args.need_semicolon() && !self.eat(&token::Semi) { self.report_invalid_macro_expansion_item(args); } } fn report_invalid_macro_expansion_item(&self, args: &MacArgs) { let span = args.span().expect("undelimited macro call"); let mut err = self.struct_span_err( span, "macros that expand to items must be delimited with braces or followed by a semicolon", ); if self.unclosed_delims.is_empty() { let DelimSpan { open, close } = match args { MacArgs::Empty | MacArgs::Eq(..) => unreachable!(), MacArgs::Delimited(dspan, ..) => *dspan, }; err.multipart_suggestion( "change the delimiters to curly braces", vec![(open, "{".to_string()), (close, '}'.to_string())], Applicability::MaybeIncorrect, ); } else { err.span_suggestion( span, "change the delimiters to curly braces", " { /* items */ }".to_string(), Applicability::HasPlaceholders, ); } err.span_suggestion( span.shrink_to_hi(), "add a semicolon", ';'.to_string(), Applicability::MaybeIncorrect, ); err.emit(); } /// Checks if current token is one of tokens which cannot be nested like `kw::Enum`. In case /// it is, we try to parse the item and report error about nested types. fn recover_nested_adt_item(&mut self, keyword: Symbol) -> PResult<'a, bool> { if (self.token.is_keyword(kw::Enum) || self.token.is_keyword(kw::Struct) || self.token.is_keyword(kw::Union)) && self.look_ahead(1, |t| t.is_ident()) { let kw_token = self.token.clone(); let kw_str = pprust::token_to_string(&kw_token); let item = self.parse_item(ForceCollect::No)?; self.struct_span_err( kw_token.span, &format!("`{}` definition cannot be nested inside `{}`", kw_str, keyword), ) .span_suggestion( item.unwrap().span, &format!("consider creating a new `{}` definition instead of nesting", kw_str), String::new(), Applicability::MaybeIncorrect, ) .emit(); // We successfully parsed the item but we must inform the caller about nested problem. return Ok(false); } Ok(true) } } /// The parsing configuration used to parse a parameter list (see `parse_fn_params`). /// /// The function decides if, per-parameter `p`, `p` must have a pattern or just a type. type ReqName = fn(Edition) -> bool; /// Parsing of functions and methods. 
impl<'a> Parser<'a> { /// Parse a function starting from the front matter (`const ...`) to the body `{ ... }` or `;`. fn parse_fn( &mut self, attrs: &mut Vec<Attribute>, req_name: ReqName, sig_lo: Span, ) -> PResult<'a, (Ident, FnSig, Generics, Option<P<Block>>)> { let header = self.parse_fn_front_matter()?; // `const ... fn` let ident = self.parse_ident()?; // `foo` let mut generics = self.parse_generics()?; // `<'a, T, ...>` let decl = self.parse_fn_decl(req_name, AllowPlus::Yes, RecoverReturnSign::Yes)?; // `(p: u8, ...)` generics.where_clause = self.parse_where_clause()?; // `where T: Ord` let mut sig_hi = self.prev_token.span; let body = self.parse_fn_body(attrs, &ident, &mut sig_hi)?; // `;` or `{ ... }`. let fn_sig_span = sig_lo.to(sig_hi); Ok((ident, FnSig { header, decl, span: fn_sig_span }, generics, body)) } /// Parse the "body" of a function. /// This can either be `;` when there's no body, /// or e.g. a block when the function is a provided one. fn parse_fn_body( &mut self, attrs: &mut Vec<Attribute>, ident: &Ident, sig_hi: &mut Span, ) -> PResult<'a, Option<P<Block>>> { let (inner_attrs, body) = if self.eat(&token::Semi) { // Include the trailing semicolon in the span of the signature *sig_hi = self.prev_token.span; (Vec::new(), None) } else if self.check(&token::OpenDelim(token::Brace)) || self.token.is_whole_block() { self.parse_inner_attrs_and_block().map(|(attrs, body)| (attrs, Some(body)))? } else if self.token.kind == token::Eq { // Recover `fn foo() = $expr;`. self.bump(); // `=` let eq_sp = self.prev_token.span; let _ = self.parse_expr()?; self.expect_semi()?; // `;` let span = eq_sp.to(self.prev_token.span); self.struct_span_err(span, "function body cannot be `= expression;`") .multipart_suggestion( "surround the expression with `{` and `}` instead of `=` and `;`", vec![(eq_sp, "{".to_string()), (self.prev_token.span, " }".to_string())], Applicability::MachineApplicable, ) .emit(); (Vec::new(), Some(self.mk_block_err(span))) } else { if let Err(mut err) = self.expected_one_of_not_found(&[], &[token::Semi, token::OpenDelim(token::Brace)]) { if self.token.kind == token::CloseDelim(token::Brace) { // The enclosing `mod`, `trait` or `impl` is being closed, so keep the `fn` in // the AST for typechecking. err.span_label(ident.span, "while parsing this `fn`"); err.emit(); (Vec::new(), None) } else { return Err(err); } } else { unreachable!() } }; attrs.extend(inner_attrs); Ok(body) } /// Is the current token the start of an `FnHeader` / not a valid parse? /// /// `check_pub` adds additional `pub` to the checks in case users place it /// wrongly, can be used to ensure `pub` never comes after `default`. pub(super) fn check_fn_front_matter(&mut self, check_pub: bool) -> bool { // We use an over-approximation here. // `const const`, `fn const` won't parse, but we're not stepping over other syntax either. // `pub` is added in case users got confused with the ordering like `async pub fn`, // only if it wasn't preceeded by `default` as `default pub` is invalid. let quals: &[Symbol] = if check_pub { &[kw::Pub, kw::Const, kw::Async, kw::Unsafe, kw::Extern] } else { &[kw::Const, kw::Async, kw::Unsafe, kw::Extern] }; self.check_keyword(kw::Fn) // Definitely an `fn`. // `$qual fn` or `$qual $qual`: || quals.iter().any(|&kw| self.check_keyword(kw)) && self.look_ahead(1, |t| { // `$qual fn`, e.g. `const fn` or `async fn`. t.is_keyword(kw::Fn) // Two qualifiers `$qual $qual` is enough, e.g. `async unsafe`. 
|| t.is_non_raw_ident_where(|i| quals.contains(&i.name) // Rule out 2015 `const async: T = val`. && i.is_reserved() // Rule out unsafe extern block. && !self.is_unsafe_foreign_mod()) }) // `extern ABI fn` || self.check_keyword(kw::Extern) && self.look_ahead(1, |t| t.can_begin_literal_maybe_minus()) && self.look_ahead(2, |t| t.is_keyword(kw::Fn)) } /// Parses all the "front matter" (or "qualifiers") for a `fn` declaration, /// up to and including the `fn` keyword. The formal grammar is: /// /// ``` /// Extern = "extern" StringLit? ; /// FnQual = "const"? "async"? "unsafe"? Extern? ; /// FnFrontMatter = FnQual "fn" ; /// ``` pub(super) fn parse_fn_front_matter(&mut self) -> PResult<'a, FnHeader> { let sp_start = self.token.span; let constness = self.parse_constness(); let asyncness = self.parse_asyncness(); let unsafety = self.parse_unsafety(); let ext = self.parse_extern(); if let Async::Yes { span, .. } = asyncness { self.ban_async_in_2015(span); } if !self.eat_keyword(kw::Fn) { // It is possible for `expect_one_of` to recover given the contents of // `self.expected_tokens`, therefore, do not use `self.unexpected()` which doesn't // account for this. match self.expect_one_of(&[], &[]) { Ok(true) => {} Ok(false) => unreachable!(), Err(mut err) => { // Recover incorrect visibility order such as `async pub`. if self.check_keyword(kw::Pub) { let sp = sp_start.to(self.prev_token.span); if let Ok(snippet) = self.span_to_snippet(sp) { let vis = match self.parse_visibility(FollowedByType::No) { Ok(v) => v, Err(mut d) => { d.cancel(); return Err(err); } }; let vs = pprust::vis_to_string(&vis); let vs = vs.trim_end(); err.span_suggestion( sp_start.to(self.prev_token.span), &format!("visibility `{}` must come before `{}`", vs, snippet), format!("{} {}", vs, snippet), Applicability::MachineApplicable, ); } } return Err(err); } } } Ok(FnHeader { constness, unsafety, asyncness, ext }) } /// We are parsing `async fn`. If we are on Rust 2015, emit an error. fn ban_async_in_2015(&self, span: Span) { if span.rust_2015() { let diag = self.diagnostic(); struct_span_err!(diag, span, E0670, "`async fn` is not permitted in Rust 2015") .span_label(span, "to use `async fn`, switch to Rust 2018 or later") .help(&format!("set `edition = \"{}\"` in `Cargo.toml`", LATEST_STABLE_EDITION)) .note("for more on editions, read https://doc.rust-lang.org/edition-guide") .emit(); } } /// Parses the parameter list and result type of a function declaration. pub(super) fn parse_fn_decl( &mut self, req_name: ReqName, ret_allow_plus: AllowPlus, recover_return_sign: RecoverReturnSign, ) -> PResult<'a, P<FnDecl>> { Ok(P(FnDecl { inputs: self.parse_fn_params(req_name)?, output: self.parse_ret_ty(ret_allow_plus, RecoverQPath::Yes, recover_return_sign)?, })) } /// Parses the parameter list of a function, including the `(` and `)` delimiters. fn parse_fn_params(&mut self, req_name: ReqName) -> PResult<'a, Vec<Param>> { let mut first_param = true; // Parse the arguments, starting out with `self` being allowed... let (mut params, _) = self.parse_paren_comma_seq(|p| { let param = p.parse_param_general(req_name, first_param).or_else(|mut e| { e.emit(); let lo = p.prev_token.span; // Skip every token until next possible arg or end. p.eat_to_tokens(&[&token::Comma, &token::CloseDelim(token::Paren)]); // Create a placeholder argument for proper arg count (issue #34264). Ok(dummy_arg(Ident::new(kw::Empty, lo.to(p.prev_token.span)))) }); // ...now that we've parsed the first argument, `self` is no longer allowed. 
first_param = false; param })?; // Replace duplicated recovered params with `_` pattern to avoid unnecessary errors. self.deduplicate_recovered_params_names(&mut params); Ok(params) } /// Parses a single function parameter. /// /// - `self` is syntactically allowed when `first_param` holds. fn parse_param_general(&mut self, req_name: ReqName, first_param: bool) -> PResult<'a, Param> { let lo = self.token.span; let attrs = self.parse_outer_attributes()?; self.collect_tokens_trailing_token(attrs, ForceCollect::No, |this, attrs| { // Possibly parse `self`. Recover if we parsed it and it wasn't allowed here. if let Some(mut param) = this.parse_self_param()? { param.attrs = attrs.into(); let res = if first_param { Ok(param) } else { this.recover_bad_self_param(param) }; return Ok((res?, TrailingToken::None)); } let is_name_required = match this.token.kind { token::DotDotDot => false, _ => req_name(this.token.span.edition()), }; let (pat, ty) = if is_name_required || this.is_named_param() { debug!("parse_param_general parse_pat (is_name_required:{})", is_name_required); let (pat, colon) = this.parse_fn_param_pat_colon()?; if !colon { let mut err = this.unexpected::<()>().unwrap_err(); return if let Some(ident) = this.parameter_without_type(&mut err, pat, is_name_required, first_param) { err.emit(); Ok((dummy_arg(ident), TrailingToken::None)) } else { Err(err) }; } this.eat_incorrect_doc_comment_for_param_type(); (pat, this.parse_ty_for_param()?) } else { debug!("parse_param_general ident_to_pat"); let parser_snapshot_before_ty = this.clone(); this.eat_incorrect_doc_comment_for_param_type(); let mut ty = this.parse_ty_for_param(); if ty.is_ok() && this.token != token::Comma && this.token != token::CloseDelim(token::Paren) { // This wasn't actually a type, but a pattern looking like a type, // so we are going to rollback and re-parse for recovery. ty = this.unexpected(); } match ty { Ok(ty) => { let ident = Ident::new(kw::Empty, this.prev_token.span); let bm = BindingMode::ByValue(Mutability::Not); let pat = this.mk_pat_ident(ty.span, bm, ident); (pat, ty) } // If this is a C-variadic argument and we hit an error, return the error. Err(err) if this.token == token::DotDotDot => return Err(err), // Recover from attempting to parse the argument as a type without pattern. Err(mut err) => { err.cancel(); *this = parser_snapshot_before_ty; this.recover_arg_parse()? } } }; let span = lo.until(this.token.span); Ok(( Param { attrs: attrs.into(), id: ast::DUMMY_NODE_ID, is_placeholder: false, pat, span, ty, }, TrailingToken::None, )) }) } /// Returns the parsed optional self parameter and whether a self shortcut was used. fn parse_self_param(&mut self) -> PResult<'a, Option<Param>> { // Extract an identifier *after* having confirmed that the token is one. let expect_self_ident = |this: &mut Self| match this.token.ident() { Some((ident, false)) => { this.bump(); ident } _ => unreachable!(), }; // Is `self` `n` tokens ahead? let is_isolated_self = |this: &Self, n| { this.is_keyword_ahead(n, &[kw::SelfLower]) && this.look_ahead(n + 1, |t| t != &token::ModSep) }; // Is `mut self` `n` tokens ahead? let is_isolated_mut_self = |this: &Self, n| this.is_keyword_ahead(n, &[kw::Mut]) && is_isolated_self(this, n + 1); // Parse `self` or `self: TYPE`. We already know the current token is `self`. 
let parse_self_possibly_typed = |this: &mut Self, m| { let eself_ident = expect_self_ident(this); let eself_hi = this.prev_token.span; let eself = if this.eat(&token::Colon) { SelfKind::Explicit(this.parse_ty()?, m) } else { SelfKind::Value(m) }; Ok((eself, eself_ident, eself_hi)) }; // Recover for the grammar `*self`, `*const self`, and `*mut self`. let recover_self_ptr = |this: &mut Self| { let msg = "cannot pass `self` by raw pointer"; let span = this.token.span; this.struct_span_err(span, msg).span_label(span, msg).emit(); Ok((SelfKind::Value(Mutability::Not), expect_self_ident(this), this.prev_token.span)) }; // Parse optional `self` parameter of a method. // Only a limited set of initial token sequences is considered `self` parameters; anything // else is parsed as a normal function parameter list, so some lookahead is required. let eself_lo = self.token.span; let (eself, eself_ident, eself_hi) = match self.token.uninterpolate().kind { token::BinOp(token::And) => { let eself = if is_isolated_self(self, 1) { // `&self` self.bump(); SelfKind::Region(None, Mutability::Not) } else if is_isolated_mut_self(self, 1) { // `&mut self` self.bump(); self.bump(); SelfKind::Region(None, Mutability::Mut) } else if self.look_ahead(1, |t| t.is_lifetime()) && is_isolated_self(self, 2) { // `&'lt self` self.bump(); let lt = self.expect_lifetime(); SelfKind::Region(Some(lt), Mutability::Not) } else if self.look_ahead(1, |t| t.is_lifetime()) && is_isolated_mut_self(self, 2) { // `&'lt mut self` self.bump(); let lt = self.expect_lifetime(); self.bump(); SelfKind::Region(Some(lt), Mutability::Mut) } else { // `&not_self` return Ok(None); }; (eself, expect_self_ident(self), self.prev_token.span) } // `*self` token::BinOp(token::Star) if is_isolated_self(self, 1) => { self.bump(); recover_self_ptr(self)? } // `*mut self` and `*const self` token::BinOp(token::Star) if self.look_ahead(1, |t| t.is_mutability()) && is_isolated_self(self, 2) => { self.bump(); self.bump(); recover_self_ptr(self)? } // `self` and `self: TYPE` token::Ident(..) if is_isolated_self(self, 0) => { parse_self_possibly_typed(self, Mutability::Not)? } // `mut self` and `mut self: TYPE` token::Ident(..) if is_isolated_mut_self(self, 0) => { self.bump(); parse_self_possibly_typed(self, Mutability::Mut)? } _ => return Ok(None), }; let eself = source_map::respan(eself_lo.to(eself_hi), eself); Ok(Some(Param::from_self(AttrVec::default(), eself, eself_ident))) } fn is_named_param(&self) -> bool { let offset = match self.token.kind { token::Interpolated(ref nt) => match **nt { token::NtPat(..) => return self.look_ahead(1, |t| t == &token::Colon), _ => 0, }, token::BinOp(token::And) | token::AndAnd => 1, _ if self.token.is_keyword(kw::Mut) => 1, _ => 0, }; self.look_ahead(offset, |t| t.is_ident()) && self.look_ahead(offset + 1, |t| t == &token::Colon) } fn recover_first_param(&mut self) -> &'static str { match self .parse_outer_attributes() .and_then(|_| self.parse_self_param()) .map_err(|mut e| e.cancel()) { Ok(Some(_)) => "method", _ => "function", } } }
s_unsafe_foreign_mod(
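The predicates in this parser (`is_unsafe_foreign_mod`, `is_static_global`, `check_fn_front_matter`) share one pattern: peek a fixed number of tokens ahead without consuming any. A minimal sketch of that lookahead pattern, in Python with illustrative token shapes (this is not rustc's API):

```python
# Toy cursor with non-consuming lookahead, mirroring Parser::look_ahead.
class Cursor:
    def __init__(self, tokens):
        self.tokens = tokens
        self.pos = 0

    def look_ahead(self, k):
        """Return the token k positions ahead, or None past the end."""
        i = self.pos + k
        return self.tokens[i] if i < len(self.tokens) else None

    def is_unsafe_foreign_mod(self):
        # Mirrors the Rust predicate: `unsafe extern ["ABI"]? {`.
        if self.look_ahead(0) != "unsafe" or self.look_ahead(1) != "extern":
            return False
        # An optional ABI string literal shifts the expected `{` by one,
        # like the `can_begin_literal_maybe_minus() as usize` trick above.
        skip = 1 if str(self.look_ahead(2) or "").startswith('"') else 0
        return self.look_ahead(2 + skip) == "{"

assert Cursor(["unsafe", "extern", '"C"', "{"]).is_unsafe_foreign_mod()
assert Cursor(["unsafe", "extern", "{"]).is_unsafe_foreign_mod()
assert not Cursor(["extern", "{"]).is_unsafe_foreign_mod()
```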
parser.py
# -*- coding: utf-8 -*- """Implements the xonsh parser.""" from xonsh.lazyasd import lazyobject from xonsh.platform import PYTHON_VERSION_INFO @lazyobject def Parser():
    if PYTHON_VERSION_INFO > (3, 6):
        # Tuple comparison: (3, 6, x) > (3, 6), so this selects the v36
        # parser for Python 3.6 and newer.
        from xonsh.parsers.v36 import Parser as p
    elif PYTHON_VERSION_INFO > (3, 5):
        from xonsh.parsers.v35 import Parser as p
    else:
        from xonsh.parsers.v34 import Parser as p
    return p
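For context on the `@lazyobject` pattern gated on `PYTHON_VERSION_INFO` above: the decorated function runs once, on first use, so the version check and import are deferred until the parser is actually needed. A minimal sketch of that deferral (a stand-in, not xonsh's implementation):

```python
# A stripped-down lazyobject: defer the factory until first attribute access.
class _Lazy:
    def __init__(self, factory):
        self._factory = factory
        self._obj = None

    def __getattr__(self, name):
        if self._obj is None:
            self._obj = self._factory()  # version check / import happens here
        return getattr(self._obj, name)

def lazyobject_sketch(factory):
    return _Lazy(factory)

class _RealParser:
    def parse(self, text):
        return text.split()

@lazyobject_sketch
def Parser():
    # Stand-in for the version-gated import above.
    return _RealParser()

assert Parser.parse("a b") == ["a", "b"]  # factory ran on this first access
```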
backend.py
import datetime
import logging

from abc import ABCMeta, abstractmethod
from decimal import Decimal

from celery.result import EagerResult, allow_join_result
from celery.backends.base import DisabledBackend

logger = logging.getLogger(__name__)

PROGRESS_STATE = 'PROGRESS'


class AbstractProgressRecorder(metaclass=ABCMeta):
    # The metaclass must be passed as a keyword argument in Python 3;
    # a `__metaclass__` class attribute is silently ignored there.

    @abstractmethod
    def set_progress(self, current, total, description=""):
        pass


class ConsoleProgressRecorder(AbstractProgressRecorder):

    def set_progress(self, current, total, description=""):
        print('processed {} items of {}. {}'.format(current, total, description))


class ProgressRecorder(AbstractProgressRecorder):

    def __init__(self, task):
        self.task = task

    def set_progress(self, current, total, description=""):
        percent = 0
        if total > 0:
            percent = (Decimal(current) / Decimal(total)) * Decimal(100)
            percent = float(round(percent, 2))
        state = PROGRESS_STATE
        meta = {
            'pending': False,
            'current': current,
            'total': total,
            'percent': percent,
            'description': description
        }
        self.task.update_state(
            state=state,
            meta=meta
        )
        return state, meta


class Progress(object):

    def __init__(self, result):
        """
        result: an AsyncResult or an object that mimics it to a degree
        """
        self.result = result

    def get_info(self):
        state = self.result.state
        response = {'state': state}
        if state in ['SUCCESS', 'FAILURE']:
            success = self.result.successful()
            with allow_join_result():
                response.update({
                    'complete': True,
                    'success': success,
                    'progress': _get_completed_progress(),
                    'result': self.result.get(self.result.id) if success else str(self.result.info),
                })
        elif state in ['RETRY', 'REVOKED']:
            if state == 'RETRY':
                retry = self.result.info
                when = str(retry.when) if isinstance(retry.when, datetime.datetime) else str(
                    datetime.datetime.now() + datetime.timedelta(seconds=retry.when))
                result = {'when': when, 'message': retry.message or str(retry.exc)}
            else:
                result = 'Task ' + str(self.result.info)
            response.update({
                'complete': True,
                'success': False,
                'progress': _get_completed_progress(),
                'result': result,
            })
        elif state == 'IGNORED':
            response.update({
                'complete': True,
                'success': None,
                'progress': _get_completed_progress(),
                'result': str(self.result.info)
            })
        elif state == PROGRESS_STATE:
            response.update({
                'complete': False,
                'success': None,
                'progress': self.result.info,
            })
        elif state in ['PENDING', 'STARTED']:
            response.update({
                'complete': False,
                'success': None,
                'progress': _get_unknown_progress(state),
            })
        else:
            logger.error('Task %s has unknown state %s with metadata %s',
                         self.result.id, state, self.result.info)
            response.update({
                'complete': True,
                'success': False,
                'progress': _get_unknown_progress(state),
                'result': 'Unknown state {}'.format(state),
            })
        return response


class
(EagerResult):
    """Like EagerResult but supports non-ready states."""

    def __init__(self, id, ret_value, state, traceback=None):
        """
        ret_value: result, exception, or progress metadata
        """
        # Set a backend so state-group lookups (like READY_STATES in
        # ready()) work even though no real backend is configured.
        self.backend = DisabledBackend
        super().__init__(id, ret_value, state, traceback)

    def ready(self):
        # Skip EagerResult.ready() (which always reports ready) and use
        # AsyncResult's state-based check instead.
        return super(EagerResult, self).ready()

    def __del__(self):
        # The inherited __del__ touches the backend, which is disabled
        # here and throws an exception, so make it a no-op.
        pass


def _get_completed_progress():
    return {
        'pending': False,
        'current': 100,
        'total': 100,
        'percent': 100,
    }


def _get_unknown_progress(state):
    return {
        'pending': state == 'PENDING',
        'current': 0,
        'total': 100,
        'percent': 0,
    }
KnownResult
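A hedged usage sketch for the classes above: a bound Celery task feeds `ProgressRecorder`, which stores `PROGRESS_STATE` metadata in the shape that `Progress.get_info()` later reads. The app name and broker/backend URLs are placeholder assumptions:

```python
from celery import Celery

app = Celery('demo', broker='memory://', backend='cache+memory://')

@app.task(bind=True)
def crunch(self, items):
    recorder = ProgressRecorder(self)  # from the module above
    for i, item in enumerate(items, start=1):
        # ... the real per-item work would go here ...
        recorder.set_progress(i, len(items), description='crunching')
    return len(items)

# A poller on the other side would then do something like:
#   info = Progress(crunch.AsyncResult(task_id)).get_info()
#   info['progress']['percent']  # 0.0 .. 100.0 while in PROGRESS_STATE
```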
studentRecord.py
#!/usr/bin/python3 #https://practice.geeksforgeeks.org/problems/student-record/0 def
(records, n):
    mx = 0
    res = []
    for ni in range(0, n*4, 4):
        # Each record is four entries: a name followed by three marks.
        am = sum(map(int, records[ni+1:ni+4]))//3
        if am > mx:
            # Found a better average: overwrite the result list
            # with the name of the student and the average.
            mx = am
            res = [(records[ni], am)]
        elif am == mx:
            # The averages are tied, so append to the result list.
            res.append((records[ni], am))
    for name, marks in res:
        print(name, end=" ")
    print(marks)  # print the result as stated in the problem
sol
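A small worked run of `sol`, assuming the flat layout the loop indexes into, `[name, mark1, mark2, mark3, ...]` with `n` students; ties on the floor-divided average keep every topper:

```python
# Two students with the same integer average (60 // 3 == 20):
records = ["shrikanth", "20", "30", "10",
           "ram", "25", "20", "15"]
sol(records, 2)  # prints: shrikanth ram 20
```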
mod.rs
//! Formatting for various types. pub(crate) mod formattable; use std::io; pub use self::formattable::Formattable; use crate::format_description::{modifier, Component}; use crate::{error, Date, Time, UtcOffset}; #[allow(clippy::missing_docs_in_private_items)] const MONTH_NAMES: [&[u8]; 12] = [ b"January", b"February", b"March", b"April", b"May", b"June", b"July", b"August", b"September", b"October", b"November", b"December", ]; #[allow(clippy::missing_docs_in_private_items)] const WEEKDAY_NAMES: [&[u8]; 7] = [ b"Monday", b"Tuesday", b"Wednesday", b"Thursday", b"Friday", b"Saturday", b"Sunday", ]; // region: extension trait /// A trait that indicates the formatted width of the value can be determined. /// /// Note that this should not be implemented for any signed integers. This forces the caller to /// write the sign if desired. pub(crate) trait DigitCount { /// The number of digits in the stringified value. fn num_digits(self) -> u8; } impl DigitCount for u8 { fn num_digits(self) -> u8 { // Using a lookup table as with u32 is *not* faster in standalone benchmarks. if self < 10 { 1 } else if self < 100 { 2 } else { 3 } } } impl DigitCount for u16 { fn num_digits(self) -> u8 { // Using a lookup table as with u32 is *not* faster in standalone benchmarks. if self < 10 { 1 } else if self < 100 { 2 } else if self < 1_000 { 3 } else if self < 10_000 { 4 } else { 5 } } } impl DigitCount for u32 { fn num_digits(self) -> u8 { /// Lookup table const TABLE: &[u64] = &[ 0x0001_0000_0000, 0x0001_0000_0000, 0x0001_0000_0000, 0x0001_FFFF_FFF6, 0x0002_0000_0000, 0x0002_0000_0000, 0x0002_FFFF_FF9C, 0x0003_0000_0000, 0x0003_0000_0000, 0x0003_FFFF_FC18, 0x0004_0000_0000, 0x0004_0000_0000, 0x0004_0000_0000, 0x0004_FFFF_D8F0, 0x0005_0000_0000, 0x0005_0000_0000, 0x0005_FFFE_7960, 0x0006_0000_0000, 0x0006_0000_0000, 0x0006_FFF0_BDC0, 0x0007_0000_0000, 0x0007_0000_0000, 0x0007_0000_0000, 0x0007_FF67_6980, 0x0008_0000_0000, 0x0008_0000_0000, 0x0008_FA0A_1F00, 0x0009_0000_0000, 0x0009_0000_0000, 0x0009_C465_3600, 0x000A_0000_0000, 0x000A_0000_0000, ]; ((self as u64 + TABLE[31_u32.saturating_sub(self.leading_zeros()) as usize]) >> 32) as _ } } // endregion extension trait /// Write all bytes to the output, returning the number of bytes written. fn write(output: &mut impl io::Write, bytes: &[u8]) -> io::Result<usize> { output.write_all(bytes)?; Ok(bytes.len()) } /// Format a number with the provided padding and width. /// /// The sign must be written by the caller. pub(crate) fn format_number<W: io::Write, V: itoa::Integer + DigitCount + Copy, const WIDTH: u8>( output: &mut W, value: V, padding: modifier::Padding, ) -> Result<usize, io::Error> { match padding { modifier::Padding::Space => format_number_pad_space::<_, _, WIDTH>(output, value), modifier::Padding::Zero => format_number_pad_zero::<_, _, WIDTH>(output, value), modifier::Padding::None => write(output, itoa::Buffer::new().format(value).as_bytes()), } } /// Format a number with the provided width and spaces as padding. /// /// The sign must be written by the caller. pub(crate) fn format_number_pad_space< W: io::Write, V: itoa::Integer + DigitCount + Copy, const WIDTH: u8, >( output: &mut W, value: V, ) -> Result<usize, io::Error> { let mut bytes = 0; for _ in 0..(WIDTH.saturating_sub(value.num_digits())) { bytes += write(output, &[b' '])?; } bytes += write(output, itoa::Buffer::new().format(value).as_bytes())?; Ok(bytes) } /// Format a number with the provided width and zeros as padding. /// /// The sign must be written by the caller. 
pub(crate) fn format_number_pad_zero< W: io::Write, V: itoa::Integer + DigitCount + Copy, const WIDTH: u8, >( output: &mut W, value: V, ) -> Result<usize, io::Error> { let mut bytes = 0; for _ in 0..(WIDTH.saturating_sub(value.num_digits())) { bytes += write(output, &[b'0'])?; } bytes += write(output, itoa::Buffer::new().format(value).as_bytes())?; Ok(bytes) } /// Format the provided component into the designated output. An `Err` will be returned if the /// component requires information that it does not provide or if the value cannot be output to the /// stream. pub(crate) fn format_component( output: &mut impl io::Write, component: Component, date: Option<Date>, time: Option<Time>, offset: Option<UtcOffset>, ) -> Result<usize, error::Format> { use Component::*; Ok(match (component, date, time, offset) { (Day(modifier), Some(date), ..) => fmt_day(output, date, modifier)?, (Month(modifier), Some(date), ..) => fmt_month(output, date, modifier)?, (Ordinal(modifier), Some(date), ..) => fmt_ordinal(output, date, modifier)?, (Weekday(modifier), Some(date), ..) => fmt_weekday(output, date, modifier)?, (WeekNumber(modifier), Some(date), ..) => fmt_week_number(output, date, modifier)?, (Year(modifier), Some(date), ..) => fmt_year(output, date, modifier)?, (Hour(modifier), _, Some(time), _) => fmt_hour(output, time, modifier)?, (Minute(modifier), _, Some(time), _) => fmt_minute(output, time, modifier)?, (Period(modifier), _, Some(time), _) => fmt_period(output, time, modifier)?, (Second(modifier), _, Some(time), _) => fmt_second(output, time, modifier)?, (Subsecond(modifier), _, Some(time), _) => fmt_subsecond(output, time, modifier)?, (OffsetHour(modifier), .., Some(offset)) => fmt_offset_hour(output, offset, modifier)?, (OffsetMinute(modifier), .., Some(offset)) => fmt_offset_minute(output, offset, modifier)?, (OffsetSecond(modifier), .., Some(offset)) => fmt_offset_second(output, offset, modifier)?, _ => return Err(error::Format::InsufficientTypeInformation), }) } // region: date formatters /// Format the day into the designated output. fn fmt_day( output: &mut impl io::Write, date: Date, modifier::Day { padding }: modifier::Day, ) -> Result<usize, io::Error> { format_number::<_, _, 2>(output, date.day(), padding) } /// Format the month into the designated output. fn fmt_month( output: &mut impl io::Write, date: Date, modifier::Month { padding, repr, case_sensitive: _, // no effect on formatting }: modifier::Month, ) -> Result<usize, io::Error> { match repr { modifier::MonthRepr::Numerical => { format_number::<_, _, 2>(output, date.month() as u8, padding) } modifier::MonthRepr::Long => write(output, MONTH_NAMES[date.month() as usize - 1]), modifier::MonthRepr::Short => write(output, &MONTH_NAMES[date.month() as usize - 1][..3]), } } /// Format the ordinal into the designated output. fn fmt_ordinal( output: &mut impl io::Write, date: Date, modifier::Ordinal { padding }: modifier::Ordinal, ) -> Result<usize, io::Error> { format_number::<_, _, 3>(output, date.ordinal(), padding) } /// Format the weekday into the designated output. 
fn fmt_weekday( output: &mut impl io::Write, date: Date, modifier::Weekday { repr, one_indexed, case_sensitive: _, // no effect on formatting }: modifier::Weekday, ) -> Result<usize, io::Error> { match repr { modifier::WeekdayRepr::Short => write( output, &WEEKDAY_NAMES[date.weekday().number_days_from_monday() as usize][..3], ), modifier::WeekdayRepr::Long => write( output, WEEKDAY_NAMES[date.weekday().number_days_from_monday() as usize], ), modifier::WeekdayRepr::Sunday => format_number::<_, _, 1>( output, date.weekday().number_days_from_sunday() + one_indexed as u8, modifier::Padding::None, ), modifier::WeekdayRepr::Monday => format_number::<_, _, 1>( output, date.weekday().number_days_from_monday() + one_indexed as u8, modifier::Padding::None, ), } } /// Format the week number into the designated output. fn fmt_week_number( output: &mut impl io::Write, date: Date, modifier::WeekNumber { padding, repr }: modifier::WeekNumber, ) -> Result<usize, io::Error> { format_number::<_, _, 2>( output, match repr { modifier::WeekNumberRepr::Iso => date.iso_week(), modifier::WeekNumberRepr::Sunday => date.sunday_based_week(), modifier::WeekNumberRepr::Monday => date.monday_based_week(), }, padding, ) } /// Format the year into the designated output. fn fmt_year( output: &mut impl io::Write, date: Date, modifier::Year { padding, repr, iso_week_based, sign_is_mandatory, }: modifier::Year, ) -> Result<usize, io::Error> { let full_year = if iso_week_based { date.iso_year_week().0 } else { date.year() }; let value = match repr { modifier::YearRepr::Full => full_year, modifier::YearRepr::LastTwo => (full_year % 100).abs(), }; let format_number = match repr { #[cfg(feature = "large-dates")] modifier::YearRepr::Full if value.abs() >= 100_000 => format_number::<_, _, 6>, #[cfg(feature = "large-dates")] modifier::YearRepr::Full if value.abs() >= 10_000 => format_number::<_, _, 5>, modifier::YearRepr::Full => format_number::<_, _, 4>, modifier::YearRepr::LastTwo => format_number::<_, _, 2>, }; let mut bytes = 0; if repr != modifier::YearRepr::LastTwo { if full_year < 0 { bytes += write(output, &[b'-'])?; } else if sign_is_mandatory || cfg!(feature = "large-dates") && full_year >= 10_000 { bytes += write(output, &[b'+'])?; } } bytes += format_number(output, value.unsigned_abs(), padding)?; Ok(bytes) } // endregion date formatters // region: time formatters /// Format the hour into the designated output. fn fmt_hour( output: &mut impl io::Write, time: Time, modifier::Hour { padding, is_12_hour_clock, }: modifier::Hour, ) -> Result<usize, io::Error> { let value = match (time.hour(), is_12_hour_clock) { (hour, false) => hour, (0 | 12, true) => 12, (hour, true) if hour < 12 => hour, (hour, true) => hour - 12, }; format_number::<_, _, 2>(output, value, padding) } /// Format the minute into the designated output. fn fmt_minute( output: &mut impl io::Write, time: Time, modifier::Minute { padding }: modifier::Minute, ) -> Result<usize, io::Error> { format_number::<_, _, 2>(output, time.minute(), padding) } /// Format the period into the designated output. fn fmt_period( output: &mut impl io::Write, time: Time, modifier::Period { is_uppercase, case_sensitive: _, // no effect on formatting }: modifier::Period, ) -> Result<usize, io::Error> { match (time.hour() >= 12, is_uppercase) { (false, false) => write(output, b"am"), (false, true) => write(output, b"AM"), (true, false) => write(output, b"pm"), (true, true) => write(output, b"PM"), } }
modifier::Second { padding }: modifier::Second, ) -> Result<usize, io::Error> { format_number::<_, _, 2>(output, time.second(), padding) } /// Format the subsecond into the designated output. fn fmt_subsecond<W: io::Write>( output: &mut W, time: Time, modifier::Subsecond { digits }: modifier::Subsecond, ) -> Result<usize, io::Error> { use modifier::SubsecondDigits::*; let nanos = time.nanosecond(); if digits == Nine || (digits == OneOrMore && nanos % 10 != 0) { format_number_pad_zero::<_, _, 9>(output, nanos) } else if digits == Eight || (digits == OneOrMore && (nanos / 10) % 10 != 0) { format_number_pad_zero::<_, _, 8>(output, nanos / 10) } else if digits == Seven || (digits == OneOrMore && (nanos / 100) % 10 != 0) { format_number_pad_zero::<_, _, 7>(output, nanos / 100) } else if digits == Six || (digits == OneOrMore && (nanos / 1_000) % 10 != 0) { format_number_pad_zero::<_, _, 6>(output, nanos / 1_000) } else if digits == Five || (digits == OneOrMore && (nanos / 10_000) % 10 != 0) { format_number_pad_zero::<_, _, 5>(output, nanos / 10_000) } else if digits == Four || (digits == OneOrMore && (nanos / 100_000) % 10 != 0) { format_number_pad_zero::<_, _, 4>(output, nanos / 100_000) } else if digits == Three || (digits == OneOrMore && (nanos / 1_000_000) % 10 != 0) { format_number_pad_zero::<_, _, 3>(output, nanos / 1_000_000) } else if digits == Two || (digits == OneOrMore && (nanos / 10_000_000) % 10 != 0) { format_number_pad_zero::<_, _, 2>(output, nanos / 10_000_000) } else { format_number_pad_zero::<_, _, 1>(output, nanos / 100_000_000) } } // endregion time formatters // region: offset formatters /// Format the offset hour into the designated output. fn fmt_offset_hour( output: &mut impl io::Write, offset: UtcOffset, modifier::OffsetHour { padding, sign_is_mandatory, }: modifier::OffsetHour, ) -> Result<usize, io::Error> { let mut bytes = 0; if offset.is_negative() { bytes += write(output, &[b'-'])?; } else if sign_is_mandatory { bytes += write(output, &[b'+'])?; } bytes += format_number::<_, _, 2>(output, offset.whole_hours().unsigned_abs(), padding)?; Ok(bytes) } /// Format the offset minute into the designated output. fn fmt_offset_minute( output: &mut impl io::Write, offset: UtcOffset, modifier::OffsetMinute { padding }: modifier::OffsetMinute, ) -> Result<usize, io::Error> { format_number::<_, _, 2>(output, offset.minutes_past_hour().unsigned_abs(), padding) } /// Format the offset second into the designated output. fn fmt_offset_second( output: &mut impl io::Write, offset: UtcOffset, modifier::OffsetSecond { padding }: modifier::OffsetSecond, ) -> Result<usize, io::Error> { format_number::<_, _, 2>(output, offset.seconds_past_minute().unsigned_abs(), padding) } // endregion offset formatters
/// Format the second into the designated output. fn fmt_second( output: &mut impl io::Write, time: Time,
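The padded formatters above need exactly one fact from `DigitCount`: how many digits `itoa` will write. A plain reference version in Python (without the u32 bit-trick lookup table) showing what the implementations are optimizing, handy as a cross-check:

```python
def num_digits(n: int) -> int:
    """Reference digit count for an unsigned value, as DigitCount reports."""
    assert n >= 0
    count = 1
    while n >= 10:
        n //= 10
        count += 1
    return count

# Spot-check the boundaries the u8/u16/u32 impls special-case.
for n in (0, 9, 10, 99, 100, 255, 65_535, 4_294_967_295):
    assert num_digits(n) == len(str(n))
```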
propSpreaders.ts
import { isNotStylishProp, isStylishProp, StylishPropNames, } from './stylishProps'; type PropSpreaderProps = Record<string, any>; const { keys } = Object; const spreadStyleProps = ( props: PropSpreaderProps, propConfig?: Partial<Record<StylishPropNames, false>> ): PropSpreaderProps => keys(props).reduce( (reducer: PropSpreaderProps, prop: string): PropSpreaderProps => { if (propConfig?.[prop] === false) { return reducer; } else if (isStylishProp(prop)) { return { ...reducer, [prop]: props[prop], }; } return reducer; }, {} ); const preventSpreadingStyleProps = ( props: PropSpreaderProps, propConfig?: Partial<Record<StylishPropNames, true>> ): PropSpreaderProps => keys(props).reduce( (reducer: PropSpreaderProps, prop: string): PropSpreaderProps => { if (propConfig?.[prop]) { return { ...reducer, [prop]: props[prop], }; } else if (isNotStylishProp(prop)) { return { ...reducer, [prop]: props[prop], }; } return reducer; }, {}
); export { preventSpreadingStyleProps, spreadStyleProps };
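The two reducers above are mirror images: keep a prop when an explicit override or the stylish-prop predicate says to. The same partition sketched in Python, with an assumed stand-in set for `StylishPropNames`:

```python
STYLE_PROPS = {"margin", "padding", "color"}  # assumed stand-in set

def spread_style_props(props, prop_config=None):
    """Keep style props; a False override drops one (as propConfig does)."""
    prop_config = prop_config or {}
    return {k: v for k, v in props.items()
            if prop_config.get(k, k in STYLE_PROPS)}

def prevent_spreading_style_props(props, prop_config=None):
    """Keep non-style props; a True override keeps a style prop anyway."""
    prop_config = prop_config or {}
    return {k: v for k, v in props.items()
            if prop_config.get(k, k not in STYLE_PROPS)}

props = {"margin": 8, "onClick": print}
assert spread_style_props(props) == {"margin": 8}
assert prevent_spreading_style_props(props) == {"onClick": print}
```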
for.go
/*
For

Go has only one looping construct, the for loop.

The basic for loop has three components separated by semicolons:

	the init statement: executed before the first iteration
	the condition expression: evaluated before every iteration
	the post statement: executed at the end of every iteration

The init statement will often be a short variable declaration, and the
variables declared there are visible only in the scope of the for statement.

The loop will stop iterating once the boolean condition evaluates to false.

Note: Unlike other languages like C, Java, or JavaScript, there are no
parentheses surrounding the three components of the for statement, and the
braces { } are always required.
*/
package main

import "fmt"

func main()
{
	sum := 0
	for i := 0; i < 10; i++ {
		sum += i
	}
	fmt.Println(sum)
}
app.js
var app = (function() { var loadIcon = $('#loader'), viewContainer = $('#viewContainer'), offScreenNav = $('#effeckt-off-screen-nav'), modalViewWrap = $('#effeckt-modal-wrap'), modalViewListContainer = $('#modalViewListContainer'), topcoatListTpl = Handlebars.compile($('#topcoatList-tpl').html()), welcomeViewTpl = Handlebars.compile($('#welcomeView-tpl').html()), dropboxViewTpl = Handlebars.compile($('#dropboxView-tpl').html()), fileListTpl = Handlebars.compile($('#fileList-tpl').html()), fileUploadViewTpl = Handlebars.compile($('#fileUploadView-tpl').html()), localFileListTpl = Handlebars.compile($('#localFileList-tpl').html()), slider = new PageSlider(viewContainer); function showWelcomeView() { var welcomeView = new WelcomeView(welcomeViewTpl); viewContainer.empty(); slider.slidePageFrom(welcomeView.render().el, 'left'); } function showDropboxView() { var dropboxView = new DropboxView(dropboxViewTpl, fileListTpl), fromFileUploadView = $('#fileUploadView').length > 0; viewContainer.empty(); slider.slidePageFrom(dropboxView.render().el, (fromFileUploadView) ? 'left' : 'right'); dropboxView.listFolder(); DropboxSync.addObserver('/'); } function showFileUploadView() { var fileUploadView = new FileUploadView(fileUploadViewTpl, localFileListTpl); viewContainer.empty(); slider.slidePageFrom(fileUploadView.render().el, 'right'); if (app.localFileFullPath == '') { // request the persistent file system window.requestFileSystem(LocalFileSystem.PERSISTENT, 0, fileUploadView.getFSRoot, fileUploadView.FSfail); } else { fileUploadView.getFolderWithPath(); } } function createNavMenu(obj) { offScreenNav.empty().html(topcoatListTpl(obj)); } function createModal(obj) { modalViewListContainer.empty().html(topcoatListTpl(obj)); } function showUnlinkModal() { createModal({ header: 'Unlink from Dropbox?', listItem: [ { text: 'Unlink', id: 'btn-unlinkDropbox' } ] }); showModal(); } function toggleNav() { //EffecktOffScreenNav.toggleNav(); // not as reliable as below $('#btn-navMenu').trigger('click'); // must wait at least 500 ms for the navMenu transform to finish, webkitTransitionEnd isn't working, i tried :~( var deferred = $.Deferred(); setTimeout(function() { deferred.resolve(); }, 725); return deferred.promise(); } function showModal() { $('#btn-modalView').trigger('click'); } function hideModal() { $('.effeckt-overlay').trigger('click'); if (app.modalDeferred) { app.modalDeferred.reject(); } } function resolveModalDeferred(el) { app.modalDeferred.resolve(el); } function navMenuIsVisible() { return offScreenNav.hasClass('effeckt-off-screen-nav-show'); } function modalIsVisible() { return modalViewWrap.hasClass('effeckt-show'); } function showLoader() { loadIcon.show(); } function hideLoader() { loadIcon.hide(); } modalViewListContainer.on('click', '#btn-unlinkDropbox', function(event) { hideModal(); toggleNav().done(function() { showLoader(); DropboxSync.unlink(function() { hideLoader(); showWelcomeView(); }, function(error) { console.log('DropboxSync unlink error'); }); }); event.preventDefault(); }); document.addEventListener("deviceReady", function() { // ready for kickoff FastClick.attach(document.body); if (navigator.notification) { // Override default HTML alert with native dialog window.showConfirm = function(message, title, labels, success) { navigator.notification.confirm( message, // message string success, // callback to invoke with index of button pressed title, // title string labels // buttonLabels array ); }; window.showPrompt = function(message, callback, title, labels, defaultText) { 
navigator.notification.prompt( message, callback, title, labels, defaultText // shown in input textbox ) }; } DropboxSync.checkLink(showDropboxView, showWelcomeView); // hook btn-back to the device's back button document.addEventListener('backbutton', onBackKeyDown, false); function onBackKeyDown(event) { if (navMenuIsVisible()) { toggleNav(); // no need to wait for deferred } else if (modalIsVisible()) { hideModal(); } else { $('#btn-back').trigger('click'); } event.preventDefault(); } document.addEventListener("menubutton", onMenuKeyDown, false); function onMenuKeyDown(event) { if (modalIsVisible()) return false; toggleNav(); event.preventDefault(); } }); Array.prototype.sortByKey = function(key) { this.sort(function(a, b) { var x = a[key].toLowerCase(); var y = b[key].toLowerCase(); return ((x < y) ? -1 : ((x > y) ? 1 : 0)); }); }; Array.prototype.contains = function(key, value) { for (var i = 0; i < this.length; i++) { if (this[i][key] == value) { return i; } } return -1; }; return { dropboxPath: '/', localFileFullPath: '', showWelcomeView: showWelcomeView, showFileUploadView: showFileUploadView, showDropboxView: showDropboxView, dropboxViewScrollCache: [], fileUploadViewScrollCache: [], dropboxViewIScroll: null, fileUploadViewIScroll: null, showUnlinkModal: showUnlinkModal, createModal: createModal, createNavMenu: createNavMenu, toggleNav: toggleNav, showModal: showModal, hideModal: hideModal, modalDeferred: null, resolveModalDeferred: resolveModalDeferred, loadIcon: loadIcon,
showLoader: showLoader, hideLoader: hideLoader } })(); // called from the onActivityResult method in the plugin when linking is successful. function dropbox_linked() { app.showDropboxView(); } // called by observer in the plugin when there's a change to the status of background synchronization (download/upload). function dropbox_onSyncStatusChange(status) { (status == 'none') ? app.hideLoader() : app.showLoader(); } // called by observer in the plugin when a file is changed. function dropbox_fileChange() { /*if ($('#dropboxView').length > 0) { app.dropboxView.listFolder(); }*/ // no need to list folder anymore since i added pull to refresh feature console.log('dropbox_fileChange()'); }
tokentype.rs
// Copyright (c) 2021-present, Cruise LLC // // This source code is licensed under the Apache License, Version 2.0, // found in the LICENSE-APACHE file in the root directory of this source tree. // You may not use this file except in compliance with the License. // the 'rust' tokenizer pub struct TokenizerState<'a> { pub prev: char, pub cur: char, pub next: char, pub lines: &'a [Vec<char>], pub line_start: usize, pub line_counter: usize, pub eof: bool, pub offset: usize, iter: std::slice::Iter<'a, char>, } impl<'a> TokenizerState<'a> { pub fn
(lines: &'a [Vec<char>]) -> Self { let mut ret = Self { lines, line_start: 0, line_counter: 0, offset: 0, eof: false, prev: '\0', cur: '\0', next: '\0', iter: lines[0].iter(), }; ret.advance_with_cur(); ret } pub fn advance(&mut self) { if let Some(next) = self.iter.next() { self.next = *next; self.offset += 1; } else { self.next_line(); } } pub fn next_line(&mut self) { if self.line_counter < self.lines.len() - 1 { self.line_counter += 1; self.line_start = self.offset; self.offset += 1; self.iter = self.lines[self.line_counter].iter(); self.next = '\n' } else { self.offset += 1; self.eof = true; self.next = '\0' } } pub fn next_is_digit(&self) -> bool { self.next >= '0' && self.next <= '9' } pub fn next_is_letter(&self) -> bool { self.next >= 'a' && self.next <= 'z' || self.next >= 'A' && self.next <= 'Z' } pub fn next_is_lowercase_letter(&self) -> bool { self.next >= 'a' && self.next <= 'z' } pub fn next_is_uppercase_letter(&self) -> bool { self.next >= 'A' && self.next <= 'Z' } pub fn next_is_hex(&self) -> bool { self.next >= '0' && self.next <= '9' || self.next >= 'a' && self.next <= 'f' || self.next >= 'A' && self.next <= 'F' } pub fn advance_with_cur(&mut self) { self.cur = self.next; self.advance(); } pub fn advance_with_prev(&mut self) { self.prev = self.cur; self.cur = self.next; self.advance(); } pub fn keyword(&mut self, chunk: &mut Vec<char>, word: &str) -> bool { for m in word.chars() { if m == self.next { chunk.push(m); self.advance(); } else { return false; } } true } } #[derive(Clone, Debug)] pub struct TokenChunk { pub token_type: TokenType, pub offset: usize, pub pair_token: usize, pub len: usize, pub next: char, // pub chunk: Vec<char> } impl TokenChunk { pub fn scan_last_token(token_chunks: &[TokenChunk]) -> TokenType { let mut prev_tok_index = token_chunks.len(); while prev_tok_index > 0 { let tt = &token_chunks[prev_tok_index - 1].token_type; if !tt.should_ignore() { return *tt; } prev_tok_index -= 1; } TokenType::Unexpected } pub fn push_with_pairing( token_chunks: &mut Vec<TokenChunk>, pair_stack: &mut Vec<usize>, next: char, offset: usize, offset2: usize, token_type: TokenType, ) -> bool { let mut invalid_pair = false; let pair_token = if token_type == TokenType::ParenOpen { pair_stack.push(token_chunks.len()); token_chunks.len() } else if token_type == TokenType::ParenClose { if !pair_stack.is_empty() { let other = pair_stack.pop().unwrap(); token_chunks[other].pair_token = token_chunks.len(); other } else { invalid_pair = true; token_chunks.len() } } else { token_chunks.len() }; token_chunks.push(TokenChunk { offset, pair_token, len: offset2 - offset, next, token_type }); invalid_pair } } #[derive(Clone, PartialEq, Copy, Debug)] pub enum TokenType { Whitespace, Newline, Keyword, Flow, Fn, TypeDef, Impl, Looping, Identifier, Call, Macro, TypeName, ThemeName, BuiltinType, Hash, Color, Regex, String, Number, Bool, StringMultiBegin, StringChunk, StringMultiEnd, CommentLine, CommentMultiBegin, CommentChunk, CommentMultiEnd, ParenOpen, ParenClose, Operator, Namespace, Splat, Delimiter, Colon, Warning, Error, Defocus, Unexpected, Eof, } impl TokenType { pub fn should_ignore(&self) -> bool { matches!( self, TokenType::Whitespace | TokenType::Newline | TokenType::CommentLine | TokenType::CommentMultiBegin | TokenType::CommentChunk | TokenType::CommentMultiEnd ) } }
new
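The Rust row above implements a classic scanning cursor: a three-character window (prev, cur, next) advanced over a slice of lines, with '\n' synthesized at line boundaries and '\0' once the input is exhausted, so predicates like next_is_digit() get one character of lookahead without backtracking. Below is a minimal Python sketch of the same cursor pattern; the Cursor name and the join-based flattening are illustrative shorthand, not part of the original source:

class Cursor:
    """Sliding prev/cur/next window, as in TokenizerState above."""

    def __init__(self, lines):
        # Joining with '\n' mirrors next_line() feeding '\n' between lines;
        # the iterator default of '\0' mirrors the eof handling.
        self._chars = iter('\n'.join(lines))
        self.prev = self.cur = self.next = '\0'
        self.advance_with_cur()  # pull the first character into `next`

    def advance(self):
        self.next = next(self._chars, '\0')

    def advance_with_cur(self):
        self.cur = self.next
        self.advance()

    def advance_with_prev(self):
        self.prev = self.cur
        self.advance_with_cur()

    def next_is_digit(self):
        return '0' <= self.next <= '9'

    def keyword(self, chunk, word):
        # Greedily consume `word`; like TokenizerState::keyword, a partial
        # match still consumes the matched prefix before returning False.
        for ch in word:
            if ch != self.next:
                return False
            chunk.append(ch)
            self.advance()
        return True

Note the keyword() quirk carried over from the original: a failed match does not rewind the characters already pushed into `chunk`, so a caller presumably continues scanning that chunk as an ordinary identifier after a False return.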
shared.pb.go
// Code generated by protoc-gen-go. DO NOT EDIT. // source: google/cloud/dataproc/v1/shared.proto package dataproc import ( fmt "fmt" math "math" proto "github.com/catper/protobuf/proto" _ "google.golang.org/genproto/googleapis/api/annotations" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // Cluster components that can be activated. type Component int32 const ( // Unspecified component. Component_COMPONENT_UNSPECIFIED Component = 0 // The Anaconda python distribution. Component_ANACONDA Component = 5 // The Hive Web HCatalog (the REST service for accessing HCatalog). Component_HIVE_WEBHCAT Component = 3 // The Jupyter Notebook. Component_JUPYTER Component = 1 // The Zeppelin notebook. Component_ZEPPELIN Component = 4 ) var Component_name = map[int32]string{ 0: "COMPONENT_UNSPECIFIED", 5: "ANACONDA", 3: "HIVE_WEBHCAT", 1: "JUPYTER", 4: "ZEPPELIN", } var Component_value = map[string]int32{ "COMPONENT_UNSPECIFIED": 0, "ANACONDA": 5, "HIVE_WEBHCAT": 3, "JUPYTER": 1, "ZEPPELIN": 4, } func (x Component) String() string { return proto.EnumName(Component_name, int32(x)) } func (Component) EnumDescriptor() ([]byte, []int) { return fileDescriptor_c84c579aa4cb1c4c, []int{0} } func init()
func init() { proto.RegisterFile("google/cloud/dataproc/v1/shared.proto", fileDescriptor_c84c579aa4cb1c4c) } var fileDescriptor_c84c579aa4cb1c4c = []byte{ // 247 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0xd0, 0xcf, 0x4b, 0xc3, 0x30, 0x14, 0x07, 0x70, 0x87, 0xbf, 0xb3, 0x1d, 0x42, 0x40, 0x50, 0xd9, 0xd5, 0x8b, 0x87, 0x84, 0xe1, 0xd1, 0x8b, 0x6d, 0x16, 0x59, 0x45, 0xd3, 0xe0, 0x3a, 0xc5, 0x5d, 0xc6, 0xb3, 0x2d, 0xb1, 0xd0, 0xe5, 0x85, 0x36, 0xee, 0xef, 0x97, 0xb5, 0xdb, 0x49, 0x76, 0x7b, 0xf0, 0x3e, 0xef, 0xf1, 0x7d, 0x8f, 0xdc, 0x59, 0x44, 0x5b, 0x97, 0x22, 0xaf, 0xf1, 0xb7, 0x10, 0x05, 0x04, 0xf0, 0x0d, 0xe6, 0x62, 0x33, 0x11, 0xed, 0x0f, 0x34, 0x65, 0xc1, 0x7d, 0x83, 0x01, 0xd9, 0x75, 0xcf, 0x78, 0xc7, 0xf8, 0x9e, 0xf1, 0xcd, 0xe4, 0x76, 0xbc, 0x5b, 0x00, 0xbe, 0x12, 0xe0, 0x1c, 0x06, 0x08, 0x15, 0xba, 0xb6, 0x9f, 0xbb, 0x07, 0x72, 0x29, 0x71, 0xed, 0xd1, 0x95, 0x2e, 0xb0, 0x1b, 0x72, 0x25, 0xd3, 0x37, 0x93, 0x6a, 0xa5, 0xb3, 0xd5, 0x42, 0xcf, 0x8d, 0x92, 0xc9, 0x73, 0xa2, 0xa6, 0xf4, 0x88, 0x8d, 0xc8, 0x45, 0xa4, 0x23, 0x99, 0xea, 0x69, 0x44, 0x4f, 0x19, 0x25, 0xa3, 0x59, 0xf2, 0xa1, 0x56, 0x9f, 0x2a, 0x9e, 0xc9, 0x28, 0xa3, 0xc7, 0x6c, 0x48, 0xce, 0x5f, 0x16, 0xe6, 0x2b, 0x53, 0xef, 0x74, 0xb0, 0xc5, 0x4b, 0x65, 0x8c, 0x7a, 0x4d, 0x34, 0x3d, 0x89, 0x91, 0x8c, 0x73, 0x5c, 0xf3, 0x43, 0x01, 0xe3, 0xe1, 0xbc, 0x3b, 0xc4, 0x6c, 0xf3, 0x98, 0xc1, 0xf2, 0x69, 0x07, 0x2d, 0xd6, 0xe0, 0x2c, 0xc7, 0xc6, 0x0a, 0x5b, 0xba, 0x2e, 0xad, 0xe8, 0x5b, 0xe0, 0xab, 0xf6, 0xff, 0x3f, 0x1e, 0xf7, 0xf5, 0xf7, 0x59, 0x87, 0x1f, 0xfe, 0x02, 0x00, 0x00, 0xff, 0xff, 0xcb, 0xf9, 0x10, 0xb4, 0x3b, 0x01, 0x00, 0x00, }
{ proto.RegisterEnum("google.cloud.dataproc.v1.Component", Component_name, Component_value) }
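Alongside the registration above, the generated Go keeps two parallel lookup tables, Component_name and Component_value, so enum values round-trip between wire integers and symbolic names (String() is a lookup through proto.EnumName, falling back to the raw integer for unknown values). A rough Python equivalent of that bidirectional mapping, using only the values visible in this excerpt (illustrative, not generated code):

from enum import IntEnum

class Component(IntEnum):
    # Numbering copied from the proto excerpt; other components may exist
    # in the full file (note the gap at 2).
    COMPONENT_UNSPECIFIED = 0
    JUPYTER = 1
    HIVE_WEBHCAT = 3
    ZEPPELIN = 4
    ANACONDA = 5

# IntEnum provides both directions that the Go maps encode:
assert Component(5).name == 'ANACONDA'      # like Component_name[5]
assert Component['JUPYTER'].value == 1      # like Component_value['JUPYTER']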
api_test.py
#!/usr/bin/env vpython # Copyright 2014 The LUCI Authors. All rights reserved. # Use of this source code is governed under the Apache License, Version 2.0 # that can be found in the LICENSE file. # Disable 'Access to a protected member', 'Unused argument', 'Unused variable'. # pylint: disable=W0212,W0612,W0613 # pylint: disable=redefined-outer-name import datetime import sys import threading import unittest from six.moves import queue import mock from test_support import test_env test_env.setup_test_env() from google.appengine.ext import ndb from components.auth import api from components.auth import config from components.auth import ipaddr from components.auth import model from components.auth import realms from components.auth import replication from components.auth.proto import replication_pb2 from components.auth.proto import security_config_pb2 from components import utils from test_support import test_case def new_auth_db( replication_state=None, global_config=None, groups=None, ip_whitelist_assignments=None, ip_whitelists=None, internal_service_regexp=None, additional_client_ids=None ): global_config = global_config or model.AuthGlobalConfig() global_config.security_config = security_config_blob(internal_service_regexp) return api.AuthDB.from_entities( replication_state=replication_state or model.AuthReplicationState(), global_config=global_config, groups=groups or [], ip_whitelist_assignments=( ip_whitelist_assignments or model.AuthIPWhitelistAssignments()), ip_whitelists=ip_whitelists or [], additional_client_ids=additional_client_ids or []) def security_config_blob(regexps=None): regexps = regexps or ['(.*-dot-)?internal\\.example\\.com'] msg = security_config_pb2.SecurityConfig(internal_service_regexp=regexps) return msg.SerializeToString() class AuthDBTest(test_case.TestCase): """Tests for AuthDB class.""" def setUp(self): super(AuthDBTest, self).setUp() self.mock(api.logging, 'warning', lambda *_args: None) self.mock(api.logging, 'error', lambda *_args: None) def test_get_group(self): g = model.AuthGroup( key=model.group_key('group'), members=[ model.Identity.from_bytes('user:[email protected]'), model.Identity.from_bytes('user:[email protected]'), ], globs=[model.IdentityGlob.from_bytes('user:*')], nested=['blah'], created_by=model.Identity.from_bytes('user:[email protected]'), created_ts=datetime.datetime(2014, 1, 2, 3, 4, 5), modified_by=model.Identity.from_bytes('user:[email protected]'), modified_ts=datetime.datetime(2015, 1, 2, 3, 4, 5)) db = new_auth_db(groups=[g]) # Unknown group. self.assertIsNone(db.get_group('blah')) # Known group. from_cache = db.get_group('group') self.assertEqual(from_cache.key, g.key) # Members list is sorted. self.assertEqual(from_cache.members, [ model.Identity.from_bytes('user:[email protected]'), model.Identity.from_bytes('user:[email protected]'), ]) # Fields that are known to be different. exclude = ['members', 'auth_db_rev', 'auth_db_prev_rev'] self.assertEqual( from_cache.to_dict(exclude=exclude), g.to_dict(exclude=exclude)) def test_is_group_member(self): # Test identity. joe = model.Identity(model.IDENTITY_USER, '[email protected]') # Group that includes joe via glob. with_glob = model.AuthGroup(id='WithGlob') with_glob.globs.append( model.IdentityGlob(model.IDENTITY_USER, '*@example.com')) # Group that includes joe via explicit listing. with_listing = model.AuthGroup(id='WithListing') with_listing.members.append(joe) # Group that includes joe via nested group. 
with_nesting = model.AuthGroup(id='WithNesting') with_nesting.nested.append('WithListing') # Creates AuthDB with given list of groups and then runs the check. is_member = (lambda groups, ident, group: new_auth_db(groups=groups).is_group_member(group, ident)) # Wildcard group includes everyone (even anonymous). self.assertTrue(is_member([], joe, '*')) self.assertTrue(is_member([], model.Anonymous, '*')) # An unknown group includes nobody. self.assertFalse(is_member([], joe, 'Missing')) self.assertFalse(is_member([], model.Anonymous, 'Missing')) # Globs are respected. self.assertTrue(is_member([with_glob], joe, 'WithGlob')) self.assertFalse(is_member([with_glob], model.Anonymous, 'WithGlob')) # Members lists are respected. self.assertTrue(is_member([with_listing], joe, 'WithListing')) self.assertFalse(is_member([with_listing], model.Anonymous, 'WithListing')) # Nested groups are respected. self.assertTrue(is_member([with_nesting, with_listing], joe, 'WithNesting')) self.assertFalse( is_member([with_nesting, with_listing], model.Anonymous, 'WithNesting')) def test_list_group(self): def list_group(groups, group, recursive): l = new_auth_db(groups=groups).list_group(group, recursive) return api.GroupListing( sorted(l.members), sorted(l.globs), sorted(l.nested)) grp_1 = model.AuthGroup(id='1') grp_1.members.extend([ model.Identity(model.IDENTITY_USER, '[email protected]'), model.Identity(model.IDENTITY_USER, '[email protected]'), ]) grp_1.globs.extend([ model.IdentityGlob(model.IDENTITY_USER, '*@a.example.com'), model.IdentityGlob(model.IDENTITY_USER, '*@b.example.com'), ]) grp_2 = model.AuthGroup(id='2') grp_2.nested.append('1') grp_2.members.extend([ # Specify 'b' again, even though it's in a nested group. model.Identity(model.IDENTITY_USER, '[email protected]'), model.Identity(model.IDENTITY_USER, '[email protected]'), ]) grp_2.globs.extend([ # Specify '*@b.example.com' again, even though it's in a nested group. model.IdentityGlob(model.IDENTITY_USER, '*@b.example.com'), model.IdentityGlob(model.IDENTITY_USER, '*@c.example.com'), ]) # Unknown group. empty = api.GroupListing([], [], []) self.assertEqual(empty, list_group([grp_1, grp_2], 'blah', False)) self.assertEqual(empty, list_group([grp_1, grp_2], 'blah', True)) # Non recursive. expected = api.GroupListing( members=[ model.Identity(model.IDENTITY_USER, '[email protected]'), model.Identity(model.IDENTITY_USER, '[email protected]'), ], globs=[ model.IdentityGlob(model.IDENTITY_USER, '*@b.example.com'), model.IdentityGlob(model.IDENTITY_USER, '*@c.example.com'), ], nested=['1']) self.assertEqual(expected, list_group([grp_1, grp_2], '2', False)) # Recursive. expected = api.GroupListing( members=[ model.Identity(model.IDENTITY_USER, '[email protected]'), model.Identity(model.IDENTITY_USER, '[email protected]'), model.Identity(model.IDENTITY_USER, '[email protected]'), ], globs=[ model.IdentityGlob(model.IDENTITY_USER, '*@a.example.com'), model.IdentityGlob(model.IDENTITY_USER, '*@b.example.com'), model.IdentityGlob(model.IDENTITY_USER, '*@c.example.com'), ], nested=['1']) self.assertEqual(expected, list_group([grp_1, grp_2], '2', True)) def test_nested_groups_cycle(self): # Groups that nest each other. group1 = model.AuthGroup(id='Group1') group1.nested.append('Group2') group2 = model.AuthGroup(id='Group2') group2.nested.append('Group1') # Collect warnings. warnings = [] self.mock(api.logging, 'warning', lambda msg, *_args: warnings.append(msg)) # This should not hang, but produce error message. 
auth_db = new_auth_db(groups=[group1, group2]) self.assertFalse( auth_db.is_group_member('Group1', model.Anonymous)) self.assertEqual(1, len(warnings)) self.assertTrue('Cycle in a group graph' in warnings[0]) def test_not_real_nested_group_cycle_aka_issue_251(self): # See https://github.com/luci/luci-py/issues/251. # # B -> A, C -> [B, A]. When traversing C, A is seen twice, and this is fine. group_A = model.AuthGroup(id='A') group_B = model.AuthGroup(id='B') group_C = model.AuthGroup(id='C') group_B.nested = ['A'] group_C.nested = ['A', 'B'] db = new_auth_db(groups=[group_A, group_B, group_C]) # 'is_group_member' must not report 'Cycle in a group graph' warning. warnings = [] self.mock(api.logging, 'warning', lambda msg, *_args: warnings.append(msg)) self.assertFalse(db.is_group_member('C', model.Anonymous)) self.assertFalse(warnings) def test_is_allowed_oauth_client_id(self): global_config = model.AuthGlobalConfig( oauth_client_id='1', oauth_additional_client_ids=['2', '3']) auth_db = new_auth_db( global_config=global_config, additional_client_ids=['local']) self.assertFalse(auth_db.is_allowed_oauth_client_id(None)) self.assertTrue(auth_db.is_allowed_oauth_client_id('1')) self.assertTrue(auth_db.is_allowed_oauth_client_id('2')) self.assertTrue(auth_db.is_allowed_oauth_client_id('3')) self.assertTrue(auth_db.is_allowed_oauth_client_id('local')) self.assertTrue( auth_db.is_allowed_oauth_client_id(api.API_EXPLORER_CLIENT_ID)) self.assertFalse(auth_db.is_allowed_oauth_client_id('4')) def test_fetch_auth_db_lazy_bootstrap(self): # Don't exist before the call. self.assertFalse(model.root_key().get()) # Run bootstrap. api._lazy_bootstrap_ran = False api.fetch_auth_db() # Exist now. self.assertTrue(model.root_key().get()) # Simulate datastore wipe which can happen in tests, verify fetch_auth_db # still works. It hits slightly different code path since wiping datastore # doesn't reset _lazy_bootstrap_ran global var. model.root_key().delete() api.fetch_auth_db() def run_auth_db_fetch_test(self, setup_cb): now = utils.utcnow() ident = model.Identity.from_bytes('user:[email protected]') # Client IDs callback. Disable config.ensure_configured() since it overrides # _additional_client_ids_cb after we mock it. self.mock(config, 'ensure_configured', lambda: None) self.mock(api, '_additional_client_ids_cb', lambda: ['', 'cb_client_id']) self.mock(api, 'get_web_client_id', lambda: 'web_client_id') # Create AuthGlobalConfig. global_config = model.AuthGlobalConfig(key=model.root_key()) global_config.oauth_client_id = '1' global_config.oauth_client_secret = 'secret' global_config.oauth_additional_client_ids = ['2', '3'] global_config.security_config = security_config_blob() global_config.token_server_url = 'token_server_url' global_config.put() # What we expect to see in the AuthDB. expected_groups = {} def add_group(name, members, globs, nested, owners): expected_groups[name] = ( frozenset(members), tuple(model.IdentityGlob.from_bytes(g) for g in globs), tuple(nested), owners,
key=model.group_key(name), members=[model.Identity.from_bytes(m) for m in members], globs=[model.IdentityGlob.from_bytes(g) for g in globs], nested=nested, owners=owners, created_ts=now, created_by=ident, modified_ts=now, modified_by=ident, ).put() # Create a bunch of groups. add_group( name='Group A', members=['user:[email protected]', 'user:[email protected]'], globs=['user:*@example.com'], nested=['Group B', 'Group C'], owners='Group A') add_group( name='Group B', members=['user:[email protected]'], globs=['user:*@example.com'], nested=[], owners='Group A') add_group( name='Group C', members=[], globs=[], nested=[], owners='Group C') # And a bunch IP whitelist. model.AuthIPWhitelistAssignments( key=model.ip_whitelist_assignments_key(), assignments=[ model.AuthIPWhitelistAssignments.Assignment( identity=model.Anonymous, ip_whitelist='some ip whitelist', created_ts=now, created_by=ident, comment='comment', ), ], ).put() model.AuthIPWhitelist( key=model.ip_whitelist_key('some ip whitelist'), subnets=['127.0.0.1/32'], description='description', created_ts=now, created_by=ident, modified_ts=now, modified_by=ident, ).put() model.AuthIPWhitelist( key=model.ip_whitelist_key('bots'), subnets=['127.0.0.1/32'], description='description', created_ts=now, created_by=ident, modified_ts=now, modified_by=ident, ).put() if setup_cb: setup_cb() # Verify all the stuff above ends up in the auth_db. auth_db = api.fetch_auth_db() # global_config and additional_client_ids_cb self.assertEqual('token_server_url', auth_db.token_server_url) self.assertEqual(('1', 'secret', ['2', '3']), auth_db.get_oauth_config()) self.assertTrue(auth_db.is_allowed_oauth_client_id('1')) self.assertTrue(auth_db.is_allowed_oauth_client_id('cb_client_id')) self.assertTrue(auth_db.is_allowed_oauth_client_id('web_client_id')) self.assertFalse(auth_db.is_allowed_oauth_client_id('')) # Groups. self.assertEqual( expected_groups, { name: (g.members, g.globs, g.nested, g.owners) for name, g in auth_db._groups.items() }) # IP whitelists and whitelist assignments. self.assertEqual( {model.Anonymous: 'some ip whitelist'}, auth_db._ip_whitelist_assignments) self.assertEqual( {'bots': ['127.0.0.1/32'], 'some ip whitelist': ['127.0.0.1/32']}, auth_db._ip_whitelists) return auth_db def test_fetch_auth_db_from_entities(self): auth_db = self.run_auth_db_fetch_test(None) self.assertEqual('from_entities', auth_db._from_what) def test_fetch_auth_db_from_snapshot(self): PRIMARY_ID = 'primary_id' PRIMARY_URL = 'https://primary_url' AUTH_DB_REV = 12345 def setup_snapshot(): # Create AuthDB snapshot entities from existing "detailed" entities in # the datastore. _, snap = replication.new_auth_db_snapshot() # Switch into Replica mode, store the snapshot. model.AuthReplicationState( key=model.replication_state_key(), primary_id=PRIMARY_ID, primary_url=PRIMARY_URL, auth_db_rev=AUTH_DB_REV, shard_ids=replication.store_sharded_auth_db( auth_db=replication.auth_db_snapshot_to_proto(snap), primary_url=PRIMARY_URL, auth_db_rev=AUTH_DB_REV, shard_size=100, ), ).put() auth_db = self.run_auth_db_fetch_test(setup_snapshot) self.assertEqual('from_proto', auth_db._from_what) self.assertEqual(PRIMARY_ID, auth_db.primary_id) self.assertEqual(PRIMARY_URL, auth_db.primary_url) self.assertEqual(AUTH_DB_REV, auth_db.auth_db_rev) def test_get_secret_bootstrap(self): # Mock AuthSecret.bootstrap to capture calls to it. 
original = api.model.AuthSecret.bootstrap calls = [] @classmethod def mocked_bootstrap(cls, name): calls.append(name) result = original(name) result.values = ['123'] return result self.mock(api.model.AuthSecret, 'bootstrap', mocked_bootstrap) auth_db = new_auth_db() got = auth_db.get_secret(api.SecretKey('some_secret')) self.assertEqual(['123'], got) self.assertEqual(['some_secret'], calls) def test_is_in_ip_whitelist(self): auth_db = new_auth_db(ip_whitelists=[ model.AuthIPWhitelist( key=model.ip_whitelist_key('l'), subnets=['127.0.0.1', '192.168.0.0/24']), ]) test = lambda ip: auth_db.is_in_ip_whitelist('l', ipaddr.ip_from_string(ip)) self.assertTrue(test('127.0.0.1')) self.assertTrue(test('192.168.0.0')) self.assertTrue(test('192.168.0.9')) self.assertTrue(test('192.168.0.255')) self.assertFalse(test('192.168.1.0')) self.assertFalse(test('192.1.0.0')) @staticmethod def make_auth_db_with_ip_whitelist(): """AuthDB with [email protected] assigned IP whitelist '127.0.0.1/32'.""" return new_auth_db( ip_whitelists=[ model.AuthIPWhitelist( key=model.ip_whitelist_key('some ip whitelist'), subnets=['127.0.0.1/32'], ), model.AuthIPWhitelist( key=model.ip_whitelist_key('bots'), subnets=['192.168.1.1/32', '::1/32'], ), ], ip_whitelist_assignments=model.AuthIPWhitelistAssignments( assignments=[ model.AuthIPWhitelistAssignments.Assignment( identity=model.Identity(model.IDENTITY_USER, '[email protected]'), ip_whitelist='some ip whitelist',) ], ), ) def test_verify_ip_whitelisted_ok(self): # Should not raise: IP is whitelisted. ident = model.Identity(model.IDENTITY_USER, '[email protected]') self.make_auth_db_with_ip_whitelist().verify_ip_whitelisted( ident, ipaddr.ip_from_string('127.0.0.1')) def test_verify_ip_whitelisted_not_whitelisted(self): with self.assertRaises(api.AuthorizationError): self.make_auth_db_with_ip_whitelist().verify_ip_whitelisted( model.Identity(model.IDENTITY_USER, '[email protected]'), ipaddr.ip_from_string('192.168.0.100')) def test_verify_ip_whitelisted_not_assigned(self): # Should not raise: whitelist is not required for [email protected]. 
ident = model.Identity(model.IDENTITY_USER, '[email protected]') self.make_auth_db_with_ip_whitelist().verify_ip_whitelisted( ident, ipaddr.ip_from_string('192.168.0.100')) def test_verify_ip_whitelisted_missing_whitelist(self): auth_db = new_auth_db( ip_whitelist_assignments=model.AuthIPWhitelistAssignments( assignments=[ model.AuthIPWhitelistAssignments.Assignment( identity=model.Identity(model.IDENTITY_USER, '[email protected]'), ip_whitelist='missing ip whitelist',) ], ), ) with self.assertRaises(api.AuthorizationError): auth_db.verify_ip_whitelisted( model.Identity(model.IDENTITY_USER, '[email protected]'), ipaddr.ip_from_string('127.0.0.1')) def test_is_internal_domain(self): auth_db = new_auth_db(internal_service_regexp=[ '(.*-dot-)?a-int\\.example\\.com', '(.*-dot-)?b-int\\.example\\.com', ]) self.assertTrue(auth_db.is_internal_domain('a-int.example.com')) self.assertTrue(auth_db.is_internal_domain('b-int.example.com')) self.assertTrue(auth_db.is_internal_domain('z-dot-a-int.example.com')) self.assertTrue(auth_db.is_internal_domain('z-dot-b-int.example.com')) self.assertFalse(auth_db.is_internal_domain('int.example.com')) self.assertFalse(auth_db.is_internal_domain('a-int.example')) self.assertFalse(auth_db.is_internal_domain('dot-a-int.example.com')) def mock_replication_state(auth_db_rev): return model.AuthReplicationState( key=model.replication_state_key(), primary_id='primary-id', auth_db_rev=auth_db_rev) class TestAuthDBCache(test_case.TestCase): """Tests for process-global and request-local AuthDB cache.""" def setUp(self): super(TestAuthDBCache, self).setUp() api.reset_local_state() def set_time(self, ts): """Mocks time.time() to return |ts|.""" self.mock(api.time, 'time', lambda: ts) def set_fetched_auth_db(self, auth_db): """Mocks fetch_auth_db to return |auth_db|.""" def mock_fetch_auth_db(known_auth_db=None): if (known_auth_db is not None and auth_db.auth_db_rev == known_auth_db.auth_db_rev): return known_auth_db return auth_db self.mock(api, 'fetch_auth_db', mock_fetch_auth_db) def test_get_request_cache_different_threads(self): """Ensure get_request_cache() respects multiple threads.""" # Runs in its own thread. def thread_proc(): request_cache = api.reinitialize_request_cache() self.assertTrue(request_cache) # Returns same object in a context of a same request thread. self.assertTrue(api.get_request_cache() is request_cache) return request_cache # Launch two threads running 'thread_proc', wait for them to stop, collect # whatever they return. results_queue = queue.Queue() threads = [ threading.Thread(target=lambda: results_queue.put(thread_proc())) for _ in range(2) ] for t in threads: t.start() results = [results_queue.get(timeout=1) for _ in range(len(threads))] # Different threads use different RequestCache objects. self.assertTrue(results[0] is not results[1]) def test_get_request_cache_different_requests(self): """Ensure get_request_cache() returns new object for a new request.""" # Grab request cache for 'current' request. request_cache = api.reinitialize_request_cache() # Track calls to 'close'. close_calls = [] self.mock(request_cache, 'close', lambda: close_calls.append(1)) # Should return a new instance of request cache now. self.assertTrue(api.reinitialize_request_cache() is not request_cache) # Old one should have been closed. self.assertEqual(1, len(close_calls)) def test_get_process_auth_db_expiration(self): """Ensure get_process_auth_db() respects expiration.""" # Prepare several instances of AuthDB to be used in mocks. 
auth_db_v0 = new_auth_db(replication_state=mock_replication_state(0)) auth_db_v1 = new_auth_db(replication_state=mock_replication_state(1)) # Fetch initial copy of AuthDB. self.set_time(0) self.set_fetched_auth_db(auth_db_v0) self.assertEqual(auth_db_v0, api.get_process_auth_db()) # It doesn't expire for some time. self.set_time(api.get_process_cache_expiration_sec() - 1) self.set_fetched_auth_db(auth_db_v1) self.assertEqual(auth_db_v0, api.get_process_auth_db()) # But eventually it does. self.set_time(api.get_process_cache_expiration_sec() + 1) self.set_fetched_auth_db(auth_db_v1) self.assertEqual(auth_db_v1, api.get_process_auth_db()) def test_get_process_auth_db_known_version(self): """Ensure get_process_auth_db() respects entity group version.""" # Prepare several instances of AuthDB to be used in mocks. auth_db_v0 = new_auth_db(replication_state=mock_replication_state(0)) auth_db_v0_again = new_auth_db(replication_state=mock_replication_state(0)) # Fetch initial copy of AuthDB. self.set_time(0) self.set_fetched_auth_db(auth_db_v0) self.assertEqual(auth_db_v0, api.get_process_auth_db()) # Make cache expire, but setup fetch_auth_db to return a new instance of # AuthDB, but with same entity group version. Old known instance of AuthDB # should be reused. self.set_time(api.get_process_cache_expiration_sec() + 1) self.set_fetched_auth_db(auth_db_v0_again) self.assertTrue(api.get_process_auth_db() is auth_db_v0) def test_get_process_auth_db_multithreading(self): """Ensure get_process_auth_db() plays nice with multiple threads.""" def run_in_thread(func): """Runs |func| in a parallel thread, returns future (as Queue).""" result = queue.Queue() thread = threading.Thread(target=lambda: result.put(func())) thread.start() return result # Prepare several instances of AuthDB to be used in mocks. auth_db_v0 = new_auth_db(replication_state=mock_replication_state(0)) auth_db_v1 = new_auth_db(replication_state=mock_replication_state(1)) # Run initial fetch, should cache |auth_db_v0| in process cache. self.set_time(0) self.set_fetched_auth_db(auth_db_v0) self.assertEqual(auth_db_v0, api.get_process_auth_db()) # Make process cache expire. self.set_time(api.get_process_cache_expiration_sec() + 1) # Start fetching AuthDB from another thread, at some point it will call # 'fetch_auth_db', and we pause the thread then and resume main thread. fetching_now = threading.Event() auth_db_queue = queue.Queue() def mock_fetch_auth_db(**_kwargs): fetching_now.set() return auth_db_queue.get() self.mock(api, 'fetch_auth_db', mock_fetch_auth_db) future = run_in_thread(api.get_process_auth_db) # Wait for internal thread to call |fetch_auth_db|. fetching_now.wait() # Ok, now main thread is unblocked, while internal thread is blocking on a # artificially slow 'fetch_auth_db' call. Main thread can now try to get # AuthDB via get_process_auth_db(). It should get older stale copy right # away. self.assertEqual(auth_db_v0, api.get_process_auth_db()) # Finish background 'fetch_auth_db' call by returning 'auth_db_v1'. # That's what internal thread should get as result of 'get_process_auth_db'. auth_db_queue.put(auth_db_v1) self.assertEqual(auth_db_v1, future.get()) # Now main thread should get it as well. self.assertEqual(auth_db_v1, api.get_process_auth_db()) def test_get_process_auth_db_exceptions(self): """Ensure get_process_auth_db() handles DB exceptions well.""" # Prepare several instances of AuthDB to be used in mocks. 
auth_db_v0 = new_auth_db(replication_state=mock_replication_state(0)) auth_db_v1 = new_auth_db(replication_state=mock_replication_state(1)) # Fetch initial copy of AuthDB. self.set_time(0) self.set_fetched_auth_db(auth_db_v0) self.assertEqual(auth_db_v0, api.get_process_auth_db()) # Make process cache expire. self.set_time(api.get_process_cache_expiration_sec() + 1) # Emulate an exception in fetch_auth_db. def mock_fetch_auth_db(*_kwargs): raise Exception('Boom!') self.mock(api, 'fetch_auth_db', mock_fetch_auth_db) # Capture calls to logging.exception. logger_calls = [] self.mock(api.logging, 'exception', lambda *_args: logger_calls.append(1)) # Should return the older copy of auth_db_v0 and log the exception. self.assertEqual(auth_db_v0, api.get_process_auth_db()) self.assertEqual(1, len(logger_calls)) # Make fetch_auth_db work again. Verify get_process_auth_db() works too. self.set_fetched_auth_db(auth_db_v1) self.assertEqual(auth_db_v1, api.get_process_auth_db()) def test_get_latest_auth_db(self): """Ensure get_latest_auth_db "rushes" cached AuthDB update.""" auth_db_v0 = new_auth_db(replication_state=mock_replication_state(0)) auth_db_v1 = new_auth_db(replication_state=mock_replication_state(1)) # Fetch initial copy of AuthDB. self.set_time(0) self.set_fetched_auth_db(auth_db_v0) self.assertEqual(auth_db_v0, api.get_process_auth_db()) # Rig up fetch_auth_db to return a newer version. self.set_fetched_auth_db(auth_db_v1) # 'get_process_auth_db' still returns the cached one. self.assertEqual(auth_db_v0, api.get_process_auth_db()) # But 'get_latest_auth_db' returns a new one and updates the cached copy. self.assertEqual(auth_db_v1, api.get_latest_auth_db()) self.assertEqual(auth_db_v1, api.get_process_auth_db()) def test_get_request_auth_db(self): """Ensure get_request_auth_db() caches AuthDB in request cache.""" api.reinitialize_request_cache() # 'get_request_auth_db()' returns whatever get_process_auth_db() returns # when called for the first time. self.mock(api, 'get_process_auth_db', lambda: 'fake') self.assertEqual('fake', api.get_request_auth_db()) # But then it caches it locally and reuses the local copy, instead of calling # 'get_process_auth_db()' all the time. 
self.mock(api, 'get_process_auth_db', lambda: 'another-fake') self.assertEqual('fake', api.get_request_auth_db()) def test_warmup(self): """Ensure api.warmup() fetches AuthDB into process-global cache.""" self.assertFalse(api._auth_db) api.warmup() self.assertTrue(api._auth_db) class ApiTest(test_case.TestCase): """Test for publicly exported API.""" def setUp(self): super(ApiTest, self).setUp() api.reset_local_state() def test_get_current_identity_unitialized(self): """If request cache is not initialized, returns Anonymous.""" self.assertEqual(api.get_current_identity(), model.Anonymous) def test_get_current_identity(self): """Ensure get_current_identity returns whatever was put in request cache.""" ident = model.Identity.from_bytes('user:[email protected]') api.get_request_cache().current_identity = ident self.assertEqual(ident, api.get_current_identity()) def test_require_decorator_ok(self): """@require calls the callback and then decorated function.""" callback_calls = [] def require_callback(): callback_calls.append(1) return True @api.require(require_callback) def allowed(*args, **kwargs): return (args, kwargs) self.assertEqual(((1, 2), {'a': 3}), allowed(1, 2, a=3)) self.assertEqual(1, len(callback_calls)) def test_require_decorator_fail(self): """@require raises exception and doesn't call decorated function.""" forbidden_calls = [] @api.require(lambda: False) def forbidden(): forbidden_calls.append(1) with self.assertRaises(api.AuthorizationError): forbidden() self.assertFalse(forbidden_calls) def test_require_decorator_error_msg(self): @api.require(lambda: False, 'Forbidden!') def forbidden(): pass with self.assertRaisesRegexp(api.AuthorizationError, 'Forbidden!'): forbidden() def test_require_decorator_nesting_ok(self): """Permission checks are called in order.""" calls = [] def check(name): calls.append(name) return True @api.require(lambda: check('A')) @api.require(lambda: check('B')) def allowed(arg): return arg self.assertEqual('value', allowed('value')) self.assertEqual(['A', 'B'], calls) def test_require_decorator_nesting_first_deny(self): """First deny raises AuthorizationError.""" calls = [] def check(name, result): calls.append(name) return result forbidden_calls = [] @api.require(lambda: check('A', False)) @api.require(lambda: check('B', True)) def forbidden(arg): forbidden_calls.append(1) with self.assertRaises(api.AuthorizationError): forbidden('value') self.assertFalse(forbidden_calls) self.assertEqual(['A'], calls) def test_require_decorator_nesting_non_first_deny(self): """Non-first deny also raises AuthorizationError.""" calls = [] def check(name, result): calls.append(name) return result forbidden_calls = [] @api.require(lambda: check('A', True)) @api.require(lambda: check('B', False)) def forbidden(arg): forbidden_calls.append(1) with self.assertRaises(api.AuthorizationError): forbidden('value') self.assertFalse(forbidden_calls) self.assertEqual(['A', 'B'], calls) def test_require_decorator_on_method(self): calls = [] def checker(): calls.append(1) return True class Class(object): @api.require(checker) def method(self, *args, **kwargs): return (self, args, kwargs) obj = Class() self.assertEqual((obj, ('value',), {'a': 2}), obj.method('value', a=2)) self.assertEqual(1, len(calls)) def test_require_decorator_on_static_method(self): calls = [] def checker(): calls.append(1) return True class Class(object): @staticmethod @api.require(checker) def static_method(*args, **kwargs): return (args, kwargs) self.assertEqual((('value',), {'a': 2}), 
Class.static_method('value', a=2)) self.assertEqual(1, len(calls)) def test_require_decorator_on_class_method(self): calls = [] def checker(): calls.append(1) return True class Class(object): @classmethod @api.require(checker) def class_method(cls, *args, **kwargs): return (cls, args, kwargs) self.assertEqual( (Class, ('value',), {'a': 2}), Class.class_method('value', a=2)) self.assertEqual(1, len(calls)) def test_require_decorator_ndb_nesting_require_first(self): calls = [] def checker(): calls.append(1) return True @api.require(checker) @ndb.non_transactional def func(*args, **kwargs): return (args, kwargs) self.assertEqual((('value',), {'a': 2}), func('value', a=2)) self.assertEqual(1, len(calls)) def test_require_decorator_ndb_nesting_require_last(self): calls = [] def checker(): calls.append(1) return True @ndb.non_transactional @api.require(checker) def func(*args, **kwargs): return (args, kwargs) self.assertEqual((('value',), {'a': 2}), func('value', a=2)) self.assertEqual(1, len(calls)) def test_public_then_require_fails(self): with self.assertRaises(TypeError): @api.public @api.require(lambda: True) def func(): pass def test_require_then_public_fails(self): with self.assertRaises(TypeError): @api.require(lambda: True) @api.public def func(): pass def test_is_decorated(self): self.assertTrue(api.is_decorated(api.public(lambda: None))) self.assertTrue( api.is_decorated(api.require(lambda: True)(lambda: None))) @mock.patch('logging.info') def test_require_log_identity(self, logfunc): ident = model.Identity.from_bytes('user:[email protected]') api.get_request_cache().current_identity = ident @api.require(lambda: True, log_identity=True) def func(): pass func() logfunc.assert_called_once_with('Accessed from user:[email protected]') class OAuthAccountsTest(test_case.TestCase): """Test for extract_oauth_caller_identity function.""" def mock_all(self, user_email, client_id, allowed_client_ids=()): class FakeUser(object): email = lambda _: user_email class FakeAuthDB(object): is_allowed_oauth_client_id = lambda _, cid: cid in allowed_client_ids self.mock(api.oauth, 'get_current_user', lambda _: FakeUser()) self.mock(api.oauth, 'get_client_id', lambda _: client_id) self.mock(api, 'get_request_auth_db', FakeAuthDB) @staticmethod def user(email): return model.Identity(model.IDENTITY_USER, email) def test_is_allowed_oauth_client_id_ok(self): self.mock_all('[email protected]', 'some-client-id', ['some-client-id']) self.assertEqual( (self.user('[email protected]'), api.new_auth_details()), api.extract_oauth_caller_identity()) def test_is_allowed_oauth_client_id_not_ok(self): self.mock_all('[email protected]', 'some-client-id', ['another-client-id']) with self.assertRaises(api.AuthorizationError): api.extract_oauth_caller_identity() def test_is_allowed_oauth_client_id_not_ok_empty(self): self.mock_all('[email protected]', 'some-client-id') with self.assertRaises(api.AuthorizationError): api.extract_oauth_caller_identity() class AuthWebUIConfigTest(test_case.TestCase): def test_works(self): utils.clear_cache(api.get_web_client_id) self.assertEqual('', api.get_web_client_id_uncached()) api.set_web_client_id('zzz') self.assertEqual('zzz', api.get_web_client_id_uncached()) self.assertEqual('zzz', api.get_web_client_id()) class AuthDBBuilder(object): def __init__(self): self.groups = [] def group(self, name, members=None, globs=None, nested=None, owners=None): self.groups.append(model.AuthGroup( key=model.group_key(name), members=[model.Identity.from_bytes(m) for m in (members or [])], 
globs=[model.IdentityGlob.from_bytes(g) for g in (globs or [])], nested=nested or [], owners=owners or 'default-owners-group', )) return self def build(self): return new_auth_db(groups=self.groups) class RelevantSubgraphTest(test_case.TestCase): def call(self, db, principal): if '*' in principal: principal = model.IdentityGlob.from_bytes(principal) elif '@' in principal: principal = model.Identity.from_bytes(principal) graph = db.get_relevant_subgraph(principal) # Use a dict with integer keys instead of a list to improve the readability # of assertions below. nodes = {} for i, (node, edges) in enumerate(graph.describe()): if isinstance(node, (model.Identity, model.IdentityGlob)): node = node.to_bytes() nodes[i] = (node, {l: sorted(s) for l, s in edges.items() if s}) return nodes def test_empty(self): db = AuthDBBuilder().build() self.assertEqual( {0: ('user:[email protected]', {})}, self.call(db, 'user:[email protected]')) self.assertEqual( {0: ('user:*@example.com', {})}, self.call(db, 'user:*@example.com')) self.assertEqual( {0: ('group', {})}, self.call(db, 'group')) def test_identity_discoverable_directly_and_through_glob(self): b = AuthDBBuilder() b.group('g1', ['user:[email protected]']) b.group('g2', ['user:[email protected]']) b.group('g3', [], ['user:*@example.com']) b.group('g4', ['user:[email protected]'], ['user:*']) self.assertEqual({ 0: ('user:[email protected]', {'IN': [1, 3, 4, 5]}), 1: ('user:*@example.com', {'IN': [2]}), 2: ('g3', {}), 3: ('user:*', {'IN': [4]}), 4: ('g4', {}), 5: ('g1', {}), }, self.call(b.build(), 'user:[email protected]')) def test_glob_is_matched_directly(self): b = AuthDBBuilder() b.group('g1', [], ['user:*@example.com']) b.group('g2', [], ['user:*']) self.assertEqual({ 0: ('user:*@example.com', {'IN': [1]}), 1: ('g1', {}), }, self.call(b.build(), 'user:*@example.com')) def test_simple_group_lookup(self): b = AuthDBBuilder() b.group('g1', nested=['g2', 'g3']) b.group('g2', nested=['g3']) b.group('g3') self.assertEqual({ 0: ('g3', {'IN': [1, 2]}), 1: ('g1', {}), 2: ('g2', {'IN': [1]}), }, self.call(b.build(), 'g3')) def test_ownership_relations(self): b = AuthDBBuilder() b.group('a-root', nested=['b-inner']) b.group('b-inner') b.group('c-owned-by-root', owners='a-root') b.group('d-includes-owned-by-root', nested=['c-owned-by-root']) b.group('e-owned-by-3', owners='d-includes-owned-by-root') self.assertEqual({ 0: ('b-inner', {'IN': [1]}), 1: ('a-root', {'OWNS': [2]}), 2: ('c-owned-by-root', {'IN': [3]}), 3: ('d-includes-owned-by-root', {'OWNS': [4]}), 4: ('e-owned-by-3', {}), }, self.call(b.build(), 'b-inner')) def test_diamond(self): b = AuthDBBuilder() b.group('top', nested=['middle1', 'middle2']) b.group('middle1', nested=['bottom']) b.group('middle2', nested=['bottom']) b.group('bottom') self.assertEqual({ 0: ('bottom', {'IN': [1, 3]}), 1: ('middle1', {'IN': [2]}), 2: ('top', {}), 3: ('middle2', {'IN': [2]}), }, self.call(b.build(), 'bottom')) def test_cycle(self): # Note: cycles in groups are forbidden on API layer, but make sure we still # handle them without hanging in case something unexpected happens and they # appear. 
b = AuthDBBuilder() b.group('g1', nested=['g2']) b.group('g2', nested=['g1', 'g2']) self.assertEqual({ 0: ('g2', {'IN': [0, 1]}), 1: ('g1', {'IN': [0]}), }, self.call(b.build(), 'g2')) def test_selfowners(self): b = AuthDBBuilder() b.group('g1', nested=['g2'], owners='g1') b.group('g2') self.assertEqual({0: ('g1', {'OWNS': [0]})}, self.call(b.build(), 'g1')) self.assertEqual({ 0: ('g2', {'IN': [1]}), 1: ('g1', {'OWNS': [1]}), }, self.call(b.build(), 'g2')) def test_messy_graph(self): b = AuthDBBuilder() b.group('directly', ['user:[email protected]']) b.group('via-glob', [], ['user:*@example.com']) b.group('g1', nested=['via-glob'], owners='g2') b.group('g2', nested=['directly']) b.group('g3', nested=['g1']) self.assertEqual({ 0: ('user:[email protected]', {'IN': [1, 5]}), 1: ('user:*@example.com', {'IN': [2]}), 2: ('via-glob', {'IN': [3]}), 3: ('g1', {'IN': [4]}), 4: ('g3', {}), 5: ('directly', {'IN': [6]}), 6: ('g2', {'OWNS': [3]}), }, self.call(b.build(), 'user:[email protected]')) class PermissionsTest(test_case.TestCase): def test_happy_path(self): p1 = api.Permission('service.subject.verb') p2 = api.Permission('service.subject.verb') p3 = api.Permission('service.subject.another') self.assertEqual(p1, p2) self.assertTrue(p1 is p2) self.assertNotEqual(p1, p3) self.assertEqual('service.subject.verb', str(p1)) self.assertEqual("'service.subject.verb'", '%r' % (p1,)) def test_validation_errors(self): with self.assertRaises(TypeError): api.Permission(123) with self.assertRaises(TypeError): api.Permission(u'no.unicode.here') with self.assertRaises(ValueError): api.Permission('too.few') with self.assertRaises(ValueError): api.Permission('too.too.too.much') with self.assertRaises(ValueError): api.Permission('has..empty') class RealmStringsTest(test_case.TestCase): def test_happy_path(self): self.assertEqual(api.root_realm('proj'), 'proj:@root') self.assertEqual(api.root_realm(u'proj'), 'proj:@root') self.assertEqual(api.legacy_realm('proj'), 'proj:@legacy') self.assertEqual(api.legacy_realm(u'proj'), 'proj:@legacy') def test_validation_errors(self): with self.assertRaises(TypeError): api.root_realm(None) with self.assertRaises(TypeError): api.legacy_realm(None) with self.assertRaises(ValueError): api.root_realm('') with self.assertRaises(ValueError): api.legacy_realm('') def test_validate_realm_name(self): self.assertIsNone(api.validate_realm_name('proj:realm')) self.assertIsNone(api.validate_realm_name('proj:@root')) self.assertIsNone(api.validate_realm_name('proj:@legacy')) self.assertIsNone(api.validate_realm_name('@internal:realm')) self.assertIsNone(api.validate_realm_name('@internal:@root')) self.assertIsNone(api.validate_realm_name('@internal:@legacy')) def test_validate_realm_name_errors(self): with self.assertRaises(ValueError): self.assertFalse(api.validate_realm_name('realm')) with self.assertRaises(ValueError): self.assertFalse(api.validate_realm_name('proj:@invalid')) with self.assertRaises(ValueError): self.assertFalse(api.validate_realm_name('proj:re:alm')) with self.assertRaises(ValueError): self.assertFalse(api.validate_realm_name('@proj:realm')) PERM0 = api.Permission('luci.dev.testing0') PERM1 = api.Permission('luci.dev.testing1') PERM2 = api.Permission('luci.dev.testing2') ALL_PERMS = [PERM0, PERM1, PERM2] ID1 = model.Identity.from_bytes('user:[email protected]') ID2 = model.Identity.from_bytes('user:[email protected]') ID3 = model.Identity.from_bytes('user:[email protected]') ADMIN = model.Identity.from_bytes('user:[email protected]') class RealmsTest(test_case.TestCase): 
@staticmethod def auth_db(realms_map, groups=None, api_version=None): return api.AuthDB.from_proto( replication_state=model.AuthReplicationState(), auth_db=replication_pb2.AuthDB( groups=[ { 'name': name, 'members': [m.to_bytes() for m in members], 'created_by': 'user:[email protected]', 'modified_by': 'user:[email protected]', } for name, members in (groups or {}).items() ], realms={ 'api_version': api_version or realms.API_VERSION, 'permissions': [ {'name': p.name} for p in ALL_PERMS ], 'realms': [ { 'name': name, 'bindings': [ { 'permissions': [ ALL_PERMS.index(p) for p in perms ], 'principals': [ p if isinstance(p, str) else p.to_bytes() for p in principals ], } for perms, principals in sorted(bindings.items()) ], 'data': { 'enforce_in_service': ['data for %s' % name], }, } for name, bindings in sorted(realms_map.items()) ], }, ), additional_client_ids=[]) def setUp(self): super(RealmsTest, self).setUp() self.all_perms = {p.name: p for p in ALL_PERMS} self.mock(api, '_all_perms', self.all_perms) self.logs = {} for lvl in ('info', 'warning', 'error', 'exception'): self.logs[lvl] = [] def appender(lvl): # need to capture lvl in a separate closure return lambda msg, *args: self.logs[lvl].append(msg % args) self.mock(api.logging, lvl, appender(lvl)) def assert_logs_empty(self, lvl): self.assertEqual([], self.logs[lvl]) def assert_logs(self, lvl, msg): self.assertTrue( any(msg in m for m in self.logs[lvl]), '%r not in %r' % (msg, self.logs[lvl])) def assert_check(self, db, perm, realms, ident, outcome): self.assertEqual( outcome, db.has_permission(perm, realms, ident), 'has_permission(%r, %r, %r) is %s, but should be %s' % (perm, realms, ident.to_bytes(), not outcome, outcome)) def test_direct_inclusion_in_binding(self): db = self.auth_db({ 'proj:@root': {}, 'proj:realm': { (PERM0, PERM1): [ID1], (PERM0, PERM2): [ID2], }, 'proj:another/realm': { (PERM2,): [ID1, ID3], }, }) self.assert_check(db, PERM0, ['proj:realm'], ID1, True) self.assert_check(db, PERM1, ['proj:realm'], ID1, True) self.assert_check(db, PERM2, ['proj:realm'], ID1, False) self.assert_check(db, PERM0, ['proj:realm'], ID2, True) self.assert_check(db, PERM1, ['proj:realm'], ID2, False) self.assert_check(db, PERM2, ['proj:realm'], ID2, True) self.assert_check( db, PERM2, ['proj:realm', 'proj:another/realm'], ID1, True) self.assert_check( db, PERM2, ['proj:realm', 'proj:another/realm'], ID3, True) def test_inclusion_through_group(self): db = self.auth_db({ 'proj:@root': {}, 'proj:realm': { (PERM0, PERM1): ['group:empty', 'group:g1'], (PERM0, PERM2): ['group:empty', 'group:g2'], }, }, groups={'empty': [], 'g1': [ID1], 'g2': [ID2]}) self.assert_check(db, PERM0, ['proj:realm'], ID1, True) self.assert_check(db, PERM1, ['proj:realm'], ID1, True) self.assert_check(db, PERM2, ['proj:realm'], ID1, False) self.assert_check(db, PERM0, ['proj:realm'], ID2, True) self.assert_check(db, PERM1, ['proj:realm'], ID2, False) self.assert_check(db, PERM2, ['proj:realm'], ID2, True) def test_fallback_to_root(self): db = self.auth_db({'proj:@root': {(PERM0,): [ID1]}}) self.assert_check(db, PERM0, ['proj:@root'], ID1, True) self.assert_check(db, PERM0, ['proj:@root'], ID2, False) self.assert_logs_empty('warning') self.assert_check(db, PERM0, ['proj:realm'], ID1, True) self.assert_logs('warning', 'falling back to the root') self.assert_check(db, PERM0, ['proj:realm'], ID2, False) self.assert_check(db, PERM0, ['proj:another/realm'], ID1, True) def test_missing_project(self): db = self.auth_db({}) self.assert_check(db, PERM0, ['proj:@root'], ID1, False) 
self.assert_logs('warning', 'a non-existing root realm') self.logs['warning'] = [] self.assert_check(db, PERM0, ['proj:@legacy'], ID1, False) self.assert_logs('warning', 'doesn\'t have a root realm') self.logs['warning'] = [] self.assert_check(db, PERM0, ['proj:another/realm'], ID1, False) self.assert_logs('warning', 'doesn\'t have a root realm') self.logs['warning'] = [] def test_unknown_permission(self): unknown = api.Permission('luci.dev.unknown') self.all_perms[unknown.name] = unknown db = self.auth_db({'proj:realm': {(PERM0,): [ID1]}}) self.assert_logs('warning', 'is not in the AuthDB') self.assert_check(db, unknown, ['proj:realm'], ID1, False) self.assert_logs('warning', 'not present in the AuthDB') def test_realms_unavailable(self): empty = new_auth_db() with self.assertRaises(api.RealmsError): empty.has_permission('luci.dev.p1', ['proj:realm'], ID1) def test_bad_api_version(self): with self.assertRaises(api.RealmsError): self.auth_db({}, api_version=666) def test_bad_permission_type(self): db = self.auth_db({}) with self.assertRaises(TypeError): db.has_permission('luci.dev.p1', ['proj:realm'], ID1) def test_bad_realm_names(self): db = self.auth_db({}) for r in ['z', ':z', 'p:', 'blah blah:z', 'p:BLAH', 'p:@z', 'p:p:z']: with self.assertRaises(ValueError): db.has_permission(PERM0, [r], ID1) def test_has_permission_dryrun(self): rc = api.RequestCache() rc._auth_db = self.auth_db( {'proj:@root': {(PERM0,): [ID1]}}, groups={'admin': [ADMIN]}) self.mock(api, 'get_request_cache', lambda: rc) # Match. self.logs['info'] = [] api.has_permission_dryrun(PERM0, ['proj:@root'], True, ID1, 'admin', 'bug') self.assert_logs('info', "bug: has_permission_dryrun('luci.dev.testing0', ['proj:@root'], " "'user:[email protected]'), authdb=0: match - ALLOW") self.logs['info'] = [] api.has_permission_dryrun(PERM1, ['proj:@root'], False, ID1, 'admin', 'bug') self.assert_logs('info', "bug: has_permission_dryrun('luci.dev.testing1', ['proj:@root'], " "'user:[email protected]'), authdb=0: match - DENY") # Mismatch. self.logs['warning'] = [] api.has_permission_dryrun(PERM0, ['proj:@root'], False, ID1, 'admin', 'bug') self.assert_logs('warning', "bug: has_permission_dryrun('luci.dev.testing0', ['proj:@root'], " "'user:[email protected]'), authdb=0: mismatch - got ALLOW, want DENY") self.logs['warning'] = [] api.has_permission_dryrun(PERM1, ['proj:@root'], True, ID1, 'admin', 'bug') self.assert_logs('warning', "bug: has_permission_dryrun('luci.dev.testing1', ['proj:@root'], " "'user:[email protected]'), authdb=0: mismatch - got DENY, want ALLOW") # Admin match. self.logs['info'] = [] api.has_permission_dryrun( PERM0, ['proj:@root'], True, ADMIN, 'admin', 'bug') self.assert_logs('info', "bug: has_permission_dryrun('luci.dev.testing0', ['proj:@root'], " "'user:[email protected]'), authdb=0: match - ADMIN_ALLOW") # Blow up. 
self.logs['exception'] = [] api.has_permission_dryrun(PERM1, ['@root'], True, ID1, 'admin', 'bug') self.assert_logs('exception', "bug: has_permission_dryrun('luci.dev.testing1', ['@root'], " "'user:[email protected]'), authdb=0: exception ValueError, want ALLOW") def test_realm_data(self): db = self.auth_db({'proj:@root': {}, 'proj:r': {}}) def realm_data(realm): r = db.get_realm_data(realm) return r.enforce_in_service[0] if r else None self.assertEqual('data for proj:r', realm_data('proj:r')) self.assertEqual('data for proj:@root', realm_data('proj:@root')) self.assertEqual('data for proj:@root', realm_data('proj:zzz')) self.assertEqual(None, realm_data('zzz:@root')) if __name__ == '__main__': if '-v' in sys.argv: unittest.TestCase.maxDiff = None unittest.main()
) model.AuthGroup(
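The group tests above (wildcard '*', unknown groups, globs, nesting, the Group1 <-> Group2 cycle, and the issue-251 diamond) pin down the membership semantics fairly precisely. A compact sketch of that walk, assuming a plain dict of name -> (members, nested) rather than the real AuthDB internals, and omitting globs for brevity:

def is_group_member(groups, group, ident, _seen=None):
    # Sketch of the semantics exercised above, not the components.auth code.
    if group == '*':          # the wildcard group includes everyone
        return True
    if group not in groups:   # an unknown group includes nobody
        return False
    seen = _seen if _seen is not None else set()
    if group in seen:
        # Handles both true cycles (Group1 <-> Group2) and the harmless
        # issue-251 diamond: a group already explored without producing a
        # match can be skipped, so the walk always terminates.
        return False
    seen.add(group)
    members, nested = groups[group]
    return ident in members or any(
        is_group_member(groups, sub, ident, seen) for sub in nested)

groups = {'WithListing': ({'user:[email protected]'}, []),
          'WithNesting': (set(), ['WithListing'])}
assert is_group_member(groups, 'WithNesting', 'user:[email protected]')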
test_fixed_ips_negative.py
# Copyright 2013 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest_lib import exceptions as lib_exc from tempest.api.compute import base from tempest import config from tempest import test CONF = config.CONF class FixedIPsNegativeTestJson(base.BaseV2ComputeAdminTest): @classmethod def resource_setup(cls):
@test.attr(type=['negative', 'gate']) @test.services('network') def test_list_fixed_ip_details_with_non_admin_user(self): self.assertRaises(lib_exc.Unauthorized, self.non_admin_client.get_fixed_ip_details, self.ip) @test.attr(type=['negative', 'gate']) @test.services('network') def test_set_reserve_with_non_admin_user(self): body = {"reserve": "None"} self.assertRaises(lib_exc.Unauthorized, self.non_admin_client.reserve_fixed_ip, self.ip, body) @test.attr(type=['negative', 'gate']) @test.services('network') def test_set_unreserve_with_non_admin_user(self): body = {"unreserve": "None"} self.assertRaises(lib_exc.Unauthorized, self.non_admin_client.reserve_fixed_ip, self.ip, body) @test.attr(type=['negative', 'gate']) @test.services('network') def test_set_reserve_with_invalid_ip(self): # NOTE(maurosr): since this exercises the same code snippet, we do it # only for the reserve action body = {"reserve": "None"} # NOTE(eliqiao): in Juno the exception is NotFound, but in master we # changed the error code to BadRequest; both exceptions should be # accepted by tempest self.assertRaises((lib_exc.NotFound, lib_exc.BadRequest), self.client.reserve_fixed_ip, "my.invalid.ip", body) @test.attr(type=['negative', 'gate']) @test.services('network') def test_fixed_ip_with_invalid_action(self): body = {"invalid_action": "None"} self.assertRaises(lib_exc.BadRequest, self.client.reserve_fixed_ip, self.ip, body)
super(FixedIPsNegativeTestJson, cls).resource_setup() if CONF.service_available.neutron: msg = ("%s skipped as neutron is available" % cls.__name__) raise cls.skipException(msg) cls.client = cls.os_adm.fixed_ips_client cls.non_admin_client = cls.fixed_ips_client server = cls.create_test_server(wait_until='ACTIVE') server = cls.servers_client.get_server(server['id']) for ip_set in server['addresses']: for ip in server['addresses'][ip_set]: if ip['OS-EXT-IPS:type'] == 'fixed': cls.ip = ip['addr'] break if cls.ip: break
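One detail in test_set_reserve_with_invalid_ip above is easy to miss: assertRaises accepts a tuple of exception classes, which is exactly how the test stays green across the Juno behavior (NotFound) and the later BadRequest behavior. A self-contained illustration of the pattern, with KeyError and ValueError standing in for the tempest_lib exceptions:

import unittest

class EitherExceptionDemo(unittest.TestCase):
    def test_tuple_of_exception_classes(self):
        def juno_style():
            raise KeyError('my.invalid.ip')    # stand-in for NotFound
        def master_style():
            raise ValueError('my.invalid.ip')  # stand-in for BadRequest
        # Both raise-paths satisfy the same assertion, mirroring how the
        # tempest test tolerates either error code.
        self.assertRaises((KeyError, ValueError), juno_style)
        self.assertRaises((KeyError, ValueError), master_style)

if __name__ == '__main__':
    unittest.main()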
transferQueueStandbyTaskExecutor.go
// The MIT License // // Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. // // Copyright (c) 2020 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package history import ( "context" "time" commonpb "go.temporal.io/api/common/v1" enumspb "go.temporal.io/api/enums/v1" "go.temporal.io/api/serviceerror" taskqueuepb "go.temporal.io/api/taskqueue/v1" "go.temporal.io/server/api/historyservice/v1" "go.temporal.io/server/api/matchingservice/v1" "go.temporal.io/server/common" "go.temporal.io/server/common/log" "go.temporal.io/server/common/log/tag" "go.temporal.io/server/common/metrics" "go.temporal.io/server/common/namespace" "go.temporal.io/server/common/primitives/timestamp" "go.temporal.io/server/common/xdc" "go.temporal.io/server/service/history/consts" "go.temporal.io/server/service/history/queues" "go.temporal.io/server/service/history/shard" "go.temporal.io/server/service/history/tasks" "go.temporal.io/server/service/history/workflow" "go.temporal.io/server/service/worker/archiver" ) type ( transferQueueStandbyTaskExecutor struct { *transferQueueTaskExecutorBase clusterName string nDCHistoryResender xdc.NDCHistoryResender } ) func newTransferQueueStandbyTaskExecutor( shard shard.Context, workflowCache workflow.Cache, archivalClient archiver.Client, nDCHistoryResender xdc.NDCHistoryResender, logger log.Logger, clusterName string, matchingClient matchingservice.MatchingServiceClient, ) queues.Executor { return &transferQueueStandbyTaskExecutor{ transferQueueTaskExecutorBase: newTransferQueueTaskExecutorBase( shard, workflowCache, archivalClient, logger, matchingClient, ), clusterName: clusterName, nDCHistoryResender: nDCHistoryResender, } } func (t *transferQueueStandbyTaskExecutor) Execute( ctx context.Context, executable queues.Executable, ) (metrics.Scope, error) { task := executable.GetTask() scope := t.metricsClient.Scope( tasks.GetStandbyTransferTaskMetricsScope(task), getNamespaceTagByID(t.registry, task.GetNamespaceID()), ) switch task := task.(type) { case *tasks.ActivityTask: return scope, t.processActivityTask(ctx, task) case *tasks.WorkflowTask: return scope, t.processWorkflowTask(ctx, task) case *tasks.CancelExecutionTask: return scope, t.processCancelExecution(ctx, task) case *tasks.SignalExecutionTask: return scope, t.processSignalExecution(ctx, task) case *tasks.StartChildExecutionTask: return scope, t.processStartChildExecution(ctx, task) case *tasks.ResetWorkflowTask: // no reset needed for standby // TODO: add error logs 
return scope, nil case *tasks.CloseExecutionTask: return scope, t.processCloseExecution(ctx, task) case *tasks.DeleteExecutionTask: return scope, t.processDeleteExecutionTask(ctx, task) default: return scope, errUnknownTransferTask } } func (t *transferQueueStandbyTaskExecutor) processActivityTask( ctx context.Context, transferTask *tasks.ActivityTask, ) error { processTaskIfClosed := false actionFn := func(_ context.Context, wfContext workflow.Context, mutableState workflow.MutableState) (interface{}, error) { activityInfo, ok := mutableState.GetActivityInfo(transferTask.ScheduleID) if !ok { return nil, nil } ok = VerifyTaskVersion(t.shard, t.logger, mutableState.GetNamespaceEntry(), activityInfo.Version, transferTask.Version, transferTask) if !ok { return nil, nil } if activityInfo.StartedId == common.EmptyEventID
return nil, nil } return t.processTransfer( ctx, processTaskIfClosed, transferTask, actionFn, getStandbyPostActionFn( transferTask, t.getCurrentTime, t.config.StandbyTaskMissingEventsResendDelay(), t.config.StandbyTaskMissingEventsDiscardDelay(), t.fetchHistoryFromRemote, t.pushActivity, ), ) } func (t *transferQueueStandbyTaskExecutor) processWorkflowTask( ctx context.Context, transferTask *tasks.WorkflowTask, ) error { processTaskIfClosed := false actionFn := func(_ context.Context, wfContext workflow.Context, mutableState workflow.MutableState) (interface{}, error) { wtInfo, ok := mutableState.GetWorkflowTaskInfo(transferTask.ScheduleID) if !ok { return nil, nil } executionInfo := mutableState.GetExecutionInfo() taskQueue := &taskqueuepb.TaskQueue{ // on standby, always use the original task queue, disregarding task.TaskQueue, which could be sticky Name: mutableState.GetExecutionInfo().TaskQueue, Kind: enumspb.TASK_QUEUE_KIND_NORMAL, } workflowRunTimeout := timestamp.DurationValue(executionInfo.WorkflowRunTimeout) taskScheduleToStartTimeoutSeconds := int64(workflowRunTimeout.Round(time.Second).Seconds()) if mutableState.GetExecutionInfo().TaskQueue != transferTask.TaskQueue { // Experimental: try to push a sticky task as a regular task, with the sticky timeout as TTL. // The workflow might have been sticky before the namespace became standby, // so a schedule_to_start timer should already have been created. taskScheduleToStartTimeoutSeconds = int64(timestamp.DurationValue(executionInfo.StickyScheduleToStartTimeout).Seconds()) } ok = VerifyTaskVersion(t.shard, t.logger, mutableState.GetNamespaceEntry(), wtInfo.Version, transferTask.Version, transferTask) if !ok { return nil, nil } if wtInfo.StartedID == common.EmptyEventID { return newWorkflowTaskPostActionInfo( mutableState, taskScheduleToStartTimeoutSeconds, *taskQueue, ) } return nil, nil } return t.processTransfer( ctx, processTaskIfClosed, transferTask, actionFn, getStandbyPostActionFn( transferTask, t.getCurrentTime, t.config.StandbyTaskMissingEventsResendDelay(), t.config.StandbyTaskMissingEventsDiscardDelay(), t.fetchHistoryFromRemote, t.pushWorkflowTask, ), ) } func (t *transferQueueStandbyTaskExecutor) processCloseExecution( ctx context.Context, transferTask *tasks.CloseExecutionTask, ) error { processTaskIfClosed := true actionFn := func(ctx context.Context, wfContext workflow.Context, mutableState workflow.MutableState) (interface{}, error) { if mutableState.IsWorkflowExecutionRunning() { // this can happen if the workflow is reset. 
return nil, nil } completionEvent, err := mutableState.GetCompletionEvent(ctx) if err != nil { return nil, err } wfCloseTime, err := mutableState.GetWorkflowCloseTime(ctx) if err != nil { return nil, err } executionInfo := mutableState.GetExecutionInfo() executionState := mutableState.GetExecutionState() workflowTypeName := executionInfo.WorkflowTypeName workflowStatus := executionState.Status workflowHistoryLength := mutableState.GetNextEventID() - 1 workflowStartTime := timestamp.TimeValue(mutableState.GetExecutionInfo().GetStartTime()) workflowExecutionTime := timestamp.TimeValue(mutableState.GetExecutionInfo().GetExecutionTime()) visibilityMemo := getWorkflowMemo(executionInfo.Memo) searchAttr := getSearchAttributes(executionInfo.SearchAttributes) lastWriteVersion, err := mutableState.GetLastWriteVersion() if err != nil { return nil, err } ok := VerifyTaskVersion(t.shard, t.logger, mutableState.GetNamespaceEntry(), lastWriteVersion, transferTask.Version, transferTask) if !ok { return nil, nil } if err := t.archiveVisibility( ctx, namespace.ID(transferTask.NamespaceID), transferTask.WorkflowID, transferTask.RunID, workflowTypeName, workflowStartTime, workflowExecutionTime, timestamp.TimeValue(wfCloseTime), workflowStatus, workflowHistoryLength, visibilityMemo, searchAttr, ); err != nil { return nil, err } // verify if parent got the completion event verifyCompletionRecorded := mutableState.HasParentExecution() && executionInfo.NewExecutionRunId == "" && !IsTerminatedByResetter(completionEvent) if verifyCompletionRecorded { _, err := t.historyClient.VerifyChildExecutionCompletionRecorded(ctx, &historyservice.VerifyChildExecutionCompletionRecordedRequest{ NamespaceId: executionInfo.ParentNamespaceId, ParentExecution: &commonpb.WorkflowExecution{ WorkflowId: executionInfo.ParentWorkflowId, RunId: executionInfo.ParentRunId, }, ChildExecution: &commonpb.WorkflowExecution{ WorkflowId: transferTask.WorkflowID, RunId: transferTask.RunID, }, ParentInitiatedId: executionInfo.ParentInitiatedId, ParentInitiatedVersion: executionInfo.ParentInitiatedVersion, Clock: executionInfo.ParentClock, }) switch err.(type) { case nil, *serviceerror.NotFound, *serviceerror.NamespaceNotFound: return nil, nil case *serviceerror.WorkflowNotReady: return verifyChildCompletionRecordedInfo, nil default: return nil, err } } return nil, nil } return t.processTransfer( ctx, processTaskIfClosed, transferTask, actionFn, getStandbyPostActionFn( transferTask, t.getCurrentTime, t.config.StandbyTaskMissingEventsResendDelay(), t.config.StandbyTaskMissingEventsDiscardDelay(), standbyTaskPostActionNoOp, standbyTransferTaskPostActionTaskDiscarded, ), ) } func (t *transferQueueStandbyTaskExecutor) processCancelExecution( ctx context.Context, transferTask *tasks.CancelExecutionTask, ) error { processTaskIfClosed := false actionFn := func(_ context.Context, wfContext workflow.Context, mutableState workflow.MutableState) (interface{}, error) { requestCancelInfo, ok := mutableState.GetRequestCancelInfo(transferTask.InitiatedID) if !ok { return nil, nil } ok = VerifyTaskVersion(t.shard, t.logger, mutableState.GetNamespaceEntry(), requestCancelInfo.Version, transferTask.Version, transferTask) if !ok { return nil, nil } return getHistoryResendInfo(mutableState) } return t.processTransfer( ctx, processTaskIfClosed, transferTask, actionFn, getStandbyPostActionFn( transferTask, t.getCurrentTime, t.config.StandbyTaskMissingEventsResendDelay(), t.config.StandbyTaskMissingEventsDiscardDelay(), t.fetchHistoryFromRemote, 
			standbyTransferTaskPostActionTaskDiscarded,
		),
	)
}

func (t *transferQueueStandbyTaskExecutor) processSignalExecution(
	ctx context.Context,
	transferTask *tasks.SignalExecutionTask,
) error {
	processTaskIfClosed := false
	actionFn := func(_ context.Context, wfContext workflow.Context, mutableState workflow.MutableState) (interface{}, error) {
		signalInfo, ok := mutableState.GetSignalInfo(transferTask.InitiatedID)
		if !ok {
			return nil, nil
		}

		ok = VerifyTaskVersion(t.shard, t.logger, mutableState.GetNamespaceEntry(), signalInfo.Version, transferTask.Version, transferTask)
		if !ok {
			return nil, nil
		}

		return getHistoryResendInfo(mutableState)
	}

	return t.processTransfer(
		ctx,
		processTaskIfClosed,
		transferTask,
		actionFn,
		getStandbyPostActionFn(
			transferTask,
			t.getCurrentTime,
			t.config.StandbyTaskMissingEventsResendDelay(),
			t.config.StandbyTaskMissingEventsDiscardDelay(),
			t.fetchHistoryFromRemote,
			standbyTransferTaskPostActionTaskDiscarded,
		),
	)
}

func (t *transferQueueStandbyTaskExecutor) processStartChildExecution(
	ctx context.Context,
	transferTask *tasks.StartChildExecutionTask,
) error {
	processTaskIfClosed := true
	actionFn := func(ctx context.Context, wfContext workflow.Context, mutableState workflow.MutableState) (interface{}, error) {
		childWorkflowInfo, ok := mutableState.GetChildExecutionInfo(transferTask.InitiatedID)
		if !ok {
			return nil, nil
		}

		ok = VerifyTaskVersion(t.shard, t.logger, mutableState.GetNamespaceEntry(), childWorkflowInfo.Version, transferTask.Version, transferTask)
		if !ok {
			return nil, nil
		}

		workflowClosed := !mutableState.IsWorkflowExecutionRunning()
		childStarted := childWorkflowInfo.StartedId != common.EmptyEventID
		childAbandon := childWorkflowInfo.ParentClosePolicy == enumspb.PARENT_CLOSE_POLICY_ABANDON

		if workflowClosed && !(childStarted && childAbandon) {
			// NOTE: ideally, for the case where the workflow is closed, the child is not started, and the parent
			// close policy is abandon, we should continue to start the child workflow in the active cluster, so the
			// standby logic also needs to perform the verification. However, we can't do that for technical reasons.
			// Please check the comments in processStartChildExecution in transferQueueActiveTaskExecutor.go
			// for details.
return nil, nil } if !childStarted { historyResendInfo, err := getHistoryResendInfo(mutableState) if err != nil { return nil, err } return &startChildExecutionPostActionInfo{ historyResendInfo: historyResendInfo, }, nil } _, err := t.historyClient.VerifyFirstWorkflowTaskScheduled(ctx, &historyservice.VerifyFirstWorkflowTaskScheduledRequest{ NamespaceId: transferTask.TargetNamespaceID, WorkflowExecution: &commonpb.WorkflowExecution{ WorkflowId: childWorkflowInfo.StartedWorkflowId, RunId: childWorkflowInfo.StartedRunId, }, Clock: childWorkflowInfo.Clock, }) switch err.(type) { case nil, *serviceerror.NamespaceNotFound: return nil, nil case *serviceerror.WorkflowNotReady: return &startChildExecutionPostActionInfo{}, nil default: return nil, err } } return t.processTransfer( ctx, processTaskIfClosed, transferTask, actionFn, getStandbyPostActionFn( transferTask, t.getCurrentTime, t.config.StandbyTaskMissingEventsResendDelay(), t.config.StandbyTaskMissingEventsDiscardDelay(), t.startChildExecutionResendPostAction, standbyTransferTaskPostActionTaskDiscarded, ), ) } func (t *transferQueueStandbyTaskExecutor) processTransfer( ctx context.Context, processTaskIfClosed bool, taskInfo tasks.Task, actionFn standbyActionFn, postActionFn standbyPostActionFn, ) (retError error) { ctx, cancel := context.WithTimeout(ctx, taskTimeout) defer cancel() weContext, release, err := getWorkflowExecutionContextForTask(ctx, t.cache, taskInfo) if err != nil { return err } defer func() { if retError == consts.ErrTaskRetry { release(nil) } else { release(retError) } }() mutableState, err := loadMutableStateForTransferTask(ctx, weContext, taskInfo, t.metricsClient, t.logger) if err != nil || mutableState == nil { return err } if !mutableState.IsWorkflowExecutionRunning() && !processTaskIfClosed { // workflow already finished, no need to process transfer task. 
		return nil
	}

	historyResendInfo, err := actionFn(ctx, weContext, mutableState)
	if err != nil {
		return err
	}

	// NOTE: do not access anything related to mutable state after this lock is released
	release(nil)
	return postActionFn(ctx, taskInfo, historyResendInfo, t.logger)
}

func (t *transferQueueStandbyTaskExecutor) pushActivity(
	ctx context.Context,
	task tasks.Task,
	postActionInfo interface{},
	logger log.Logger,
) error {
	if postActionInfo == nil {
		return nil
	}

	pushActivityInfo := postActionInfo.(*activityTaskPostActionInfo)
	timeout := pushActivityInfo.activityTaskScheduleToStartTimeout
	return t.transferQueueTaskExecutorBase.pushActivity(
		ctx,
		task.(*tasks.ActivityTask),
		&timeout,
	)
}

func (t *transferQueueStandbyTaskExecutor) pushWorkflowTask(
	ctx context.Context,
	task tasks.Task,
	postActionInfo interface{},
	logger log.Logger,
) error {
	if postActionInfo == nil {
		return nil
	}

	pushwtInfo := postActionInfo.(*workflowTaskPostActionInfo)
	timeout := pushwtInfo.workflowTaskScheduleToStartTimeout
	return t.transferQueueTaskExecutorBase.pushWorkflowTask(
		ctx,
		task.(*tasks.WorkflowTask),
		&pushwtInfo.taskqueue,
		timestamp.DurationFromSeconds(timeout),
	)
}

func (t *transferQueueStandbyTaskExecutor) startChildExecutionResendPostAction(
	ctx context.Context,
	taskInfo tasks.Task,
	postActionInfo interface{},
	log log.Logger,
) error {
	if postActionInfo == nil {
		return nil
	}

	historyResendInfo := postActionInfo.(*startChildExecutionPostActionInfo).historyResendInfo
	if historyResendInfo != nil {
		return t.fetchHistoryFromRemote(ctx, taskInfo, historyResendInfo, log)
	}
	return standbyTaskPostActionNoOp(ctx, taskInfo, postActionInfo, log)
}

func (t *transferQueueStandbyTaskExecutor) fetchHistoryFromRemote(
	ctx context.Context,
	taskInfo tasks.Task,
	postActionInfo interface{},
	logger log.Logger,
) error {
	var resendInfo *historyResendInfo
	switch postActionInfo := postActionInfo.(type) {
	case nil:
		return nil
	case *historyResendInfo:
		resendInfo = postActionInfo
	case *activityTaskPostActionInfo:
		resendInfo = postActionInfo.historyResendInfo
	case *workflowTaskPostActionInfo:
		resendInfo = postActionInfo.historyResendInfo
	default:
		logger.Fatal("unknown post action info for fetching remote history", tag.Value(postActionInfo))
	}

	remoteClusterName, err := getRemoteClusterName(
		t.currentClusterName,
		t.registry,
		taskInfo.GetNamespaceID(),
	)
	if err != nil {
		return err
	}

	t.metricsClient.IncCounter(metrics.HistoryRereplicationByTransferTaskScope, metrics.ClientRequests)
	stopwatch := t.metricsClient.StartTimer(metrics.HistoryRereplicationByTransferTaskScope, metrics.ClientLatency)
	defer stopwatch.Stop()

	adminClient, err := t.shard.GetRemoteAdminClient(remoteClusterName)
	if err != nil {
		return err
	}
	if resendInfo.lastEventID == common.EmptyEventID || resendInfo.lastEventVersion == common.EmptyVersion {
		err = serviceerror.NewInternal("transferQueueStandbyProcessor encountered empty historyResendInfo")
	} else {
		// use a distinct name for the lookup error so the outer err is not shadowed;
		// otherwise the resend error below would never reach the logging at the end of this function
		ns, nsErr := t.registry.GetNamespaceByID(namespace.ID(taskInfo.GetNamespaceID()))
		if nsErr != nil {
			return nsErr
		}
		if err := refreshTasks(
			ctx,
			adminClient,
			ns.Name(),
			namespace.ID(taskInfo.GetNamespaceID()),
			taskInfo.GetWorkflowID(),
			taskInfo.GetRunID(),
		); err != nil {
			if _, isNotFound := err.(*serviceerror.NamespaceNotFound); isNotFound {
				// Don't log a NamespaceNotFound error because it is a valid case; return the error to stop retries.
				return err
			}
			t.logger.Error("Error refreshing tasks from remote.",
				tag.ShardID(t.shard.GetShardID()),
				tag.WorkflowNamespaceID(taskInfo.GetNamespaceID()),
				tag.WorkflowID(taskInfo.GetWorkflowID()),
				tag.WorkflowRunID(taskInfo.GetRunID()),
				tag.ClusterName(remoteClusterName),
				tag.Error(err))
		}

		// NOTE: history resend may take a long time; its timeout is currently
		// controlled by a separate dynamic config setting: StandbyTaskReReplicationContextTimeout
		err = t.nDCHistoryResender.SendSingleWorkflowHistory(
			remoteClusterName,
			namespace.ID(taskInfo.GetNamespaceID()),
			taskInfo.GetWorkflowID(),
			taskInfo.GetRunID(),
			resendInfo.lastEventID,
			resendInfo.lastEventVersion,
			0,
			0,
		)
	}

	if err != nil {
		if _, isNotFound := err.(*serviceerror.NamespaceNotFound); isNotFound {
			// Don't log a NamespaceNotFound error because it is a valid case; return the error to stop retries.
			return err
		}
		t.logger.Error("Error re-replicating history from remote.",
			tag.ShardID(t.shard.GetShardID()),
			tag.WorkflowNamespaceID(taskInfo.GetNamespaceID()),
			tag.WorkflowID(taskInfo.GetWorkflowID()),
			tag.WorkflowRunID(taskInfo.GetRunID()),
			tag.SourceCluster(remoteClusterName),
			tag.Error(err))
	}

	// return the error so the task processing logic will retry
	return consts.ErrTaskRetry
}

func (t *transferQueueStandbyTaskExecutor) getCurrentTime() time.Time {
	return t.shard.GetCurrentTime(t.clusterName)
}
views.py
from rest_framework.views import APIView from rest_framework.response import Response from rest_framework import status from rest_framework import viewsets from profiles_api import serializers
class HelloApiView(APIView):
    """Test API View"""
    serializer_class = serializers.HelloSerializer

    def get(self, request, format=None):
        """Returns a list of APIView features"""
        an_apiview = [
            'Uses HTTP methods as functions (get, post, patch, put, delete)',
            'Is similar to a traditional Django View',
            'Gives you the most control over your application logic',
            'Is mapped manually to URLs',
        ]

        return Response({'message': 'Hello!', 'an_apiview': an_apiview})

    def post(self, request):
        """Create a hello message with our name"""
        serializer = self.serializer_class(data=request.data)

        if serializer.is_valid():
            name = serializer.validated_data.get('name')
            message = f'Hello {name}'
            return Response({'message': message})
        else:
            return Response(
                serializer.errors,
                status=status.HTTP_400_BAD_REQUEST
            )

    def put(self, request, pk=None):
        """Handle updating an object"""
        return Response({'method': 'PUT'})

    def patch(self, request, pk=None):
        """Handle a partial update of an object"""
        return Response({'method': 'PATCH'})

    def delete(self, request, pk=None):
        """Delete an object"""
        return Response({'method': 'DELETE'})


class HelloViewSet(viewsets.ViewSet):
    """Test API ViewSet"""
    serializer_class = serializers.HelloSerializer

    def list(self, request):
        """Return a hello message"""
        a_viewset = [
            'Uses actions (list, create, retrieve, update, partial_update)',
            'Automatically maps to URLs using Routers',
            'Provides more functionality with less code',
        ]

        return Response({'message': 'Hello', 'a_viewset': a_viewset})

    def create(self, request):
        """Create a new hello message"""
        serializer = self.serializer_class(data=request.data)

        if serializer.is_valid():
            name = serializer.validated_data.get('name')
            message = f'Hello {name}!'
            return Response({'message': message})
        else:
            return Response(
                serializer.errors,
                status=status.HTTP_400_BAD_REQUEST
            )

    def retrieve(self, request, pk=None):
        """Handle getting an object by its ID"""
        return Response({'http_method': 'GET'})

    def update(self, request, pk=None):
        """Handle updating an object"""
        return Response({'http_method': 'PUT'})

    def partial_update(self, request, pk=None):
        """Handle updating part of an object"""
        return Response({'http_method': 'PATCH'})

    def destroy(self, request, pk=None):
        """Handle removing an object"""
        return Response({'http_method': 'DELETE'})
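# ----------------------------------------------------------------------
# Illustrative wiring (not part of the original file): a minimal sketch of
# how views like these are typically exposed in a DRF urls.py. The module
# path "profiles_api.views", the URL prefixes, and the basename are
# assumptions for the example, not taken from the original project.
#
# from django.urls import path, include
# from rest_framework.routers import DefaultRouter
# from profiles_api import views
#
# router = DefaultRouter()
# router.register('hello-viewset', views.HelloViewSet, basename='hello-viewset')
#
# urlpatterns = [
#     path('hello-view/', views.HelloApiView.as_view()),
#     path('', include(router.urls)),
# ]
# ----------------------------------------------------------------------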
main.rs
extern crate serde; extern crate rltk; use rltk::{Console, GameState, Rltk, Point}; extern crate specs; use specs::prelude::*; use specs::saveload::{SimpleMarker, SimpleMarkerAllocator}; #[macro_use] extern crate specs_derive; mod components; pub use components::*; mod map; pub use map::*; mod player; use player::*; mod rect; pub use rect::Rect; mod visibility_system; use visibility_system::VisibilitySystem; mod monster_ai_system; use monster_ai_system::MonsterAI; mod map_indexing_system; use map_indexing_system::MapIndexingSystem; mod melee_combat_system; use melee_combat_system::MeleeCombatSystem; mod damage_system; use damage_system::DamageSystem; mod gui; mod gamelog; mod spawner; mod inventory_system; use inventory_system::{ ItemCollectionSystem, ItemUseSystem, ItemDropSystem, ItemRemoveSystem }; pub mod saveload_system; pub mod random_table; pub mod particle_system; pub mod hunger_system; pub mod rex_assets; pub mod trigger_system; pub mod map_builders; const SHOW_MAPGEN_VISUALIZER : bool = false; #[derive(PartialEq, Copy, Clone)] pub enum RunState { AwaitingInput, PreRun, PlayerTurn, MonsterTurn, ShowInventory, ShowDropItem, ShowTargeting { range : i32, item : Entity}, MainMenu { menu_selection : gui::MainMenuSelection }, SaveGame, NextLevel, ShowRemoveItem, GameOver, MagicMapReveal { row : i32 }, MapGeneration } pub struct State { pub ecs: World, mapgen_next_state : Option<RunState>, mapgen_history : Vec<Map>, mapgen_index : usize, mapgen_timer : f32 } impl State { fn run_systems(&mut self) { let mut vis = VisibilitySystem{}; vis.run_now(&self.ecs); let mut mob = MonsterAI{}; mob.run_now(&self.ecs); let mut mapindex = MapIndexingSystem{}; mapindex.run_now(&self.ecs); let mut triggers = trigger_system::TriggerSystem{}; triggers.run_now(&self.ecs); let mut melee = MeleeCombatSystem{}; melee.run_now(&self.ecs); let mut damage = DamageSystem{}; damage.run_now(&self.ecs); let mut pickup = ItemCollectionSystem{}; pickup.run_now(&self.ecs); let mut itemuse = ItemUseSystem{}; itemuse.run_now(&self.ecs); let mut drop_items = ItemDropSystem{}; drop_items.run_now(&self.ecs); let mut item_remove = ItemRemoveSystem{}; item_remove.run_now(&self.ecs); let mut hunger = hunger_system::HungerSystem{}; hunger.run_now(&self.ecs); let mut particles = particle_system::ParticleSpawnSystem{}; particles.run_now(&self.ecs); self.ecs.maintain(); } } impl GameState for State { fn tick(&mut self, ctx : &mut Rltk) { let mut newrunstate; { let runstate = self.ecs.fetch::<RunState>(); newrunstate = *runstate; } ctx.cls(); particle_system::cull_dead_particles(&mut self.ecs, ctx); match newrunstate { RunState::MainMenu{..} => {} RunState::GameOver{..} => {} _ => { draw_map(&self.ecs.fetch::<Map>(), ctx); let positions = self.ecs.read_storage::<Position>(); let renderables = self.ecs.read_storage::<Renderable>(); let hidden = self.ecs.read_storage::<Hidden>(); let map = self.ecs.fetch::<Map>(); let mut data = (&positions, &renderables, !&hidden).join().collect::<Vec<_>>(); data.sort_by(|&a, &b| b.1.render_order.cmp(&a.1.render_order) ); for (pos, render, _hidden) in data.iter() { let idx = map.xy_idx(pos.x, pos.y); if map.visible_tiles[idx] { ctx.set(pos.x, pos.y, render.fg, render.bg, render.glyph) } } gui::draw_ui(&self.ecs, ctx); } } match newrunstate { RunState::MapGeneration => { if !SHOW_MAPGEN_VISUALIZER { newrunstate = self.mapgen_next_state.unwrap(); } else { ctx.cls(); draw_map(&self.mapgen_history[self.mapgen_index], ctx); self.mapgen_timer += ctx.frame_time_ms; if self.mapgen_timer > 200.0 { 
self.mapgen_timer = 0.0; self.mapgen_index += 1; if self.mapgen_index >= self.mapgen_history.len() { //self.mapgen_index -= 1; newrunstate = self.mapgen_next_state.unwrap(); } } } } RunState::PreRun => { self.run_systems(); self.ecs.maintain(); newrunstate = RunState::AwaitingInput; } RunState::AwaitingInput => { newrunstate = player_input(self, ctx); } RunState::PlayerTurn => { self.run_systems(); self.ecs.maintain(); match *self.ecs.fetch::<RunState>() { RunState::MagicMapReveal{ .. } => newrunstate = RunState::MagicMapReveal{ row: 0 }, _ => newrunstate = RunState::MonsterTurn } } RunState::MonsterTurn => { self.run_systems(); self.ecs.maintain(); newrunstate = RunState::AwaitingInput; } RunState::ShowInventory => { let result = gui::show_inventory(self, ctx); match result.0 { gui::ItemMenuResult::Cancel => newrunstate = RunState::AwaitingInput, gui::ItemMenuResult::NoResponse => {} gui::ItemMenuResult::Selected => { let item_entity = result.1.unwrap(); let is_ranged = self.ecs.read_storage::<Ranged>(); let is_item_ranged = is_ranged.get(item_entity); if let Some(is_item_ranged) = is_item_ranged { newrunstate = RunState::ShowTargeting{ range: is_item_ranged.range, item: item_entity }; } else { let mut intent = self.ecs.write_storage::<WantsToUseItem>(); intent.insert(*self.ecs.fetch::<Entity>(), WantsToUseItem{ item: item_entity, target: None }).expect("Unable to insert intent"); newrunstate = RunState::PlayerTurn; } } } } RunState::ShowDropItem => { let result = gui::drop_item_menu(self, ctx); match result.0 { gui::ItemMenuResult::Cancel => newrunstate = RunState::AwaitingInput, gui::ItemMenuResult::NoResponse => {} gui::ItemMenuResult::Selected => { let item_entity = result.1.unwrap(); let mut intent = self.ecs.write_storage::<WantsToDropItem>(); intent.insert(*self.ecs.fetch::<Entity>(), WantsToDropItem{ item: item_entity }).expect("Unable to insert intent"); newrunstate = RunState::PlayerTurn; } } } RunState::ShowRemoveItem => { let result = gui::remove_item_menu(self, ctx); match result.0 { gui::ItemMenuResult::Cancel => newrunstate = RunState::AwaitingInput, gui::ItemMenuResult::NoResponse => {} gui::ItemMenuResult::Selected => { let item_entity = result.1.unwrap(); let mut intent = self.ecs.write_storage::<WantsToRemoveItem>(); intent.insert(*self.ecs.fetch::<Entity>(), WantsToRemoveItem{ item: item_entity }).expect("Unable to insert intent"); newrunstate = RunState::PlayerTurn; } } } RunState::ShowTargeting{range, item} => { let result = gui::ranged_target(self, ctx, range); match result.0 { gui::ItemMenuResult::Cancel => newrunstate = RunState::AwaitingInput, gui::ItemMenuResult::NoResponse => {} gui::ItemMenuResult::Selected => { let mut intent = self.ecs.write_storage::<WantsToUseItem>(); intent.insert(*self.ecs.fetch::<Entity>(), WantsToUseItem{ item, target: result.1 }).expect("Unable to insert intent"); newrunstate = RunState::PlayerTurn; } } } RunState::MainMenu{ .. 
} => { let result = gui::main_menu(self, ctx); match result { gui::MainMenuResult::NoSelection{ selected } => newrunstate = RunState::MainMenu{ menu_selection: selected }, gui::MainMenuResult::Selected{ selected } => { match selected { gui::MainMenuSelection::NewGame => newrunstate = RunState::PreRun, gui::MainMenuSelection::LoadGame => { saveload_system::load_game(&mut self.ecs); newrunstate = RunState::AwaitingInput; saveload_system::delete_save(); } gui::MainMenuSelection::Quit => { ::std::process::exit(0); } } } } } RunState::GameOver => { let result = gui::game_over(ctx); match result { gui::GameOverResult::NoSelection => {} gui::GameOverResult::QuitToMenu => { self.game_over_cleanup(); newrunstate = RunState::MapGeneration; self.mapgen_next_state = Some(RunState::MainMenu{ menu_selection: gui::MainMenuSelection::NewGame }); } } } RunState::SaveGame => { saveload_system::save_game(&mut self.ecs); newrunstate = RunState::MainMenu{ menu_selection : gui::MainMenuSelection::LoadGame }; } RunState::NextLevel => { self.goto_next_level(); self.mapgen_next_state = Some(RunState::PreRun); newrunstate = RunState::MapGeneration; } RunState::MagicMapReveal{row} => { let mut map = self.ecs.fetch_mut::<Map>(); for x in 0..MAPWIDTH { let idx = map.xy_idx(x as i32,row); map.revealed_tiles[idx] = true; } if row as usize == MAPHEIGHT-1 { newrunstate = RunState::MonsterTurn; } else { newrunstate = RunState::MagicMapReveal{ row: row+1 }; } } } { let mut runwriter = self.ecs.write_resource::<RunState>(); *runwriter = newrunstate; } damage_system::delete_the_dead(&mut self.ecs); } } impl State { fn entities_to_remove_on_level_change(&mut self) -> Vec<Entity> { let entities = self.ecs.entities(); let player = self.ecs.read_storage::<Player>(); let backpack = self.ecs.read_storage::<InBackpack>(); let player_entity = self.ecs.fetch::<Entity>(); let equipped = self.ecs.read_storage::<Equipped>(); let mut to_delete : Vec<Entity> = Vec::new(); for entity in entities.join() { let mut should_delete = true; // Don't delete the player let p = player.get(entity); if let Some(_p) = p { should_delete = false; } // Don't delete the player's equipment let bp = backpack.get(entity); if let Some(bp) = bp { if bp.owner == *player_entity { should_delete = false; } } let eq = equipped.get(entity); if let Some(eq) = eq { if eq.owner == *player_entity { should_delete = false; } } if should_delete { to_delete.push(entity); } } to_delete } fn goto_next_level(&mut self)
    {
        // Delete entities that aren't the player or their equipment
        let to_delete = self.entities_to_remove_on_level_change();
        for target in to_delete {
            self.ecs.delete_entity(target).expect("Unable to delete entity");
        }

        // Build a new map and place the player
        let current_depth;
        {
            let worldmap_resource = self.ecs.fetch::<Map>();
            current_depth = worldmap_resource.depth;
        }
        self.generate_world_map(current_depth + 1);

        // Notify the player and give them some health
        let player_entity = self.ecs.fetch::<Entity>();
        let mut gamelog = self.ecs.fetch_mut::<gamelog::GameLog>();
        gamelog.entries.insert(0, "You descend to the next level, and take a moment to heal.".to_string());
        let mut player_health_store = self.ecs.write_storage::<CombatStats>();
        let player_health = player_health_store.get_mut(*player_entity);
        if let Some(player_health) = player_health {
            player_health.hp = i32::max(player_health.hp, player_health.max_hp / 2);
        }
    }
fn game_over_cleanup(&mut self) { // Delete everything let mut to_delete = Vec::new(); for e in self.ecs.entities().join() { to_delete.push(e); } for del in to_delete.iter() { self.ecs.delete_entity(*del).expect("Deletion failed"); } // Spawn a new player { let player_entity = spawner::player(&mut self.ecs, 0, 0); let mut player_entity_writer = self.ecs.write_resource::<Entity>(); *player_entity_writer = player_entity; } // Build a new map and place the player self.generate_world_map(1); } fn generate_world_map(&mut self, new_depth : i32) { self.mapgen_index = 0; self.mapgen_timer = 0.0; self.mapgen_history.clear(); let mut rng = self.ecs.write_resource::<rltk::RandomNumberGenerator>(); let mut builder = map_builders::random_builder(new_depth, &mut rng); builder.build_map(&mut rng); self.mapgen_history = builder.build_data.history.clone(); let player_start; { let mut worldmap_resource = self.ecs.write_resource::<Map>(); *worldmap_resource = builder.build_data.map.clone(); player_start = builder.build_data.starting_position.as_mut().unwrap().clone(); } // Spawn bad guys std::mem::drop(rng); builder.spawn_entities(&mut self.ecs); // Place the player and update resources let (player_x, player_y) = (player_start.x, player_start.y); let mut player_position = self.ecs.write_resource::<Point>(); *player_position = Point::new(player_x, player_y); let mut position_components = self.ecs.write_storage::<Position>(); let player_entity = self.ecs.fetch::<Entity>(); let player_pos_comp = position_components.get_mut(*player_entity); if let Some(player_pos_comp) = player_pos_comp { player_pos_comp.x = player_x; player_pos_comp.y = player_y; } // Mark the player's visibility as dirty let mut viewshed_components = self.ecs.write_storage::<Viewshed>(); let vs = viewshed_components.get_mut(*player_entity); if let Some(vs) = vs { vs.dirty = true; } } } fn main() { let mut context = Rltk::init_simple8x8(80, 50, "Hello Rust World", "resources"); context.with_post_scanlines(true); let mut gs = State { ecs: World::new(), mapgen_next_state : Some(RunState::MainMenu{ menu_selection: gui::MainMenuSelection::NewGame }), mapgen_index : 0, mapgen_history: Vec::new(), mapgen_timer: 0.0 }; gs.ecs.register::<Position>(); gs.ecs.register::<Renderable>(); gs.ecs.register::<Player>(); gs.ecs.register::<Viewshed>(); gs.ecs.register::<Monster>(); gs.ecs.register::<Name>(); gs.ecs.register::<BlocksTile>(); gs.ecs.register::<CombatStats>(); gs.ecs.register::<WantsToMelee>(); gs.ecs.register::<SufferDamage>(); gs.ecs.register::<Item>(); gs.ecs.register::<ProvidesHealing>(); gs.ecs.register::<InflictsDamage>(); gs.ecs.register::<AreaOfEffect>(); gs.ecs.register::<Consumable>(); gs.ecs.register::<Ranged>(); gs.ecs.register::<InBackpack>(); gs.ecs.register::<WantsToPickupItem>(); gs.ecs.register::<WantsToUseItem>(); gs.ecs.register::<WantsToDropItem>(); gs.ecs.register::<Confusion>(); gs.ecs.register::<SimpleMarker<SerializeMe>>(); gs.ecs.register::<SerializationHelper>(); gs.ecs.register::<Equippable>(); gs.ecs.register::<Equipped>(); gs.ecs.register::<MeleePowerBonus>(); gs.ecs.register::<DefenseBonus>(); gs.ecs.register::<WantsToRemoveItem>(); gs.ecs.register::<ParticleLifetime>(); gs.ecs.register::<HungerClock>(); gs.ecs.register::<ProvidesFood>(); gs.ecs.register::<MagicMapper>(); gs.ecs.register::<Hidden>(); gs.ecs.register::<EntryTrigger>(); gs.ecs.register::<EntityMoved>(); gs.ecs.register::<SingleActivation>(); gs.ecs.register::<BlocksVisibility>(); gs.ecs.register::<Door>(); 
gs.ecs.insert(SimpleMarkerAllocator::<SerializeMe>::new()); gs.ecs.insert(Map::new(1)); gs.ecs.insert(Point::new(0, 0)); gs.ecs.insert(rltk::RandomNumberGenerator::new()); let player_entity = spawner::player(&mut gs.ecs, 0, 0); gs.ecs.insert(player_entity); gs.ecs.insert(RunState::MapGeneration{} ); gs.ecs.insert(gamelog::GameLog{ entries : vec!["Welcome to Rusty Roguelike".to_string()] }); gs.ecs.insert(particle_system::ParticleBuilder::new()); gs.ecs.insert(rex_assets::RexAssets::new()); gs.generate_world_map(1); rltk::main_loop(context, gs); }
mpm_sample.py
def mpm_sample():
print("hello, I am 'mpm_sample.py'.")
shellpattern.py
import os import re def translate(pat, match_end=r"\Z"):
"""Translate a shell-style pattern to a regular expression. The pattern may include ``**<sep>`` (<sep> stands for the platform-specific path separator; "/" on POSIX systems) for matching zero or more directory levels and "*" for matching zero or more arbitrary characters with the exception of any path separator. Wrap meta-characters in brackets for a literal match (i.e. "[?]" to match the literal character "?"). Using match_end=regex one can give a regular expression that is used to match after the regex that is generated from the pattern. The default is to match the end of the string. This function is derived from the "fnmatch" module distributed with the Python standard library. Copyright (C) 2001-2016 Python Software Foundation. All rights reserved. TODO: support {alt1,alt2} shell-style alternatives """ sep = os.path.sep n = len(pat) i = 0 res = "" while i < n: c = pat[i] i += 1 if c == "*": if i + 1 < n and pat[i] == "*" and pat[i + 1] == sep: # **/ == wildcard for 0+ full (relative) directory names with trailing slashes; the forward slash stands # for the platform-specific path separator res += r"(?:[^\%s]*\%s)*" % (sep, sep) i += 2 else: # * == wildcard for name parts (does not cross path separator) res += r"[^\%s]*" % sep elif c == "?": # ? == any single character excluding path separator res += r"[^\%s]" % sep elif c == "[": j = i if j < n and pat[j] == "!": j += 1 if j < n and pat[j] == "]": j += 1 while j < n and pat[j] != "]": j += 1 if j >= n: res += "\\[" else: stuff = pat[i:j].replace("\\", "\\\\") i = j + 1 if stuff[0] == "!": stuff = "^" + stuff[1:] elif stuff[0] == "^": stuff = "\\" + stuff res += "[%s]" % stuff else: res += re.escape(c) return res + match_end + "(?ms)"
quiz-map.js
/** @jsx jsx */ import {jsx, css} from '@emotion/core'
const QuizMap = () => (
  <p>
    <img
      src="/static/images/quiz/map-a.png"
      css={css`
        height: 180px;
      `}
    />
  </p>
)

export default QuizMap