| file_name (string, 3–137 chars) | prefix (string, 0–918k chars) | suffix (string, 0–962k chars) | middle (string, 0–812k chars) |
|---|---|---|---|

Each row below holds one source file split into prefix, suffix, and middle cells, with `|` separating the cells.
graphics.py | import numpy as np
from matplotlib import pyplot as plt
from scipy import interpolate
from abc import ABCMeta, abstractmethod
from utils.sorting import bubble_sort
class ABSMethods(metaclass=ABCMeta):
def __init__(self, function):
self.function = function
@abstractmethod
def drawGraphic(self, vectors):
pass
class Lagranz(ABSMethods):
def __call__(self, *args, **kwargs):
call = self.function()
self.drawGraphic(call)
return call
def counter(self, x, y, xl):
|
def drawGraphic(self, vectors):
for vector in vectors:
vector = bubble_sort(vector)
x = vector[0]
y = vector[1]
xl = np.linspace(np.min(x), np.max(x))
yl = self.counter(x, y, xl)
plt.scatter(x, y)
plt.plot(xl, yl)
plt.xlabel('x')
plt.ylabel('y')
plt.title("Lagranz Method")
plt.show()
class InterpolationLinear(ABSMethods):
def __call__(self, *args, **kwargs):
call = self.function()
self.drawGraphic(call)
return call
def counter(self, x, y, xl):
    # Locate the segment [x[i-1], x[i]] containing xl and evaluate the
    # chord through its endpoints (point-slope form anchored at x[i]).
    yx = 0
    for i in range(1, len(x)):  # start at 1 so i - 1 never wraps around
        if x[i - 1] <= xl <= x[i]:
            yp = y[i] - y[i - 1]
            xp = x[i] - x[i - 1]
            yx = y[i] + (yp / xp) * (xl - x[i])
            break
    return yx
def drawGraphic(self, vectors):
for vector in vectors:
vector = bubble_sort(vector)
x = vector[0]
y = vector[1]
yl = [self.counter(x, y, i) for i in x]
plt.scatter(x, y)
plt.plot(x, yl)
plt.xlabel('x')
plt.ylabel('y')
plt.title("Piecewise linear interpolation Method")
plt.show()
class InterpolationParabolic(ABSMethods):
def __call__(self, *args, **kwargs):
call = self.function()
self.drawGraphic(call)
return call
def counter(self, x, y, t):
    # Fit a parabola through three neighbouring samples around the
    # segment containing t, then evaluate it at t.
    z = 0
    for i in range(len(x) - 1):
        if x[i] <= t <= x[i + 1]:
            j = max(i, 1)  # use points (j-1, j, j+1) so the index never wraps below 0
            M = np.array([[x[j - 1] ** 2, x[j - 1], 1],
                          [x[j] ** 2, x[j], 1],
                          [x[j + 1] ** 2, x[j + 1], 1]])
            v = np.array([y[j - 1], y[j], y[j + 1]])
            solve = np.linalg.solve(M, v)
            z = solve[0] * t ** 2 + solve[1] * t + solve[2]
            break  # stop once the containing segment is found
    return z
def drawGraphic(self, vectors):
for vector in vectors:
vector = bubble_sort(vector)
x = vector[0]
y = vector[1]
plt.scatter(x, y)
xnew = np.linspace(np.min(x), np.max(x), 10000)
ynew = [self.counter(x, y, i) for i in xnew]
plt.plot(xnew, ynew)
plt.xlabel('x')
plt.ylabel('y')
plt.title("Piecewise parabolic interpolation Method")
plt.show()
class InterpolationSpline(ABSMethods):
def __call__(self, *args, **kwargs):
call = self.function()
self.drawGraphic(call)
return call
def counter(self, x, y):
tck = interpolate.splrep(x, y, s=0)
xl = np.linspace(np.min(x), np.max(x))
yl = interpolate.splev(xl, tck, der=0)
return xl, yl
def drawGraphic(self, vectors):
for vector in vectors:
vector = bubble_sort(vector)
x = vector[0]
y = vector[1]
xl, yl = self.counter(x, y)
plt.scatter(x, y)
plt.plot(xl, yl)
plt.xlabel('x')
plt.ylabel('y')
plt.title("Spline interpolation Method")
plt.show()
class Graphics(ABSMethods):
def __call__(self, *args, **kwargs):
# call = self.function()
# print(call)
call = 0
self.drawGraphic(call)
return call
def drawGraphic(self, call):
print("\n1 - Lagranz\n2 - Linear\n3 - Parabolic\n4 - Spline")
command = int(input("Enter the task number (from the lab manual): "))
if command == 1:
meth = Lagranz(self.function)
meth()
elif command == 2:
meth = InterpolationLinear(self.function)
meth()
elif command == 3:
meth = InterpolationParabolic(self.function)
meth()
elif command == 4:
meth = InterpolationSpline(self.function)
meth()
else:
print("Invalid command")
| z = 0
for j in range(len(y)):
    p1 = 1
    p2 = 1
    for i in range(len(x)):
        if i != j:  # skip the j-th factor of the Lagrange basis product
            p1 = p1 * (xl - x[i])
            p2 = p2 * (x[j] - x[i])
    z = z + y[j] * p1 / p2
return z |
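The middle cell above completes `Lagranz.counter` with the classic Lagrange sum z = Σⱼ yⱼ · Πᵢ≠ⱼ (xl − xᵢ)/(xⱼ − xᵢ). A self-contained sketch of the same loop (the `lagrange` helper and the sample points are invented for illustration), checking that the polynomial reproduces its own data:

```python
# Standalone version of the Lagrange sum from the middle cell above;
# the sample points are made up for illustration.
def lagrange(x, y, xl):
    z = 0.0
    for j in range(len(y)):
        p1, p2 = 1.0, 1.0
        for i in range(len(x)):
            if i != j:
                p1 *= xl - x[i]
                p2 *= x[j] - x[i]
        z += y[j] * p1 / p2
    return z

x = [0.0, 1.0, 2.0]
y = [1.0, 3.0, 2.0]
# An interpolating polynomial must pass through every sample point.
assert all(abs(lagrange(x, y, xi) - yi) < 1e-9 for xi, yi in zip(x, y))
```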
console.py | """
Console module that displays the message of a dialog
"""
import wx
import sys
from sas.sascalc.dataloader.loader import Loader
_BOX_WIDTH = 60
CONSOLE_WIDTH = 340
CONSOLE_HEIGHT = 240
if sys.platform.count("win32") > 0:
_STATICBOX_WIDTH = 450
PANEL_WIDTH = 500
PANEL_HEIGHT = 550
FONT_VARIANT = 0
else:
_STATICBOX_WIDTH = 480
PANEL_WIDTH = 530
PANEL_HEIGHT = 560
FONT_VARIANT = 1
class ConsoleDialog(wx.Dialog):
"""
Data summary dialog
"""
def __init__(self, parent=None, manager=None, data=None,
title="Data Summary", size=(PANEL_WIDTH, PANEL_HEIGHT)):
wx.Dialog.__init__(self, parent=parent, title=title, size=size)
self.parent = parent
self._manager = manager
self._data = data
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.msg_txt = wx.TextCtrl(self, size=(PANEL_WIDTH - 40,
PANEL_HEIGHT - 60),
style=wx.TE_MULTILINE)
self.msg_txt.SetEditable(False)
self.msg_txt.SetValue('No message available')
self.sizer.Add(self.msg_txt, 1, wx.EXPAND | wx.ALL, 10)
if self._data is not None:
self.set_message(msg=self._data.__str__())
self.SetSizer(self.sizer)
| Set the manager of this window
"""
self._manager = manager
def set_message(self, msg=""):
"""
Display the message received
"""
self.msg_txt.SetValue(str(msg))
if __name__ == "__main__":
app = wx.App()
# Instantiate a loader
loader = Loader()
# Load data
test_data = loader.load("MAR07232_rest.ASC")
dlg = ConsoleDialog(data=test_data)
dlg.ShowModal()
app.MainLoop() | def set_manager(self, manager):
""" |
monitoring-gen.go | // Copyright 2019 Google LLC.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Code generated file. DO NOT EDIT.
// Package monitoring provides access to the Stackdriver Monitoring API.
//
// This package is DEPRECATED. Use package cloud.google.com/go/monitoring/apiv3 instead.
//
// For product documentation, see: https://cloud.google.com/monitoring/api/
//
// Creating a client
//
// Usage example:
//
// import "google.golang.org/api/monitoring/v3"
// ...
// ctx := context.Background()
// monitoringService, err := monitoring.NewService(ctx)
//
// In this example, Google Application Default Credentials are used for authentication.
//
// For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials.
//
// Other authentication options
//
// By default, all available scopes (see "Constants") are used to authenticate. To restrict scopes, use option.WithScopes:
//
// monitoringService, err := monitoring.NewService(ctx, option.WithScopes(monitoring.MonitoringWriteScope))
//
// To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey:
//
// monitoringService, err := monitoring.NewService(ctx, option.WithAPIKey("AIza..."))
//
// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource:
//
// config := &oauth2.Config{...}
// // ...
// token, err := config.Exchange(ctx, ...)
// monitoringService, err := monitoring.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token)))
//
// See https://godoc.org/google.golang.org/api/option/ for details on options.
package monitoring // import "google.golang.org/api/monitoring/v3"
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"strings"
googleapi "google.golang.org/api/googleapi"
gensupport "google.golang.org/api/internal/gensupport"
option "google.golang.org/api/option"
htransport "google.golang.org/api/transport/http"
)
// Always reference these packages, just in case the auto-generated code
// below doesn't.
var _ = bytes.NewBuffer
var _ = strconv.Itoa
var _ = fmt.Sprintf
var _ = json.NewDecoder
var _ = io.Copy
var _ = url.Parse
var _ = gensupport.MarshalJSON
var _ = googleapi.Version
var _ = errors.New
var _ = strings.Replace
var _ = context.Canceled
const apiId = "monitoring:v3"
const apiName = "monitoring"
const apiVersion = "v3"
const basePath = "https://monitoring.googleapis.com/"
// OAuth2 scopes used by this API.
const (
// View and manage your data across Google Cloud Platform services
CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform"
// View and write monitoring data for all of your Google and third-party
// Cloud and API projects
MonitoringScope = "https://www.googleapis.com/auth/monitoring"
// View monitoring data for all of your Google Cloud and third-party
// projects
MonitoringReadScope = "https://www.googleapis.com/auth/monitoring.read"
// Publish metric data to your Google Cloud projects
MonitoringWriteScope = "https://www.googleapis.com/auth/monitoring.write"
)
// NewService creates a new APIService.
func NewService(ctx context.Context, opts ...option.ClientOption) (*APIService, error) {
scopesOption := option.WithScopes(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/monitoring",
"https://www.googleapis.com/auth/monitoring.read",
"https://www.googleapis.com/auth/monitoring.write",
)
// NOTE: prepend, so we don't override user-specified scopes.
opts = append([]option.ClientOption{scopesOption}, opts...)
client, endpoint, err := htransport.NewClient(ctx, opts...)
if err != nil {
return nil, err
}
s, err := New(client)
if err != nil {
return nil, err
}
if endpoint != "" {
s.BasePath = endpoint
}
return s, nil
}
// New creates a new APIService. It uses the provided http.Client for requests.
//
// Deprecated: please use NewService instead.
// To provide a custom HTTP client, use option.WithHTTPClient.
// If you are using google.golang.org/api/googleapis/transport.APIKey, use option.WithAPIKey with NewService instead.
func New(client *http.Client) (*APIService, error) {
if client == nil {
return nil, errors.New("client is nil")
}
s := &APIService{client: client, BasePath: basePath}
s.Projects = NewProjectsService(s)
s.Services = NewServicesService(s)
s.UptimeCheckIps = NewUptimeCheckIpsService(s)
return s, nil
}
type APIService struct {
client *http.Client
BasePath string // API endpoint base URL
UserAgent string // optional additional User-Agent fragment
Projects *ProjectsService
Services *ServicesService
UptimeCheckIps *UptimeCheckIpsService
}
func (s *APIService) userAgent() string {
if s.UserAgent == "" {
return googleapi.UserAgent
}
return googleapi.UserAgent + " " + s.UserAgent
}
func NewProjectsService(s *APIService) *ProjectsService {
rs := &ProjectsService{s: s}
rs.AlertPolicies = NewProjectsAlertPoliciesService(s)
rs.CollectdTimeSeries = NewProjectsCollectdTimeSeriesService(s)
rs.Groups = NewProjectsGroupsService(s)
rs.MetricDescriptors = NewProjectsMetricDescriptorsService(s)
rs.MonitoredResourceDescriptors = NewProjectsMonitoredResourceDescriptorsService(s)
rs.NotificationChannelDescriptors = NewProjectsNotificationChannelDescriptorsService(s)
rs.NotificationChannels = NewProjectsNotificationChannelsService(s)
rs.TimeSeries = NewProjectsTimeSeriesService(s)
rs.UptimeCheckConfigs = NewProjectsUptimeCheckConfigsService(s)
return rs
}
type ProjectsService struct {
s *APIService
AlertPolicies *ProjectsAlertPoliciesService
CollectdTimeSeries *ProjectsCollectdTimeSeriesService
Groups *ProjectsGroupsService
MetricDescriptors *ProjectsMetricDescriptorsService
MonitoredResourceDescriptors *ProjectsMonitoredResourceDescriptorsService
NotificationChannelDescriptors *ProjectsNotificationChannelDescriptorsService
NotificationChannels *ProjectsNotificationChannelsService
TimeSeries *ProjectsTimeSeriesService
UptimeCheckConfigs *ProjectsUptimeCheckConfigsService
}
func NewProjectsAlertPoliciesService(s *APIService) *ProjectsAlertPoliciesService {
rs := &ProjectsAlertPoliciesService{s: s}
return rs
}
type ProjectsAlertPoliciesService struct {
s *APIService
}
func NewProjectsCollectdTimeSeriesService(s *APIService) *ProjectsCollectdTimeSeriesService {
rs := &ProjectsCollectdTimeSeriesService{s: s}
return rs
}
type ProjectsCollectdTimeSeriesService struct {
s *APIService
}
func NewProjectsGroupsService(s *APIService) *ProjectsGroupsService {
rs := &ProjectsGroupsService{s: s}
rs.Members = NewProjectsGroupsMembersService(s)
return rs
}
type ProjectsGroupsService struct {
s *APIService
Members *ProjectsGroupsMembersService
}
func NewProjectsGroupsMembersService(s *APIService) *ProjectsGroupsMembersService {
rs := &ProjectsGroupsMembersService{s: s}
return rs
}
type ProjectsGroupsMembersService struct {
s *APIService
}
func NewProjectsMetricDescriptorsService(s *APIService) *ProjectsMetricDescriptorsService {
rs := &ProjectsMetricDescriptorsService{s: s}
return rs
}
type ProjectsMetricDescriptorsService struct {
s *APIService
}
func NewProjectsMonitoredResourceDescriptorsService(s *APIService) *ProjectsMonitoredResourceDescriptorsService {
rs := &ProjectsMonitoredResourceDescriptorsService{s: s}
return rs
}
type ProjectsMonitoredResourceDescriptorsService struct {
s *APIService
}
func NewProjectsNotificationChannelDescriptorsService(s *APIService) *ProjectsNotificationChannelDescriptorsService {
rs := &ProjectsNotificationChannelDescriptorsService{s: s}
return rs
}
type ProjectsNotificationChannelDescriptorsService struct {
s *APIService
}
func NewProjectsNotificationChannelsService(s *APIService) *ProjectsNotificationChannelsService {
rs := &ProjectsNotificationChannelsService{s: s}
return rs
}
type ProjectsNotificationChannelsService struct {
s *APIService
}
func NewProjectsTimeSeriesService(s *APIService) *ProjectsTimeSeriesService {
rs := &ProjectsTimeSeriesService{s: s}
return rs
}
type ProjectsTimeSeriesService struct {
s *APIService
}
func NewProjectsUptimeCheckConfigsService(s *APIService) *ProjectsUptimeCheckConfigsService {
rs := &ProjectsUptimeCheckConfigsService{s: s}
return rs
}
type ProjectsUptimeCheckConfigsService struct {
s *APIService
}
func NewServicesService(s *APIService) *ServicesService |
type ServicesService struct {
s *APIService
ServiceLevelObjectives *ServicesServiceLevelObjectivesService
}
func NewServicesServiceLevelObjectivesService(s *APIService) *ServicesServiceLevelObjectivesService {
rs := &ServicesServiceLevelObjectivesService{s: s}
return rs
}
type ServicesServiceLevelObjectivesService struct {
s *APIService
}
func NewUptimeCheckIpsService(s *APIService) *UptimeCheckIpsService {
rs := &UptimeCheckIpsService{s: s}
return rs
}
type UptimeCheckIpsService struct {
s *APIService
}
// Aggregation: Describes how to combine multiple time series to provide
// different views of the data. Aggregation consists of an alignment
// step on individual time series (alignment_period and
// per_series_aligner) followed by an optional reduction step of the
// data across the aligned time series (cross_series_reducer and
// group_by_fields). For more details, see Aggregation.
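//
// For example, a hypothetical literal (field values drawn from the enums
// documented below; the group-by field name is illustrative) that aligns
// each series into one-minute mean buckets and then sums the aligned
// series per zone:
//
//     agg := &Aggregation{
//         AlignmentPeriod:    "60s",
//         PerSeriesAligner:   "ALIGN_MEAN",
//         CrossSeriesReducer: "REDUCE_SUM",
//         GroupByFields:      []string{"resource.label.zone"},
//     }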
type Aggregation struct {
// AlignmentPeriod: The alignment period for per-time series alignment.
// If present, alignmentPeriod must be at least 60 seconds. After
// per-time series alignment, each time series will contain data points
// only on the period boundaries. If perSeriesAligner is not specified
// or equals ALIGN_NONE, then this field is ignored. If perSeriesAligner
// is specified and does not equal ALIGN_NONE, then this field must be
// defined; otherwise an error is returned.
AlignmentPeriod string `json:"alignmentPeriod,omitempty"`
// CrossSeriesReducer: The approach to be used to combine time series.
// Not all reducer functions may be applied to all time series,
// depending on the metric type and the value type of the original time
// series. Reduction may change the metric type or value type of the
// time series.Time series data must be aligned in order to perform
// cross-time series reduction. If crossSeriesReducer is specified, then
// perSeriesAligner must be specified and not equal ALIGN_NONE and
// alignmentPeriod must be specified; otherwise, an error is returned.
//
// Possible values:
// "REDUCE_NONE" - No cross-time series reduction. The output of the
// aligner is returned.
// "REDUCE_MEAN" - Reduce by computing the mean across time series for
// each alignment period. This reducer is valid for delta and gauge
// metrics with numeric or distribution values. The value type of the
// output is DOUBLE.
// "REDUCE_MIN" - Reduce by computing the minimum across time series
// for each alignment period. This reducer is valid for delta and gauge
// metrics with numeric values. The value type of the output is the same
// as the value type of the input.
// "REDUCE_MAX" - Reduce by computing the maximum across time series
// for each alignment period. This reducer is valid for delta and gauge
// metrics with numeric values. The value type of the output is the same
// as the value type of the input.
// "REDUCE_SUM" - Reduce by computing the sum across time series for
// each alignment period. This reducer is valid for delta and gauge
// metrics with numeric and distribution values. The value type of the
// output is the same as the value type of the input.
// "REDUCE_STDDEV" - Reduce by computing the standard deviation across
// time series for each alignment period. This reducer is valid for
// delta and gauge metrics with numeric or distribution values. The
// value type of the output is DOUBLE.
// "REDUCE_COUNT" - Reduce by computing the count of data points
// across time series for each alignment period. This reducer is valid
// for delta and gauge metrics of numeric, Boolean, distribution, and
// string value type. The value type of the output is INT64.
// "REDUCE_COUNT_TRUE" - Reduce by computing the count of True-valued
// data points across time series for each alignment period. This
// reducer is valid for delta and gauge metrics of Boolean value type.
// The value type of the output is INT64.
// "REDUCE_COUNT_FALSE" - Reduce by computing the count of
// False-valued data points across time series for each alignment
// period. This reducer is valid for delta and gauge metrics of Boolean
// value type. The value type of the output is INT64.
// "REDUCE_FRACTION_TRUE" - Reduce by computing the fraction of
// True-valued data points across time series for each alignment period.
// This reducer is valid for delta and gauge metrics of Boolean value
// type. The output value is in the range [0, 1] and has value type
// DOUBLE.
// "REDUCE_PERCENTILE_99" - Reduce by computing 99th percentile of
// data points across time series for each alignment period. This
// reducer is valid for gauge and delta metrics of numeric and
// distribution type. The value of the output is DOUBLE
// "REDUCE_PERCENTILE_95" - Reduce by computing 95th percentile of
// data points across time series for each alignment period. This
// reducer is valid for gauge and delta metrics of numeric and
// distribution type. The value of the output is DOUBLE
// "REDUCE_PERCENTILE_50" - Reduce by computing 50th percentile of
// data points across time series for each alignment period. This
// reducer is valid for gauge and delta metrics of numeric and
// distribution type. The value of the output is DOUBLE
// "REDUCE_PERCENTILE_05" - Reduce by computing 5th percentile of data
// points across time series for each alignment period. This reducer is
// valid for gauge and delta metrics of numeric and distribution type.
// The value of the output is DOUBLE
CrossSeriesReducer string `json:"crossSeriesReducer,omitempty"`
// GroupByFields: The set of fields to preserve when crossSeriesReducer
// is specified. The groupByFields determine how the time series are
// partitioned into subsets prior to applying the aggregation function.
// Each subset contains time series that have the same value for each of
// the grouping fields. Each individual time series is a member of
// exactly one subset. The crossSeriesReducer is applied to each subset
// of time series. It is not possible to reduce across different
// resource types, so this field implicitly contains resource.type.
// Fields not specified in groupByFields are aggregated away. If
// groupByFields is not specified and all the time series have the same
// resource type, then the time series are aggregated into a single
// output time series. If crossSeriesReducer is not defined, this field
// is ignored.
GroupByFields []string `json:"groupByFields,omitempty"`
// PerSeriesAligner: The approach to be used to align individual time
// series. Not all alignment functions may be applied to all time
// series, depending on the metric type and value type of the original
// time series. Alignment may change the metric type or the value type
// of the time series.Time series data must be aligned in order to
// perform cross-time series reduction. If crossSeriesReducer is
// specified, then perSeriesAligner must be specified and not equal
// ALIGN_NONE and alignmentPeriod must be specified; otherwise, an error
// is returned.
//
// Possible values:
// "ALIGN_NONE" - No alignment. Raw data is returned. Not valid if
// cross-time series reduction is requested. The value type of the
// result is the same as the value type of the input.
// "ALIGN_DELTA" - Align and convert to delta metric type. This
// alignment is valid for cumulative metrics and delta metrics. Aligning
// an existing delta metric to a delta metric requires that the
// alignment period be increased. The value type of the result is the
// same as the value type of the input.One can think of this aligner as
// a rate but without time units; that is, the output is conceptually
// (second_point - first_point).
// "ALIGN_RATE" - Align and convert to a rate. This alignment is valid
// for cumulative metrics and delta metrics with numeric values. The
// output is a gauge metric with value type DOUBLE.One can think of this
// aligner as conceptually providing the slope of the line that passes
// through the value at the start and end of the window. In other words,
// this is conceptually ((y1 - y0)/(t1 - t0)), and the output unit is
// one that has a "/time" dimension.If, by rate, you are looking for
// percentage change, see the ALIGN_PERCENT_CHANGE aligner option.
// "ALIGN_INTERPOLATE" - Align by interpolating between adjacent
// points around the period boundary. This alignment is valid for gauge
// metrics with numeric values. The value type of the result is the same
// as the value type of the input.
// "ALIGN_NEXT_OLDER" - Align by shifting the oldest data point before
// the period boundary to the boundary. This alignment is valid for
// gauge metrics. The value type of the result is the same as the value
// type of the input.
// "ALIGN_MIN" - Align time series via aggregation. The resulting data
// point in the alignment period is the minimum of all data points in
// the period. This alignment is valid for gauge and delta metrics with
// numeric values. The value type of the result is the same as the value
// type of the input.
// "ALIGN_MAX" - Align time series via aggregation. The resulting data
// point in the alignment period is the maximum of all data points in
// the period. This alignment is valid for gauge and delta metrics with
// numeric values. The value type of the result is the same as the value
// type of the input.
// "ALIGN_MEAN" - Align time series via aggregation. The resulting
// data point in the alignment period is the average or arithmetic mean
// of all data points in the period. This alignment is valid for gauge
// and delta metrics with numeric values. The value type of the output
// is DOUBLE.
// "ALIGN_COUNT" - Align time series via aggregation. The resulting
// data point in the alignment period is the count of all data points in
// the period. This alignment is valid for gauge and delta metrics with
// numeric or Boolean values. The value type of the output is INT64.
// "ALIGN_SUM" - Align time series via aggregation. The resulting data
// point in the alignment period is the sum of all data points in the
// period. This alignment is valid for gauge and delta metrics with
// numeric and distribution values. The value type of the output is the
// same as the value type of the input.
// "ALIGN_STDDEV" - Align time series via aggregation. The resulting
// data point in the alignment period is the standard deviation of all
// data points in the period. This alignment is valid for gauge and
// delta metrics with numeric values. The value type of the output is
// DOUBLE.
// "ALIGN_COUNT_TRUE" - Align time series via aggregation. The
// resulting data point in the alignment period is the count of
// True-valued data points in the period. This alignment is valid for
// gauge metrics with Boolean values. The value type of the output is
// INT64.
// "ALIGN_COUNT_FALSE" - Align time series via aggregation. The
// resulting data point in the alignment period is the count of
// False-valued data points in the period. This alignment is valid for
// gauge metrics with Boolean values. The value type of the output is
// INT64.
// "ALIGN_FRACTION_TRUE" - Align time series via aggregation. The
// resulting data point in the alignment period is the fraction of
// True-valued data points in the period. This alignment is valid for
// gauge metrics with Boolean values. The output value is in the range
// [0, 1] and has value type DOUBLE.
// "ALIGN_PERCENTILE_99" - Align time series via aggregation. The
// resulting data point in the alignment period is the 99th percentile
// of all data points in the period. This alignment is valid for gauge
// and delta metrics with distribution values. The output is a gauge
// metric with value type DOUBLE.
// "ALIGN_PERCENTILE_95" - Align time series via aggregation. The
// resulting data point in the alignment period is the 95th percentile
// of all data points in the period. This alignment is valid for gauge
// and delta metrics with distribution values. The output is a gauge
// metric with value type DOUBLE.
// "ALIGN_PERCENTILE_50" - Align time series via aggregation. The
// resulting data point in the alignment period is the 50th percentile
// of all data points in the period. This alignment is valid for gauge
// and delta metrics with distribution values. The output is a gauge
// metric with value type DOUBLE.
// "ALIGN_PERCENTILE_05" - Align time series via aggregation. The
// resulting data point in the alignment period is the 5th percentile of
// all data points in the period. This alignment is valid for gauge and
// delta metrics with distribution values. The output is a gauge metric
// with value type DOUBLE.
// "ALIGN_PERCENT_CHANGE" - Align and convert to a percentage change.
// This alignment is valid for gauge and delta metrics with numeric
// values. This alignment conceptually computes the equivalent of
// "((current - previous)/previous)*100" where previous value is
// determined based on the alignmentPeriod. In the event that previous
// is 0 the calculated value is infinity with the exception that if both
// (current - previous) and previous are 0 the calculated value is 0. A
// 10 minute moving mean is computed at each point of the time window
// prior to the above calculation to smooth the metric and prevent false
// positives from very short lived spikes. Only applicable for data that
// is >= 0. Any values < 0 are treated as no data. While delta metrics
// are accepted by this alignment special care should be taken that the
// values for the metric will always be positive. The output is a gauge
// metric with value type DOUBLE.
PerSeriesAligner string `json:"perSeriesAligner,omitempty"`
// ForceSendFields is a list of field names (e.g. "AlignmentPeriod") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "AlignmentPeriod") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *Aggregation) MarshalJSON() ([]byte, error) {
type NoMethod Aggregation
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// AlertPolicy: A description of the conditions under which some aspect
// of your system is considered to be "unhealthy" and the ways to notify
// people or services about this state. For an overview of alert
// policies, see Introduction to Alerting.
type AlertPolicy struct {
// Combiner: How to combine the results of multiple conditions to
// determine if an incident should be opened.
//
// Possible values:
// "COMBINE_UNSPECIFIED" - An unspecified combiner.
// "AND" - Combine conditions using the logical AND operator. An
// incident is created only if all conditions are met simultaneously.
// This combiner is satisfied if all conditions are met, even if they
// are met on completely different resources.
// "OR" - Combine conditions using the logical OR operator. An
// incident is created if any of the listed conditions is met.
// "AND_WITH_MATCHING_RESOURCE" - Combine conditions using logical AND
// operator, but unlike the regular AND option, an incident is created
// only if all conditions are met simultaneously on at least one
// resource.
Combiner string `json:"combiner,omitempty"`
// Conditions: A list of conditions for the policy. The conditions are
// combined by AND or OR according to the combiner field. If the
// combined conditions evaluate to true, then an incident is created. A
// policy can have from one to six conditions.
Conditions []*Condition `json:"conditions,omitempty"`
// CreationRecord: A read-only record of the creation of the alerting
// policy. If provided in a call to create or update, this field will be
// ignored.
CreationRecord *MutationRecord `json:"creationRecord,omitempty"`
// DisplayName: A short name or phrase used to identify the policy in
// dashboards, notifications, and incidents. To avoid confusion, don't
// use the same display name for multiple policies in the same project.
// The name is limited to 512 Unicode characters.
DisplayName string `json:"displayName,omitempty"`
// Documentation: Documentation that is included with notifications and
// incidents related to this policy. Best practice is for the
// documentation to include information to help responders understand,
// mitigate, escalate, and correct the underlying problems detected by
// the alerting policy. Notification channels that have limited capacity
// might not show this documentation.
Documentation *Documentation `json:"documentation,omitempty"`
// Enabled: Whether or not the policy is enabled. On write, the default
// interpretation if unset is that the policy is enabled. On read,
// clients should not make any assumption about the state if it has not
// been populated. The field should always be populated on List and Get
// operations, unless a field projection has been specified that strips
// it out.
Enabled bool `json:"enabled,omitempty"`
// MutationRecord: A read-only record of the most recent change to the
// alerting policy. If provided in a call to create or update, this
// field will be ignored.
MutationRecord *MutationRecord `json:"mutationRecord,omitempty"`
// Name: Required if the policy exists. The resource name for this
// policy. The syntax is:
// projects/[PROJECT_ID]/alertPolicies/[ALERT_POLICY_ID]
// [ALERT_POLICY_ID] is assigned by Stackdriver Monitoring when the
// policy is created. When calling the alertPolicies.create method, do
// not include the name field in the alerting policy passed as part of
// the request.
Name string `json:"name,omitempty"`
// NotificationChannels: Identifies the notification channels to which
// notifications should be sent when incidents are opened or closed or
// when new violations occur on an already opened incident. Each element
// of this array corresponds to the name field in each of the
// NotificationChannel objects that are returned from the
// ListNotificationChannels method. The syntax of the entries in this
// field is:
// projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]
//
NotificationChannels []string `json:"notificationChannels,omitempty"`
// UserLabels: User-supplied key/value data to be used for organizing
// and identifying the AlertPolicy objects.The field can contain up to
// 64 entries. Each key and value is limited to 63 Unicode characters or
// 128 bytes, whichever is smaller. Labels and values can contain only
// lowercase letters, numerals, underscores, and dashes. Keys must begin
// with a letter.
UserLabels map[string]string `json:"userLabels,omitempty"`
// Validity: Read-only description of how the alert policy is invalid.
// OK if the alert policy is valid. If not OK, the alert policy will not
// generate incidents.
Validity *Status `json:"validity,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "Combiner") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Combiner") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *AlertPolicy) MarshalJSON() ([]byte, error) {
type NoMethod AlertPolicy
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// AppEngine: App Engine service. Learn more at
// https://cloud.google.com/appengine.
type AppEngine struct {
// ModuleId: The ID of the App Engine module underlying this service.
// Corresponds to the module_id resource label in the gae_app monitored
// resource:
// https://cloud.google.com/monitoring/api/resources#tag_gae_app
ModuleId string `json:"moduleId,omitempty"`
// ForceSendFields is a list of field names (e.g. "ModuleId") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "ModuleId") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *AppEngine) MarshalJSON() ([]byte, error) {
type NoMethod AppEngine
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// AvailabilityCriteria: Future parameters for the availability SLI.
type AvailabilityCriteria struct {
}
// BasicAuthentication: The authentication parameters to provide to the
// specified resource or URL that requires a username and password.
// Currently, only Basic HTTP authentication
// (https://tools.ietf.org/html/rfc7617) is supported in Uptime checks.
type BasicAuthentication struct {
// Password: The password to use when authenticating with the HTTP
// server.
Password string `json:"password,omitempty"`
// Username: The username to use when authenticating with the HTTP
// server.
Username string `json:"username,omitempty"`
// ForceSendFields is a list of field names (e.g. "Password") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Password") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *BasicAuthentication) MarshalJSON() ([]byte, error) {
type NoMethod BasicAuthentication
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// BasicSli: An SLI measuring performance on a well-known service type.
// Performance will be computed on the basis of pre-defined metrics. The
// type of the service_resource determines the metrics to use and the
// service_resource.labels and metric_labels are used to construct a
// monitoring filter to filter that metric down to just the data
// relevant to this service.
type BasicSli struct {
// Availability: Good service is defined to be the count of requests
// made to this service that return successfully.
Availability *AvailabilityCriteria `json:"availability,omitempty"`
// Latency: Good service is defined to be the count of requests made to
// this service that are fast enough with respect to latency.threshold.
Latency *LatencyCriteria `json:"latency,omitempty"`
// Location: OPTIONAL: The set of locations to which this SLI is
// relevant. Telemetry from other locations will not be used to
// calculate performance for this SLI. If omitted, this SLI applies to
// all locations in which the Service has activity. For service types
// that don't support breaking down by location, setting this field will
// result in an error.
Location []string `json:"location,omitempty"`
// Method: OPTIONAL: The set of RPCs to which this SLI is relevant.
// Telemetry from other methods will not be used to calculate
// performance for this SLI. If omitted, this SLI applies to all the
// Service's methods. For service types that don't support breaking down
// by method, setting this field will result in an error.
Method []string `json:"method,omitempty"`
// Version: OPTIONAL: The set of API versions to which this SLI is
// relevant. Telemetry from other API versions will not be used to
// calculate performance for this SLI. If omitted, this SLI applies to
// all API versions. For service types that don't support breaking down
// by version, setting this field will result in an error.
Version []string `json:"version,omitempty"`
// ForceSendFields is a list of field names (e.g. "Availability") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Availability") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *BasicSli) MarshalJSON() ([]byte, error) {
type NoMethod BasicSli
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// BucketOptions: BucketOptions describes the bucket boundaries used to
// create a histogram for the distribution. The buckets can be in a
// linear sequence, an exponential sequence, or each bucket can be
// specified explicitly. BucketOptions does not include the number of
// values in each bucket.A bucket has an inclusive lower bound and
// exclusive upper bound for the values that are counted for that
// bucket. The upper bound of a bucket must be strictly greater than the
// lower bound. The sequence of N buckets for a distribution consists of
// an underflow bucket (number 0), zero or more finite buckets (number 1
// through N - 2) and an overflow bucket (number N - 1). The buckets are
// contiguous: the lower bound of bucket i (i > 0) is the same as the
// upper bound of bucket i - 1. The buckets span the whole range of
// finite values: lower bound of the underflow bucket is -infinity and
// the upper bound of the overflow bucket is +infinity. The finite
// buckets are so-called because both bounds are finite.
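//
// For example, a hypothetical linear layout with offset 0, width 10, and
// three finite buckets describes five buckets in total: the underflow
// bucket (-infinity, 0), the finite buckets [0, 10), [10, 20), [20, 30),
// and the overflow bucket [30, +infinity).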
type BucketOptions struct {
// ExplicitBuckets: The explicit buckets.
ExplicitBuckets *Explicit `json:"explicitBuckets,omitempty"`
// ExponentialBuckets: The exponential buckets.
ExponentialBuckets *Exponential `json:"exponentialBuckets,omitempty"`
// LinearBuckets: The linear bucket.
LinearBuckets *Linear `json:"linearBuckets,omitempty"`
// ForceSendFields is a list of field names (e.g. "ExplicitBuckets") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "ExplicitBuckets") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *BucketOptions) MarshalJSON() ([]byte, error) {
type NoMethod BucketOptions
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// CloudEndpoints: Cloud Endpoints service. Learn more at
// https://cloud.google.com/endpoints.
type CloudEndpoints struct {
// Service: The name of the Cloud Endpoints service underlying this
// service. Corresponds to the service resource label in the api
// monitored resource:
// https://cloud.google.com/monitoring/api/resources#tag_api
Service string `json:"service,omitempty"`
// ForceSendFields is a list of field names (e.g. "Service") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Service") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *CloudEndpoints) MarshalJSON() ([]byte, error) {
type NoMethod CloudEndpoints
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// ClusterIstio: Istio service. Learn more at http://istio.io.
type ClusterIstio struct {
// ClusterName: The name of the Kubernetes cluster in which this Istio
// service is defined. Corresponds to the cluster_name resource label in
// k8s_cluster resources.
ClusterName string `json:"clusterName,omitempty"`
// Location: The location of the Kubernetes cluster in which this Istio
// service is defined. Corresponds to the location resource label in
// k8s_cluster resources.
Location string `json:"location,omitempty"`
// ServiceName: The name of the Istio service underlying this service.
// Corresponds to the destination_service_name metric label in Istio
// metrics.
ServiceName string `json:"serviceName,omitempty"`
// ServiceNamespace: The namespace of the Istio service underlying this
// service. Corresponds to the destination_service_namespace metric
// label in Istio metrics.
ServiceNamespace string `json:"serviceNamespace,omitempty"`
// ForceSendFields is a list of field names (e.g. "ClusterName") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "ClusterName") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *ClusterIstio) MarshalJSON() ([]byte, error) {
type NoMethod ClusterIstio
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// CollectdPayload: A collection of data points sent from a
// collectd-based plugin. See the collectd documentation for more
// information.
type CollectdPayload struct {
// EndTime: The end time of the interval.
EndTime string `json:"endTime,omitempty"`
// Metadata: The measurement metadata. Example: "process_id" -> 12345
Metadata map[string]TypedValue `json:"metadata,omitempty"`
// Plugin: The name of the plugin. Example: "disk".
Plugin string `json:"plugin,omitempty"`
// PluginInstance: The instance name of the plugin. Example: "hdcl".
PluginInstance string `json:"pluginInstance,omitempty"`
// StartTime: The start time of the interval.
StartTime string `json:"startTime,omitempty"`
// Type: The measurement type. Example: "memory".
Type string `json:"type,omitempty"`
// TypeInstance: The measurement type instance. Example: "used".
TypeInstance string `json:"typeInstance,omitempty"`
// Values: The measured values during this time interval. Each value
// must have a different dataSourceName.
Values []*CollectdValue `json:"values,omitempty"`
// ForceSendFields is a list of field names (e.g. "EndTime") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "EndTime") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *CollectdPayload) MarshalJSON() ([]byte, error) {
type NoMethod CollectdPayload
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// CollectdPayloadError: Describes the error status for payloads that
// were not written.
type CollectdPayloadError struct {
// Error: Records the error status for the payload. If this field is
// present, the partial errors for nested values won't be populated.
Error *Status `json:"error,omitempty"`
// Index: The zero-based index in
// CreateCollectdTimeSeriesRequest.collectd_payloads.
Index int64 `json:"index,omitempty"`
// ValueErrors: Records the error status for values that were not
// written due to an error.Failed payloads for which nothing is written
// will not include partial value errors.
ValueErrors []*CollectdValueError `json:"valueErrors,omitempty"`
// ForceSendFields is a list of field names (e.g. "Error") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Error") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *CollectdPayloadError) MarshalJSON() ([]byte, error) {
type NoMethod CollectdPayloadError
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// CollectdValue: A single data point from a collectd-based plugin.
type CollectdValue struct {
// DataSourceName: The data source for the collectd value. For example
// there are two data sources for network measurements: "rx" and "tx".
DataSourceName string `json:"dataSourceName,omitempty"`
// DataSourceType: The type of measurement.
//
// Possible values:
// "UNSPECIFIED_DATA_SOURCE_TYPE" - An unspecified data source type.
// This corresponds to
// google.api.MetricDescriptor.MetricKind.METRIC_KIND_UNSPECIFIED.
// "GAUGE" - An instantaneous measurement of a varying quantity. This
// corresponds to google.api.MetricDescriptor.MetricKind.GAUGE.
// "COUNTER" - A cumulative value over time. This corresponds to
// google.api.MetricDescriptor.MetricKind.CUMULATIVE.
// "DERIVE" - A rate of change of the measurement.
// "ABSOLUTE" - An amount of change since the last measurement
// interval. This corresponds to
// google.api.MetricDescriptor.MetricKind.DELTA.
DataSourceType string `json:"dataSourceType,omitempty"`
// Value: The measurement value.
Value *TypedValue `json:"value,omitempty"`
// ForceSendFields is a list of field names (e.g. "DataSourceName") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "DataSourceName") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *CollectdValue) MarshalJSON() ([]byte, error) {
type NoMethod CollectdValue
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// CollectdValueError: Describes the error status for values that were
// not written.
type CollectdValueError struct {
// Error: Records the error status for the value.
Error *Status `json:"error,omitempty"`
// Index: The zero-based index in CollectdPayload.values within the
// parent CreateCollectdTimeSeriesRequest.collectd_payloads.
Index int64 `json:"index,omitempty"`
// ForceSendFields is a list of field names (e.g. "Error") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Error") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *CollectdValueError) MarshalJSON() ([]byte, error) {
type NoMethod CollectdValueError
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// Condition: A condition is a true/false test that determines when an
// alerting policy should open an incident. If a condition evaluates to
// true, it signifies that something is wrong.
type Condition struct {
// ConditionAbsent: A condition that checks that a time series continues
// to receive new data points.
ConditionAbsent *MetricAbsence `json:"conditionAbsent,omitempty"`
// ConditionThreshold: A condition that compares a time series against a
// threshold.
ConditionThreshold *MetricThreshold `json:"conditionThreshold,omitempty"`
// DisplayName: A short name or phrase used to identify the condition in
// dashboards, notifications, and incidents. To avoid confusion, don't
// use the same display name for multiple conditions in the same policy.
DisplayName string `json:"displayName,omitempty"`
// Name: Required if the condition exists. The unique resource name for
// this condition. Its syntax is:
// projects/[PROJECT_ID]/alertPolicies/[POLICY_ID]/conditions/[CONDITION_ID]
// [CONDITION_ID] is assigned by Stackdriver Monitoring when the
// condition is created as part of a new or updated alerting policy.When
// calling the alertPolicies.create method, do not include the name
// field in the conditions of the requested alerting policy. Stackdriver
// Monitoring creates the condition identifiers and includes them in the
// new policy.When calling the alertPolicies.update method to update a
// policy, including a condition name causes the existing condition to
// be updated. Conditions without names are added to the updated policy.
// Existing conditions are deleted if they are not updated.Best practice
// is to preserve [CONDITION_ID] if you make only small changes, such as
// those to condition thresholds, durations, or trigger values.
// Otherwise, treat the change as a new condition and let the existing
// condition be deleted.
Name string `json:"name,omitempty"`
// ForceSendFields is a list of field names (e.g. "ConditionAbsent") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "ConditionAbsent") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *Condition) MarshalJSON() ([]byte, error) {
type NoMethod Condition
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// ContentMatcher: Optional. Used to perform content matching. This
// allows matching based on substrings and regular expressions, together
// with their negations. Only the first 4 MB of an HTTP or HTTPS
// check's response (and the first 1 MB of a TCP check's response)
// are examined for purposes of content matching.
type ContentMatcher struct {
// Content: String or regex content to match. Maximum 1024 bytes. An
// empty content string indicates no content matching is to be
// performed.
Content string `json:"content,omitempty"`
// Matcher: The type of content matcher that will be applied to the
// server output, compared to the content string when the check is run.
//
// Possible values:
// "CONTENT_MATCHER_OPTION_UNSPECIFIED" - No content matcher type
// specified (maintained for backward compatibility, but deprecated for
// future use). Treated as CONTAINS_STRING.
// "CONTAINS_STRING" - Selects substring matching. The match succeeds
// if the output contains the content string. This is the default value
// for checks without a matcher option, or where the value of matcher is
// CONTENT_MATCHER_OPTION_UNSPECIFIED.
// "NOT_CONTAINS_STRING" - Selects negation of substring matching. The
// match succeeds if the output does NOT contain the content string.
// "MATCHES_REGEX" - Selects regular-expression matching. The match
// succeeds if the output matches the regular expression specified in
// the content string.
// "NOT_MATCHES_REGEX" - Selects negation of regular-expression
// matching. The match succeeds if the output does NOT match the regular
// expression specified in the content string.
Matcher string `json:"matcher,omitempty"`
// ForceSendFields is a list of field names (e.g. "Content") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Content") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *ContentMatcher) MarshalJSON() ([]byte, error) {
type NoMethod ContentMatcher
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
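// exampleContentMatcher is an illustrative sketch, not part of the
// generated API surface: a matcher that fails the check whenever an
// assumed error string appears in the response body, using the
// NOT_CONTAINS_STRING option documented above.
func exampleContentMatcher() *ContentMatcher {
	return &ContentMatcher{
		Content: "Internal Server Error", // assumed substring; max 1024 bytes
		Matcher: "NOT_CONTAINS_STRING",   // match succeeds when the string is absent
	}
}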
// CreateCollectdTimeSeriesRequest: The CreateCollectdTimeSeries
// request.
type CreateCollectdTimeSeriesRequest struct {
// CollectdPayloads: The collectd payloads representing the time series
// data. You must not include more than a single point for each time
// series, so no two payloads can have the same values for all of the
// fields plugin, plugin_instance, type, and type_instance.
CollectdPayloads []*CollectdPayload `json:"collectdPayloads,omitempty"`
// CollectdVersion: The version of collectd that collected the data.
// Example: "5.3.0-192.el6".
CollectdVersion string `json:"collectdVersion,omitempty"`
// Resource: The monitored resource associated with the time series.
Resource *MonitoredResource `json:"resource,omitempty"`
// ForceSendFields is a list of field names (e.g. "CollectdPayloads") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "CollectdPayloads") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *CreateCollectdTimeSeriesRequest) MarshalJSON() ([]byte, error) {
type NoMethod CreateCollectdTimeSeriesRequest
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// CreateCollectdTimeSeriesResponse: The CreateCollectdTimeSeries
// response.
type CreateCollectdTimeSeriesResponse struct {
// PayloadErrors: Records the error status for points that were not
// written due to an error in the request.Failed requests for which
// nothing is written will return an error response instead. Requests
// where data points were rejected by the backend will set summary
// instead.
PayloadErrors []*CollectdPayloadError `json:"payloadErrors,omitempty"`
// Summary: Aggregate statistics from writing the payloads. This field
// is omitted if all points were successfully written, so that the
// response is empty. This is for backwards compatibility with clients
// that log errors on any non-empty response.
Summary *CreateTimeSeriesSummary `json:"summary,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "PayloadErrors") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "PayloadErrors") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *CreateCollectdTimeSeriesResponse) MarshalJSON() ([]byte, error) {
type NoMethod CreateCollectdTimeSeriesResponse
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
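// summarizeCollectdWrite is an illustrative helper, not part of the
// generated API surface. It reflects the field semantics above: a nil
// Summary means every point was written; otherwise Summary carries the
// success/total counts and PayloadErrors the per-point statuses.
func summarizeCollectdWrite(resp *CreateCollectdTimeSeriesResponse) string {
	if resp.Summary == nil {
		return "all points written"
	}
	return fmt.Sprintf("%d/%d points written (%d payloads with errors)",
		resp.Summary.SuccessPointCount, resp.Summary.TotalPointCount,
		len(resp.PayloadErrors))
}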
// CreateTimeSeriesRequest: The CreateTimeSeries request.
type CreateTimeSeriesRequest struct {
// TimeSeries: The new data to be added to a list of time series. Adds
// at most one data point to each of several time series. The new data
// point must be more recent than any other point in its time series.
// Each TimeSeries value must fully specify a unique time series by
// supplying all label values for the metric and the monitored
// resource.The maximum number of TimeSeries objects per Create request
// is 200.
TimeSeries []*TimeSeries `json:"timeSeries,omitempty"`
// ForceSendFields is a list of field names (e.g. "TimeSeries") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "TimeSeries") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *CreateTimeSeriesRequest) MarshalJSON() ([]byte, error) {
type NoMethod CreateTimeSeriesRequest
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
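// exampleCreateTimeSeriesRequest is an illustrative sketch, not part of
// the generated API surface. It enforces the documented limit of at
// most 200 TimeSeries objects per Create request; construction of each
// *TimeSeries (metric, resource, and single new point) is elided.
func exampleCreateTimeSeriesRequest(series []*TimeSeries) (*CreateTimeSeriesRequest, error) {
	if len(series) > 200 {
		return nil, fmt.Errorf("too many time series in one request: %d > 200", len(series))
	}
	return &CreateTimeSeriesRequest{TimeSeries: series}, nil
}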
// CreateTimeSeriesSummary: Summary of the result of a failed request to
// write data to a time series.
type CreateTimeSeriesSummary struct {
// Errors: The number of points that failed to be written. Order is not
// guaranteed.
Errors []*Error `json:"errors,omitempty"`
// SuccessPointCount: The number of points that were successfully
// written.
SuccessPointCount int64 `json:"successPointCount,omitempty"`
// TotalPointCount: The number of points in the request.
TotalPointCount int64 `json:"totalPointCount,omitempty"`
// ForceSendFields is a list of field names (e.g. "Errors") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Errors") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *CreateTimeSeriesSummary) MarshalJSON() ([]byte, error) {
type NoMethod CreateTimeSeriesSummary
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// Custom: Custom view of service telemetry. Currently a place-holder
// pending final design.
type Custom struct {
}
// Distribution: Distribution contains summary statistics for a
// population of values. It optionally contains a histogram representing
// the distribution of those values across a set of buckets.The summary
// statistics are the count, mean, sum of the squared deviation from the
// mean, the minimum, and the maximum of the set of population of
// values. The histogram is based on a sequence of buckets and gives a
// count of values that fall into each bucket. The boundaries of the
// buckets are given either explicitly or by formulas for buckets of
// fixed or exponentially increasing widths.Although it is not
// forbidden, it is generally a bad idea to include non-finite values
// (infinities or NaNs) in the population of values, as this will render
// the mean and sum_of_squared_deviation fields meaningless.
type Distribution struct {
// BucketCounts: Required in the Stackdriver Monitoring API v3. The
// values for each bucket specified in bucket_options. The sum of the
// values in bucketCounts must equal the value in the count field of the
// Distribution object. The order of the bucket counts follows the
// numbering schemes described for the three bucket types. The underflow
// bucket has number 0; the finite buckets, if any, have numbers 1
// through N-2; and the overflow bucket has number N-1. The size of
// bucket_counts must not be greater than N. If the size is less than N,
// then the remaining buckets are assigned values of zero.
BucketCounts googleapi.Int64s `json:"bucketCounts,omitempty"`
// BucketOptions: Required in the Stackdriver Monitoring API v3. Defines
// the histogram bucket boundaries.
BucketOptions *BucketOptions `json:"bucketOptions,omitempty"`
// Count: The number of values in the population. Must be non-negative.
// This value must equal the sum of the values in bucket_counts if a
// histogram is provided.
Count int64 `json:"count,omitempty,string"`
// Exemplars: Must be in increasing order of value field.
Exemplars []*Exemplar `json:"exemplars,omitempty"`
// Mean: The arithmetic mean of the values in the population. If count
// is zero then this field must be zero.
Mean float64 `json:"mean,omitempty"`
// Range: If specified, contains the range of the population values. The
// field must not be present if the count is zero. This field is
// presently ignored by the Stackdriver Monitoring API v3.
Range *Range `json:"range,omitempty"`
// SumOfSquaredDeviation: The sum of squared deviations from the mean of
// the values in the population. For values x_i this
// is:
// Sum[i=1..n]((x_i - mean)^2)
// Knuth, "The Art of Computer Programming", Vol. 2, page 323, 3rd
// edition describes Welford's method for accumulating this sum in one
// pass.If count is zero then this field must be zero.
SumOfSquaredDeviation float64 `json:"sumOfSquaredDeviation,omitempty"`
// ForceSendFields is a list of field names (e.g. "BucketCounts") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "BucketCounts") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Distribution) MarshalJSON() ([]byte, error) {
type NoMethod Distribution
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
func (s *Distribution) UnmarshalJSON(data []byte) error {
type NoMethod Distribution
var s1 struct {
Mean gensupport.JSONFloat64 `json:"mean"`
SumOfSquaredDeviation gensupport.JSONFloat64 `json:"sumOfSquaredDeviation"`
*NoMethod
}
s1.NoMethod = (*NoMethod)(s)
if err := json.Unmarshal(data, &s1); err != nil {
return err
}
s.Mean = float64(s1.Mean)
s.SumOfSquaredDeviation = float64(s1.SumOfSquaredDeviation)
return nil
}
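// distributionCountsConsistent is an illustrative sketch, not part of
// the generated API surface. It checks the invariant documented above:
// when a histogram is provided, the values in BucketCounts must sum to
// the value of the Count field.
func distributionCountsConsistent(d *Distribution) bool {
	if len(d.BucketCounts) == 0 {
		return true // no histogram provided, nothing to cross-check
	}
	var sum int64
	for _, c := range d.BucketCounts {
		sum += c
	}
	return sum == d.Count
}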
// DistributionCut: A DistributionCut defines a TimeSeries and
// thresholds used for measuring good service and total service. The
// TimeSeries must have ValueType =
// DISTRIBUTION and MetricKind = DELTA or MetricKind = CUMULATIVE. The
// computed good_service will be the count of values x in the
// Distribution such that range.min <= x < range.max.
type DistributionCut struct {
// DistributionFilter: A monitoring filter
// (https://cloud.google.com/monitoring/api/v3/filters) specifying a
// TimeSeries aggregating values. Must have ValueType =
// DISTRIBUTION and MetricKind = DELTA or MetricKind = CUMULATIVE.
DistributionFilter string `json:"distributionFilter,omitempty"`
// Range: Range of values considered "good." For a one-sided range, set
// one bound to an infinite value.
Range *GoogleMonitoringV3Range `json:"range,omitempty"`
// ForceSendFields is a list of field names (e.g. "DistributionFilter")
// to unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "DistributionFilter") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *DistributionCut) MarshalJSON() ([]byte, error) {
type NoMethod DistributionCut
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// Documentation: A content string and a MIME type that describes the
// content string's format.
type Documentation struct {
// Content: The text of the documentation, interpreted according to
// mime_type. The content may not exceed 8,192 Unicode characters and
// may not exceed 10,240 bytes when encoded in UTF-8 format, whichever
// limit is smaller.
Content string `json:"content,omitempty"`
// MimeType: The format of the content field. Presently, only the value
// "text/markdown" is supported. See Markdown
// (https://en.wikipedia.org/wiki/Markdown) for more information.
MimeType string `json:"mimeType,omitempty"`
// ForceSendFields is a list of field names (e.g. "Content") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Content") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Documentation) MarshalJSON() ([]byte, error) {
type NoMethod Documentation
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
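// exampleDocumentation is an illustrative sketch, not part of the
// generated API surface. Per the MimeType documentation above, only
// "text/markdown" is currently supported; the content is an assumed
// runbook snippet well under the 8,192-character/10,240-byte limits.
func exampleDocumentation() *Documentation {
	return &Documentation{
		Content:  "## Runbook\nRestart the frontend and check recent deploys.", // assumed text
		MimeType: "text/markdown",
	}
}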
// DroppedLabels: A set of (label, value) pairs which were dropped
// during aggregation, attached to google.api.Distribution.Exemplars in
// google.api.Distribution values during aggregation.These values are
// used in combination with the label values that remain on the
// aggregated Distribution timeseries to construct the full label set
// for the exemplar values. The resulting full label set may be used to
// identify the specific task/job/instance (for example) which may be
// contributing to a long-tail, while allowing the storage savings of
// only storing aggregated distribution values for a large group.Note
// that there are no guarantees on ordering of the labels from
// exemplar-to-exemplar and from distribution-to-distribution in the
// same stream, and there may be duplicates. It is up to clients to
// resolve any ambiguities.
type DroppedLabels struct {
// Label: Map from label to its value, for all labels dropped in any
// aggregation.
Label map[string]string `json:"label,omitempty"`
// ForceSendFields is a list of field names (e.g. "Label") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Label") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *DroppedLabels) MarshalJSON() ([]byte, error) {
type NoMethod DroppedLabels
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// Empty: A generic empty message that you can re-use to avoid defining
// duplicated empty messages in your APIs. A typical example is to use
// it as the request or the response type of an API method. For
// instance:
// service Foo {
// rpc Bar(google.protobuf.Empty) returns
// (google.protobuf.Empty);
// }
// The JSON representation for Empty is empty JSON object {}.
type Empty struct {
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
}
// Error: Detailed information about an error category.
type Error struct {
// PointCount: The number of points that couldn't be written because of
// status.
PointCount int64 `json:"pointCount,omitempty"`
// Status: The status of the requested write operation.
Status *Status `json:"status,omitempty"`
// ForceSendFields is a list of field names (e.g. "PointCount") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "PointCount") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Error) MarshalJSON() ([]byte, error) {
type NoMethod Error
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// Exemplar: Exemplars are example points that may be used to annotate
// aggregated distribution values. They are metadata that gives
// information about a particular value added to a Distribution bucket,
// such as a trace ID that was active when a value was added. They may
// contain further information, such as example values and timestamps,
// origin, etc.
type Exemplar struct {
// Attachments: Contextual information about the example value. Examples
// are:
// Trace: type.googleapis.com/google.monitoring.v3.SpanContext
// Literal string: type.googleapis.com/google.protobuf.StringValue
// Labels dropped during aggregation:
// type.googleapis.com/google.monitoring.v3.DroppedLabels
// There may be only a single attachment of any given message type in a
// single exemplar, and this is enforced by the system.
Attachments []googleapi.RawMessage `json:"attachments,omitempty"`
// Timestamp: The observation (sampling) time of the above value.
Timestamp string `json:"timestamp,omitempty"`
// Value: Value of the exemplar point. This value determines to which
// bucket the exemplar belongs.
Value float64 `json:"value,omitempty"`
// ForceSendFields is a list of field names (e.g. "Attachments") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Attachments") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Exemplar) MarshalJSON() ([]byte, error) {
type NoMethod Exemplar
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
func (s *Exemplar) UnmarshalJSON(data []byte) error {
type NoMethod Exemplar
var s1 struct {
Value gensupport.JSONFloat64 `json:"value"`
*NoMethod
}
s1.NoMethod = (*NoMethod)(s)
if err := json.Unmarshal(data, &s1); err != nil {
return err
}
s.Value = float64(s1.Value)
return nil
}
// Explicit: Specifies a set of buckets with arbitrary widths.There are
// size(bounds) + 1 (= N) buckets. Bucket i has the following
// boundaries:Upper bound (0 <= i < N-1): bounds[i]. Lower bound (1 <= i <
// N): bounds[i - 1].The bounds field must contain at least one element. If
// bounds has only one element, then there are no finite buckets, and
// that single element is the common boundary of the overflow and
// underflow buckets.
type Explicit struct {
// Bounds: The values must be monotonically increasing.
Bounds []float64 `json:"bounds,omitempty"`
// ForceSendFields is a list of field names (e.g. "Bounds") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Bounds") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Explicit) MarshalJSON() ([]byte, error) {
type NoMethod Explicit
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
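// exampleExplicitBuckets is an illustrative sketch, not part of the
// generated API surface. With bounds {1, 5, 10} there are
// size(bounds) + 1 = 4 buckets per the rules above: (-inf, 1), [1, 5),
// [5, 10), and [10, +inf).
func exampleExplicitBuckets() *Explicit {
	return &Explicit{
		Bounds: []float64{1, 5, 10}, // must be monotonically increasing
	}
}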
// Exponential: Specifies an exponential sequence of buckets that have a
// width that is proportional to the value of the lower bound. Each
// bucket represents a constant relative uncertainty on a specific value
// in the bucket.There are num_finite_buckets + 2 (= N) buckets. Bucket
// i has the following boundaries:Upper bound (0 <= i < N-1): scale *
// (growth_factor ^ i). Lower bound (1 <= i < N): scale *
// (growth_factor ^ (i - 1)).
type Exponential struct {
// GrowthFactor: Must be greater than 1.
GrowthFactor float64 `json:"growthFactor,omitempty"`
// NumFiniteBuckets: Must be greater than 0.
NumFiniteBuckets int64 `json:"numFiniteBuckets,omitempty"`
// Scale: Must be greater than 0.
Scale float64 `json:"scale,omitempty"`
// ForceSendFields is a list of field names (e.g. "GrowthFactor") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "GrowthFactor") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Exponential) MarshalJSON() ([]byte, error) {
type NoMethod Exponential
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
func (s *Exponential) UnmarshalJSON(data []byte) error {
type NoMethod Exponential
var s1 struct {
GrowthFactor gensupport.JSONFloat64 `json:"growthFactor"`
Scale gensupport.JSONFloat64 `json:"scale"`
*NoMethod
}
s1.NoMethod = (*NoMethod)(s)
if err := json.Unmarshal(data, &s1); err != nil {
return err
}
s.GrowthFactor = float64(s1.GrowthFactor)
s.Scale = float64(s1.Scale)
return nil
}
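// exponentialUpperBounds is an illustrative sketch, not part of the
// generated API surface. It lists the upper bound scale *
// (growth_factor ^ i) of each finite bucket i (1 <= i <=
// num_finite_buckets), mirroring the boundary formulas documented
// above; the multiplication is iterated to avoid importing math.
func exponentialUpperBounds(e *Exponential) []float64 {
	bounds := make([]float64, 0, e.NumFiniteBuckets)
	upper := e.Scale
	for i := int64(0); i < e.NumFiniteBuckets; i++ {
		upper *= e.GrowthFactor // now scale * growth_factor^(i+1), the bound of finite bucket i+1
		bounds = append(bounds, upper)
	}
	return bounds
}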
// Field: A single field of a message type.
type Field struct {
// Cardinality: The field cardinality.
//
// Possible values:
// "CARDINALITY_UNKNOWN" - For fields with unknown cardinality.
// "CARDINALITY_OPTIONAL" - For optional fields.
// "CARDINALITY_REQUIRED" - For required fields. Proto2 syntax only.
// "CARDINALITY_REPEATED" - For repeated fields.
Cardinality string `json:"cardinality,omitempty"`
// DefaultValue: The string value of the default value of this field.
// Proto2 syntax only.
DefaultValue string `json:"defaultValue,omitempty"`
// JsonName: The field JSON name.
JsonName string `json:"jsonName,omitempty"`
// Kind: The field type.
//
// Possible values:
// "TYPE_UNKNOWN" - Field type unknown.
// "TYPE_DOUBLE" - Field type double.
// "TYPE_FLOAT" - Field type float.
// "TYPE_INT64" - Field type int64.
// "TYPE_UINT64" - Field type uint64.
// "TYPE_INT32" - Field type int32.
// "TYPE_FIXED64" - Field type fixed64.
// "TYPE_FIXED32" - Field type fixed32.
// "TYPE_BOOL" - Field type bool.
// "TYPE_STRING" - Field type string.
// "TYPE_GROUP" - Field type group. Proto2 syntax only, and
// deprecated.
// "TYPE_MESSAGE" - Field type message.
// "TYPE_BYTES" - Field type bytes.
// "TYPE_UINT32" - Field type uint32.
// "TYPE_ENUM" - Field type enum.
// "TYPE_SFIXED32" - Field type sfixed32.
// "TYPE_SFIXED64" - Field type sfixed64.
// "TYPE_SINT32" - Field type sint32.
// "TYPE_SINT64" - Field type sint64.
Kind string `json:"kind,omitempty"`
// Name: The field name.
Name string `json:"name,omitempty"`
// Number: The field number.
Number int64 `json:"number,omitempty"`
// OneofIndex: The index of the field type in Type.oneofs, for message
// or enumeration types. The first type has index 1; zero means the type
// is not in the list.
OneofIndex int64 `json:"oneofIndex,omitempty"`
// Options: The protocol buffer options.
Options []*Option `json:"options,omitempty"`
// Packed: Whether to use alternative packed wire representation.
Packed bool `json:"packed,omitempty"`
// TypeUrl: The field type URL, without the scheme, for message or
// enumeration types. Example:
// "type.googleapis.com/google.protobuf.Timestamp".
TypeUrl string `json:"typeUrl,omitempty"`
// ForceSendFields is a list of field names (e.g. "Cardinality") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Cardinality") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Field) MarshalJSON() ([]byte, error) {
type NoMethod Field
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// GetNotificationChannelVerificationCodeRequest: The
// GetNotificationChannelVerificationCode request.
type GetNotificationChannelVerificationCodeRequest struct {
// ExpireTime: The desired expiration time. If specified, the API will
// guarantee that the returned code will not be valid after the
// specified timestamp; however, the API cannot guarantee that the
// returned code will be valid for at least as long as the requested
// time (the API puts an upper bound on the amount of time for which a
// code may be valid). If omitted, a default expiration will be used,
// which may be less than the max permissible expiration (so specifying
// an expiration may extend the code's lifetime over omitting an
// expiration, even though the API does impose an upper limit on the
// maximum expiration that is permitted).
ExpireTime string `json:"expireTime,omitempty"`
// ForceSendFields is a list of field names (e.g. "ExpireTime") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "ExpireTime") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *GetNotificationChannelVerificationCodeRequest) MarshalJSON() ([]byte, error) {
type NoMethod GetNotificationChannelVerificationCodeRequest
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// GetNotificationChannelVerificationCodeResponse: The
// GetNotificationChannelVerificationCode response.
type GetNotificationChannelVerificationCodeResponse struct {
// Code: The verification code, which may be used to verify other
// channels that have an equivalent identity (i.e. other channels of the
// same type with the same fingerprint such as other email channels with
// the same email address or other SMS channels with the same number).
Code string `json:"code,omitempty"`
// ExpireTime: The expiration time associated with the code that was
// returned. If an expiration was provided in the request, this is the
// minimum of the requested expiration in the request and the max
// permitted expiration.
ExpireTime string `json:"expireTime,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "Code") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Code") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *GetNotificationChannelVerificationCodeResponse) MarshalJSON() ([]byte, error) {
type NoMethod GetNotificationChannelVerificationCodeResponse
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// GoogleMonitoringV3Range: Range of numerical values, inclusive of min
// and exclusive of max. If the open range "< range.max" is desired, set
// range.min = -infinity. If the open range ">= range.min" is desired,
// set range.max = infinity.
type GoogleMonitoringV3Range struct {
// Max: Range maximum.
Max float64 `json:"max,omitempty"`
// Min: Range minimum.
Min float64 `json:"min,omitempty"`
// ForceSendFields is a list of field names (e.g. "Max") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Max") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *GoogleMonitoringV3Range) MarshalJSON() ([]byte, error) {
type NoMethod GoogleMonitoringV3Range
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
func (s *GoogleMonitoringV3Range) UnmarshalJSON(data []byte) error {
type NoMethod GoogleMonitoringV3Range
var s1 struct {
Max gensupport.JSONFloat64 `json:"max"`
Min gensupport.JSONFloat64 `json:"min"`
*NoMethod
}
s1.NoMethod = (*NoMethod)(s)
if err := json.Unmarshal(data, &s1); err != nil {
return err
}
s.Max = float64(s1.Max)
s.Min = float64(s1.Min)
return nil
}
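// rangeContains is an illustrative sketch, not part of the generated
// API surface. It encodes the semantics documented above: Min is
// inclusive and Max is exclusive, so one-sided ranges are expressed by
// setting the unused bound to -infinity or +infinity.
func rangeContains(r *GoogleMonitoringV3Range, x float64) bool {
	return x >= r.Min && x < r.Max
}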
// Group: The description of a dynamic collection of monitored
// resources. Each group has a filter that is matched against monitored
// resources and their associated metadata. If a group's filter matches
// an available monitored resource, then that resource is a member of
// that group. Groups can contain any number of monitored resources, and
// each monitored resource can be a member of any number of
// groups.Groups can be nested in parent-child hierarchies. The
// parentName field identifies an optional parent for each group. If a
// group has a parent, then the only monitored resources available to be
// matched by the group's filter are the resources contained in the
// parent group. In other words, a group contains the monitored
// resources that match its filter and the filters of all the group's
// ancestors. A group without a parent can contain any monitored
// resource.For example, consider an infrastructure running a set of
// instances with two user-defined tags: "environment" and "role". A
// parent group has a filter, environment="production". A child of that
// parent group has a filter, role="transcoder". The parent group
// contains all instances in the production environment, regardless of
// their roles. The child group contains instances that have the
// transcoder role and are in the production environment.The monitored
// resources contained in a group can change at any moment, depending on
// what resources exist and what filters are associated with the group
// and its ancestors.
type Group struct {
// DisplayName: A user-assigned name for this group, used only for
// display purposes.
DisplayName string `json:"displayName,omitempty"`
// Filter: The filter used to determine which monitored resources belong
// to this group.
Filter string `json:"filter,omitempty"`
// IsCluster: If true, the members of this group are considered to be a
// cluster. The system can perform additional analysis on groups that
// are clusters.
IsCluster bool `json:"isCluster,omitempty"`
// Name: Output only. The name of this group. The format is
// "projects/{project_id_or_number}/groups/{group_id}". When creating a
// group, this field is ignored and a new name is created consisting of
// the project specified in the call to CreateGroup and a unique
// {group_id} that is generated automatically.
Name string `json:"name,omitempty"`
// ParentName: The name of the group's parent, if it has one. The format
// is "projects/{project_id_or_number}/groups/{group_id}". For groups
// with no parent, parentName is the empty string, "".
ParentName string `json:"parentName,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "DisplayName") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "DisplayName") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Group) MarshalJSON() ([]byte, error) {
type NoMethod Group
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
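// exampleGroupHierarchy is an illustrative sketch, not part of the
// generated API surface, mirroring the parent/child example in the
// Group documentation above. The filter expressions and parent name are
// assumptions for demonstration only; see
// https://cloud.google.com/monitoring/api/v3/filters for real syntax.
func exampleGroupHierarchy() (parent, child *Group) {
	parent = &Group{
		DisplayName: "Production",
		Filter:      `metadata.user_labels."environment"="production"`, // assumed filter
	}
	child = &Group{
		DisplayName: "Transcoders",
		Filter:      `metadata.user_labels."role"="transcoder"`, // assumed filter
		ParentName:  "projects/my-project/groups/1234",          // assumed parent name
	}
	return parent, child
}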
// HttpCheck: Information involved in an HTTP/HTTPS Uptime check
// request.
type HttpCheck struct {
// AuthInfo: The authentication information. Optional when creating an
// HTTP check; defaults to empty.
AuthInfo *BasicAuthentication `json:"authInfo,omitempty"`
// Headers: The list of headers to send as part of the Uptime check
// request. If two headers have the same key and different values, they
// should be entered as a single header, with the value being a
// comma-separated list of all the desired values as described at
// https://www.w3.org/Protocols/rfc2616/rfc2616.txt (page 31). Entering
// two separate headers with the same key in a Create call will cause
// the first to be overwritten by the second. The maximum number of
// headers allowed is 100.
Headers map[string]string `json:"headers,omitempty"`
// MaskHeaders: Boolean specifying whether to encrypt the header
// information. Encryption should be specified for any headers related
// to authentication that you do not wish to be seen when retrieving the
// configuration. The server will be responsible for encrypting the
// headers. On Get/List calls, if mask_headers is set to true then the
// headers will be obscured with ******.
MaskHeaders bool `json:"maskHeaders,omitempty"`
// Path: Optional (defaults to "/"). The path to the page against which
// to run the check. Will be combined with the host (specified within
// the monitored_resource) and port to construct the full URL. If the
// provided path does not begin with "/", a "/" will be prepended
// automatically.
Path string `json:"path,omitempty"`
// Port: Optional (defaults to 80 when use_ssl is false, and 443 when
// use_ssl is true). The TCP port on the HTTP server against which to
// run the check. Will be combined with host (specified within the
// monitored_resource) and path to construct the full URL.
Port int64 `json:"port,omitempty"`
// UseSsl: If true, use HTTPS instead of HTTP to run the check.
UseSsl bool `json:"useSsl,omitempty"`
// ValidateSsl: Boolean specifying whether to include SSL certificate
// validation as a part of the Uptime check. Only applies to checks
// where monitored_resource is set to uptime_url. If use_ssl is false,
// setting validate_ssl to true has no effect.
ValidateSsl bool `json:"validateSsl,omitempty"`
// ForceSendFields is a list of field names (e.g. "AuthInfo") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "AuthInfo") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *HttpCheck) MarshalJSON() ([]byte, error) {
type NoMethod HttpCheck
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
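// exampleHTTPSCheck is an illustrative sketch, not part of the
// generated API surface. All values are assumptions: per the Port
// documentation above, the port is omitted so it defaults to 443
// because UseSsl is true, and MaskHeaders hides the probe header on
// Get/List calls.
func exampleHTTPSCheck() *HttpCheck {
	return &HttpCheck{
		Path:        "/healthz", // assumed path; "/" is prepended if missing
		UseSsl:      true,       // run the check over HTTPS
		ValidateSsl: true,       // also validate the SSL certificate
		Headers:     map[string]string{"X-Probe": "uptime"}, // assumed header
		MaskHeaders: true,
	}
}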
// InternalChecker: An internal checker allows Uptime checks to run on
// private/internal GCP resources.
type InternalChecker struct {
// DisplayName: The checker's human-readable name. The display name
// should be unique within a Stackdriver Workspace in order to make it
// easier to identify; however, uniqueness is not enforced.
DisplayName string `json:"displayName,omitempty"`
// GcpZone: The GCP zone the Uptime check should egress from. Only
// respected for internal Uptime checks, where internal_network is
// specified.
GcpZone string `json:"gcpZone,omitempty"`
// Name: A unique resource name for this InternalChecker. The format
// is: projects/[PROJECT_ID]/internalCheckers/[INTERNAL_CHECKER_ID].
// [PROJECT_ID] is the Stackdriver Workspace project for the Uptime check
// config associated with the internal checker.
Name string `json:"name,omitempty"`
// Network: The GCP VPC network (https://cloud.google.com/vpc/docs/vpc)
// where the internal resource lives (ex: "default").
Network string `json:"network,omitempty"`
// PeerProjectId: The GCP project ID where the internal checker lives.
// Not necessarily the same as the Workspace project.
PeerProjectId string `json:"peerProjectId,omitempty"`
// State: The current operational state of the internal checker.
//
// Possible values:
// "UNSPECIFIED" - An internal checker should never be in the
// unspecified state.
// "CREATING" - The checker is being created, provisioned, and
// configured. A checker in this state can be returned by
// ListInternalCheckers or GetInternalChecker, as well as by examining
// the long running Operation
// (https://cloud.google.com/apis/design/design_patterns#long_running_operations)
// that created it.
// "RUNNING" - The checker is running and available for use. A checker
// in this state can be returned by ListInternalCheckers or
// GetInternalChecker as well as by examining the long running Operation
// (https://cloud.google.com/apis/design/design_patterns#long_running_operations)
// that created it. If a checker is being torn down, it is
// neither visible nor usable, so there is no "deleting" or "down"
// state.
State string `json:"state,omitempty"`
// ForceSendFields is a list of field names (e.g. "DisplayName") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "DisplayName") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *InternalChecker) MarshalJSON() ([]byte, error) {
type NoMethod InternalChecker
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// LabelDescriptor: A description of a label.
type LabelDescriptor struct {
// Description: A human-readable description for the label.
Description string `json:"description,omitempty"`
// Key: The key for this label. The key must meet the following
// criteria:
// Does not exceed 100 characters.
// Matches the following regular expression: [a-zA-Z][a-zA-Z0-9_]*
// The first character must be an upper- or lower-case letter.
// The remaining characters must be letters, digits, or underscores.
Key string `json:"key,omitempty"`
// ValueType: The type of data that can be assigned to the label.
//
// Possible values:
// "STRING" - A variable-length string, not to exceed 1,024
// characters. This is the default value type.
// "BOOL" - Boolean; true or false.
// "INT64" - A 64-bit signed integer.
ValueType string `json:"valueType,omitempty"`
// ForceSendFields is a list of field names (e.g. "Description") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Description") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *LabelDescriptor) MarshalJSON() ([]byte, error) {
type NoMethod LabelDescriptor
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
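// validLabelKey is an illustrative sketch, not part of the generated
// API surface. It checks the key criteria documented above (at most
// 100 characters, matching [a-zA-Z][a-zA-Z0-9_]*) without pulling in
// the regexp package, which this file does not import.
func validLabelKey(key string) bool {
	if len(key) == 0 || len(key) > 100 {
		return false
	}
	for i, r := range key {
		letter := (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z')
		digit := r >= '0' && r <= '9'
		if i == 0 && !letter {
			return false // first character must be a letter
		}
		if !letter && !digit && r != '_' {
			return false // remaining characters: letters, digits, underscores
		}
	}
	return true
}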
// LatencyCriteria: Parameters for a latency threshold SLI.
type LatencyCriteria struct {
// Threshold: Good service is defined to be the count of requests made
// to this service that return in no more than the threshold duration.
Threshold string `json:"threshold,omitempty"`
// ForceSendFields is a list of field names (e.g. "Threshold") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Threshold") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *LatencyCriteria) MarshalJSON() ([]byte, error) {
type NoMethod LatencyCriteria
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// Linear: Specifies a linear sequence of buckets that all have the same
// width (except overflow and underflow). Each bucket represents a
// constant absolute uncertainty on the specific value in the
// bucket.There are num_finite_buckets + 2 (= N) buckets. Bucket i has
// the following boundaries:Upper bound (0 <= i < N-1): offset + (width
// * i). Lower bound (1 <= i < N): offset + (width * (i - 1)).
type Linear struct {
// NumFiniteBuckets: Must be greater than 0.
NumFiniteBuckets int64 `json:"numFiniteBuckets,omitempty"`
// Offset: Lower bound of the first bucket.
Offset float64 `json:"offset,omitempty"`
// Width: Must be greater than 0.
Width float64 `json:"width,omitempty"`
// ForceSendFields is a list of field names (e.g. "NumFiniteBuckets") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "NumFiniteBuckets") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *Linear) MarshalJSON() ([]byte, error) {
type NoMethod Linear
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
func (s *Linear) UnmarshalJSON(data []byte) error {
type NoMethod Linear
var s1 struct {
Offset gensupport.JSONFloat64 `json:"offset"`
Width gensupport.JSONFloat64 `json:"width"`
*NoMethod
}
s1.NoMethod = (*NoMethod)(s)
if err := json.Unmarshal(data, &s1); err != nil {
return err
}
s.Offset = float64(s1.Offset)
s.Width = float64(s1.Width)
return nil
}
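// linearBounds is an illustrative sketch, not part of the generated API
// surface. It lists every finite-bucket boundary offset + (width * i)
// for i in [0, num_finite_buckets], mirroring the formulas documented
// above; values below offset fall in the underflow bucket and values at
// or above the last boundary fall in the overflow bucket.
func linearBounds(l *Linear) []float64 {
	bounds := make([]float64, 0, l.NumFiniteBuckets+1)
	for i := int64(0); i <= l.NumFiniteBuckets; i++ {
		bounds = append(bounds, l.Offset+l.Width*float64(i))
	}
	return bounds
}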
// ListAlertPoliciesResponse: The protocol for the ListAlertPolicies
// response.
type ListAlertPoliciesResponse struct {
// AlertPolicies: The returned alert policies.
AlertPolicies []*AlertPolicy `json:"alertPolicies,omitempty"`
// NextPageToken: If there might be more results than were returned,
// then this field is set to a non-empty value. To see the additional
// results, use that value as pageToken in the next call to this method.
NextPageToken string `json:"nextPageToken,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "AlertPolicies") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "AlertPolicies") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *ListAlertPoliciesResponse) MarshalJSON() ([]byte, error) {
type NoMethod ListAlertPoliciesResponse
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
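// listAllAlertPolicies is an illustrative pagination sketch, not part
// of the generated API surface. It follows the NextPageToken contract
// documented above: keep passing the returned token as pageToken until
// it comes back empty. It assumes the Projects.AlertPolicies.List call
// defined elsewhere in this package.
func listAllAlertPolicies(s *Service, name string) ([]*AlertPolicy, error) {
	var all []*AlertPolicy
	token := ""
	for {
		call := s.Projects.AlertPolicies.List(name)
		if token != "" {
			call = call.PageToken(token)
		}
		resp, err := call.Do()
		if err != nil {
			return nil, err
		}
		all = append(all, resp.AlertPolicies...)
		if resp.NextPageToken == "" {
			return all, nil
		}
		token = resp.NextPageToken
	}
}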
// ListGroupMembersResponse: The ListGroupMembers response.
type ListGroupMembersResponse struct {
// Members: A set of monitored resources in the group.
Members []*MonitoredResource `json:"members,omitempty"`
// NextPageToken: If there are more results than have been returned,
// then this field is set to a non-empty value. To see the additional
// results, use that value as pageToken in the next call to this method.
NextPageToken string `json:"nextPageToken,omitempty"`
// TotalSize: The total number of elements matching this request.
TotalSize int64 `json:"totalSize,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "Members") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Members") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *ListGroupMembersResponse) MarshalJSON() ([]byte, error) {
type NoMethod ListGroupMembersResponse
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// ListGroupsResponse: The ListGroups response.
type ListGroupsResponse struct {
// Group: The groups that match the specified filters.
Group []*Group `json:"group,omitempty"`
// NextPageToken: If there are more results than have been returned,
// then this field is set to a non-empty value. To see the additional
// results, use that value as pageToken in the next call to this method.
NextPageToken string `json:"nextPageToken,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "Group") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Group") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *ListGroupsResponse) MarshalJSON() ([]byte, error) {
type NoMethod ListGroupsResponse
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
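// NextPageToken pagination, as described on the response types above, is a
// simple loop: issue the request, and while NextPageToken is non-empty,
// pass it back as the pageToken of the next call. A hedged sketch; the
// fetch callback stands in for whatever List call the caller constructs
// and is an assumption, not part of the generated surface.
func examplePaginateGroups(fetch func(pageToken string) (*ListGroupsResponse, error)) ([]*Group, error) {
	var all []*Group
	token := ""
	for {
		resp, err := fetch(token)
		if err != nil {
			return nil, err
		}
		all = append(all, resp.Group...)
		if resp.NextPageToken == "" {
			return all, nil // empty token: all results have been returned
		}
		token = resp.NextPageToken
	}
}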
// ListMetricDescriptorsResponse: The ListMetricDescriptors response.
type ListMetricDescriptorsResponse struct {
// MetricDescriptors: The metric descriptors that are available to the
// project and that match the value of filter, if present.
MetricDescriptors []*MetricDescriptor `json:"metricDescriptors,omitempty"`
// NextPageToken: If there are more results than have been returned,
// then this field is set to a non-empty value. To see the additional
// results, use that value as pageToken in the next call to this method.
NextPageToken string `json:"nextPageToken,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "MetricDescriptors")
// to unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "MetricDescriptors") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *ListMetricDescriptorsResponse) MarshalJSON() ([]byte, error) {
type NoMethod ListMetricDescriptorsResponse
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// ListMonitoredResourceDescriptorsResponse: The
// ListMonitoredResourceDescriptors response.
type ListMonitoredResourceDescriptorsResponse struct {
// NextPageToken: If there are more results than have been returned,
// then this field is set to a non-empty value. To see the additional
// results, use that value as pageToken in the next call to this method.
NextPageToken string `json:"nextPageToken,omitempty"`
// ResourceDescriptors: The monitored resource descriptors that are
// available to this project and that match filter, if present.
ResourceDescriptors []*MonitoredResourceDescriptor `json:"resourceDescriptors,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "NextPageToken") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "NextPageToken") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *ListMonitoredResourceDescriptorsResponse) MarshalJSON() ([]byte, error) {
type NoMethod ListMonitoredResourceDescriptorsResponse
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// ListNotificationChannelDescriptorsResponse: The
// ListNotificationChannelDescriptors response.
type ListNotificationChannelDescriptorsResponse struct {
	// ChannelDescriptors: The notification channel descriptors supported
	// for the specified project, optionally filtered.
ChannelDescriptors []*NotificationChannelDescriptor `json:"channelDescriptors,omitempty"`
// NextPageToken: If not empty, indicates that there may be more results
// that match the request. Use the value in the page_token field in a
// subsequent request to fetch the next set of results. If empty, all
// results have been returned.
NextPageToken string `json:"nextPageToken,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "ChannelDescriptors")
// to unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "ChannelDescriptors") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *ListNotificationChannelDescriptorsResponse) MarshalJSON() ([]byte, error) {
type NoMethod ListNotificationChannelDescriptorsResponse
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// ListNotificationChannelsResponse: The ListNotificationChannels
// response.
type ListNotificationChannelsResponse struct {
// NextPageToken: If not empty, indicates that there may be more results
// that match the request. Use the value in the page_token field in a
// subsequent request to fetch the next set of results. If empty, all
// results have been returned.
NextPageToken string `json:"nextPageToken,omitempty"`
// NotificationChannels: The notification channels defined for the
// specified project.
NotificationChannels []*NotificationChannel `json:"notificationChannels,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "NextPageToken") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "NextPageToken") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *ListNotificationChannelsResponse) MarshalJSON() ([]byte, error) {
type NoMethod ListNotificationChannelsResponse
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// ListServiceLevelObjectivesResponse: The ListServiceLevelObjectives
// response.
type ListServiceLevelObjectivesResponse struct {
// NextPageToken: If there are more results than have been returned,
// then this field is set to a non-empty value. To see the additional
// results, use that value as pageToken in the next call to this method.
NextPageToken string `json:"nextPageToken,omitempty"`
// ServiceLevelObjectives: The ServiceLevelObjectives matching the
// specified filter.
ServiceLevelObjectives []*ServiceLevelObjective `json:"serviceLevelObjectives,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "NextPageToken") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "NextPageToken") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *ListServiceLevelObjectivesResponse) MarshalJSON() ([]byte, error) {
type NoMethod ListServiceLevelObjectivesResponse
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// ListServicesResponse: The ListServices response.
type ListServicesResponse struct {
// NextPageToken: If there are more results than have been returned,
// then this field is set to a non-empty value. To see the additional
// results, use that value as pageToken in the next call to this method.
NextPageToken string `json:"nextPageToken,omitempty"`
// Services: The Services matching the specified filter.
Services []*Service `json:"services,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "NextPageToken") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "NextPageToken") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *ListServicesResponse) MarshalJSON() ([]byte, error) {
type NoMethod ListServicesResponse
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// ListTimeSeriesResponse: The ListTimeSeries response.
type ListTimeSeriesResponse struct {
// ExecutionErrors: Query execution errors that may have caused the time
// series data returned to be incomplete.
ExecutionErrors []*Status `json:"executionErrors,omitempty"`
// NextPageToken: If there are more results than have been returned,
// then this field is set to a non-empty value. To see the additional
// results, use that value as pageToken in the next call to this method.
NextPageToken string `json:"nextPageToken,omitempty"`
// TimeSeries: One or more time series that match the filter included in
// the request.
TimeSeries []*TimeSeries `json:"timeSeries,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "ExecutionErrors") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "ExecutionErrors") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *ListTimeSeriesResponse) MarshalJSON() ([]byte, error) {
type NoMethod ListTimeSeriesResponse
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// ListUptimeCheckConfigsResponse: The protocol for the
// ListUptimeCheckConfigs response.
type ListUptimeCheckConfigsResponse struct {
// NextPageToken: This field represents the pagination token to retrieve
// the next page of results. If the value is empty, it means no further
// results for the request. To retrieve the next page of results, the
// value of the next_page_token is passed to the subsequent List method
// call (in the request message's page_token field).
NextPageToken string `json:"nextPageToken,omitempty"`
// TotalSize: The total number of Uptime check configurations for the
// project, irrespective of any pagination.
TotalSize int64 `json:"totalSize,omitempty"`
// UptimeCheckConfigs: The returned Uptime check configurations.
UptimeCheckConfigs []*UptimeCheckConfig `json:"uptimeCheckConfigs,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "NextPageToken") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "NextPageToken") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *ListUptimeCheckConfigsResponse) MarshalJSON() ([]byte, error) {
type NoMethod ListUptimeCheckConfigsResponse
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// ListUptimeCheckIpsResponse: The protocol for the ListUptimeCheckIps
// response.
type ListUptimeCheckIpsResponse struct {
// NextPageToken: This field represents the pagination token to retrieve
// the next page of results. If the value is empty, it means no further
// results for the request. To retrieve the next page of results, the
// value of the next_page_token is passed to the subsequent List method
	// call (in the request message's page_token field). NOTE: this field is
	// not yet implemented.
NextPageToken string `json:"nextPageToken,omitempty"`
// UptimeCheckIps: The returned list of IP addresses (including region
// and location) that the checkers run from.
UptimeCheckIps []*UptimeCheckIp `json:"uptimeCheckIps,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "NextPageToken") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "NextPageToken") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *ListUptimeCheckIpsResponse) MarshalJSON() ([]byte, error) {
type NoMethod ListUptimeCheckIpsResponse
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// Metric: A specific metric, identified by specifying values for all of
// the labels of a MetricDescriptor.
type Metric struct {
// Labels: The set of label values that uniquely identify this metric.
// All labels listed in the MetricDescriptor must be assigned values.
Labels map[string]string `json:"labels,omitempty"`
// Type: An existing metric type, see google.api.MetricDescriptor. For
// example, custom.googleapis.com/invoice/paid/amount.
Type string `json:"type,omitempty"`
// ForceSendFields is a list of field names (e.g. "Labels") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Labels") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Metric) MarshalJSON() ([]byte, error) {
type NoMethod Metric
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
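// A hedged sketch of a Metric literal per the field comments above: Type
// names an existing metric type, and Labels must assign a value to every
// label listed in the corresponding MetricDescriptor. The metric type and
// label key here are illustrative assumptions.
func exampleMetric() *Metric {
	return &Metric{
		Type: "custom.googleapis.com/invoice/paid/amount",
		Labels: map[string]string{
			"currency": "USD", // hypothetical label defined on the descriptor
		},
	}
}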
// MetricAbsence: A condition type that checks that monitored resources
// are reporting data. The configuration defines a metric and a set of
// monitored resources. The predicate is considered in violation when a
// time series for the specified metric of a monitored resource does not
// include any data in the specified duration.
type MetricAbsence struct {
// Aggregations: Specifies the alignment of data points in individual
// time series as well as how to combine the retrieved time series
// together (such as when aggregating multiple streams on each resource
// to a single stream for each resource or when aggregating streams
	// across all members of a group of resources). Multiple aggregations
	// are applied in the order specified. This field is similar to the one
// in the ListTimeSeries request. It is advisable to use the
// ListTimeSeries method when debugging this field.
Aggregations []*Aggregation `json:"aggregations,omitempty"`
// Duration: The amount of time that a time series must fail to report
// new data to be considered failing. Currently, only values that are a
// multiple of a minute--e.g. 60, 120, or 300 seconds--are supported. If
// an invalid value is given, an error will be returned. The
// Duration.nanos field is ignored.
Duration string `json:"duration,omitempty"`
// Filter: A filter that identifies which time series should be compared
	// with the threshold. The filter is similar to the one that is specified
// in the ListTimeSeries request (that call is useful to verify the time
// series that will be retrieved / processed) and must specify the
// metric type and optionally may contain restrictions on resource type,
// resource labels, and metric labels. This field may not exceed 2048
// Unicode characters in length.
Filter string `json:"filter,omitempty"`
// Trigger: The number/percent of time series for which the comparison
// must hold in order for the condition to trigger. If unspecified, then
// the condition will trigger if the comparison is true for any of the
// time series that have been identified by filter and aggregations.
Trigger *Trigger `json:"trigger,omitempty"`
// ForceSendFields is a list of field names (e.g. "Aggregations") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Aggregations") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *MetricAbsence) MarshalJSON() ([]byte, error) {
type NoMethod MetricAbsence
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
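// A hedged sketch of a MetricAbsence condition as documented above: the
// filter must specify a metric type, and Duration must be a whole number
// of minutes (Duration.nanos is ignored). The filter below is illustrative.
func exampleMetricAbsence() *MetricAbsence {
	return &MetricAbsence{
		Filter:   `metric.type="custom.googleapis.com/invoice/paid/amount"`,
		Duration: "300s", // five minutes without data violates the condition
	}
}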
// MetricDescriptor: Defines a metric type and its schema. Once a metric
// descriptor is created, deleting or altering it stops data collection
// and makes the metric type's existing data unusable.
type MetricDescriptor struct {
// Description: A detailed description of the metric, which can be used
// in documentation.
Description string `json:"description,omitempty"`
// DisplayName: A concise name for the metric, which can be displayed in
// user interfaces. Use sentence case without an ending period, for
// example "Request count". This field is optional but it is recommended
// to be set for any metrics associated with user-visible concepts, such
// as Quota.
DisplayName string `json:"displayName,omitempty"`
// Labels: The set of labels that can be used to describe a specific
// instance of this metric type. For example, the
// appengine.googleapis.com/http/server/response_latencies metric type
// has a label for the HTTP response code, response_code, so you can
// look at latencies for successful responses or just for responses that
// failed.
Labels []*LabelDescriptor `json:"labels,omitempty"`
// LaunchStage: Optional. The launch stage of the metric definition.
//
// Possible values:
// "LAUNCH_STAGE_UNSPECIFIED" - Do not use this default value.
// "EARLY_ACCESS" - Early Access features are limited to a closed
// group of testers. To use these features, you must sign up in advance
// and sign a Trusted Tester agreement (which includes confidentiality
// provisions). These features may be unstable, changed in
// backward-incompatible ways, and are not guaranteed to be released.
// "ALPHA" - Alpha is a limited availability test for releases before
// they are cleared for widespread use. By Alpha, all significant design
// issues are resolved and we are in the process of verifying
// functionality. Alpha customers need to apply for access, agree to
// applicable terms, and have their projects whitelisted. Alpha releases
// don’t have to be feature complete, no SLAs are provided, and there
// are no technical support obligations, but they will be far enough
// along that customers can actually use them in test environments or
// for limited-use tests -- just like they would in normal production
// cases.
// "BETA" - Beta is the point at which we are ready to open a release
// for any customer to use. There are no SLA or technical support
// obligations in a Beta release. Products will be complete from a
// feature perspective, but may have some open outstanding issues. Beta
// releases are suitable for limited production use cases.
// "GA" - GA features are open to all developers and are considered
// stable and fully qualified for production use.
// "DEPRECATED" - Deprecated features are scheduled to be shut down
// and removed. For more information, see the “Deprecation Policy”
// section of our Terms of Service (https://cloud.google.com/terms/) and
// the Google Cloud Platform Subject to the Deprecation Policy
// (https://cloud.google.com/terms/deprecation) documentation.
LaunchStage string `json:"launchStage,omitempty"`
// Metadata: Optional. Metadata which can be used to guide usage of the
// metric.
Metadata *MetricDescriptorMetadata `json:"metadata,omitempty"`
// MetricKind: Whether the metric records instantaneous values, changes
// to a value, etc. Some combinations of metric_kind and value_type
// might not be supported.
//
// Possible values:
// "METRIC_KIND_UNSPECIFIED" - Do not use this default value.
// "GAUGE" - An instantaneous measurement of a value.
// "DELTA" - The change in a value during a time interval.
// "CUMULATIVE" - A value accumulated over a time interval. Cumulative
// measurements in a time series should have the same start time and
// increasing end times, until an event resets the cumulative value to
// zero and sets a new start time for the following points.
MetricKind string `json:"metricKind,omitempty"`
	// MonitoredResourceTypes: Read-only. If present, a time series with
	// this metric type (a time series is identified partially by a metric
	// type and a MonitoredResourceDescriptor) can only be associated with
	// one of the monitored resource types listed here.
MonitoredResourceTypes []string `json:"monitoredResourceTypes,omitempty"`
// Name: The resource name of the metric descriptor.
Name string `json:"name,omitempty"`
// Type: The metric type, including its DNS name prefix. The type is not
// URL-encoded. All user-defined metric types have the DNS name
// custom.googleapis.com or external.googleapis.com. Metric types should
	// use a natural hierarchical grouping. For example:
	// "custom.googleapis.com/invoice/paid/amount"
	// "external.googleapis.com/prometheus/up"
	// "appengine.googleapis.com/http/server/response_latencies"
Type string `json:"type,omitempty"`
// Unit: The unit in which the metric value is reported. It is only
// applicable if the value_type is INT64, DOUBLE, or DISTRIBUTION. The
// supported units are a subset of The Unified Code for Units of Measure
	// (http://unitsofmeasure.org/ucum.html) standard:
	//
	// Basic units (UNIT)
// bit bit
// By byte
// s second
// min minute
// h hour
	// d day
	//
	// Prefixes (PREFIX)
// k kilo (10**3)
// M mega (10**6)
// G giga (10**9)
// T tera (10**12)
// P peta (10**15)
// E exa (10**18)
// Z zetta (10**21)
// Y yotta (10**24)
// m milli (10**-3)
// u micro (10**-6)
// n nano (10**-9)
// p pico (10**-12)
// f femto (10**-15)
// a atto (10**-18)
// z zepto (10**-21)
// y yocto (10**-24)
// Ki kibi (2**10)
// Mi mebi (2**20)
// Gi gibi (2**30)
	// Ti tebi (2**40)
	//
	// Grammar: The grammar also includes these connectors:
// / division (as an infix operator, e.g. 1/s).
	// . multiplication (as an infix operator, e.g. GBy.d)
	//
	// The grammar for a unit is as follows:
// Expression = Component { "." Component } { "/" Component }
// ;
//
// Component = ( [ PREFIX ] UNIT | "%" ) [ Annotation ]
// | Annotation
// | "1"
// ;
//
// Annotation = "{" NAME "}" ;
// Notes:
// Annotation is just a comment if it follows a UNIT and is equivalent
	// to 1 if it is used alone. For example, {requests}/s == 1/s,
// By{transmitted}/s == By/s.
// NAME is a sequence of non-blank printable ASCII characters not
// containing '{' or '}'.
// 1 represents dimensionless value 1, such as in 1/s.
// % represents dimensionless value 1/100, and annotates values giving
// a percentage.
Unit string `json:"unit,omitempty"`
// ValueType: Whether the measurement is an integer, a floating-point
// number, etc. Some combinations of metric_kind and value_type might
// not be supported.
//
// Possible values:
// "VALUE_TYPE_UNSPECIFIED" - Do not use this default value.
// "BOOL" - The value is a boolean. This value type can be used only
// if the metric kind is GAUGE.
// "INT64" - The value is a signed 64-bit integer.
// "DOUBLE" - The value is a double precision floating point number.
// "STRING" - The value is a text string. This value type can be used
// only if the metric kind is GAUGE.
// "DISTRIBUTION" - The value is a Distribution.
// "MONEY" - The value is money.
ValueType string `json:"valueType,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "Description") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Description") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *MetricDescriptor) MarshalJSON() ([]byte, error) {
type NoMethod MetricDescriptor
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
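// A hedged sketch of a user-defined MetricDescriptor following the Type
// and Unit grammars documented above: the type lives under
// custom.googleapis.com, and the unit "By/s" combines the basic unit By
// with the "/" connector. All concrete values are illustrative.
func exampleMetricDescriptor() *MetricDescriptor {
	return &MetricDescriptor{
		Type:        "custom.googleapis.com/network/egress_rate",
		DisplayName: "Egress rate",
		Description: "Bytes sent per second, sampled every minute.",
		MetricKind:  "GAUGE",
		ValueType:   "DOUBLE",
		Unit:        "By/s", // byte per second; "{packets}/s" would also be valid
	}
}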
// MetricDescriptorMetadata: Additional annotations that can be used to
// guide the usage of a metric.
type MetricDescriptorMetadata struct {
// IngestDelay: The delay of data points caused by ingestion. Data
// points older than this age are guaranteed to be ingested and
// available to be read, excluding data loss due to errors.
IngestDelay string `json:"ingestDelay,omitempty"`
// LaunchStage: Deprecated. Must use the MetricDescriptor.launch_stage
// instead.
//
// Possible values:
// "LAUNCH_STAGE_UNSPECIFIED" - Do not use this default value.
// "EARLY_ACCESS" - Early Access features are limited to a closed
// group of testers. To use these features, you must sign up in advance
// and sign a Trusted Tester agreement (which includes confidentiality
// provisions). These features may be unstable, changed in
// backward-incompatible ways, and are not guaranteed to be released.
// "ALPHA" - Alpha is a limited availability test for releases before
// they are cleared for widespread use. By Alpha, all significant design
// issues are resolved and we are in the process of verifying
// functionality. Alpha customers need to apply for access, agree to
// applicable terms, and have their projects whitelisted. Alpha releases
// don’t have to be feature complete, no SLAs are provided, and there
// are no technical support obligations, but they will be far enough
// along that customers can actually use them in test environments or
// for limited-use tests -- just like they would in normal production
// cases.
// "BETA" - Beta is the point at which we are ready to open a release
// for any customer to use. There are no SLA or technical support
// obligations in a Beta release. Products will be complete from a
// feature perspective, but may have some open outstanding issues. Beta
// releases are suitable for limited production use cases.
// "GA" - GA features are open to all developers and are considered
// stable and fully qualified for production use.
// "DEPRECATED" - Deprecated features are scheduled to be shut down
// and removed. For more information, see the “Deprecation Policy”
// section of our Terms of Service (https://cloud.google.com/terms/) and
// the Google Cloud Platform Subject to the Deprecation Policy
// (https://cloud.google.com/terms/deprecation) documentation.
LaunchStage string `json:"launchStage,omitempty"`
// SamplePeriod: The sampling period of metric data points. For metrics
// which are written periodically, consecutive data points are stored at
// this time interval, excluding data loss due to errors. Metrics with a
// higher granularity have a smaller sampling period.
SamplePeriod string `json:"samplePeriod,omitempty"`
// ForceSendFields is a list of field names (e.g. "IngestDelay") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "IngestDelay") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *MetricDescriptorMetadata) MarshalJSON() ([]byte, error) {
type NoMethod MetricDescriptorMetadata
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// MetricRange: A MetricRange is used when each window is good when the
// value x of a single TimeSeries satisfies range.min <= x < range.max.
// The provided TimeSeries must have ValueType = INT64 or ValueType =
// DOUBLE and MetricKind = GAUGE.
type MetricRange struct {
// Range: Range of values considered "good." For a one-sided range, set
// one bound to an infinite value.
Range *GoogleMonitoringV3Range `json:"range,omitempty"`
// TimeSeries: A monitoring filter
// (https://cloud.google.com/monitoring/api/v3/filters) specifying the
// TimeSeries to use for evaluating window quality.
TimeSeries string `json:"timeSeries,omitempty"`
// ForceSendFields is a list of field names (e.g. "Range") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Range") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *MetricRange) MarshalJSON() ([]byte, error) {
type NoMethod MetricRange
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
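// A hedged sketch of a MetricRange: a window is good when the value x of
// the single TimeSeries named by the monitoring filter satisfies
// range.min <= x < range.max. The Min/Max field names of
// GoogleMonitoringV3Range and the filter below are assumptions for
// illustration.
func exampleMetricRange() *MetricRange {
	return &MetricRange{
		TimeSeries: `metric.type="custom.googleapis.com/latency"`,
		Range: &GoogleMonitoringV3Range{
			Min: 0,
			Max: 300, // a one-sided range uses an infinite value for one bound
		},
	}
}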
// MetricThreshold: A condition type that compares a collection of time
// series against a threshold.
type MetricThreshold struct {
// Aggregations: Specifies the alignment of data points in individual
// time series as well as how to combine the retrieved time series
// together (such as when aggregating multiple streams on each resource
// to a single stream for each resource or when aggregating streams
	// across all members of a group of resources). Multiple aggregations
	// are applied in the order specified. This field is similar to the one
// in the ListTimeSeries request. It is advisable to use the
// ListTimeSeries method when debugging this field.
Aggregations []*Aggregation `json:"aggregations,omitempty"`
// Comparison: The comparison to apply between the time series
// (indicated by filter and aggregation) and the threshold (indicated by
// threshold_value). The comparison is applied on each time series, with
// the time series on the left-hand side and the threshold on the
	// right-hand side. Only COMPARISON_LT and COMPARISON_GT are supported
// currently.
//
// Possible values:
// "COMPARISON_UNSPECIFIED" - No ordering relationship is specified.
// "COMPARISON_GT" - The left argument is greater than the right
// argument.
// "COMPARISON_GE" - The left argument is greater than or equal to the
// right argument.
// "COMPARISON_LT" - The left argument is less than the right
// argument.
// "COMPARISON_LE" - The left argument is less than or equal to the
// right argument.
// "COMPARISON_EQ" - The left argument is equal to the right argument.
// "COMPARISON_NE" - The left argument is not equal to the right
// argument.
Comparison string `json:"comparison,omitempty"`
// DenominatorAggregations: Specifies the alignment of data points in
// individual time series selected by denominatorFilter as well as how
// to combine the retrieved time series together (such as when
// aggregating multiple streams on each resource to a single stream for
// each resource or when aggregating streams across all members of a
	// group of resources). When computing ratios, the aggregations and
// denominator_aggregations fields must use the same alignment period
// and produce time series that have the same periodicity and labels.
DenominatorAggregations []*Aggregation `json:"denominatorAggregations,omitempty"`
// DenominatorFilter: A filter that identifies a time series that should
// be used as the denominator of a ratio that will be compared with the
// threshold. If a denominator_filter is specified, the time series
	// specified by the filter field will be used as the numerator. The
// filter must specify the metric type and optionally may contain
// restrictions on resource type, resource labels, and metric labels.
// This field may not exceed 2048 Unicode characters in length.
DenominatorFilter string `json:"denominatorFilter,omitempty"`
// Duration: The amount of time that a time series must violate the
// threshold to be considered failing. Currently, only values that are a
// multiple of a minute--e.g., 0, 60, 120, or 300 seconds--are
// supported. If an invalid value is given, an error will be returned.
// When choosing a duration, it is useful to keep in mind the frequency
// of the underlying time series data (which may also be affected by any
// alignments specified in the aggregations field); a good duration is
// long enough so that a single outlier does not generate spurious
// alerts, but short enough that unhealthy states are detected and
// alerted on quickly.
Duration string `json:"duration,omitempty"`
// Filter: A filter that identifies which time series should be compared
	// with the threshold. The filter is similar to the one that is specified
// in the ListTimeSeries request (that call is useful to verify the time
// series that will be retrieved / processed) and must specify the
// metric type and optionally may contain restrictions on resource type,
// resource labels, and metric labels. This field may not exceed 2048
// Unicode characters in length.
Filter string `json:"filter,omitempty"`
// ThresholdValue: A value against which to compare the time series.
ThresholdValue float64 `json:"thresholdValue,omitempty"`
// Trigger: The number/percent of time series for which the comparison
// must hold in order for the condition to trigger. If unspecified, then
// the condition will trigger if the comparison is true for any of the
// time series that have been identified by filter and aggregations, or
// by the ratio, if denominator_filter and denominator_aggregations are
// specified.
Trigger *Trigger `json:"trigger,omitempty"`
// ForceSendFields is a list of field names (e.g. "Aggregations") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Aggregations") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *MetricThreshold) MarshalJSON() ([]byte, error) {
type NoMethod MetricThreshold
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
func (s *MetricThreshold) UnmarshalJSON(data []byte) error {
type NoMethod MetricThreshold
var s1 struct {
ThresholdValue gensupport.JSONFloat64 `json:"thresholdValue"`
*NoMethod
}
s1.NoMethod = (*NoMethod)(s)
if err := json.Unmarshal(data, &s1); err != nil {
return err
}
s.ThresholdValue = float64(s1.ThresholdValue)
return nil
}
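// A hedged sketch of a MetricThreshold condition per the field comments
// above: each selected time series is compared on the left-hand side
// against ThresholdValue, and must violate the comparison for Duration
// (a whole number of minutes) before the condition triggers. The filter
// and threshold below are illustrative.
func exampleMetricThreshold() *MetricThreshold {
	return &MetricThreshold{
		Filter:         `metric.type="compute.googleapis.com/instance/cpu/utilization"`,
		Comparison:     "COMPARISON_GT",
		ThresholdValue: 0.9,    // alert when CPU utilization exceeds 90%
		Duration:       "120s", // sustained for two minutes
	}
}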
// MonitoredResource: An object representing a resource that can be used
// for monitoring, logging, billing, or other purposes. Examples include
// virtual machine instances, databases, and storage devices such as
// disks. The type field identifies a MonitoredResourceDescriptor object
// that describes the resource's schema. Information in the labels field
// identifies the actual resource and its attributes according to the
// schema. For example, a particular Compute Engine VM instance could be
// represented by the following object, because the
// MonitoredResourceDescriptor for "gce_instance" has labels
// "instance_id" and "zone":
// { "type": "gce_instance",
// "labels": { "instance_id": "12345678901234",
// "zone": "us-central1-a" }}
//
type MonitoredResource struct {
// Labels: Required. Values for all of the labels listed in the
// associated monitored resource descriptor. For example, Compute Engine
// VM instances use the labels "project_id", "instance_id", and "zone".
Labels map[string]string `json:"labels,omitempty"`
// Type: Required. The monitored resource type. This field must match
// the type field of a MonitoredResourceDescriptor object. For example,
// the type of a Compute Engine VM instance is gce_instance. For a list
// of types, see Monitoring resource types and Logging resource types.
Type string `json:"type,omitempty"`
// ForceSendFields is a list of field names (e.g. "Labels") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Labels") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *MonitoredResource) MarshalJSON() ([]byte, error) {
type NoMethod MonitoredResource
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
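// The JSON example in the MonitoredResource comment above, written as a Go
// literal (a hedged sketch; the concrete instance_id and zone values are
// illustrative).
func exampleMonitoredResource() *MonitoredResource {
	return &MonitoredResource{
		Type: "gce_instance",
		Labels: map[string]string{
			"instance_id": "12345678901234",
			"zone":        "us-central1-a",
		},
	}
}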
// MonitoredResourceDescriptor: An object that describes the schema of a
// MonitoredResource object using a type name and a set of labels. For
// example, the monitored resource descriptor for Google Compute Engine
// VM instances has a type of "gce_instance" and specifies the use of
// the labels "instance_id" and "zone" to identify particular VM
// instances. Different APIs can support different monitored resource
// types. APIs generally provide a list method that returns the
// monitored resource descriptors used by the API.
type MonitoredResourceDescriptor struct {
// Description: Optional. A detailed description of the monitored
// resource type that might be used in documentation.
Description string `json:"description,omitempty"`
// DisplayName: Optional. A concise name for the monitored resource type
// that might be displayed in user interfaces. It should be a Title
// Cased Noun Phrase, without any article or other determiners. For
// example, "Google Cloud SQL Database".
DisplayName string `json:"displayName,omitempty"`
// Labels: Required. A set of labels used to describe instances of this
// monitored resource type. For example, an individual Google Cloud SQL
// database is identified by values for the labels "database_id" and
// "zone".
Labels []*LabelDescriptor `json:"labels,omitempty"`
// LaunchStage: Optional. The launch stage of the monitored resource
// definition.
//
// Possible values:
// "LAUNCH_STAGE_UNSPECIFIED" - Do not use this default value.
// "EARLY_ACCESS" - Early Access features are limited to a closed
// group of testers. To use these features, you must sign up in advance
// and sign a Trusted Tester agreement (which includes confidentiality
// provisions). These features may be unstable, changed in
// backward-incompatible ways, and are not guaranteed to be released.
// "ALPHA" - Alpha is a limited availability test for releases before
// they are cleared for widespread use. By Alpha, all significant design
// issues are resolved and we are in the process of verifying
// functionality. Alpha customers need to apply for access, agree to
// applicable terms, and have their projects whitelisted. Alpha releases
// don’t have to be feature complete, no SLAs are provided, and there
// are no technical support obligations, but they will be far enough
// along that customers can actually use them in test environments or
// for limited-use tests -- just like they would in normal production
// cases.
// "BETA" - Beta is the point at which we are ready to open a release
// for any customer to use. There are no SLA or technical support
// obligations in a Beta release. Products will be complete from a
// feature perspective, but may have some open outstanding issues. Beta
// releases are suitable for limited production use cases.
// "GA" - GA features are open to all developers and are considered
// stable and fully qualified for production use.
// "DEPRECATED" - Deprecated features are scheduled to be shut down
// and removed. For more information, see the “Deprecation Policy”
// section of our Terms of Service (https://cloud.google.com/terms/) and
// the Google Cloud Platform Subject to the Deprecation Policy
// (https://cloud.google.com/terms/deprecation) documentation.
LaunchStage string `json:"launchStage,omitempty"`
// Name: Optional. The resource name of the monitored resource
// descriptor:
// "projects/{project_id}/monitoredResourceDescriptors/{type}" where
// {type} is the value of the type field in this object and {project_id}
// is a project ID that provides API-specific context for accessing the
// type. APIs that do not use project information can use the resource
// name format "monitoredResourceDescriptors/{type}".
Name string `json:"name,omitempty"`
// Type: Required. The monitored resource type. For example, the type
// "cloudsql_database" represents databases in Google Cloud SQL. The
// maximum length of this value is 256 characters.
Type string `json:"type,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "Description") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Description") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *MonitoredResourceDescriptor) MarshalJSON() ([]byte, error) {
type NoMethod MonitoredResourceDescriptor
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// MonitoredResourceMetadata: Auxiliary metadata for a MonitoredResource
// object. MonitoredResource objects contain the minimum set of
// information to uniquely identify a monitored resource instance. There
// is some other useful auxiliary metadata. Monitoring and Logging use
// an ingestion pipeline to extract metadata for cloud resources of all
// types, and store the metadata in this message.
type MonitoredResourceMetadata struct {
// SystemLabels: Output only. Values for predefined system metadata
// labels. System labels are a kind of metadata extracted by Google,
// including "machine_image", "vpc", "subnet_id", "security_group",
// "name", etc. System label values can be only strings, Boolean values,
// or a list of strings. For example:
// { "name": "my-test-instance",
// "security_group": ["a", "b", "c"],
// "spot_instance": false }
//
SystemLabels googleapi.RawMessage `json:"systemLabels,omitempty"`
// UserLabels: Output only. A map of user-defined metadata labels.
UserLabels map[string]string `json:"userLabels,omitempty"`
// ForceSendFields is a list of field names (e.g. "SystemLabels") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "SystemLabels") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *MonitoredResourceMetadata) MarshalJSON() ([]byte, error) {
type NoMethod MonitoredResourceMetadata
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
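// SystemLabels is delivered as raw JSON because its values may be strings,
// booleans, or lists of strings. A hedged sketch of decoding it into a
// generic map (exampleDecodeSystemLabels is not part of the generated
// surface):
func exampleDecodeSystemLabels(m *MonitoredResourceMetadata) (map[string]interface{}, error) {
	labels := map[string]interface{}{}
	if len(m.SystemLabels) == 0 {
		return labels, nil
	}
	if err := json.Unmarshal(m.SystemLabels, &labels); err != nil {
		return nil, err
	}
	return labels, nil
}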
// MutationRecord: Describes a change made to a configuration.
type MutationRecord struct {
// MutateTime: When the change occurred.
MutateTime string `json:"mutateTime,omitempty"`
// MutatedBy: The email address of the user making the change.
MutatedBy string `json:"mutatedBy,omitempty"`
// ForceSendFields is a list of field names (e.g. "MutateTime") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "MutateTime") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *MutationRecord) MarshalJSON() ([]byte, error) {
type NoMethod MutationRecord
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// NotificationChannel: A NotificationChannel is a medium through which
// an alert is delivered when a policy violation is detected. Examples
// of channels include email, SMS, and third-party messaging
// applications. Fields containing sensitive information like
// authentication tokens or contact info are only partially populated on
// retrieval.
type NotificationChannel struct {
// Description: An optional human-readable description of this
// notification channel. This description may provide additional
// details, beyond the display name, for the channel. This may not
// exceed 1024 Unicode characters.
Description string `json:"description,omitempty"`
// DisplayName: An optional human-readable name for this notification
// channel. It is recommended that you specify a non-empty and unique
// name in order to make it easier to identify the channels in your
// project, though this is not enforced. The display name is limited to
// 512 Unicode characters.
DisplayName string `json:"displayName,omitempty"`
// Enabled: Whether notifications are forwarded to the described
// channel. This makes it possible to disable delivery of notifications
// to a particular channel without removing the channel from all
// alerting policies that reference the channel. This is a more
// convenient approach when the change is temporary and you want to
// receive notifications from the same set of alerting policies on the
// channel at some point in the future.
Enabled bool `json:"enabled,omitempty"`
// Labels: Configuration fields that define the channel and its
// behavior. The permissible and required labels are specified in the
// NotificationChannelDescriptor.labels of the
// NotificationChannelDescriptor corresponding to the type field.
Labels map[string]string `json:"labels,omitempty"`
// Name: The full REST resource name for this channel. The syntax
// is:
// projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]
// The [CHANNEL_ID] is automatically assigned by the server on creation.
Name string `json:"name,omitempty"`
// Type: The type of the notification channel. This field matches the
// value of the NotificationChannelDescriptor.type field.
Type string `json:"type,omitempty"`
// UserLabels: User-supplied key/value data that does not need to
// conform to the corresponding NotificationChannelDescriptor's schema,
// unlike the labels field. This field is intended to be used for
	// organizing and identifying the NotificationChannel objects. The field
// can contain up to 64 entries. Each key and value is limited to 63
// Unicode characters or 128 bytes, whichever is smaller. Labels and
// values can contain only lowercase letters, numerals, underscores, and
// dashes. Keys must begin with a letter.
UserLabels map[string]string `json:"userLabels,omitempty"`
// VerificationStatus: Indicates whether this channel has been verified
// or not. On a ListNotificationChannels or GetNotificationChannel
	// operation, this field is expected to be populated. If the value is
// UNVERIFIED, then it indicates that the channel is non-functioning (it
// both requires verification and lacks verification); otherwise, it is
	// assumed that the channel works. If the channel is neither VERIFIED nor
// UNVERIFIED, it implies that the channel is of a type that does not
// require verification or that this specific channel has been exempted
// from verification because it was created prior to verification being
	// required for channels of this type. This field cannot be modified
// using a standard UpdateNotificationChannel operation. To change the
// value of this field, you must call VerifyNotificationChannel.
//
// Possible values:
// "VERIFICATION_STATUS_UNSPECIFIED" - Sentinel value used to indicate
// that the state is unknown, omitted, or is not applicable (as in the
// case of channels that neither support nor require verification in
// order to function).
// "UNVERIFIED" - The channel has yet to be verified and requires
// verification to function. Note that this state also applies to the
// case where the verification process has been initiated by sending a
// verification code but where the verification code has not been
// submitted to complete the process.
// "VERIFIED" - It has been proven that notifications can be received
// on this notification channel and that someone on the project has
// access to messages that are delivered to that channel.
VerificationStatus string `json:"verificationStatus,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "Description") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Description") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *NotificationChannel) MarshalJSON() ([]byte, error) {
type NoMethod NotificationChannel
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
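// exampleNotificationChannel is an illustrative sketch, not part of the
// generated surface: it assembles an email notification channel. The
// "email" type and its "email_address" label are assumptions; the
// authoritative label schema for a given type is defined by the
// corresponding NotificationChannelDescriptor.
func exampleNotificationChannel() *NotificationChannel {
	return &NotificationChannel{
		Type:        "email",
		DisplayName: "On-call alerts",
		Labels:      map[string]string{"email_address": "oncall@example.com"},
		// To explicitly send enabled=false in a Patch request instead of
		// having the empty value omitted, list the field in ForceSendFields:
		//   ForceSendFields: []string{"Enabled"},
	}
}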
// NotificationChannelDescriptor: A description of a notification
// channel. The descriptor includes the properties of the channel and
// the set of labels or fields that must be specified to configure
// channels of a given type.
type NotificationChannelDescriptor struct {
// Description: A human-readable description of the notification channel
// type. The description may include a description of the properties of
// the channel and pointers to external documentation.
Description string `json:"description,omitempty"`
// DisplayName: A human-readable name for the notification channel type.
// This form of the name is suitable for a user interface.
DisplayName string `json:"displayName,omitempty"`
// Labels: The set of labels that must be defined to identify a
// particular channel of the corresponding type. Each label includes a
// description for how that field should be populated.
Labels []*LabelDescriptor `json:"labels,omitempty"`
// Name: The full REST resource name for this descriptor. The syntax
// is:
// projects/[PROJECT_ID]/notificationChannelDescriptors/[TYPE]
// In the above, [TYPE] is the value of the type field.
Name string `json:"name,omitempty"`
// SupportedTiers: The tiers that support this notification channel; the
// project service tier must be one of the supported_tiers.
//
// Possible values:
// "SERVICE_TIER_UNSPECIFIED" - An invalid sentinel value, used to
// indicate that a tier has not been provided explicitly.
// "SERVICE_TIER_BASIC" - A free tier of service that provided access
// to basic features.
// "SERVICE_TIER_PREMIUM" - A paid tier of service that provided
// access to all features.
SupportedTiers []string `json:"supportedTiers,omitempty"`
// Type: The type of notification channel, such as "email", "sms", etc.
// Notification channel types are globally unique.
Type string `json:"type,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "Description") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Description") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *NotificationChannelDescriptor) MarshalJSON() ([]byte, error) {
type NoMethod NotificationChannelDescriptor
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// Option: A protocol buffer option, which can be attached to a message,
// field, enumeration, etc.
type Option struct {
// Name: The option's name. For protobuf built-in options (options
// defined in descriptor.proto), this is the short name. For example,
// "map_entry". For custom options, it should be the fully-qualified
// name. For example, "google.api.http".
Name string `json:"name,omitempty"`
// Value: The option's value packed in an Any message. If the value is a
// primitive, the corresponding wrapper type defined in
// google/protobuf/wrappers.proto should be used. If the value is an
// enum, it should be stored as an int32 value using the
// google.protobuf.Int32Value type.
Value googleapi.RawMessage `json:"value,omitempty"`
// ForceSendFields is a list of field names (e.g. "Name") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Name") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Option) MarshalJSON() ([]byte, error) {
type NoMethod Option
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// PerformanceThreshold: A PerformanceThreshold is used when each window
// is counted as good if that window has sufficiently high performance.
type PerformanceThreshold struct {
// BasicSliPerformance: BasicSli to evaluate to judge window quality.
BasicSliPerformance *BasicSli `json:"basicSliPerformance,omitempty"`
// Performance: RequestBasedSli to evaluate to judge window quality.
Performance *RequestBasedSli `json:"performance,omitempty"`
// Threshold: If window performance >= threshold, the window is counted
// as good.
Threshold float64 `json:"threshold,omitempty"`
// ForceSendFields is a list of field names (e.g. "BasicSliPerformance")
// to unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "BasicSliPerformance") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *PerformanceThreshold) MarshalJSON() ([]byte, error) {
type NoMethod PerformanceThreshold
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
func (s *PerformanceThreshold) UnmarshalJSON(data []byte) error {
type NoMethod PerformanceThreshold
var s1 struct {
Threshold gensupport.JSONFloat64 `json:"threshold"`
*NoMethod
}
s1.NoMethod = (*NoMethod)(s)
if err := json.Unmarshal(data, &s1); err != nil {
return err
}
s.Threshold = float64(s1.Threshold)
return nil
}
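// exampleThresholdDecode is an illustrative sketch of why the custom
// UnmarshalJSON above exists: Threshold is decoded through
// gensupport.JSONFloat64, which is assumed to accept proto3-JSON special
// values such as "NaN" and "Infinity" (encoded as strings) in addition to
// plain JSON numbers, before being copied into the plain float64 field.
func exampleThresholdDecode() (float64, error) {
	var pt PerformanceThreshold
	if err := pt.UnmarshalJSON([]byte(`{"threshold": 0.95}`)); err != nil {
		return 0, err
	}
	return pt.Threshold, nil // 0.95
}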
// Point: A single data point in a time series.
type Point struct {
// Interval: The time interval to which the data point applies. For
// GAUGE metrics, the start time is optional, but if it is supplied, it
// must equal the end time. For DELTA metrics, the start and end time
// should specify a non-zero interval, with subsequent points specifying
// contiguous and non-overlapping intervals. For CUMULATIVE metrics, the
// start and end time should specify a non-zero interval, with
// subsequent points specifying the same start time and increasing end
// times, until an event resets the cumulative value to zero and sets a
// new start time for the following points.
Interval *TimeInterval `json:"interval,omitempty"`
// Value: The value of the data point.
Value *TypedValue `json:"value,omitempty"`
// ForceSendFields is a list of field names (e.g. "Interval") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Interval") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Point) MarshalJSON() ([]byte, error) {
type NoMethod Point
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
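// examplePoint is an illustrative sketch: a single GAUGE data point. Per
// the Interval documentation above, a GAUGE point may omit the start time,
// in which case the interval collapses to a single instant. The timestamp
// and value are made up for the example.
func examplePoint() *Point {
	v := 42.0
	return &Point{
		Interval: &TimeInterval{EndTime: "2024-01-01T00:00:00Z"},
		Value:    &TypedValue{DoubleValue: &v},
	}
}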
// Range: The range of the population values.
type Range struct {
// Max: The maximum of the population values.
Max float64 `json:"max,omitempty"`
// Min: The minimum of the population values.
Min float64 `json:"min,omitempty"`
// ForceSendFields is a list of field names (e.g. "Max") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Max") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Range) MarshalJSON() ([]byte, error) {
type NoMethod Range
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
func (s *Range) UnmarshalJSON(data []byte) error {
type NoMethod Range
var s1 struct {
Max gensupport.JSONFloat64 `json:"max"`
Min gensupport.JSONFloat64 `json:"min"`
*NoMethod
}
s1.NoMethod = (*NoMethod)(s)
if err := json.Unmarshal(data, &s1); err != nil {
return err
}
s.Max = float64(s1.Max)
s.Min = float64(s1.Min)
return nil
}
// RequestBasedSli: Service Level Indicators for which atomic units of
// service are counted directly.
type RequestBasedSli struct {
// DistributionCut: distribution_cut is used when good_service is a
// count of values aggregated in a Distribution that fall into a good
// range. The total_service is the total count of all values aggregated
// in the Distribution.
DistributionCut *DistributionCut `json:"distributionCut,omitempty"`
// GoodTotalRatio: good_total_ratio is used when the ratio of
// good_service to total_service is computed from two TimeSeries.
GoodTotalRatio *TimeSeriesRatio `json:"goodTotalRatio,omitempty"`
// ForceSendFields is a list of field names (e.g. "DistributionCut") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "DistributionCut") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *RequestBasedSli) MarshalJSON() ([]byte, error) {
type NoMethod RequestBasedSli
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// ResourceGroup: The resource submessage for group checks. It can be
// used instead of a monitored resource, when multiple resources are
// being monitored.
type ResourceGroup struct {
// GroupId: The group of resources being monitored. Should be only the
// [GROUP_ID], and not the full-path
// projects/[PROJECT_ID]/groups/[GROUP_ID].
GroupId string `json:"groupId,omitempty"`
// ResourceType: The resource type of the group members.
//
// Possible values:
// "RESOURCE_TYPE_UNSPECIFIED" - Default value (not valid).
// "INSTANCE" - A group of instances from Google Cloud Platform (GCP)
// or Amazon Web Services (AWS).
// "AWS_ELB_LOAD_BALANCER" - A group of Amazon ELB load balancers.
ResourceType string `json:"resourceType,omitempty"`
// ForceSendFields is a list of field names (e.g. "GroupId") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "GroupId") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *ResourceGroup) MarshalJSON() ([]byte, error) {
type NoMethod ResourceGroup
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// SendNotificationChannelVerificationCodeRequest: The
// SendNotificationChannelVerificationCode request.
type SendNotificationChannelVerificationCodeRequest struct {
}
// Service: A Service is a discrete, autonomous, and network-accessible
// unit, designed to solve an individual concern (Wikipedia
// (https://en.wikipedia.org/wiki/Service-orientation)). In Stackdriver
// Monitoring, a Service acts as the root resource under which
// operational aspects of the service are accessible.
type Service struct {
// AppEngine: Type used for App Engine services.
AppEngine *AppEngine `json:"appEngine,omitempty"`
// CloudEndpoints: Type used for Cloud Endpoints services.
CloudEndpoints *CloudEndpoints `json:"cloudEndpoints,omitempty"`
// ClusterIstio: Type used for Istio services that live in a Kubernetes
// cluster.
ClusterIstio *ClusterIstio `json:"clusterIstio,omitempty"`
// Custom: Custom service type.
Custom *Custom `json:"custom,omitempty"`
// DisplayName: Name used for UI elements listing this Service.
DisplayName string `json:"displayName,omitempty"`
// Name: Resource name for this Service. Of the form
// projects/{project_id}/services/{service_id}.
Name string `json:"name,omitempty"`
// Telemetry: Configuration for how to query telemetry on a Service.
Telemetry *Telemetry `json:"telemetry,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "AppEngine") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "AppEngine") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Service) MarshalJSON() ([]byte, error) {
type NoMethod Service
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// ServiceLevelIndicator: A Service-Level Indicator (SLI) describes the
// "performance" of a service. For some services, the SLI is
// well-defined. In such cases, the SLI can be described easily by
// referencing the well-known SLI and providing the needed parameters.
// Alternatively, a "custom" SLI can be defined with a query to the
// underlying metric store. An SLI is defined to be good_service /
// total_service over any queried time interval. The value of
// performance always falls into the range 0 <= performance <= 1. A
// custom SLI describes how to compute this ratio, whether this is by
// dividing values from a pair of time series, cutting a Distribution
// into good and bad counts, or counting time windows in which the
// service complies with a criterion. For separation of concerns, a
// single Service-Level Indicator measures performance for only one
// aspect of service quality, such as fraction of successful queries or
// fast-enough queries.
type ServiceLevelIndicator struct {
// BasicSli: Basic SLI on a well-known service type.
BasicSli *BasicSli `json:"basicSli,omitempty"`
// RequestBased: Request-based SLIs
RequestBased *RequestBasedSli `json:"requestBased,omitempty"`
// WindowsBased: Windows-based SLIs
WindowsBased *WindowsBasedSli `json:"windowsBased,omitempty"`
// ForceSendFields is a list of field names (e.g. "BasicSli") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "BasicSli") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *ServiceLevelIndicator) MarshalJSON() ([]byte, error) {
type NoMethod ServiceLevelIndicator
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// ServiceLevelObjective: A Service-Level Objective (SLO) describes a
// level of desired good service. It consists of a service-level
// indicator (SLI), a performance goal, and a period over which the
// objective is to be evaluated against that goal. The SLO can use SLIs
// defined in a number of different manners. Typical SLOs might include
// "99% of requests in each rolling week have latency below 200
// milliseconds" or "99.5% of requests in each calendar month return
// successfully."
type ServiceLevelObjective struct {
// CalendarPeriod: A calendar period, semantically "since the start of
// the current <calendar_period>". At this time, only DAY, WEEK,
// FORTNIGHT, and MONTH are supported.
//
// Possible values:
// "CALENDAR_PERIOD_UNSPECIFIED" - Undefined period, raises an error.
// "DAY" - A day.
// "WEEK" - A week. Weeks begin on Monday, following ISO 8601
// (https://en.wikipedia.org/wiki/ISO_week_date).
// "FORTNIGHT" - A fortnight. The first calendar fortnight of the year
// begins at the start of week 1 according to ISO 8601
// (https://en.wikipedia.org/wiki/ISO_week_date).
// "MONTH" - A month.
// "QUARTER" - A quarter. Quarters start on dates 1-Jan, 1-Apr, 1-Jul,
// and 1-Oct of each year.
// "HALF" - A half-year. Half-years start on dates 1-Jan and 1-Jul.
// "YEAR" - A year.
CalendarPeriod string `json:"calendarPeriod,omitempty"`
// DisplayName: Name used for UI elements listing this SLO.
DisplayName string `json:"displayName,omitempty"`
// Goal: The fraction of service that must be good in order for this
// objective to be met. 0 < goal <= 1.
Goal float64 `json:"goal,omitempty"`
// Name: Resource name for this ServiceLevelObjective. Of the form
// projects/{project_id}/services/{service_id}/serviceLevelObjectives/{slo_name}.
Name string `json:"name,omitempty"`
// RollingPeriod: A rolling time period, semantically "in the past
// <rolling_period>". Must be an integer multiple of 1 day no larger
// than 30 days.
RollingPeriod string `json:"rollingPeriod,omitempty"`
// ServiceLevelIndicator: The definition of good service, used to
// measure and calculate the quality of the Service's performance with
// respect to a single aspect of service quality.
ServiceLevelIndicator *ServiceLevelIndicator `json:"serviceLevelIndicator,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "CalendarPeriod") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "CalendarPeriod") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *ServiceLevelObjective) MarshalJSON() ([]byte, error) {
type NoMethod ServiceLevelObjective
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
func (s *ServiceLevelObjective) UnmarshalJSON(data []byte) error {
type NoMethod ServiceLevelObjective
var s1 struct {
Goal gensupport.JSONFloat64 `json:"goal"`
*NoMethod
}
s1.NoMethod = (*NoMethod)(s)
if err := json.Unmarshal(data, &s1); err != nil {
return err
}
s.Goal = float64(s1.Goal)
return nil
}
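// exampleSLO is an illustrative sketch: "99% of requests in the past week
// are good", expressed with a rolling period. Encoding the rolling period
// as a whole number of seconds ("604800s" is 7 days) is an assumption
// based on the usual JSON encoding of Duration fields.
func exampleSLO(sli *ServiceLevelIndicator) *ServiceLevelObjective {
	return &ServiceLevelObjective{
		DisplayName:           "99% of requests are good (rolling week)",
		Goal:                  0.99,
		RollingPeriod:         "604800s",
		ServiceLevelIndicator: sli,
	}
}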
// SourceContext: SourceContext represents information about the source
// of a protobuf element, like the file in which it is defined.
type SourceContext struct {
// FileName: The path-qualified name of the .proto file that contained
// the associated protobuf element. For example:
// "google/protobuf/source_context.proto".
FileName string `json:"fileName,omitempty"`
// ForceSendFields is a list of field names (e.g. "FileName") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "FileName") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *SourceContext) MarshalJSON() ([]byte, error) {
type NoMethod SourceContext
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// SpanContext: The context of a span, attached to
// google.api.Distribution.Exemplars in google.api.Distribution values
// during aggregation. It contains the name of a span with format:
// projects/PROJECT_ID/traces/TRACE_ID/spans/SPAN_ID
type SpanContext struct {
// SpanName: The resource name of the span in the following format:
// projects/[PROJECT_ID]/traces/[TRACE_ID]/spans/[SPAN_ID]
// TRACE_ID is a unique identifier for a trace within a project; it is
// a 32-character hexadecimal encoding of a 16-byte array. SPAN_ID is a
// unique identifier for a span within a trace; it is a 16-character
// hexadecimal encoding of an 8-byte array.
SpanName string `json:"spanName,omitempty"`
// ForceSendFields is a list of field names (e.g. "SpanName") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "SpanName") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *SpanContext) MarshalJSON() ([]byte, error) {
type NoMethod SpanContext
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// Status: The Status type defines a logical error model that is
// suitable for different programming environments, including REST APIs
// and RPC APIs. It is used by gRPC (https://github.com/grpc). Each
// Status message contains three pieces of data: error code, error
// message, and error details.You can find out more about this error
// model and how to work with it in the API Design Guide
// (https://cloud.google.com/apis/design/errors).
type Status struct {
// Code: The status code, which should be an enum value of
// google.rpc.Code.
Code int64 `json:"code,omitempty"`
// Details: A list of messages that carry the error details. There is a
// common set of message types for APIs to use.
Details []googleapi.RawMessage `json:"details,omitempty"`
// Message: A developer-facing error message, which should be in
// English. Any user-facing error message should be localized and sent
// in the google.rpc.Status.details field, or localized by the client.
Message string `json:"message,omitempty"`
// ForceSendFields is a list of field names (e.g. "Code") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Code") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Status) MarshalJSON() ([]byte, error) {
type NoMethod Status
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// TcpCheck: Information required for a TCP Uptime check request.
type TcpCheck struct {
// Port: The TCP port on the server against which to run the check. Will
// be combined with host (specified within the monitored_resource) to
// construct the full URL. Required.
Port int64 `json:"port,omitempty"`
// ForceSendFields is a list of field names (e.g. "Port") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Port") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *TcpCheck) MarshalJSON() ([]byte, error) {
type NoMethod TcpCheck
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// Telemetry: Configuration for how to query telemetry on a Service.
type Telemetry struct {
// ResourceName: The full name of the resource that defines this
// service. Formatted as described in
// https://cloud.google.com/apis/design/resource_names.
ResourceName string `json:"resourceName,omitempty"`
// ForceSendFields is a list of field names (e.g. "ResourceName") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "ResourceName") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Telemetry) MarshalJSON() ([]byte, error) {
type NoMethod Telemetry
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// TimeInterval: A closed time interval. It extends from the start time
// to the end time, and includes both: [startTime, endTime]. Valid time
// intervals depend on the MetricKind of the metric value. In no case
// can the end time be earlier than the start time.
// For a GAUGE metric, the startTime value is technically optional; if
// no value is specified, the start time defaults to the value of the
// end time, and the interval represents a single point in time. If both
// start and end times are specified, they must be identical. Such an
// interval is valid only for GAUGE metrics, which are point-in-time
// measurements.
// For DELTA and CUMULATIVE metrics, the start time must be earlier
// than the end time.
// In all cases, the start time of the next interval must be at least a
// microsecond after the end time of the previous interval. Because the
// interval is closed, if the start time of a new interval is the same
// as the end time of the previous interval, data written at the new
// start time could overwrite data written at the previous end time.
type TimeInterval struct {
// EndTime: Required. The end of the time interval.
EndTime string `json:"endTime,omitempty"`
// StartTime: Optional. The beginning of the time interval. The default
// value for the start time is the end time. The start time must not be
// later than the end time.
StartTime string `json:"startTime,omitempty"`
// ForceSendFields is a list of field names (e.g. "EndTime") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "EndTime") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *TimeInterval) MarshalJSON() ([]byte, error) {
type NoMethod TimeInterval
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
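// exampleIntervals is an illustrative sketch of the interval rules
// described above: a GAUGE point may use a zero-width interval (start
// omitted, or equal to the end), while DELTA and CUMULATIVE points need a
// start time earlier than the end time. The RFC 3339 timestamps are made
// up for the example.
func exampleIntervals() (gauge, delta *TimeInterval) {
	gauge = &TimeInterval{EndTime: "2024-01-01T00:01:00Z"}
	delta = &TimeInterval{
		StartTime: "2024-01-01T00:00:00Z",
		EndTime:   "2024-01-01T00:01:00Z",
	}
	return gauge, delta
}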
// TimeSeries: A collection of data points that describes the
// time-varying values of a metric. A time series is identified by a
// combination of a fully-specified monitored resource and a
// fully-specified metric. This type is used for both listing and
// creating time series.
type TimeSeries struct {
// Metadata: Output only. The associated monitored resource metadata.
// When reading a time series, this field will include metadata labels
// that are explicitly named in the reduction. When creating a time
// series, this field is ignored.
Metadata *MonitoredResourceMetadata `json:"metadata,omitempty"`
// Metric: The associated metric. A fully-specified metric used to
// identify the time series.
Metric *Metric `json:"metric,omitempty"`
// MetricKind: The metric kind of the time series. When listing time
// series, this metric kind might be different from the metric kind of
// the associated metric if this time series is an alignment or
// reduction of other time series.When creating a time series, this
// field is optional. If present, it must be the same as the metric kind
// of the associated metric. If the associated metric's descriptor must
// be auto-created, then this field specifies the metric kind of the new
// descriptor and must be either GAUGE (the default) or CUMULATIVE.
//
// Possible values:
// "METRIC_KIND_UNSPECIFIED" - Do not use this default value.
// "GAUGE" - An instantaneous measurement of a value.
// "DELTA" - The change in a value during a time interval.
// "CUMULATIVE" - A value accumulated over a time interval. Cumulative
// measurements in a time series should have the same start time and
// increasing end times, until an event resets the cumulative value to
// zero and sets a new start time for the following points.
MetricKind string `json:"metricKind,omitempty"`
// Points: The data points of this time series. When listing time
// series, points are returned in reverse time order. When creating a
// time series, this field must contain exactly one point and the
// point's type must be the same as the value type of the associated
// metric. If the associated metric's descriptor must be auto-created,
// then the value type of the descriptor is determined by the point's
// type, which must be BOOL, INT64, DOUBLE, or DISTRIBUTION.
Points []*Point `json:"points,omitempty"`
// Resource: The associated monitored resource. Custom metrics can use
// only certain monitored resource types in their time series data.
Resource *MonitoredResource `json:"resource,omitempty"`
// ValueType: The value type of the time series. When listing time
// series, this value type might be different from the value type of the
// associated metric if this time series is an alignment or reduction of
// other time series.When creating a time series, this field is
// optional. If present, it must be the same as the type of the data in
// the points field.
//
// Possible values:
// "VALUE_TYPE_UNSPECIFIED" - Do not use this default value.
// "BOOL" - The value is a boolean. This value type can be used only
// if the metric kind is GAUGE.
// "INT64" - The value is a signed 64-bit integer.
// "DOUBLE" - The value is a double precision floating point number.
// "STRING" - The value is a text string. This value type can be used
// only if the metric kind is GAUGE.
// "DISTRIBUTION" - The value is a Distribution.
// "MONEY" - The value is money.
ValueType string `json:"valueType,omitempty"`
// ForceSendFields is a list of field names (e.g. "Metadata") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Metadata") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *TimeSeries) MarshalJSON() ([]byte, error) {
type NoMethod TimeSeries
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
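// exampleTimeSeries is an illustrative sketch of the creation rules above:
// exactly one point, whose type must match the value type of the
// associated metric. The custom metric type, resource type, and labels are
// made up for the example.
func exampleTimeSeries(p *Point) *TimeSeries {
	return &TimeSeries{
		Metric: &Metric{
			Type:   "custom.googleapis.com/my_metric",
			Labels: map[string]string{"env": "prod"},
		},
		Resource: &MonitoredResource{
			Type:   "global",
			Labels: map[string]string{"project_id": "my-project"},
		},
		Points: []*Point{p},
	}
}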
// TimeSeriesRatio: A TimeSeriesRatio specifies two TimeSeries to use
// for computing the good_service / total_service ratio. The specified
// TimeSeries must have ValueType = DOUBLE or ValueType = INT64 and must
// have MetricKind = DELTA or MetricKind = CUMULATIVE. The
// TimeSeriesRatio must specify exactly two of good, bad, and total, and
// the relationship good_service + bad_service = total_service will be
// assumed.
type TimeSeriesRatio struct {
// BadServiceFilter: A monitoring filter
// (https://cloud.google.com/monitoring/api/v3/filters) specifying a
// TimeSeries quantifying bad service, either demanded service that was
// not provided or demanded service that was of inadequate quality. Must
// have ValueType = DOUBLE or ValueType = INT64 and must have
// MetricKind = DELTA or MetricKind = CUMULATIVE.
BadServiceFilter string `json:"badServiceFilter,omitempty"`
// GoodServiceFilter: A monitoring filter
// (https://cloud.google.com/monitoring/api/v3/filters) specifying a
// TimeSeries quantifying good service provided. Must have ValueType =
// DOUBLE or ValueType = INT64 and must have MetricKind = DELTA or
// MetricKind = CUMULATIVE.
GoodServiceFilter string `json:"goodServiceFilter,omitempty"`
// TotalServiceFilter: A monitoring filter
// (https://cloud.google.com/monitoring/api/v3/filters) specifying a
// TimeSeries quantifying total demanded service. Must have ValueType =
// DOUBLE or ValueType = INT64 and must have MetricKind = DELTA or
// MetricKind = CUMULATIVE.
TotalServiceFilter string `json:"totalServiceFilter,omitempty"`
// ForceSendFields is a list of field names (e.g. "BadServiceFilter") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "BadServiceFilter") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *TimeSeriesRatio) MarshalJSON() ([]byte, error) {
type NoMethod TimeSeriesRatio
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
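// exampleGoodTotalRatio is an illustrative sketch: per the documentation
// above, exactly two of the three filters are specified and the third is
// implied by good_service + bad_service = total_service. The monitoring
// filter expressions are made up for the example.
func exampleGoodTotalRatio() *RequestBasedSli {
	return &RequestBasedSli{
		GoodTotalRatio: &TimeSeriesRatio{
			GoodServiceFilter:  `metric.type="custom.googleapis.com/good_count"`,
			TotalServiceFilter: `metric.type="custom.googleapis.com/total_count"`,
		},
	}
}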
// Trigger: Specifies how many time series must fail a predicate to
// trigger a condition. If not specified, then a {count: 1} trigger is
// used.
type Trigger struct {
// Count: The absolute number of time series that must fail the
// predicate for the condition to be triggered.
Count int64 `json:"count,omitempty"`
// Percent: The percentage of time series that must fail the predicate
// for the condition to be triggered.
Percent float64 `json:"percent,omitempty"`
// ForceSendFields is a list of field names (e.g. "Count") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Count") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Trigger) MarshalJSON() ([]byte, error) {
type NoMethod Trigger
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
func (s *Trigger) UnmarshalJSON(data []byte) error {
type NoMethod Trigger
var s1 struct {
Percent gensupport.JSONFloat64 `json:"percent"`
*NoMethod
}
s1.NoMethod = (*NoMethod)(s)
if err := json.Unmarshal(data, &s1); err != nil {
return err
}
s.Percent = float64(s1.Percent)
return nil
}
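// exampleTriggers is an illustrative sketch: a condition can trigger on an
// absolute count of failing time series or on a percentage of them; per
// the documentation above, {count: 1} is the default when no trigger is
// specified.
func exampleTriggers() (byCount, byPercent *Trigger) {
	return &Trigger{Count: 3}, &Trigger{Percent: 10}
}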
// Type: A protocol buffer message type.
type Type struct {
// Fields: The list of fields.
Fields []*Field `json:"fields,omitempty"`
// Name: The fully qualified message name.
Name string `json:"name,omitempty"`
// Oneofs: The list of types appearing in oneof definitions in this
// type.
Oneofs []string `json:"oneofs,omitempty"`
// Options: The protocol buffer options.
Options []*Option `json:"options,omitempty"`
// SourceContext: The source context.
SourceContext *SourceContext `json:"sourceContext,omitempty"`
// Syntax: The source syntax.
//
// Possible values:
// "SYNTAX_PROTO2" - Syntax proto2.
// "SYNTAX_PROTO3" - Syntax proto3.
Syntax string `json:"syntax,omitempty"`
// ForceSendFields is a list of field names (e.g. "Fields") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Fields") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Type) MarshalJSON() ([]byte, error) {
type NoMethod Type
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// TypedValue: A single strongly-typed value.
type TypedValue struct {
// BoolValue: A Boolean value: true or false.
BoolValue *bool `json:"boolValue,omitempty"`
// DistributionValue: A distribution value.
DistributionValue *Distribution `json:"distributionValue,omitempty"`
// DoubleValue: A 64-bit double-precision floating-point number. Its
// magnitude is approximately ±10<sup>±300</sup> and it
// has 16 significant digits of precision.
DoubleValue *float64 `json:"doubleValue,omitempty"`
// Int64Value: A 64-bit integer. Its range is approximately
// ±9.2x10<sup>18</sup>.
Int64Value *int64 `json:"int64Value,omitempty,string"`
// StringValue: A variable-length string value.
StringValue *string `json:"stringValue,omitempty"`
// ForceSendFields is a list of field names (e.g. "BoolValue") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "BoolValue") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *TypedValue) MarshalJSON() ([]byte, error) {
type NoMethod TypedValue
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
func (s *TypedValue) UnmarshalJSON(data []byte) error {
type NoMethod TypedValue
var s1 struct {
DoubleValue *gensupport.JSONFloat64 `json:"doubleValue"`
*NoMethod
}
s1.NoMethod = (*NoMethod)(s)
if err := json.Unmarshal(data, &s1); err != nil {
return err
}
if s1.DoubleValue != nil {
s.DoubleValue = (*float64)(s1.DoubleValue)
}
return nil
}
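// exampleTypedValues is an illustrative sketch: a TypedValue carries a
// single strongly-typed value, so only one of the value fields should be
// set. The fields are pointers so that an explicit zero can be
// distinguished from an unset field.
func exampleTypedValues() (*TypedValue, *TypedValue) {
	ok := true
	zero := int64(0) // explicitly representable because Int64Value is a pointer
	return &TypedValue{BoolValue: &ok}, &TypedValue{Int64Value: &zero}
}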
// UptimeCheckConfig: This message configures which resources and
// services to monitor for availability.
type UptimeCheckConfig struct {
// ContentMatchers: The content that is expected to appear in the data
// returned by the target server against which the check is run.
// Currently, only the first entry in the content_matchers list is
// supported, and additional entries will be ignored. This field is
// optional and should only be specified if a content match is required
// as part of the Uptime check.
ContentMatchers []*ContentMatcher `json:"contentMatchers,omitempty"`
// DisplayName: A human-friendly name for the Uptime check
// configuration. The display name should be unique within a Stackdriver
// Workspace in order to make it easier to identify; however, uniqueness
// is not enforced. Required.
DisplayName string `json:"displayName,omitempty"`
// HttpCheck: Contains information needed to make an HTTP or HTTPS
// check.
HttpCheck *HttpCheck `json:"httpCheck,omitempty"`
// InternalCheckers: The internal checkers that this check will egress
// from. If is_internal is true and this list is empty, the check will
// egress from all the InternalCheckers configured for the project that
// owns this UptimeCheckConfig.
InternalCheckers []*InternalChecker `json:"internalCheckers,omitempty"`
// IsInternal: If this is true, then checks are made only from the
// 'internal_checkers'. If it is false, then checks are made only from
// the 'selected_regions'. It is an error to provide 'selected_regions'
// when is_internal is true, or to provide 'internal_checkers' when
// is_internal is false.
IsInternal bool `json:"isInternal,omitempty"`
// MonitoredResource: The monitored resource
// (https://cloud.google.com/monitoring/api/resources) associated with
// the configuration. The following monitored resource types are
// supported for Uptime checks: uptime_url, gce_instance, gae_app,
// aws_ec2_instance, aws_elb_load_balancer
MonitoredResource *MonitoredResource `json:"monitoredResource,omitempty"`
// Name: A unique resource name for this Uptime check configuration. The
// format is:
// projects/[PROJECT_ID]/uptimeCheckConfigs/[UPTIME_CHECK_ID]
// This field should be omitted when creating the Uptime check
// configuration; on create, the resource name is assigned by the server
// and included in the response.
Name string `json:"name,omitempty"`
// Period: How often, in seconds, the Uptime check is performed.
// Currently, the only supported values are 60s (1 minute), 300s (5
// minutes), 600s (10 minutes), and 900s (15 minutes). Optional,
// defaults to 60s.
Period string `json:"period,omitempty"`
// ResourceGroup: The group resource associated with the configuration.
ResourceGroup *ResourceGroup `json:"resourceGroup,omitempty"`
// SelectedRegions: The list of regions from which the check will be
// run. Some regions contain one location, and others contain more than
// one. If this field is specified, enough regions must be provided to
// include a minimum of 3 locations. Not specifying this field will
// result in Uptime checks running from all available regions.
//
// Possible values:
// "REGION_UNSPECIFIED" - Default value if no region is specified.
// Will result in Uptime checks running from all regions.
// "USA" - Allows checks to run from locations within the United
// States of America.
// "EUROPE" - Allows checks to run from locations within the continent
// of Europe.
// "SOUTH_AMERICA" - Allows checks to run from locations within the
// continent of South America.
// "ASIA_PACIFIC" - Allows checks to run from locations within the
// Asia Pacific area (ex: Singapore).
SelectedRegions []string `json:"selectedRegions,omitempty"`
// TcpCheck: Contains information needed to make a TCP check.
TcpCheck *TcpCheck `json:"tcpCheck,omitempty"`
// Timeout: The maximum amount of time to wait for the request to
// complete (must be between 1 and 60 seconds). Required.
Timeout string `json:"timeout,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "ContentMatchers") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "ContentMatchers") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *UptimeCheckConfig) MarshalJSON() ([]byte, error) {
type NoMethod UptimeCheckConfig
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
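// exampleUptimeCheck is an illustrative sketch: a minimal HTTPS uptime
// check against an uptime_url monitored resource, using one of the
// documented period values. The project, host, and path are made up, and
// the HttpCheck field names used here are assumptions about that struct.
func exampleUptimeCheck() *UptimeCheckConfig {
	return &UptimeCheckConfig{
		DisplayName: "homepage-check",
		MonitoredResource: &MonitoredResource{
			Type: "uptime_url",
			Labels: map[string]string{
				"project_id": "my-project",
				"host":       "example.com",
			},
		},
		HttpCheck: &HttpCheck{Path: "/", UseSsl: true},
		Period:    "300s",
		Timeout:   "10s",
	}
}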
// UptimeCheckIp: Contains the region, location, and list of IP
// addresses where checkers in the location run from.
type UptimeCheckIp struct {
// IpAddress: The IP address from which the Uptime check originates.
// This is a fully specified IP address (not an IP address range). Most
// IP addresses, as of this publication, are in IPv4 format; however,
// one should not rely on the IP addresses being in IPv4 format
// indefinitely, and should support interpreting this field in either
// IPv4 or IPv6 format.
IpAddress string `json:"ipAddress,omitempty"`
// Location: A more specific location within the region that typically
// encodes a particular city/town/metro (and its containing
// state/province or country) within the broader umbrella region
// category.
Location string `json:"location,omitempty"`
// Region: A broad region category in which the IP address is located.
//
// Possible values:
// "REGION_UNSPECIFIED" - Default value if no region is specified.
// Will result in Uptime checks running from all regions.
// "USA" - Allows checks to run from locations within the United
// States of America.
// "EUROPE" - Allows checks to run from locations within the continent
// of Europe.
// "SOUTH_AMERICA" - Allows checks to run from locations within the
// continent of South America.
// "ASIA_PACIFIC" - Allows checks to run from locations within the
// Asia Pacific area (ex: Singapore).
Region string `json:"region,omitempty"`
// ForceSendFields is a list of field names (e.g. "IpAddress") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "IpAddress") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *UptimeCheckIp) MarshalJSON() ([]byte, error) {
type NoMethod UptimeCheckIp
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// VerifyNotificationChannelRequest: The VerifyNotificationChannel
// request.
type VerifyNotificationChannelRequest struct {
// Code: The verification code that was delivered to the channel as a
// result of invoking the SendNotificationChannelVerificationCode API
// method or that was retrieved from a verified channel via
// GetNotificationChannelVerificationCode. For example, one might have
// "G-123456" or "TKNZGhhd2EyN3I1MnRnMjRv" (in general, one is only
// guaranteed that the code is valid UTF-8; one should not make any
// assumptions regarding the structure or format of the code).
Code string `json:"code,omitempty"`
// ForceSendFields is a list of field names (e.g. "Code") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Code") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *VerifyNotificationChannelRequest) MarshalJSON() ([]byte, error) {
type NoMethod VerifyNotificationChannelRequest
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// WindowsBasedSli: A WindowsBasedSli defines good_service as the count
// of time windows for which the provided service was of good quality.
// Criteria for determining if service was good are embedded in the
// window_criterion.
type WindowsBasedSli struct {
// GoodBadMetricFilter: A monitoring filter
// (https://cloud.google.com/monitoring/api/v3/filters) specifying a
// TimeSeries with ValueType = BOOL. The window is good if any true
// values appear in the window.
GoodBadMetricFilter string `json:"goodBadMetricFilter,omitempty"`
// GoodTotalRatioThreshold: A window is good if its performance is high
// enough.
GoodTotalRatioThreshold *PerformanceThreshold `json:"goodTotalRatioThreshold,omitempty"`
// MetricMeanInRange: A window is good if the metric's value is in a
// good range, averaged across returned streams.
MetricMeanInRange *MetricRange `json:"metricMeanInRange,omitempty"`
// MetricSumInRange: A window is good if the metric's value is in a good
// range, summed across returned streams.
MetricSumInRange *MetricRange `json:"metricSumInRange,omitempty"`
// WindowPeriod: Duration over which window quality is evaluated. Must
// be an integer fraction of a day and at least 60s.
WindowPeriod string `json:"windowPeriod,omitempty"`
// ForceSendFields is a list of field names (e.g. "GoodBadMetricFilter")
// to unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "GoodBadMetricFilter") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *WindowsBasedSli) MarshalJSON() ([]byte, error) {
type NoMethod WindowsBasedSli
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
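// exampleWindowsBasedSli is an editor-added sketch with illustrative values,
// not part of the generated surface: a windows-based SLI in which each
// 5-minute window counts as good if any true point appears in the selected
// BOOL time series. The metric type in the filter is hypothetical.
func exampleWindowsBasedSli() *WindowsBasedSli {
	return &WindowsBasedSli{
		// Any monitoring filter selecting a ValueType = BOOL series works here.
		GoodBadMetricFilter: `metric.type="custom.googleapis.com/my_check_passed"`,
		// Must be an integer fraction of a day and at least 60s.
		WindowPeriod: "300s",
	}
}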
// method id "monitoring.projects.alertPolicies.create":
type ProjectsAlertPoliciesCreateCall struct {
s *APIService
name string
alertpolicy *AlertPolicy
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Create: Creates a new alerting policy.
func (r *ProjectsAlertPoliciesService) Create(name string, alertpolicy *AlertPolicy) *ProjectsAlertPoliciesCreateCall {
c := &ProjectsAlertPoliciesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
c.alertpolicy = alertpolicy
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsAlertPoliciesCreateCall) Fields(s ...googleapi.Field) *ProjectsAlertPoliciesCreateCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsAlertPoliciesCreateCall) Context(ctx context.Context) *ProjectsAlertPoliciesCreateCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsAlertPoliciesCreateCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsAlertPoliciesCreateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.alertpolicy)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}/alertPolicies")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "monitoring.projects.alertPolicies.create" call.
// Exactly one of *AlertPolicy or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *AlertPolicy.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *ProjectsAlertPoliciesCreateCall) Do(opts ...googleapi.CallOption) (*AlertPolicy, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &AlertPolicy{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Creates a new alerting policy.",
// "flatPath": "v3/projects/{projectsId}/alertPolicies",
// "httpMethod": "POST",
// "id": "monitoring.projects.alertPolicies.create",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "name": {
// "description": "The project in which to create the alerting policy. The format is projects/[PROJECT_ID].Note that this field names the parent container in which the alerting policy will be written, not the name of the created policy. The alerting policy that is returned will have a name that contains a normalized representation of this name as a prefix but adds a suffix of the form /alertPolicies/[POLICY_ID], identifying the policy in the container.",
// "location": "path",
// "pattern": "^projects/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v3/{+name}/alertPolicies",
// "request": {
// "$ref": "AlertPolicy"
// },
// "response": {
// "$ref": "AlertPolicy"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/monitoring"
// ]
// }
}
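// exampleCreateAlertPolicy is an editor-added usage sketch: it assumes an
// authenticated *APIService (svc) constructed elsewhere and illustrative
// AlertPolicy field values (DisplayName and Combiner are assumed from the
// AlertPolicy schema). The name argument names the parent project; the
// returned policy carries a server-assigned /alertPolicies/[POLICY_ID] suffix.
func exampleCreateAlertPolicy(ctx context.Context, svc *APIService) (*AlertPolicy, error) {
	policy := &AlertPolicy{
		DisplayName: "High error rate", // illustrative
		Combiner:    "OR",              // how multiple conditions combine
	}
	return svc.Projects.AlertPolicies.Create("projects/my-project", policy).Context(ctx).Do()
}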
// method id "monitoring.projects.alertPolicies.delete":
type ProjectsAlertPoliciesDeleteCall struct {
s *APIService
name string
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Delete: Deletes an alerting policy.
func (r *ProjectsAlertPoliciesService) Delete(name string) *ProjectsAlertPoliciesDeleteCall {
c := &ProjectsAlertPoliciesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsAlertPoliciesDeleteCall) Fields(s ...googleapi.Field) *ProjectsAlertPoliciesDeleteCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsAlertPoliciesDeleteCall) Context(ctx context.Context) *ProjectsAlertPoliciesDeleteCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsAlertPoliciesDeleteCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsAlertPoliciesDeleteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("DELETE", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "monitoring.projects.alertPolicies.delete" call.
// Exactly one of *Empty or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Empty.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ProjectsAlertPoliciesDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Empty{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Deletes an alerting policy.",
// "flatPath": "v3/projects/{projectsId}/alertPolicies/{alertPoliciesId}",
// "httpMethod": "DELETE",
// "id": "monitoring.projects.alertPolicies.delete",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "name": {
// "description": "The alerting policy to delete. The format is:\nprojects/[PROJECT_ID]/alertPolicies/[ALERT_POLICY_ID]\nFor more information, see AlertPolicy.",
// "location": "path",
// "pattern": "^projects/[^/]+/alertPolicies/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v3/{+name}",
// "response": {
// "$ref": "Empty"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/monitoring"
// ]
// }
}
// method id "monitoring.projects.alertPolicies.get":
type ProjectsAlertPoliciesGetCall struct {
s *APIService
name string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// Get: Gets a single alerting policy.
func (r *ProjectsAlertPoliciesService) Get(name string) *ProjectsAlertPoliciesGetCall {
c := &ProjectsAlertPoliciesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsAlertPoliciesGetCall) Fields(s ...googleapi.Field) *ProjectsAlertPoliciesGetCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ProjectsAlertPoliciesGetCall) IfNoneMatch(entityTag string) *ProjectsAlertPoliciesGetCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsAlertPoliciesGetCall) Context(ctx context.Context) *ProjectsAlertPoliciesGetCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsAlertPoliciesGetCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsAlertPoliciesGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "monitoring.projects.alertPolicies.get" call.
// Exactly one of *AlertPolicy or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *AlertPolicy.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *ProjectsAlertPoliciesGetCall) Do(opts ...googleapi.CallOption) (*AlertPolicy, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &AlertPolicy{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Gets a single alerting policy.",
// "flatPath": "v3/projects/{projectsId}/alertPolicies/{alertPoliciesId}",
// "httpMethod": "GET",
// "id": "monitoring.projects.alertPolicies.get",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "name": {
// "description": "The alerting policy to retrieve. The format is\nprojects/[PROJECT_ID]/alertPolicies/[ALERT_POLICY_ID]\n",
// "location": "path",
// "pattern": "^projects/[^/]+/alertPolicies/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v3/{+name}",
// "response": {
// "$ref": "AlertPolicy"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/monitoring",
// "https://www.googleapis.com/auth/monitoring.read"
// ]
// }
}
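// exampleConditionalGet is an editor-added sketch of a conditional read:
// IfNoneMatch makes the server answer 304 while the ETag still matches, which
// surfaces as an error recognized by googleapi.IsNotModified. svc and etag
// are assumed to come from elsewhere.
func exampleConditionalGet(ctx context.Context, svc *APIService, name, etag string) (*AlertPolicy, bool, error) {
	p, err := svc.Projects.AlertPolicies.Get(name).IfNoneMatch(etag).Context(ctx).Do()
	if googleapi.IsNotModified(err) {
		return nil, true, nil // cached copy is still current
	}
	return p, false, err
}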
// method id "monitoring.projects.alertPolicies.list":
type ProjectsAlertPoliciesListCall struct {
s *APIService
name string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// List: Lists the existing alerting policies for the project.
func (r *ProjectsAlertPoliciesService) List(name string) *ProjectsAlertPoliciesListCall {
c := &ProjectsAlertPoliciesListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
return c
}
// Filter sets the optional parameter "filter": If provided, this field
// specifies the criteria that must be met by alert policies to be
// included in the response.For more details, see sorting and filtering.
func (c *ProjectsAlertPoliciesListCall) Filter(filter string) *ProjectsAlertPoliciesListCall {
c.urlParams_.Set("filter", filter)
return c
}
// OrderBy sets the optional parameter "orderBy": A comma-separated list
// of fields by which to sort the result. Supports the same set of field
// references as the filter field. Entries can be prefixed with a minus
// sign to sort by the field in descending order.For more details, see
// sorting and filtering.
func (c *ProjectsAlertPoliciesListCall) OrderBy(orderBy string) *ProjectsAlertPoliciesListCall {
c.urlParams_.Set("orderBy", orderBy)
return c
}
// PageSize sets the optional parameter "pageSize": The maximum number
// of results to return in a single response.
func (c *ProjectsAlertPoliciesListCall) PageSize(pageSize int64) *ProjectsAlertPoliciesListCall {
c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
return c
}
// PageToken sets the optional parameter "pageToken": If this field is
// not empty then it must contain the nextPageToken value returned by a
// previous call to this method. Using this field causes the method to
// return more results from the previous method call.
func (c *ProjectsAlertPoliciesListCall) PageToken(pageToken string) *ProjectsAlertPoliciesListCall {
c.urlParams_.Set("pageToken", pageToken)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsAlertPoliciesListCall) Fields(s ...googleapi.Field) *ProjectsAlertPoliciesListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ProjectsAlertPoliciesListCall) IfNoneMatch(entityTag string) *ProjectsAlertPoliciesListCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsAlertPoliciesListCall) Context(ctx context.Context) *ProjectsAlertPoliciesListCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsAlertPoliciesListCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsAlertPoliciesListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}/alertPolicies")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "monitoring.projects.alertPolicies.list" call.
// Exactly one of *ListAlertPoliciesResponse or error will be non-nil.
// Any non-2xx status code is an error. Response headers are in either
// *ListAlertPoliciesResponse.ServerResponse.Header or (if a response
// was returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ProjectsAlertPoliciesListCall) Do(opts ...googleapi.CallOption) (*ListAlertPoliciesResponse, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &ListAlertPoliciesResponse{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Lists the existing alerting policies for the project.",
// "flatPath": "v3/projects/{projectsId}/alertPolicies",
// "httpMethod": "GET",
// "id": "monitoring.projects.alertPolicies.list",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "filter": {
// "description": "If provided, this field specifies the criteria that must be met by alert policies to be included in the response.For more details, see sorting and filtering.",
// "location": "query",
// "type": "string"
// },
// "name": {
// "description": "The project whose alert policies are to be listed. The format is\nprojects/[PROJECT_ID]\nNote that this field names the parent container in which the alerting policies to be listed are stored. To retrieve a single alerting policy by name, use the GetAlertPolicy operation, instead.",
// "location": "path",
// "pattern": "^projects/[^/]+$",
// "required": true,
// "type": "string"
// },
// "orderBy": {
// "description": "A comma-separated list of fields by which to sort the result. Supports the same set of field references as the filter field. Entries can be prefixed with a minus sign to sort by the field in descending order.For more details, see sorting and filtering.",
// "location": "query",
// "type": "string"
// },
// "pageSize": {
// "description": "The maximum number of results to return in a single response.",
// "format": "int32",
// "location": "query",
// "type": "integer"
// },
// "pageToken": {
// "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return more results from the previous method call.",
// "location": "query",
// "type": "string"
// }
// },
// "path": "v3/{+name}/alertPolicies",
// "response": {
// "$ref": "ListAlertPoliciesResponse"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/monitoring",
// "https://www.googleapis.com/auth/monitoring.read"
// ]
// }
}
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *ProjectsAlertPoliciesListCall) Pages(ctx context.Context, f func(*ListAlertPoliciesResponse) error) error {
c.ctx_ = ctx
defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
for {
x, err := c.Do()
if err != nil {
return err
}
if err := f(x); err != nil {
return err
}
if x.NextPageToken == "" {
return nil
}
c.PageToken(x.NextPageToken)
}
}
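// exampleListAlertPolicies is an editor-added sketch of page iteration with
// Pages: the callback runs once per page, and returning a non-nil error stops
// the walk. svc is assumed; the AlertPolicies field name on the response is
// assumed from the ListAlertPoliciesResponse schema.
func exampleListAlertPolicies(ctx context.Context, svc *APIService) error {
	call := svc.Projects.AlertPolicies.List("projects/my-project").PageSize(50)
	return call.Pages(ctx, func(page *ListAlertPoliciesResponse) error {
		for _, p := range page.AlertPolicies {
			fmt.Println(p.Name)
		}
		return nil
	})
}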
// method id "monitoring.projects.alertPolicies.patch":
type ProjectsAlertPoliciesPatchCall struct {
s *APIService
name string
alertpolicy *AlertPolicy
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Patch: Updates an alerting policy. You can either replace the entire
// policy with a new one or replace only certain fields in the current
// alerting policy by specifying the fields to be updated via
// updateMask. Returns the updated alerting policy.
func (r *ProjectsAlertPoliciesService) Patch(name string, alertpolicy *AlertPolicy) *ProjectsAlertPoliciesPatchCall {
c := &ProjectsAlertPoliciesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
c.alertpolicy = alertpolicy
return c
}
// UpdateMask sets the optional parameter "updateMask": A list of
// alerting policy field names. If this field is not empty, each listed
// field in the existing alerting policy is set to the value of the
// corresponding field in the supplied policy (alert_policy), or to the
// field's default value if the field is not in the supplied alerting
// policy. Fields not listed retain their previous value.Examples of
// valid field masks include display_name, documentation,
// documentation.content, documentation.mime_type, user_labels,
// user_label.nameofkey, enabled, conditions, combiner, etc.If this
// field is empty, then the supplied alerting policy replaces the
// existing policy. It is the same as deleting the existing policy and
// adding the supplied policy, except for the following:
// The new policy will have the same [ALERT_POLICY_ID] as the former
// policy. This gives you continuity with the former policy in your
// notifications and incidents.
// Conditions in the new policy will keep their former [CONDITION_ID] if
// the supplied condition includes the name field with that
// [CONDITION_ID]. If the supplied condition omits the name field, then
// a new [CONDITION_ID] is created.
func (c *ProjectsAlertPoliciesPatchCall) UpdateMask(updateMask string) *ProjectsAlertPoliciesPatchCall {
c.urlParams_.Set("updateMask", updateMask)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsAlertPoliciesPatchCall) Fields(s ...googleapi.Field) *ProjectsAlertPoliciesPatchCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsAlertPoliciesPatchCall) Context(ctx context.Context) *ProjectsAlertPoliciesPatchCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsAlertPoliciesPatchCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsAlertPoliciesPatchCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.alertpolicy)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("PATCH", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "monitoring.projects.alertPolicies.patch" call.
// Exactly one of *AlertPolicy or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *AlertPolicy.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *ProjectsAlertPoliciesPatchCall) Do(opts ...googleapi.CallOption) (*AlertPolicy, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &AlertPolicy{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Updates an alerting policy. You can either replace the entire policy with a new one or replace only certain fields in the current alerting policy by specifying the fields to be updated via updateMask. Returns the updated alerting policy.",
// "flatPath": "v3/projects/{projectsId}/alertPolicies/{alertPoliciesId}",
// "httpMethod": "PATCH",
// "id": "monitoring.projects.alertPolicies.patch",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "name": {
// "description": "Required if the policy exists. The resource name for this policy. The syntax is:\nprojects/[PROJECT_ID]/alertPolicies/[ALERT_POLICY_ID]\n[ALERT_POLICY_ID] is assigned by Stackdriver Monitoring when the policy is created. When calling the alertPolicies.create method, do not include the name field in the alerting policy passed as part of the request.",
// "location": "path",
// "pattern": "^projects/[^/]+/alertPolicies/[^/]+$",
// "required": true,
// "type": "string"
// },
// "updateMask": {
// "description": "Optional. A list of alerting policy field names. If this field is not empty, each listed field in the existing alerting policy is set to the value of the corresponding field in the supplied policy (alert_policy), or to the field's default value if the field is not in the supplied alerting policy. Fields not listed retain their previous value.Examples of valid field masks include display_name, documentation, documentation.content, documentation.mime_type, user_labels, user_label.nameofkey, enabled, conditions, combiner, etc.If this field is empty, then the supplied alerting policy replaces the existing policy. It is the same as deleting the existing policy and adding the supplied policy, except for the following:\nThe new policy will have the same [ALERT_POLICY_ID] as the former policy. This gives you continuity with the former policy in your notifications and incidents.\nConditions in the new policy will keep their former [CONDITION_ID] if the supplied condition includes the name field with that [CONDITION_ID]. If the supplied condition omits the name field, then a new [CONDITION_ID] is created.",
// "format": "google-fieldmask",
// "location": "query",
// "type": "string"
// }
// },
// "path": "v3/{+name}",
// "request": {
// "$ref": "AlertPolicy"
// },
// "response": {
// "$ref": "AlertPolicy"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/monitoring"
// ]
// }
}
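// exampleDisableAlertPolicy is an editor-added sketch of a partial update:
// only the fields named in updateMask change, and ForceSendFields is needed
// here so the zero value false is actually serialized. svc and name are
// assumed; the Enabled field is assumed from the AlertPolicy schema.
func exampleDisableAlertPolicy(ctx context.Context, svc *APIService, name string) (*AlertPolicy, error) {
	patch := &AlertPolicy{
		Enabled:         false,
		ForceSendFields: []string{"Enabled"}, // send the false explicitly
	}
	return svc.Projects.AlertPolicies.Patch(name, patch).UpdateMask("enabled").Context(ctx).Do()
}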
// method id "monitoring.projects.collectdTimeSeries.create":
type ProjectsCollectdTimeSeriesCreateCall struct {
s *APIService
name string
createcollectdtimeseriesrequest *CreateCollectdTimeSeriesRequest
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Create: Stackdriver Monitoring Agent only: Creates a new time
// series. Caution: this method is only for use by the Stackdriver
// Monitoring Agent. Use projects.timeSeries.create instead.
func (r *ProjectsCollectdTimeSeriesService) Create(name string, createcollectdtimeseriesrequest *CreateCollectdTimeSeriesRequest) *ProjectsCollectdTimeSeriesCreateCall {
c := &ProjectsCollectdTimeSeriesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
c.createcollectdtimeseriesrequest = createcollectdtimeseriesrequest
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsCollectdTimeSeriesCreateCall) Fields(s ...googleapi.Field) *ProjectsCollectdTimeSeriesCreateCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsCollectdTimeSeriesCreateCall) Context(ctx context.Context) *ProjectsCollectdTimeSeriesCreateCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsCollectdTimeSeriesCreateCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsCollectdTimeSeriesCreateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.createcollectdtimeseriesrequest)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}/collectdTimeSeries")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "monitoring.projects.collectdTimeSeries.create" call.
// Exactly one of *CreateCollectdTimeSeriesResponse or error will be
// non-nil. Any non-2xx status code is an error. Response headers are in
// either *CreateCollectdTimeSeriesResponse.ServerResponse.Header or (if
// a response was returned at all) in error.(*googleapi.Error).Header.
// Use googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ProjectsCollectdTimeSeriesCreateCall) Do(opts ...googleapi.CallOption) (*CreateCollectdTimeSeriesResponse, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &CreateCollectdTimeSeriesResponse{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Stackdriver Monitoring Agent only: Creates a new time series.\u003caside class=\"caution\"\u003eThis method is only for use by the Stackdriver Monitoring Agent. Use projects.timeSeries.create instead.\u003c/aside\u003e",
// "flatPath": "v3/projects/{projectsId}/collectdTimeSeries",
// "httpMethod": "POST",
// "id": "monitoring.projects.collectdTimeSeries.create",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "name": {
// "description": "The project in which to create the time series. The format is \"projects/PROJECT_ID_OR_NUMBER\".",
// "location": "path",
// "pattern": "^projects/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v3/{+name}/collectdTimeSeries",
// "request": {
// "$ref": "CreateCollectdTimeSeriesRequest"
// },
// "response": {
// "$ref": "CreateCollectdTimeSeriesResponse"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/monitoring",
// "https://www.googleapis.com/auth/monitoring.write"
// ]
// }
}
// method id "monitoring.projects.groups.create":
type ProjectsGroupsCreateCall struct {
s *APIService
name string
group *Group
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Create: Creates a new group.
func (r *ProjectsGroupsService) Create(name string, group *Group) *ProjectsGroupsCreateCall {
c := &ProjectsGroupsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
c.group = group
return c
}
// ValidateOnly sets the optional parameter "validateOnly": If true,
// validate this request but do not create the group.
func (c *ProjectsGroupsCreateCall) ValidateOnly(validateOnly bool) *ProjectsGroupsCreateCall {
c.urlParams_.Set("validateOnly", fmt.Sprint(validateOnly))
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsGroupsCreateCall) Fields(s ...googleapi.Field) *ProjectsGroupsCreateCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsGroupsCreateCall) Context(ctx context.Context) *ProjectsGroupsCreateCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsGroupsCreateCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsGroupsCreateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.group)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}/groups")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "monitoring.projects.groups.create" call.
// Exactly one of *Group or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Group.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ProjectsGroupsCreateCall) Do(opts ...googleapi.CallOption) (*Group, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Group{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Creates a new group.",
// "flatPath": "v3/projects/{projectsId}/groups",
// "httpMethod": "POST",
// "id": "monitoring.projects.groups.create",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "name": {
// "description": "The project in which to create the group. The format is \"projects/{project_id_or_number}\".",
// "location": "path",
// "pattern": "^projects/[^/]+$",
// "required": true,
// "type": "string"
// },
// "validateOnly": {
// "description": "If true, validate this request but do not create the group.",
// "location": "query",
// "type": "boolean"
// }
// },
// "path": "v3/{+name}/groups",
// "request": {
// "$ref": "Group"
// },
// "response": {
// "$ref": "Group"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/monitoring"
// ]
// }
}
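// exampleValidateGroup is an editor-added sketch of ValidateOnly: the request
// is checked server-side but nothing is created, so a nil error means the
// group definition is acceptable. svc is assumed; the DisplayName and Filter
// fields are assumed from the Group schema and the filter value is
// illustrative.
func exampleValidateGroup(ctx context.Context, svc *APIService) error {
	g := &Group{
		DisplayName: "frontend hosts",
		Filter:      `resource.metadata.name=starts_with("frontend-")`,
	}
	_, err := svc.Projects.Groups.Create("projects/my-project", g).ValidateOnly(true).Context(ctx).Do()
	return err
}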
// method id "monitoring.projects.groups.delete":
type ProjectsGroupsDeleteCall struct {
s *APIService
name string
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Delete: Deletes an existing group.
func (r *ProjectsGroupsService) Delete(name string) *ProjectsGroupsDeleteCall {
c := &ProjectsGroupsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
return c
}
// Recursive sets the optional parameter "recursive": If this field is
// true, then the request means to delete a group with all its
// descendants. Otherwise, the request means to delete a group only when
// it has no descendants. The default value is false.
func (c *ProjectsGroupsDeleteCall) Recursive(recursive bool) *ProjectsGroupsDeleteCall {
c.urlParams_.Set("recursive", fmt.Sprint(recursive))
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsGroupsDeleteCall) Fields(s ...googleapi.Field) *ProjectsGroupsDeleteCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsGroupsDeleteCall) Context(ctx context.Context) *ProjectsGroupsDeleteCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsGroupsDeleteCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsGroupsDeleteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("DELETE", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "monitoring.projects.groups.delete" call.
// Exactly one of *Empty or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Empty.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ProjectsGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Empty{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Deletes an existing group.",
// "flatPath": "v3/projects/{projectsId}/groups/{groupsId}",
// "httpMethod": "DELETE",
// "id": "monitoring.projects.groups.delete",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "name": {
// "description": "The group to delete. The format is \"projects/{project_id_or_number}/groups/{group_id}\".",
// "location": "path",
// "pattern": "^projects/[^/]+/groups/[^/]+$",
// "required": true,
// "type": "string"
// },
// "recursive": {
// "description": "If this field is true, then the request means to delete a group with all its descendants. Otherwise, the request means to delete a group only when it has no descendants. The default value is false.",
// "location": "query",
// "type": "boolean"
// }
// },
// "path": "v3/{+name}",
// "response": {
// "$ref": "Empty"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/monitoring"
// ]
// }
}
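// exampleDeleteGroupTree is an editor-added sketch: Recursive(true) deletes
// the group together with all of its descendants, whereas the default call
// fails while descendants remain. svc and name are assumed.
func exampleDeleteGroupTree(ctx context.Context, svc *APIService, name string) error {
	_, err := svc.Projects.Groups.Delete(name).Recursive(true).Context(ctx).Do()
	return err
}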
// method id "monitoring.projects.groups.get":
type ProjectsGroupsGetCall struct {
s *APIService
name string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// Get: Gets a single group.
func (r *ProjectsGroupsService) Get(name string) *ProjectsGroupsGetCall {
c := &ProjectsGroupsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsGroupsGetCall) Fields(s ...googleapi.Field) *ProjectsGroupsGetCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ProjectsGroupsGetCall) IfNoneMatch(entityTag string) *ProjectsGroupsGetCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsGroupsGetCall) Context(ctx context.Context) *ProjectsGroupsGetCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsGroupsGetCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsGroupsGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "monitoring.projects.groups.get" call.
// Exactly one of *Group or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Group.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ProjectsGroupsGetCall) Do(opts ...googleapi.CallOption) (*Group, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Group{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Gets a single group.",
// "flatPath": "v3/projects/{projectsId}/groups/{groupsId}",
// "httpMethod": "GET",
// "id": "monitoring.projects.groups.get",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "name": {
// "description": "The group to retrieve. The format is \"projects/{project_id_or_number}/groups/{group_id}\".",
// "location": "path",
// "pattern": "^projects/[^/]+/groups/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v3/{+name}",
// "response": {
// "$ref": "Group"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/monitoring",
// "https://www.googleapis.com/auth/monitoring.read"
// ]
// }
}
// method id "monitoring.projects.groups.list":
type ProjectsGroupsListCall struct {
s *APIService
name string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// List: Lists the existing groups.
func (r *ProjectsGroupsService) List(name string) *ProjectsGroupsListCall {
c := &ProjectsGroupsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
return c
}
// AncestorsOfGroup sets the optional parameter "ancestorsOfGroup": A
// group name: "projects/{project_id_or_number}/groups/{group_id}".
// Returns groups that are ancestors of the specified group. The groups
// are returned in order, starting with the immediate parent and ending
// with the most distant ancestor. If the specified group has no
// immediate parent, the results are empty.
func (c *ProjectsGroupsListCall) AncestorsOfGroup(ancestorsOfGroup string) *ProjectsGroupsListCall {
c.urlParams_.Set("ancestorsOfGroup", ancestorsOfGroup)
return c
}
// ChildrenOfGroup sets the optional parameter "childrenOfGroup": A
// group name: "projects/{project_id_or_number}/groups/{group_id}".
// Returns groups whose parentName field contains the group name. If no
// groups have this parent, the results are empty.
func (c *ProjectsGroupsListCall) ChildrenOfGroup(childrenOfGroup string) *ProjectsGroupsListCall {
c.urlParams_.Set("childrenOfGroup", childrenOfGroup)
return c
}
// DescendantsOfGroup sets the optional parameter "descendantsOfGroup":
// A group name: "projects/{project_id_or_number}/groups/{group_id}".
// Returns the descendants of the specified group. This is a superset of
// the results returned by the childrenOfGroup filter, and includes
// children-of-children, and so forth.
func (c *ProjectsGroupsListCall) DescendantsOfGroup(descendantsOfGroup string) *ProjectsGroupsListCall {
c.urlParams_.Set("descendantsOfGroup", descendantsOfGroup)
return c
}
// PageSize sets the optional parameter "pageSize": A positive number
// that is the maximum number of results to return.
func (c *ProjectsGroupsListCall) PageSize(pageSize int64) *ProjectsGroupsListCall {
c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
return c
}
// PageToken sets the optional parameter "pageToken": If this field is
// not empty then it must contain the nextPageToken value returned by a
// previous call to this method. Using this field causes the method to
// return additional results from the previous method call.
func (c *ProjectsGroupsListCall) PageToken(pageToken string) *ProjectsGroupsListCall {
c.urlParams_.Set("pageToken", pageToken)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsGroupsListCall) Fields(s ...googleapi.Field) *ProjectsGroupsListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ProjectsGroupsListCall) IfNoneMatch(entityTag string) *ProjectsGroupsListCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsGroupsListCall) Context(ctx context.Context) *ProjectsGroupsListCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsGroupsListCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsGroupsListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}/groups")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "monitoring.projects.groups.list" call.
// Exactly one of *ListGroupsResponse or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *ListGroupsResponse.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ProjectsGroupsListCall) Do(opts ...googleapi.CallOption) (*ListGroupsResponse, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &ListGroupsResponse{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Lists the existing groups.",
// "flatPath": "v3/projects/{projectsId}/groups",
// "httpMethod": "GET",
// "id": "monitoring.projects.groups.list",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "ancestorsOfGroup": {
// "description": "A group name: \"projects/{project_id_or_number}/groups/{group_id}\". Returns groups that are ancestors of the specified group. The groups are returned in order, starting with the immediate parent and ending with the most distant ancestor. If the specified group has no immediate parent, the results are empty.",
// "location": "query",
// "type": "string"
// },
// "childrenOfGroup": {
// "description": "A group name: \"projects/{project_id_or_number}/groups/{group_id}\". Returns groups whose parentName field contains the group name. If no groups have this parent, the results are empty.",
// "location": "query",
// "type": "string"
// },
// "descendantsOfGroup": {
// "description": "A group name: \"projects/{project_id_or_number}/groups/{group_id}\". Returns the descendants of the specified group. This is a superset of the results returned by the childrenOfGroup filter, and includes children-of-children, and so forth.",
// "location": "query",
// "type": "string"
// },
// "name": {
// "description": "The project whose groups are to be listed. The format is \"projects/{project_id_or_number}\".",
// "location": "path",
// "pattern": "^projects/[^/]+$",
// "required": true,
// "type": "string"
// },
// "pageSize": {
// "description": "A positive number that is the maximum number of results to return.",
// "format": "int32",
// "location": "query",
// "type": "integer"
// },
// "pageToken": {
// "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return additional results from the previous method call.",
// "location": "query",
// "type": "string"
// }
// },
// "path": "v3/{+name}/groups",
// "response": {
// "$ref": "ListGroupsResponse"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/monitoring",
// "https://www.googleapis.com/auth/monitoring.read"
// ]
// }
}
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *ProjectsGroupsListCall) Pages(ctx context.Context, f func(*ListGroupsResponse) error) error {
c.ctx_ = ctx
defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
for {
x, err := c.Do()
if err != nil {
return err
}
if err := f(x); err != nil {
return err
}
if x.NextPageToken == "" {
return nil
}
c.PageToken(x.NextPageToken)
}
}
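// exampleListChildGroups is an editor-added sketch of the hierarchy filters:
// ChildrenOfGroup returns only immediate children, while DescendantsOfGroup
// would return the whole subtree. svc and parent are assumed, and the Group
// field name on ListGroupsResponse is assumed from that response's schema.
func exampleListChildGroups(ctx context.Context, svc *APIService, parent string) error {
	call := svc.Projects.Groups.List("projects/my-project").ChildrenOfGroup(parent)
	return call.Pages(ctx, func(page *ListGroupsResponse) error {
		for _, g := range page.Group {
			fmt.Println(g.Name)
		}
		return nil
	})
}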
// method id "monitoring.projects.groups.update":
type ProjectsGroupsUpdateCall struct {
s *APIService
name string
group *Group
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Update: Updates an existing group. You can change any group
// attributes except name.
func (r *ProjectsGroupsService) Update(name string, group *Group) *ProjectsGroupsUpdateCall {
c := &ProjectsGroupsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
c.group = group
return c
}
// ValidateOnly sets the optional parameter "validateOnly": If true,
// validate this request but do not update the existing group.
func (c *ProjectsGroupsUpdateCall) ValidateOnly(validateOnly bool) *ProjectsGroupsUpdateCall {
c.urlParams_.Set("validateOnly", fmt.Sprint(validateOnly))
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsGroupsUpdateCall) Fields(s ...googleapi.Field) *ProjectsGroupsUpdateCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsGroupsUpdateCall) Context(ctx context.Context) *ProjectsGroupsUpdateCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsGroupsUpdateCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsGroupsUpdateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.group)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("PUT", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "monitoring.projects.groups.update" call.
// Exactly one of *Group or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Group.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ProjectsGroupsUpdateCall) Do(opts ...googleapi.CallOption) (*Group, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Group{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Updates an existing group. You can change any group attributes except name.",
// "flatPath": "v3/projects/{projectsId}/groups/{groupsId}",
// "httpMethod": "PUT",
// "id": "monitoring.projects.groups.update",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "name": {
// "description": "Output only. The name of this group. The format is \"projects/{project_id_or_number}/groups/{group_id}\". When creating a group, this field is ignored and a new name is created consisting of the project specified in the call to CreateGroup and a unique {group_id} that is generated automatically.",
// "location": "path",
// "pattern": "^projects/[^/]+/groups/[^/]+$",
// "required": true,
// "type": "string"
// },
// "validateOnly": {
// "description": "If true, validate this request but do not update the existing group.",
// "location": "query",
// "type": "boolean"
// }
// },
// "path": "v3/{+name}",
// "request": {
// "$ref": "Group"
// },
// "response": {
// "$ref": "Group"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/monitoring"
// ]
// }
}
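// Illustrative use of Update with ValidateOnly (a sketch, not part of the
// generated API): dry-run a group change before committing it. Assumes an
// authenticated *APIService named svc, a ctx, and an existing *Group g
// whose attributes (other than name) have been modified.
//
//	name := "projects/my-project/groups/my-group-id"
//	// First validate the request without applying it.
//	if _, err := svc.Projects.Groups.Update(name, g).
//		ValidateOnly(true).Context(ctx).Do(); err != nil {
//		return err
//	}
//	// Validation passed; perform the real update.
//	updated, err := svc.Projects.Groups.Update(name, g).Context(ctx).Do()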
// method id "monitoring.projects.groups.members.list":
type ProjectsGroupsMembersListCall struct {
s *APIService
name string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// List: Lists the monitored resources that are members of a group.
func (r *ProjectsGroupsMembersService) List(name string) *ProjectsGroupsMembersListCall {
c := &ProjectsGroupsMembersListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
return c
}
// Filter sets the optional parameter "filter": An optional list filter
// describing the members to be returned. The filter may reference the
// type, labels, and metadata of monitored resources that comprise the
// group. For example, to return only resources representing Compute
// Engine VM instances, use this filter:
// resource.type = "gce_instance"
func (c *ProjectsGroupsMembersListCall) Filter(filter string) *ProjectsGroupsMembersListCall {
c.urlParams_.Set("filter", filter)
return c
}
// IntervalEndTime sets the optional parameter "interval.endTime":
// Required. The end of the time interval.
func (c *ProjectsGroupsMembersListCall) IntervalEndTime(intervalEndTime string) *ProjectsGroupsMembersListCall {
c.urlParams_.Set("interval.endTime", intervalEndTime)
return c
}
// IntervalStartTime sets the optional parameter "interval.startTime":
// The beginning of the time interval. The default value for the start
// time is the end time. The start time must not be later than the end
// time.
func (c *ProjectsGroupsMembersListCall) IntervalStartTime(intervalStartTime string) *ProjectsGroupsMembersListCall {
c.urlParams_.Set("interval.startTime", intervalStartTime)
return c
}
// PageSize sets the optional parameter "pageSize": A positive number
// that is the maximum number of results to return.
func (c *ProjectsGroupsMembersListCall) PageSize(pageSize int64) *ProjectsGroupsMembersListCall {
c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
return c
}
// PageToken sets the optional parameter "pageToken": If this field is
// not empty then it must contain the nextPageToken value returned by a
// previous call to this method. Using this field causes the method to
// return additional results from the previous method call.
func (c *ProjectsGroupsMembersListCall) PageToken(pageToken string) *ProjectsGroupsMembersListCall {
c.urlParams_.Set("pageToken", pageToken)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsGroupsMembersListCall) Fields(s ...googleapi.Field) *ProjectsGroupsMembersListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ProjectsGroupsMembersListCall) IfNoneMatch(entityTag string) *ProjectsGroupsMembersListCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsGroupsMembersListCall) Context(ctx context.Context) *ProjectsGroupsMembersListCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsGroupsMembersListCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsGroupsMembersListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}/members")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "monitoring.projects.groups.members.list" call.
// Exactly one of *ListGroupMembersResponse or error will be non-nil.
// Any non-2xx status code is an error. Response headers are in either
// *ListGroupMembersResponse.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ProjectsGroupsMembersListCall) Do(opts ...googleapi.CallOption) (*ListGroupMembersResponse, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &ListGroupMembersResponse{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Lists the monitored resources that are members of a group.",
// "flatPath": "v3/projects/{projectsId}/groups/{groupsId}/members",
// "httpMethod": "GET",
// "id": "monitoring.projects.groups.members.list",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "filter": {
// "description": "An optional list filter describing the members to be returned. The filter may reference the type, labels, and metadata of monitored resources that comprise the group. For example, to return only resources representing Compute Engine VM instances, use this filter:\nresource.type = \"gce_instance\"\n",
// "location": "query",
// "type": "string"
// },
// "interval.endTime": {
// "description": "Required. The end of the time interval.",
// "format": "google-datetime",
// "location": "query",
// "type": "string"
// },
// "interval.startTime": {
// "description": "Optional. The beginning of the time interval. The default value for the start time is the end time. The start time must not be later than the end time.",
// "format": "google-datetime",
// "location": "query",
// "type": "string"
// },
// "name": {
// "description": "The group whose members are listed. The format is \"projects/{project_id_or_number}/groups/{group_id}\".",
// "location": "path",
// "pattern": "^projects/[^/]+/groups/[^/]+$",
// "required": true,
// "type": "string"
// },
// "pageSize": {
// "description": "A positive number that is the maximum number of results to return.",
// "format": "int32",
// "location": "query",
// "type": "integer"
// },
// "pageToken": {
// "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return additional results from the previous method call.",
// "location": "query",
// "type": "string"
// }
// },
// "path": "v3/{+name}/members",
// "response": {
// "$ref": "ListGroupMembersResponse"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/monitoring",
// "https://www.googleapis.com/auth/monitoring.read"
// ]
// }
}
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *ProjectsGroupsMembersListCall) Pages(ctx context.Context, f func(*ListGroupMembersResponse) error) error {
c.ctx_ = ctx
defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
for {
x, err := c.Do()
if err != nil {
return err
}
if err := f(x); err != nil {
return err
}
if x.NextPageToken == "" {
return nil
}
c.PageToken(x.NextPageToken)
}
}
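// Illustrative use of the members list (a sketch, not part of the generated
// API): page through the Compute Engine instances in a group. Assumes an
// authenticated *APIService named svc, a ctx, and that
// ListGroupMembersResponse exposes its results in a Members field of
// monitored resources with Type and Labels, as defined elsewhere in this
// package.
//
//	call := svc.Projects.Groups.Members.
//		List("projects/my-project/groups/my-group-id").
//		Filter(`resource.type = "gce_instance"`)
//	err := call.Pages(ctx, func(page *ListGroupMembersResponse) error {
//		for _, m := range page.Members {
//			fmt.Println(m.Type, m.Labels["instance_id"])
//		}
//		return nil
//	})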
// method id "monitoring.projects.metricDescriptors.create":
type ProjectsMetricDescriptorsCreateCall struct {
s *APIService
name string
metricdescriptor *MetricDescriptor
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Create: Creates a new metric descriptor. User-created metric
// descriptors define custom metrics.
func (r *ProjectsMetricDescriptorsService) Create(name string, metricdescriptor *MetricDescriptor) *ProjectsMetricDescriptorsCreateCall {
c := &ProjectsMetricDescriptorsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
c.metricdescriptor = metricdescriptor
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsMetricDescriptorsCreateCall) Fields(s ...googleapi.Field) *ProjectsMetricDescriptorsCreateCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsMetricDescriptorsCreateCall) Context(ctx context.Context) *ProjectsMetricDescriptorsCreateCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsMetricDescriptorsCreateCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsMetricDescriptorsCreateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.metricdescriptor)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}/metricDescriptors")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "monitoring.projects.metricDescriptors.create" call.
// Exactly one of *MetricDescriptor or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *MetricDescriptor.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ProjectsMetricDescriptorsCreateCall) Do(opts ...googleapi.CallOption) (*MetricDescriptor, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &MetricDescriptor{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Creates a new metric descriptor. User-created metric descriptors define custom metrics.",
// "flatPath": "v3/projects/{projectsId}/metricDescriptors",
// "httpMethod": "POST",
// "id": "monitoring.projects.metricDescriptors.create",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "name": {
// "description": "The project on which to execute the request. The format is \"projects/{project_id_or_number}\".",
// "location": "path",
// "pattern": "^projects/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v3/{+name}/metricDescriptors",
// "request": {
// "$ref": "MetricDescriptor"
// },
// "response": {
// "$ref": "MetricDescriptor"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/monitoring",
// "https://www.googleapis.com/auth/monitoring.write"
// ]
// }
}
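// Illustrative use of Create (a sketch, not part of the generated API):
// define a custom metric. Assumes an authenticated *APIService named svc,
// a ctx, and the MetricDescriptor field names defined elsewhere in this
// package.
//
//	md := &MetricDescriptor{
//		Type:        "custom.googleapis.com/my_test_metric",
//		MetricKind:  "GAUGE",
//		ValueType:   "DOUBLE",
//		Description: "An example custom metric.",
//	}
//	created, err := svc.Projects.MetricDescriptors.
//		Create("projects/my-project", md).Context(ctx).Do()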
// method id "monitoring.projects.metricDescriptors.delete":
type ProjectsMetricDescriptorsDeleteCall struct {
s *APIService
name string
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Delete: Deletes a metric descriptor. Only user-created custom metrics
// can be deleted.
func (r *ProjectsMetricDescriptorsService) Delete(name string) *ProjectsMetricDescriptorsDeleteCall {
c := &ProjectsMetricDescriptorsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsMetricDescriptorsDeleteCall) Fields(s ...googleapi.Field) *ProjectsMetricDescriptorsDeleteCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsMetricDescriptorsDeleteCall) Context(ctx context.Context) *ProjectsMetricDescriptorsDeleteCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsMetricDescriptorsDeleteCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsMetricDescriptorsDeleteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("DELETE", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "monitoring.projects.metricDescriptors.delete" call.
// Exactly one of *Empty or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Empty.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ProjectsMetricDescriptorsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Empty{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Deletes a metric descriptor. Only user-created custom metrics can be deleted.",
// "flatPath": "v3/projects/{projectsId}/metricDescriptors/{metricDescriptorsId}",
// "httpMethod": "DELETE",
// "id": "monitoring.projects.metricDescriptors.delete",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "name": {
// "description": "The metric descriptor on which to execute the request. The format is \"projects/{project_id_or_number}/metricDescriptors/{metric_id}\". An example of {metric_id} is: \"custom.googleapis.com/my_test_metric\".",
// "location": "path",
// "pattern": "^projects/[^/]+/metricDescriptors/.+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v3/{+name}",
// "response": {
// "$ref": "Empty"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/monitoring"
// ]
// }
}
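// Illustrative use of Delete (a sketch, not part of the generated API):
// remove a user-created custom metric. Assumes an authenticated
// *APIService named svc and a ctx.
//
//	_, err := svc.Projects.MetricDescriptors.
//		Delete("projects/my-project/metricDescriptors/custom.googleapis.com/my_test_metric").
//		Context(ctx).Do()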
// method id "monitoring.projects.metricDescriptors.get":
type ProjectsMetricDescriptorsGetCall struct {
s *APIService
name string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// Get: Gets a single metric descriptor. This method does not require a
// Stackdriver account.
func (r *ProjectsMetricDescriptorsService) Get(name string) *ProjectsMetricDescriptorsGetCall {
c := &ProjectsMetricDescriptorsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsMetricDescriptorsGetCall) Fields(s ...googleapi.Field) *ProjectsMetricDescriptorsGetCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ProjectsMetricDescriptorsGetCall) IfNoneMatch(entityTag string) *ProjectsMetricDescriptorsGetCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsMetricDescriptorsGetCall) Context(ctx context.Context) *ProjectsMetricDescriptorsGetCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsMetricDescriptorsGetCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsMetricDescriptorsGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "monitoring.projects.metricDescriptors.get" call.
// Exactly one of *MetricDescriptor or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *MetricDescriptor.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ProjectsMetricDescriptorsGetCall) Do(opts ...googleapi.CallOption) (*MetricDescriptor, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &MetricDescriptor{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Gets a single metric descriptor. This method does not require a Stackdriver account.",
// "flatPath": "v3/projects/{projectsId}/metricDescriptors/{metricDescriptorsId}",
// "httpMethod": "GET",
// "id": "monitoring.projects.metricDescriptors.get",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "name": {
// "description": "The metric descriptor on which to execute the request. The format is \"projects/{project_id_or_number}/metricDescriptors/{metric_id}\". An example value of {metric_id} is \"compute.googleapis.com/instance/disk/read_bytes_count\".",
// "location": "path",
// "pattern": "^projects/[^/]+/metricDescriptors/.+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v3/{+name}",
// "response": {
// "$ref": "MetricDescriptor"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/monitoring",
// "https://www.googleapis.com/auth/monitoring.read",
// "https://www.googleapis.com/auth/monitoring.write"
// ]
// }
}
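// Illustrative use of IfNoneMatch (a sketch, not part of the generated
// API): refetch a descriptor only if it changed since the last request.
// Assumes an authenticated *APIService named svc, a ctx, and an etag
// string saved from a previous response's headers.
//
//	md, err := svc.Projects.MetricDescriptors.
//		Get("projects/my-project/metricDescriptors/compute.googleapis.com/instance/disk/read_bytes_count").
//		IfNoneMatch(etag).Context(ctx).Do()
//	if googleapi.IsNotModified(err) {
//		// Cached copy is still current; keep using it.
//	} else if err != nil {
//		return err
//	}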
// method id "monitoring.projects.metricDescriptors.list":
type ProjectsMetricDescriptorsListCall struct {
s *APIService
name string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// List: Lists metric descriptors that match a filter. This method does
// not require a Stackdriver account.
func (r *ProjectsMetricDescriptorsService) List(name string) *ProjectsMetricDescriptorsListCall {
c := &ProjectsMetricDescriptorsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
return c
}
// Filter sets the optional parameter "filter": If this field is empty,
// all custom and system-defined metric descriptors are returned.
// Otherwise, the filter specifies which metric descriptors are to be
// returned. For example, the following filter matches all custom
// metrics:
// metric.type = starts_with("custom.googleapis.com/")
func (c *ProjectsMetricDescriptorsListCall) Filter(filter string) *ProjectsMetricDescriptorsListCall {
c.urlParams_.Set("filter", filter)
return c
}
// PageSize sets the optional parameter "pageSize": A positive number
// that is the maximum number of results to return.
func (c *ProjectsMetricDescriptorsListCall) PageSize(pageSize int64) *ProjectsMetricDescriptorsListCall {
c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
return c
}
// PageToken sets the optional parameter "pageToken": If this field is
// not empty then it must contain the nextPageToken value returned by a
// previous call to this method. Using this field causes the method to
// return additional results from the previous method call.
func (c *ProjectsMetricDescriptorsListCall) PageToken(pageToken string) *ProjectsMetricDescriptorsListCall {
c.urlParams_.Set("pageToken", pageToken)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsMetricDescriptorsListCall) Fields(s ...googleapi.Field) *ProjectsMetricDescriptorsListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ProjectsMetricDescriptorsListCall) IfNoneMatch(entityTag string) *ProjectsMetricDescriptorsListCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsMetricDescriptorsListCall) Context(ctx context.Context) *ProjectsMetricDescriptorsListCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsMetricDescriptorsListCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsMetricDescriptorsListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}/metricDescriptors")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "monitoring.projects.metricDescriptors.list" call.
// Exactly one of *ListMetricDescriptorsResponse or error will be
// non-nil. Any non-2xx status code is an error. Response headers are in
// either *ListMetricDescriptorsResponse.ServerResponse.Header or (if a
// response was returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ProjectsMetricDescriptorsListCall) Do(opts ...googleapi.CallOption) (*ListMetricDescriptorsResponse, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &ListMetricDescriptorsResponse{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Lists metric descriptors that match a filter. This method does not require a Stackdriver account.",
// "flatPath": "v3/projects/{projectsId}/metricDescriptors",
// "httpMethod": "GET",
// "id": "monitoring.projects.metricDescriptors.list",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "filter": {
// "description": "If this field is empty, all custom and system-defined metric descriptors are returned. Otherwise, the filter specifies which metric descriptors are to be returned. For example, the following filter matches all custom metrics:\nmetric.type = starts_with(\"custom.googleapis.com/\")\n",
// "location": "query",
// "type": "string"
// },
// "name": {
// "description": "The project on which to execute the request. The format is \"projects/{project_id_or_number}\".",
// "location": "path",
// "pattern": "^projects/[^/]+$",
// "required": true,
// "type": "string"
// },
// "pageSize": {
// "description": "A positive number that is the maximum number of results to return.",
// "format": "int32",
// "location": "query",
// "type": "integer"
// },
// "pageToken": {
// "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return additional results from the previous method call.",
// "location": "query",
// "type": "string"
// }
// },
// "path": "v3/{+name}/metricDescriptors",
// "response": {
// "$ref": "ListMetricDescriptorsResponse"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/monitoring",
// "https://www.googleapis.com/auth/monitoring.read",
// "https://www.googleapis.com/auth/monitoring.write"
// ]
// }
}
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *ProjectsMetricDescriptorsListCall) Pages(ctx context.Context, f func(*ListMetricDescriptorsResponse) error) error {
c.ctx_ = ctx
defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
for {
x, err := c.Do()
if err != nil {
return err
}
if err := f(x); err != nil {
return err
}
if x.NextPageToken == "" {
return nil
}
c.PageToken(x.NextPageToken)
}
}
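// Illustrative use of the filtered list (a sketch, not part of the
// generated API): enumerate only custom metric descriptors. Assumes an
// authenticated *APIService named svc, a ctx, and that
// ListMetricDescriptorsResponse exposes its results in a
// MetricDescriptors field, as defined elsewhere in this package.
//
//	err := svc.Projects.MetricDescriptors.List("projects/my-project").
//		Filter(`metric.type = starts_with("custom.googleapis.com/")`).
//		Pages(ctx, func(page *ListMetricDescriptorsResponse) error {
//			for _, md := range page.MetricDescriptors {
//				fmt.Println(md.Type)
//			}
//			return nil
//		})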
// method id "monitoring.projects.monitoredResourceDescriptors.get":
type ProjectsMonitoredResourceDescriptorsGetCall struct {
s *APIService
name string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// Get: Gets a single monitored resource descriptor. This method does
// not require a Stackdriver account.
func (r *ProjectsMonitoredResourceDescriptorsService) Get(name string) *ProjectsMonitoredResourceDescriptorsGetCall {
c := &ProjectsMonitoredResourceDescriptorsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsMonitoredResourceDescriptorsGetCall) Fields(s ...googleapi.Field) *ProjectsMonitoredResourceDescriptorsGetCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ProjectsMonitoredResourceDescriptorsGetCall) IfNoneMatch(entityTag string) *ProjectsMonitoredResourceDescriptorsGetCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsMonitoredResourceDescriptorsGetCall) Context(ctx context.Context) *ProjectsMonitoredResourceDescriptorsGetCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsMonitoredResourceDescriptorsGetCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsMonitoredResourceDescriptorsGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "monitoring.projects.monitoredResourceDescriptors.get" call.
// Exactly one of *MonitoredResourceDescriptor or error will be non-nil.
// Any non-2xx status code is an error. Response headers are in either
// *MonitoredResourceDescriptor.ServerResponse.Header or (if a response
// was returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ProjectsMonitoredResourceDescriptorsGetCall) Do(opts ...googleapi.CallOption) (*MonitoredResourceDescriptor, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &MonitoredResourceDescriptor{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Gets a single monitored resource descriptor. This method does not require a Stackdriver account.",
// "flatPath": "v3/projects/{projectsId}/monitoredResourceDescriptors/{monitoredResourceDescriptorsId}",
// "httpMethod": "GET",
// "id": "monitoring.projects.monitoredResourceDescriptors.get",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "name": {
// "description": "The monitored resource descriptor to get. The format is \"projects/{project_id_or_number}/monitoredResourceDescriptors/{resource_type}\". The {resource_type} is a predefined type, such as cloudsql_database.",
// "location": "path",
// "pattern": "^projects/[^/]+/monitoredResourceDescriptors/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v3/{+name}",
// "response": {
// "$ref": "MonitoredResourceDescriptor"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/monitoring",
// "https://www.googleapis.com/auth/monitoring.read",
// "https://www.googleapis.com/auth/monitoring.write"
// ]
// }
}
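// Illustrative use of Context with a deadline (a sketch, not part of the
// generated API): abort the call if it takes longer than five seconds.
// Assumes an authenticated *APIService named svc and the standard
// context and time packages.
//
//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
//	defer cancel()
//	mrd, err := svc.Projects.MonitoredResourceDescriptors.
//		Get("projects/my-project/monitoredResourceDescriptors/cloudsql_database").
//		Context(ctx).Do()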
// method id "monitoring.projects.monitoredResourceDescriptors.list":
type ProjectsMonitoredResourceDescriptorsListCall struct {
s *APIService
name string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// List: Lists monitored resource descriptors that match a filter. This
// method does not require a Stackdriver account.
func (r *ProjectsMonitoredResourceDescriptorsService) List(name string) *ProjectsMonitoredResourceDescriptorsListCall {
c := &ProjectsMonitoredResourceDescriptorsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
return c
}
// Filter sets the optional parameter "filter": An optional filter
// describing the descriptors to be returned. The filter can reference
// the descriptor's type and labels. For example, the following filter
// returns only Google Compute Engine descriptors that have an id
// label:
// resource.type = starts_with("gce_") AND resource.label:id
func (c *ProjectsMonitoredResourceDescriptorsListCall) Filter(filter string) *ProjectsMonitoredResourceDescriptorsListCall {
c.urlParams_.Set("filter", filter)
return c
}
// PageSize sets the optional parameter "pageSize": A positive number
// that is the maximum number of results to return.
func (c *ProjectsMonitoredResourceDescriptorsListCall) PageSize(pageSize int64) *ProjectsMonitoredResourceDescriptorsListCall {
c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
return c
}
// PageToken sets the optional parameter "pageToken": If this field is
// not empty then it must contain the nextPageToken value returned by a
// previous call to this method. Using this field causes the method to
// return additional results from the previous method call.
func (c *ProjectsMonitoredResourceDescriptorsListCall) PageToken(pageToken string) *ProjectsMonitoredResourceDescriptorsListCall {
c.urlParams_.Set("pageToken", pageToken)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsMonitoredResourceDescriptorsListCall) Fields(s ...googleapi.Field) *ProjectsMonitoredResourceDescriptorsListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ProjectsMonitoredResourceDescriptorsListCall) IfNoneMatch(entityTag string) *ProjectsMonitoredResourceDescriptorsListCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsMonitoredResourceDescriptorsListCall) Context(ctx context.Context) *ProjectsMonitoredResourceDescriptorsListCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsMonitoredResourceDescriptorsListCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsMonitoredResourceDescriptorsListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}/monitoredResourceDescriptors")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "monitoring.projects.monitoredResourceDescriptors.list" call.
// Exactly one of *ListMonitoredResourceDescriptorsResponse or error
// will be non-nil. Any non-2xx status code is an error. Response
// headers are in either
// *ListMonitoredResourceDescriptorsResponse.ServerResponse.Header or
// (if a response was returned at all) in
// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
// whether the returned error was because http.StatusNotModified was
// returned.
func (c *ProjectsMonitoredResourceDescriptorsListCall) Do(opts ...googleapi.CallOption) (*ListMonitoredResourceDescriptorsResponse, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &ListMonitoredResourceDescriptorsResponse{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Lists monitored resource descriptors that match a filter. This method does not require a Stackdriver account.",
// "flatPath": "v3/projects/{projectsId}/monitoredResourceDescriptors",
// "httpMethod": "GET",
// "id": "monitoring.projects.monitoredResourceDescriptors.list",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "filter": {
// "description": "An optional filter describing the descriptors to be returned. The filter can reference the descriptor's type and labels. For example, the following filter returns only Google Compute Engine descriptors that have an id label:\nresource.type = starts_with(\"gce_\") AND resource.label:id\n",
// "location": "query",
// "type": "string"
// },
// "name": {
// "description": "The project on which to execute the request. The format is \"projects/{project_id_or_number}\".",
// "location": "path",
// "pattern": "^projects/[^/]+$",
// "required": true,
// "type": "string"
// },
// "pageSize": {
// "description": "A positive number that is the maximum number of results to return.",
// "format": "int32",
// "location": "query",
// "type": "integer"
// },
// "pageToken": {
// "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return additional results from the previous method call.",
// "location": "query",
// "type": "string"
// }
// },
// "path": "v3/{+name}/monitoredResourceDescriptors",
// "response": {
// "$ref": "ListMonitoredResourceDescriptorsResponse"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/monitoring",
// "https://www.googleapis.com/auth/monitoring.read",
// "https://www.googleapis.com/auth/monitoring.write"
// ]
// }
}
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *ProjectsMonitoredResourceDescriptorsListCall) Pages(ctx context.Context, f func(*ListMonitoredResourceDescriptorsResponse) error) error {
c.ctx_ = ctx
defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
for {
x, err := c.Do()
if err != nil {
return err
}
if err := f(x); err != nil {
return err
}
if x.NextPageToken == "" {
return nil
}
c.PageToken(x.NextPageToken)
}
}
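// Illustrative use of the descriptor filter (a sketch, not part of the
// generated API): list only Compute Engine resource types. Assumes an
// authenticated *APIService named svc, a ctx, and that
// ListMonitoredResourceDescriptorsResponse exposes its results in a
// ResourceDescriptors field, as defined elsewhere in this package.
//
//	err := svc.Projects.MonitoredResourceDescriptors.
//		List("projects/my-project").
//		Filter(`resource.type = starts_with("gce_")`).
//		Pages(ctx, func(page *ListMonitoredResourceDescriptorsResponse) error {
//			for _, rd := range page.ResourceDescriptors {
//				fmt.Println(rd.Type, rd.DisplayName)
//			}
//			return nil
//		})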
// method id "monitoring.projects.notificationChannelDescriptors.get":
type ProjectsNotificationChannelDescriptorsGetCall struct {
s *APIService
name string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// Get: Gets a single channel descriptor. The descriptor indicates which
// fields are expected / permitted for a notification channel of the
// given type.
func (r *ProjectsNotificationChannelDescriptorsService) Get(name string) *ProjectsNotificationChannelDescriptorsGetCall {
c := &ProjectsNotificationChannelDescriptorsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsNotificationChannelDescriptorsGetCall) Fields(s ...googleapi.Field) *ProjectsNotificationChannelDescriptorsGetCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ProjectsNotificationChannelDescriptorsGetCall) IfNoneMatch(entityTag string) *ProjectsNotificationChannelDescriptorsGetCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsNotificationChannelDescriptorsGetCall) Context(ctx context.Context) *ProjectsNotificationChannelDescriptorsGetCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsNotificationChannelDescriptorsGetCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsNotificationChannelDescriptorsGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "monitoring.projects.notificationChannelDescriptors.get" call.
// Exactly one of *NotificationChannelDescriptor or error will be
// non-nil. Any non-2xx status code is an error. Response headers are in
// either *NotificationChannelDescriptor.ServerResponse.Header or (if a
// response was returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ProjectsNotificationChannelDescriptorsGetCall) Do(opts ...googleapi.CallOption) (*NotificationChannelDescriptor, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &NotificationChannelDescriptor{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Gets a single channel descriptor. The descriptor indicates which fields are expected / permitted for a notification channel of the given type.",
// "flatPath": "v3/projects/{projectsId}/notificationChannelDescriptors/{notificationChannelDescriptorsId}",
// "httpMethod": "GET",
// "id": "monitoring.projects.notificationChannelDescriptors.get",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "name": {
// "description": "The channel type for which to execute the request. The format is projects/[PROJECT_ID]/notificationChannelDescriptors/{channel_type}.",
// "location": "path",
// "pattern": "^projects/[^/]+/notificationChannelDescriptors/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v3/{+name}",
// "response": {
// "$ref": "NotificationChannelDescriptor"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/monitoring",
// "https://www.googleapis.com/auth/monitoring.read"
// ]
// }
}
// method id "monitoring.projects.notificationChannelDescriptors.list":
type ProjectsNotificationChannelDescriptorsListCall struct {
s *APIService
name string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// List: Lists the descriptors for supported channel types. The use of
// descriptors makes it possible for new channel types to be dynamically
// added.
func (r *ProjectsNotificationChannelDescriptorsService) List(name string) *ProjectsNotificationChannelDescriptorsListCall {
c := &ProjectsNotificationChannelDescriptorsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
return c
}
// PageSize sets the optional parameter "pageSize": The maximum number
// of results to return in a single response. If not set to a positive
// number, a reasonable value will be chosen by the service.
func (c *ProjectsNotificationChannelDescriptorsListCall) PageSize(pageSize int64) *ProjectsNotificationChannelDescriptorsListCall {
c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
return c
}
// PageToken sets the optional parameter "pageToken": If non-empty,
// page_token must contain a value returned as the next_page_token in a
// previous response to request the next set of results.
func (c *ProjectsNotificationChannelDescriptorsListCall) PageToken(pageToken string) *ProjectsNotificationChannelDescriptorsListCall {
c.urlParams_.Set("pageToken", pageToken)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsNotificationChannelDescriptorsListCall) Fields(s ...googleapi.Field) *ProjectsNotificationChannelDescriptorsListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ProjectsNotificationChannelDescriptorsListCall) IfNoneMatch(entityTag string) *ProjectsNotificationChannelDescriptorsListCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsNotificationChannelDescriptorsListCall) Context(ctx context.Context) *ProjectsNotificationChannelDescriptorsListCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsNotificationChannelDescriptorsListCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsNotificationChannelDescriptorsListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}/notificationChannelDescriptors")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "monitoring.projects.notificationChannelDescriptors.list" call.
// Exactly one of *ListNotificationChannelDescriptorsResponse or error
// will be non-nil. Any non-2xx status code is an error. Response
// headers are in either
// *ListNotificationChannelDescriptorsResponse.ServerResponse.Header or
// (if a response was returned at all) in
// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
// whether the returned error was because http.StatusNotModified was
// returned.
func (c *ProjectsNotificationChannelDescriptorsListCall) Do(opts ...googleapi.CallOption) (*ListNotificationChannelDescriptorsResponse, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &ListNotificationChannelDescriptorsResponse{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Lists the descriptors for supported channel types. The use of descriptors makes it possible for new channel types to be dynamically added.",
// "flatPath": "v3/projects/{projectsId}/notificationChannelDescriptors",
// "httpMethod": "GET",
// "id": "monitoring.projects.notificationChannelDescriptors.list",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "name": {
// "description": "The REST resource name of the parent from which to retrieve the notification channel descriptors. The expected syntax is:\nprojects/[PROJECT_ID]\nNote that this names the parent container in which to look for the descriptors; to retrieve a single descriptor by name, use the GetNotificationChannelDescriptor operation, instead.",
// "location": "path",
// "pattern": "^projects/[^/]+$",
// "required": true,
// "type": "string"
// },
// "pageSize": {
// "description": "The maximum number of results to return in a single response. If not set to a positive number, a reasonable value will be chosen by the service.",
// "format": "int32",
// "location": "query",
// "type": "integer"
// },
// "pageToken": {
// "description": "If non-empty, page_token must contain a value returned as the next_page_token in a previous response to request the next set of results.",
// "location": "query",
// "type": "string"
// }
// },
// "path": "v3/{+name}/notificationChannelDescriptors",
// "response": {
// "$ref": "ListNotificationChannelDescriptorsResponse"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/monitoring",
// "https://www.googleapis.com/auth/monitoring.read"
// ]
// }
}
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *ProjectsNotificationChannelDescriptorsListCall) Pages(ctx context.Context, f func(*ListNotificationChannelDescriptorsResponse) error) error {
c.ctx_ = ctx
defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
for {
x, err := c.Do()
if err != nil {
return err
}
if err := f(x); err != nil {
return err
}
if x.NextPageToken == "" {
return nil
}
c.PageToken(x.NextPageToken)
}
}
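// Illustrative sketch (not generated code): walking every page of
// descriptors with Pages. The ChannelDescriptors field name is an
// assumption about ListNotificationChannelDescriptorsResponse; svc and
// ctx come from the caller.
//
//	err := svc.Projects.NotificationChannelDescriptors.
//		List("projects/my-project").
//		PageSize(100).
//		Pages(ctx, func(page *ListNotificationChannelDescriptorsResponse) error {
//			for _, d := range page.ChannelDescriptors {
//				fmt.Println(d.Type)
//			}
//			return nil
//		})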
// method id "monitoring.projects.notificationChannels.create":
type ProjectsNotificationChannelsCreateCall struct {
s *APIService
name string
notificationchannel *NotificationChannel
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Create: Creates a new notification channel, representing a single
// notification endpoint such as an email address, SMS number, or
// PagerDuty service.
func (r *ProjectsNotificationChannelsService) Create(name string, notificationchannel *NotificationChannel) *ProjectsNotificationChannelsCreateCall {
c := &ProjectsNotificationChannelsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
c.notificationchannel = notificationchannel
return c
}
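// Illustrative sketch (not generated code): creating an email channel.
// The Type, DisplayName, and Labels values shown are assumptions about
// a typical email channel configuration.
//
//	created, err := svc.Projects.NotificationChannels.
//		Create("projects/my-project", &NotificationChannel{
//			Type:        "email",
//			DisplayName: "On-call",
//			Labels:      map[string]string{"email_address": "oncall@example.com"},
//		}).
//		Do()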
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsNotificationChannelsCreateCall) Fields(s ...googleapi.Field) *ProjectsNotificationChannelsCreateCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsNotificationChannelsCreateCall) Context(ctx context.Context) *ProjectsNotificationChannelsCreateCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsNotificationChannelsCreateCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsNotificationChannelsCreateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.notificationchannel)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}/notificationChannels")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "monitoring.projects.notificationChannels.create" call.
// Exactly one of *NotificationChannel or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *NotificationChannel.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ProjectsNotificationChannelsCreateCall) Do(opts ...googleapi.CallOption) (*NotificationChannel, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &NotificationChannel{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Creates a new notification channel, representing a single notification endpoint such as an email address, SMS number, or PagerDuty service.",
// "flatPath": "v3/projects/{projectsId}/notificationChannels",
// "httpMethod": "POST",
// "id": "monitoring.projects.notificationChannels.create",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "name": {
// "description": "The project on which to execute the request. The format is:\nprojects/[PROJECT_ID]\nNote that this names the container into which the channel will be written. This does not name the newly created channel. The resulting channel's name will have a normalized version of this field as a prefix, but will add /notificationChannels/[CHANNEL_ID] to identify the channel.",
// "location": "path",
// "pattern": "^projects/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v3/{+name}/notificationChannels",
// "request": {
// "$ref": "NotificationChannel"
// },
// "response": {
// "$ref": "NotificationChannel"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/monitoring"
// ]
// }
}
// method id "monitoring.projects.notificationChannels.delete":
type ProjectsNotificationChannelsDeleteCall struct {
s *APIService
name string
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Delete: Deletes a notification channel.
func (r *ProjectsNotificationChannelsService) Delete(name string) *ProjectsNotificationChannelsDeleteCall {
c := &ProjectsNotificationChannelsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
return c
}
// Force sets the optional parameter "force": If true, the notification
// channel will be deleted regardless of its use in alert policies (the
// policies will be updated to remove the channel). If false, channels
// that are still referenced by an existing alerting policy will fail to
// be deleted in a delete operation.
func (c *ProjectsNotificationChannelsDeleteCall) Force(force bool) *ProjectsNotificationChannelsDeleteCall {
c.urlParams_.Set("force", fmt.Sprint(force))
return c
}
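// Illustrative sketch (not generated code): force-deleting a channel
// that may still be referenced by alerting policies; the referencing
// policies are updated to drop it. The resource name is a placeholder.
//
//	_, err := svc.Projects.NotificationChannels.
//		Delete("projects/my-project/notificationChannels/123").
//		Force(true).
//		Do()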
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsNotificationChannelsDeleteCall) Fields(s ...googleapi.Field) *ProjectsNotificationChannelsDeleteCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsNotificationChannelsDeleteCall) Context(ctx context.Context) *ProjectsNotificationChannelsDeleteCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsNotificationChannelsDeleteCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsNotificationChannelsDeleteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("DELETE", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "monitoring.projects.notificationChannels.delete" call.
// Exactly one of *Empty or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Empty.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ProjectsNotificationChannelsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Empty{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Deletes a notification channel.",
// "flatPath": "v3/projects/{projectsId}/notificationChannels/{notificationChannelsId}",
// "httpMethod": "DELETE",
// "id": "monitoring.projects.notificationChannels.delete",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "force": {
// "description": "If true, the notification channel will be deleted regardless of its use in alert policies (the policies will be updated to remove the channel). If false, channels that are still referenced by an existing alerting policy will fail to be deleted in a delete operation.",
// "location": "query",
// "type": "boolean"
// },
// "name": {
// "description": "The channel for which to execute the request. The format is projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID].",
// "location": "path",
// "pattern": "^projects/[^/]+/notificationChannels/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v3/{+name}",
// "response": {
// "$ref": "Empty"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/monitoring"
// ]
// }
}
// method id "monitoring.projects.notificationChannels.get":
type ProjectsNotificationChannelsGetCall struct {
s *APIService
name string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// Get: Gets a single notification channel. The channel includes the
// relevant configuration details with which the channel was created.
// However, the response may truncate or omit passwords, API keys, or
// other private key matter and thus the response may not be 100%
// identical to the information that was supplied in the call to the
// create method.
func (r *ProjectsNotificationChannelsService) Get(name string) *ProjectsNotificationChannelsGetCall {
c := &ProjectsNotificationChannelsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
return c
}
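// Illustrative sketch (not generated code): fetching one channel by its
// REST resource name. Per the method description, sensitive label
// values may be truncated or omitted in the response.
//
//	ch, err := svc.Projects.NotificationChannels.
//		Get("projects/my-project/notificationChannels/123").
//		Do()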
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsNotificationChannelsGetCall) Fields(s ...googleapi.Field) *ProjectsNotificationChannelsGetCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ProjectsNotificationChannelsGetCall) IfNoneMatch(entityTag string) *ProjectsNotificationChannelsGetCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsNotificationChannelsGetCall) Context(ctx context.Context) *ProjectsNotificationChannelsGetCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsNotificationChannelsGetCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsNotificationChannelsGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "monitoring.projects.notificationChannels.get" call.
// Exactly one of *NotificationChannel or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *NotificationChannel.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ProjectsNotificationChannelsGetCall) Do(opts ...googleapi.CallOption) (*NotificationChannel, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &NotificationChannel{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Gets a single notification channel. The channel includes the relevant configuration details with which the channel was created. However, the response may truncate or omit passwords, API keys, or other private key matter and thus the response may not be 100% identical to the information that was supplied in the call to the create method.",
// "flatPath": "v3/projects/{projectsId}/notificationChannels/{notificationChannelsId}",
// "httpMethod": "GET",
// "id": "monitoring.projects.notificationChannels.get",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "name": {
// "description": "The channel for which to execute the request. The format is projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID].",
// "location": "path",
// "pattern": "^projects/[^/]+/notificationChannels/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v3/{+name}",
// "response": {
// "$ref": "NotificationChannel"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/monitoring",
// "https://www.googleapis.com/auth/monitoring.read"
// ]
// }
}
// method id "monitoring.projects.notificationChannels.getVerificationCode":
type ProjectsNotificationChannelsGetVerificationCodeCall struct {
s *APIService
name string
getnotificationchannelverificationcoderequest *GetNotificationChannelVerificationCodeRequest
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// GetVerificationCode: Requests a verification code for an already
// verified channel that can then be used in a call to
// VerifyNotificationChannel() on a different channel with an equivalent
// identity in the same or in a different project. This makes it
// possible to copy a channel between projects without requiring manual
// reverification of the channel. If the channel is not in the verified
// state, this method will fail (in other words, this may only be used
// if the SendNotificationChannelVerificationCode and
// VerifyNotificationChannel paths have already been used to put the
// given channel into the verified state). There is no guarantee that the
// verification codes returned by this method will be of a similar
// structure or form as the ones that are delivered to the channel via
// SendNotificationChannelVerificationCode; while
// VerifyNotificationChannel() will recognize both the codes delivered
// via SendNotificationChannelVerificationCode() and returned from
// GetNotificationChannelVerificationCode(), it is typically the case
// that the verification codes delivered via
// SendNotificationChannelVerificationCode() will be shorter and also
// have a shorter expiration (e.g. codes such as "G-123456") whereas
// GetVerificationCode() will typically return a much longer, websafe
// base 64 encoded string that has a longer expiration time.
func (r *ProjectsNotificationChannelsService) GetVerificationCode(name string, getnotificationchannelverificationcoderequest *GetNotificationChannelVerificationCodeRequest) *ProjectsNotificationChannelsGetVerificationCodeCall {
c := &ProjectsNotificationChannelsGetVerificationCodeCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
c.getnotificationchannelverificationcoderequest = getnotificationchannelverificationcoderequest
return c
}
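// Illustrative sketch (not generated code): retrieving a long-lived
// code from an already-verified channel so an equivalent channel can be
// verified elsewhere without re-delivery. The empty request body and
// the Code response field are assumptions.
//
//	resp, err := svc.Projects.NotificationChannels.
//		GetVerificationCode(
//			"projects/my-project/notificationChannels/123",
//			&GetNotificationChannelVerificationCodeRequest{},
//		).
//		Do()
//	// resp.Code can then be passed to Verify on the target channel.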
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsNotificationChannelsGetVerificationCodeCall) Fields(s ...googleapi.Field) *ProjectsNotificationChannelsGetVerificationCodeCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsNotificationChannelsGetVerificationCodeCall) Context(ctx context.Context) *ProjectsNotificationChannelsGetVerificationCodeCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsNotificationChannelsGetVerificationCodeCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsNotificationChannelsGetVerificationCodeCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.getnotificationchannelverificationcoderequest)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}:getVerificationCode")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "monitoring.projects.notificationChannels.getVerificationCode" call.
// Exactly one of *GetNotificationChannelVerificationCodeResponse or
// error will be non-nil. Any non-2xx status code is an error. Response
// headers are in either
// *GetNotificationChannelVerificationCodeResponse.ServerResponse.Header
// or (if a response was returned at all) in
// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
// whether the returned error was because http.StatusNotModified was
// returned.
func (c *ProjectsNotificationChannelsGetVerificationCodeCall) Do(opts ...googleapi.CallOption) (*GetNotificationChannelVerificationCodeResponse, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &GetNotificationChannelVerificationCodeResponse{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Requests a verification code for an already verified channel that can then be used in a call to VerifyNotificationChannel() on a different channel with an equivalent identity in the same or in a different project. This makes it possible to copy a channel between projects without requiring manual reverification of the channel. If the channel is not in the verified state, this method will fail (in other words, this may only be used if the SendNotificationChannelVerificationCode and VerifyNotificationChannel paths have already been used to put the given channel into the verified state).There is no guarantee that the verification codes returned by this method will be of a similar structure or form as the ones that are delivered to the channel via SendNotificationChannelVerificationCode; while VerifyNotificationChannel() will recognize both the codes delivered via SendNotificationChannelVerificationCode() and returned from GetNotificationChannelVerificationCode(), it is typically the case that the verification codes delivered via SendNotificationChannelVerificationCode() will be shorter and also have a shorter expiration (e.g. codes such as \"G-123456\") whereas GetVerificationCode() will typically return a much longer, websafe base 64 encoded string that has a longer expiration time.",
// "flatPath": "v3/projects/{projectsId}/notificationChannels/{notificationChannelsId}:getVerificationCode",
// "httpMethod": "POST",
// "id": "monitoring.projects.notificationChannels.getVerificationCode",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "name": {
// "description": "The notification channel for which a verification code is to be generated and retrieved. This must name a channel that is already verified; if the specified channel is not verified, the request will fail.",
// "location": "path",
// "pattern": "^projects/[^/]+/notificationChannels/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v3/{+name}:getVerificationCode",
// "request": {
// "$ref": "GetNotificationChannelVerificationCodeRequest"
// },
// "response": {
// "$ref": "GetNotificationChannelVerificationCodeResponse"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/monitoring"
// ]
// }
}
// method id "monitoring.projects.notificationChannels.list":
type ProjectsNotificationChannelsListCall struct {
s *APIService
name string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// List: Lists the notification channels that have been created for the
// project.
func (r *ProjectsNotificationChannelsService) List(name string) *ProjectsNotificationChannelsListCall {
c := &ProjectsNotificationChannelsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
return c
}
// Filter sets the optional parameter "filter": If provided, this field
// specifies the criteria that must be met by notification channels to
// be included in the response. For more details, see sorting and
// filtering.
func (c *ProjectsNotificationChannelsListCall) Filter(filter string) *ProjectsNotificationChannelsListCall {
c.urlParams_.Set("filter", filter)
return c
}
// OrderBy sets the optional parameter "orderBy": A comma-separated list
// of fields by which to sort the result. Supports the same set of
// fields as in filter. Entries can be prefixed with a minus sign to
// sort in descending rather than ascending order. For more details, see
// sorting and filtering.
func (c *ProjectsNotificationChannelsListCall) OrderBy(orderBy string) *ProjectsNotificationChannelsListCall {
c.urlParams_.Set("orderBy", orderBy)
return c
}
// PageSize sets the optional parameter "pageSize": The maximum number
// of results to return in a single response. If not set to a positive
// number, a reasonable value will be chosen by the service.
func (c *ProjectsNotificationChannelsListCall) PageSize(pageSize int64) *ProjectsNotificationChannelsListCall {
c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
return c
}
// PageToken sets the optional parameter "pageToken": If non-empty,
// page_token must contain a value returned as the next_page_token in a
// previous response to request the next set of results.
func (c *ProjectsNotificationChannelsListCall) PageToken(pageToken string) *ProjectsNotificationChannelsListCall {
c.urlParams_.Set("pageToken", pageToken)
return c
}
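// Illustrative sketch (not generated code): listing channels filtered
// by type and ordered by display name. The filter string follows the
// monitoring filter language and is an assumption.
//
//	resp, err := svc.Projects.NotificationChannels.
//		List("projects/my-project").
//		Filter(`type="email"`).
//		OrderBy("display_name").
//		PageSize(50).
//		Do()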
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsNotificationChannelsListCall) Fields(s ...googleapi.Field) *ProjectsNotificationChannelsListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ProjectsNotificationChannelsListCall) IfNoneMatch(entityTag string) *ProjectsNotificationChannelsListCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsNotificationChannelsListCall) Context(ctx context.Context) *ProjectsNotificationChannelsListCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsNotificationChannelsListCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsNotificationChannelsListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}/notificationChannels")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "monitoring.projects.notificationChannels.list" call.
// Exactly one of *ListNotificationChannelsResponse or error will be
// non-nil. Any non-2xx status code is an error. Response headers are in
// either *ListNotificationChannelsResponse.ServerResponse.Header or (if
// a response was returned at all) in error.(*googleapi.Error).Header.
// Use googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ProjectsNotificationChannelsListCall) Do(opts ...googleapi.CallOption) (*ListNotificationChannelsResponse, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &ListNotificationChannelsResponse{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Lists the notification channels that have been created for the project.",
// "flatPath": "v3/projects/{projectsId}/notificationChannels",
// "httpMethod": "GET",
// "id": "monitoring.projects.notificationChannels.list",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "filter": {
// "description": "If provided, this field specifies the criteria that must be met by notification channels to be included in the response.For more details, see sorting and filtering.",
// "location": "query",
// "type": "string"
// },
// "name": {
// "description": "The project on which to execute the request. The format is projects/[PROJECT_ID]. That is, this names the container in which to look for the notification channels; it does not name a specific channel. To query a specific channel by REST resource name, use the GetNotificationChannel operation.",
// "location": "path",
// "pattern": "^projects/[^/]+$",
// "required": true,
// "type": "string"
// },
// "orderBy": {
// "description": "A comma-separated list of fields by which to sort the result. Supports the same set of fields as in filter. Entries can be prefixed with a minus sign to sort in descending rather than ascending order.For more details, see sorting and filtering.",
// "location": "query",
// "type": "string"
// },
// "pageSize": {
// "description": "The maximum number of results to return in a single response. If not set to a positive number, a reasonable value will be chosen by the service.",
// "format": "int32",
// "location": "query",
// "type": "integer"
// },
// "pageToken": {
// "description": "If non-empty, page_token must contain a value returned as the next_page_token in a previous response to request the next set of results.",
// "location": "query",
// "type": "string"
// }
// },
// "path": "v3/{+name}/notificationChannels",
// "response": {
// "$ref": "ListNotificationChannelsResponse"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/monitoring",
// "https://www.googleapis.com/auth/monitoring.read"
// ]
// }
}
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *ProjectsNotificationChannelsListCall) Pages(ctx context.Context, f func(*ListNotificationChannelsResponse) error) error {
c.ctx_ = ctx
defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
for {
x, err := c.Do()
if err != nil {
return err
}
if err := f(x); err != nil {
return err
}
if x.NextPageToken == "" {
return nil
}
c.PageToken(x.NextPageToken)
}
}
// method id "monitoring.projects.notificationChannels.patch":
type ProjectsNotificationChannelsPatchCall struct {
s *APIService
name string
notificationchannel *NotificationChannel
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Patch: Updates a notification channel. Fields not specified in the
// field mask remain unchanged.
func (r *ProjectsNotificationChannelsService) Patch(name string, notificationchannel *NotificationChannel) *ProjectsNotificationChannelsPatchCall {
c := &ProjectsNotificationChannelsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
c.notificationchannel = notificationchannel
return c
}
// UpdateMask sets the optional parameter "updateMask": The fields to
// update.
func (c *ProjectsNotificationChannelsPatchCall) UpdateMask(updateMask string) *ProjectsNotificationChannelsPatchCall {
c.urlParams_.Set("updateMask", updateMask)
return c
}
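// Illustrative sketch (not generated code): renaming a channel while
// leaving all other fields untouched via the update mask. The
// DisplayName field is an assumption about NotificationChannel.
//
//	patched, err := svc.Projects.NotificationChannels.
//		Patch("projects/my-project/notificationChannels/123",
//			&NotificationChannel{DisplayName: "New name"}).
//		UpdateMask("display_name").
//		Do()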
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsNotificationChannelsPatchCall) Fields(s ...googleapi.Field) *ProjectsNotificationChannelsPatchCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsNotificationChannelsPatchCall) Context(ctx context.Context) *ProjectsNotificationChannelsPatchCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsNotificationChannelsPatchCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsNotificationChannelsPatchCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.notificationchannel)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("PATCH", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "monitoring.projects.notificationChannels.patch" call.
// Exactly one of *NotificationChannel or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *NotificationChannel.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ProjectsNotificationChannelsPatchCall) Do(opts ...googleapi.CallOption) (*NotificationChannel, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &NotificationChannel{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Updates a notification channel. Fields not specified in the field mask remain unchanged.",
// "flatPath": "v3/projects/{projectsId}/notificationChannels/{notificationChannelsId}",
// "httpMethod": "PATCH",
// "id": "monitoring.projects.notificationChannels.patch",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "name": {
// "description": "The full REST resource name for this channel. The syntax is:\nprojects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]\nThe [CHANNEL_ID] is automatically assigned by the server on creation.",
// "location": "path",
// "pattern": "^projects/[^/]+/notificationChannels/[^/]+$",
// "required": true,
// "type": "string"
// },
// "updateMask": {
// "description": "The fields to update.",
// "format": "google-fieldmask",
// "location": "query",
// "type": "string"
// }
// },
// "path": "v3/{+name}",
// "request": {
// "$ref": "NotificationChannel"
// },
// "response": {
// "$ref": "NotificationChannel"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/monitoring"
// ]
// }
}
// method id "monitoring.projects.notificationChannels.sendVerificationCode":
type ProjectsNotificationChannelsSendVerificationCodeCall struct {
s *APIService
name string
sendnotificationchannelverificationcoderequest *SendNotificationChannelVerificationCodeRequest
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// SendVerificationCode: Causes a verification code to be delivered to
// the channel. The code can then be supplied in
// VerifyNotificationChannel to verify the channel.
func (r *ProjectsNotificationChannelsService) SendVerificationCode(name string, sendnotificationchannelverificationcoderequest *SendNotificationChannelVerificationCodeRequest) *ProjectsNotificationChannelsSendVerificationCodeCall {
c := &ProjectsNotificationChannelsSendVerificationCodeCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
c.sendnotificationchannelverificationcoderequest = sendnotificationchannelverificationcoderequest
return c
}
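// Illustrative sketch (not generated code): triggering delivery of a
// verification code to the channel; the code is later proven via
// Verify. The empty request body is an assumption.
//
//	_, err := svc.Projects.NotificationChannels.
//		SendVerificationCode(
//			"projects/my-project/notificationChannels/123",
//			&SendNotificationChannelVerificationCodeRequest{},
//		).
//		Do()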
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsNotificationChannelsSendVerificationCodeCall) Fields(s ...googleapi.Field) *ProjectsNotificationChannelsSendVerificationCodeCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsNotificationChannelsSendVerificationCodeCall) Context(ctx context.Context) *ProjectsNotificationChannelsSendVerificationCodeCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsNotificationChannelsSendVerificationCodeCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsNotificationChannelsSendVerificationCodeCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.sendnotificationchannelverificationcoderequest)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}:sendVerificationCode")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "monitoring.projects.notificationChannels.sendVerificationCode" call.
// Exactly one of *Empty or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Empty.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ProjectsNotificationChannelsSendVerificationCodeCall) Do(opts ...googleapi.CallOption) (*Empty, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Empty{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Causes a verification code to be delivered to the channel. The code can then be supplied in VerifyNotificationChannel to verify the channel.",
// "flatPath": "v3/projects/{projectsId}/notificationChannels/{notificationChannelsId}:sendVerificationCode",
// "httpMethod": "POST",
// "id": "monitoring.projects.notificationChannels.sendVerificationCode",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "name": {
// "description": "The notification channel to which to send a verification code.",
// "location": "path",
// "pattern": "^projects/[^/]+/notificationChannels/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v3/{+name}:sendVerificationCode",
// "request": {
// "$ref": "SendNotificationChannelVerificationCodeRequest"
// },
// "response": {
// "$ref": "Empty"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/monitoring"
// ]
// }
}
// method id "monitoring.projects.notificationChannels.verify":
type ProjectsNotificationChannelsVerifyCall struct {
s *APIService
name string
verifynotificationchannelrequest *VerifyNotificationChannelRequest
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Verify: Verifies a NotificationChannel by proving receipt of the code
// delivered to the channel as a result of calling
// SendNotificationChannelVerificationCode.
func (r *ProjectsNotificationChannelsService) Verify(name string, verifynotificationchannelrequest *VerifyNotificationChannelRequest) *ProjectsNotificationChannelsVerifyCall {
c := &ProjectsNotificationChannelsVerifyCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
c.verifynotificationchannelrequest = verifynotificationchannelrequest
return c
}
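// Illustrative sketch (not generated code): completing verification
// with the code the user received (e.g. "G-123456"). The Code field is
// an assumption about VerifyNotificationChannelRequest, and userCode is
// assumed to be collected from the user.
//
//	verified, err := svc.Projects.NotificationChannels.
//		Verify("projects/my-project/notificationChannels/123",
//			&VerifyNotificationChannelRequest{Code: userCode}).
//		Do()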
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsNotificationChannelsVerifyCall) Fields(s ...googleapi.Field) *ProjectsNotificationChannelsVerifyCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsNotificationChannelsVerifyCall) Context(ctx context.Context) *ProjectsNotificationChannelsVerifyCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsNotificationChannelsVerifyCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsNotificationChannelsVerifyCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.verifynotificationchannelrequest)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}:verify")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "monitoring.projects.notificationChannels.verify" call.
// Exactly one of *NotificationChannel or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *NotificationChannel.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ProjectsNotificationChannelsVerifyCall) Do(opts ...googleapi.CallOption) (*NotificationChannel, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &NotificationChannel{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Verifies a NotificationChannel by proving receipt of the code delivered to the channel as a result of calling SendNotificationChannelVerificationCode.",
// "flatPath": "v3/projects/{projectsId}/notificationChannels/{notificationChannelsId}:verify",
// "httpMethod": "POST",
// "id": "monitoring.projects.notificationChannels.verify",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "name": {
// "description": "The notification channel to verify.",
// "location": "path",
// "pattern": "^projects/[^/]+/notificationChannels/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v3/{+name}:verify",
// "request": {
// "$ref": "VerifyNotificationChannelRequest"
// },
// "response": {
// "$ref": "NotificationChannel"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/monitoring"
// ]
// }
}
// method id "monitoring.projects.timeSeries.create":
type ProjectsTimeSeriesCreateCall struct {
s *APIService
name string
createtimeseriesrequest *CreateTimeSeriesRequest
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Create: Creates or adds data to one or more time series. The response
// is empty if all time series in the request were written. If any time
// series could not be written, a corresponding failure message is
// included in the error response.
func (r *ProjectsTimeSeriesService) Create(name string, createtimeseriesrequest *CreateTimeSeriesRequest) *ProjectsTimeSeriesCreateCall {
c := &ProjectsTimeSeriesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
c.createtimeseriesrequest = createtimeseriesrequest
return c
}
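// Illustrative sketch (not generated code): writing one double point to
// a custom metric. The nested field names (TimeSeries, Metric, Points,
// Interval, Value, DoubleValue) follow this API's usual shape but
// should be treated as assumptions.
//
//	v := 42.0
//	req := &CreateTimeSeriesRequest{
//		TimeSeries: []*TimeSeries{{
//			Metric: &Metric{Type: "custom.googleapis.com/my_metric"},
//			Points: []*Point{{
//				Interval: &TimeInterval{EndTime: time.Now().Format(time.RFC3339)},
//				Value:    &TypedValue{DoubleValue: &v},
//			}},
//		}},
//	}
//	_, err := svc.Projects.TimeSeries.Create("projects/my-project", req).Do()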
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsTimeSeriesCreateCall) Fields(s ...googleapi.Field) *ProjectsTimeSeriesCreateCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsTimeSeriesCreateCall) Context(ctx context.Context) *ProjectsTimeSeriesCreateCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsTimeSeriesCreateCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsTimeSeriesCreateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.createtimeseriesrequest)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}/timeSeries")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "monitoring.projects.timeSeries.create" call.
// Exactly one of *Empty or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Empty.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ProjectsTimeSeriesCreateCall) Do(opts ...googleapi.CallOption) (*Empty, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Empty{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Creates or adds data to one or more time series. The response is empty if all time series in the request were written. If any time series could not be written, a corresponding failure message is included in the error response.",
// "flatPath": "v3/projects/{projectsId}/timeSeries",
// "httpMethod": "POST",
// "id": "monitoring.projects.timeSeries.create",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "name": {
// "description": "The project on which to execute the request. The format is \"projects/{project_id_or_number}\".",
// "location": "path",
// "pattern": "^projects/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v3/{+name}/timeSeries",
// "request": {
// "$ref": "CreateTimeSeriesRequest"
// },
// "response": {
// "$ref": "Empty"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/monitoring",
// "https://www.googleapis.com/auth/monitoring.write"
// ]
// }
}
// method id "monitoring.projects.timeSeries.list":
type ProjectsTimeSeriesListCall struct {
s *APIService
name string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// List: Lists time series that match a filter. This method does not
// require a Stackdriver account.
func (r *ProjectsTimeSeriesService) List(name string) *ProjectsTimeSeriesListCall {
c := &ProjectsTimeSeriesListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
return c
}
// AggregationAlignmentPeriod sets the optional parameter
// "aggregation.alignmentPeriod": The alignment period for per-time
// series alignment. If present, alignmentPeriod must be at least 60
// seconds. After per-time series alignment, each time series will
// contain data points only on the period boundaries. If
// perSeriesAligner is not specified or equals ALIGN_NONE, then this
// field is ignored. If perSeriesAligner is specified and does not equal
// ALIGN_NONE, then this field must be defined; otherwise an error is
// returned.
func (c *ProjectsTimeSeriesListCall) AggregationAlignmentPeriod(aggregationAlignmentPeriod string) *ProjectsTimeSeriesListCall {
c.urlParams_.Set("aggregation.alignmentPeriod", aggregationAlignmentPeriod)
return c
}
// AggregationCrossSeriesReducer sets the optional parameter
// "aggregation.crossSeriesReducer": The approach to be used to combine
// time series. Not all reducer functions may be applied to all time
// series, depending on the metric type and the value type of the
// original time series. Reduction may change the metric type or value
// type of the time series. Time series data must be aligned in order to
// perform cross-time series reduction. If crossSeriesReducer is
// specified, then perSeriesAligner must be specified and not equal
// ALIGN_NONE and alignmentPeriod must be specified; otherwise, an error
// is returned.
//
// Possible values:
// "REDUCE_NONE"
// "REDUCE_MEAN"
// "REDUCE_MIN"
// "REDUCE_MAX"
// "REDUCE_SUM"
// "REDUCE_STDDEV"
// "REDUCE_COUNT"
// "REDUCE_COUNT_TRUE"
// "REDUCE_COUNT_FALSE"
// "REDUCE_FRACTION_TRUE"
// "REDUCE_PERCENTILE_99"
// "REDUCE_PERCENTILE_95"
// "REDUCE_PERCENTILE_50"
// "REDUCE_PERCENTILE_05"
func (c *ProjectsTimeSeriesListCall) AggregationCrossSeriesReducer(aggregationCrossSeriesReducer string) *ProjectsTimeSeriesListCall {
c.urlParams_.Set("aggregation.crossSeriesReducer", aggregationCrossSeriesReducer)
return c
}
// AggregationGroupByFields sets the optional parameter
// "aggregation.groupByFields": The set of fields to preserve when
// crossSeriesReducer is specified. The groupByFields determine how the
// time series are partitioned into subsets prior to applying the
// aggregation function. Each subset contains time series that have the
// same value for each of the grouping fields. Each individual time
// series is a member of exactly one subset. The crossSeriesReducer is
// applied to each subset of time series. It is not possible to reduce
// across different resource types, so this field implicitly contains
// resource.type. Fields not specified in groupByFields are aggregated
// away. If groupByFields is not specified and all the time series have
// the same resource type, then the time series are aggregated into a
// single output time series. If crossSeriesReducer is not defined, this
// field is ignored.
func (c *ProjectsTimeSeriesListCall) AggregationGroupByFields(aggregationGroupByFields ...string) *ProjectsTimeSeriesListCall {
c.urlParams_.SetMulti("aggregation.groupByFields", append([]string{}, aggregationGroupByFields...))
return c
}
// AggregationPerSeriesAligner sets the optional parameter
// "aggregation.perSeriesAligner": The approach to be used to align
// individual time series. Not all alignment functions may be applied to
// all time series, depending on the metric type and value type of the
// original time series. Alignment may change the metric type or the
// value type of the time series. Time series data must be aligned in
// order to perform cross-time series reduction. If crossSeriesReducer
// is specified, then perSeriesAligner must be specified and not equal
// ALIGN_NONE and alignmentPeriod must be specified; otherwise, an error
// is returned.
//
// Possible values:
// "ALIGN_NONE"
// "ALIGN_DELTA"
// "ALIGN_RATE"
// "ALIGN_INTERPOLATE"
// "ALIGN_NEXT_OLDER"
// "ALIGN_MIN"
// "ALIGN_MAX"
// "ALIGN_MEAN"
// "ALIGN_COUNT"
// "ALIGN_SUM"
// "ALIGN_STDDEV"
// "ALIGN_COUNT_TRUE"
// "ALIGN_COUNT_FALSE"
// "ALIGN_FRACTION_TRUE"
// "ALIGN_PERCENTILE_99"
// "ALIGN_PERCENTILE_95"
// "ALIGN_PERCENTILE_50"
// "ALIGN_PERCENTILE_05"
// "ALIGN_PERCENT_CHANGE"
func (c *ProjectsTimeSeriesListCall) AggregationPerSeriesAligner(aggregationPerSeriesAligner string) *ProjectsTimeSeriesListCall {
c.urlParams_.Set("aggregation.perSeriesAligner", aggregationPerSeriesAligner)
return c
}
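// Example (illustrative sketch, not part of the generated surface): a
// List call combining per-series alignment with a cross-series reduction.
// The project name, period, and grouping field below are hypothetical;
// svc is assumed to be an *APIService constructed elsewhere.
//
//   call := svc.Projects.TimeSeries.List("projects/my-project").
//           AggregationAlignmentPeriod("300s").
//           AggregationPerSeriesAligner("ALIGN_MEAN").
//           AggregationCrossSeriesReducer("REDUCE_MEAN").
//           AggregationGroupByFields("resource.labels.zone")
//
// As documented above, specifying a crossSeriesReducer requires a
// perSeriesAligner other than ALIGN_NONE and an alignmentPeriod.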
// Filter sets the optional parameter "filter": A monitoring filter that
// specifies which time series should be returned. The filter must
// specify a single metric type, and can additionally specify metric
// labels and other information. For example:
// metric.type = "compute.googleapis.com/instance/cpu/usage_time" AND
// metric.labels.instance_name = "my-instance-name"
func (c *ProjectsTimeSeriesListCall) Filter(filter string) *ProjectsTimeSeriesListCall {
c.urlParams_.Set("filter", filter)
return c
}
// IntervalEndTime sets the optional parameter "interval.endTime":
// Required. The end of the time interval.
func (c *ProjectsTimeSeriesListCall) IntervalEndTime(intervalEndTime string) *ProjectsTimeSeriesListCall {
c.urlParams_.Set("interval.endTime", intervalEndTime)
return c
}
// IntervalStartTime sets the optional parameter "interval.startTime":
// The beginning of the time interval. The default value for the start
// time is the end time. The start time must not be later than the end
// time.
func (c *ProjectsTimeSeriesListCall) IntervalStartTime(intervalStartTime string) *ProjectsTimeSeriesListCall {
c.urlParams_.Set("interval.startTime", intervalStartTime)
return c
}
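// Example (illustrative sketch): a minimal List request over a fixed
// interval. The filter string comes from the Filter documentation above;
// the project name and RFC3339 timestamps are hypothetical.
//
//   resp, err := svc.Projects.TimeSeries.List("projects/my-project").
//           Filter(`metric.type = "compute.googleapis.com/instance/cpu/usage_time"`).
//           IntervalStartTime("2020-01-01T00:00:00Z").
//           IntervalEndTime("2020-01-02T00:00:00Z").
//           Do()
//   if err != nil {
//           // handle the error
//   }
//   _ = resp // *ListTimeSeriesResponse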
// OrderBy sets the optional parameter "orderBy": Unsupported: must be
// left blank. The points in each time series are currently returned in
// reverse time order (most recent to oldest).
func (c *ProjectsTimeSeriesListCall) OrderBy(orderBy string) *ProjectsTimeSeriesListCall {
c.urlParams_.Set("orderBy", orderBy)
return c
}
// PageSize sets the optional parameter "pageSize": A positive number
// that is the maximum number of results to return. If page_size is
// empty or more than 100,000 results, the effective page_size is
// 100,000 results. If view is set to FULL, this is the maximum number
// of Points returned. If view is set to HEADERS, this is the maximum
// number of TimeSeries returned.
func (c *ProjectsTimeSeriesListCall) PageSize(pageSize int64) *ProjectsTimeSeriesListCall {
c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
return c
}
// PageToken sets the optional parameter "pageToken": If this field is
// not empty then it must contain the nextPageToken value returned by a
// previous call to this method. Using this field causes the method to
// return additional results from the previous method call.
func (c *ProjectsTimeSeriesListCall) PageToken(pageToken string) *ProjectsTimeSeriesListCall {
c.urlParams_.Set("pageToken", pageToken)
return c
}
// View sets the optional parameter "view": Specifies which information
// is returned about the time series.
//
// Possible values:
// "FULL"
// "HEADERS"
func (c *ProjectsTimeSeriesListCall) View(view string) *ProjectsTimeSeriesListCall {
c.urlParams_.Set("view", view)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsTimeSeriesListCall) Fields(s ...googleapi.Field) *ProjectsTimeSeriesListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
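// Example (illustrative sketch): requesting a partial response. The field
// selector below is a hypothetical illustration of the selector syntax
// described at the URL above; untyped string constants convert to
// googleapi.Field.
//
//   call.Fields("nextPageToken", "timeSeries(metric,points)")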
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ProjectsTimeSeriesListCall) IfNoneMatch(entityTag string) *ProjectsTimeSeriesListCall {
c.ifNoneMatch_ = entityTag
return c
}
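// Example (illustrative sketch): a conditional fetch using an ETag from a
// previous response. The tag value is hypothetical.
//
//   resp, err := call.IfNoneMatch(`"previous-etag"`).Do()
//   if googleapi.IsNotModified(err) {
//           // the cached copy is still current
//   } else if err != nil {
//           // handle other errors
//   }
//   _ = resp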
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsTimeSeriesListCall) Context(ctx context.Context) *ProjectsTimeSeriesListCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsTimeSeriesListCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsTimeSeriesListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}/timeSeries")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "monitoring.projects.timeSeries.list" call.
// Exactly one of *ListTimeSeriesResponse or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *ListTimeSeriesResponse.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ProjectsTimeSeriesListCall) Do(opts ...googleapi.CallOption) (*ListTimeSeriesResponse, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &ListTimeSeriesResponse{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Lists time series that match a filter. This method does not require a Stackdriver account.",
// "flatPath": "v3/projects/{projectsId}/timeSeries",
// "httpMethod": "GET",
// "id": "monitoring.projects.timeSeries.list",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "aggregation.alignmentPeriod": {
// "description": "The alignment period for per-time series alignment. If present, alignmentPeriod must be at least 60 seconds. After per-time series alignment, each time series will contain data points only on the period boundaries. If perSeriesAligner is not specified or equals ALIGN_NONE, then this field is ignored. If perSeriesAligner is specified and does not equal ALIGN_NONE, then this field must be defined; otherwise an error is returned.",
// "format": "google-duration",
// "location": "query",
// "type": "string"
// },
// "aggregation.crossSeriesReducer": {
// "description": "The approach to be used to combine time series. Not all reducer functions may be applied to all time series, depending on the metric type and the value type of the original time series. Reduction may change the metric type of value type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If crossSeriesReducer is specified, then perSeriesAligner must be specified and not equal ALIGN_NONE and alignmentPeriod must be specified; otherwise, an error is returned.",
// "enum": [
// "REDUCE_NONE",
// "REDUCE_MEAN",
// "REDUCE_MIN",
// "REDUCE_MAX",
// "REDUCE_SUM",
// "REDUCE_STDDEV",
// "REDUCE_COUNT",
// "REDUCE_COUNT_TRUE",
// "REDUCE_COUNT_FALSE",
// "REDUCE_FRACTION_TRUE",
// "REDUCE_PERCENTILE_99",
// "REDUCE_PERCENTILE_95",
// "REDUCE_PERCENTILE_50",
// "REDUCE_PERCENTILE_05"
// ],
// "location": "query",
// "type": "string"
// },
// "aggregation.groupByFields": {
// "description": "The set of fields to preserve when crossSeriesReducer is specified. The groupByFields determine how the time series are partitioned into subsets prior to applying the aggregation function. Each subset contains time series that have the same value for each of the grouping fields. Each individual time series is a member of exactly one subset. The crossSeriesReducer is applied to each subset of time series. It is not possible to reduce across different resource types, so this field implicitly contains resource.type. Fields not specified in groupByFields are aggregated away. If groupByFields is not specified and all the time series have the same resource type, then the time series are aggregated into a single output time series. If crossSeriesReducer is not defined, this field is ignored.",
// "location": "query",
// "repeated": true,
// "type": "string"
// },
// "aggregation.perSeriesAligner": {
// "description": "The approach to be used to align individual time series. Not all alignment functions may be applied to all time series, depending on the metric type and value type of the original time series. Alignment may change the metric type or the value type of the time series.Time series data must be aligned in order to perform cross-time series reduction. If crossSeriesReducer is specified, then perSeriesAligner must be specified and not equal ALIGN_NONE and alignmentPeriod must be specified; otherwise, an error is returned.",
// "enum": [
// "ALIGN_NONE",
// "ALIGN_DELTA",
// "ALIGN_RATE",
// "ALIGN_INTERPOLATE",
// "ALIGN_NEXT_OLDER",
// "ALIGN_MIN",
// "ALIGN_MAX",
// "ALIGN_MEAN",
// "ALIGN_COUNT",
// "ALIGN_SUM",
// "ALIGN_STDDEV",
// "ALIGN_COUNT_TRUE",
// "ALIGN_COUNT_FALSE",
// "ALIGN_FRACTION_TRUE",
// "ALIGN_PERCENTILE_99",
// "ALIGN_PERCENTILE_95",
// "ALIGN_PERCENTILE_50",
// "ALIGN_PERCENTILE_05",
// "ALIGN_PERCENT_CHANGE"
// ],
// "location": "query",
// "type": "string"
// },
// "filter": {
// "description": "A monitoring filter that specifies which time series should be returned. The filter must specify a single metric type, and can additionally specify metric labels and other information. For example:\nmetric.type = \"compute.googleapis.com/instance/cpu/usage_time\" AND\n metric.labels.instance_name = \"my-instance-name\"\n",
// "location": "query",
// "type": "string"
// },
// "interval.endTime": {
// "description": "Required. The end of the time interval.",
// "format": "google-datetime",
// "location": "query",
// "type": "string"
// },
// "interval.startTime": {
// "description": "Optional. The beginning of the time interval. The default value for the start time is the end time. The start time must not be later than the end time.",
// "format": "google-datetime",
// "location": "query",
// "type": "string"
// },
// "name": {
// "description": "The project on which to execute the request. The format is \"projects/{project_id_or_number}\".",
// "location": "path",
// "pattern": "^projects/[^/]+$",
// "required": true,
// "type": "string"
// },
// "orderBy": {
// "description": "Unsupported: must be left blank. The points in each time series are currently returned in reverse time order (most recent to oldest).",
// "location": "query",
// "type": "string"
// },
// "pageSize": {
// "description": "A positive number that is the maximum number of results to return. If page_size is empty or more than 100,000 results, the effective page_size is 100,000 results. If view is set to FULL, this is the maximum number of Points returned. If view is set to HEADERS, this is the maximum number of TimeSeries returned.",
// "format": "int32",
// "location": "query",
// "type": "integer"
// },
// "pageToken": {
// "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return additional results from the previous method call.",
// "location": "query",
// "type": "string"
// },
// "view": {
// "description": "Specifies which information is returned about the time series.",
// "enum": [
// "FULL",
// "HEADERS"
// ],
// "location": "query",
// "type": "string"
// }
// },
// "path": "v3/{+name}/timeSeries",
// "response": {
// "$ref": "ListTimeSeriesResponse"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/monitoring",
// "https://www.googleapis.com/auth/monitoring.read"
// ]
// }
}
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *ProjectsTimeSeriesListCall) Pages(ctx context.Context, f func(*ListTimeSeriesResponse) error) error {
c.ctx_ = ctx
defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
for {
x, err := c.Do()
if err != nil {
return err
}
if err := f(x); err != nil {
return err
}
if x.NextPageToken == "" {
return nil
}
c.PageToken(x.NextPageToken)
}
}
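// Example (illustrative sketch): iterating every page of results. Pages
// drives the pageToken loop itself, so PageToken need not be set manually.
// ctx is assumed to be a context.Context and the project name is
// hypothetical.
//
//   err := svc.Projects.TimeSeries.List("projects/my-project").
//           Pages(ctx, func(page *ListTimeSeriesResponse) error {
//                   for _, ts := range page.TimeSeries {
//                           _ = ts // process each *TimeSeries
//                   }
//                   return nil
//           })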
// method id "monitoring.projects.uptimeCheckConfigs.create":
type ProjectsUptimeCheckConfigsCreateCall struct {
s *APIService
parent string
uptimecheckconfig *UptimeCheckConfig
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Create: Creates a new Uptime check configuration.
func (r *ProjectsUptimeCheckConfigsService) Create(parent string, uptimecheckconfig *UptimeCheckConfig) *ProjectsUptimeCheckConfigsCreateCall {
c := &ProjectsUptimeCheckConfigsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.parent = parent
c.uptimecheckconfig = uptimecheckconfig
return c
}
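// Example (illustrative sketch): creating a basic HTTPS uptime check. All
// field values and the project name are hypothetical; UptimeCheckConfig,
// HttpCheck, and MonitoredResource are defined elsewhere in this package.
//
//   cfg := &UptimeCheckConfig{
//           DisplayName: "homepage-check",
//           MonitoredResource: &MonitoredResource{
//                   Type:   "uptime_url",
//                   Labels: map[string]string{"host": "example.com", "project_id": "my-project"},
//           },
//           HttpCheck: &HttpCheck{Path: "/", Port: 443, UseSsl: true},
//           Period:    "300s",
//           Timeout:   "10s",
//   }
//   created, err := svc.Projects.UptimeCheckConfigs.Create("projects/my-project", cfg).Do()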
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsUptimeCheckConfigsCreateCall) Fields(s ...googleapi.Field) *ProjectsUptimeCheckConfigsCreateCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsUptimeCheckConfigsCreateCall) Context(ctx context.Context) *ProjectsUptimeCheckConfigsCreateCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsUptimeCheckConfigsCreateCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsUptimeCheckConfigsCreateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.uptimecheckconfig)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+parent}/uptimeCheckConfigs")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"parent": c.parent,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "monitoring.projects.uptimeCheckConfigs.create" call.
// Exactly one of *UptimeCheckConfig or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *UptimeCheckConfig.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ProjectsUptimeCheckConfigsCreateCall) Do(opts ...googleapi.CallOption) (*UptimeCheckConfig, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &UptimeCheckConfig{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Creates a new Uptime check configuration.",
// "flatPath": "v3/projects/{projectsId}/uptimeCheckConfigs",
// "httpMethod": "POST",
// "id": "monitoring.projects.uptimeCheckConfigs.create",
// "parameterOrder": [
// "parent"
// ],
// "parameters": {
// "parent": {
// "description": "The project in which to create the Uptime check. The format is projects/[PROJECT_ID].",
// "location": "path",
// "pattern": "^projects/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v3/{+parent}/uptimeCheckConfigs",
// "request": {
// "$ref": "UptimeCheckConfig"
// },
// "response": {
// "$ref": "UptimeCheckConfig"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/monitoring"
// ]
// }
}
// method id "monitoring.projects.uptimeCheckConfigs.delete":
type ProjectsUptimeCheckConfigsDeleteCall struct {
s *APIService
name string
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Delete: Deletes an Uptime check configuration. Note that this method
// will fail if the Uptime check configuration is referenced by an alert
// policy or other dependent configs that would be rendered invalid by
// the deletion.
func (r *ProjectsUptimeCheckConfigsService) Delete(name string) *ProjectsUptimeCheckConfigsDeleteCall {
c := &ProjectsUptimeCheckConfigsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsUptimeCheckConfigsDeleteCall) Fields(s ...googleapi.Field) *ProjectsUptimeCheckConfigsDeleteCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsUptimeCheckConfigsDeleteCall) Context(ctx context.Context) *ProjectsUptimeCheckConfigsDeleteCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsUptimeCheckConfigsDeleteCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsUptimeCheckConfigsDeleteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("DELETE", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "monitoring.projects.uptimeCheckConfigs.delete" call.
// Exactly one of *Empty or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Empty.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ProjectsUptimeCheckConfigsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Empty{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Deletes an Uptime check configuration. Note that this method will fail if the Uptime check configuration is referenced by an alert policy or other dependent configs that would be rendered invalid by the deletion.",
// "flatPath": "v3/projects/{projectsId}/uptimeCheckConfigs/{uptimeCheckConfigsId}",
// "httpMethod": "DELETE",
// "id": "monitoring.projects.uptimeCheckConfigs.delete",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "name": {
// "description": "The Uptime check configuration to delete. The format is projects/[PROJECT_ID]/uptimeCheckConfigs/[UPTIME_CHECK_ID].",
// "location": "path",
// "pattern": "^projects/[^/]+/uptimeCheckConfigs/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v3/{+name}",
// "response": {
// "$ref": "Empty"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/monitoring"
// ]
// }
}
// method id "monitoring.projects.uptimeCheckConfigs.get":
type ProjectsUptimeCheckConfigsGetCall struct {
s *APIService
name string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// Get: Gets a single Uptime check configuration.
func (r *ProjectsUptimeCheckConfigsService) Get(name string) *ProjectsUptimeCheckConfigsGetCall {
c := &ProjectsUptimeCheckConfigsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsUptimeCheckConfigsGetCall) Fields(s ...googleapi.Field) *ProjectsUptimeCheckConfigsGetCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ProjectsUptimeCheckConfigsGetCall) IfNoneMatch(entityTag string) *ProjectsUptimeCheckConfigsGetCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsUptimeCheckConfigsGetCall) Context(ctx context.Context) *ProjectsUptimeCheckConfigsGetCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsUptimeCheckConfigsGetCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsUptimeCheckConfigsGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "monitoring.projects.uptimeCheckConfigs.get" call.
// Exactly one of *UptimeCheckConfig or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *UptimeCheckConfig.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ProjectsUptimeCheckConfigsGetCall) Do(opts ...googleapi.CallOption) (*UptimeCheckConfig, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &UptimeCheckConfig{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Gets a single Uptime check configuration.",
// "flatPath": "v3/projects/{projectsId}/uptimeCheckConfigs/{uptimeCheckConfigsId}",
// "httpMethod": "GET",
// "id": "monitoring.projects.uptimeCheckConfigs.get",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "name": {
// "description": "The Uptime check configuration to retrieve. The format is projects/[PROJECT_ID]/uptimeCheckConfigs/[UPTIME_CHECK_ID].",
// "location": "path",
// "pattern": "^projects/[^/]+/uptimeCheckConfigs/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v3/{+name}",
// "response": {
// "$ref": "UptimeCheckConfig"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/monitoring",
// "https://www.googleapis.com/auth/monitoring.read"
// ]
// }
}
// method id "monitoring.projects.uptimeCheckConfigs.list":
type ProjectsUptimeCheckConfigsListCall struct {
s *APIService
parent string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// List: Lists the existing valid Uptime check configurations for the
// project (leaving out any invalid configurations).
func (r *ProjectsUptimeCheckConfigsService) List(parent string) *ProjectsUptimeCheckConfigsListCall {
c := &ProjectsUptimeCheckConfigsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.parent = parent
return c
}
// PageSize sets the optional parameter "pageSize": The maximum number
// of results to return in a single response. The server may further
// constrain the maximum number of results returned in a single page. If
// the page_size is <=0, the server will decide the number of results to
// be returned.
func (c *ProjectsUptimeCheckConfigsListCall) PageSize(pageSize int64) *ProjectsUptimeCheckConfigsListCall {
c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
return c
}
// PageToken sets the optional parameter "pageToken": If this field is
// not empty then it must contain the nextPageToken value returned by a
// previous call to this method. Using this field causes the method to
// return more results from the previous method call.
func (c *ProjectsUptimeCheckConfigsListCall) PageToken(pageToken string) *ProjectsUptimeCheckConfigsListCall {
c.urlParams_.Set("pageToken", pageToken)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsUptimeCheckConfigsListCall) Fields(s ...googleapi.Field) *ProjectsUptimeCheckConfigsListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of In-None-Match.
func (c *ProjectsUptimeCheckConfigsListCall) IfNoneMatch(entityTag string) *ProjectsUptimeCheckConfigsListCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsUptimeCheckConfigsListCall) Context(ctx context.Context) *ProjectsUptimeCheckConfigsListCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsUptimeCheckConfigsListCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsUptimeCheckConfigsListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+parent}/uptimeCheckConfigs")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"parent": c.parent,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "monitoring.projects.uptimeCheckConfigs.list" call.
// Exactly one of *ListUptimeCheckConfigsResponse or error will be
// non-nil. Any non-2xx status code is an error. Response headers are in
// either *ListUptimeCheckConfigsResponse.ServerResponse.Header or (if a
// response was returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ProjectsUptimeCheckConfigsListCall) Do(opts ...googleapi.CallOption) (*ListUptimeCheckConfigsResponse, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &ListUptimeCheckConfigsResponse{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Lists the existing valid Uptime check configurations for the project (leaving out any invalid configurations).",
// "flatPath": "v3/projects/{projectsId}/uptimeCheckConfigs",
// "httpMethod": "GET",
// "id": "monitoring.projects.uptimeCheckConfigs.list",
// "parameterOrder": [
// "parent"
// ],
// "parameters": {
// "pageSize": {
// "description": "The maximum number of results to return in a single response. The server may further constrain the maximum number of results returned in a single page. If the page_size is \u003c=0, the server will decide the number of results to be returned.",
// "format": "int32",
// "location": "query",
// "type": "integer"
// },
// "pageToken": {
// "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return more results from the previous method call.",
// "location": "query",
// "type": "string"
// },
// "parent": {
// "description": "The project whose Uptime check configurations are listed. The format is projects/[PROJECT_ID].",
// "location": "path",
// "pattern": "^projects/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v3/{+parent}/uptimeCheckConfigs",
// "response": {
// "$ref": "ListUptimeCheckConfigsResponse"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/monitoring",
// "https://www.googleapis.com/auth/monitoring.read"
// ]
// }
}
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *ProjectsUptimeCheckConfigsListCall) Pages(ctx context.Context, f func(*ListUptimeCheckConfigsResponse) error) error {
c.ctx_ = ctx
defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
for {
x, err := c.Do()
if err != nil {
return err
}
if err := f(x); err != nil {
return err
}
if x.NextPageToken == "" {
return nil
}
c.PageToken(x.NextPageToken)
}
}
// method id "monitoring.projects.uptimeCheckConfigs.patch":
type ProjectsUptimeCheckConfigsPatchCall struct {
s *APIService
name string
uptimecheckconfig *UptimeCheckConfig
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Patch: Updates an Uptime check configuration. You can either replace
// the entire configuration with a new one or replace only certain
// fields in the current configuration by specifying the fields to be
// updated via updateMask. Returns the updated configuration.
func (r *ProjectsUptimeCheckConfigsService) Patch(name string, uptimecheckconfig *UptimeCheckConfig) *ProjectsUptimeCheckConfigsPatchCall {
c := &ProjectsUptimeCheckConfigsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
c.uptimecheckconfig = uptimecheckconfig
return c
}
// UpdateMask sets the optional parameter "updateMask": If present, only
// the listed fields in the current Uptime check configuration are
// updated with values from the new configuration. If this field is
// empty, then the current configuration is completely replaced with the
// new configuration.
func (c *ProjectsUptimeCheckConfigsPatchCall) UpdateMask(updateMask string) *ProjectsUptimeCheckConfigsPatchCall {
c.urlParams_.Set("updateMask", updateMask)
return c
}
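// Example (illustrative sketch): updating only the display name of an
// existing configuration. The resource name is hypothetical, and
// "display_name" assumes the usual snake_case google-fieldmask convention.
//
//   patch := &UptimeCheckConfig{DisplayName: "renamed-check"}
//   updated, err := svc.Projects.UptimeCheckConfigs.
//           Patch("projects/my-project/uptimeCheckConfigs/my-check", patch).
//           UpdateMask("display_name").
//           Do()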
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsUptimeCheckConfigsPatchCall) Fields(s ...googleapi.Field) *ProjectsUptimeCheckConfigsPatchCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsUptimeCheckConfigsPatchCall) Context(ctx context.Context) *ProjectsUptimeCheckConfigsPatchCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsUptimeCheckConfigsPatchCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsUptimeCheckConfigsPatchCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.uptimecheckconfig)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("PATCH", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "monitoring.projects.uptimeCheckConfigs.patch" call.
// Exactly one of *UptimeCheckConfig or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *UptimeCheckConfig.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ProjectsUptimeCheckConfigsPatchCall) Do(opts ...googleapi.CallOption) (*UptimeCheckConfig, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &UptimeCheckConfig{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Updates an Uptime check configuration. You can either replace the entire configuration with a new one or replace only certain fields in the current configuration by specifying the fields to be updated via updateMask. Returns the updated configuration.",
// "flatPath": "v3/projects/{projectsId}/uptimeCheckConfigs/{uptimeCheckConfigsId}",
// "httpMethod": "PATCH",
// "id": "monitoring.projects.uptimeCheckConfigs.patch",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "name": {
// "description": "A unique resource name for this Uptime check configuration. The format is:projects/[PROJECT_ID]/uptimeCheckConfigs/[UPTIME_CHECK_ID].This field should be omitted when creating the Uptime check configuration; on create, the resource name is assigned by the server and included in the response.",
// "location": "path",
// "pattern": "^projects/[^/]+/uptimeCheckConfigs/[^/]+$",
// "required": true,
// "type": "string"
// },
// "updateMask": {
// "description": "Optional. If present, only the listed fields in the current Uptime check configuration are updated with values from the new configuration. If this field is empty, then the current configuration is completely replaced with the new configuration.",
// "format": "google-fieldmask",
// "location": "query",
// "type": "string"
// }
// },
// "path": "v3/{+name}",
// "request": {
// "$ref": "UptimeCheckConfig"
// },
// "response": {
// "$ref": "UptimeCheckConfig"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/monitoring"
// ]
// }
}
// method id "monitoring.services.create":
type ServicesCreateCall struct {
s *APIService
parent string
service *Service
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Create: Create a Service.
func (r *ServicesService) Create(parent string, service *Service) *ServicesCreateCall {
c := &ServicesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.parent = parent
c.service = service
return c
}
// ServiceId sets the optional parameter "serviceId": The Service id to
// use for this Service. If omitted, an id will be generated instead.
// Must match the pattern a-z0-9-+
func (c *ServicesCreateCall) ServiceId(serviceId string) *ServicesCreateCall {
c.urlParams_.Set("serviceId", serviceId)
return c
}
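// Example (illustrative sketch): creating a custom Service with an
// explicit id that matches the documented pattern. The parent, id, and
// display name are hypothetical; Custom marks the service as user-defined.
//
//   created, err := svc.Services.
//           Create("projects/my-project", &Service{DisplayName: "checkout", Custom: &Custom{}}).
//           ServiceId("checkout-service").
//           Do()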
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ServicesCreateCall) Fields(s ...googleapi.Field) *ServicesCreateCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ServicesCreateCall) Context(ctx context.Context) *ServicesCreateCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ServicesCreateCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ServicesCreateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.service)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+parent}/services")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"parent": c.parent,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "monitoring.services.create" call.
// Exactly one of *Service or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Service.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ServicesCreateCall) Do(opts ...googleapi.CallOption) (*Service, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Service{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Create a Service.",
// "flatPath": "v3/{v3Id}/{v3Id1}/services",
// "httpMethod": "POST",
// "id": "monitoring.services.create",
// "parameterOrder": [
// "parent"
// ],
// "parameters": {
// "parent": {
// "description": "Resource name of the parent workspace. Of the form projects/{project_id}.",
// "location": "path",
// "pattern": "^[^/]+/[^/]+$",
// "required": true,
// "type": "string"
// },
// "serviceId": {
// "description": "Optional. The Service id to use for this Service. If omitted, an id will be generated instead. Must match the pattern a-z0-9-+",
// "location": "query",
// "type": "string"
// }
// },
// "path": "v3/{+parent}/services",
// "request": {
// "$ref": "Service"
// },
// "response": {
// "$ref": "Service"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/monitoring"
// ]
// }
}
// method id "monitoring.services.delete":
type ServicesDeleteCall struct {
s *APIService
name string
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Delete: Soft delete this Service.
func (r *ServicesService) Delete(name string) *ServicesDeleteCall {
c := &ServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ServicesDeleteCall) Fields(s ...googleapi.Field) *ServicesDeleteCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ServicesDeleteCall) Context(ctx context.Context) *ServicesDeleteCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ServicesDeleteCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ServicesDeleteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("DELETE", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "monitoring.services.delete" call.
// Exactly one of *Empty or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Empty.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Empty{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Soft delete this Service.",
// "flatPath": "v3/{v3Id}/{v3Id1}/services/{servicesId}",
// "httpMethod": "DELETE",
// "id": "monitoring.services.delete",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "name": {
// "description": "Resource name of the Service to delete. Of the form projects/{project_id}/service/{service_id}.",
// "location": "path",
// "pattern": "^[^/]+/[^/]+/services/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v3/{+name}",
// "response": {
// "$ref": "Empty"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/monitoring"
// ]
// }
}
// method id "monitoring.services.get":
type ServicesGetCall struct {
s *APIService
name string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// Get: Get the named Service.
func (r *ServicesService) Get(name string) *ServicesGetCall {
c := &ServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ServicesGetCall) Fields(s ...googleapi.Field) *ServicesGetCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ServicesGetCall) IfNoneMatch(entityTag string) *ServicesGetCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ServicesGetCall) Context(ctx context.Context) *ServicesGetCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ServicesGetCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ServicesGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "monitoring.services.get" call.
// Exactly one of *Service or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Service.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ServicesGetCall) Do(opts ...googleapi.CallOption) (*Service, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Service{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Get the named Service.",
// "flatPath": "v3/{v3Id}/{v3Id1}/services/{servicesId}",
// "httpMethod": "GET",
// "id": "monitoring.services.get",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "name": {
// "description": "Resource name of the Service. Of the form projects/{project_id}/services/{service_id}.",
// "location": "path",
// "pattern": "^[^/]+/[^/]+/services/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v3/{+name}",
// "response": {
// "$ref": "Service"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/monitoring",
// "https://www.googleapis.com/auth/monitoring.read"
// ]
// }
}
// method id "monitoring.services.list":
type ServicesListCall struct {
s *APIService
parent string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// List: List Services for this workspace.
func (r *ServicesService) List(parent string) *ServicesListCall {
c := &ServicesListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.parent = parent
return c
}
// Filter sets the optional parameter "filter": A filter specifying what
// Services to return. The filter currently supports the following
// fields:
// - `identifier_case`
// - `app_engine.module_id`
// - `cloud_endpoints.service`
// - `cluster_istio.location`
// - `cluster_istio.cluster_name`
// - `cluster_istio.service_namespace`
// - `cluster_istio.service_name`
// identifier_case refers to which option in the identifier oneof is
// populated. For example, the filter identifier_case = "CUSTOM" would
// match all services with a value for the custom field. Valid options
// are "CUSTOM", "APP_ENGINE", "CLOUD_ENDPOINTS", and "CLUSTER_ISTIO".
func (c *ServicesListCall) Filter(filter string) *ServicesListCall {
c.urlParams_.Set("filter", filter)
return c
}
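// Example (illustrative sketch): listing only custom services via the
// identifier_case filter documented above. The parent name is
// hypothetical.
//
//   resp, err := svc.Services.List("projects/my-project").
//           Filter(`identifier_case = "CUSTOM"`).
//           Do()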
// PageSize sets the optional parameter "pageSize": A non-negative
// number that is the maximum number of results to return. When 0, use
// default page size.
func (c *ServicesListCall) PageSize(pageSize int64) *ServicesListCall {
c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
return c
}
// PageToken sets the optional parameter "pageToken": If this field is
// not empty then it must contain the nextPageToken value returned by a
// previous call to this method. Using this field causes the method to
// return additional results from the previous method call.
func (c *ServicesListCall) PageToken(pageToken string) *ServicesListCall {
c.urlParams_.Set("pageToken", pageToken)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ServicesListCall) Fields(s ...googleapi.Field) *ServicesListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ServicesListCall) IfNoneMatch(entityTag string) *ServicesListCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ServicesListCall) Context(ctx context.Context) *ServicesListCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ServicesListCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ServicesListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+parent}/services")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"parent": c.parent,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "monitoring.services.list" call.
// Exactly one of *ListServicesResponse or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *ListServicesResponse.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ServicesListCall) Do(opts ...googleapi.CallOption) (*ListServicesResponse, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &ListServicesResponse{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "List Services for this workspace.",
// "flatPath": "v3/{v3Id}/{v3Id1}/services",
// "httpMethod": "GET",
// "id": "monitoring.services.list",
// "parameterOrder": [
// "parent"
// ],
// "parameters": {
// "filter": {
// "description": "A filter specifying what Services to return. The filter currently supports the following fields:\n- `identifier_case`\n- `app_engine.module_id`\n- `cloud_endpoints.service`\n- `cluster_istio.location`\n- `cluster_istio.cluster_name`\n- `cluster_istio.service_namespace`\n- `cluster_istio.service_name`\nidentifier_case refers to which option in the identifier oneof is populated. For example, the filter identifier_case = \"CUSTOM\" would match all services with a value for the custom field. Valid options are \"CUSTOM\", \"APP_ENGINE\", \"CLOUD_ENDPOINTS\", and \"CLUSTER_ISTIO\".",
// "location": "query",
// "type": "string"
// },
// "pageSize": {
// "description": "A non-negative number that is the maximum number of results to return. When 0, use default page size.",
// "format": "int32",
// "location": "query",
// "type": "integer"
// },
// "pageToken": {
// "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return additional results from the previous method call.",
// "location": "query",
// "type": "string"
// },
// "parent": {
// "description": "Resource name of the parent Workspace. Of the form projects/{project_id}.",
// "location": "path",
// "pattern": "^[^/]+/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v3/{+parent}/services",
// "response": {
// "$ref": "ListServicesResponse"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/monitoring",
// "https://www.googleapis.com/auth/monitoring.read"
// ]
// }
}
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *ServicesListCall) Pages(ctx context.Context, f func(*ListServicesResponse) error) error {
c.ctx_ = ctx
defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
for {
x, err := c.Do()
if err != nil {
return err
}
if err := f(x); err != nil {
return err
}
if x.NextPageToken == "" {
return nil
}
c.PageToken(x.NextPageToken)
}
}
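// Example (illustrative sketch, not from this file): iterate every page of
// services. Assumes a configured *APIService named svc, a ctx and fmt in scope,
// and that ListServicesResponse exposes a Services slice with Name fields.
//
//	err := svc.Services.List("projects/my-project").PageSize(100).
//		Pages(ctx, func(page *ListServicesResponse) error {
//			for _, s := range page.Services {
//				fmt.Println(s.Name)
//			}
//			return nil
//		})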
// method id "monitoring.services.patch":
type ServicesPatchCall struct {
s *APIService
name string
service *Service
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Patch: Update this Service.
func (r *ServicesService) Patch(name string, service *Service) *ServicesPatchCall {
c := &ServicesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
c.service = service
return c
}
// UpdateMask sets the optional parameter "updateMask": A set of field
// paths defining which fields to use for the update.
func (c *ServicesPatchCall) UpdateMask(updateMask string) *ServicesPatchCall {
c.urlParams_.Set("updateMask", updateMask)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ServicesPatchCall) Fields(s ...googleapi.Field) *ServicesPatchCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ServicesPatchCall) Context(ctx context.Context) *ServicesPatchCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ServicesPatchCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ServicesPatchCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.service)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("PATCH", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "monitoring.services.patch" call.
// Exactly one of *Service or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Service.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ServicesPatchCall) Do(opts ...googleapi.CallOption) (*Service, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Service{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Update this Service.",
// "flatPath": "v3/{v3Id}/{v3Id1}/services/{servicesId}",
// "httpMethod": "PATCH",
// "id": "monitoring.services.patch",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "name": {
// "description": "Resource name for this Service. Of the form projects/{project_id}/services/{service_id}.",
// "location": "path",
// "pattern": "^[^/]+/[^/]+/services/[^/]+$",
// "required": true,
// "type": "string"
// },
// "updateMask": {
// "description": "A set of field paths defining which fields to use for the update.",
// "format": "google-fieldmask",
// "location": "query",
// "type": "string"
// }
// },
// "path": "v3/{+name}",
// "request": {
// "$ref": "Service"
// },
// "response": {
// "$ref": "Service"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/monitoring"
// ]
// }
}
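// Example (illustrative sketch, not from this file): patch a single field via
// an update mask. svc and the DisplayName field are assumptions.
//
//	updated, err := svc.Services.Patch(
//		"projects/my-project/services/my-service",
//		&Service{DisplayName: "Checkout"},
//	).UpdateMask("displayName").Do()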
// method id "monitoring.services.serviceLevelObjectives.create":
type ServicesServiceLevelObjectivesCreateCall struct {
s *APIService
parent string
servicelevelobjective *ServiceLevelObjective
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Create: Create a ServiceLevelObjective for the given Service.
func (r *ServicesServiceLevelObjectivesService) Create(parent string, servicelevelobjective *ServiceLevelObjective) *ServicesServiceLevelObjectivesCreateCall {
c := &ServicesServiceLevelObjectivesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.parent = parent
c.servicelevelobjective = servicelevelobjective
return c
}
// ServiceLevelObjectiveId sets the optional parameter
// "serviceLevelObjectiveId": The ServiceLevelObjective id to use for
// this ServiceLevelObjective. If omitted, an id will be generated
// instead. Must match the pattern a-z0-9-+
func (c *ServicesServiceLevelObjectivesCreateCall) ServiceLevelObjectiveId(serviceLevelObjectiveId string) *ServicesServiceLevelObjectivesCreateCall {
c.urlParams_.Set("serviceLevelObjectiveId", serviceLevelObjectiveId)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ServicesServiceLevelObjectivesCreateCall) Fields(s ...googleapi.Field) *ServicesServiceLevelObjectivesCreateCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ServicesServiceLevelObjectivesCreateCall) Context(ctx context.Context) *ServicesServiceLevelObjectivesCreateCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ServicesServiceLevelObjectivesCreateCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ServicesServiceLevelObjectivesCreateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.servicelevelobjective)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+parent}/serviceLevelObjectives")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"parent": c.parent,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "monitoring.services.serviceLevelObjectives.create" call.
// Exactly one of *ServiceLevelObjective or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *ServiceLevelObjective.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ServicesServiceLevelObjectivesCreateCall) Do(opts ...googleapi.CallOption) (*ServiceLevelObjective, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &ServiceLevelObjective{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Create a ServiceLevelObjective for the given Service.",
// "flatPath": "v3/{v3Id}/{v3Id1}/services/{servicesId}/serviceLevelObjectives",
// "httpMethod": "POST",
// "id": "monitoring.services.serviceLevelObjectives.create",
// "parameterOrder": [
// "parent"
// ],
// "parameters": {
// "parent": {
// "description": "Resource name of the parent Service. Of the form projects/{project_id}/services/{service_id}.",
// "location": "path",
// "pattern": "^[^/]+/[^/]+/services/[^/]+$",
// "required": true,
// "type": "string"
// },
// "serviceLevelObjectiveId": {
// "description": "Optional. The ServiceLevelObjective id to use for this ServiceLevelObjective. If omitted, an id will be generated instead. Must match the pattern a-z0-9-+",
// "location": "query",
// "type": "string"
// }
// },
// "path": "v3/{+parent}/serviceLevelObjectives",
// "request": {
// "$ref": "ServiceLevelObjective"
// },
// "response": {
// "$ref": "ServiceLevelObjective"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/monitoring"
// ]
// }
}
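// Example (illustrative sketch, not from this file): create an SLO with an
// explicit id matching the a-z0-9- pattern. svc and the ServiceLevelObjective
// fields used here are assumptions.
//
//	slo, err := svc.Services.ServiceLevelObjectives.Create(
//		"projects/my-project/services/my-service",
//		&ServiceLevelObjective{DisplayName: "99% availability", Goal: 0.99},
//	).ServiceLevelObjectiveId("availability-slo").Do()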
// method id "monitoring.services.serviceLevelObjectives.delete":
type ServicesServiceLevelObjectivesDeleteCall struct {
s *APIService
name string
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Delete: Delete the given ServiceLevelObjective.
func (r *ServicesServiceLevelObjectivesService) Delete(name string) *ServicesServiceLevelObjectivesDeleteCall {
c := &ServicesServiceLevelObjectivesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ServicesServiceLevelObjectivesDeleteCall) Fields(s ...googleapi.Field) *ServicesServiceLevelObjectivesDeleteCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ServicesServiceLevelObjectivesDeleteCall) Context(ctx context.Context) *ServicesServiceLevelObjectivesDeleteCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ServicesServiceLevelObjectivesDeleteCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ServicesServiceLevelObjectivesDeleteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("DELETE", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "monitoring.services.serviceLevelObjectives.delete" call.
// Exactly one of *Empty or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Empty.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ServicesServiceLevelObjectivesDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Empty{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Delete the given ServiceLevelObjective.",
// "flatPath": "v3/{v3Id}/{v3Id1}/services/{servicesId}/serviceLevelObjectives/{serviceLevelObjectivesId}",
// "httpMethod": "DELETE",
// "id": "monitoring.services.serviceLevelObjectives.delete",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "name": {
// "description": "Resource name of the ServiceLevelObjective to delete. Of the form projects/{project_id}/services/{service_id}/serviceLevelObjectives/{slo_name}.",
// "location": "path",
// "pattern": "^[^/]+/[^/]+/services/[^/]+/serviceLevelObjectives/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v3/{+name}",
// "response": {
// "$ref": "Empty"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/monitoring"
// ]
// }
}
// method id "monitoring.services.serviceLevelObjectives.get":
type ServicesServiceLevelObjectivesGetCall struct {
s *APIService
name string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// Get: Get a ServiceLevelObjective by name.
func (r *ServicesServiceLevelObjectivesService) Get(name string) *ServicesServiceLevelObjectivesGetCall {
c := &ServicesServiceLevelObjectivesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
return c
}
// View sets the optional parameter "view": View of the
// ServiceLevelObjective to return. If DEFAULT, return the
// ServiceLevelObjective as originally defined. If EXPLICIT and the
// ServiceLevelObjective is defined in terms of a BasicSli, replace the
// BasicSli with a RequestBasedSli spelling out how the SLI is computed.
//
// Possible values:
// "VIEW_UNSPECIFIED"
// "FULL"
// "EXPLICIT"
func (c *ServicesServiceLevelObjectivesGetCall) View(view string) *ServicesServiceLevelObjectivesGetCall {
c.urlParams_.Set("view", view)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ServicesServiceLevelObjectivesGetCall) Fields(s ...googleapi.Field) *ServicesServiceLevelObjectivesGetCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ServicesServiceLevelObjectivesGetCall) IfNoneMatch(entityTag string) *ServicesServiceLevelObjectivesGetCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ServicesServiceLevelObjectivesGetCall) Context(ctx context.Context) *ServicesServiceLevelObjectivesGetCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ServicesServiceLevelObjectivesGetCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ServicesServiceLevelObjectivesGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "monitoring.services.serviceLevelObjectives.get" call.
// Exactly one of *ServiceLevelObjective or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *ServiceLevelObjective.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ServicesServiceLevelObjectivesGetCall) Do(opts ...googleapi.CallOption) (*ServiceLevelObjective, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &ServiceLevelObjective{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Get a ServiceLevelObjective by name.",
// "flatPath": "v3/{v3Id}/{v3Id1}/services/{servicesId}/serviceLevelObjectives/{serviceLevelObjectivesId}",
// "httpMethod": "GET",
// "id": "monitoring.services.serviceLevelObjectives.get",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "name": {
// "description": "Resource name of the ServiceLevelObjective to get. Of the form projects/{project_id}/services/{service_id}/serviceLevelObjectives/{slo_name}.",
// "location": "path",
// "pattern": "^[^/]+/[^/]+/services/[^/]+/serviceLevelObjectives/[^/]+$",
// "required": true,
// "type": "string"
// },
// "view": {
// "description": "View of the ServiceLevelObjective to return. If DEFAULT, return the ServiceLevelObjective as originally defined. If EXPLICIT and the ServiceLevelObjective is defined in terms of a BasicSli, replace the BasicSli with a RequestBasedSli spelling out how the SLI is computed.",
// "enum": [
// "VIEW_UNSPECIFIED",
// "FULL",
// "EXPLICIT"
// ],
// "location": "query",
// "type": "string"
// }
// },
// "path": "v3/{+name}",
// "response": {
// "$ref": "ServiceLevelObjective"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/monitoring",
// "https://www.googleapis.com/auth/monitoring.read"
// ]
// }
}
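// Example (illustrative sketch, not from this file): conditional fetch using an
// ETag from a previous response. svc and etag are assumptions.
//
//	slo, err := svc.Services.ServiceLevelObjectives.
//		Get("projects/my-project/services/my-service/serviceLevelObjectives/my-slo").
//		IfNoneMatch(etag).Do()
//	if googleapi.IsNotModified(err) {
//		// the cached copy is still current; reuse it
//	}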
// method id "monitoring.services.serviceLevelObjectives.list":
type ServicesServiceLevelObjectivesListCall struct {
s *APIService
parent string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// List: List the ServiceLevelObjectives for the given Service.
func (r *ServicesServiceLevelObjectivesService) List(parent string) *ServicesServiceLevelObjectivesListCall {
c := &ServicesServiceLevelObjectivesListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.parent = parent
return c
}
// Filter sets the optional parameter "filter": A filter specifying what
// ServiceLevelObjectives to return.
func (c *ServicesServiceLevelObjectivesListCall) Filter(filter string) *ServicesServiceLevelObjectivesListCall {
c.urlParams_.Set("filter", filter)
return c
}
// PageSize sets the optional parameter "pageSize": A non-negative
// number that is the maximum number of results to return. When 0, use
// default page size.
func (c *ServicesServiceLevelObjectivesListCall) PageSize(pageSize int64) *ServicesServiceLevelObjectivesListCall {
c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
return c
}
// PageToken sets the optional parameter "pageToken": If this field is
// not empty then it must contain the nextPageToken value returned by a
// previous call to this method. Using this field causes the method to
// return additional results from the previous method call.
func (c *ServicesServiceLevelObjectivesListCall) PageToken(pageToken string) *ServicesServiceLevelObjectivesListCall {
c.urlParams_.Set("pageToken", pageToken)
return c
}
// View sets the optional parameter "view": View of the
// ServiceLevelObjectives to return. If DEFAULT, return each
// ServiceLevelObjective as originally defined. If EXPLICIT and the
// ServiceLevelObjective is defined in terms of a BasicSli, replace the
// BasicSli with a RequestBasedSli spelling out how the SLI is computed.
//
// Possible values:
// "VIEW_UNSPECIFIED"
// "FULL"
// "EXPLICIT"
func (c *ServicesServiceLevelObjectivesListCall) View(view string) *ServicesServiceLevelObjectivesListCall {
c.urlParams_.Set("view", view)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ServicesServiceLevelObjectivesListCall) Fields(s ...googleapi.Field) *ServicesServiceLevelObjectivesListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of In-None-Match.
func (c *ServicesServiceLevelObjectivesListCall) IfNoneMatch(entityTag string) *ServicesServiceLevelObjectivesListCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ServicesServiceLevelObjectivesListCall) Context(ctx context.Context) *ServicesServiceLevelObjectivesListCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ServicesServiceLevelObjectivesListCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ServicesServiceLevelObjectivesListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+parent}/serviceLevelObjectives")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"parent": c.parent,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "monitoring.services.serviceLevelObjectives.list" call.
// Exactly one of *ListServiceLevelObjectivesResponse or error will be
// non-nil. Any non-2xx status code is an error. Response headers are in
// either *ListServiceLevelObjectivesResponse.ServerResponse.Header or
// (if a response was returned at all) in
// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
// whether the returned error was because http.StatusNotModified was
// returned.
func (c *ServicesServiceLevelObjectivesListCall) Do(opts ...googleapi.CallOption) (*ListServiceLevelObjectivesResponse, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &ListServiceLevelObjectivesResponse{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "List the ServiceLevelObjectives for the given Service.",
// "flatPath": "v3/{v3Id}/{v3Id1}/services/{servicesId}/serviceLevelObjectives",
// "httpMethod": "GET",
// "id": "monitoring.services.serviceLevelObjectives.list",
// "parameterOrder": [
// "parent"
// ],
// "parameters": {
// "filter": {
// "description": "A filter specifying what ServiceLevelObjectives to return.",
// "location": "query",
// "type": "string"
// },
// "pageSize": {
// "description": "A non-negative number that is the maximum number of results to return. When 0, use default page size.",
// "format": "int32",
// "location": "query",
// "type": "integer"
// },
// "pageToken": {
// "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return additional results from the previous method call.",
// "location": "query",
// "type": "string"
// },
// "parent": {
// "description": "Resource name of the parent Service. Of the form projects/{project_id}/services/{service_id}.",
// "location": "path",
// "pattern": "^[^/]+/[^/]+/services/[^/]+$",
// "required": true,
// "type": "string"
// },
// "view": {
// "description": "View of the ServiceLevelObjectives to return. If DEFAULT, return each ServiceLevelObjective as originally defined. If EXPLICIT and the ServiceLevelObjective is defined in terms of a BasicSli, replace the BasicSli with a RequestBasedSli spelling out how the SLI is computed.",
// "enum": [
// "VIEW_UNSPECIFIED",
// "FULL",
// "EXPLICIT"
// ],
// "location": "query",
// "type": "string"
// }
// },
// "path": "v3/{+parent}/serviceLevelObjectives",
// "response": {
// "$ref": "ListServiceLevelObjectivesResponse"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/monitoring",
// "https://www.googleapis.com/auth/monitoring.read"
// ]
// }
}
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *ServicesServiceLevelObjectivesListCall) Pages(ctx context.Context, f func(*ListServiceLevelObjectivesResponse) error) error {
c.ctx_ = ctx
defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
for {
x, err := c.Do()
if err != nil {
return err
}
if err := f(x); err != nil {
return err
}
if x.NextPageToken == "" {
return nil
}
c.PageToken(x.NextPageToken)
}
}
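// Example (illustrative sketch, not from this file): list SLOs in the EXPLICIT
// view so BasicSli definitions come back spelled out as RequestBasedSli. svc
// and the ServiceLevelObjectives field on the response are assumptions.
//
//	err := svc.Services.ServiceLevelObjectives.
//		List("projects/my-project/services/my-service").
//		View("EXPLICIT").
//		Pages(ctx, func(page *ListServiceLevelObjectivesResponse) error {
//			// process page.ServiceLevelObjectives here
//			return nil
//		})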
// method id "monitoring.services.serviceLevelObjectives.patch":
type ServicesServiceLevelObjectivesPatchCall struct {
s *APIService
name string
servicelevelobjective *ServiceLevelObjective
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Patch: Update the given ServiceLevelObjective.
func (r *ServicesServiceLevelObjectivesService) Patch(name string, servicelevelobjective *ServiceLevelObjective) *ServicesServiceLevelObjectivesPatchCall {
c := &ServicesServiceLevelObjectivesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
c.servicelevelobjective = servicelevelobjective
return c
}
// UpdateMask sets the optional parameter "updateMask": A set of field
// paths defining which fields to use for the update.
func (c *ServicesServiceLevelObjectivesPatchCall) UpdateMask(updateMask string) *ServicesServiceLevelObjectivesPatchCall {
c.urlParams_.Set("updateMask", updateMask)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ServicesServiceLevelObjectivesPatchCall) Fields(s ...googleapi.Field) *ServicesServiceLevelObjectivesPatchCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ServicesServiceLevelObjectivesPatchCall) Context(ctx context.Context) *ServicesServiceLevelObjectivesPatchCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ServicesServiceLevelObjectivesPatchCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ServicesServiceLevelObjectivesPatchCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.servicelevelobjective)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v3/{+name}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("PATCH", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "monitoring.services.serviceLevelObjectives.patch" call.
// Exactly one of *ServiceLevelObjective or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *ServiceLevelObjective.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ServicesServiceLevelObjectivesPatchCall) Do(opts ...googleapi.CallOption) (*ServiceLevelObjective, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &ServiceLevelObjective{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Update the given ServiceLevelObjective.",
// "flatPath": "v3/{v3Id}/{v3Id1}/services/{servicesId}/serviceLevelObjectives/{serviceLevelObjectivesId}",
// "httpMethod": "PATCH",
// "id": "monitoring.services.serviceLevelObjectives.patch",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "name": {
// "description": "Resource name for this ServiceLevelObjective. Of the form projects/{project_id}/services/{service_id}/serviceLevelObjectives/{slo_name}.",
// "location": "path",
// "pattern": "^[^/]+/[^/]+/services/[^/]+/serviceLevelObjectives/[^/]+$",
// "required": true,
// "type": "string"
// },
// "updateMask": {
// "description": "A set of field paths defining which fields to use for the update.",
// "format": "google-fieldmask",
// "location": "query",
// "type": "string"
// }
// },
// "path": "v3/{+name}",
// "request": {
// "$ref": "ServiceLevelObjective"
// },
// "response": {
// "$ref": "ServiceLevelObjective"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/monitoring"
// ]
// }
}
// method id "monitoring.uptimeCheckIps.list":
type UptimeCheckIpsListCall struct {
s *APIService
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// List: Returns the list of IP addresses that checkers run from.
func (r *UptimeCheckIpsService) List() *UptimeCheckIpsListCall {
c := &UptimeCheckIpsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
return c
}
// PageSize sets the optional parameter "pageSize": The maximum number
// of results to return in a single response. The server may further
// constrain the maximum number of results returned in a single page. If
// the page_size is <=0, the server will decide the number of results to
// be returned. NOTE: this field is not yet implemented
func (c *UptimeCheckIpsListCall) PageSize(pageSize int64) *UptimeCheckIpsListCall {
c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
return c
}
// PageToken sets the optional parameter "pageToken": If this field is
// not empty then it must contain the nextPageToken value returned by a
// previous call to this method. Using this field causes the method to
// return more results from the previous method call. NOTE: this field
// is not yet implemented
func (c *UptimeCheckIpsListCall) PageToken(pageToken string) *UptimeCheckIpsListCall {
c.urlParams_.Set("pageToken", pageToken)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *UptimeCheckIpsListCall) Fields(s ...googleapi.Field) *UptimeCheckIpsListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *UptimeCheckIpsListCall) IfNoneMatch(entityTag string) *UptimeCheckIpsListCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *UptimeCheckIpsListCall) Context(ctx context.Context) *UptimeCheckIpsListCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *UptimeCheckIpsListCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *UptimeCheckIpsListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20191124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v3/uptimeCheckIps")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "monitoring.uptimeCheckIps.list" call.
// Exactly one of *ListUptimeCheckIpsResponse or error will be non-nil.
// Any non-2xx status code is an error. Response headers are in either
// *ListUptimeCheckIpsResponse.ServerResponse.Header or (if a response
// was returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *UptimeCheckIpsListCall) Do(opts ...googleapi.CallOption) (*ListUptimeCheckIpsResponse, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &ListUptimeCheckIpsResponse{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Returns the list of IP addresses that checkers run from",
// "flatPath": "v3/uptimeCheckIps",
// "httpMethod": "GET",
// "id": "monitoring.uptimeCheckIps.list",
// "parameterOrder": [],
// "parameters": {
// "pageSize": {
// "description": "The maximum number of results to return in a single response. The server may further constrain the maximum number of results returned in a single page. If the page_size is \u003c=0, the server will decide the number of results to be returned. NOTE: this field is not yet implemented",
// "format": "int32",
// "location": "query",
// "type": "integer"
// },
// "pageToken": {
// "description": "If this field is not empty then it must contain the nextPageToken value returned by a previous call to this method. Using this field causes the method to return more results from the previous method call. NOTE: this field is not yet implemented",
// "location": "query",
// "type": "string"
// }
// },
// "path": "v3/uptimeCheckIps",
// "response": {
// "$ref": "ListUptimeCheckIpsResponse"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform",
// "https://www.googleapis.com/auth/monitoring",
// "https://www.googleapis.com/auth/monitoring.read"
// ]
// }
}
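// Example (illustrative sketch, not from this file): fetch the checker IP
// ranges once. svc and the response field layout are assumptions.
//
//	ips, err := svc.UptimeCheckIps.List().Do()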
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *UptimeCheckIpsListCall) Pages(ctx context.Context, f func(*ListUptimeCheckIpsResponse) error) error {
c.ctx_ = ctx
defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
for {
x, err := c.Do()
if err != nil {
return err
}
if err := f(x); err != nil {
return err
}
if x.NextPageToken == "" {
return nil
}
c.PageToken(x.NextPageToken)
}
}
| {
rs := &ServicesService{s: s}
rs.ServiceLevelObjectives = NewServicesServiceLevelObjectivesService(s)
return rs
} |
mypath.py | class Path(object):
@staticmethod
def | (dataset):
if dataset == 'cityscapes':
# return 'data/cityscapes'
return '../../../cvlabdata2/forOganes/cityscapes'
# return '/path/to/datasets/cityscapes/' # folder that contains leftImg8bit/
elif dataset == 'cityscapes_local':
return 'data/cityscapes'
elif dataset == 'synthia':
return 'data/synthia'
# elif dataset == 'sbd':
# return '/path/to/datasets/benchmark_RELEASE/' # folder that contains dataset/.
# elif dataset == 'pascal':
# return '/path/to/datasets/VOCdevkit/VOC2012/' # folder that contains VOCdevkit/.
# elif dataset == 'coco':
# return '/path/to/datasets/coco/'
else:
print('Dataset {} not available.'.format(dataset))
raise NotImplementedError
| db_root_dir |
decisions.go | package v1
import (
"crypto/sha512"
"fmt"
"net/http"
"strconv"
"time"
"github.com/crowdsecurity/crowdsec/pkg/database/ent"
"github.com/crowdsecurity/crowdsec/pkg/models"
"github.com/crowdsecurity/crowdsec/pkg/types"
"github.com/gin-gonic/gin"
log "github.com/sirupsen/logrus"
)
func | (decisions []*ent.Decision) ([]*models.Decision, error) {
var results []*models.Decision
for _, dbDecision := range decisions {
duration := time.Until(dbDecision.Until).String()
decision := models.Decision{
ID: int64(dbDecision.ID),
Duration: &duration,
EndIP: dbDecision.EndIP,
StartIP: dbDecision.StartIP,
Scenario: &dbDecision.Scenario,
Scope: &dbDecision.Scope,
Value: &dbDecision.Value,
Type: &dbDecision.Type,
Origin: &dbDecision.Origin,
}
results = append(results, &decision)
}
return results, nil
}
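// A formatted decision serializes roughly as follows (illustrative values only;
// field names are taken from the struct literal above):
//
//	{"id": 1, "duration": "3h59m57s", "scope": "Ip", "value": "1.2.3.4",
//	 "type": "ban", "scenario": "crowdsecurity/ssh-bf", "origin": "crowdsec"}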
func (c *Controller) GetDecision(gctx *gin.Context) {
defer types.CatchPanic("crowdsec/controllersV1/GetDecision")
var err error
var results []*models.Decision
var data []*ent.Decision
data, err = c.DBClient.QueryDecisionWithFilter(gctx.Request.URL.Query())
if err != nil {
c.HandleDBErrors(gctx, err)
return
}
results, err = FormatDecisions(data)
if err != nil {
gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()})
return
}
/* Naive logic: when a bouncer queries /decisions, an empty answer means there is no decision for this IP/user/...,
while a non-empty answer means there are one or more decisions for this target. */
if len(results) > 0 {
PrometheusBouncersHasNonEmptyDecision(gctx)
} else {
PrometheusBouncersHasEmptyDecision(gctx)
}
if gctx.Request.Method == "HEAD" {
gctx.String(http.StatusOK, "")
return
}
gctx.JSON(http.StatusOK, results)
return
}
func (c *Controller) DeleteDecisionById(gctx *gin.Context) {
defer types.CatchPanic("crowdsec/controllersV1/DeleteDecisionById")
var err error
decisionIDStr := gctx.Param("decision_id")
decisionID, err := strconv.Atoi(decisionIDStr)
if err != nil {
gctx.JSON(http.StatusBadRequest, gin.H{"message": "decision_id must be valid integer"})
return
}
err = c.DBClient.SoftDeleteDecisionByID(decisionID)
if err != nil {
c.HandleDBErrors(gctx, err)
return
}
deleteDecisionResp := models.DeleteDecisionResponse{
NbDeleted: "1",
}
gctx.JSON(http.StatusOK, deleteDecisionResp)
return
}
func (c *Controller) DeleteDecisions(gctx *gin.Context) {
defer types.CatchPanic("crowdsec/controllersV1/DeleteDecisions")
var err error
nbDeleted, err := c.DBClient.SoftDeleteDecisionsWithFilter(gctx.Request.URL.Query())
if err != nil {
c.HandleDBErrors(gctx, err)
return
}
deleteDecisionResp := models.DeleteDecisionResponse{
NbDeleted: nbDeleted,
}
gctx.JSON(http.StatusOK, deleteDecisionResp)
return
}
func (c *Controller) StreamDecision(gctx *gin.Context) {
defer types.CatchPanic("crowdsec/controllersV1/StreamDecision")
var data []*ent.Decision
ret := make(map[string][]*models.Decision)
ret["new"] = []*models.Decision{}
ret["deleted"] = []*models.Decision{}
val := gctx.Request.Header.Get(c.APIKeyHeader)
hashedKey := sha512.New()
hashedKey.Write([]byte(val))
hashStr := fmt.Sprintf("%x", hashedKey.Sum(nil))
bouncerInfo, err := c.DBClient.SelectBouncer(hashStr)
if err != nil {
if _, ok := err.(*ent.NotFoundError); ok {
gctx.JSON(http.StatusForbidden, gin.H{"message": err.Error()})
} else {
gctx.JSON(http.StatusUnauthorized, gin.H{"message": "not allowed"})
}
return
}
if bouncerInfo == nil {
gctx.JSON(http.StatusUnauthorized, gin.H{"message": "not allowed"})
return
}
// if the bouncer has just started (startup=true), return all active decisions
if val, ok := gctx.Request.URL.Query()["startup"]; ok {
if val[0] == "true" {
data, err := c.DBClient.QueryAllDecisions()
if err != nil {
log.Errorf("failed querying decisions: %v", err)
gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()})
return
}
ret["new"], err = FormatDecisions(data)
if err != nil {
log.Errorf("unable to format expired decision for '%s' : %v", bouncerInfo.Name, err)
gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()})
return
}
// getting expired decisions
data, err = c.DBClient.QueryExpiredDecisions()
if err != nil {
log.Errorf("unable to query expired decision for '%s' : %v", bouncerInfo.Name, err)
gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()})
return
}
ret["deleted"], err = FormatDecisions(data)
if err != nil {
log.Errorf("unable to format expired decision for '%s' : %v", bouncerInfo.Name, err)
gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()})
return
}
if err := c.DBClient.UpdateBouncerLastPull(time.Now(), bouncerInfo.ID); err != nil {
log.Errorf("unable to update bouncer '%s' pull: %v", bouncerInfo.Name, err)
gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()})
return
}
if gctx.Request.Method == "HEAD" {
gctx.String(http.StatusOK, "")
return
}
gctx.JSON(http.StatusOK, ret)
return
}
}
// getting new decisions
data, err = c.DBClient.QueryNewDecisionsSince(bouncerInfo.LastPull)
if err != nil {
log.Errorf("unable to query new decision for '%s' : %v", bouncerInfo.Name, err)
gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()})
return
}
ret["new"], err = FormatDecisions(data)
if err != nil {
log.Errorf("unable to format new decision for '%s' : %v", bouncerInfo.Name, err)
gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()})
return
}
// getting expired decisions
data, err = c.DBClient.QueryExpiredDecisionsSince(bouncerInfo.LastPull.Add(-2 * time.Second)) // do we want to give exactly lastPull time?
if err != nil {
log.Errorf("unable to query expired decision for '%s' : %v", bouncerInfo.Name, err)
gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()})
return
}
ret["deleted"], err = FormatDecisions(data)
if err != nil {
log.Errorf("unable to format expired decision for '%s' : %v", bouncerInfo.Name, err)
gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()})
return
}
if err := c.DBClient.UpdateBouncerLastPull(time.Now(), bouncerInfo.ID); err != nil {
log.Errorf("unable to update bouncer '%s' pull: %v", bouncerInfo.Name, err)
gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()})
return
}
gctx.JSON(http.StatusOK, ret)
return
}
| FormatDecisions |
resnext_wsl.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the |
'''
Code From : https://github.com/facebookresearch/WSL-Images/blob/master/hubconf.py
'''
dependencies = ['torch', 'torchvision']
try:
from torch.hub import load_state_dict_from_url
except ImportError:
from torch.utils.model_zoo import load_url as load_state_dict_from_url
from Res import ResNet, Bottleneck
model_urls = {
'resnext101_32x8d': 'https://download.pytorch.org/models/ig_resnext101_32x8-c38310e5.pth',
'resnext101_32x16d': 'https://download.pytorch.org/models/ig_resnext101_32x16-c6f796b0.pth',
'resnext101_32x32d': 'https://download.pytorch.org/models/ig_resnext101_32x32-e4b90b00.pth',
'resnext101_32x48d': 'https://download.pytorch.org/models/ig_resnext101_32x48-3e41cc8a.pth',
}
def _resnext(arch, block, layers, pretrained, progress, **kwargs):
model = ResNet(block, layers, **kwargs)
state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
model.load_state_dict(state_dict)
return model
def resnext101_32x8d_wsl(progress=True, **kwargs):
"""Constructs a ResNeXt-101 32x8 model pre-trained on weakly-supervised data
and finetuned on ImageNet from Figure 5 in
`"Exploring the Limits of Weakly Supervised Pretraining" <https://arxiv.org/abs/1805.00932>`_
Args:
progress (bool): If True, displays a progress bar of the download to stderr.
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 8
return _resnext('resnext101_32x8d', Bottleneck, [3, 4, 23, 3], True, progress, **kwargs)
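# Usage sketch (illustrative, not from this file; assumes standard ImageNet-style
# preprocessing upstream):
# model = resnext101_32x8d_wsl(progress=True)
# model.eval()  # inference mode; pretrained weights are downloaded on first use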
def resnext101_32x16d_wsl(progress=True, **kwargs):
"""Constructs a ResNeXt-101 32x16 model pre-trained on weakly-supervised data
and finetuned on ImageNet from Figure 5 in
`"Exploring the Limits of Weakly Supervised Pretraining" <https://arxiv.org/abs/1805.00932>`_
Args:
progress (bool): If True, displays a progress bar of the download to stderr.
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 16
return _resnext('resnext101_32x16d', Bottleneck, [3, 4, 23, 3], True, progress, **kwargs)
def resnext101_32x32d_wsl(progress=True, **kwargs):
"""Constructs a ResNeXt-101 32x32 model pre-trained on weakly-supervised data
and finetuned on ImageNet from Figure 5 in
`"Exploring the Limits of Weakly Supervised Pretraining" <https://arxiv.org/abs/1805.00932>`_
Args:
progress (bool): If True, displays a progress bar of the download to stderr.
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 32
return _resnext('resnext101_32x32d', Bottleneck, [3, 4, 23, 3], True, progress, **kwargs)
def resnext101_32x48d_wsl(progress=True, **kwargs):
"""Constructs a ResNeXt-101 32x48 model pre-trained on weakly-supervised data
and finetuned on ImageNet from Figure 5 in
`"Exploring the Limits of Weakly Supervised Pretraining" <https://arxiv.org/abs/1805.00932>`_
Args:
progress (bool): If True, displays a progress bar of the download to stderr.
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 48
return _resnext('resnext101_32x48d', Bottleneck, [3, 4, 23, 3], True, progress, **kwargs) | # LICENSE file in the root directory of this source tree.
# Optional list of dependencies required by the package |
mongoose.js | import mongoose from 'mongoose'
mongoose.Promise = global.Promise
export const createConnection = (url) => {
return mongoose.connect(url) | } |
|
index.js | import './index.scss';
import $ from 'jquery';
import assets from './assets';
import header from '../partials/header';
import footer from '../partials/footer';
function | (router, api) {
this.router = router;
this.templateIndex = require('./index.html');
}
Ecosystem.prototype = {
index: function () {
let self = this;
$('title').html('Mixin Official Website');
$('body').attr('class', 'ecosystem layout');
let wallets = [
{
logo: assets.mixin_logo,
cover: assets.mixin,
name: "ecosystem.messenger.name",
description: "ecosystem.messenger.description",
address: "/messenger",
},
{
class: "revert",
logo: assets.fennec_log,
cover: assets.fennec,
name: "ecosystem.fennec.name",
description: "ecosystem.fennec.description",
address: "https://github.com/fox-one/fennec",
},
];
let defi = [
{
logo: assets.foxswap,
name: "ecosystem.4swap.name",
description: "ecosystem.4swap.description",
},
{
logo: assets.leaf,
name: "ecosystem.leaf.name",
description: "ecosystem.leaf.description",
},
{
logo: assets.rings,
name: "ecosystem.rings.name",
description: "ecosystem.rings.description",
},
{
logo: assets.mixswap,
name: "ecosystem.mixswap.name",
description: "ecosystem.mixswap.description",
},
{
logo: assets.exinlocal_defi,
name: "ecosystem.exinlocal.name",
description: "ecosystem.exinlocal.description",
},
{
logo: assets.optiondance,
name: "ecosystem.optiondance.name",
description: "ecosystem.optiondance.description",
},
];
let datas = [0, 0, 0, 0, 0];
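// fill() puts the same group array into every slot and flat() then expands it,
// so each partner group below repeats five times (e.g. for a looping marquee)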
let partners = [
{
class: "brands",
data: datas.fill([
{
logo: assets.links,
name: "Links",
},
{
logo: assets.poolin,
name: "Poolin",
},
{
logo: assets.exinone,
name: "Exin",
},
{
logo: assets.fox,
name: "Fox",
},
{
logo: assets.quorum,
name: "Quorum",
},
{
logo: assets.bigone,
name: "BigONE",
},
]).flat(),
},
{
class: "ecosystems",
data: datas.fill([
{
logo: assets.exinearn,
name: "ExinEarn",
},
{
logo: assets.coinview,
name: "CoinView",
},
{
logo: assets.pando,
name: "Pando",
},
{
logo: assets.blockchair,
name: "BlockChair",
},
]).flat(),
},
{
class: "coins",
data: datas.fill([
{
logo: assets.tron,
name: "Tron",
},
{
logo: assets.hzn,
name: "Horizen",
},
{
logo: assets.dot,
name: "Polkadot",
},
{
logo: assets.mob,
name: "MobileCoin",
},
{
logo: assets.vcash,
name: "VCash",
},
]).flat(),
}
]
let apps = [
{
logo: assets.links,
type: "ecosystem.links.type",
name: "ecosystem.links.name",
description: "ecosystem.links.description",
},
{
logo: assets.exinearn,
type: "ecosystem.exinearn.type",
name: "ecosystem.exinearn.name",
description: "ecosystem.exinearn.description",
},
{
logo: assets.exinone,
type: "ecosystem.exinone.type",
name: "ecosystem.exinone.name",
description: "ecosystem.exinone.description",
},
{
logo: assets.blockchair,
type: "ecosystem.blockchair.type",
name: "ecosystem.blockchair.name",
description: "ecosystem.blockchair.description",
},
{
logo: assets.bwatch,
type: "ecosystem.bwatch.type",
name: "ecosystem.bwatch.name",
description: "ecosystem.bwatch.description",
},
{
logo: assets.exinlocal,
type: "ecosystem.exinlocal.type",
name: "ecosystem.exinlocal.name",
description: "ecosystem.exinlocal.description",
},
{
logo: assets.oceanone,
type: "ecosystem.oceanone.type",
name: "ecosystem.oceanone.name",
description: "ecosystem.oceanone.description",
},
{
logo: assets.bigdex,
type: "ecosystem.bigdex.type",
name: "ecosystem.bigdex.name",
description: "ecosystem.bigdex.description",
},
{
logo: assets.coinview,
type: "ecosystem.coinview.type",
name: "ecosystem.coinview.name",
description: "ecosystem.coinview.description",
},
];
let index = self.templateIndex({
slogan: 'ecosystem.slogan',
wallets: wallets,
partners: partners,
defi: defi,
apps: apps,
...header,
...footer
});
$('#layout-container').html(index);
self.router.updatePageLinks();
$('.ic_down').click(function() {
$([document.documentElement, document.body]).animate({
scrollTop: $(".wallets").offset().top
}, 1000);
});
},
};
export default Ecosystem;
| Ecosystem |
errors.go | package parse
import (
e "errors"
"fmt"
)
var (
ErrStopIteration = e.New("Stop iteration")
ErrSkipBranch = e.New("Skip branch")
ErrSkipRule = e.New("Skip rule")
)
// ErrBoundIncomplete is an error which means
// that a closing token was not found in the input,
// leaving the requested logical «bound» incomplete.
type ErrBoundIncomplete struct {
Starting []byte
Closing []byte
Position int
}
func (e *ErrBoundIncomplete) Error() string {
return fmt.Sprintf(
"Bound start token '%s' found but close token '%s' is not, bound incomplete at position %d",
string(e.Starting),
string(e.Closing),
e.Position,
)
}
// NewErrBoundIncomplete constructs new ErrBoundIncomplete.
func NewErrBoundIncomplete(position int, starting, closing []byte) error {
return &ErrBoundIncomplete{starting, closing, position}
}
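// Usage sketch (illustrative; closingIdx, pos and the token literals are
// assumptions):
//
//	if closingIdx < 0 {
//		return NewErrBoundIncomplete(pos, []byte("("), []byte(")"))
//	}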
//
// ErrUnsupportedRule is an error which means
// that parser support for the specified Rule is not implemented.
type ErrUnsupportedRule struct {
Rule
}
func (e *ErrUnsupportedRule) Error() string {
return fmt.Sprintf(
"Unsupported rule type '%T'",
e.Rule,
)
}
// NewErrUnsupportedRule constructs new ErrUnsupportedRule.
func NewErrUnsupportedRule(rule Rule) error {
return &ErrUnsupportedRule{rule}
}
//
// ErrUnexpectedEOF is an error which means | // that EOF was met while the parser wanted more
// input.
type ErrUnexpectedEOF struct {
Position int
Rule
}
func (e *ErrUnexpectedEOF) Error() string {
return fmt.Sprintf(
"Unexpected EOF at position '%d' while applying '%s'",
e.Position,
e.Rule,
)
}
// NewErrUnexpectedEOF constructs a new ErrUnexpectedEOF.
func NewErrUnexpectedEOF(position int, rule Rule) error {
return &ErrUnexpectedEOF{position, rule}
}
//
// ErrUnexpectedToken is an error which means
// that the token read from the current position in the input
// is not expected by the current Rule.
type ErrUnexpectedToken struct {
Token []byte
Position int
Rule
}
func (e *ErrUnexpectedToken) Error() string {
return fmt.Sprintf(
"Unexpected token '%s' at position '%d' while applying '%s'",
e.Token,
e.Position,
e.Rule,
)
}
// NewErrUnexpectedToken constructs a new ErrUnexpectedToken.
func NewErrUnexpectedToken(token []byte, position int, rule Rule) error {
return &ErrUnexpectedToken{token, position, rule}
}
//
// ErrNestingTooDeep is an error which means
// the Rule nesting is too deep.
type ErrNestingTooDeep struct {
Nesting int
Position int
}
func (e *ErrNestingTooDeep) Error() string {
return fmt.Sprintf(
"Nesting too deep, counted to '%d' levels at position %d",
e.Nesting,
e.Position,
)
}
// NewErrNestingTooDeep constructs a new ErrNestingTooDeep.
func NewErrNestingTooDeep(nesting int, position int) error {
return &ErrNestingTooDeep{nesting, position}
}
//
// ErrEmptyRule is an error which means
// a Rule with empty content was passed to the parser.
type ErrEmptyRule struct {
Rule
Inside Rule
}
func (e *ErrEmptyRule) Error() string {
return fmt.Sprintf(
"Empty rule of type '%T' = '%s' inside '%s' rule",
e.Rule,
e.Rule,
e.Inside,
)
}
// NewErrEmptyRule constructs a new ErrEmptyRule.
func NewErrEmptyRule(rule Rule, inside Rule) error {
return &ErrEmptyRule{rule, inside}
} | |
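// Editorial sketch (not part of the original file): since every constructor
// returns a pointer to its concrete type, a call site can discriminate the
// error kinds with errors.As from the standard library:
//
//	var ut *ErrUnexpectedToken
//	if errors.As(err, &ut) {
//		log.Printf("unexpected token %q at position %d", ut.Token, ut.Position)
//	}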
kes-statefulset.go | // Copyright (C) 2020, MinIO, Inc.
//
// This code is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License, version 3,
// as published by the Free Software Foundation.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License, version 3,
// along with this program. If not, see <http://www.gnu.org/licenses/>
package statefulsets
import (
miniov2 "github.com/minio/operator/pkg/apis/minio.min.io/v2"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// KESMetadata returns the KES pod metadata set in the configuration.
// If a user specifies metadata in the spec, we return that
// metadata.
func KESMetadata(t *miniov2.Tenant) metav1.ObjectMeta {
meta := metav1.ObjectMeta{}
meta.Labels = t.Spec.KES.Labels
meta.Annotations = t.Spec.KES.Annotations
if meta.Labels == nil {
meta.Labels = make(map[string]string)
}
for k, v := range t.KESPodLabels() {
meta.Labels[k] = v
}
return meta
}
// KESSelector returns the KES pod selector set in the configuration.
func KESSelector(t *miniov2.Tenant) *metav1.LabelSelector {
return &metav1.LabelSelector{
MatchLabels: t.KESPodLabels(),
}
}
// KESVolumeMounts builds the volume mounts for the KES container.
func KESVolumeMounts(t *miniov2.Tenant) []corev1.VolumeMount {
return []corev1.VolumeMount{
{
Name: t.KESVolMountName(),
MountPath: miniov2.KESConfigMountPath,
},
}
}
// KESEnvironmentVars returns the KES environment variables set in configuration.
func KESEnvironmentVars(t *miniov2.Tenant) []corev1.EnvVar {
// pass the identity created while generating the MinIO client cert
return []corev1.EnvVar{
{
Name: "MINIO_KES_IDENTITY",
Value: miniov2.KESIdentity,
},
}
}
// KESServerContainer returns the KES container for a KES StatefulSet.
func KESServerContainer(t *miniov2.Tenant) corev1.Container {
// Args to start KES with config mounted at miniov2.KESConfigMountPath and require but don't verify mTLS authentication
args := []string{"server", "--config=" + miniov2.KESConfigMountPath + "/server-config.yaml", "--auth=off"}
return corev1.Container{
Name: miniov2.KESContainerName,
Image: t.Spec.KES.Image,
Ports: []corev1.ContainerPort{
{
ContainerPort: miniov2.KESPort,
},
},
ImagePullPolicy: t.Spec.KES.ImagePullPolicy,
VolumeMounts: KESVolumeMounts(t),
Args: args,
Env: KESEnvironmentVars(t),
Resources: t.Spec.KES.Resources,
}
}
// kesSecurityContext builds the security context for KES statefulset pods
func kesSecurityContext(t *miniov2.Tenant) *corev1.PodSecurityContext {
runAsNonRoot := true
var runAsUser int64 = 1000
var runAsGroup int64 = 1000
var fsGroup int64 = 1000
securityContext := corev1.PodSecurityContext{
RunAsNonRoot: &runAsNonRoot,
RunAsUser: &runAsUser,
RunAsGroup: &runAsGroup,
FSGroup: &fsGroup,
}
if t.HasKESEnabled() && t.Spec.KES.SecurityContext != nil {
securityContext = *t.Spec.KES.SecurityContext
}
return &securityContext
}
// NewForKES creates a new KES StatefulSet for the given Cluster.
func NewForKES(t *miniov2.Tenant, serviceName string) *appsv1.StatefulSet {
replicas := t.KESReplicas()
// certificate files used by the KES server
certPath := "server.crt"
keyPath := "server.key"
var volumeProjections []corev1.VolumeProjection
var serverCertSecret string
// clientCertSecret holds certificate files (public.crt, private.key and ca.crt) used by KES
// in mTLS with a KMS (eg: authentication with Vault)
var clientCertSecret string
serverCertPaths := []corev1.KeyToPath{
{Key: "public.crt", Path: certPath},
{Key: "private.key", Path: keyPath},
}
configPath := []corev1.KeyToPath{
{Key: "server-config.yaml", Path: "server-config.yaml"},
}
// External certificates will have priority over AutoCert generated certificates
if t.KESExternalCert() {
serverCertSecret = t.Spec.KES.ExternalCertSecret.Name
// This covers both secrets of type "kubernetes.io/tls" and
// "cert-manager.io/v1alpha2" because of same keys in both.
if t.Spec.KES.ExternalCertSecret.Type == "kubernetes.io/tls" || t.Spec.KES.ExternalCertSecret.Type == "cert-manager.io/v1alpha2" || t.Spec.KES.ExternalCertSecret.Type == "cert-manager.io/v1" {
serverCertPaths = []corev1.KeyToPath{
{Key: "tls.crt", Path: certPath},
{Key: "tls.key", Path: keyPath},
}
}
} else {
serverCertSecret = t.KESTLSSecretName()
}
if t.KESClientCert() {
clientCertSecret = t.Spec.KES.ClientCertSecret.Name
}
if t.Spec.KES.Configuration.Name != "" {
volumeProjections = append(volumeProjections, corev1.VolumeProjection{
Secret: &corev1.SecretProjection{
LocalObjectReference: corev1.LocalObjectReference{
Name: t.Spec.KES.Configuration.Name,
},
Items: configPath,
}, | }
if serverCertSecret != "" {
volumeProjections = append(volumeProjections, corev1.VolumeProjection{
Secret: &corev1.SecretProjection{
LocalObjectReference: corev1.LocalObjectReference{
Name: serverCertSecret,
},
Items: serverCertPaths,
},
})
}
if clientCertSecret != "" {
volumeProjections = append(volumeProjections, corev1.VolumeProjection{
Secret: &corev1.SecretProjection{
LocalObjectReference: corev1.LocalObjectReference{
Name: clientCertSecret,
},
},
})
}
podVolumes := []corev1.Volume{
{
Name: t.KESVolMountName(),
VolumeSource: corev1.VolumeSource{
Projected: &corev1.ProjectedVolumeSource{
Sources: volumeProjections,
},
},
},
}
containers := []corev1.Container{KESServerContainer(t)}
ss := &appsv1.StatefulSet{
ObjectMeta: metav1.ObjectMeta{
Namespace: t.Namespace,
Name: t.KESStatefulSetName(),
OwnerReferences: t.OwnerRef(),
},
Spec: appsv1.StatefulSetSpec{
UpdateStrategy: appsv1.StatefulSetUpdateStrategy{
Type: miniov2.DefaultUpdateStrategy,
},
PodManagementPolicy: t.Spec.PodManagementPolicy,
// KES is always matched via Tenant Name + KES prefix
Selector: KESSelector(t),
ServiceName: serviceName,
Replicas: &replicas,
Template: corev1.PodTemplateSpec{
ObjectMeta: KESMetadata(t),
Spec: corev1.PodSpec{
ServiceAccountName: t.Spec.KES.ServiceAccountName,
Containers: containers,
Volumes: podVolumes,
RestartPolicy: corev1.RestartPolicyAlways,
SchedulerName: t.Scheduler.Name,
NodeSelector: t.Spec.KES.NodeSelector,
Tolerations: t.Spec.KES.Tolerations,
Affinity: t.Spec.KES.Affinity,
TopologySpreadConstraints: t.Spec.KES.TopologySpreadConstraints,
SecurityContext: kesSecurityContext(t),
},
},
},
}
// Address issue https://github.com/kubernetes/kubernetes/issues/85332
if t.Spec.ImagePullSecret.Name != "" {
ss.Spec.Template.Spec.ImagePullSecrets = []corev1.LocalObjectReference{t.Spec.ImagePullSecret}
}
return ss
} | }) |
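// Editorial sketch (hypothetical names, not part of the original file): the
// operator would render and submit the StatefulSet roughly like
//
//	ss := NewForKES(tenant, "tenant-kes-hl-svc") // headless service name is illustrative
//	// ss is then created or updated through the operator's Kubernetes clientset.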
to-RM-styles.js | 'use strict';
Object.defineProperty(exports, "__esModule", {
value: true
});
var _extends = Object.assign || function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; };
var _typeof = typeof Symbol === "function" && typeof Symbol.iterator === "symbol" ? function (obj) { return typeof obj; } : function (obj) { return obj && typeof Symbol === "function" && obj.constructor === Symbol && obj !== Symbol.prototype ? "symbol" : typeof obj; };
exports.default = toRMStyles;
var _reactMotion = require('react-motion'); | function toRMStyles(styles) {
var rmStyles = {};
Object.keys(styles).forEach(function (key) {
var style = styles[key];
var isObject = (typeof style === 'undefined' ? 'undefined' : _typeof(style)) === 'object';
// check if user passed their own config
// if not default to a regular spring
rmStyles[key] = isObject ? _extends({}, style) : (0, _reactMotion.spring)(style);
});
return rmStyles;
} | |
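// Editorial sketch (illustrative values, not part of the original file):
// plain numbers are wrapped in a default spring, while objects are assumed
// to already be react-motion spring configs and are shallow-copied as-is.
//   toRMStyles({ x: 10, y: spring(5, { stiffness: 120 }) })
//   // => { x: spring(10), y: { ...config produced by spring(5, ...) } }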
server.rs | use crate::{MAX_NAMES, Result, ServerMetrics, IP_MASK, PORT_MASK, ADDR_EXPIRE_SECONDS, NAME_EXPIRE_SECONDS, MAX_ADDRS};
use std::{time::{Duration, Instant}, net::SocketAddr};
use serde::{Serialize,Deserialize};
use serde_cbor::Value as CborValue;
use tokio::net::UdpSocket;
pub async fn server(sa: SocketAddr) -> Result<()> {
use ttl_cache::TtlCache;
type Registry = TtlCache<String, TtlCache<SocketAddr, ()>>;
let mut registry: Registry = TtlCache::new(MAX_NAMES);
/// Server's view on control message.
#[derive(Serialize, Deserialize)]
struct ControlMessage {
na: String,
ip: Option<u32>,
po: Option<u16>,
/// client fields are hidden here.
#[serde(flatten)]
_rest: CborValue,
}
let mut metrics = ServerMetrics::default();
let mut u = UdpSocket::bind(sa).await?;
let mut buf = [0u8; 2048];
async fn | (
metrics: &mut ServerMetrics,
registry: &mut Registry,
u: &mut UdpSocket,
b: &[u8],
from: SocketAddr,
) -> Result<()> {
        if b.is_empty() {
            metrics.zeropkt += 1;
            return Ok(());
        }
        // metrics request
        if b.len() == 1 && b[0] == b'?' {
            metrics.me_rq += 1;
            let v = serde_cbor::to_vec(metrics)?;
            u.send_to(&v[..], from).await?;
            return Ok(());
        }
let mut p: ControlMessage = serde_cbor::from_slice(b)?;
p.ip = Some(match from.ip() {
std::net::IpAddr::V4(a) => Into::<u32>::into(a) ^ IP_MASK,
std::net::IpAddr::V6(_) => 0 ^ IP_MASK,
});
p.po = Some(from.port() ^ PORT_MASK);
let v = serde_cbor::to_vec(&p)?;
if p.na == "" {
metrics.ping += 1;
// Just send this back. Act as poor man's STUN.
u.send_to(&v[..], from).await?;
return Ok(());
}
metrics.named += 1;
let mut addrs = registry
.remove(&p.na)
.unwrap_or_else(|| {metrics.newnm+=1; TtlCache::new(MAX_ADDRS)});
// Broadcast this message (with filled in source address) to all other subscribed peers
let mut lone = true;
for (x, ()) in addrs.iter() {
if x == &from {
continue;
}
lone = false;
match u.send_to(&v[..], x).await {
Ok(_) => metrics.sent += 1,
Err(_) => metrics.snderr += 1,
}
}
if lone {
metrics.lone += 1;
}
addrs.insert(from, (), Duration::from_secs(ADDR_EXPIRE_SECONDS));
registry.insert(p.na, addrs, Duration::from_secs(NAME_EXPIRE_SECONDS));
Ok(())
}
let mut error_ratelimiter = Instant::now();
let mut maybereporterr = |e| {
if Instant::now() >= error_ratelimiter {
eprintln!("error: {}", e);
error_ratelimiter = Instant::now() + Duration::from_millis(50);
}
};
loop {
match u.recv_from(&mut buf[..]).await {
Ok((len, from)) => {
metrics.pkt_rcv+=1;
if let Err(e) =
handle_packet(&mut metrics, &mut registry, &mut u, &buf[0..len], from).await
{
metrics.han_err+=1;
maybereporterr(e);
}
}
Err(e) => {
metrics.rcv_err+=1;
maybereporterr(Box::new(e));
}
}
}
} | handle_packet |
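// Editorial sketch (illustrative, not part of the original file): probing the
// server from a client, assuming a bound tokio UdpSocket `sock` and a `buf`:
//
//     sock.send_to(b"?", server_addr).await?;            // request ServerMetrics
//     let n = sock.recv(&mut buf).await?;
//     let metrics: ServerMetrics = serde_cbor::from_slice(&buf[..n])?;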
auth.controller.ts | import { Body, Controller, Post, Req, UseGuards, ValidationPipe } from '@nestjs/common';
import { AuthGuard } from '@nestjs/passport';
import { AuthCredentialsDto } from './dto/auth-credentials-dto';
import { AuthService } from './auth.service';
import { GetUser } from './get-user.decorator';
import { User } from './user.entity';
@Controller('auth') | }
@Post('/signup')
  signUp(@Body(ValidationPipe) authCredentialsDto: AuthCredentialsDto): Promise<void> {
    return this.authService.signUp(authCredentialsDto);
}
@Post('/signin')
  signIn(@Body(ValidationPipe) authCredentialsDto: AuthCredentialsDto): Promise<{ accessToken: string }> {
    return this.authService.signIn(authCredentialsDto);
}
} | export class AuthController {
constructor(
private authService: AuthService,
) { |
rand.go | /*
Copyright 2018 Pressinfra SRL.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package rand provide functions for securely generating random strings. It
// uses crypto/rand to securely generate random sequences of characters.
// It is adapted from https://gist.github.com/denisbrodbeck/635a644089868a51eccd6ae22b2eb800
// to support multiple character sets.
package rand
import (
"crypto/rand"
"fmt"
"io"
"math/big"
)
const (
lowerLetters = "abcdefghijklmnopqrstuvwxyz"
upperLetters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
letters = lowerLetters + upperLetters
digits = "0123456789"
alphanumerics = letters + digits
ascii = alphanumerics + "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~"
)
// NewStringGenerator generates a cryptographically secure random sequence
// generator from the given characters.
func NewStringGenerator(characters string) func(int) (string, error) {
return func(length int) (string, error) {
result := ""
for {
if len(result) >= length {
return result, nil
}
num, err := rand.Int(rand.Reader, big.NewInt(int64(len(characters))))
if err != nil {
return "", err
}
n := num.Int64()
result += string(characters[n])
}
}
}
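// Editorial sketch (not part of the original file): building a custom
// generator, e.g. for lowercase hexadecimal tokens.
//
//	hexToken := NewStringGenerator("0123456789abcdef")
//	token, err := hexToken(32) // 32 random hex characters
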
var alphaNumericStringGenerator = NewStringGenerator(alphanumerics)
// AlphaNumericString returns a cryptographically secure random sequence of
// alphanumeric characters.
func AlphaNumericString(length int) (string, error) {
return alphaNumericStringGenerator(length)
}
var lowerAlphaNumericStringGenerator = NewStringGenerator(lowerLetters + digits)
// LowerAlphaNumericString returns a cryptographically secure random sequence of
// lower alphanumeric characters.
func LowerAlphaNumericString(length int) (string, error) {
return lowerAlphaNumericStringGenerator(length)
}
var asciiStringGenerator = NewStringGenerator(ascii)
// ASCIIString returns a cryptographically secure random sequence of
// printable ASCII characters, excluding space.
func ASCIIString(length int) (string, error) |
func init() {
assertAvailablePRNG()
}
func assertAvailablePRNG() {
// Assert that a cryptographically secure PRNG is available.
// Panic otherwise.
buf := make([]byte, 1)
_, err := io.ReadFull(rand.Reader, buf)
if err != nil {
panic(fmt.Sprintf("crypto/rand is unavailable: Read() failed with %#v", err))
}
}
| {
return asciiStringGenerator(length)
} |
paneview.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import 'vs/css!./paneview';
import { IDisposable, Disposable, DisposableStore } from 'vs/base/common/lifecycle';
import { Event, Emitter } from 'vs/base/common/event';
import { domEvent } from 'vs/base/browser/event';
import { StandardKeyboardEvent } from 'vs/base/browser/keyboardEvent';
import { KeyCode } from 'vs/base/common/keyCodes';
import { $, append, addClass, removeClass, toggleClass, trackFocus, EventHelper, clearNode } from 'vs/base/browser/dom';
import { firstIndex } from 'vs/base/common/arrays';
import { Color, RGBA } from 'vs/base/common/color';
import { SplitView, IView } from './splitview';
import { isFirefox } from 'vs/base/browser/browser';
import { DataTransfers } from 'vs/base/browser/dnd';
import { Orientation } from 'vs/base/browser/ui/sash/sash';
import { localize } from 'vs/nls';
export interface IPaneOptions {
minimumBodySize?: number;
maximumBodySize?: number;
expanded?: boolean;
orientation?: Orientation;
title: string;
}
export interface IPaneStyles {
dropBackground?: Color;
headerForeground?: Color;
headerBackground?: Color;
headerBorder?: Color;
}
/**
* A Pane is a structured SplitView view.
*
 * WARNING: You must call `render()` after you construct it.
* It can't be done automatically at the end of the ctor
* because of the order of property initialization in TypeScript.
* Subclasses wouldn't be able to set own properties
 * before the `render()` call, thus forbidding their use.
*/
export abstract class Pane extends Disposable implements IView {
private static readonly HEADER_SIZE = 22;
readonly element: HTMLElement;
private header!: HTMLElement;
private body!: HTMLElement;
protected _expanded: boolean;
protected _orientation: Orientation;
private expandedSize: number | undefined = undefined;
private _headerVisible = true;
private _minimumBodySize: number;
private _maximumBodySize: number;
private ariaHeaderLabel: string;
private styles: IPaneStyles = {};
private animationTimer: number | undefined = undefined;
private readonly _onDidChange = this._register(new Emitter<number | undefined>());
readonly onDidChange: Event<number | undefined> = this._onDidChange.event;
private readonly _onDidChangeExpansionState = this._register(new Emitter<boolean>());
readonly onDidChangeExpansionState: Event<boolean> = this._onDidChangeExpansionState.event;
get draggableElement(): HTMLElement {
return this.header;
}
get dropTargetElement(): HTMLElement {
return this.element;
}
private _dropBackground: Color | undefined;
get dropBackground(): Color | undefined {
return this._dropBackground;
}
get minimumBodySize(): number {
return this._minimumBodySize;
}
set minimumBodySize(size: number) {
this._minimumBodySize = size;
this._onDidChange.fire(undefined);
}
get maximumBodySize(): number {
return this._maximumBodySize;
}
set maximumBodySize(size: number) {
this._maximumBodySize = size;
this._onDidChange.fire(undefined);
}
private get headerSize(): number {
return this.headerVisible ? Pane.HEADER_SIZE : 0;
}
get minimumSize(): number {
const headerSize = this.headerSize;
const expanded = !this.headerVisible || this.isExpanded();
const minimumBodySize = expanded ? this._minimumBodySize : this._orientation === Orientation.HORIZONTAL ? 50 : 0;
return headerSize + minimumBodySize;
}
get maximumSize(): number {
const headerSize = this.headerSize;
const expanded = !this.headerVisible || this.isExpanded();
const maximumBodySize = expanded ? this._maximumBodySize : this._orientation === Orientation.HORIZONTAL ? 50 : 0;
return headerSize + maximumBodySize;
}
orthogonalSize: number = 0;
constructor(options: IPaneOptions) {
super();
this._expanded = typeof options.expanded === 'undefined' ? true : !!options.expanded;
this._orientation = typeof options.orientation === 'undefined' ? Orientation.VERTICAL : options.orientation;
this.ariaHeaderLabel = localize('viewSection', "{0} Section", options.title);
this._minimumBodySize = typeof options.minimumBodySize === 'number' ? options.minimumBodySize : 120;
this._maximumBodySize = typeof options.maximumBodySize === 'number' ? options.maximumBodySize : Number.POSITIVE_INFINITY;
this.element = $('.pane');
}
isExpanded(): boolean {
return this._expanded;
}
setExpanded(expanded: boolean): boolean {
if (this._expanded === !!expanded) {
return false;
}
this._expanded = !!expanded;
this.updateHeader();
if (expanded) {
if (typeof this.animationTimer === 'number') {
clearTimeout(this.animationTimer);
}
append(this.element, this.body);
} else {
this.animationTimer = window.setTimeout(() => {
this.body.remove();
}, 200);
}
this._onDidChangeExpansionState.fire(expanded);
this._onDidChange.fire(expanded ? this.expandedSize : undefined);
return true;
}
get headerVisible(): boolean {
return this._headerVisible;
}
set headerVisible(visible: boolean) {
if (this._headerVisible === !!visible) {
return;
}
this._headerVisible = !!visible;
this.updateHeader();
this._onDidChange.fire(undefined);
}
get orientation(): Orientation {
return this._orientation;
}
set orientation(orientation: Orientation) {
if (this._orientation === orientation) {
return;
}
this._orientation = orientation;
}
render(): void {
this.header = $('.pane-header');
append(this.element, this.header);
this.header.setAttribute('tabindex', '0');
this.header.setAttribute('role', 'toolbar');
this.header.setAttribute('aria-label', this.ariaHeaderLabel);
this.renderHeader(this.header);
const focusTracker = trackFocus(this.header);
this._register(focusTracker);
this._register(focusTracker.onDidFocus(() => addClass(this.header, 'focused'), null));
this._register(focusTracker.onDidBlur(() => removeClass(this.header, 'focused'), null));
this.updateHeader();
const onHeaderKeyDown = Event.chain(domEvent(this.header, 'keydown'))
.map(e => new StandardKeyboardEvent(e));
this._register(onHeaderKeyDown.filter(e => e.keyCode === KeyCode.Enter || e.keyCode === KeyCode.Space)
.event(() => this.setExpanded(!this.isExpanded()), null));
this._register(onHeaderKeyDown.filter(e => e.keyCode === KeyCode.LeftArrow)
.event(() => this.setExpanded(false), null));
this._register(onHeaderKeyDown.filter(e => e.keyCode === KeyCode.RightArrow)
.event(() => this.setExpanded(true), null));
this._register(domEvent(this.header, 'click')
(() => this.setExpanded(!this.isExpanded()), null));
this.body = append(this.element, $('.pane-body'));
this.renderBody(this.body);
if (!this.isExpanded()) {
this.body.remove();
}
}
layout(size: number): void {
const headerSize = this.headerVisible ? Pane.HEADER_SIZE : 0;
const width = this._orientation === Orientation.VERTICAL ? this.orthogonalSize : size;
const height = this._orientation === Orientation.VERTICAL ? size - headerSize : this.orthogonalSize - headerSize;
if (this.isExpanded()) {
this.layoutBody(height, width);
this.expandedSize = size;
}
}
style(styles: IPaneStyles): void {
this.styles = styles;
if (!this.header) {
return;
}
this.updateHeader();
}
protected updateHeader(): void {
const expanded = !this.headerVisible || this.isExpanded();
this.header.style.height = `${this.headerSize}px`;
this.header.style.lineHeight = `${this.headerSize}px`;
toggleClass(this.header, 'hidden', !this.headerVisible);
toggleClass(this.header, 'expanded', expanded);
this.header.setAttribute('aria-expanded', String(expanded));
this.header.style.color = this.styles.headerForeground ? this.styles.headerForeground.toString() : '';
this.header.style.backgroundColor = this.styles.headerBackground ? this.styles.headerBackground.toString() : '';
this.header.style.borderTop = this.styles.headerBorder ? `1px solid ${this.styles.headerBorder}` : '';
this._dropBackground = this.styles.dropBackground;
}
protected abstract renderHeader(container: HTMLElement): void;
protected abstract renderBody(container: HTMLElement): void;
protected abstract layoutBody(height: number, width: number): void;
}
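// Editorial sketch (not part of the original file): a minimal Pane subclass.
// Per the class comment above, `render()` must be called after construction:
//
// class SimplePane extends Pane {
// 	protected renderHeader(container: HTMLElement): void {
// 		container.textContent = 'Simple';
// 	}
// 	protected renderBody(container: HTMLElement): void {
// 		container.textContent = 'Body';
// 	}
// 	protected layoutBody(height: number, width: number): void {
// 		// size the body contents to height x width here
// 	}
// }
// const pane = new SimplePane({ title: 'Simple' });
// pane.render();
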
interface IDndContext {
draggable: PaneDraggable | null;
}
class | extends Disposable {
private static readonly DefaultDragOverBackgroundColor = new Color(new RGBA(128, 128, 128, 0.5));
private dragOverCounter = 0; // see https://github.com/Microsoft/vscode/issues/14470
private _onDidDrop = this._register(new Emitter<{ from: Pane, to: Pane }>());
readonly onDidDrop = this._onDidDrop.event;
constructor(private pane: Pane, private dnd: IPaneDndController, private context: IDndContext) {
super();
pane.draggableElement.draggable = true;
this._register(domEvent(pane.draggableElement, 'dragstart')(this.onDragStart, this));
this._register(domEvent(pane.dropTargetElement, 'dragenter')(this.onDragEnter, this));
this._register(domEvent(pane.dropTargetElement, 'dragleave')(this.onDragLeave, this));
this._register(domEvent(pane.dropTargetElement, 'dragend')(this.onDragEnd, this));
this._register(domEvent(pane.dropTargetElement, 'drop')(this.onDrop, this));
}
private onDragStart(e: DragEvent): void {
if (!this.dnd.canDrag(this.pane) || !e.dataTransfer) {
e.preventDefault();
e.stopPropagation();
return;
}
e.dataTransfer.effectAllowed = 'move';
if (isFirefox) {
// Firefox: requires to set a text data transfer to get going
e.dataTransfer?.setData(DataTransfers.TEXT, this.pane.draggableElement.textContent || '');
}
const dragImage = append(document.body, $('.monaco-drag-image', {}, this.pane.draggableElement.textContent || ''));
e.dataTransfer.setDragImage(dragImage, -10, -10);
setTimeout(() => document.body.removeChild(dragImage), 0);
this.context.draggable = this;
}
private onDragEnter(e: DragEvent): void {
if (!this.context.draggable || this.context.draggable === this) {
return;
}
if (!this.dnd.canDrop(this.context.draggable.pane, this.pane)) {
return;
}
this.dragOverCounter++;
this.render();
}
private onDragLeave(e: DragEvent): void {
if (!this.context.draggable || this.context.draggable === this) {
return;
}
if (!this.dnd.canDrop(this.context.draggable.pane, this.pane)) {
return;
}
this.dragOverCounter--;
if (this.dragOverCounter === 0) {
this.render();
}
}
private onDragEnd(e: DragEvent): void {
if (!this.context.draggable) {
return;
}
this.dragOverCounter = 0;
this.render();
this.context.draggable = null;
}
private onDrop(e: DragEvent): void {
if (!this.context.draggable) {
return;
}
EventHelper.stop(e);
this.dragOverCounter = 0;
this.render();
if (this.dnd.canDrop(this.context.draggable.pane, this.pane) && this.context.draggable !== this) {
this._onDidDrop.fire({ from: this.context.draggable.pane, to: this.pane });
}
this.context.draggable = null;
}
private render(): void {
let backgroundColor: string | null = null;
if (this.dragOverCounter > 0) {
backgroundColor = (this.pane.dropBackground || PaneDraggable.DefaultDragOverBackgroundColor).toString();
}
this.pane.dropTargetElement.style.backgroundColor = backgroundColor || '';
}
}
export interface IPaneDndController {
canDrag(pane: Pane): boolean;
canDrop(pane: Pane, overPane: Pane): boolean;
}
export class DefaultPaneDndController implements IPaneDndController {
canDrag(pane: Pane): boolean {
return true;
}
canDrop(pane: Pane, overPane: Pane): boolean {
return true;
}
}
export interface IPaneViewOptions {
dnd?: IPaneDndController;
orientation?: Orientation;
}
interface IPaneItem {
pane: Pane;
disposable: IDisposable;
}
export class PaneView extends Disposable {
private dnd: IPaneDndController | undefined;
private dndContext: IDndContext = { draggable: null };
private el: HTMLElement;
private paneItems: IPaneItem[] = [];
private orthogonalSize: number = 0;
private size: number = 0;
private splitview: SplitView;
private animationTimer: number | undefined = undefined;
private _onDidDrop = this._register(new Emitter<{ from: Pane, to: Pane }>());
readonly onDidDrop: Event<{ from: Pane, to: Pane }> = this._onDidDrop.event;
orientation: Orientation;
readonly onDidSashChange: Event<number>;
constructor(container: HTMLElement, options: IPaneViewOptions = {}) {
super();
this.dnd = options.dnd;
this.orientation = options.orientation ?? Orientation.VERTICAL;
this.el = append(container, $('.monaco-pane-view'));
this.splitview = this._register(new SplitView(this.el, { orientation: this.orientation }));
this.onDidSashChange = this.splitview.onDidSashChange;
}
addPane(pane: Pane, size: number, index = this.splitview.length): void {
const disposables = new DisposableStore();
pane.onDidChangeExpansionState(this.setupAnimation, this, disposables);
const paneItem = { pane: pane, disposable: disposables };
this.paneItems.splice(index, 0, paneItem);
pane.orientation = this.orientation;
pane.orthogonalSize = this.orthogonalSize;
this.splitview.addView(pane, size, index);
if (this.dnd) {
const draggable = new PaneDraggable(pane, this.dnd, this.dndContext);
disposables.add(draggable);
disposables.add(draggable.onDidDrop(this._onDidDrop.fire, this._onDidDrop));
}
}
removePane(pane: Pane): void {
const index = firstIndex(this.paneItems, item => item.pane === pane);
if (index === -1) {
return;
}
this.splitview.removeView(index);
const paneItem = this.paneItems.splice(index, 1)[0];
paneItem.disposable.dispose();
}
movePane(from: Pane, to: Pane): void {
const fromIndex = firstIndex(this.paneItems, item => item.pane === from);
const toIndex = firstIndex(this.paneItems, item => item.pane === to);
if (fromIndex === -1 || toIndex === -1) {
return;
}
const [paneItem] = this.paneItems.splice(fromIndex, 1);
this.paneItems.splice(toIndex, 0, paneItem);
this.splitview.moveView(fromIndex, toIndex);
}
resizePane(pane: Pane, size: number): void {
const index = firstIndex(this.paneItems, item => item.pane === pane);
if (index === -1) {
return;
}
this.splitview.resizeView(index, size);
}
getPaneSize(pane: Pane): number {
const index = firstIndex(this.paneItems, item => item.pane === pane);
if (index === -1) {
return -1;
}
return this.splitview.getViewSize(index);
}
layout(height: number, width: number): void {
this.orthogonalSize = this.orientation === Orientation.VERTICAL ? width : height;
this.size = this.orientation === Orientation.HORIZONTAL ? width : height;
for (const paneItem of this.paneItems) {
paneItem.pane.orthogonalSize = this.orthogonalSize;
}
this.splitview.layout(this.size);
}
flipOrientation(height: number, width: number): void {
this.orientation = this.orientation === Orientation.VERTICAL ? Orientation.HORIZONTAL : Orientation.VERTICAL;
const paneSizes = this.paneItems.map(pane => this.getPaneSize(pane.pane));
this.splitview.dispose();
clearNode(this.el);
this.splitview = this._register(new SplitView(this.el, { orientation: this.orientation }));
const newOrthogonalSize = this.orientation === Orientation.VERTICAL ? width : height;
const newSize = this.orientation === Orientation.HORIZONTAL ? width : height;
this.paneItems.forEach((pane, index) => {
pane.pane.orthogonalSize = newOrthogonalSize;
pane.pane.orientation = this.orientation;
const viewSize = this.size === 0 ? 0 : (newSize * paneSizes[index]) / this.size;
this.splitview.addView(pane.pane, viewSize, index);
});
this.size = newSize;
this.orthogonalSize = newOrthogonalSize;
this.splitview.layout(this.size);
}
private setupAnimation(): void {
if (typeof this.animationTimer === 'number') {
window.clearTimeout(this.animationTimer);
}
addClass(this.el, 'animated');
this.animationTimer = window.setTimeout(() => {
this.animationTimer = undefined;
removeClass(this.el, 'animated');
}, 200);
}
dispose(): void {
super.dispose();
this.paneItems.forEach(i => i.disposable.dispose());
}
}
| PaneDraggable |
fetchItemDetails.js | import { fetchJson, DENSHO_REST_BASE_URL } from '../shared';
export const FETCH_ITEM_DETAILS = 'fetch_item_details/fetch';
export const FETCH_ITEM_DETAILS_SUCCESS = 'fetch_item_details/fetch_success';
export const FETCH_ITEM_DETAILS_FAIL = 'fetch_item_details/fetch_fail';
const initialState = {
fetchObject: null,
id: '',
name: '',
description: '',
links: '',
isLoading: false,
format: '',
creation: '',
persons: [],
credit: '',
collectionId: '',
errMsg: ''
};
// reducer
export default (state = initialState, action) => {
const { payload } = action;
switch (action.type) {
case FETCH_ITEM_DETAILS:
return {
...state,
isLoading: true,
errMsg: ''
};
case FETCH_ITEM_DETAILS_SUCCESS:
return {
...state,
fetchObject: payload,
id: payload.id,
name: payload.title,
description: payload.description,
links: payload.links,
format: payload.format,
creation: payload.creation,
persons: payload.persons,
credit: payload.credit,
collectionId: payload.collection_id,
isLoading: false
};
case FETCH_ITEM_DETAILS_FAIL:
return {
...state,
isLoading: false,
errMsg: action.payload
};
default:
return state;
}
};
// action
export const fetchItemDetails = id => dispatch => {
dispatch({
type: FETCH_ITEM_DETAILS
});
fetchJson(`${DENSHO_REST_BASE_URL}${id}/`)
.then(json => {
dispatch({
type: FETCH_ITEM_DETAILS_SUCCESS,
payload: json
});
})
.catch(error => {
dispatch({
type: FETCH_ITEM_DETAILS_FAIL,
payload: error | });
});
}; | |
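// Editorial sketch (hypothetical store and id, not part of the original file):
//   store.dispatch(fetchItemDetails('some-item-id'));
//   // dispatches FETCH_ITEM_DETAILS (isLoading: true), then _SUCCESS or _FAIL.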
drag_helpers_test.tsx | import * as React from "react";
import { DragHelpers } from "../drag_helpers";
import { shallow } from "enzyme";
import { DragHelpersProps } from "../interfaces";
import { fakePlant } from "../../../__test_support__/fake_state/resources";
import { Color } from "../../../ui/index";
describe("<DragHelpers/>", () => {
function | (): DragHelpersProps {
return {
mapTransformProps: {
quadrant: 2, gridSize: { x: 3000, y: 1500 }
},
plant: fakePlant(),
dragging: false,
zoomLvl: 1.8,
activeDragXY: { x: undefined, y: undefined, z: undefined },
plantAreaOffset: { x: 100, y: 100 }
};
}
it("doesn't render drag helpers", () => {
const wrapper = shallow(<DragHelpers {...fakeProps() } />);
expect(wrapper.find("text").length).toEqual(0);
expect(wrapper.find("rect").length).toBeLessThanOrEqual(1);
expect(wrapper.find("use").length).toEqual(0);
});
it("renders drag helpers", () => {
const p = fakeProps();
p.dragging = true;
const wrapper = shallow(<DragHelpers {...p } />);
expect(wrapper.find("#coordinates-tooltip").length).toEqual(1);
expect(wrapper.find("#long-crosshair").length).toEqual(1);
expect(wrapper.find("#short-crosshair").length).toEqual(1);
expect(wrapper.find("#alignment-indicator").find("use").length).toBe(0);
expect(wrapper.find("#drag-helpers").props().fill).toEqual(Color.darkGray);
});
it("renders coordinates tooltip while dragging", () => {
const p = fakeProps();
p.dragging = true;
p.plant.body.x = 104;
p.plant.body.y = 199;
const wrapper = shallow(<DragHelpers {...p } />);
expect(wrapper.find("text").length).toEqual(1);
expect(wrapper.find("text").text()).toEqual("100, 200");
expect(wrapper.find("text").props().fontSize).toEqual("1.25rem");
expect(wrapper.find("text").props().dy).toEqual(-20);
});
it("renders coordinates tooltip while dragging: scaled", () => {
const p = fakeProps();
p.dragging = true;
p.zoomLvl = 0.9;
const wrapper = shallow(<DragHelpers {...p } />);
expect(wrapper.find("text").length).toEqual(1);
expect(wrapper.find("text").text()).toEqual("100, 200");
expect(wrapper.find("text").props().fontSize).toEqual("3rem");
expect(wrapper.find("text").props().dy).toEqual(-48);
});
it("renders crosshair while dragging", () => {
const p = fakeProps();
p.dragging = true;
p.plant.body.id = 5;
const wrapper = shallow(<DragHelpers {...p } />);
const crosshair = wrapper.find("#short-crosshair");
expect(crosshair.length).toEqual(1);
const segment = crosshair.find("#crosshair-segment-5");
expect(segment.length).toEqual(1);
expect(segment.find("rect").props())
.toEqual({ "height": 2, "width": 8, "x": 90, "y": 199 });
const segments = crosshair.find("use");
expect(segments.at(0).props().xlinkHref).toEqual("#crosshair-segment-5");
expect(segments.at(0).props().transform).toEqual("rotate(0, 100, 200)");
expect(segments.at(1).props().transform).toEqual("rotate(90, 100, 200)");
expect(segments.at(2).props().transform).toEqual("rotate(180, 100, 200)");
expect(segments.at(3).props().transform).toEqual("rotate(270, 100, 200)");
});
it("renders crosshair while dragging: scaled", () => {
const p = fakeProps();
p.dragging = true;
p.zoomLvl = 0.9;
const wrapper = shallow(<DragHelpers {...p } />);
const crosshair = wrapper.find("#short-crosshair");
expect(crosshair.length).toEqual(1);
expect(crosshair.find("rect").first().props())
.toEqual({ "height": 4.8, "width": 19.2, "x": 76, "y": 197.6 });
expect(crosshair.find("use").length).toEqual(4);
});
it("renders vertical alignment indicators", () => {
const p = fakeProps();
p.dragging = false;
p.plant.body.id = 5;
p.plant.body.x = 100;
p.plant.body.y = 100;
p.activeDragXY = { x: 100, y: 0, z: 0 };
const wrapper = shallow(<DragHelpers {...p } />);
const indicators = wrapper.find("#alignment-indicator");
expect(indicators.length).toEqual(1);
const segment = indicators.find("#alignment-indicator-segment-5");
expect(segment.length).toEqual(1);
expect(segment.find("rect").props())
.toEqual({ "height": 2, "width": 8, "x": 65, "y": 99 });
const segments = indicators.find("use");
expect(segments.length).toEqual(2);
expect(segments.at(0).props().xlinkHref).toEqual("#alignment-indicator-segment-5");
expect(segments.at(0).props().transform).toEqual("rotate(90, 100, 100)");
expect(segments.at(1).props().transform).toEqual("rotate(270, 100, 100)");
expect(indicators.props().fill).toEqual(Color.red);
});
it("renders horizontal alignment indicators", () => {
const p = fakeProps();
p.dragging = false;
p.plant.body.x = 100;
p.plant.body.y = 100;
p.activeDragXY = { x: 0, y: 100, z: 0 };
const wrapper = shallow(<DragHelpers {...p } />);
const indicator = wrapper.find("#alignment-indicator");
const segments = indicator.find("use");
expect(segments.length).toEqual(2);
expect(segments.at(0).props().transform).toEqual("rotate(0, 100, 100)");
expect(segments.at(1).props().transform).toEqual("rotate(180, 100, 100)");
expect(indicator.props().fill).toEqual(Color.red);
});
it("renders horizontal and vertical alignment indicators in quadrant 4", () => {
const p = fakeProps();
p.mapTransformProps.quadrant = 4;
p.dragging = false;
p.plant.body.id = 6;
p.plant.body.x = 100;
p.plant.body.y = 100;
p.activeDragXY = { x: 100, y: 100, z: 0 };
const wrapper = shallow(<DragHelpers {...p } />);
const indicator = wrapper.find("#alignment-indicator");
const masterSegment = indicator.find("#alignment-indicator-segment-6");
const segmentProps = masterSegment.find("rect").props();
expect(segmentProps.x).toEqual(2865);
expect(segmentProps.y).toEqual(1399);
const segments = indicator.find("use");
expect(segments.length).toEqual(4);
expect(segments.at(0).props().transform).toEqual("rotate(0, 2900, 1400)");
expect(segments.at(1).props().transform).toEqual("rotate(180, 2900, 1400)");
expect(segments.at(2).props().transform).toEqual("rotate(90, 2900, 1400)");
expect(segments.at(3).props().transform).toEqual("rotate(270, 2900, 1400)");
expect(indicator.props().fill).toEqual(Color.red);
});
});
| fakeProps |
test_inject.py | import unittest
import os
from SecretManagerEnvInjector import inject
class InjectTest(unittest.TestCase):
| @inject('arn:aws:secretsmanager:us-east-1:xxxxxxxxxxxxxx:secret:bogus-dj3g0R')
def test_inject(self):
        self.assertEqual(os.getenv('bogus-dj3g0R'), 'test2') |
|
client.go | package k8s
import (
"fmt"
"net"
"os"
"strings"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/clientcmd"
aadpodid "github.com/Azure/aad-pod-identity/pkg/apis/aadpodidentity/v1"
crd "github.com/Azure/aad-pod-identity/pkg/crd"
log "github.com/sirupsen/logrus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
getPodListRetries = 4
getPodListSleepTimeMilliseconds = 300
)
var (
// We only want to allow pod-identity with Pending or Running phase status
ignorePodPhaseStatuses = []string{"Succeeded", "Failed", "Unknown", "Completed", "CrashLoopBackOff"}
phaseStatusFilter = getPodPhaseFilter()
)
func getPodPhaseFilter() string {
return ",status.phase!=" + strings.Join(ignorePodPhaseStatuses, ",status.phase!=")
}
// Client api client
type Client interface {
	// GetPodName returns the namespace and name of the pod matching the given IP
GetPodName(podip string) (podns, podname string, err error)
	// ListPodIds returns the azure identities matching the given pod, or nil
ListPodIds(podns, podname string) (map[string][]aadpodid.AzureIdentity, error)
// GetSecret returns secret the secretRef represents
GetSecret(secretRef *v1.SecretReference) (*v1.Secret, error)
}
// KubeClient k8s client
type KubeClient struct {
// Main Kubernetes client
ClientSet kubernetes.Interface
// Crd client used to access our CRD resources.
CrdClient *crd.Client
//PodListWatch is used to list the pods from cache
PodListWatch *cache.ListWatch
}
// NewKubeClient new kubernetes api client
func NewKubeClient() (Client, error) {
config, err := buildConfig()
if err != nil {
return nil, err
}
clientset, err := getkubeclient(config)
if err != nil {
return nil, err
}
crdclient, err := crd.NewCRDClientLite(config)
if err != nil {
return nil, err
}
optionsModifier := func(options *metav1.ListOptions) {}
podListWatch := cache.NewFilteredListWatchFromClient(
clientset.CoreV1().RESTClient(),
"pods",
v1.NamespaceAll,
optionsModifier,
)
kubeClient := &KubeClient{CrdClient: crdclient, ClientSet: clientset, PodListWatch: podListWatch}
return kubeClient, nil
}
// GetPodName get pod ns,name from apiserver
func (c *KubeClient) GetPodName(podip string) (podns, podname string, err error) {
if podip == "" {
return "", "", fmt.Errorf("podip is empty")
}
podList, err := c.getPodListRetry(podip, getPodListRetries, getPodListSleepTimeMilliseconds)
if err != nil {
return "", "", err
}
numMatching := len(podList.Items)
if numMatching == 1 {
return podList.Items[0].Namespace, podList.Items[0].Name, nil
}
return "", "", fmt.Errorf("match failed, ip:%s matching pods:%v", podip, podList)
}
func (c *KubeClient) getPodList(podip string) (*v1.PodList, error) {
listObject, err := c.PodListWatch.List(metav1.ListOptions{
FieldSelector: "status.podIP==" + podip + phaseStatusFilter,
})
if err != nil {
return nil, err
}
// Confirm that we are able to cast properly.
podList, ok := listObject.(*v1.PodList)
if !ok {
return nil, fmt.Errorf("list object could not be converted to podlist")
}
if podList == nil {
return nil, fmt.Errorf("pod list nil")
}
if len(podList.Items) == 0 {
return nil, fmt.Errorf("pod list empty")
}
return podList, nil
}
func (c *KubeClient) getPodListRetry(podip string, retries int, sleeptime time.Duration) (*v1.PodList, error) {
var podList *v1.PodList
var err error
i := 0
for {
		// At least run getPodList once.
podList, err = c.getPodList(podip)
if err == nil {
return podList, nil
}
if i >= retries {
break
}
i++
log.Warningf("List pod error: %+v. Retrying, attempt number: %d", err, i)
time.Sleep(sleeptime * time.Millisecond)
}
// We reach here only if there is an error and we have exhausted all retries.
// Return the last error
return nil, err
}
// GetLocalIP returns the non loopback local IP of the host
func | () (string, error) {
addrs, err := net.InterfaceAddrs()
if err != nil {
return "", err
}
for _, address := range addrs {
// check the address type and if it is not a loopback
if ipnet, ok := address.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
if ipnet.IP.To4() != nil {
return ipnet.IP.String(), nil
}
}
}
return "", fmt.Errorf("non loopback ip address not found")
}
// ListPodIds lists matching ids for pod or error
func (c *KubeClient) ListPodIds(podns, podname string) (map[string][]aadpodid.AzureIdentity, error) {
return c.CrdClient.ListPodIds(podns, podname)
}
// GetSecret returns secret the secretRef represents
func (c *KubeClient) GetSecret(secretRef *v1.SecretReference) (*v1.Secret, error) {
secret, err := c.ClientSet.CoreV1().Secrets(secretRef.Namespace).Get(secretRef.Name, metav1.GetOptions{})
if err != nil {
return nil, err
}
return secret, nil
}
func getkubeclient(config *rest.Config) (*kubernetes.Clientset, error) {
// creates the clientset
kubeClient, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, err
}
return kubeClient, err
}
// Create the client config. Use kubeconfig if given, otherwise assume in-cluster.
func buildConfig() (*rest.Config, error) {
kubeconfigPath := os.Getenv("KUBECONFIG")
if kubeconfigPath != "" {
return clientcmd.BuildConfigFromFlags("", kubeconfigPath)
}
return rest.InClusterConfig()
}
| GetLocalIP |
driver.py | """Entry point for WebDriver."""
import alert
import command
import searchcontext
import webelement
import base64
class WebDriver(searchcontext.SearchContext):
"""Controls a web browser."""
def __init__(self, host, required, desired, mode='strict'):
args = {'desiredCapabilities': desired}
if required:
args['requiredCapabilities'] = required
self._executor = command.CommandExecutor(host, mode)
resp = self._executor.execute(
'POST', '/session', None, 'newSession', args)
self.capabilities = resp['value']
self._session_id = resp['sessionId']
self.mode = mode
def execute(self, method, path, name, parameters=None):
|
def get(self, url):
"""Navigate to url."""
self.execute('POST', '/url', 'get', {'url': url})
def get_current_url(self):
"""Get the current value of the location bar."""
return self.execute('GET', '/url', 'getCurrentUrl')
def go_back(self):
"""Hit the browser back button."""
self.execute('POST', '/back', 'goBack')
def go_forward(self):
"""Hit the browser forward button."""
self.execute('POST', '/forward', 'goForward')
def refresh(self):
"""Refresh the current page in the browser."""
self.execute('POST', '/refresh', 'refresh')
def quit(self):
"""Shutdown the current WebDriver session."""
self.execute('DELETE', '', 'quit')
def get_window_handle(self):
"""Get the handle for the browser window/tab currently accepting
commands.
"""
return self.execute('GET', '/window_handle', 'getWindowHandle')
def get_window_handles(self):
"""Get handles for all open windows/tabs."""
return self.execute('GET', '/window_handles', 'getWindowHandles')
def close(self):
"""Close the current tab or window.
If this is the last tab or window, then this is the same as
calling quit.
"""
self.execute('DELETE', '/window', 'close')
def maximize_window(self):
"""Maximize the current window."""
return self._window_command('POST', '/maximize', 'maximize')
def get_window_size(self):
"""Get the dimensions of the current window."""
result = self._window_command('GET', '/size', 'getWindowSize')
        return {'height': result['height'], 'width': result['width']}
def set_window_size(self, height, width):
"""Set the size of the current window."""
self._window_command(
'POST',
'/size',
'setWindowSize',
{'height': height, 'width': width})
def fullscreen_window(self):
"""Make the current window fullscreen."""
pass # implement when end point is defined
def switch_to_window(self, name):
"""Switch to the window with the given handle or name."""
self.execute('POST', '/window', 'switchToWindow', {'name': name})
def switch_to_frame(self, id):
"""Switch to a frame.
id can be either a WebElement or an integer.
"""
self.execute('POST', '/frame', 'switchToFrame', {'id': id})
def switch_to_parent_frame(self):
"""Move to the browsing context containing the currently selected frame.
If in the top-level browsing context, this is a no-op.
"""
self.execute('POST', '/frame/parent', 'switchToParentFrame')
def switch_to_alert(self):
"""Return an Alert object to interact with a modal dialog."""
alert_ = alert.Alert(self)
alert_.get_text()
return alert_
def execute_script(self, script, args=[]):
"""Execute a Javascript script in the current browsing context."""
return self.execute(
'POST',
'/execute',
'executeScript',
{'script': script, 'args': args})
def execute_script_async(self, script, args=[]):
"""Execute a Javascript script in the current browsing context."""
return self.execute(
'POST',
'/execute_async',
'executeScriptAsync',
{'script': script, 'args': args})
def take_screenshot(self, element=None):
"""Take a screenshot.
If element is not provided, the screenshot should be of the
current page, otherwise the screenshot should be of the given element.
"""
if self.mode == 'strict':
pass # implement when endpoint is defined
elif self.mode == 'compatibility':
if element:
pass # element screenshots are unsupported in compatibility
else:
return base64.standard_b64decode(
self.execute('GET', '/screenshot', 'takeScreenshot'))
def add_cookie(self, cookie):
"""Add a cookie to the browser."""
self.execute('POST', '/cookie', 'addCookie', {'cookie': cookie})
def get_cookie(self, name=None):
"""Get the cookies accessible from the current page."""
if self.mode == 'compatibility':
cookies = self.execute('GET', '/cookie', 'getCookie')
if name:
cookies_ = []
for cookie in cookies:
if cookie['name'] == name:
cookies_.append(cookie)
return cookies_
return cookies
elif self.mode == 'strict':
pass # implement when wire protocol for this has been defined
def set_implicit_timeout(self, ms):
self._set_timeout('implicit', ms)
def set_page_load_timeout(self, ms):
self._set_timeout('page load', ms)
def set_script_timeout(self, ms):
self._set_timeout('script', ms)
def _set_timeout(self, type, ms):
params = {'type': type, 'ms': ms}
self.execute('POST', '/timeouts', 'timeouts', params)
def _window_command(self, method, path, name, parameters=None):
if self.mode == 'compatibility':
return self.execute(
method, '/window/current' + path, name, parameters)
elif self.mode == 'strict':
pass # implement this when end-points are defined in doc
def _object_hook(self, obj):
if 'ELEMENT' in obj:
return webelement.WebElement(self, obj['ELEMENT'])
return obj
| """Execute a command against the current WebDriver session."""
data = self._executor.execute(
method,
'/session/' + self._session_id + path,
self._session_id,
name,
parameters,
self._object_hook)
if data:
return data['value'] |
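# Editorial sketch (hypothetical endpoint and capabilities, not part of the
# original file): a minimal session in compatibility mode.
#
#     driver = WebDriver('http://localhost:4444/wd/hub', None,
#                        {'browserName': 'firefox'}, mode='compatibility')
#     driver.get('http://example.org')
#     print(driver.get_current_url())
#     driver.quit()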
user_forms.py | from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm as BaseUserCreationForm
# from crispy_forms.helper import FormHelper
# from crispy_forms.layout import Submit
import sys
import os
from .models import User_Profile
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from orders.models import Location, Location_Info
from orders.locations import get_location_choices
class UserCreationForm(BaseUserCreationForm):
name = forms.CharField(label="Full Name")
location_id = forms.ChoiceField(
label="Hospital Location:",
help_text="Select an option from the menu above.",
choices=get_location_choices())
email = forms.EmailField(
required=False,
label="Email",
help_text = "(not required)" )
phone = forms.CharField(
required=False,
max_length=12,
label="Mobile Number",
help_text="(not required)")
class Meta:
model = User
fields = [
"name",
"username",
"password1",
"password2",
"location_id",
"email",
"phone"
]
def save(self, commit=True, *args, **kwargs):
user = super(UserCreationForm, self).save(commit=False, *args, **kwargs)
name = self.cleaned_data["name"]
if len(name.split()) >= 2:
user.first_name, user.last_name = (name.split()[0].title(), name.split()[-1].title())
elif len(name.split()) == 1:
user.first_name = name.title()
user.last_name = ""
user.set_password(self.cleaned_data["password1"])
user.email = self.cleaned_data["email"]
if commit:
user.save()
user.profile.phone = self.cleaned_data["phone"]
location_id = int(self.cleaned_data["location_id"])
loc = Location(
username = user.username,
location_id = location_id,
info = Location_Info.objects.filter(pk=location_id).first()
)
loc.save()
user.profile.location = loc
user.profile.save()
user.save()
return user
class UserUpdateForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
email = kwargs.get("instance").email
super(UserUpdateForm, self).__init__(*args, **kwargs)
self.initial["email"] = email
# self.helper = FormHelper(self)
# self.helper.add_input(Submit("submit", "Submit", css_class="btn btn-outline-info"))
# self.helper.form_method = "POST"
email = forms.EmailField(
required=False,
label="Email",
help_text = "(not required)")
class Meta:
model = User
fields = ["username", "email"]
class ProfileUpdateForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
|
location_id = forms.ChoiceField(
label="Hospital Location:",
help_text="Select an option from the menu above.",
choices=get_location_choices())
phone = forms.CharField(
required=False,
max_length=12,
label="Mobile Number",
help_text="(not required)")
class Meta:
model = User_Profile
fields = ["image", "location_id", "phone"]
def save(self, commit=True, *args, **kwargs):
profile = super(ProfileUpdateForm, self).save(commit=False, *args, **kwargs)
if commit:
profile.save()
profile.phone = self.cleaned_data["phone"]
new_location_id = int(self.cleaned_data["location_id"])
profile.location.delete()
        new_location = Location(
            username = self.instance.user.username,
            location_id = new_location_id,
            info = Location_Info.objects.filter(pk=new_location_id).first()
        )
        # Model.save() returns None, so save first and then assign the instance
        new_location.save()
        profile.location = new_location
profile.save()
return profile
| location_id = kwargs.get("instance").location.location_id
phone = kwargs.get("instance").phone
super(ProfileUpdateForm, self).__init__(*args, **kwargs)
self.initial["location_id"] = location_id
self.initial["phone"] = phone
# self.helper = FormHelper(self)
# self.helper.add_input(Submit("submit", "Submit", css_class="btn btn-outline-info"))
# self.helper.form_method = "POST" |
pkg.develspace.context.pc.py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "robot_localization;roscpp;tf;tf2;tf2_ros;message_filters;std_msgs;std_srvs;geometry_msgs;nav_msgs;sensor_msgs;robotnik_msgs;mavros_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "summit_xl_localization"
PROJECT_SPACE_DIR = "/workspace/devel" | PROJECT_VERSION = "1.1.3" |
|
repository_memory.go | /*
* Copyright © 2017-2018 Aeneas Rekkas <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @author Aeneas Rekkas <[email protected]>
* @copyright 2017-2018 Aeneas Rekkas <[email protected]>
* @license Apache-2.0
*/
package rule
import (
"context"
"net/url"
"sync"
"github.com/pkg/errors"
"github.com/ory/x/viperx"
"github.com/ory/oathkeeper/helper"
"github.com/ory/oathkeeper/x"
"github.com/ory/x/pagination"
)
var _ Repository = new(RepositoryMemory)
type repositoryMemoryRegistry interface {
RuleValidator() Validator
x.RegistryLogger
}
type RepositoryMemory struct {
sync.RWMutex
rules []Rule
r repositoryMemoryRegistry
}
func NewRepositoryMemory(r repositoryMemoryRegistry) *RepositoryMemory {
return &RepositoryMemory{
r: r,
rules: make([]Rule, 0),
}
}
// WithRules sets rules without validation. For testing only.
func (m *RepositoryMemory) WithRules(rules []Rule) {
m.Lock()
m.rules = rules
m.Unlock()
}
func (m *RepositoryMemory) Count(ctx context.Context) (int, error) {
m.RLock()
defer m.RUnlock()
return len(m.rules), nil
}
func (m *RepositoryMemory) List(ctx context.Context, limit, offset int) ([]Rule, error) {
m.RLock()
defer m.RUnlock()
start, end := pagination.Index(limit, offset, len(m.rules))
return m.rules[start:end], nil
}
func (m *RepositoryMemory) Get(ctx context.Context, id string) (*Rule, error) {
m.RLock()
defer m.RUnlock()
for _, r := range m.rules {
if r.ID == id {
return &r, nil
}
}
return nil, errors.WithStack(helper.ErrResourceNotFound)
}
func (m *RepositoryMemory) Set(ctx context.Context, rules []Rule) error {
for _, check := range rules {
if err := m.r.RuleValidator().Validate(&check); err != nil {
viperx.LoggerWithValidationErrorFields(m.r.Logger(), err).WithError(err).
Errorf("A rule uses a malformed configuration and all URLs matching this rule will not work. You should resolve this issue now.")
}
}
m.Lock()
m.rules = rules
m.Unlock()
return nil
}
func (m *RepositoryMemory) Match(ctx context.Context, method string, u *url.URL) (*Rule, error) {
m.RLock()
defer m.RUnlock()
var rules []Rule
for _, rule := range m.rules {
if err := rule.IsMatching(method, u); err == nil { | }
if len(rules) == 0 {
return nil, errors.WithStack(helper.ErrMatchesNoRule)
}
return &rules[0], nil
}
|
rules = append(rules, rule)
}
|
consoleClass_20190522140909.js | import * as TYPE from "./checkType"
// Global log switch
let isShowGlobalLog = true
// Module switches (module codes should default to >= 1000)
let moduleArr = [
{
name: `login`,
code: 1000,
isShowLog: true
}
]
// Styles (text color only for now)
let styleArr = {
red: `color:#f5576c`,
blue: `color:#005bea`,
green: `color:#00e3ae`,
gray: `color:#485563`,
pink: `color:#fe5196`
}
// Wrapper class around console
class Console {
constructor(...dataArr) {
// Default data
th | = []
// Timer handle
this.timer = null
// Plain-log switch (cleared once a chained formatter runs)
this.onlyLog = true
// Timestamp array
this.timeStampArr = []
}
// Finally decide whether to print the data
isShowLog(...data) {
if (isShowGlobalLog) {
// With the global switch on, print the captured arguments
if (!this.onlyLog) {
console.log.apply(null, this.data)
}
} else {
// Otherwise print nothing at all
return false
}
}
// Receive the data
log(...dataArr) {
// Capture the passed-in arguments
this.data = [...dataArr]
this.timer = setTimeout(() => {
if (this.onlyLog) {
console.log(`went through console >>>>>>>`, ...dataArr)
}
}, 30)
return this
}
format(type = `string`) {
if (type === `string`) {
let dataArr = []
this.data.forEach((item, index) => {
if (TYPE.isArray(item) || TYPE.isObj(item)) {
// Arrays and JSON objects get stringified
dataArr.push(JSON.stringify(item))
} else {
// Everything else stays as-is
dataArr.push(item)
}
return item
})
this.data = [...dataArr]
}
}
// Serialize the data to strings (mainly arrays and JSON; other types unchanged for now)
toString(...data) {
this.onlyLog = false
this.format()
this.isShowLog()
}
// Show the data in the given color (everything must be stringified first)
toColor(color) {
this.onlyLog = false
this.format()
console.group("style >>>>>>>>>>")
this.data.forEach((item, index) => {
this.style(item, color)
})
console.groupEnd()
}
// Pick the print form based on the data type
style(item, color) {
if (typeof item === `string`) {
console.log(`%c${item}`, styleArr[color])
} else {
console.log(`%c${JSON.stringify(item)}`, styleArr[color])
}
}
}
// Create an instance
let dsxConsole = new Console()
// Expose the log function
export let Log = (...dataArr) => {
dsxConsole.log(...dataArr)
// Enable chained calls
return dsxConsole
}
| is.data |
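The Console class above enables chains like Log(a, b).toColor('red') by returning the dsxConsole instance from log(). Below is a sketch of the same return-the-receiver pattern in Go, assuming JSON-stringifying the values is enough for the demo; chainLog, Log and ToString are hypothetical names.

package main

import (
	"encoding/json"
	"fmt"
)

// chainLog stores the logged values and returns itself from every
// formatter, which is what makes Log(...).ToString() style chaining work.
type chainLog struct {
	data []interface{}
}

func Log(data ...interface{}) *chainLog {
	return &chainLog{data: data}
}

// ToString prints every value JSON-stringified, roughly what the
// toString/format pair above does for arrays and objects.
func (c *chainLog) ToString() *chainLog {
	for _, d := range c.data {
		b, _ := json.Marshal(d)
		fmt.Println(string(b))
	}
	return c
}

func main() {
	Log(map[string]int{"a": 1}, []int{1, 2, 3}).ToString()
}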
gen_jdisk.py | #!/usr/bin/python
import noise
import numpy as np
from PIL import Image
size = (2000, 2000)
ratio = 16
bb_col = Image.open("../resources/bb-scale.jpg", 'r')
out_img = Image.new('RGBA', size, (0, 0, 0, 0))
for y in range(0, size[1]):
if y % 100 == 0:
print y
for x in range(0, size[0]):
val = noise.snoise2(float(x)/size[0] * ratio,
float(y)/size[1] * ratio,
repeatx=ratio, | val = val * 255
val = int(val)
col = (val, val, val, 255)
out_img.putpixel((x, y), col)
out_img.save("out.png", "PNG") | repeaty=ratio)
val = (val + 1) / 2.0 |
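snoise2 returns samples in [-1, 1], so the record's middle rescales to [0, 1] before the suffix multiplies by 255 and truncates to an 8-bit gray level; for example, 0.5 maps to int(191.25) = 191. The same normalization as a runnable Go sketch (toGray is a hypothetical helper):

package main

import "fmt"

// toGray maps a noise sample in [-1, 1] to an 8-bit gray level:
// (val + 1) / 2 rescales to [0, 1], and * 255 to [0, 255].
func toGray(val float64) uint8 {
	return uint8((val + 1) / 2 * 255)
}

func main() {
	fmt.Println(toGray(-1.0), toGray(0.0), toGray(0.5)) // 0 127 191
}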
main_20210629130424.py | from Calculator import make_root
|
if __name__ == '__main__':
main() | def main
|
issue-9814.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Verify that single-variant enums can't be dereferenced
// Regression test for issue #9814
enum | { Bar(int) }
fn main() {
let _ = *Bar(2); //~ ERROR type `Foo` cannot be dereferenced
}
| Foo |
animate_color_type.py | from dataclasses import dataclass, field
from decimal import Decimal
from typing import Dict, List, Optional, Union
from bindings.csw.anim_mode_attrs_calc_mode import AnimModeAttrsCalcMode
from bindings.csw.animate_color_prototype import AnimateColorPrototype
from bindings.csw.fill_default_type import FillDefaultType
from bindings.csw.fill_timing_attrs_type import FillTimingAttrsType
from bindings.csw.lang_value import LangValue
from bindings.csw.restart_default_type import RestartDefaultType
from bindings.csw.restart_timing_type import RestartTimingType
from bindings.csw.sync_behavior_default_type import SyncBehaviorDefaultType
from bindings.csw.sync_behavior_type import SyncBehaviorType
__NAMESPACE__ = "http://www.w3.org/2001/SMIL20/Language"
@dataclass
class AnimateColorType(AnimateColorPrototype):
class Meta:
name = "animateColorType"
other_element: List[object] = field(
default_factory=list,
metadata={
"type": "Wildcard",
"namespace": "##other",
},
)
id: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
) | class_value: Optional[str] = field(
default=None,
metadata={
"name": "class",
"type": "Attribute",
},
)
lang: Optional[Union[str, LangValue]] = field(
default=None,
metadata={
"type": "Attribute",
"namespace": "http://www.w3.org/XML/1998/namespace",
},
)
alt: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
)
longdesc: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
)
begin: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
)
end: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
)
dur: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
)
repeat_dur: Optional[str] = field(
default=None,
metadata={
"name": "repeatDur",
"type": "Attribute",
},
)
repeat_count: Optional[Decimal] = field(
default=None,
metadata={
"name": "repeatCount",
"type": "Attribute",
"min_inclusive": Decimal("0.0"),
},
)
repeat: Optional[int] = field(
default=None,
metadata={
"type": "Attribute",
},
)
min: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
)
max: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
},
)
sync_behavior: SyncBehaviorType = field(
default=SyncBehaviorType.DEFAULT,
metadata={
"name": "syncBehavior",
"type": "Attribute",
},
)
sync_tolerance: Optional[str] = field(
default=None,
metadata={
"name": "syncTolerance",
"type": "Attribute",
},
)
sync_behavior_default: SyncBehaviorDefaultType = field(
default=SyncBehaviorDefaultType.INHERIT,
metadata={
"name": "syncBehaviorDefault",
"type": "Attribute",
},
)
sync_tolerance_default: str = field(
default="inherit",
metadata={
"name": "syncToleranceDefault",
"type": "Attribute",
},
)
restart: RestartTimingType = field(
default=RestartTimingType.DEFAULT,
metadata={
"type": "Attribute",
},
)
restart_default: RestartDefaultType = field(
default=RestartDefaultType.INHERIT,
metadata={
"name": "restartDefault",
"type": "Attribute",
},
)
fill: FillTimingAttrsType = field(
default=FillTimingAttrsType.DEFAULT,
metadata={
"type": "Attribute",
},
)
fill_default: FillDefaultType = field(
default=FillDefaultType.INHERIT,
metadata={
"name": "fillDefault",
"type": "Attribute",
},
)
target_element: Optional[str] = field(
default=None,
metadata={
"name": "targetElement",
"type": "Attribute",
},
)
calc_mode: AnimModeAttrsCalcMode = field(
default=AnimModeAttrsCalcMode.LINEAR,
metadata={
"name": "calcMode",
"type": "Attribute",
},
)
skip_content: bool = field(
default=True,
metadata={
"name": "skip-content",
"type": "Attribute",
},
)
any_attributes: Dict[str, str] = field(
default_factory=dict,
metadata={
"type": "Attributes",
"namespace": "##any",
},
) | |
test_poetry.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
import pytest
from poetry.poetry import Poetry
from poetry.utils._compat import PY2
from poetry.utils._compat import Path
from poetry.utils.toml_file import TomlFile
fixtures_dir = Path(__file__).parent / "fixtures"
def test_poetry():
poetry = Poetry.create(str(fixtures_dir / "sample_project"))
package = poetry.package
assert package.name == "my-package"
assert package.version.text == "1.2.3"
assert package.description == "Some description."
assert package.authors == ["Sébastien Eustace <[email protected]>"]
assert package.license.id == "MIT"
assert (
package.readme.relative_to(fixtures_dir).as_posix()
== "sample_project/README.rst"
)
assert package.homepage == "https://poetry.eustace.io"
assert package.repository_url == "https://github.com/sdispater/poetry"
assert package.keywords == ["packaging", "dependency", "poetry"]
assert package.python_versions == "~2.7 || ^3.6"
assert str(package.python_constraint) == ">=2.7,<2.8 || >=3.6,<4.0"
dependencies = {}
for dep in package.requires:
dependencies[dep.name] = dep
cleo = dependencies["cleo"]
assert cleo.pretty_constraint == "^0.6"
assert not cleo.is_optional()
pendulum = dependencies["pendulum"]
assert pendulum.pretty_constraint == "branch 2.0"
assert pendulum.is_vcs()
assert pendulum.vcs == "git"
assert pendulum.branch == "2.0"
assert pendulum.source == "https://github.com/sdispater/pendulum.git"
assert pendulum.allows_prereleases()
requests = dependencies["requests"]
assert requests.pretty_constraint == "^2.18"
assert not requests.is_vcs()
assert not requests.allows_prereleases()
assert requests.is_optional()
assert requests.extras == ["security"]
pathlib2 = dependencies["pathlib2"]
assert pathlib2.pretty_constraint == "^2.2"
assert pathlib2.python_versions == "~2.7"
assert not pathlib2.is_optional()
demo = dependencies["demo"]
assert demo.is_file()
assert not demo.is_vcs()
assert demo.name == "demo"
assert demo.pretty_constraint == "*"
demo = dependencies["my-package"]
assert not demo.is_file()
assert demo.is_directory()
assert not demo.is_vcs()
assert demo.name == "my-package"
assert demo.pretty_constraint == "*"
simple_project = dependencies["simple-project"]
assert not simple_project.is_file()
assert simple_project.is_directory()
assert not simple_project.is_vcs()
assert simple_project.name == "simple-project"
assert simple_project.pretty_constraint == "*"
functools32 = dependencies["functools32"]
assert functools32.name == "functools32"
assert functools32.pretty_constraint == "^3.2.3"
assert (
str(functools32.marker)
== 'python_version ~= "2.7" and sys_platform == "win32" or python_version in "3.4 3.5"'
)
assert "db" in package.extras
classifiers = package.classifiers
assert classifiers == [
"Topic :: Software Development :: Build Tools",
"Topic :: Software Development :: Libraries :: Python Modules",
]
assert package.all_classifiers == [
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Software Development :: Build Tools",
"Topic :: Software Development :: Libraries :: Python Modules",
]
def test_poetry_with_packages_and_includes():
poetry = Poetry.create(
str(fixtures_dir.parent / "masonry" / "builders" / "fixtures" / "with-include")
)
package = poetry.package
assert package.packages == [
{"include": "extra_dir/**/*.py"},
{"include": "extra_dir/**/*.py"},
{"include": "my_module.py"},
{"include": "package_with_include"},
{"include": "tests", "format": "sdist"},
{"include": "for_wheel_only", "format": ["wheel"]},
]
assert package.include == ["extra_dir/vcs_excluded.txt", "notes.txt"]
def test_poetry_with_multi_constraints_dependency():
poetry = Poetry.create(
str(fixtures_dir / "project_with_multi_constraints_dependency")
)
package = poetry.package
assert len(package.requires) == 2
def test_poetry_with_default_source():
poetry = Poetry.create(fixtures_dir / "with_default_source")
assert 1 == len(poetry.pool.repositories)
def test_poetry_with_two_default_sources():
with pytest.raises(ValueError) as e:
Poetry.create(fixtures_dir / "with_two_default_sources")
assert "Only one repository can be the default" == str(e.value)
def test_check():
complete = TomlFile(fixtures_dir / "complete.toml")
content = complete.read()["tool"]["poetry"]
assert Poetry.check(content) == {"errors": [], "warnings": []}
def t | ):
complete = TomlFile(fixtures_dir / "complete.toml")
content = complete.read()["tool"]["poetry"]
content["this key is not in the schema"] = ""
if PY2:
expected = (
"Additional properties are not allowed "
"(u'this key is not in the schema' was unexpected)"
)
else:
expected = (
"Additional properties are not allowed "
"('this key is not in the schema' was unexpected)"
)
assert Poetry.check(content) == {"errors": [expected], "warnings": []}
def test_create_fails_on_invalid_configuration():
with pytest.raises(RuntimeError) as e:
Poetry.create(
Path(__file__).parent / "fixtures" / "invalid_pyproject" / "pyproject.toml"
)
if PY2:
expected = """\
The Poetry configuration is invalid:
- u'description' is a required property
"""
else:
expected = """\
The Poetry configuration is invalid:
- 'description' is a required property
"""
assert expected == str(e.value)
| est_check_fails( |
redissingleton.go | package redis
import "sync"
// SingletonSession handles connection pool for Redis
type SingletonSession struct {
Session *RedisSession
Err error
server string
initMutex sync.Mutex
}
// Singleton creates a new SingletonSession for the given server address
func Singleton(server string) *SingletonSession |
// Connect connects to Redis and holds the Session and Err object
// in the SingletonSession struct
func (r *SingletonSession) Connect() (*RedisSession, error) {
r.initMutex.Lock()
defer r.initMutex.Unlock()
if r.Session != nil && r.Err == nil {
return r.Session, nil
}
r.Session, r.Err = NewRedisSession(&RedisConf{Server: r.server})
return r.Session, r.Err
}
// Close clears the connection to redis
func (r *SingletonSession) Close() {
r.initMutex.Lock()
defer r.initMutex.Unlock()
r.Session.Close()
r.Session = nil
r.Err = nil
}
| {
return &SingletonSession{
server: server,
}
} |
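Connect above dials only when there is no cached session or the previous attempt failed, and Close drops the cache so the next Connect re-dials. Below is a self-contained sketch of that lazy-connect caching; conn, dial and lazySingleton are hypothetical stand-ins for *RedisSession, NewRedisSession and SingletonSession.

package main

import (
	"fmt"
	"sync"
)

type conn struct{ addr string }

func dial(addr string) (*conn, error) { return &conn{addr: addr}, nil }

type lazySingleton struct {
	mu   sync.Mutex
	c    *conn
	err  error
	addr string
}

func (l *lazySingleton) Connect() (*conn, error) {
	l.mu.Lock()
	defer l.mu.Unlock()
	// Reuse the cached connection while the previous dial succeeded.
	if l.c != nil && l.err == nil {
		return l.c, nil
	}
	l.c, l.err = dial(l.addr)
	return l.c, l.err
}

func main() {
	s := &lazySingleton{addr: "localhost:6379"}
	c1, _ := s.Connect()
	c2, _ := s.Connect()
	fmt.Println(c1 == c2) // true: the second call reuses the cached conn
}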
emergency.py | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 18 18:19:15 2019
@author: pepo
"""
import libardrone
import pygame
from time import sleep
import time
import cv2
drone = libardrone.ARDrone()
def operation(sleep):
t1 = time.time()
t2 = t1
while t2 - t1 < sleep:
drone.turn_left()
t2 = time.time()
def main():
|
if __name__ == '__main__':
main() | drone.land() |
tx.go | // Copyright (C) 2019-2021, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package snowstorm
import (
"github.com/ava-labs/avalanchego/ids"
"github.com/ava-labs/avalanchego/snow/choices"
)
// Whitelister defines the interface for specifying whitelisted operations.
type Whitelister interface {
// Returns [true] if the underlying instance does implement whitelisted
// conflicts.
HasWhitelist() bool |
// Whitelist returns the set of transaction IDs that are explicitly
// whitelisted. Transactions that are not explicitly whitelisted are
// considered conflicting.
Whitelist() (ids.Set, error)
}
// Tx consumes state.
type Tx interface {
choices.Decidable
Whitelister
// Dependencies is a list of transactions upon which this transaction
// depends. Each element of Dependencies must be verified before Verify is
// called on this transaction.
//
// Similarly, each element of Dependencies must be accepted before this
// transaction is accepted.
Dependencies() ([]Tx, error)
// InputIDs is a set where each element is the ID of a piece of state that
// will be consumed if this transaction is accepted.
//
// In the context of a UTXO-based payments system, for example, this would
// be the IDs of the UTXOs consumed by this transaction
InputIDs() []ids.ID
// Verify that the state transition this transaction would make if it were
// accepted is valid. If the state transition is invalid, a non-nil error
// should be returned.
//
// It is guaranteed that when Verify is called, all the dependencies of
// this transaction have already been successfully verified.
Verify() error
// Bytes returns the binary representation of this transaction.
//
// This is used for sending transactions to peers. Another node should be
// able to parse these bytes to the same transaction.
Bytes() []byte
} | |
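Per the doc comments above, a transaction that implements no whitelist adds no extra conflicts, while one that does conflicts with every transaction not named in its set. Below is a sketch of that rule with simplified types; mockTx, idSet and conflictsWith are hypothetical, and the real code uses avalanchego's ids.Set rather than a plain map.

package main

import "fmt"

type idSet map[string]struct{}

type mockTx struct {
	whitelist idSet
}

func (t *mockTx) HasWhitelist() bool { return t.whitelist != nil }

func (t *mockTx) Whitelist() (idSet, error) { return t.whitelist, nil }

// conflictsWith reports whether other should be treated as conflicting:
// anything not explicitly whitelisted conflicts.
func conflictsWith(t *mockTx, other string) (bool, error) {
	if !t.HasWhitelist() {
		return false, nil
	}
	wl, err := t.Whitelist()
	if err != nil {
		return false, err
	}
	_, ok := wl[other]
	return !ok, nil
}

func main() {
	tx := &mockTx{whitelist: idSet{"tx1": {}}}
	conflicting, _ := conflictsWith(tx, "tx2")
	fmt.Println(conflicting) // true: tx2 is not in the whitelist
}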
0002_auto_20160704_1112.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-04 16:12
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
| ('tags', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='tag',
name='Category',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='tags.Category'),
),
migrations.AddField(
model_name='tag',
name='regex',
field=models.CharField(default=None, max_length=100),
preserve_default=False,
),
] | class Migration(migrations.Migration):
dependencies = [ |
astencode.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(non_camel_case_types)]
// FIXME: remove this after snapshot, and Results are handled
#![allow(unused_must_use)]
use metadata::common as c;
use metadata::cstore as cstore;
use driver::session::Session;
use metadata::decoder;
use middle::def;
use metadata::encoder as e;
use middle::region;
use metadata::tydecode;
use metadata::tydecode::{DefIdSource, NominalType, TypeWithId, TypeParameter};
use metadata::tydecode::{RegionParameter};
use metadata::tyencode;
use middle::mem_categorization::Typer;
use middle::subst;
use middle::subst::VecPerParamSpace;
use middle::typeck::{MethodCall, MethodCallee, MethodOrigin};
use middle::{ty, typeck};
use util::ppaux::ty_to_string;
use syntax::{ast, ast_map, ast_util, codemap, fold};
use syntax::ast_util::PostExpansionMethod;
use syntax::codemap::Span;
use syntax::fold::Folder;
use syntax::parse::token;
use syntax::ptr::P;
use syntax;
use libc;
use std::io::Seek;
use std::mem;
use std::rc::Rc;
use rbml::io::SeekableMemWriter;
use rbml::{reader, writer};
use rbml;
use serialize;
use serialize::{Decodable, Decoder, DecoderHelpers, Encodable};
use serialize::{EncoderHelpers};
#[cfg(test)] use syntax::parse;
#[cfg(test)] use syntax::print::pprust;
struct DecodeContext<'a, 'b, 'tcx: 'a> {
tcx: &'a ty::ctxt<'tcx>,
cdata: &'b cstore::crate_metadata,
from_id_range: ast_util::IdRange,
to_id_range: ast_util::IdRange
}
trait tr {
fn tr(&self, dcx: &DecodeContext) -> Self;
}
trait tr_intern {
fn tr_intern(&self, dcx: &DecodeContext) -> ast::DefId;
}
pub type Encoder<'a> = writer::Encoder<'a, SeekableMemWriter>;
// ______________________________________________________________________
// Top-level methods.
pub fn encode_inlined_item(ecx: &e::EncodeContext,
rbml_w: &mut Encoder,
ii: e::InlinedItemRef) {
let id = match ii {
e::IIItemRef(i) => i.id,
e::IIForeignRef(i) => i.id,
e::IITraitItemRef(_, &ast::ProvidedMethod(ref m)) => m.id,
e::IITraitItemRef(_, &ast::RequiredMethod(ref m)) => m.id,
e::IITraitItemRef(_, &ast::TypeTraitItem(ref ti)) => ti.id,
e::IIImplItemRef(_, &ast::MethodImplItem(ref m)) => m.id,
e::IIImplItemRef(_, &ast::TypeImplItem(ref ti)) => ti.id,
};
debug!("> Encoding inlined item: {} ({})",
ecx.tcx.map.path_to_string(id),
rbml_w.writer.tell());
// Folding could be avoided with a smarter encoder.
let ii = simplify_ast(ii);
let id_range = ast_util::compute_id_range_for_inlined_item(&ii);
rbml_w.start_tag(c::tag_ast as uint);
id_range.encode(rbml_w);
encode_ast(rbml_w, &ii);
encode_side_tables_for_ii(ecx, rbml_w, &ii);
rbml_w.end_tag();
debug!("< Encoded inlined fn: {} ({})",
ecx.tcx.map.path_to_string(id),
rbml_w.writer.tell());
}
impl<'a, 'b, 'c, 'tcx> ast_map::FoldOps for &'a DecodeContext<'b, 'c, 'tcx> {
fn new_id(&self, id: ast::NodeId) -> ast::NodeId {
if id == ast::DUMMY_NODE_ID {
// Used by ast_map to map the NodeInlinedParent.
self.tcx.sess.next_node_id()
} else {
self.tr_id(id)
}
}
fn new_def_id(&self, def_id: ast::DefId) -> ast::DefId {
self.tr_def_id(def_id)
}
fn new_span(&self, span: Span) -> Span {
self.tr_span(span)
}
}
pub fn decode_inlined_item<'tcx>(cdata: &cstore::crate_metadata,
tcx: &ty::ctxt<'tcx>,
path: Vec<ast_map::PathElem>,
par_doc: rbml::Doc)
-> Result<&'tcx ast::InlinedItem, Vec<ast_map::PathElem>> {
match par_doc.opt_child(c::tag_ast) {
None => Err(path),
Some(ast_doc) => {
let mut path_as_str = None;
debug!("> Decoding inlined fn: {}::?",
{
// Do an Option dance to use the path after it is moved below.
let s = ast_map::path_to_string(ast_map::Values(path.iter()));
path_as_str = Some(s);
path_as_str.as_ref().map(|x| x.as_slice())
});
let mut ast_dsr = reader::Decoder::new(ast_doc);
let from_id_range = Decodable::decode(&mut ast_dsr).unwrap();
let to_id_range = reserve_id_range(&tcx.sess, from_id_range);
let dcx = &DecodeContext {
cdata: cdata,
tcx: tcx,
from_id_range: from_id_range,
to_id_range: to_id_range
};
let raw_ii = decode_ast(ast_doc);
let ii = ast_map::map_decoded_item(&dcx.tcx.map, path, raw_ii, dcx);
let ident = match *ii {
ast::IIItem(ref i) => i.ident,
ast::IIForeign(ref i) => i.ident,
ast::IITraitItem(_, ref ti) => {
match *ti {
ast::ProvidedMethod(ref m) => m.pe_ident(),
ast::RequiredMethod(ref ty_m) => ty_m.ident,
ast::TypeTraitItem(ref ti) => ti.ident,
}
},
ast::IIImplItem(_, ref m) => {
match *m {
ast::MethodImplItem(ref m) => m.pe_ident(),
ast::TypeImplItem(ref ti) => ti.ident,
}
}
};
debug!("Fn named: {}", token::get_ident(ident));
debug!("< Decoded inlined fn: {}::{}",
path_as_str.unwrap(),
token::get_ident(ident));
region::resolve_inlined_item(&tcx.sess, &tcx.region_maps, ii);
decode_side_tables(dcx, ast_doc);
match *ii {
ast::IIItem(ref i) => {
debug!(">>> DECODED ITEM >>>\n{}\n<<< DECODED ITEM <<<",
syntax::print::pprust::item_to_string(&**i));
}
_ => { }
}
Ok(ii)
}
}
}
// ______________________________________________________________________
// Enumerating the IDs which appear in an AST
fn reserve_id_range(sess: &Session,
from_id_range: ast_util::IdRange) -> ast_util::IdRange {
// Handle the case of an empty range:
if from_id_range.empty() { return from_id_range; }
let cnt = from_id_range.max - from_id_range.min;
let to_id_min = sess.reserve_node_ids(cnt);
let to_id_max = to_id_min + cnt;
ast_util::IdRange { min: to_id_min, max: to_id_max }
}
impl<'a, 'b, 'tcx> DecodeContext<'a, 'b, 'tcx> {
pub fn tr_id(&self, id: ast::NodeId) -> ast::NodeId {
/*!
* Translates an internal id, meaning a node id that is known
* to refer to some part of the item currently being inlined,
* such as a local variable or argument. All naked node-ids
* that appear in types have this property, since if something
* might refer to an external item we would use a def-id to
* allow for the possibility that the item resides in another
* crate.
*/
// from_id_range should be non-empty
assert!(!self.from_id_range.empty());
(id - self.from_id_range.min + self.to_id_range.min)
}
pub fn tr_def_id(&self, did: ast::DefId) -> ast::DefId {
/*!
* Translates an EXTERNAL def-id, converting the crate number
* from the one used in the encoded data to the current crate
* numbers. By external, I mean that it is translated to a
* reference to the item in its original crate, as opposed to
* being translated to a reference to the inlined version of
* the item. This is typically, but not always, what you
* want, because most def-ids refer to external things like
* types or other fns that may or may not be inlined. Note
* that even when the inlined function is referencing itself
* recursively, we would want `tr_def_id` for that
* reference--- conceptually the function calls the original,
* non-inlined version, and trans deals with linking that
* recursive call to the inlined copy.
*
* However, there are a *few* cases where def-ids are used but
* we know that the thing being referenced is in fact *internal*
* to the item being inlined. In those cases, you should use
* `tr_intern_def_id()` below.
*/
decoder::translate_def_id(self.cdata, did)
}
pub fn tr_intern_def_id(&self, did: ast::DefId) -> ast::DefId {
/*!
* Translates an INTERNAL def-id, meaning a def-id that is
* known to refer to some part of the item currently being
* inlined. In that case, we want to convert the def-id to
* refer to the current crate and to the new, inlined node-id.
*/
assert_eq!(did.krate, ast::LOCAL_CRATE);
ast::DefId { krate: ast::LOCAL_CRATE, node: self.tr_id(did.node) }
}
pub fn tr_span(&self, _span: Span) -> Span {
codemap::DUMMY_SP // FIXME (#1972): handle span properly
}
}
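// Worked example (illustrative, not part of the original source): if the
// encoded item used node ids in from_id_range = [10, 15), then
// cnt = 15 - 10 = 5 and reserve_id_range hands back a fresh block, say
// to_id_range = [200, 205). tr_id then just shifts into that block:
// tr_id(12) = 12 - 10 + 200 = 202. External def-ids skip this shift and go
// through decoder::translate_def_id instead, since they name items in other
// crates rather than nodes of the inlined copy.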
impl tr_intern for ast::DefId {
fn tr_intern(&self, dcx: &DecodeContext) -> ast::DefId {
dcx.tr_intern_def_id(*self)
}
}
impl tr for ast::DefId {
fn tr(&self, dcx: &DecodeContext) -> ast::DefId {
dcx.tr_def_id(*self)
}
}
impl tr for Option<ast::DefId> {
fn tr(&self, dcx: &DecodeContext) -> Option<ast::DefId> {
self.map(|d| dcx.tr_def_id(d))
}
}
impl tr for Span {
fn tr(&self, dcx: &DecodeContext) -> Span {
dcx.tr_span(*self)
}
}
trait def_id_encoder_helpers {
fn emit_def_id(&mut self, did: ast::DefId);
}
impl<S:serialize::Encoder<E>, E> def_id_encoder_helpers for S {
fn emit_def_id(&mut self, did: ast::DefId) {
did.encode(self).ok().unwrap()
}
}
trait def_id_decoder_helpers {
fn read_def_id(&mut self, dcx: &DecodeContext) -> ast::DefId;
fn read_def_id_nodcx(&mut self,
cdata: &cstore::crate_metadata) -> ast::DefId;
}
impl<D:serialize::Decoder<E>, E> def_id_decoder_helpers for D {
fn read_def_id(&mut self, dcx: &DecodeContext) -> ast::DefId {
let did: ast::DefId = Decodable::decode(self).ok().unwrap();
did.tr(dcx)
}
fn read_def_id_nodcx(&mut self,
cdata: &cstore::crate_metadata) -> ast::DefId {
let did: ast::DefId = Decodable::decode(self).ok().unwrap();
decoder::translate_def_id(cdata, did)
}
}
// ______________________________________________________________________
// Encoding and decoding the AST itself
//
// The hard work is done by an autogenerated module astencode_gen. To
// regenerate astencode_gen, run src/etc/gen-astencode. It will
// replace astencode_gen with a dummy file and regenerate its
// contents. If you get compile errors, the dummy file
// remains---resolve the errors and then rerun astencode_gen.
// Annoying, I know, but hopefully only temporary.
//
// When decoding, we have to renumber the AST so that the node ids that
// appear within are disjoint from the node ids in our existing ASTs.
// We also have to adjust the spans: for now we just insert a dummy span,
// but eventually we should add entries to the local codemap as required.
fn encode_ast(rbml_w: &mut Encoder, item: &ast::InlinedItem) {
rbml_w.start_tag(c::tag_tree as uint);
item.encode(rbml_w);
rbml_w.end_tag();
}
struct NestedItemsDropper;
impl Folder for NestedItemsDropper {
fn fold_block(&mut self, blk: P<ast::Block>) -> P<ast::Block> {
blk.and_then(|ast::Block {id, stmts, expr, rules, span, ..}| {
let stmts_sans_items = stmts.into_iter().filter_map(|stmt| {
let use_stmt = match stmt.node {
ast::StmtExpr(_, _) | ast::StmtSemi(_, _) => true,
ast::StmtDecl(ref decl, _) => {
match decl.node {
ast::DeclLocal(_) => true,
ast::DeclItem(_) => false,
}
}
ast::StmtMac(..) => fail!("unexpanded macro in astencode")
};
if use_stmt {
Some(stmt)
} else {
None
}
}).collect();
let blk_sans_items = P(ast::Block {
view_items: Vec::new(), // I don't know if we need the view_items
// here, but it doesn't break tests!
stmts: stmts_sans_items,
expr: expr,
id: id,
rules: rules,
span: span,
});
fold::noop_fold_block(blk_sans_items, self)
})
}
}
// Produces a simplified copy of the AST which does not include things
// that we do not need to or do not want to export. For example, we
// do not include any nested items: if these nested items are to be
// inlined, their AST will be exported separately (this only makes
// sense because, in Rust, nested items are independent except for
// their visibility).
//
// As it happens, trans relies on the fact that we do not export
// nested items, as otherwise it would get confused when translating
// inlined items.
fn simplify_ast(ii: e::InlinedItemRef) -> ast::InlinedItem {
let mut fld = NestedItemsDropper;
match ii {
// HACK we're not dropping items.
e::IIItemRef(i) => {
ast::IIItem(fold::noop_fold_item(P(i.clone()), &mut fld)
.expect_one("expected one item"))
}
e::IITraitItemRef(d, ti) => {
ast::IITraitItem(d, match *ti {
ast::ProvidedMethod(ref m) => {
ast::ProvidedMethod(
fold::noop_fold_method(m.clone(), &mut fld)
.expect_one("noop_fold_method must produce \
exactly one method"))
}
ast::RequiredMethod(ref ty_m) => {
ast::RequiredMethod(
fold::noop_fold_type_method(ty_m.clone(), &mut fld))
}
ast::TypeTraitItem(ref associated_type) => {
ast::TypeTraitItem(
P(fold::noop_fold_associated_type(
(**associated_type).clone(),
&mut fld)))
}
})
}
e::IIImplItemRef(d, m) => {
ast::IIImplItem(d, match *m {
ast::MethodImplItem(ref m) => {
ast::MethodImplItem(
fold::noop_fold_method(m.clone(), &mut fld)
.expect_one("noop_fold_method must produce \
exactly one method"))
}
ast::TypeImplItem(ref td) => {
ast::TypeImplItem(
P(fold::noop_fold_typedef((**td).clone(), &mut fld)))
}
})
}
e::IIForeignRef(i) => {
ast::IIForeign(fold::noop_fold_foreign_item(P(i.clone()), &mut fld))
}
}
}
fn decode_ast(par_doc: rbml::Doc) -> ast::InlinedItem {
let chi_doc = par_doc.get(c::tag_tree as uint);
let mut d = reader::Decoder::new(chi_doc);
Decodable::decode(&mut d).unwrap()
}
// ______________________________________________________________________
// Encoding and decoding of ast::def
fn decode_def(dcx: &DecodeContext, doc: rbml::Doc) -> def::Def {
let mut dsr = reader::Decoder::new(doc);
let def: def::Def = Decodable::decode(&mut dsr).unwrap();
def.tr(dcx)
}
impl tr for def::Def {
fn tr(&self, dcx: &DecodeContext) -> def::Def {
match *self {
def::DefFn(did, p, is_ctor) => def::DefFn(did.tr(dcx), p, is_ctor),
def::DefStaticMethod(did, wrapped_did2, p) => {
def::DefStaticMethod(did.tr(dcx),
match wrapped_did2 {
def::FromTrait(did2) => {
def::FromTrait(did2.tr(dcx))
}
def::FromImpl(did2) => {
def::FromImpl(did2.tr(dcx))
}
},
p)
}
def::DefMethod(did0, did1, p) => {
def::DefMethod(did0.tr(dcx), did1.map(|did1| did1.tr(dcx)), p)
}
def::DefSelfTy(nid) => { def::DefSelfTy(dcx.tr_id(nid)) }
def::DefMod(did) => { def::DefMod(did.tr(dcx)) }
def::DefForeignMod(did) => { def::DefForeignMod(did.tr(dcx)) }
def::DefStatic(did, m) => { def::DefStatic(did.tr(dcx), m) }
def::DefConst(did) => { def::DefConst(did.tr(dcx)) }
def::DefLocal(nid) => { def::DefLocal(dcx.tr_id(nid)) }
def::DefVariant(e_did, v_did, is_s) => {
def::DefVariant(e_did.tr(dcx), v_did.tr(dcx), is_s)
},
def::DefTrait(did) => def::DefTrait(did.tr(dcx)),
def::DefTy(did, is_enum) => def::DefTy(did.tr(dcx), is_enum),
def::DefAssociatedTy(did) => def::DefAssociatedTy(did.tr(dcx)),
def::DefPrimTy(p) => def::DefPrimTy(p),
def::DefTyParam(s, did, v) => def::DefTyParam(s, did.tr(dcx), v),
def::DefUse(did) => def::DefUse(did.tr(dcx)),
def::DefUpvar(nid1, nid2, nid3) => {
def::DefUpvar(dcx.tr_id(nid1),
dcx.tr_id(nid2),
dcx.tr_id(nid3))
}
def::DefStruct(did) => def::DefStruct(did.tr(dcx)),
def::DefRegion(nid) => def::DefRegion(dcx.tr_id(nid)),
def::DefTyParamBinder(nid) => {
def::DefTyParamBinder(dcx.tr_id(nid))
}
def::DefLabel(nid) => def::DefLabel(dcx.tr_id(nid))
}
}
}
// ______________________________________________________________________
// Encoding and decoding of ancillary information
impl tr for ty::Region {
fn tr(&self, dcx: &DecodeContext) -> ty::Region {
match *self {
ty::ReLateBound(id, br) => {
ty::ReLateBound(dcx.tr_id(id), br.tr(dcx))
}
ty::ReEarlyBound(id, space, index, ident) => {
ty::ReEarlyBound(dcx.tr_id(id), space, index, ident)
}
ty::ReScope(id) => {
ty::ReScope(dcx.tr_id(id))
}
ty::ReEmpty | ty::ReStatic | ty::ReInfer(..) => {
*self
}
ty::ReFree(ref fr) => {
ty::ReFree(ty::FreeRegion {scope_id: dcx.tr_id(fr.scope_id),
bound_region: fr.bound_region.tr(dcx)})
}
}
}
}
impl tr for ty::BoundRegion {
fn tr(&self, dcx: &DecodeContext) -> ty::BoundRegion {
match *self {
ty::BrAnon(_) |
ty::BrFresh(_) |
ty::BrEnv => *self,
ty::BrNamed(id, ident) => ty::BrNamed(dcx.tr_def_id(id),
ident),
}
}
}
impl tr for ty::TraitStore {
fn tr(&self, dcx: &DecodeContext) -> ty::TraitStore {
match *self {
ty::RegionTraitStore(r, m) => {
ty::RegionTraitStore(r.tr(dcx), m)
}
ty::UniqTraitStore => ty::UniqTraitStore
}
}
}
// ______________________________________________________________________
// Encoding and decoding of freevar information
fn encode_freevar_entry(rbml_w: &mut Encoder, fv: &ty::Freevar) {
(*fv).encode(rbml_w).unwrap();
}
fn encode_capture_mode(rbml_w: &mut Encoder, cm: ast::CaptureClause) {
cm.encode(rbml_w).unwrap();
}
trait rbml_decoder_helper {
fn read_freevar_entry(&mut self, dcx: &DecodeContext)
-> ty::Freevar;
fn read_capture_mode(&mut self) -> ast::CaptureClause;
}
impl<'a> rbml_decoder_helper for reader::Decoder<'a> {
fn read_freevar_entry(&mut self, dcx: &DecodeContext)
-> ty::Freevar {
let fv: ty::Freevar = Decodable::decode(self).unwrap();
fv.tr(dcx)
}
fn read_capture_mode(&mut self) -> ast::CaptureClause {
let cm: ast::CaptureClause = Decodable::decode(self).unwrap();
cm
}
}
impl tr for ty::Freevar {
fn tr(&self, dcx: &DecodeContext) -> ty::Freevar {
ty::Freevar {
def: self.def.tr(dcx),
span: self.span.tr(dcx),
}
}
}
impl tr for ty::UpvarBorrow {
fn tr(&self, dcx: &DecodeContext) -> ty::UpvarBorrow {
ty::UpvarBorrow {
kind: self.kind,
region: self.region.tr(dcx)
}
}
}
// ______________________________________________________________________
// Encoding and decoding of MethodCallee
trait read_method_callee_helper {
fn read_method_callee(&mut self, dcx: &DecodeContext)
-> (typeck::ExprAdjustment, MethodCallee);
}
fn encode_method_callee(ecx: &e::EncodeContext,
rbml_w: &mut Encoder,
adjustment: typeck::ExprAdjustment,
method: &MethodCallee) {
use serialize::Encoder;
rbml_w.emit_struct("MethodCallee", 4, |rbml_w| {
rbml_w.emit_struct_field("adjustment", 0u, |rbml_w| {
adjustment.encode(rbml_w)
});
rbml_w.emit_struct_field("origin", 1u, |rbml_w| {
Ok(rbml_w.emit_method_origin(ecx, &method.origin))
});
rbml_w.emit_struct_field("ty", 2u, |rbml_w| {
Ok(rbml_w.emit_ty(ecx, method.ty))
});
rbml_w.emit_struct_field("substs", 3u, |rbml_w| {
Ok(rbml_w.emit_substs(ecx, &method.substs))
})
}).unwrap();
}
impl<'a> read_method_callee_helper for reader::Decoder<'a> {
fn read_method_callee(&mut self, dcx: &DecodeContext)
-> (typeck::ExprAdjustment, MethodCallee) {
self.read_struct("MethodCallee", 4, |this| {
let adjustment = this.read_struct_field("adjustment", 0, |this| {
Decodable::decode(this)
}).unwrap();
Ok((adjustment, MethodCallee {
origin: this.read_struct_field("origin", 1, |this| {
Ok(this.read_method_origin(dcx))
}).unwrap(),
ty: this.read_struct_field("ty", 2, |this| {
Ok(this.read_ty(dcx))
}).unwrap(),
substs: this.read_struct_field("substs", 3, |this| {
Ok(this.read_substs(dcx))
}).unwrap()
}))
}).unwrap()
}
}
impl tr for MethodOrigin {
fn tr(&self, dcx: &DecodeContext) -> MethodOrigin {
match *self {
typeck::MethodStatic(did) => typeck::MethodStatic(did.tr(dcx)),
typeck::MethodStaticUnboxedClosure(did) => {
typeck::MethodStaticUnboxedClosure(did.tr(dcx))
}
typeck::MethodTypeParam(ref mp) => {
typeck::MethodTypeParam(
typeck::MethodParam {
// def-id is already translated when we read it out
trait_ref: mp.trait_ref.clone(),
method_num: mp.method_num,
}
)
}
typeck::MethodTraitObject(ref mo) => {
typeck::MethodTraitObject(
typeck::MethodObject {
trait_ref: mo.trait_ref.clone(),
.. *mo
}
)
}
}
}
}
pub fn encode_unboxed_closure_kind(ebml_w: &mut Encoder,
kind: ty::UnboxedClosureKind) {
use serialize::Encoder;
ebml_w.emit_enum("UnboxedClosureKind", |ebml_w| {
match kind {
ty::FnUnboxedClosureKind => {
ebml_w.emit_enum_variant("FnUnboxedClosureKind", 0, 3, |_| {
Ok(())
})
}
ty::FnMutUnboxedClosureKind => {
ebml_w.emit_enum_variant("FnMutUnboxedClosureKind", 1, 3, |_| {
Ok(())
})
}
ty::FnOnceUnboxedClosureKind => {
ebml_w.emit_enum_variant("FnOnceUnboxedClosureKind",
2,
3,
|_| {
Ok(())
})
}
}
}).unwrap()
}
pub trait vtable_decoder_helpers {
fn read_vec_per_param_space<T>(&mut self,
f: |&mut Self| -> T)
-> VecPerParamSpace<T>;
fn read_vtable_res_with_key(&mut self,
tcx: &ty::ctxt,
cdata: &cstore::crate_metadata)
-> (typeck::ExprAdjustment, typeck::vtable_res);
fn read_vtable_res(&mut self,
tcx: &ty::ctxt, cdata: &cstore::crate_metadata)
-> typeck::vtable_res;
fn read_vtable_param_res(&mut self,
tcx: &ty::ctxt, cdata: &cstore::crate_metadata)
-> typeck::vtable_param_res;
fn read_vtable_origin(&mut self,
tcx: &ty::ctxt, cdata: &cstore::crate_metadata)
-> typeck::vtable_origin;
}
impl<'a> vtable_decoder_helpers for reader::Decoder<'a> {
fn read_vec_per_param_space<T>(&mut self,
f: |&mut reader::Decoder<'a>| -> T)
-> VecPerParamSpace<T>
{
let types = self.read_to_vec(|this| Ok(f(this))).unwrap();
let selfs = self.read_to_vec(|this| Ok(f(this))).unwrap();
let fns = self.read_to_vec(|this| Ok(f(this))).unwrap();
VecPerParamSpace::new(types, selfs, fns)
}
fn read_vtable_res_with_key(&mut self,
tcx: &ty::ctxt,
cdata: &cstore::crate_metadata)
-> (typeck::ExprAdjustment, typeck::vtable_res) {
self.read_struct("VtableWithKey", 2, |this| {
let adjustment = this.read_struct_field("adjustment", 0, |this| {
Decodable::decode(this)
}).unwrap();
Ok((adjustment, this.read_struct_field("vtable_res", 1, |this| {
Ok(this.read_vtable_res(tcx, cdata))
}).unwrap()))
}).unwrap()
}
fn read_vtable_res(&mut self,
tcx: &ty::ctxt,
cdata: &cstore::crate_metadata)
-> typeck::vtable_res
{
self.read_vec_per_param_space(
|this| this.read_vtable_param_res(tcx, cdata))
}
fn read_vtable_param_res(&mut self,
tcx: &ty::ctxt, cdata: &cstore::crate_metadata)
-> typeck::vtable_param_res {
self.read_to_vec(|this| Ok(this.read_vtable_origin(tcx, cdata)))
.unwrap().into_iter().collect()
}
fn read_vtable_origin(&mut self,
tcx: &ty::ctxt, cdata: &cstore::crate_metadata)
-> typeck::vtable_origin {
self.read_enum("vtable_origin", |this| {
this.read_enum_variant(["vtable_static",
"vtable_param",
"vtable_error",
"vtable_unboxed_closure"],
|this, i| {
Ok(match i {
0 => {
typeck::vtable_static(
this.read_enum_variant_arg(0u, |this| {
Ok(this.read_def_id_nodcx(cdata))
}).unwrap(),
this.read_enum_variant_arg(1u, |this| {
Ok(this.read_substs_nodcx(tcx, cdata))
}).unwrap(),
this.read_enum_variant_arg(2u, |this| {
Ok(this.read_vtable_res(tcx, cdata))
}).unwrap()
)
}
1 => {
typeck::vtable_param(
this.read_enum_variant_arg(0u, |this| {
Decodable::decode(this)
}).unwrap(),
this.read_enum_variant_arg(1u, |this| {
this.read_uint()
}).unwrap()
)
}
2 => {
typeck::vtable_unboxed_closure(
this.read_enum_variant_arg(0u, |this| {
Ok(this.read_def_id_nodcx(cdata))
}).unwrap()
)
}
3 => {
typeck::vtable_error
}
_ => fail!("bad enum variant")
})
})
}).unwrap()
}
}
// ___________________________________________________________________________
//
fn encode_vec_per_param_space<T>(rbml_w: &mut Encoder,
v: &subst::VecPerParamSpace<T>,
f: |&mut Encoder, &T|) {
for &space in subst::ParamSpace::all().iter() {
rbml_w.emit_from_vec(v.get_slice(space),
|rbml_w, n| Ok(f(rbml_w, n))).unwrap();
}
}
// ______________________________________________________________________
// Encoding and decoding the side tables
trait get_ty_str_ctxt<'tcx> {
fn ty_str_ctxt<'a>(&'a self) -> tyencode::ctxt<'a, 'tcx>;
}
impl<'a, 'tcx> get_ty_str_ctxt<'tcx> for e::EncodeContext<'a, 'tcx> {
fn ty_str_ctxt<'a>(&'a self) -> tyencode::ctxt<'a, 'tcx> |
}
trait rbml_writer_helpers {
fn emit_closure_type(&mut self,
ecx: &e::EncodeContext,
closure_type: &ty::ClosureTy);
fn emit_method_origin(&mut self,
ecx: &e::EncodeContext,
method_origin: &typeck::MethodOrigin);
fn emit_ty(&mut self, ecx: &e::EncodeContext, ty: ty::t);
fn emit_tys(&mut self, ecx: &e::EncodeContext, tys: &[ty::t]);
fn emit_type_param_def(&mut self,
ecx: &e::EncodeContext,
type_param_def: &ty::TypeParameterDef);
fn emit_trait_ref(&mut self, ecx: &e::EncodeContext, ty: &ty::TraitRef);
fn emit_polytype(&mut self,
ecx: &e::EncodeContext,
pty: ty::Polytype);
fn emit_substs(&mut self, ecx: &e::EncodeContext, substs: &subst::Substs);
fn emit_existential_bounds(&mut self, ecx: &e::EncodeContext, bounds: &ty::ExistentialBounds);
fn emit_builtin_bounds(&mut self, ecx: &e::EncodeContext, bounds: &ty::BuiltinBounds);
fn emit_auto_adjustment(&mut self, ecx: &e::EncodeContext, adj: &ty::AutoAdjustment);
fn emit_autoref(&mut self, ecx: &e::EncodeContext, autoref: &ty::AutoRef);
fn emit_auto_deref_ref(&mut self, ecx: &e::EncodeContext, auto_deref_ref: &ty::AutoDerefRef);
fn emit_unsize_kind(&mut self, ecx: &e::EncodeContext, uk: &ty::UnsizeKind);
}
impl<'a> rbml_writer_helpers for Encoder<'a> {
fn emit_closure_type(&mut self,
ecx: &e::EncodeContext,
closure_type: &ty::ClosureTy) {
self.emit_opaque(|this| {
Ok(e::write_closure_type(ecx, this, closure_type))
});
}
fn emit_method_origin(&mut self,
ecx: &e::EncodeContext,
method_origin: &typeck::MethodOrigin)
{
use serialize::Encoder;
self.emit_enum("MethodOrigin", |this| {
match *method_origin {
typeck::MethodStatic(def_id) => {
this.emit_enum_variant("MethodStatic", 0, 1, |this| {
Ok(this.emit_def_id(def_id))
})
}
typeck::MethodStaticUnboxedClosure(def_id) => {
this.emit_enum_variant("MethodStaticUnboxedClosure", 1, 1, |this| {
Ok(this.emit_def_id(def_id))
})
}
typeck::MethodTypeParam(ref p) => {
this.emit_enum_variant("MethodTypeParam", 2, 1, |this| {
this.emit_struct("MethodParam", 2, |this| {
try!(this.emit_struct_field("trait_ref", 0, |this| {
Ok(this.emit_trait_ref(ecx, &*p.trait_ref))
}));
try!(this.emit_struct_field("method_num", 0, |this| {
this.emit_uint(p.method_num)
}));
Ok(())
})
})
}
typeck::MethodTraitObject(ref o) => {
this.emit_enum_variant("MethodTraitObject", 3, 1, |this| {
this.emit_struct("MethodObject", 2, |this| {
try!(this.emit_struct_field("trait_ref", 0, |this| {
Ok(this.emit_trait_ref(ecx, &*o.trait_ref))
}));
try!(this.emit_struct_field("object_trait_id", 0, |this| {
Ok(this.emit_def_id(o.object_trait_id))
}));
try!(this.emit_struct_field("method_num", 0, |this| {
this.emit_uint(o.method_num)
}));
try!(this.emit_struct_field("real_index", 0, |this| {
this.emit_uint(o.real_index)
}));
Ok(())
})
})
}
}
});
}
fn emit_ty(&mut self, ecx: &e::EncodeContext, ty: ty::t) {
self.emit_opaque(|this| Ok(e::write_type(ecx, this, ty)));
}
fn emit_tys(&mut self, ecx: &e::EncodeContext, tys: &[ty::t]) {
self.emit_from_vec(tys, |this, ty| Ok(this.emit_ty(ecx, *ty)));
}
fn emit_trait_ref(&mut self,
ecx: &e::EncodeContext,
trait_ref: &ty::TraitRef) {
self.emit_opaque(|this| Ok(e::write_trait_ref(ecx, this, trait_ref)));
}
fn emit_type_param_def(&mut self,
ecx: &e::EncodeContext,
type_param_def: &ty::TypeParameterDef) {
self.emit_opaque(|this| {
Ok(tyencode::enc_type_param_def(this.writer,
&ecx.ty_str_ctxt(),
type_param_def))
});
}
fn emit_polytype(&mut self,
ecx: &e::EncodeContext,
pty: ty::Polytype) {
use serialize::Encoder;
self.emit_struct("Polytype", 2, |this| {
this.emit_struct_field("generics", 0, |this| {
this.emit_struct("Generics", 2, |this| {
this.emit_struct_field("types", 0, |this| {
Ok(encode_vec_per_param_space(
this, &pty.generics.types,
|this, def| this.emit_type_param_def(ecx, def)))
});
this.emit_struct_field("regions", 1, |this| {
Ok(encode_vec_per_param_space(
this, &pty.generics.regions,
|this, def| def.encode(this).unwrap()))
})
})
});
this.emit_struct_field("ty", 1, |this| {
Ok(this.emit_ty(ecx, pty.ty))
})
});
}
fn emit_existential_bounds(&mut self, ecx: &e::EncodeContext, bounds: &ty::ExistentialBounds) {
self.emit_opaque(|this| Ok(tyencode::enc_existential_bounds(this.writer,
&ecx.ty_str_ctxt(),
bounds)));
}
fn emit_builtin_bounds(&mut self, ecx: &e::EncodeContext, bounds: &ty::BuiltinBounds) {
self.emit_opaque(|this| Ok(tyencode::enc_builtin_bounds(this.writer,
&ecx.ty_str_ctxt(),
bounds)));
}
fn emit_substs(&mut self, ecx: &e::EncodeContext, substs: &subst::Substs) {
self.emit_opaque(|this| Ok(tyencode::enc_substs(this.writer,
&ecx.ty_str_ctxt(),
substs)));
}
fn emit_auto_adjustment(&mut self, ecx: &e::EncodeContext, adj: &ty::AutoAdjustment) {
use serialize::Encoder;
self.emit_enum("AutoAdjustment", |this| {
match *adj {
ty::AdjustAddEnv(store) => {
this.emit_enum_variant("AutoAddEnv", 0, 1, |this| {
this.emit_enum_variant_arg(0, |this| store.encode(this))
})
}
ty::AdjustDerefRef(ref auto_deref_ref) => {
this.emit_enum_variant("AutoDerefRef", 1, 1, |this| {
this.emit_enum_variant_arg(0,
|this| Ok(this.emit_auto_deref_ref(ecx, auto_deref_ref)))
})
}
}
});
}
fn emit_autoref(&mut self, ecx: &e::EncodeContext, autoref: &ty::AutoRef) {
use serialize::Encoder;
self.emit_enum("AutoRef", |this| {
match autoref {
&ty::AutoPtr(r, m, None) => {
this.emit_enum_variant("AutoPtr", 0, 3, |this| {
this.emit_enum_variant_arg(0, |this| r.encode(this));
this.emit_enum_variant_arg(1, |this| m.encode(this));
this.emit_enum_variant_arg(2,
|this| this.emit_option(|this| this.emit_option_none()))
})
}
&ty::AutoPtr(r, m, Some(box ref a)) => {
this.emit_enum_variant("AutoPtr", 0, 3, |this| {
this.emit_enum_variant_arg(0, |this| r.encode(this));
this.emit_enum_variant_arg(1, |this| m.encode(this));
this.emit_enum_variant_arg(2, |this| this.emit_option(
|this| this.emit_option_some(|this| Ok(this.emit_autoref(ecx, a)))))
})
}
&ty::AutoUnsize(ref uk) => {
this.emit_enum_variant("AutoUnsize", 1, 1, |this| {
this.emit_enum_variant_arg(0, |this| Ok(this.emit_unsize_kind(ecx, uk)))
})
}
&ty::AutoUnsizeUniq(ref uk) => {
this.emit_enum_variant("AutoUnsizeUniq", 2, 1, |this| {
this.emit_enum_variant_arg(0, |this| Ok(this.emit_unsize_kind(ecx, uk)))
})
}
&ty::AutoUnsafe(m, None) => {
this.emit_enum_variant("AutoUnsafe", 3, 2, |this| {
this.emit_enum_variant_arg(0, |this| m.encode(this));
this.emit_enum_variant_arg(1,
|this| this.emit_option(|this| this.emit_option_none()))
})
}
&ty::AutoUnsafe(m, Some(box ref a)) => {
this.emit_enum_variant("AutoUnsafe", 3, 2, |this| {
this.emit_enum_variant_arg(0, |this| m.encode(this));
this.emit_enum_variant_arg(1, |this| this.emit_option(
|this| this.emit_option_some(|this| Ok(this.emit_autoref(ecx, a)))))
})
}
}
});
}
fn emit_auto_deref_ref(&mut self, ecx: &e::EncodeContext, auto_deref_ref: &ty::AutoDerefRef) {
use serialize::Encoder;
self.emit_struct("AutoDerefRef", 2, |this| {
this.emit_struct_field("autoderefs", 0, |this| auto_deref_ref.autoderefs.encode(this));
this.emit_struct_field("autoref", 1, |this| {
this.emit_option(|this| {
match auto_deref_ref.autoref {
None => this.emit_option_none(),
Some(ref a) => this.emit_option_some(|this| Ok(this.emit_autoref(ecx, a))),
}
})
})
});
}
fn emit_unsize_kind(&mut self, ecx: &e::EncodeContext, uk: &ty::UnsizeKind) {
use serialize::Encoder;
self.emit_enum("UnsizeKind", |this| {
match *uk {
ty::UnsizeLength(len) => {
this.emit_enum_variant("UnsizeLength", 0, 1, |this| {
this.emit_enum_variant_arg(0, |this| len.encode(this))
})
}
ty::UnsizeStruct(box ref uk, idx) => {
this.emit_enum_variant("UnsizeStruct", 1, 2, |this| {
this.emit_enum_variant_arg(0, |this| Ok(this.emit_unsize_kind(ecx, uk)));
this.emit_enum_variant_arg(1, |this| idx.encode(this))
})
}
ty::UnsizeVtable(ty::TyTrait { def_id: def_id,
bounds: ref b,
substs: ref substs },
self_ty) => {
this.emit_enum_variant("UnsizeVtable", 2, 4, |this| {
this.emit_enum_variant_arg(
0, |this| Ok(this.emit_existential_bounds(ecx, b)));
this.emit_enum_variant_arg(1, |this| def_id.encode(this));
this.emit_enum_variant_arg(2, |this| Ok(this.emit_ty(ecx, self_ty)));
this.emit_enum_variant_arg(3, |this| Ok(this.emit_substs(ecx, substs)))
})
}
}
});
}
}
trait write_tag_and_id {
fn tag(&mut self, tag_id: c::astencode_tag, f: |&mut Self|);
fn id(&mut self, id: ast::NodeId);
}
impl<'a> write_tag_and_id for Encoder<'a> {
fn tag(&mut self,
tag_id: c::astencode_tag,
f: |&mut Encoder<'a>|) {
self.start_tag(tag_id as uint);
f(self);
self.end_tag();
}
fn id(&mut self, id: ast::NodeId) {
self.wr_tagged_u64(c::tag_table_id as uint, id as u64);
}
}
struct SideTableEncodingIdVisitor<'a,'b:'a> {
ecx_ptr: *const libc::c_void,
new_rbml_w: &'a mut Encoder<'b>,
}
impl<'a,'b> ast_util::IdVisitingOperation for
SideTableEncodingIdVisitor<'a,'b> {
fn visit_id(&self, id: ast::NodeId) {
// Note: this will cause a copy of rbml_w, which is bad as
// it is mutable. But I believe it's harmless since we generate
// balanced EBML.
//
// FIXME(pcwalton): Don't copy this way.
let mut new_rbml_w = unsafe {
self.new_rbml_w.unsafe_clone()
};
// See above
let ecx: &e::EncodeContext = unsafe {
mem::transmute(self.ecx_ptr)
};
encode_side_tables_for_id(ecx, &mut new_rbml_w, id)
}
}
fn encode_side_tables_for_ii(ecx: &e::EncodeContext,
rbml_w: &mut Encoder,
ii: &ast::InlinedItem) {
rbml_w.start_tag(c::tag_table as uint);
let mut new_rbml_w = unsafe {
rbml_w.unsafe_clone()
};
// Because the ast visitor uses @IdVisitingOperation, I can't pass in
// ecx directly, but /I/ know that it'll be fine since the lifetime is
// tied to the CrateContext that lives throughout this entire section.
ast_util::visit_ids_for_inlined_item(ii, &SideTableEncodingIdVisitor {
ecx_ptr: unsafe {
mem::transmute(ecx)
},
new_rbml_w: &mut new_rbml_w,
});
rbml_w.end_tag();
}
fn encode_side_tables_for_id(ecx: &e::EncodeContext,
rbml_w: &mut Encoder,
id: ast::NodeId) {
let tcx = ecx.tcx;
debug!("Encoding side tables for id {}", id);
for def in tcx.def_map.borrow().find(&id).iter() {
rbml_w.tag(c::tag_table_def, |rbml_w| {
rbml_w.id(id);
rbml_w.tag(c::tag_table_val, |rbml_w| (*def).encode(rbml_w).unwrap());
})
}
for &ty in tcx.node_types.borrow().find(&(id as uint)).iter() {
rbml_w.tag(c::tag_table_node_type, |rbml_w| {
rbml_w.id(id);
rbml_w.tag(c::tag_table_val, |rbml_w| {
rbml_w.emit_ty(ecx, *ty);
})
})
}
for &item_substs in tcx.item_substs.borrow().find(&id).iter() {
rbml_w.tag(c::tag_table_item_subst, |rbml_w| {
rbml_w.id(id);
rbml_w.tag(c::tag_table_val, |rbml_w| {
rbml_w.emit_substs(ecx, &item_substs.substs);
})
})
}
for &fv in tcx.freevars.borrow().find(&id).iter() {
rbml_w.tag(c::tag_table_freevars, |rbml_w| {
rbml_w.id(id);
rbml_w.tag(c::tag_table_val, |rbml_w| {
rbml_w.emit_from_vec(fv.as_slice(), |rbml_w, fv_entry| {
Ok(encode_freevar_entry(rbml_w, fv_entry))
});
})
});
for freevar in fv.iter() {
match tcx.capture_mode(id) {
ast::CaptureByRef => {
rbml_w.tag(c::tag_table_upvar_borrow_map, |rbml_w| {
rbml_w.id(id);
rbml_w.tag(c::tag_table_val, |rbml_w| {
let var_id = freevar.def.def_id().node;
let upvar_id = ty::UpvarId {
var_id: var_id,
closure_expr_id: id
};
let upvar_borrow = tcx.upvar_borrow_map.borrow()
.get_copy(&upvar_id);
var_id.encode(rbml_w);
upvar_borrow.encode(rbml_w);
})
})
}
_ => {}
}
}
}
for &cm in tcx.capture_modes.borrow().find(&id).iter() {
rbml_w.tag(c::tag_table_capture_modes, |rbml_w| {
rbml_w.id(id);
rbml_w.tag(c::tag_table_val, |rbml_w| {
encode_capture_mode(rbml_w, *cm);
})
})
}
let lid = ast::DefId { krate: ast::LOCAL_CRATE, node: id };
for &pty in tcx.tcache.borrow().find(&lid).iter() {
rbml_w.tag(c::tag_table_tcache, |rbml_w| {
rbml_w.id(id);
rbml_w.tag(c::tag_table_val, |rbml_w| {
rbml_w.emit_polytype(ecx, pty.clone());
})
})
}
for &type_param_def in tcx.ty_param_defs.borrow().find(&id).iter() {
rbml_w.tag(c::tag_table_param_defs, |rbml_w| {
rbml_w.id(id);
rbml_w.tag(c::tag_table_val, |rbml_w| {
rbml_w.emit_type_param_def(ecx, type_param_def)
})
})
}
let method_call = MethodCall::expr(id);
for &method in tcx.method_map.borrow().find(&method_call).iter() {
rbml_w.tag(c::tag_table_method_map, |rbml_w| {
rbml_w.id(id);
rbml_w.tag(c::tag_table_val, |rbml_w| {
encode_method_callee(ecx, rbml_w, method_call.adjustment, method)
})
})
}
for &trait_ref in tcx.object_cast_map.borrow().find(&id).iter() {
rbml_w.tag(c::tag_table_object_cast_map, |rbml_w| {
rbml_w.id(id);
rbml_w.tag(c::tag_table_val, |rbml_w| {
rbml_w.emit_trait_ref(ecx, &**trait_ref);
})
})
}
for &adjustment in tcx.adjustments.borrow().find(&id).iter() {
match *adjustment {
_ if ty::adjust_is_object(adjustment) => {
let method_call = MethodCall::autoobject(id);
for &method in tcx.method_map.borrow().find(&method_call).iter() {
rbml_w.tag(c::tag_table_method_map, |rbml_w| {
rbml_w.id(id);
rbml_w.tag(c::tag_table_val, |rbml_w| {
encode_method_callee(ecx, rbml_w, method_call.adjustment, method)
})
})
}
}
ty::AdjustDerefRef(ref adj) => {
assert!(!ty::adjust_is_object(adjustment));
for autoderef in range(0, adj.autoderefs) {
let method_call = MethodCall::autoderef(id, autoderef);
for &method in tcx.method_map.borrow().find(&method_call).iter() {
rbml_w.tag(c::tag_table_method_map, |rbml_w| {
rbml_w.id(id);
rbml_w.tag(c::tag_table_val, |rbml_w| {
encode_method_callee(ecx, rbml_w,
method_call.adjustment, method)
})
})
}
}
}
_ => {
assert!(!ty::adjust_is_object(adjustment));
}
}
rbml_w.tag(c::tag_table_adjustments, |rbml_w| {
rbml_w.id(id);
rbml_w.tag(c::tag_table_val, |rbml_w| {
rbml_w.emit_auto_adjustment(ecx, adjustment);
})
})
}
for unboxed_closure in tcx.unboxed_closures
.borrow()
.find(&ast_util::local_def(id))
.iter() {
rbml_w.tag(c::tag_table_unboxed_closures, |rbml_w| {
rbml_w.id(id);
rbml_w.tag(c::tag_table_val, |rbml_w| {
rbml_w.emit_closure_type(ecx, &unboxed_closure.closure_type);
encode_unboxed_closure_kind(rbml_w, unboxed_closure.kind)
})
})
}
}
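// Illustrative sketch (not from the original source) of the stream that
// encode_side_tables_for_id produces for a node id that has both a def and
// a node type:
//
//   tag_table
//     tag_table_def       { tag_table_id: id, tag_table_val: <encoded def> }
//     tag_table_node_type { tag_table_id: id, tag_table_val: <encoded ty> }
//
// i.e. one tagged entry per side table that has a value for that id, each
// carrying the node id followed by the encoded value.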
trait doc_decoder_helpers {
fn as_int(&self) -> int;
fn opt_child(&self, tag: c::astencode_tag) -> Option<Self>;
}
impl<'a> doc_decoder_helpers for rbml::Doc<'a> {
fn as_int(&self) -> int { reader::doc_as_u64(*self) as int }
fn opt_child(&self, tag: c::astencode_tag) -> Option<rbml::Doc<'a>> {
reader::maybe_get_doc(*self, tag as uint)
}
}
trait rbml_decoder_decoder_helpers {
fn read_method_origin(&mut self, dcx: &DecodeContext) -> typeck::MethodOrigin;
fn read_ty(&mut self, dcx: &DecodeContext) -> ty::t;
fn read_tys(&mut self, dcx: &DecodeContext) -> Vec<ty::t>;
fn read_trait_ref(&mut self, dcx: &DecodeContext) -> Rc<ty::TraitRef>;
fn read_type_param_def(&mut self, dcx: &DecodeContext)
-> ty::TypeParameterDef;
fn read_polytype(&mut self, dcx: &DecodeContext)
-> ty::Polytype;
fn read_existential_bounds(&mut self, dcx: &DecodeContext) -> ty::ExistentialBounds;
fn read_substs(&mut self, dcx: &DecodeContext) -> subst::Substs;
fn read_auto_adjustment(&mut self, dcx: &DecodeContext) -> ty::AutoAdjustment;
fn read_unboxed_closure(&mut self, dcx: &DecodeContext)
-> ty::UnboxedClosure;
fn read_auto_deref_ref(&mut self, dcx: &DecodeContext) -> ty::AutoDerefRef;
fn read_autoref(&mut self, dcx: &DecodeContext) -> ty::AutoRef;
fn read_unsize_kind(&mut self, dcx: &DecodeContext) -> ty::UnsizeKind;
fn convert_def_id(&mut self,
dcx: &DecodeContext,
source: DefIdSource,
did: ast::DefId)
-> ast::DefId;
// Versions of the type reading functions that don't need the full
// DecodeContext.
fn read_ty_nodcx(&mut self,
tcx: &ty::ctxt, cdata: &cstore::crate_metadata) -> ty::t;
fn read_tys_nodcx(&mut self,
tcx: &ty::ctxt,
cdata: &cstore::crate_metadata) -> Vec<ty::t>;
fn read_substs_nodcx(&mut self, tcx: &ty::ctxt,
cdata: &cstore::crate_metadata)
-> subst::Substs;
}
impl<'a> rbml_decoder_decoder_helpers for reader::Decoder<'a> {
fn read_ty_nodcx(&mut self,
tcx: &ty::ctxt, cdata: &cstore::crate_metadata) -> ty::t {
self.read_opaque(|_, doc| {
Ok(tydecode::parse_ty_data(
doc.data,
cdata.cnum,
doc.start,
tcx,
|_, id| decoder::translate_def_id(cdata, id)))
}).unwrap()
}
fn read_tys_nodcx(&mut self,
tcx: &ty::ctxt,
cdata: &cstore::crate_metadata) -> Vec<ty::t> {
self.read_to_vec(|this| Ok(this.read_ty_nodcx(tcx, cdata)) )
.unwrap()
.into_iter()
.collect()
}
fn read_substs_nodcx(&mut self,
tcx: &ty::ctxt,
cdata: &cstore::crate_metadata)
-> subst::Substs
{
self.read_opaque(|_, doc| {
Ok(tydecode::parse_substs_data(
doc.data,
cdata.cnum,
doc.start,
tcx,
|_, id| decoder::translate_def_id(cdata, id)))
}).unwrap()
}
fn read_method_origin(&mut self, dcx: &DecodeContext)
-> typeck::MethodOrigin
{
self.read_enum("MethodOrigin", |this| {
let variants = ["MethodStatic", "MethodStaticUnboxedClosure",
"MethodTypeParam", "MethodTraitObject"];
this.read_enum_variant(variants, |this, i| {
Ok(match i {
0 => {
let def_id = this.read_def_id(dcx);
typeck::MethodStatic(def_id)
}
1 => {
let def_id = this.read_def_id(dcx);
typeck::MethodStaticUnboxedClosure(def_id)
}
2 => {
this.read_struct("MethodTypeParam", 2, |this| {
Ok(typeck::MethodTypeParam(
typeck::MethodParam {
trait_ref: {
this.read_struct_field("trait_ref", 0, |this| {
Ok(this.read_trait_ref(dcx))
}).unwrap()
},
method_num: {
this.read_struct_field("method_num", 1, |this| {
this.read_uint()
}).unwrap()
}
}))
}).unwrap()
}
3 => {
this.read_struct("MethodTraitObject", 2, |this| {
Ok(typeck::MethodTraitObject(
typeck::MethodObject {
trait_ref: {
this.read_struct_field("trait_ref", 0, |this| {
Ok(this.read_trait_ref(dcx))
}).unwrap()
},
object_trait_id: {
this.read_struct_field("object_trait_id", 1, |this| {
Ok(this.read_def_id(dcx))
}).unwrap()
},
method_num: {
this.read_struct_field("method_num", 2, |this| {
this.read_uint()
}).unwrap()
},
real_index: {
this.read_struct_field("real_index", 3, |this| {
this.read_uint()
}).unwrap()
},
}))
}).unwrap()
}
_ => fail!("..")
})
})
}).unwrap()
}
fn read_ty(&mut self, dcx: &DecodeContext) -> ty::t {
// Note: regions types embed local node ids. In principle, we
// should translate these node ids into the new decode
// context. However, we do not bother, because region types
// are not used during trans.
return self.read_opaque(|this, doc| {
debug!("read_ty({})", type_string(doc));
let ty = tydecode::parse_ty_data(
doc.data,
dcx.cdata.cnum,
doc.start,
dcx.tcx,
|s, a| this.convert_def_id(dcx, s, a));
Ok(ty)
}).unwrap();
fn type_string(doc: rbml::Doc) -> String {
let mut str = String::new();
for i in range(doc.start, doc.end) {
str.push(doc.data[i] as char);
}
str
}
}
fn read_tys(&mut self, dcx: &DecodeContext) -> Vec<ty::t> {
self.read_to_vec(|this| Ok(this.read_ty(dcx))).unwrap().into_iter().collect()
}
fn read_trait_ref(&mut self, dcx: &DecodeContext) -> Rc<ty::TraitRef> {
Rc::new(self.read_opaque(|this, doc| {
let ty = tydecode::parse_trait_ref_data(
doc.data,
dcx.cdata.cnum,
doc.start,
dcx.tcx,
|s, a| this.convert_def_id(dcx, s, a));
Ok(ty)
}).unwrap())
}
fn read_type_param_def(&mut self, dcx: &DecodeContext)
-> ty::TypeParameterDef {
self.read_opaque(|this, doc| {
Ok(tydecode::parse_type_param_def_data(
doc.data,
doc.start,
dcx.cdata.cnum,
dcx.tcx,
|s, a| this.convert_def_id(dcx, s, a)))
}).unwrap()
}
fn read_polytype(&mut self, dcx: &DecodeContext)
-> ty::Polytype {
self.read_struct("Polytype", 2, |this| {
Ok(ty::Polytype {
generics: this.read_struct_field("generics", 0, |this| {
this.read_struct("Generics", 2, |this| {
Ok(ty::Generics {
types:
this.read_struct_field("types", 0, |this| {
Ok(this.read_vec_per_param_space(
|this| this.read_type_param_def(dcx)))
}).unwrap(),
regions:
this.read_struct_field("regions", 1, |this| {
Ok(this.read_vec_per_param_space(
|this| Decodable::decode(this).unwrap()))
}).unwrap()
})
})
}).unwrap(),
ty: this.read_struct_field("ty", 1, |this| {
Ok(this.read_ty(dcx))
}).unwrap()
})
}).unwrap()
}
fn read_existential_bounds(&mut self, dcx: &DecodeContext) -> ty::ExistentialBounds
{
self.read_opaque(|this, doc| {
Ok(tydecode::parse_existential_bounds_data(doc.data,
dcx.cdata.cnum,
doc.start,
dcx.tcx,
|s, a| this.convert_def_id(dcx, s, a)))
}).unwrap()
}
fn read_substs(&mut self, dcx: &DecodeContext) -> subst::Substs {
self.read_opaque(|this, doc| {
Ok(tydecode::parse_substs_data(doc.data,
dcx.cdata.cnum,
doc.start,
dcx.tcx,
|s, a| this.convert_def_id(dcx, s, a)))
}).unwrap()
}
fn read_auto_adjustment(&mut self, dcx: &DecodeContext) -> ty::AutoAdjustment {
self.read_enum("AutoAdjustment", |this| {
let variants = ["AutoAddEnv", "AutoDerefRef"];
this.read_enum_variant(variants, |this, i| {
Ok(match i {
0 => {
let store: ty::TraitStore =
this.read_enum_variant_arg(0, |this| Decodable::decode(this)).unwrap();
ty::AdjustAddEnv(store.tr(dcx))
}
1 => {
let auto_deref_ref: ty::AutoDerefRef =
this.read_enum_variant_arg(0,
|this| Ok(this.read_auto_deref_ref(dcx))).unwrap();
ty::AdjustDerefRef(auto_deref_ref)
}
_ => fail!("bad enum variant for ty::AutoAdjustment")
})
})
}).unwrap()
}
fn read_auto_deref_ref(&mut self, dcx: &DecodeContext) -> ty::AutoDerefRef {
self.read_struct("AutoDerefRef", 2, |this| {
Ok(ty::AutoDerefRef {
autoderefs: this.read_struct_field("autoderefs", 0, |this| {
Decodable::decode(this)
}).unwrap(),
autoref: this.read_struct_field("autoref", 1, |this| {
this.read_option(|this, b| {
if b {
Ok(Some(this.read_autoref(dcx)))
} else {
Ok(None)
}
})
}).unwrap(),
})
}).unwrap()
}
fn read_autoref(&mut self, dcx: &DecodeContext) -> ty::AutoRef {
self.read_enum("AutoRef", |this| {
let variants = ["AutoPtr",
"AutoUnsize",
"AutoUnsizeUniq",
"AutoUnsafe"];
this.read_enum_variant(variants, |this, i| {
Ok(match i {
0 => {
let r: ty::Region =
this.read_enum_variant_arg(0, |this| Decodable::decode(this)).unwrap();
let m: ast::Mutability =
this.read_enum_variant_arg(1, |this| Decodable::decode(this)).unwrap();
let a: Option<Box<ty::AutoRef>> =
this.read_enum_variant_arg(2, |this| this.read_option(|this, b| {
if b {
Ok(Some(box this.read_autoref(dcx)))
} else {
Ok(None)
}
})).unwrap();
ty::AutoPtr(r.tr(dcx), m, a)
}
1 => {
let uk: ty::UnsizeKind =
this.read_enum_variant_arg(0,
|this| Ok(this.read_unsize_kind(dcx))).unwrap();
ty::AutoUnsize(uk)
}
2 => {
let uk: ty::UnsizeKind =
this.read_enum_variant_arg(0,
|this| Ok(this.read_unsize_kind(dcx))).unwrap();
ty::AutoUnsizeUniq(uk)
}
3 => {
let m: ast::Mutability =
this.read_enum_variant_arg(0, |this| Decodable::decode(this)).unwrap();
let a: Option<Box<ty::AutoRef>> =
this.read_enum_variant_arg(1, |this| this.read_option(|this, b| {
if b {
Ok(Some(box this.read_autoref(dcx)))
} else {
Ok(None)
}
})).unwrap();
ty::AutoUnsafe(m, a)
}
_ => fail!("bad enum variant for ty::AutoRef")
})
})
}).unwrap()
}
fn read_unsize_kind(&mut self, dcx: &DecodeContext) -> ty::UnsizeKind {
self.read_enum("UnsizeKind", |this| {
let variants = ["UnsizeLength", "UnsizeStruct", "UnsizeVtable"];
this.read_enum_variant(variants, |this, i| {
Ok(match i {
0 => {
let len: uint =
this.read_enum_variant_arg(0, |this| Decodable::decode(this)).unwrap();
ty::UnsizeLength(len)
}
1 => {
let uk: ty::UnsizeKind =
this.read_enum_variant_arg(0,
|this| Ok(this.read_unsize_kind(dcx))).unwrap();
let idx: uint =
this.read_enum_variant_arg(1, |this| Decodable::decode(this)).unwrap();
ty::UnsizeStruct(box uk, idx)
}
2 => {
let b =
this.read_enum_variant_arg(
0, |this| Ok(this.read_existential_bounds(dcx))).unwrap();
let def_id: ast::DefId =
this.read_enum_variant_arg(1, |this| Decodable::decode(this)).unwrap();
let self_ty =
this.read_enum_variant_arg(2, |this| Ok(this.read_ty(dcx))).unwrap();
let substs = this.read_enum_variant_arg(3,
|this| Ok(this.read_substs(dcx))).unwrap();
let ty_trait = ty::TyTrait { def_id: def_id.tr(dcx),
bounds: b,
substs: substs };
ty::UnsizeVtable(ty_trait, self_ty)
}
_ => fail!("bad enum variant for ty::UnsizeKind")
})
})
}).unwrap()
}
fn read_unboxed_closure(&mut self, dcx: &DecodeContext)
-> ty::UnboxedClosure {
let closure_type = self.read_opaque(|this, doc| {
Ok(tydecode::parse_ty_closure_data(
doc.data,
dcx.cdata.cnum,
doc.start,
dcx.tcx,
|s, a| this.convert_def_id(dcx, s, a)))
}).unwrap();
let variants = [
"FnUnboxedClosureKind",
"FnMutUnboxedClosureKind",
"FnOnceUnboxedClosureKind"
];
let kind = self.read_enum_variant(variants, |_, i| {
Ok(match i {
0 => ty::FnUnboxedClosureKind,
1 => ty::FnMutUnboxedClosureKind,
2 => ty::FnOnceUnboxedClosureKind,
_ => fail!("bad enum variant for ty::UnboxedClosureKind"),
})
}).unwrap();
ty::UnboxedClosure {
closure_type: closure_type,
kind: kind,
}
}
fn convert_def_id(&mut self,
dcx: &DecodeContext,
source: tydecode::DefIdSource,
did: ast::DefId)
-> ast::DefId {
/*!
* Converts a def-id that appears in a type. The correct
* translation will depend on what kind of def-id this is.
* This is a subtle point: type definitions are not
* inlined into the current crate, so if the def-id names
* a nominal type or type alias, then it should be
* translated to refer to the source crate.
*
* However, *type parameters* are cloned along with the function
* they are attached to. So we should translate those def-ids
* to refer to the new, cloned copy of the type parameter.
* We only see references to free type parameters in the body of
* an inlined function. In such cases, we need the def-id to
* be a local id so that the TypeContents code is able to lookup
* the relevant info in the ty_param_defs table.
*
* *Region parameters*, unfortunately, are another kettle of fish.
* In such cases, def_id's can appear in types to distinguish
* shadowed bound regions and so forth. It doesn't actually
* matter so much what we do to these, since regions are erased
* at trans time, but it's good to keep them consistent just in
* case. We translate them with `tr_def_id()` which will map
* the crate numbers back to the original source crate.
*
* It'd be really nice to refactor the type repr to not include
* def-ids so that all these distinctions were unnecessary.
*/
let r = match source {
NominalType | TypeWithId | RegionParameter => dcx.tr_def_id(did),
TypeParameter => dcx.tr_intern_def_id(did)
};
debug!("convert_def_id(source={}, did={})={}", source, did, r);
return r;
}
}
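// Reads each entry in the side-tables document, translates the node id into
// the new decode context, and inserts the decoded value into the matching
// typeck/tcx side table.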
fn decode_side_tables(dcx: &DecodeContext,
ast_doc: rbml::Doc) {
let tbl_doc = ast_doc.get(c::tag_table as uint);
reader::docs(tbl_doc, |tag, entry_doc| {
let id0 = entry_doc.get(c::tag_table_id as uint).as_int();
let id = dcx.tr_id(id0 as ast::NodeId);
debug!(">> Side table document with tag 0x{:x} \
found for id {} (orig {})",
tag, id, id0);
match c::astencode_tag::from_uint(tag) {
None => {
dcx.tcx.sess.bug(
format!("unknown tag found in side tables: {:x}",
tag).as_slice());
}
Some(value) => {
let val_doc = entry_doc.get(c::tag_table_val as uint);
let mut val_dsr = reader::Decoder::new(val_doc);
let val_dsr = &mut val_dsr;
match value {
c::tag_table_def => {
let def = decode_def(dcx, val_doc);
dcx.tcx.def_map.borrow_mut().insert(id, def);
}
c::tag_table_node_type => {
let ty = val_dsr.read_ty(dcx);
debug!("inserting ty for node {}: {}",
id, ty_to_string(dcx.tcx, ty));
dcx.tcx.node_types.borrow_mut().insert(id as uint, ty);
}
c::tag_table_item_subst => {
let item_substs = ty::ItemSubsts {
substs: val_dsr.read_substs(dcx)
};
dcx.tcx.item_substs.borrow_mut().insert(
id, item_substs);
}
c::tag_table_freevars => {
let fv_info = val_dsr.read_to_vec(|val_dsr| {
Ok(val_dsr.read_freevar_entry(dcx))
}).unwrap().into_iter().collect();
dcx.tcx.freevars.borrow_mut().insert(id, fv_info);
}
c::tag_table_upvar_borrow_map => {
let var_id: ast::NodeId = Decodable::decode(val_dsr).unwrap();
let upvar_id = ty::UpvarId {
var_id: dcx.tr_id(var_id),
closure_expr_id: id
};
let ub: ty::UpvarBorrow = Decodable::decode(val_dsr).unwrap();
dcx.tcx.upvar_borrow_map.borrow_mut().insert(upvar_id, ub.tr(dcx));
}
c::tag_table_capture_modes => {
let capture_mode = val_dsr.read_capture_mode();
dcx.tcx
.capture_modes
.borrow_mut()
.insert(id, capture_mode);
}
c::tag_table_tcache => {
let pty = val_dsr.read_polytype(dcx);
let lid = ast::DefId { krate: ast::LOCAL_CRATE, node: id };
dcx.tcx.tcache.borrow_mut().insert(lid, pty);
}
c::tag_table_param_defs => {
let bounds = val_dsr.read_type_param_def(dcx);
dcx.tcx.ty_param_defs.borrow_mut().insert(id, bounds);
}
c::tag_table_method_map => {
let (adjustment, method) = val_dsr.read_method_callee(dcx);
let method_call = MethodCall {
expr_id: id,
adjustment: adjustment
};
dcx.tcx.method_map.borrow_mut().insert(method_call, method);
}
c::tag_table_object_cast_map => {
let trait_ref = val_dsr.read_trait_ref(dcx);
dcx.tcx.object_cast_map.borrow_mut()
.insert(id, trait_ref);
}
c::tag_table_adjustments => {
let adj: ty::AutoAdjustment = val_dsr.read_auto_adjustment(dcx);
dcx.tcx.adjustments.borrow_mut().insert(id, adj);
}
c::tag_table_unboxed_closures => {
let unboxed_closure =
val_dsr.read_unboxed_closure(dcx);
dcx.tcx
.unboxed_closures
.borrow_mut()
.insert(ast_util::local_def(id),
unboxed_closure);
}
_ => {
dcx.tcx.sess.bug(
format!("unknown tag found in side tables: {:x}",
tag).as_slice());
}
}
}
}
debug!(">< Side table doc loaded");
true
});
}
// ______________________________________________________________________
// Testing of astencode_gen
#[cfg(test)]
fn encode_item_ast(rbml_w: &mut Encoder, item: &ast::Item) {
rbml_w.start_tag(c::tag_tree as uint);
(*item).encode(rbml_w);
rbml_w.end_tag();
}
#[cfg(test)]
fn decode_item_ast(par_doc: rbml::Doc) -> ast::Item {
let chi_doc = par_doc.get(c::tag_tree as uint);
let mut d = reader::Decoder::new(chi_doc);
Decodable::decode(&mut d).unwrap()
}
#[cfg(test)]
trait fake_ext_ctxt {
fn cfg(&self) -> ast::CrateConfig;
fn parse_sess<'a>(&'a self) -> &'a parse::ParseSess;
fn call_site(&self) -> Span;
fn ident_of(&self, st: &str) -> ast::Ident;
}
#[cfg(test)]
impl fake_ext_ctxt for parse::ParseSess {
fn cfg(&self) -> ast::CrateConfig {
Vec::new()
}
fn parse_sess<'a>(&'a self) -> &'a parse::ParseSess { self }
fn call_site(&self) -> Span {
codemap::Span {
lo: codemap::BytePos(0),
hi: codemap::BytePos(0),
expn_id: codemap::NO_EXPANSION
}
}
fn ident_of(&self, st: &str) -> ast::Ident {
token::str_to_ident(st)
}
}
#[cfg(test)]
fn mk_ctxt() -> parse::ParseSess {
parse::new_parse_sess()
}
#[cfg(test)]
fn roundtrip(in_item: Option<P<ast::Item>>) {
let in_item = in_item.unwrap();
let mut wr = SeekableMemWriter::new();
encode_item_ast(&mut writer::Encoder::new(&mut wr), &*in_item);
let rbml_doc = rbml::Doc::new(wr.get_ref());
let out_item = decode_item_ast(rbml_doc);
assert!(*in_item == out_item);
}
#[test]
fn test_basic() {
let cx = mk_ctxt();
roundtrip(quote_item!(&cx,
fn foo() {}
));
}
/* NOTE: When there's a snapshot, update this (yay quasiquoter!)
#[test]
fn test_smalltalk() {
let cx = mk_ctxt();
roundtrip(quote_item!(&cx,
fn foo() -> int { 3 + 4 } // first smalltalk program ever executed.
));
}
*/
#[test]
fn test_more() {
let cx = mk_ctxt();
roundtrip(quote_item!(&cx,
fn foo(x: uint, y: uint) -> uint {
let z = x + y;
return z;
}
));
}
#[test]
fn test_simplification() {
let cx = mk_ctxt();
let item = quote_item!(&cx,
fn new_int_alist<B>() -> alist<int, B> {
fn eq_int(a: int, b: int) -> bool { a == b }
return alist {eq_fn: eq_int, data: Vec::new()};
}
).unwrap();
let item_in = e::IIItemRef(&*item);
let item_out = simplify_ast(item_in);
let item_exp = ast::IIItem(quote_item!(&cx,
fn new_int_alist<B>() -> alist<int, B> {
return alist {eq_fn: eq_int, data: Vec::new()};
}
).unwrap());
match (item_out, item_exp) {
(ast::IIItem(item_out), ast::IIItem(item_exp)) => {
assert!(pprust::item_to_string(&*item_out) ==
pprust::item_to_string(&*item_exp));
}
_ => fail!()
}
}
{
    tyencode::ctxt {
        diag: self.tcx.sess.diagnostic(),
        ds: e::def_to_string,
        tcx: self.tcx,
        abbrevs: &self.type_abbrevs
    }
}
client_test.go
// Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tikv
import (
"context"
"fmt"
"sync/atomic"
"testing"
"time"
. "github.com/pingcap/check"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/kvproto/pkg/tikvpb"
"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/store/tikv/tikvrpc"
)
func TestT(t *testing.T) {
CustomVerboseFlag = true
TestingT(t)
}
type testClientSuite struct {
OneByOneSuite
}
type testClientSerialSuite struct {
OneByOneSuite
}
var _ = Suite(&testClientSuite{})
var _ = SerialSuites(&testClientFailSuite{})
var _ = SerialSuites(&testClientSerialSuite{})
func setMaxBatchSize(size uint) {
newConf := config.NewConfig()
newConf.TiKVClient.MaxBatchSize = size
config.StoreGlobalConfig(newConf)
}
func (s *testClientSerialSuite) TestConn(c *C) {
maxBatchSize := config.GetGlobalConfig().TiKVClient.MaxBatchSize
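	// MaxBatchSize == 0 keeps this test on the non-batched connection path.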
setMaxBatchSize(0)
client := newRPCClient(config.Security{})
addr := "127.0.0.1:6379"
conn1, err := client.getConnArray(addr, true)
c.Assert(err, IsNil)
conn2, err := client.getConnArray(addr, true)
c.Assert(err, IsNil)
c.Assert(conn2.Get(), Not(Equals), conn1.Get())
client.Close()
conn3, err := client.getConnArray(addr, true)
c.Assert(err, NotNil)
c.Assert(conn3, IsNil)
setMaxBatchSize(maxBatchSize)
}
func (s *testClientSuite) TestRemoveCanceledRequests(c *C) {
req := new(tikvpb.BatchCommandsRequest_Request)
entries := []*batchCommandsEntry{
{canceled: 1, req: req},
{canceled: 0, req: req},
{canceled: 1, req: req},
{canceled: 1, req: req},
{canceled: 0, req: req},
}
entryPtr := &entries[0]
requests := make([]*tikvpb.BatchCommandsRequest_Request, len(entries))
for i := range entries {
requests[i] = entries[i].req
}
entries, requests = removeCanceledRequests(entries, requests)
c.Assert(len(entries), Equals, 2)
for _, e := range entries {
c.Assert(e.isCanceled(), IsFalse)
}
c.Assert(len(requests), Equals, 2)
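	// Filtering should happen in place, reusing the original backing array,
	// so the first surviving entry keeps its address.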
newEntryPtr := &entries[0]
c.Assert(entryPtr, Equals, newEntryPtr)
}
func (s *testClientSuite) TestCancelTimeoutRetErr(c *C) {
req := new(tikvpb.BatchCommandsRequest_Request)
a := newBatchConn(1, 1, nil)
ctx, cancel := context.WithCancel(context.TODO())
cancel()
_, err := sendBatchRequest(ctx, "", a, req, 2*time.Second)
c.Assert(errors.Cause(err), Equals, context.Canceled)
_, err = sendBatchRequest(context.Background(), "", a, req, 0)
c.Assert(errors.Cause(err), Equals, context.DeadlineExceeded)
}
func (s *testClientSuite) TestSendWhenReconnect(c *C) {
server, port := startMockTikvService()
c.Assert(port > 0, IsTrue)
rpcClient := newRPCClient(config.Security{})
addr := fmt.Sprintf("%s:%d", "127.0.0.1", port)
conn, err := rpcClient.getConnArray(addr, true)
c.Assert(err, IsNil)
// Suppose all connections are re-establishing.
for _, client := range conn.batchConn.batchCommandsClients {
client.lockForRecreate()
}
req := tikvrpc.NewRequest(tikvrpc.CmdEmpty, &tikvpb.BatchCommandsEmptyRequest{})
_, err = rpcClient.SendRequest(context.Background(), addr, req, 100*time.Second)
c.Assert(err.Error() == "no available connections", IsTrue)
conn.Close()
server.Stop()
}
func (s *testClientSuite) TestIdleHeartbeat(c *C) {
server, port := startMockTikvService()
c.Assert(port > 0, IsTrue)
defer server.Stop()
rpcClient := newRPCClient(config.Security{})
addr := fmt.Sprintf("%s:%d", "127.0.0.1", port)
conn, err := rpcClient.getConnArray(addr, true)
c.Assert(err, IsNil)
sendIdleReq := "github.com/pingcap/tidb/store/tikv/sendIdleHeartbeatReq"
noStripResp := "github.com/pingcap/tidb/store/tikv/forceReturnIdleHeartbeatResp"
noAvConn := "github.com/pingcap/tidb/store/tikv/noAvConn"
failBeforeSend := "github.com/pingcap/tidb/store/tikv/failBeforeSend"
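	// Each failpoint forces one branch of the idle-heartbeat logic: sendIdleReq
	// injects the idle request, noStripResp keeps its response intact, noAvConn
	// simulates no available connection, and failBeforeSend fails before sending.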
c.Assert(failpoint.Enable(sendIdleReq, `return()`), IsNil)
c.Assert(failpoint.Enable(noStripResp, `return()`), IsNil)
c.Assert(failpoint.Enable(noAvConn, `return()`), IsNil)
c.Assert(failpoint.Enable(failBeforeSend, `return()`), IsNil)
defer func() {
c.Assert(failpoint.Disable(sendIdleReq), IsNil)
c.Assert(failpoint.Disable(noStripResp), IsNil)
c.Assert(failpoint.Disable(noAvConn), IsNil)
c.Assert(failpoint.Disable(failBeforeSend), IsNil)
}()
// 1. test trigger idle heartbeat and return success by a live store.
ctx := failpoint.WithHook(context.TODO(), func(ctx context.Context, fpname string) bool {
if fpname == sendIdleReq || fpname == noStripResp {
return true
}
return false
})
req := tikvrpc.NewRequest(tikvrpc.CmdEmpty, &tikvpb.BatchCommandsEmptyRequest{}).ToBatchCommandsRequest()
_, err = sendBatchRequest(ctx, addr, conn.batchConn, req, 100*time.Second)
c.Assert(err, IsNil)
// 2. test trigger idle heartbeat and cannot found any conn.
ctx = failpoint.WithHook(context.TODO(), func(ctx context.Context, fpname string) bool {
if fpname == sendIdleReq || fpname == noStripResp || fpname == noAvConn {
return true
}
return false
})
var dieNode []string
rpcClient.dieEventListener = func(addr []string) {
dieNode = append(dieNode, addr...)
}
_, err = sendBatchRequest(ctx, addr, conn.batchConn, req, 100*time.Second)
c.Assert(err, NotNil) // no available connections
c.Assert(conn.batchConn.isDie(), IsTrue)
c.Assert(atomic.LoadUint32(conn.batchConn.dieNotify), Equals, uint32(1))
rpcClient.recycleDieConnArray()
c.Assert(len(dieNode), Equals, 1)
c.Assert(dieNode[0], Equals, addr)
// 3. test trigger idle heartbeat and send fail before send.
conn, err = rpcClient.getConnArray(addr, true)
c.Assert(err, IsNil)
ctx = failpoint.WithHook(context.TODO(), func(ctx context.Context, fpname string) bool {
if fpname == sendIdleReq || fpname == noStripResp || fpname == failBeforeSend {
return true
}
return false
})
dieNode = dieNode[:0]
rpcClient.dieEventListener = func(addr []string) {
dieNode = append(dieNode, addr...)
}
_, err = sendBatchRequest(ctx, addr, conn.batchConn, req, 100*time.Second)
c.Assert(err, NotNil) // no available connections
c.Assert(conn.batchConn.isDie(), IsTrue)
c.Assert(atomic.LoadUint32(conn.batchConn.dieNotify), Equals, uint32(1))
rpcClient.recycleDieConnArray()
c.Assert(len(dieNode), Greater, 0)
c.Assert(dieNode[0], Equals, addr)
rpcClient.recycleDieConnArray()
c.Assert(len(dieNode), Equals, 1)
c.Assert(dieNode[0], Equals, addr)
}
response.controller.ts
/*---------------------------------------------------------------------------------------------
* Copyright (c) AIDITTO AB. All rights reserved.
* Licensed under the MIT License. See LICENSE in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import {authenticate} from "@loopback/authentication";
import {service} from "@loopback/core";
import {Count, CountSchema, Filter, FilterExcludingWhere, Where} from "@loopback/repository";
import {del, get, getModelSchemaRef, param, patch, post, put, requestBody} from "@loopback/rest";
import {Response} from "../models";
import {ResponseService} from "../services";
@authenticate("jwt")
export class ResponseController {
constructor(
@service(ResponseService)
        public responseService: ResponseService
) {}
@authenticate.skip()
@post("/responses", {
responses: {
"200": {
description: "Response model instance",
content: {"application/json": {schema: getModelSchemaRef(Response)}},
},
},
})
async create(
@requestBody({
content: {
"application/json": {
schema: getModelSchemaRef(Response, {
title: "NewResponse",
exclude: ["id"],
}),
},
},
})
response: Omit<Response, "id">
): Promise<Response> {
        return this.responseService.create(response);
}
@get("/responses/count", {
responses: {
"200": {
description: "Response model count",
content: {"application/json": {schema: CountSchema}},
},
},
})
async count(@param.where(Response) where?: Where<Response>): Promise<Count> {
        return this.responseService.count(where);
}
@get("/responses", {
responses: {
"200": {
description: "Array of Response model instances",
content: {
"application/json": {
schema: {
type: "array",
items: getModelSchemaRef(Response, {includeRelations: true}),
},
},
},
},
},
})
async find(@param.filter(Response) filter?: Filter<Response>): Promise<Response[]> {
        return this.responseService.find(filter);
}
@authenticate.skip()
@patch("/responses", {
responses: {
"200": {
description: "Response PATCH success count",
content: {"application/json": {schema: CountSchema}},
},
},
})
async updateAll(
@requestBody({
content: {
"application/json": {
schema: getModelSchemaRef(Response, {partial: true}),
},
},
})
response: Response,
@param.where(Response) where?: Where<Response>
): Promise<Count> {
        return this.responseService.updateAll(response, where);
}
@get("/responses/{id}", {
responses: {
"200": {
description: "Response model instance",
content: {
"application/json": {
schema: getModelSchemaRef(Response, {includeRelations: true}),
},
},
},
},
})
async findById(
@param.path.number("id") id: number,
@param.filter(Response, {exclude: "where"}) filter?: FilterExcludingWhere<Response>
): Promise<Response> {
        return this.responseService.findById(id, filter);
}
@patch("/responses/{id}", {
responses: {
"204": {
description: "Response PATCH success",
},
},
})
async updateById(
@param.path.number("id") id: number,
@requestBody({
content: {
"application/json": {
schema: getModelSchemaRef(Response, {partial: true}),
},
},
})
response: Response
): Promise<void> {
        await this.responseService.updateById(id, response);
}
@put("/responses/{id}", {
responses: {
"204": {
description: "Response PUT success",
},
},
})
async replaceById(@param.path.number("id") id: number, @requestBody() response: Response): Promise<void> {
        await this.responseService.replaceById(id, response);
}
@del("/responses/{id}", {
responses: {
"204": {
description: "Response DELETE success",
},
},
})
async deleteById(@param.path.number("id") id: number): Promise<void> {
        await this.responseService.deleteById(id);
}
}
subscriptions.go
package cockroach
import (
"context"
"database/sql"
"fmt"
"github.com/steeling/InterUSS-Platform/pkg/dss/models"
dsserr "github.com/steeling/InterUSS-Platform/pkg/errors"
"github.com/golang/geo/s2"
"github.com/lib/pq"
"go.uber.org/multierr"
"go.uber.org/zap"
)
const (
// Defined in requirement DSS0030.
maxSubscriptionsPerArea = 10
)
var subscriptionFields = "subscriptions.id, subscriptions.owner, subscriptions.url, subscriptions.notification_index, subscriptions.starts_at, subscriptions.ends_at, subscriptions.updated_at"
var subscriptionFieldsWithoutPrefix = "id, owner, url, notification_index, starts_at, ends_at, updated_at"
func (c *Store) fetchSubscriptions(ctx context.Context, q queryable, query string, args ...interface{}) ([]*models.Subscription, error) {
rows, err := q.QueryContext(ctx, query, args...)
if err != nil {
return nil, err
}
defer rows.Close()
var payload []*models.Subscription
for rows.Next() {
s := new(models.Subscription)
err := rows.Scan(
&s.ID,
&s.Owner,
&s.Url,
&s.NotificationIndex,
&s.StartTime,
&s.EndTime,
&s.Version,
)
if err != nil {
return nil, err
}
payload = append(payload, s)
}
if err := rows.Err(); err != nil {
return nil, err
}
return payload, nil
}
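// fetchSubscriptionsByCellsWithoutOwner fetches the subscriptions for the
// given cells, excluding those that belong to the supplied owner.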
func (c *Store) fetchSubscriptionsByCellsWithoutOwner(ctx context.Context, q queryable, cells []int64, owner models.Owner) ([]*models.Subscription, error) {
var subscriptionsQuery = fmt.Sprintf(`
SELECT
%s
FROM
subscriptions
LEFT JOIN
(SELECT DISTINCT subscription_id FROM cells_subscriptions WHERE cell_id = ANY($1))
AS
unique_subscription_ids
ON
subscriptions.id = unique_subscription_ids.subscription_id
WHERE
subscriptions.owner != $2`, subscriptionFields)
return c.fetchSubscriptions(ctx, q, subscriptionsQuery, pq.Array(cells), owner)
}
func (c *Store) fetchSubscription(ctx context.Context, q queryable, query string, args ...interface{}) (*models.Subscription, error) {
subs, err := c.fetchSubscriptions(ctx, q, query, args...)
if err != nil {
return nil, err
}
if len(subs) > 1 {
return nil, multierr.Combine(err, fmt.Errorf("query returned %d subscriptions", len(subs)))
}
if len(subs) == 0 {
return nil, sql.ErrNoRows
}
return subs[0], nil
}
func (c *Store) fetchSubscriptionByID(ctx context.Context, q queryable, id models.ID) (*models.Subscription, error) {
var query = fmt.Sprintf(`SELECT %s FROM subscriptions WHERE id = $1`, subscriptionFields)
return c.fetchSubscription(ctx, q, query, id)
}
func (c *Store) fetchSubscriptionByIDAndOwner(ctx context.Context, q queryable, id models.ID, owner models.Owner) (*models.Subscription, error) {
var query = fmt.Sprintf(`
SELECT %s FROM
subscriptions
WHERE
id = $1
AND owner = $2`, subscriptionFields)
return c.fetchSubscription(ctx, q, query, id, owner)
}
// fetchMaxSubscriptionCountByCellAndOwner counts how many subscriptions the
// owner has in each one of these cells, and returns the number of subscriptions
// in the cell with the highest number of subscriptions.
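// For example, if an owner's subscription A covers cells {1, 2} and their
// subscription B covers cells {2, 3}, then querying cells {1, 2, 3} yields
// per-cell counts of 1, 2, and 1, so this returns 2 (numbers illustrative).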
func (c *Store) fetchMaxSubscriptionCountByCellAndOwner(
ctx context.Context, q queryable, cells s2.CellUnion, owner models.Owner) (int, error) {
var query = `
SELECT
MAX(subscriptions_per_cell_id)
FROM (
SELECT
COUNT(*) AS subscriptions_per_cell_id
FROM
subscriptions AS s,
cells_subscriptions as c
WHERE
s.id = c.subscription_id AND
s.owner = $1 AND
c.cell_id = ANY($2)
GROUP BY c.cell_id
)`
cids := make([]int64, len(cells))
for i, cell := range cells {
cids[i] = int64(cell)
}
row := q.QueryRowContext(ctx, query, owner, pq.Array(cids))
var ret int
err := row.Scan(&ret)
return ret, err
}
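// pushSubscription upserts the subscription row itself, then upserts one
// cells_subscriptions row per covering cell, and finally deletes any cell
// mappings that are no longer part of the subscription's cell union.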
func (c *Store) pushSubscription(ctx context.Context, q queryable, s *models.Subscription) (*models.Subscription, error) {
var (
upsertQuery = fmt.Sprintf(`
UPSERT INTO
subscriptions
(%s)
VALUES
($1, $2, $3, $4, $5, $6, transaction_timestamp())
RETURNING
%s`, subscriptionFieldsWithoutPrefix, subscriptionFields)
subscriptionCellQuery = `
UPSERT INTO
cells_subscriptions
(cell_id, cell_level, subscription_id)
VALUES
($1, $2, $3)
`
deleteLeftOverCellsForSubscriptionQuery = `
DELETE FROM
cells_subscriptions
WHERE
cell_id != ALL($1)
AND
subscription_id = $2`
)
cids := make([]int64, len(s.Cells))
clevels := make([]int, len(s.Cells))
for i, cell := range s.Cells {
cids[i] = int64(cell)
clevels[i] = cell.Level()
}
cells := s.Cells
s, err := c.fetchSubscription(ctx, q, upsertQuery,
s.ID,
s.Owner,
s.Url,
s.NotificationIndex,
s.StartTime,
s.EndTime)
if err != nil {
return nil, err
}
s.Cells = cells
for i := range cids {
if _, err := q.ExecContext(ctx, subscriptionCellQuery, cids[i], clevels[i], s.ID); err != nil {
return nil, err
}
}
if _, err := q.ExecContext(ctx, deleteLeftOverCellsForSubscriptionQuery, pq.Array(cids), s.ID); err != nil {
return nil, err
}
return s, nil
}
// GetSubscription returns the subscription identified by "id".
func (c *Store) GetSubscription(ctx context.Context, id models.ID) (*models.Subscription, error) {
	return c.fetchSubscriptionByID(ctx, c.DB, id)
}
// InsertSubscription inserts subscription s into the store and returns
// the resulting subscription including its ID.
func (c *Store) InsertSubscription(ctx context.Context, s models.Subscription) (*models.Subscription, error) {
tx, err := c.Begin()
if err != nil {
return nil, err
}
old, err := c.fetchSubscriptionByID(ctx, tx, s.ID)
switch {
case err == sql.ErrNoRows:
break
case err != nil:
return nil, multierr.Combine(err, tx.Rollback())
}
switch {
case old == nil && !s.Version.Empty():
// The user wants to update an existing subscription, but one wasn't found.
return nil, multierr.Combine(dsserr.NotFound(s.ID.String()), tx.Rollback())
case old != nil && s.Version.Empty():
// The user wants to create a new subscription but it already exists.
return nil, multierr.Combine(dsserr.AlreadyExists(s.ID.String()), tx.Rollback())
case old != nil && !s.Version.Matches(old.Version):
// The user wants to update a subscription but the version doesn't match.
return nil, multierr.Combine(dsserr.VersionMismatch("old version"), tx.Rollback())
}
// Validate and perhaps correct StartTime and EndTime.
if err := s.AdjustTimeRange(c.clock.Now(), old); err != nil {
return nil, multierr.Combine(err, tx.Rollback())
}
// Check the user hasn't created too many subscriptions in this area.
if old == nil {
count, err := c.fetchMaxSubscriptionCountByCellAndOwner(ctx, tx, s.Cells, s.Owner)
if err != nil {
c.logger.Warn("Error fetching max subscription count", zap.Error(err))
} else if count >= maxSubscriptionsPerArea {
return nil, multierr.Combine(dsserr.Exhausted(
"too many existing subscriptions in this area"), tx.Rollback())
}
}
	newSubscription, err := c.pushSubscription(ctx, tx, &s)
	if err != nil {
		return nil, multierr.Combine(err, tx.Rollback())
	}
if err := tx.Commit(); err != nil {
return nil, err
}
return newSubscription, nil
}
// DeleteSubscription deletes the subscription identified by "id" and
// returns the deleted subscription.
func (c *Store) DeleteSubscription(ctx context.Context, id models.ID, owner models.Owner, version *models.Version) (*models.Subscription, error) {
const (
query = `
DELETE FROM
subscriptions
WHERE
id = $1
AND owner = $2`
)
tx, err := c.Begin()
if err != nil {
return nil, err
}
// We fetch to know whether to return a concurrency error, or a not found error
old, err := c.fetchSubscriptionByIDAndOwner(ctx, tx, id, owner)
switch {
case err == sql.ErrNoRows: // Return a 404 here.
return nil, multierr.Combine(dsserr.NotFound(id.String()), tx.Rollback())
case err != nil:
return nil, multierr.Combine(err, tx.Rollback())
case !version.Empty() && !version.Matches(old.Version):
return nil, multierr.Combine(dsserr.VersionMismatch("old version"), tx.Rollback())
}
if _, err := tx.ExecContext(ctx, query, id, owner); err != nil {
return nil, multierr.Combine(err, tx.Rollback())
}
if err := tx.Commit(); err != nil {
return nil, err
}
return old, nil
}
// SearchSubscriptions returns all subscriptions in "cells".
func (c *Store) SearchSubscriptions(ctx context.Context, cells s2.CellUnion, owner models.Owner) ([]*models.Subscription, error) {
var (
query = fmt.Sprintf(`
SELECT
%s
FROM
subscriptions
LEFT JOIN
(SELECT DISTINCT cells_subscriptions.subscription_id FROM cells_subscriptions WHERE cells_subscriptions.cell_id = ANY($1))
AS
unique_subscription_ids
ON
subscriptions.id = unique_subscription_ids.subscription_id
WHERE
subscriptions.owner = $2`, subscriptionFields)
)
if len(cells) == 0 {
return nil, dsserr.BadRequest("no location provided")
}
tx, err := c.Begin()
if err != nil {
return nil, err
}
cids := make([]int64, len(cells))
for i, cell := range cells {
cids[i] = int64(cell)
}
subscriptions, err := c.fetchSubscriptions(ctx, tx, query, pq.Array(cids), owner)
if err != nil {
return nil, multierr.Combine(err, tx.Rollback())
}
if err := tx.Commit(); err != nil {
return nil, err
}
return subscriptions, nil
}
image_similarity.rs
use opencv::core::{Mat, Scalar, Size_, dct, CV_64FC1};
use opencv::imgcodecs::imread;
use opencv::imgproc::{self, cvt_color, resize, COLOR_RGB2GRAY, COLOR_RGBA2GRAY};
use super::error::ImageSimilarityError;
use walkdir::WalkDir;
/// Compute the similarity of two given image
///
/// # Example
/// ```rust
/// let image_a = opencv::imgcodecs::imread("/PATH/TO/IMAGE/A", 0).expect("Invalid image file a");
/// let image_b = opencv::imgcodecs::imread("/PATH/TO/IMAGE/B", 0).expect("Invalid image file b");
/// match similarity(&image_a, &image_b, 64, 16) {
/// Ok(similarity) => println!("{}", similarity),
/// Err(e) => println!("{}", e),
/// }
/// ```
pub fn similarity(img_a: &Mat, img_b: &Mat, length: i32, dct_length: i32) -> Result<f64, ImageSimilarityError> {
// of course length and dct_length should be greater than 0
if length <= 0 { return Err(ImageSimilarityError { reason: format!("length should be a positive number instead of {}", length)}) }
    if dct_length <= 0 { return Err(ImageSimilarityError { reason: format!("dct_length should be a positive number instead of {}", dct_length)}) }
// try to compute phash for `img_a` and `img_b`
let phash_img_a = compute_phash(img_a, length, dct_length)?;
let phash_img_b = compute_phash(img_b, length, dct_length)?;
// compute their hamming distance
Ok(hamming_distance(&phash_img_a, &phash_img_b))
}
/// Compute similarities of all images with allowed extensions in given directory
///
/// # Example
/// ```rust
/// match similarity_directory("/PATH/TO/A/DIRECTORY", &vec!["png", "jpg", "jpeg"]) {
/// Some(result) => println!("{:#?}", result),
/// None => println!("No available images with given extensions in the given directory"),
/// };
/// ```
pub fn similarity_directory(directory: &str, allowed_ext: &Vec<&str>) -> Option<Vec<(f64, String, String)>> {
// compute all phashes in directory with given allowed file extensions
let all_image_file = compute_phash_directory(directory, allowed_ext);
// the result should be an array of tuple (similarity, image a, image b)
let mut result: Vec<(f64, String, String)> = Vec::new();
match all_image_file.len() {
// 0 is boring
0 => None,
// so is 1
1 => {
result.push((1.0, all_image_file[0].0.clone(), all_image_file[0].0.clone()));
Some(result)
},
_ => {
// compute hamming distance for all image pairs
for a_index in 0..(all_image_file.len() - 1) {
for b_index in (a_index + 1)..all_image_file.len() {
let img_a_data = &all_image_file[a_index];
let img_b_data = &all_image_file[b_index];
result.push((hamming_distance(&img_a_data.1, &img_b_data.1), img_a_data.0.clone(), img_b_data.0.clone()));
}
}
// sort by similarity desc
result.sort_by(|a, b| b.0.partial_cmp(&a.0).unwrap());
Some(result)
}
}
}
/// Compute similarities of given image with all images that ends in allowed extensions in given directory
///
/// # Example
/// ```rust
/// let image = opencv::imgcodecs::imread("/PATH/TO/IMAGE", 0).expect("Invalid image file");
/// match similarity_file_directory(&image, "/PATH/TO/A/DIRECTORY", &vec!["png", "jpg", "jpeg"]) {
/// Some(result) => println!("{:#?}", result),
/// None => println!("No available images with given extensions in the given directory"),
/// };
/// ```
pub fn similarity_file_directory(image: &Mat, directory: &str, allowed_ext: &Vec<&str>) -> Result<Option<Vec<(f64, String)>>, ImageSimilarityError> {
let image_phash = compute_phash(&image, 64, 16)?;
// compute all phashes in directory with given allowed file extensions
let all_image_file = compute_phash_directory(directory, allowed_ext);
match all_image_file.len() {
// 0 is boring
0 => Ok(None),
_ => {
// compute hamming distance for all image pairs
// the result should be an array of tuple (similarity, image in directory)
let mut result: Vec<(f64, String)> = all_image_file.iter().map(|image_data| {
(hamming_distance(&image_phash, &image_data.1), image_data.0.clone())
}).collect();
// sort by similarity desc
result.sort_by(|a, b| b.0.partial_cmp(&a.0).unwrap());
Ok(Some(result))
}
}
}
/// Compute all phashes in directory with given allowed file extensions
///
/// # Example
/// ```rust
/// println!("{:#?}", compute_phash_directory("/PATH/TO/A/DIRECTORY"));
/// ```
fn compute_phash_directory(directory: &str, allowed_ext: &Vec<&str>) -> Vec<(String, String)> {
// walk given directory
WalkDir::new(directory).into_iter()
.filter_map(|e| e.ok()) // keep all ok files
.filter_map(|file_entry| {
// filter by user given allowed file extensions
// store path to the file
let filepath = file_entry.path().to_str().unwrap();
// split file path by `.`
let parts: Vec<&str> = filepath.split('.').collect();
// check whether the extension is allowed
if let Some(_) = allowed_ext.iter().find(|&&ext| ext == parts[parts.len() - 1]) {
// keep
Some(String::from(filepath))
} else {
// no
None
}
}).filter_map(|file| {
// with all files with allowed extensions
// try to load the file as image
let img = match imread(&file, 0) {
// proceed next step if successfully opened
Ok(img) => img,
// otherwise throw this file
Err(_) => return None,
};
// compute phash of this file with resize length 64 and dct length 16
match compute_phash(&img, 64, 16) {
// if nothing goes wrong while computing phash
// then return a tuple, (filepath, phash)
Ok(phash) => Some((file, phash)),
// otherwise throw this file
Err(_) => None
}
}).collect()
}
/// Compute pHash of given image
///
/// # Example
/// ```rust
/// let image = opencv::imgcodecs::imread("/PATH/TO/IMAGE", 0).expect("Invalid image file");
/// match compute_phash(&image, 64, 16) {
/// Ok(phash) => println!("{}", phash),
/// Err(e) => println!("{}", e),
/// }
/// ```
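///
/// Pipeline sketch: grayscale the image, resize it to `length` x `length`,
/// apply a DCT, take the mean of the top-left `dct_length` x `dct_length`
/// coefficients (excluding the DC term), then emit one bit per coefficient:
/// "1" if it is at least the mean, "0" otherwise.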
fn compute_phash(img: &Mat, length: i32, dct_length: i32) -> Result<String, ImageSimilarityError> {
// we need the image to be grayscale and resized to a reasonable size
fn assert_gray_and_size(img: &Mat, length: i32) -> Result<Mat, ImageSimilarityError> {
// create a new Mat for the gray image
let mut gray = Mat::default()?;
// check number of channels of orginal image
match img.channels()? {
// it's already a grayscale image
// just copy it
1 => gray = img.clone()?,
// for image with 3 or 4 channels,
// convert it to grayscale
3 => cvt_color(&img, &mut gray, COLOR_RGB2GRAY, 0)?,
4 => cvt_color(&img, &mut gray, COLOR_RGBA2GRAY, 0)?,
// we don't support image with any other number of channels
_ => return Err(ImageSimilarityError { reason: format!("Image with {} channels is not supported yet", img.channels().unwrap()) }),
};
// create a new Mat for the resized image
let mut resized = Mat::default()?;
// specific size
let size = Size_::new(length, length);
// and resize the original image
resize(&gray, &mut resized, size, 0.0, 0.0, imgproc::INTER_LINEAR)?;
Ok(resized)
}
// try to get the resized and grayscale image
let resized_gray = assert_gray_and_size(&img, length)?;
// convert the underlaying type of resized_gray into double
let mut double_type_img = Mat::new_rows_cols_with_default(resized_gray.rows()?, resized_gray.cols()?, CV_64FC1, Scalar::new(0.0, 0.0, 0.0, 0.0))?;
Mat::convert_to(&resized_gray, &mut double_type_img, CV_64FC1, 1.0, 0.0)?;
// and then do dct
let mut dct_img = Mat::default()?;
dct(&double_type_img, &mut dct_img, 0)?;
// compute the mean value of dct image
let mut mean: f64 = 0.0;
for row in 0..dct_length {
for col in 0..dct_length {
mean += dct_img.at(row + col * length)?;
}
}
    // remember to subtract the first value of dct
mean -= dct_img.at(0)?;
mean /= (length * length - 1) as f64;
// build the phash string of the given image
let mut phash = String::new();
for row in 0..dct_length {
for col in 0..dct_length {
let value: &f64 = dct_img.at(row + col * length)?;
if value < &mean {
phash.push_str("0");
} else {
phash.push_str("1");
}
}
}
Ok(phash)
}
/// Compute hamming distance of two given string
///
/// # Example
/// ```rust
/// println!("{}", hamming_distance(&String::from("111"), &String::from("101")));
/// ```
fn hamming_distance(a: &String, b: &String) -> f64 {
    // get the lengths of the two phash strings
    let len1 = a.len();
    let len2 = b.len();
    // only strings of equal, non-zero length are comparable; anything else
    // counts as completely dissimilar (an unsigned `len1 - len2` would also
    // underflow when len2 > len1)
    if len1 != len2 || len1 == 0 {
        return 0.0;
    }
    // count the positions where the two strings disagree
    let dist = a.chars().zip(b.chars()).filter(|(ca, cb)| ca != cb).count();
    1.0 - (dist as f64) / (len1 as f64)
}
pandas_dataset.py
from __future__ import division
import inspect
import json
import re
from datetime import datetime
from functools import wraps
import jsonschema
import sys
import numpy as np
import pandas as pd
from dateutil.parser import parse
from scipy import stats
from six import PY3, integer_types, string_types
from numbers import Number
from .dataset import Dataset
from great_expectations.data_asset.util import DocInherit, parse_result_format
from great_expectations.dataset.util import \
is_valid_partition_object, is_valid_categorical_partition_object, is_valid_continuous_partition_object, \
_scipy_distribution_positional_args_from_dict, validate_distribution_parameters
class MetaPandasDataset(Dataset):
"""MetaPandasDataset is a thin layer between Dataset and PandasDataset.
This two-layer inheritance is required to make @classmethod decorators work.
Practically speaking, that means that MetaPandasDataset implements \
expectation decorators, like `column_map_expectation` and `column_aggregate_expectation`, \
and PandasDataset implements the expectation methods themselves.
"""
def __init__(self, *args, **kwargs):
super(MetaPandasDataset, self).__init__(*args, **kwargs)
@classmethod
def column_map_expectation(cls, func):
"""Constructs an expectation using column-map semantics.
The MetaPandasDataset implementation replaces the "column" parameter supplied by the user with a pandas Series
object containing the actual column from the relevant pandas dataframe. This simplifies the implementing expectation
logic while preserving the standard Dataset signature and expected behavior.
See :func:`column_map_expectation <great_expectations.data_asset.dataset.Dataset.column_map_expectation>` \
for full documentation of this function.
"""
if PY3:
argspec = inspect.getfullargspec(func)[0][1:]
else:
argspec = inspect.getargspec(func)[0][1:]
@cls.expectation(argspec)
@wraps(func)
def inner_wrapper(self, column, mostly=None, result_format=None, *args, **kwargs):
if result_format is None:
result_format = self.default_expectation_args["result_format"]
result_format = parse_result_format(result_format)
# FIXME temporary fix for missing/ignored value
ignore_values = [None, np.nan]
if func.__name__ in ['expect_column_values_to_not_be_null', 'expect_column_values_to_be_null']:
ignore_values = []
# Counting the number of unexpected values can be expensive when there is a large
# number of np.nan values.
# This only happens on expect_column_values_to_not_be_null expectations.
# Since there is no reason to look for most common unexpected values in this case,
# we will instruct the result formatting method to skip this step.
result_format['partial_unexpected_count'] = 0
series = self[column]
# FIXME rename to mapped_ignore_values?
if len(ignore_values) == 0:
boolean_mapped_null_values = np.array(
[False for value in series])
else:
boolean_mapped_null_values = np.array([True if (value in ignore_values) or (pd.isnull(value)) else False
for value in series])
element_count = int(len(series))
# FIXME rename nonnull to non_ignored?
nonnull_values = series[boolean_mapped_null_values == False]
nonnull_count = int((boolean_mapped_null_values == False).sum())
boolean_mapped_success_values = func(
self, nonnull_values, *args, **kwargs)
success_count = np.count_nonzero(boolean_mapped_success_values)
unexpected_list = list(
nonnull_values[boolean_mapped_success_values == False])
unexpected_index_list = list(
nonnull_values[boolean_mapped_success_values == False].index)
success, percent_success = self._calc_map_expectation_success(
success_count, nonnull_count, mostly)
return_obj = self._format_map_output(
result_format, success,
element_count, nonnull_count,
len(unexpected_list),
unexpected_list, unexpected_index_list
)
# FIXME Temp fix for result format
if func.__name__ in ['expect_column_values_to_not_be_null', 'expect_column_values_to_be_null']:
del return_obj['result']['unexpected_percent_nonmissing']
try:
del return_obj['result']['partial_unexpected_counts']
del return_obj['result']['partial_unexpected_list']
except KeyError:
pass
return return_obj
inner_wrapper.__name__ = func.__name__
inner_wrapper.__doc__ = func.__doc__
return inner_wrapper
@classmethod
def column_pair_map_expectation(cls, func):
"""
The column_pair_map_expectation decorator handles boilerplate issues surrounding the common pattern of evaluating
truthiness of some condition on a per row basis across a pair of columns.
"""
if PY3:
argspec = inspect.getfullargspec(func)[0][1:]
else:
argspec = inspect.getargspec(func)[0][1:]
@cls.expectation(argspec)
@wraps(func)
def inner_wrapper(self, column_A, column_B, mostly=None, ignore_row_if="both_values_are_missing", result_format=None, *args, **kwargs):
if result_format is None:
result_format = self.default_expectation_args["result_format"]
series_A = self[column_A]
series_B = self[column_B]
if ignore_row_if == "both_values_are_missing":
boolean_mapped_null_values = series_A.isnull() & series_B.isnull()
elif ignore_row_if == "either_value_is_missing":
boolean_mapped_null_values = series_A.isnull() | series_B.isnull()
elif ignore_row_if == "never":
boolean_mapped_null_values = series_A.map(lambda x: False)
else:
raise ValueError(
"Unknown value of ignore_row_if: %s", (ignore_row_if,))
assert len(series_A) == len(
series_B), "Series A and B must be the same length"
# This next bit only works if series_A and _B are the same length
element_count = int(len(series_A))
nonnull_count = (boolean_mapped_null_values == False).sum()
nonnull_values_A = series_A[boolean_mapped_null_values == False]
nonnull_values_B = series_B[boolean_mapped_null_values == False]
nonnull_values = [value_pair for value_pair in zip(
list(nonnull_values_A),
list(nonnull_values_B)
)]
boolean_mapped_success_values = func(
self, nonnull_values_A, nonnull_values_B, *args, **kwargs)
success_count = boolean_mapped_success_values.sum()
unexpected_list = [value_pair for value_pair in zip(
list(series_A[(boolean_mapped_success_values == False) & (
boolean_mapped_null_values == False)]),
list(series_B[(boolean_mapped_success_values == False) & (
boolean_mapped_null_values == False)])
)]
unexpected_index_list = list(series_A[(boolean_mapped_success_values == False) & (
boolean_mapped_null_values == False)].index)
success, percent_success = self._calc_map_expectation_success(
success_count, nonnull_count, mostly)
return_obj = self._format_map_output(
result_format, success,
element_count, nonnull_count,
len(unexpected_list),
unexpected_list, unexpected_index_list
)
return return_obj
inner_wrapper.__name__ = func.__name__
inner_wrapper.__doc__ = func.__doc__
return inner_wrapper
@classmethod
def multicolumn_map_expectation(cls, func):
"""
The multicolumn_map_expectation decorator handles boilerplate issues surrounding the common pattern of
evaluating truthiness of some condition on a per row basis across a set of columns.
"""
if PY3:
argspec = inspect.getfullargspec(func)[0][1:]
else:
argspec = inspect.getargspec(func)[0][1:]
@cls.expectation(argspec)
@wraps(func)
def inner_wrapper(self, column_list, mostly=None, ignore_row_if="all_values_are_missing",
result_format=None, *args, **kwargs):
if result_format is None:
result_format = self.default_expectation_args["result_format"]
test_df = self[column_list]
if ignore_row_if == "all_values_are_missing":
boolean_mapped_skip_values = test_df.isnull().all(axis=1)
elif ignore_row_if == "any_value_is_missing":
boolean_mapped_skip_values = test_df.isnull().any(axis=1)
elif ignore_row_if == "never":
boolean_mapped_skip_values = pd.Series([False] * len(test_df))
else:
raise ValueError(
"Unknown value of ignore_row_if: %s", (ignore_row_if,))
boolean_mapped_success_values = func(
self, test_df[boolean_mapped_skip_values == False], *args, **kwargs)
success_count = boolean_mapped_success_values.sum()
nonnull_count = (~boolean_mapped_skip_values).sum()
element_count = len(test_df)
unexpected_list = test_df[(boolean_mapped_skip_values == False) & (boolean_mapped_success_values == False)]
unexpected_index_list = list(unexpected_list.index)
success, percent_success = self._calc_map_expectation_success(
success_count, nonnull_count, mostly)
return_obj = self._format_map_output(
result_format, success,
element_count, nonnull_count,
len(unexpected_list),
unexpected_list.to_dict(orient='records'), unexpected_index_list
)
return return_obj
inner_wrapper.__name__ = func.__name__
inner_wrapper.__doc__ = func.__doc__
return inner_wrapper
class PandasDataset(MetaPandasDataset, pd.DataFrame):
"""
PandasDataset instantiates the great_expectations Expectations API as a subclass of a pandas.DataFrame.
For the full API reference, please see :func:`Dataset <great_expectations.data_asset.dataset.Dataset>`
Notes:
    1. Samples and Subsets of PandasDataset have ALL the expectations of the original \
data frame unless the user specifies the ``discard_subset_failing_expectations = True`` \
property on the original data frame.
    2. Concatenations, joins, and merges of PandasDatasets contain NO expectations (since no autoinspection
is performed by default).
"""
# this is necessary to subclass pandas in a proper way.
# NOTE: specifying added properties in this way means that they will NOT be carried over when
# the dataframe is manipulated, which we might want. To specify properties that are carried over
# to manipulation results, we would just use `_metadata = ['row_count', ...]` here. The most likely
# case is that we want the former, but also want to re-initialize these values to None so we don't
# get an attribute error when trying to access them (I think this could be done in __finalize__?)
_internal_names = pd.DataFrame._internal_names + [
'caching',
]
_internal_names_set = set(_internal_names)
# We may want to expand or alter support for subclassing dataframes in the future:
# See http://pandas.pydata.org/pandas-docs/stable/extending.html#extending-subclassing-pandas
@property
def _constructor(self):
return self.__class__
def __finalize__(self, other, method=None, **kwargs):
if isinstance(other, PandasDataset):
self._initialize_expectations(other.get_expectations_config(
discard_failed_expectations=False,
discard_result_format_kwargs=False,
discard_include_configs_kwargs=False,
discard_catch_exceptions_kwargs=False))
# If other was coerced to be a PandasDataset (e.g. via _constructor call during self.copy() operation)
# then it may not have discard_subset_failing_expectations set. Default to self value
self.discard_subset_failing_expectations = getattr(other, "discard_subset_failing_expectations",
self.discard_subset_failing_expectations)
if self.discard_subset_failing_expectations:
self.discard_failing_expectations()
super(PandasDataset, self).__finalize__(other, method, **kwargs)
return self
def __init__(self, *args, **kwargs):
super(PandasDataset, self).__init__(*args, **kwargs)
self.discard_subset_failing_expectations = kwargs.get(
'discard_subset_failing_expectations', False)
def get_row_count(self):
return self.shape[0]
def get_table_columns(self):
return list(self.columns)
def get_column_sum(self, column):
return self[column].sum()
def get_column_max(self, column, parse_strings_as_datetimes=False):
temp_column = self[column].dropna()
if parse_strings_as_datetimes:
temp_column = temp_column.map(parse)
return temp_column.max()
def get_column_min(self, column, parse_strings_as_datetimes=False):
temp_column = self[column].dropna()
if parse_strings_as_datetimes:
temp_column = temp_column.map(parse)
return temp_column.min()
def get_column_mean(self, column):
return self[column].mean()
def get_column_nonnull_count(self, column):
series = self[column]
null_indexes = series.isnull()
nonnull_values = series[null_indexes == False]
return len(nonnull_values)
def get_column_value_counts(self, column):
return self[column].value_counts()
def get_column_unique_count(self, column):
return self.get_column_value_counts(column).shape[0]
def get_column_modes(self, column):
return list(self[column].mode().values)
def get_column_median(self, column):
return self[column].median()
def get_column_stdev(self, column):
return self[column].std()
def get_column_hist(self, column, bins):
hist, bin_edges = np.histogram(self[column], bins, density=False)
return list(hist)
def get_column_count_in_range(self, column, min_val=None, max_val=None, min_strictly=False, max_strictly=True):
# TODO this logic could probably go in the non-underscore version if we want to cache
if min_val is None and max_val is None:
raise ValueError('Must specify either min or max value')
if min_val is not None and max_val is not None and min_val > max_val:
raise ValueError('Min value must be <= to max value')
result = self[column]
if min_val is not None:
if min_strictly:
result = result[result > min_val]
else:
result = result[result >= min_val]
if max_val is not None:
if max_strictly:
result = result[result < max_val]
else:
result = result[result <= max_val]
return len(result)
### Expectation methods ###
@DocInherit
@MetaPandasDataset.column_map_expectation
    def expect_column_values_to_be_unique(self, column,
mostly=None,
result_format=None, include_config=False, catch_exceptions=None, meta=None):
return ~column.duplicated(keep=False)
# @Dataset.expectation(['column', 'mostly', 'result_format'])
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_not_be_null(self, column,
mostly=None,
result_format=None, include_config=False, catch_exceptions=None, meta=None, include_nulls=True):
return ~column.isnull()
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_be_null(self, column,
mostly=None,
result_format=None, include_config=False, catch_exceptions=None, meta=None):
return column.isnull()
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_be_of_type(self, column, type_,
mostly=None,
result_format=None, include_config=False, catch_exceptions=None, meta=None):
# Target Datasource {numpy, python} was removed in favor of a simpler type mapping
type_map = {
"null": [type(None), np.nan],
"boolean": [bool, np.bool_],
"int": [int, np.int64] + list(integer_types),
"long": [int, np.longdouble] + list(integer_types),
"float": [float, np.float_],
"double": [float, np.longdouble],
"bytes": [bytes, np.bytes_],
"string": [string_types, np.string_]
}
target_type = type_map[type_]
return column.map(lambda x: isinstance(x, tuple(target_type)))
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_be_in_type_list(self, column, type_list,
mostly=None,
result_format=None, include_config=False, catch_exceptions=None, meta=None):
# Target Datasource {numpy, python} was removed in favor of a simpler type mapping
type_map = {
"null": [type(None), np.nan],
"boolean": [bool, np.bool_],
"int": [int, np.int64] + list(integer_types),
"long": [int, np.longdouble] + list(integer_types),
"float": [float, np.float_],
"double": [float, np.longdouble],
"bytes": [bytes, np.bytes_],
"string": [string_types, np.string_]
}
# Build one type list with each specified type list from type_map
target_type_list = list()
for type_ in type_list:
target_type_list += type_map[type_]
return column.map(lambda x: isinstance(x, tuple(target_type_list)))
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_be_in_set(self, column, value_set,
mostly=None,
parse_strings_as_datetimes=None,
result_format=None, include_config=False, catch_exceptions=None, meta=None):
if parse_strings_as_datetimes:
parsed_value_set = self._parse_value_set(value_set)
else:
parsed_value_set = value_set
return column.isin(parsed_value_set)
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_not_be_in_set(self, column, value_set,
mostly=None,
parse_strings_as_datetimes=None,
result_format=None, include_config=False, catch_exceptions=None, meta=None):
if parse_strings_as_datetimes:
parsed_value_set = self._parse_value_set(value_set)
else:
parsed_value_set = value_set
return ~column.isin(parsed_value_set)
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_be_between(self,
column,
min_value=None, max_value=None,
parse_strings_as_datetimes=None,
output_strftime_format=None,
allow_cross_type_comparisons=None,
mostly=None,
result_format=None, include_config=False, catch_exceptions=None, meta=None
):
if min_value is None and max_value is None:
raise ValueError("min_value and max_value cannot both be None")
if parse_strings_as_datetimes:
if min_value:
min_value = parse(min_value)
if max_value:
max_value = parse(max_value)
temp_column = column.map(parse)
else:
temp_column = column
if min_value is not None and max_value is not None and min_value > max_value:
raise ValueError("min_value cannot be greater than max_value")
def is_between(val):
# TODO Might be worth explicitly defining comparisons between types (for example, between strings and ints).
# Ensure types can be compared since some types in Python 3 cannot be logically compared.
# print type(val), type(min_value), type(max_value), val, min_value, max_value
            if val is None:
                return False
else:
if min_value is not None and max_value is not None:
if allow_cross_type_comparisons:
try:
return (min_value <= val) and (val <= max_value)
except TypeError:
return False
else:
if (isinstance(val, string_types) != isinstance(min_value, string_types)) or (isinstance(val, string_types) != isinstance(max_value, string_types)):
raise TypeError(
"Column values, min_value, and max_value must either be None or of the same type.")
return (min_value <= val) and (val <= max_value)
elif min_value is None and max_value is not None:
if allow_cross_type_comparisons:
try:
return val <= max_value
except TypeError:
return False
else:
if isinstance(val, string_types) != isinstance(max_value, string_types):
raise TypeError(
"Column values, min_value, and max_value must either be None or of the same type.")
return val <= max_value
elif min_value is not None and max_value is None:
if allow_cross_type_comparisons:
try:
return min_value <= val
except TypeError:
return False
else:
if isinstance(val, string_types) != isinstance(min_value, string_types):
raise TypeError(
"Column values, min_value, and max_value must either be None or of the same type.")
return min_value <= val
else:
return False
return temp_column.map(is_between)
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_be_increasing(self, column, strictly=None, parse_strings_as_datetimes=None,
mostly=None,
result_format=None, include_config=False, catch_exceptions=None, meta=None):
if parse_strings_as_datetimes:
temp_column = column.map(parse)
col_diff = temp_column.diff()
# The first element is null, so it gets a bye and is always treated as True
col_diff[0] = pd.Timedelta(1)
if strictly:
return col_diff > pd.Timedelta(0)
else:
return col_diff >= pd.Timedelta(0)
else:
col_diff = column.diff()
# The first element is null, so it gets a bye and is always treated as True
col_diff[col_diff.isnull()] = 1
if strictly:
return col_diff > 0
else:
return col_diff >= 0
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_be_decreasing(self, column, strictly=None, parse_strings_as_datetimes=None,
mostly=None,
result_format=None, include_config=False, catch_exceptions=None, meta=None):
if parse_strings_as_datetimes:
temp_column = column.map(parse)
col_diff = temp_column.diff()
# The first element is null, so it gets a bye and is always treated as True
col_diff[0] = pd.Timedelta(-1)
if strictly:
return col_diff < pd.Timedelta(0)
else:
return col_diff <= pd.Timedelta(0)
else:
col_diff = column.diff()
# The first element is null, so it gets a bye and is always treated as True
col_diff[col_diff.isnull()] = -1
if strictly:
return col_diff < 0
else:
return col_diff <= 0
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_value_lengths_to_be_between(self, column, min_value=None, max_value=None,
mostly=None,
result_format=None, include_config=False, catch_exceptions=None, meta=None):
if min_value is None and max_value is None:
raise ValueError("min_value and max_value cannot both be None")
# Assert that min_value and max_value are integers
try:
if min_value is not None and not float(min_value).is_integer():
raise ValueError("min_value and max_value must be integers")
if max_value is not None and not float(max_value).is_integer():
raise ValueError("min_value and max_value must be integers")
except ValueError:
raise ValueError("min_value and max_value must be integers")
column_lengths = column.astype(str).str.len()
if min_value is not None and max_value is not None:
return column_lengths.between(min_value, max_value)
elif min_value is None and max_value is not None:
return column_lengths <= max_value
elif min_value is not None and max_value is None:
return column_lengths >= min_value
else:
return False
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_value_lengths_to_equal(self, column, value,
mostly=None,
result_format=None, include_config=False, catch_exceptions=None, meta=None):
return column.str.len() == value
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_match_regex(self, column, regex,
mostly=None,
result_format=None, include_config=False, catch_exceptions=None, meta=None):
return column.astype(str).str.contains(regex)
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_not_match_regex(self, column, regex,
mostly=None,
result_format=None, include_config=False, catch_exceptions=None, meta=None):
return ~column.astype(str).str.contains(regex)
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_match_regex_list(self, column, regex_list, match_on="any",
mostly=None,
result_format=None, include_config=False, catch_exceptions=None, meta=None):
regex_matches = []
for regex in regex_list:
regex_matches.append(column.astype(str).str.contains(regex))
regex_match_df = pd.concat(regex_matches, axis=1, ignore_index=True)
if match_on == "any":
return regex_match_df.any(axis='columns')
elif match_on == "all":
return regex_match_df.all(axis='columns')
else:
raise ValueError("match_on must be either 'any' or 'all'")
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_not_match_regex_list(self, column, regex_list,
mostly=None,
result_format=None, include_config=False, catch_exceptions=None, meta=None):
regex_matches = []
for regex in regex_list:
regex_matches.append(column.astype(str).str.contains(regex))
regex_match_df = pd.concat(regex_matches, axis=1, ignore_index=True)
return ~regex_match_df.any(axis='columns')
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_match_strftime_format(self, column, strftime_format,
mostly=None,
result_format=None, include_config=False, catch_exceptions=None,
meta=None):
# Below is a simple validation that the provided format can both format and parse a datetime object.
# %D is an example of a format that can format but not parse, e.g.
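        # For instance, datetime.now().strftime("%D") yields "MM/DD/YY" on most
        # platforms, but datetime.strptime(..., "%D") raises ValueError because
        # %D is not a supported parsing directive.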
try:
datetime.strptime(datetime.strftime(
datetime.now(), strftime_format), strftime_format)
        except ValueError as e:
            raise ValueError(
                "Unable to use provided strftime_format. " + str(e))
def is_parseable_by_format(val):
try:
datetime.strptime(val, strftime_format)
return True
except TypeError as e:
raise TypeError("Values passed to expect_column_values_to_match_strftime_format must be of type string.\nIf you want to validate a column of dates or timestamps, please call the expectation before converting from string format.")
except ValueError as e:
return False
return column.map(is_parseable_by_format)
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_be_dateutil_parseable(self, column,
mostly=None,
result_format=None, include_config=False, catch_exceptions=None, meta=None):
def is_parseable(val):
try:
                if not isinstance(val, str):
raise TypeError(
"Values passed to expect_column_values_to_be_dateutil_parseable must be of type string.\nIf you want to validate a column of dates or timestamps, please call the expectation before converting from string format.")
parse(val)
return True
except (ValueError, OverflowError):
return False
return column.map(is_parseable)
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_be_json_parseable(self, column,
mostly=None,
result_format=None, include_config=False, catch_exceptions=None, meta=None):
def is_json(val):
try:
json.loads(val)
return True
            except (TypeError, ValueError):
return False
return column.map(is_json)
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_match_json_schema(self, column, json_schema,
mostly=None,
result_format=None, include_config=False, catch_exceptions=None, meta=None):
def matches_json_schema(val):
try:
val_json = json.loads(val)
jsonschema.validate(val_json, json_schema)
# jsonschema.validate raises an error if validation fails.
# So if we make it this far, we know that the validation succeeded.
return True
except jsonschema.ValidationError:
return False
except jsonschema.SchemaError:
raise
return column.map(matches_json_schema)
@DocInherit
@MetaPandasDataset.column_aggregate_expectation
def expect_column_parameterized_distribution_ks_test_p_value_to_be_greater_than(self, column, distribution,
p_value=0.05, params=None,
result_format=None,
include_config=False,
catch_exceptions=None, meta=None):
column = self[column]
if p_value <= 0 or p_value >= 1:
raise ValueError("p_value must be between 0 and 1 exclusive")
# Validate params
try:
validate_distribution_parameters(
distribution=distribution, params=params)
except ValueError as e:
raise e
# Format arguments for scipy.kstest
if (isinstance(params, dict)):
positional_parameters = _scipy_distribution_positional_args_from_dict(
distribution, params)
else:
positional_parameters = params
# K-S Test
ks_result = stats.kstest(column, distribution,
args=positional_parameters)
return {
"success": ks_result[1] >= p_value,
"result": {
"observed_value": ks_result[1],
"details": {
"expected_params": positional_parameters,
"observed_ks_result": ks_result
}
}
}
@DocInherit
@MetaPandasDataset.column_aggregate_expectation
def expect_column_bootstrapped_ks_test_p_value_to_be_greater_than(self, column, partition_object=None, p=0.05, bootstrap_samples=None, bootstrap_sample_size=None,
result_format=None, include_config=False, catch_exceptions=None, meta=None):
column = self[column]
if not is_valid_continuous_partition_object(partition_object):
raise ValueError("Invalid continuous partition object.")
# TODO: consider changing this into a check that tail_weights does not exist exclusively, by moving this check into is_valid_continuous_partition_object
if (partition_object['bins'][0] == -np.inf) or (partition_object['bins'][-1] == np.inf):
raise ValueError("Partition endpoints must be finite.")
if "tail_weights" in partition_object and np.sum(partition_object["tail_weights"]) > 0:
raise ValueError("Partition cannot have tail weights -- endpoints must be finite.")
test_cdf = np.append(np.array([0]), np.cumsum(
partition_object['weights']))
def estimated_cdf(x):
return np.interp(x, partition_object['bins'], test_cdf)
if bootstrap_samples is None:
bootstrap_samples = 1000
if bootstrap_sample_size is None:
# Sampling too many elements (or not bootstrapping) will make the test too sensitive to the fact that we've
# compressed via a partition.
# Sampling too few elements will make the test insensitive to significant differences, especially
# for nonoverlapping ranges.
bootstrap_sample_size = len(partition_object['weights']) * 2
results = [stats.kstest(
np.random.choice(column, size=bootstrap_sample_size, replace=True),
estimated_cdf)[1]
for k in range(bootstrap_samples)]
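        # add-one smoothing: (1 + #{results >= p}) / (N + 1) keeps the
        # estimated p-value strictly inside (0, 1)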
test_result = (1 + sum(x >= p for x in results)) / \
(bootstrap_samples + 1)
hist, bin_edges = np.histogram(column, partition_object['bins'])
below_partition = len(
np.where(column < partition_object['bins'][0])[0])
above_partition = len(
np.where(column > partition_object['bins'][-1])[0])
# Expand observed partition to report, if necessary
if below_partition > 0 and above_partition > 0:
observed_bins = [np.min(column)] + \
partition_object['bins'] + [np.max(column)]
observed_weights = np.concatenate(
([below_partition], hist, [above_partition])) / len(column)
elif below_partition > 0:
observed_bins = [np.min(column)] + partition_object['bins']
observed_weights = np.concatenate(
([below_partition], hist)) / len(column)
elif above_partition > 0:
observed_bins = partition_object['bins'] + [np.max(column)]
observed_weights = np.concatenate(
(hist, [above_partition])) / len(column)
else:
observed_bins = partition_object['bins']
observed_weights = hist / len(column)
observed_cdf_values = np.cumsum(observed_weights)
return_obj = {
"success": test_result > p,
"result": {
"observed_value": test_result,
"details": {
"bootstrap_samples": bootstrap_samples,
"bootstrap_sample_size": bootstrap_sample_size,
"observed_partition": {
"bins": observed_bins,
"weights": observed_weights.tolist()
},
"expected_partition": {
"bins": partition_object['bins'],
"weights": partition_object['weights']
},
"observed_cdf": {
"x": observed_bins,
"cdf_values": [0] + observed_cdf_values.tolist()
},
"expected_cdf": {
"x": partition_object['bins'],
"cdf_values": test_cdf.tolist()
}
}
}
}
return return_obj
@DocInherit
@MetaPandasDataset.column_pair_map_expectation
def expect_column_pair_values_to_be_equal(self,
column_A,
column_B,
ignore_row_if="both_values_are_missing",
result_format=None, include_config=False, catch_exceptions=None, meta=None
):
return column_A == column_B
@DocInherit
@MetaPandasDataset.column_pair_map_expectation
def expect_column_pair_values_A_to_be_greater_than_B(self,
column_A,
column_B,
or_equal=None,
parse_strings_as_datetimes=None,
allow_cross_type_comparisons=None,
ignore_row_if="both_values_are_missing",
result_format=None, include_config=False, catch_exceptions=None, meta=None
):
# FIXME
        if allow_cross_type_comparisons:
raise NotImplementedError
if parse_strings_as_datetimes:
temp_column_A = column_A.map(parse)
temp_column_B = column_B.map(parse)
else:
temp_column_A = column_A
temp_column_B = column_B
        if or_equal:
return temp_column_A >= temp_column_B
else:
return temp_column_A > temp_column_B
@DocInherit
@MetaPandasDataset.column_pair_map_expectation
def expect_column_pair_values_to_be_in_set(self,
column_A,
column_B,
value_pairs_set,
ignore_row_if="both_values_are_missing",
result_format=None, include_config=False, catch_exceptions=None, meta=None
):
temp_df = pd.DataFrame({"A": column_A, "B": column_B})
value_pairs_set = {(x, y) for x, y in value_pairs_set}
results = []
for i, t in temp_df.iterrows():
if pd.isnull(t["A"]):
a = None
else:
a = t["A"]
if pd.isnull(t["B"]):
b = None
else:
b = t["B"]
results.append((a, b) in value_pairs_set)
return pd.Series(results, temp_df.index)
@DocInherit
@MetaPandasDataset.multicolumn_map_expectation
def expect_multicolumn_values_to_be_unique(self,
column_list,
ignore_row_if="all_values_are_missing",
result_format=None, include_config=False, catch_exceptions=None, meta=None
):
threshold = len(column_list.columns)
# Do not dropna here, since we have separately dealt with na in decorator
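        # a row passes only when every value in it is distinct, i.e. the number
        # of unique values equals the number of columns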
return column_list.nunique(dropna=False, axis=1) >= threshold
| expect_column_values_to_be_unique |
zz_generated.deepcopy.go | // +build !ignore_autogenerated
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package rest
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ConnectRequest) DeepCopyInto(out *ConnectRequest) {
*out = *in
if in.Options == nil {
out.Options = nil
} else {
out.Options = in.Options.DeepCopyObject()
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectRequest.
func (in *ConnectRequest) DeepCopy() *ConnectRequest {
if in == nil {
return nil
}
out := new(ConnectRequest)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ConnectRequest) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c | } | }
return nil |
postProcessFiles.py | import argparse
import collections
import math
import os
import matplotlib.pyplot as plt
import pandas as pd
def my_plot_function(showGraph=False, x_axis="episodes", algo="ppo", env=''):
masterDataFrameList = {}
dfs = []
divBy = 0
avg = None
    # visit only the immediate sub-directories of the target folder; each run
    # directory is expected to contain a log.csv
    for sd in next(os.walk(os.path.join(storagePath, args.dir)))[1]:
        df_ = pd.read_csv(os.path.join(storagePath, args.dir, sd, "log.csv"), sep=",")
        dfs.append(df_)
        if avg is None:
            avg = df_.copy()
            for header in list(df_.copy()):
                masterDataFrameList[header] = df_[[header, x_axis]]
        else:
            avg += df_.copy()
            for header in list(df_.copy()):
                masterDataFrameList[header][header + str(len(dfs))] = df_[header]
        divBy += 1
avg = avg / divBy
for index, whatToPlot in enumerate(separate_graphs):
if len(whatToPlot) == 0:
continue
        biggestLastY = -math.inf
        smallestLastY = math.inf
ax = None
horizontalLine1 = None
horizontalLine2 = None
desiredTitle = "untitled"
def | (x):
if whatToPlot.get(x[0], {}).get("secondary_y", 0):
return 1
return 0
# we order the dict, because if we graph a separate_y before a regular one, the std shading is broken :'(
# so we try to graph separate_y last
masterDataFrameList = collections.OrderedDict(sorted(masterDataFrameList.items(), key=lambda x:sort_(x)))
for colName in masterDataFrameList:
if colName == x_axis or colName not in whatToPlot or whatToPlot[colName].get('ignore', False):
continue
if whatToPlot[colName].get('desiredTitle', False):
desiredTitle = whatToPlot[colName]['desiredTitle']
if whatToPlot[colName].get('horizontalLine1', None) is not None:
horizontalLine1 = whatToPlot[colName].get('horizontalLine1', False)
if whatToPlot[colName].get('horizontalLine2', None) is not None:
horizontalLine2 = whatToPlot[colName].get('horizontalLine2', False)
thisCompoundDf = masterDataFrameList[colName]
plotMe = thisCompoundDf.loc[:,[c for c in thisCompoundDf.columns if c != x_axis]].mean(axis=1).to_frame()
plotMe.columns = [colName]
std = thisCompoundDf.loc[:, [c for c in thisCompoundDf.columns if c != x_axis]].std(axis=1)
if whatToPlot[colName]['normalise']:
plotMe[colName] = (plotMe[colName] - plotMe[colName].min()) / (
plotMe[colName].max() - plotMe[colName].min())
assert len(thisCompoundDf[colName]) == len(std)
addedValues = plotMe[colName].add(std).values
plotMe = plotMe.assign(stdPLUS=addedValues)
plotMe = plotMe.assign(justSTD=std)
subbedValues = plotMe[colName].subtract(std).values
plotMe = plotMe.assign(stdMINUS=subbedValues)
if x_axis == "episodes":
plotMe = plotMe.assign(episodes=thisCompoundDf[x_axis].values)
elif x_axis == "frames":
plotMe = plotMe.assign(frames=thisCompoundDf[x_axis].values)
annotateAllChanges = whatToPlot[colName]['annotateAllChanges'] if not ONLY_LAST_VALUE_OVERWRITE else False
# https://stackoverflow.com/questions/8409095/set-markers-for-individual-points-on-a-line-in-matplotlib
percentilesPretty = [colName]
plot_kwargs = {
'x': x_axis,
'y': percentilesPretty,
'figsize':(12, 8),
'label': [whatToPlot.get(n, {}).get('alias', n) for n in percentilesPretty]
}
if ax is not None: plot_kwargs['ax'] = ax
if whatToPlot[colName].get('secondary_y', False): plot_kwargs['secondary_y'] = True
if whatToPlot[colName].get('desiredColour', False): plot_kwargs['color'] = whatToPlot[colName]['desiredColour']
ax = plotMe.plot(**plot_kwargs)
fill_between_kwargs = {
'alpha': 0.2,
}
if whatToPlot[colName].get('desiredColour', False): fill_between_kwargs['color'] = whatToPlot[colName][
'desiredColour']
plt.fill_between(plotMe[x_axis], plotMe['stdPLUS'], plotMe['stdMINUS'], **fill_between_kwargs)
prev = None
for pi, colNamePretty in enumerate(percentilesPretty):
for i, (a, b) in enumerate(zip(plotMe[x_axis], plotMe[colNamePretty])):
lastValue = i+1 == len(plotMe[x_axis])
if b != prev or lastValue:
plusSTD = round(plotMe['justSTD'][i], 2)
if whatToPlot[colName].get('hidePlusMinus', False):
writeMeOnGraph = str(round(b, 2))
else:
writeMeOnGraph = str(round(b, 2)) + " (+-" + str(plusSTD) + ")"
if annotateAllChanges or lastValue:
### simple annotation
if not NO_NUMBERS and not whatToPlot[colName].get('hideValue', False):
plt.annotate(writeMeOnGraph, xy=(a, b))
if lastValue:
biggestLastY = max(biggestLastY, b)
smallestLastY = min(smallestLastY, b)
prev = b
if horizontalLine1 is not None:
plt.axhline(y=horizontalLine1, color='firebrick', linestyle='--', label="Random Full")
if horizontalLine2 is not None:
plt.axhline(y=horizontalLine2, color='midnightblue', linestyle=':', label="Random Button")
plt.title(desiredTitle + ", " + algo + " " + env)
### axis modification
axes = plt.gca()
plt.grid(True)
plt.savefig(os.path.join(storagePath, args.dir, "graph" + str(index) + "_" + desiredTitle + "_" + algo + "_" + env + ".pdf"))
if showGraph:
plt.show()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--dir", required=True,
help="folder name")
parser.add_argument("--x_axis", default="frames",
help="x_axis")
parser.add_argument("--algo", default="ppo",
help="which algo to save under")
parser.add_argument("--env", default=False,
help="which env to save under")
parser.add_argument("--storagePath", required=True, default=True,
help="storagePath")
parser.add_argument("--showGraph", required=False, default=True, type=int,
help="plot.show() or not")
args = parser.parse_args()
storagePath = args.storagePath
horizontal = -2
horizontal3 = -1/2
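    # NOTE: the reassignments below hard-code the run configuration and take
    # precedence over the --x_axis/--algo/--env CLI arguments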
x_axis = args.x_axis
x_axis = "frames"
x_axis = "episodes"
algo = args.algo
algo = "ppo"
# algo = "dqn"
env = args.env if args.env else ''
env = "(2, 2, 1, 2, 1)"
ONLY_LAST_VALUE_OVERWRITE = True
ONLY_LAST_VALUE_OVERWRITE = False
NO_NUMBERS = True
NO_NUMBERS = False
realGraph0Perf = {
"X_all_performance_full" : { # data += [(finalTestPerformanceFull + finalTrainingPerformanceFull)/2]
"desiredTitle": "performance graph",
"alias": "performance_full",
"normalise": False,
"annotateAllChanges": False,
'desiredColour': u'#1f77b4', # blue
"horizontalLine1": horizontal,
"horizontalLine2": 0,
"horizontalLine3": horizontal3,
},
"X_all_performance" : { # data += [(finalTestPerformance + finalTrainingPerformance)/2]
"alias": "performance_button",
"normalise": False,
"annotateAllChanges": False,
'desiredColour': u'#ff7f0e', # orange
},
"X_performance_full" : {
"alias": "performance_button",
"normalise": False,
"annotateAllChanges": False,
'desiredColour': u'#ff7f0e', # orange
},
}
# colours not done
realGraph1Training = {
"X_TRAINtest_reward" : {
"desiredTitle": "reward, entropy and buttons in training",
"alias": "return",
"normalise": False,
"annotateAllChanges": False,
"secondary_y": True,
},
"X_epsilon": {
"desiredTitle": "reward, entropy and buttons in training",
"alias": "epsilon",
"normalise": False,
"annotateAllChanges": False,
"hideValue": True,
"hidePlusMinus": True,
},
"entropy" : {
"alias": "entropy",
"normalise": False,
"annotateAllChanges": False,
"hideValue": True,
"hidePlusMinus": True,
},
"buttons_mean" : { # ppo
"alias": "training_buttons",
"normalise": False,
"annotateAllChanges": False
},
"X_buttons_mean" : { # dqn
"alias": "training_buttons",
"normalise": False,
"annotateAllChanges": False,
'desiredColour': '#9467bd', # purple
},
"X_TRAINtest_buttons" : {
"desiredTitle": "reward, entropy and buttons in training",
"alias": "test_buttons",
"normalise": False,
"annotateAllChanges": False,
'desiredColour': '#9467bd', # purple
},
}
# colours not done
realGraph2TrainingPerfAndReward = {
"X_TRAINtest_reward": {
"desiredTitle": "reward and performances in training graph",
"alias": "training_reward",
"normalise": False,
"annotateAllChanges": False,
"secondary_y": True,
},
"X_TRAINtest_performance": {
"desiredTitle": "reward and performances in training graph",
"alias": "training_performance_button",
"normalise": False,
"annotateAllChanges": False,
"ignore": False,
},
"X_TRAINtest_performance_full": {
"alias": "training_performance_full",
"normalise": False,
"annotateAllChanges": False
},
}
# colours not done
realGraph3TestPerfAndReward = {
"X_test_reward": {
"desiredTitle": "reward and performances in test graph",
"alias": "test_reward",
"normalise": False,
"annotateAllChanges": False,
"secondary_y": True,
},
"X_test_performance": {
"alias": "test_performance_button",
"normalise": False,
"annotateAllChanges": False
},
"X_test_performance_full": {
"alias": "test_performance_full",
"normalise": False,
"annotateAllChanges": False
},
}
realGraph4TrainingPerfVSTestBreakdown = {
"X_test_performance_full": {
"desiredTitle": "training and test performance breakdown",
"alias": "test_performance_full",
"normalise": False,
"annotateAllChanges": False,
# "secondary_y": True,
"horizontalLine1": horizontal,
"horizontalLine2": 0,
'desiredColour': u'#1f77b4', # blue
},
"X_test_performance": {
"alias": "test_performance_button",
"normalise": False,
"annotateAllChanges": False,
'desiredColour': u'#ff7f0e', # orange
},
"X_TRAINtest_performance_full": {
"alias": "training_performance_full",
"normalise": False,
"annotateAllChanges": False,
'desiredColour': '#17becf', # light blue
},
"X_TRAINtest_performance": {
"alias": "training_performance_button",
"normalise": False,
"annotateAllChanges": False,
'desiredColour': '#FFA500', # orange
},
}
# colours not done
realGraph5Buttons = {
"buttons_mean" : { # ppo
"alias": "training_buttons",
"normalise": False,
"annotateAllChanges": False
},
"X_buttons_mean" : { # dqn
"alias": "training_buttons",
"normalise": False,
"annotateAllChanges": False
},
"X_TRAINtest_buttons" : {
"alias": "test_buttons",
"normalise": False,
"annotateAllChanges": False
},
"X_epsilon": {
"desiredTitle": "graph of buttons",
"alias": "epsilon",
"normalise": False,
"annotateAllChanges": False
},
"entropy": {
"desiredTitle": "graph of buttons",
"alias": "entropy",
"normalise": False,
"annotateAllChanges": False
},
}
#colours not done
realGraph6Phones = {
"buttons_mean" : { # ppo
"desiredTitle": "graph of phones",
"alias": "training_buttons",
"normalise": False,
"annotateAllChanges": False
},
"X_buttons_mean" : { # dqn
"alias": "training_buttons",
"normalise": False,
"annotateAllChanges": False
},
"X_TRAINtest_buttons" : {
"desiredTitle": "graph of phones",
"alias": "test_buttons",
"normalise": False,
"annotateAllChanges": False
},
"X_TRAINtest_phones": {
"alias": "training_phones",
"normalise": False,
"annotateAllChanges": False
},
"X_test_phones" : {
"alias": "test_phones",
"normalise": False,
"annotateAllChanges": False
},
}
realGraph7PhonesButtonDirtTraining = {
"X_TRAINtest_phones": {
"desiredTitle": "graph of phones, dirts and buttons in training",
"alias": "training_phones",
"normalise": False,
"annotateAllChanges": False,
'desiredColour': '#d62728', # red
},
"X_TRAINtest_buttons": {
"alias": "training_buttons",
"normalise": False,
"annotateAllChanges": False,
'desiredColour': '#9467bd', #purple
},
"X_TRAINtest_dirts": {
"alias": "training_dirts",
"normalise": False,
"annotateAllChanges": False,
'desiredColour': '#2ca02c', #green
},
"X_TRAINtest_messes": {
"alias": "training_messes",
"normalise": False,
"annotateAllChanges": False,
'desiredColour': '#7f7f7f', # grey
},
}
realGraph8PhonesButtonDirtTest = {
"X_test_phones": {
"desiredTitle": "graph of phones, dirts and buttons on the test set",
"alias": "test_phones",
"normalise": False,
"annotateAllChanges": False,
'desiredColour': '#d62728', # red
},
"X_test_buttons": {
"alias": "test_buttons",
"normalise": False,
"annotateAllChanges": False,
'desiredColour': '#9467bd', # purple
},
"X_test_dirts": {
"alias": "test_dirts",
"normalise": False,
"annotateAllChanges": False,
'desiredColour': '#2ca02c', # green
},
"X_test_messes": {
"alias": "test_messes",
"normalise": False,
"annotateAllChanges": False,
'desiredColour': '#7f7f7f', # grey
},
}
realGraph9PhonesButtonDirtAll = {
"X_TRAINtest_phones": {
"desiredTitle": "graph of phones, dirts and buttons in training and on the test set",
"alias": "training_phones",
"normalise": False,
"annotateAllChanges": False,
'desiredColour': '#ff7f0e', # orange (related to red)
},
"X_test_phones": {
"alias": "test_phones",
"normalise": False,
"annotateAllChanges": False,
'desiredColour': '#d62728', # red
},
"X_TRAINtest_buttons": {
"alias": "training_buttons",
"normalise": False,
"annotateAllChanges": False ,
'desiredColour': '#e377c2', # pink (related to purple)
},
"X_test_buttons": {
"alias": "test_buttons",
"normalise": False,
"annotateAllChanges": False,
'desiredColour': '#9467bd', # purple
},
"X_TRAINtest_dirts": {
"alias": "training_dirts",
"normalise": False,
"annotateAllChanges": False,
'desiredColour': '#bcbd22', # light green/yellow (related to green)
},
"X_test_dirts": {
"alias": "test_dirts",
"normalise": False,
"annotateAllChanges": False,
'desiredColour': '#2ca02c', # green
},
"X_TRAINtest_messes": {
"alias": "training_messes",
"normalise": False,
"annotateAllChanges": False,
'desiredColour': '#8c564b', # brown (related to grey)
},
"X_test_messes": {
"alias": "test_messes",
"normalise": False,
"annotateAllChanges": False,
'desiredColour': '#7f7f7f', # grey
},
}
separate_graphs = [
realGraph0Perf,
realGraph1Training,
realGraph2TrainingPerfAndReward,
realGraph3TestPerfAndReward,
realGraph4TrainingPerfVSTestBreakdown,
realGraph5Buttons,
realGraph6Phones,
realGraph7PhonesButtonDirtTraining,
realGraph8PhonesButtonDirtTest,
realGraph9PhonesButtonDirtAll,
]
    showGraph = args.showGraph != 0
my_plot_function(showGraph=showGraph, x_axis=x_axis, algo=algo, env=env) | sort_ |
binpacking_estimator_test.go | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package estimator
import (
"testing"
"k8s.io/contrib/cluster-autoscaler/simulator"
"k8s.io/kubernetes/pkg/api/resource"
apiv1 "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
"github.com/stretchr/testify/assert"
)
func TestBinpackingEstimate(t *testing.T) |
func TestBinpackingEstimateComingNodes(t *testing.T) {
estimator := NewBinpackingNodeEstimator(simulator.NewTestPredicateChecker())
cpuPerPod := int64(350)
memoryPerPod := int64(1000 * 1024 * 1024)
pod := makePod(cpuPerPod, memoryPerPod)
pods := make([]*apiv1.Pod, 0)
for i := 0; i < 10; i++ {
pods = append(pods, pod)
}
node := &apiv1.Node{
Status: apiv1.NodeStatus{
Capacity: apiv1.ResourceList{
apiv1.ResourceCPU: *resource.NewMilliQuantity(cpuPerPod*3-50, resource.DecimalSI),
apiv1.ResourceMemory: *resource.NewQuantity(2*memoryPerPod, resource.DecimalSI),
apiv1.ResourcePods: *resource.NewQuantity(10, resource.DecimalSI),
},
},
}
node.Status.Allocatable = node.Status.Capacity
nodeInfo := schedulercache.NewNodeInfo()
nodeInfo.SetNode(node)
estimate := estimator.Estimate(pods, nodeInfo, []*schedulercache.NodeInfo{nodeInfo, nodeInfo})
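	// capacity fits 2 pods per node (CPU: 2*350m <= 1000m; memory: 2*1000MiB), so 10 pods require 5 nodes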
// 5 - 2 nodes that are coming.
assert.Equal(t, 3, estimate)
}
func TestBinpackingEstimateWithPorts(t *testing.T) {
estimator := NewBinpackingNodeEstimator(simulator.NewTestPredicateChecker())
cpuPerPod := int64(200)
memoryPerPod := int64(1000 * 1024 * 1024)
pod := makePod(cpuPerPod, memoryPerPod)
pod.Spec.Containers[0].Ports = []apiv1.ContainerPort{
{
HostPort: 5555,
},
}
pods := make([]*apiv1.Pod, 0)
for i := 0; i < 8; i++ {
pods = append(pods, pod)
}
node := &apiv1.Node{
Status: apiv1.NodeStatus{
Capacity: apiv1.ResourceList{
apiv1.ResourceCPU: *resource.NewMilliQuantity(5*cpuPerPod, resource.DecimalSI),
apiv1.ResourceMemory: *resource.NewQuantity(5*memoryPerPod, resource.DecimalSI),
apiv1.ResourcePods: *resource.NewQuantity(10, resource.DecimalSI),
},
},
}
node.Status.Allocatable = node.Status.Capacity
nodeInfo := schedulercache.NewNodeInfo()
nodeInfo.SetNode(node)
estimate := estimator.Estimate(pods, nodeInfo, []*schedulercache.NodeInfo{})
assert.Equal(t, 8, estimate)
}
| {
estimator := NewBinpackingNodeEstimator(simulator.NewTestPredicateChecker())
cpuPerPod := int64(350)
memoryPerPod := int64(1000 * 1024 * 1024)
pod := makePod(cpuPerPod, memoryPerPod)
pods := make([]*apiv1.Pod, 0)
for i := 0; i < 10; i++ {
pods = append(pods, pod)
}
node := &apiv1.Node{
Status: apiv1.NodeStatus{
Capacity: apiv1.ResourceList{
apiv1.ResourceCPU: *resource.NewMilliQuantity(cpuPerPod*3-50, resource.DecimalSI),
apiv1.ResourceMemory: *resource.NewQuantity(2*memoryPerPod, resource.DecimalSI),
apiv1.ResourcePods: *resource.NewQuantity(10, resource.DecimalSI),
},
},
}
node.Status.Allocatable = node.Status.Capacity
nodeInfo := schedulercache.NewNodeInfo()
nodeInfo.SetNode(node)
estimate := estimator.Estimate(pods, nodeInfo, []*schedulercache.NodeInfo{})
assert.Equal(t, 5, estimate)
} |
contact.py | # -*- coding: utf-8 -*-
from model.contact import Contact
from selenium.webdriver.support.ui import Select
import re
class ContactHelper:
def __init__(self, app):
self.app = app
def fill_contact_form(self, contact):
wd = self.app.wd
        # fill contact fields (displayed on home page)
        self.change_contact_field("firstname", contact.first_name)
        self.change_contact_field("lastname", contact.last_name)
        self.change_contact_field("address", contact.address)
        self.change_contact_field("home", contact.home_phone_number)
self.change_contact_field("mobile", contact.mobile_phone_number)
self.change_contact_field("work", contact.work_phone_number)
self.change_contact_field("fax", contact.fax_number)
self.change_contact_field("email", contact.email_1)
self.change_contact_field("email2", contact.email_2)
self.change_contact_field("email3", contact.email_3)
self.change_contact_field("address2", contact.address2)
self.change_contact_field("notes", contact.notes)
    def change_contact_field(self, field_name, value):
        if value is not None:
            wd = self.app.wd
            wd.find_element_by_name(field_name).click()
            wd.find_element_by_name(field_name).clear()
            wd.find_element_by_name(field_name).send_keys(value)
def create_new(self, contact):
wd = self.app.wd
# create new contact
wd.find_element_by_link_text("add new").click()
self.fill_contact_form(contact)
# submit contact creation
wd.find_element_by_xpath("(//input[@name='submit'])[2]").click()
self.app.return_to_home_page()
self.contact_cache = None
def delete_first_contact(self):
self.delete_contact_by_index(0)
def delete_contact_by_index(self, index):
wd = self.app.wd
self.select_contact_for_del_by_index(index)
# delete element
wd.find_element_by_xpath("//input[@value='Delete']").click()
# submit element delete
wd.switch_to.alert.accept()
wd.implicitly_wait(3)
self.contact_cache = None
def delete_contact_by_id(self, id):
wd = self.app.wd
self.app.return_to_home_page()
self.select_contact_by_id(id)
wd.find_element_by_xpath("//input[@value='Delete']").click()
# submit element delete
wd.switch_to.alert.accept()
wd.implicitly_wait(3)
self.contact_cache = None
def select_contact_by_id(self, id):
wd = self.app.wd
wd.find_element_by_css_selector("input[value='%s']" % id).click()
def select_contact_for_del_by_index(self, index):
wd = self.app.wd
wd.find_elements_by_name("selected[]")[index].click()
def delete_all_contact(self):
wd = self.app.wd
wd.find_element_by_id("MassCB").click()
wd.find_element_by_xpath("//input[@value='Delete']").click()
# submit contact delete
wd.switch_to.alert.accept()
self.contact_cache = None
def edit_first_contact(self, contact):
wd = self.app.wd
self.edit_contact_by_index(0, contact)
def edit_contact_by_index(self, index, contact):
wd = self.app.wd
self.open_contact_to_edit_by_index(index)
# edit contact
self.fill_contact_form(contact)
wd.find_element_by_xpath("(//input[@name='update'])[2]").click()
self.app.return_to_home_page()
self.contact_cache = None
def open_contact_to_edit_by_index(self, index):
wd = self.app.wd
self.app.return_to_home_page()
wd.find_elements_by_xpath("//img[@alt='Edit']")[index].click()
def open_contact_view_by_index(self, index):
wd = self.app.wd
self.app.return_to_home_page()
wd.find_elements_by_xpath("//img[@alt='Details']")[index].click()
def contact_count(self):
wd = self.app.wd
self.app.return_to_home_page()
return int(wd.find_element_by_xpath("//span[@id='search_count']").text)
contact_cache = None
def get_contact_list(self):
if self.contact_cache is None:
wd = self.app.wd
self.app.return_to_home_page()
self.contact_cache = []
for row in wd.find_elements_by_name("entry"):
cells = row.find_elements_by_tag_name("td")
last_name = cells[1].text
name = cells[2].text
id = cells[0].find_element_by_tag_name("input").get_attribute("value")
address = cells[3].text
all_emails = cells[4].text
all_phones = cells[5].text
self.contact_cache.append(Contact(last_name=last_name, first_name=name, address = address, id=id,
all_emails_from_homepage=all_emails,
all_phones_from_home_page=all_phones))
return list(self.contact_cache)
def get_contact_info_from_edit_page(self, index):
wd = self.app.wd
self.open_contact_to_edit_by_index(index)
firstname = wd.find_element_by_name("firstname").get_attribute("value")
lastname = wd.find_element_by_name("lastname").get_attribute("value")
id = wd.find_element_by_name("id").get_attribute("value")
homephone = wd.find_element_by_name("home").get_attribute("value")
mobilephone = wd.find_element_by_name("mobile").get_attribute("value")
workphone = wd.find_element_by_name("work").get_attribute("value")
address = wd.find_element_by_name("address").get_attribute("value")
email1 = wd.find_element_by_name("email").get_attribute("value")
email2 = wd.find_element_by_name("email2").get_attribute("value")
email3 = wd.find_element_by_name("email3").get_attribute("value")
return Contact(first_name=firstname, last_name=lastname, id=id, home_phone_number=homephone,
mobile_phone_number=mobilephone,work_phone_number=workphone, address=address, email_1=email1,
email_2=email2, email_3=email3)
def get_contact_from_view_page(self, index):
wd = self.app.wd
self.open_contact_view_by_index(index)
text = wd.find_element_by_id("content").text
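        # the view page renders phone numbers as "H: ...", "M: ..." and
        # "W: ..." lines, so each one is extracted with a regex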
homephone = re.search("H: (.*)", text).group(1)
mobilephone = re.search("M: (.*)", text).group(1)
workphone = re.search("W: (.*)", text).group(1)
return Contact(home_phone_number=homephone, mobile_phone_number=mobilephone, work_phone_number=workphone)
def select_group_by_id(self, group_list_name, group_id):
wd = self.app.wd
wd.find_element_by_name(group_list_name).click()
Select(wd.find_element_by_name(group_list_name)).select_by_value(group_id)
def add_contact_in_group(self, group_id, contact_id):
wd = self.app.wd
self.app.return_to_home_page()
self.select_contact_by_id(contact_id)
self.select_group_by_id("to_group", group_id)
wd.find_element_by_name("add").click()
self.app.return_to_home_page()
def del_contact_from_group(self, group_id, contact_id):
| wd = self.app.wd
self.app.return_to_home_page()
self.select_group_by_id("group", group_id)
self.select_contact_by_id(contact_id)
wd.find_element_by_name("remove").click()
self.app.return_to_home_page() |
|
Perceptron.py | import numpy as np
from _data import DataSets
from _math import ActivationFunctions
from _plot import PlotUtils
class Perceptron:
def __init__(self, n, g):
|
def train(self, x, d):
k = len(x)
w = np.random.rand(len(x[0]))
epoch = 0
error = True
while error and epoch < 10000:
error = False
for i in range(0, k):
v = np.dot(np.transpose(w), x[i])
y = self.g(v)
if y != d[i]:
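                    # perceptron learning rule: w <- w + n * (d - y) * x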
w = np.add(w, np.multiply(self.n * (d[i] - y), x[i]))
error = True
epoch = epoch + 1
print(f"Epoch: {epoch}\tWeights: {w}")
self.plot_data_x.append(epoch)
self.plot_data_y.append(1 if error else 0)
return w
def test(self, w, x):
v = np.dot(np.transpose(w), x)
y = self.g(v)
return y
def evaluate(self, w, x, d):
correct = 0
total = len(x)
for i in range(0, len(x)):
y = self.test(w, x[i])
            if y == d[i]:
correct = correct + 1
accuracy = 100.0 * (float(correct) / float(total))
print(f"Accuracy: {accuracy:.2f}% ({correct}/{total})")
return accuracy
if __name__ == "__main__":
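    # NOTE: the ALL_CAPS names below are assignment placeholders (Portuguese:
    # NUMERO_DE_MATRICULA = enrollment number, NOME_DO_DATASET = dataset name,
    # TAXA_DE_APRENDIZADO = learning rate, FUNCAO_DE_ATIVACAO = activation
    # function) and must be replaced with real values before running.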
# set random number generator seed
np.random.seed(NUMERO_DE_MATRICULA)
# set floating point formatting when printing
np.set_printoptions(formatter={"float": "{: 0.6f}".format})
# load data
x = DataSets.NOME_DO_DATASET.input
d = DataSets.NOME_DO_DATASET.output
# define the network parameters
n = TAXA_DE_APRENDIZADO
g = ActivationFunctions.FUNCAO_DE_ATIVACAO
# create the neural network
nn = Perceptron(n, g)
# train the neural network
w = nn.train(x, d)
# evaluate the neural network
acc = nn.evaluate(w, x, d)
# plot epoch versus error data
PlotUtils.plot(nn.plot_data_x, "epoch", nn.plot_data_y, "error")
| self.n = n # learning rate
self.g = g # activation function
self.plot_data_x = [] # epochs for plotting
self.plot_data_y = [] # error for plotting
|
test_tsne.py | from cuml.manifold import TSNE
from sklearn.manifold import trustworthiness
from sklearn import datasets
import pandas as pd
import numpy as np
import cudf
import pytest
dataset_names = ['digits', 'boston', 'iris', 'breast_cancer',
'diabetes']
@pytest.mark.parametrize('name', dataset_names)
def test_tsne(name): | (1) cuDF DataFrames are passed input
(2) Numpy arrays are passed in
(3) Params are changed in the TSNE class
(4) The class gets re-used across time
(5) Trustworthiness is checked
(6) Tests NAN in TSNE output for learning rate explosions
(7) Tests verbosity
"""
    X = getattr(datasets, "load_{}".format(name))().data
X_cudf = cudf.DataFrame.from_pandas(pd.DataFrame(X))
for i in range(3):
print("iteration = ", i)
tsne = TSNE(2, random_state=i, verbose=0, learning_rate=2+i)
Y = tsne.fit_transform(X_cudf).to_pandas().values
nans = np.sum(np.isnan(Y))
trust = trustworthiness(X, Y)
print("Trust = ", trust)
assert trust > 0.76
assert nans == 0
del Y
# Reuse
Y = tsne.fit_transform(X)
nans = np.sum(np.isnan(Y))
trust = trustworthiness(X, Y)
print("Trust = ", trust)
assert trust > 0.76
assert nans == 0
del Y
# Again
tsne = TSNE(2, random_state=i+2, verbose=1, learning_rate=2+i+2)
Y = tsne.fit_transform(X_cudf).to_pandas().values
nans = np.sum(np.isnan(Y))
trust = trustworthiness(X, Y)
print("Trust = ", trust)
assert trust > 0.76
assert nans == 0
del Y
# Reuse
Y = tsne.fit_transform(X)
nans = np.sum(np.isnan(Y))
trust = trustworthiness(X, Y)
print("Trust = ", trust)
assert trust > 0.76
assert nans == 0
del Y | """
This tests how TSNE handles a lot of input data across time. |
index.ts | export * from './player-form.service'; |
||
count_min_row.rs | //! This mod implements Count Min Row.
//!
//! This file is a mechanical translation of the reference Golang code, available at https://github.com/dgryski/go-tinylfu/blob/master/cm4.go
//!
//! I claim no additional copyright over the original implementation.
use alloc::fmt::format;
use alloc::string::String;
use alloc::vec;
use alloc::vec::Vec;
use core::fmt::{Debug, Formatter};
use core::ops::{Index, IndexMut};
pub(crate) struct CountMinRow(Vec<u8>);
impl CountMinRow {
pub(crate) fn new(width: u64) -> Self {
Self(vec![0; width as usize])
}
pub(crate) fn get(&self, i: u64) -> u8 {
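        // two 4-bit counters are packed into each byte: even indices occupy
        // the low nibble, odd indices the high nibble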
((self[(i / 2) as usize] >> ((i & 1) * 4)) as u8) & 0x0f
}
pub(crate) fn increment(&mut self, i: u64) {
// Index of the counter
let idx = (i / 2) as usize;
// shift distance (even 0, odd 4).
let shift = (i & 1) * 4;
// counter value
let v = (self[idx] >> shift) & 0x0f;
// only increment if not max value (overflow wrap is bad for LFU).
if v < 15 {
self[idx] += 1 << shift;
}
}
pub(crate) fn reset(&mut self) {
// halve each counter
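        // the 0x77 mask clears bit 3 of each byte, dropping the bit that the
        // shift would otherwise carry down from the high nibble into the low one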
self.0.iter_mut().for_each(|v| *v = (*v >> 1) & 0x77)
}
pub(crate) fn clear(&mut self) {
// zero each counter
self.0.iter_mut().for_each(|v| *v = 0)
}
}
impl Index<usize> for CountMinRow {
type Output = u8;
fn index(&self, index: usize) -> &Self::Output {
self.0.index(index)
}
}
impl IndexMut<usize> for CountMinRow {
fn index_mut(&mut self, index: usize) -> &mut Self::Output {
self.0.index_mut(index)
}
}
impl Debug for CountMinRow {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
let mut s = String::new();
for i in 0..(self.0.len() * 2) {
s.push_str(&format(format_args!(
"{:02} ",
(self[i / 2] >> ((i & 1) * 4)) & 0x0f
)));
}
write!(f, "{}", s)
}
}
#[cfg(test)]
mod test {
use crate::lfu::tinylfu::sketch::count_min_row::CountMinRow;
#[test]
fn test_count_min_row() {
let mut cmr = CountMinRow::new(8);
cmr.increment(0);
assert_eq!(cmr[0], 0x01);
assert_eq!(cmr.get(0), 1);
assert_eq!(cmr.get(1), 0);
cmr.increment(1);
assert_eq!(cmr[0], 0x11);
assert_eq!(cmr.get(0), 1);
assert_eq!(cmr.get(1), 1);
(0..14).for_each(|_| cmr.increment(1));
assert_eq!(cmr[0], 0xf1); | // ensure clamped
(0..3).for_each(|_| {
cmr.increment(1);
assert_eq!(cmr[0], 0xf1);
});
cmr.reset();
assert_eq!(cmr[0], 0x70);
}
} | assert_eq!(cmr.get(1), 15);
assert_eq!(cmr.get(0), 1);
|
index.ts | import Moveable from "react-moveable";
export default Moveable; | export * from "react-moveable"; |
|
store.go | package websocket
import (
"encoding/json"
"time"
"github.com/peterzernia/lets-fork/utils"
)
func setParty(p Party) error {
rdb := utils.GetRDB()
jsn, err := json.Marshal(p)
if err != nil {
return err
}
rdb.Set("party:"+*p.ID, string(jsn), time.Hour)
if err != nil {
return err
}
return nil
}
func getParty(id string) (*Party, error) {
var party Party
rdb := utils.GetRDB()
p, err := rdb.Get("party:" + id).Result()
if err != nil {
return nil, err
}
err = json.Unmarshal([]byte(p), &party)
return &party, err
}
func setUser(u User) error {
rdb := utils.GetRDB()
jsn, err := json.Marshal(u)
if err != nil |
rdb.Set("user:"+*u.ID, string(jsn), time.Hour)
if err != nil {
return err
}
return nil
}
func getUser(id string) (*User, error) {
var user User
rdb := utils.GetRDB()
p, err := rdb.Get("user:" + id).Result()
if err != nil {
return nil, err
}
err = json.Unmarshal([]byte(p), &user)
return &user, err
}
| {
return err
} |
handler.go | package handler
import (
"context"
"fmt"
"free5gc/lib/http_wrapper"
"free5gc/lib/openapi/models"
"free5gc/lib/pfcp"
"free5gc/lib/pfcp/pfcpType"
"free5gc/lib/pfcp/pfcpUdp"
smf_context "free5gc/src/smf/context"
smf_message "free5gc/src/smf/handler/message"
"free5gc/src/smf/logger"
pfcp_message "free5gc/src/smf/pfcp/message"
"free5gc/src/smf/producer"
//"free5gc/src/smf/producer"
"net/http"
)
func HandlePfcpHeartbeatRequest(msg *pfcpUdp.Message) {
logger.PfcpLog.Warnf("PFCP Heartbeat Request handling is not implemented")
}
func HandlePfcpHeartbeatResponse(msg *pfcpUdp.Message) {
logger.PfcpLog.Warnf("PFCP Heartbeat Response handling is not implemented")
}
func HandlePfcpPfdManagementRequest(msg *pfcpUdp.Message) |
func HandlePfcpPfdManagementResponse(msg *pfcpUdp.Message) {
logger.PfcpLog.Warnf("PFCP PFD Management Response handling is not implemented")
}
func HandlePfcpAssociationSetupRequest(msg *pfcpUdp.Message) {
req := msg.PfcpMessage.Body.(pfcp.PFCPAssociationSetupRequest)
nodeID := req.NodeID
if nodeID == nil {
logger.PfcpLog.Errorln("pfcp association needs NodeID")
return
}
logger.PfcpLog.Infof("Handle PFCP Association Setup Request with NodeID[%s]", nodeID.ResolveNodeIdToIp().String())
upf := smf_context.RetrieveUPFNodeByNodeID(*nodeID)
if upf == nil {
logger.PfcpLog.Errorf("can't find UPF[%s]", nodeID.ResolveNodeIdToIp().String())
return
}
upf.UPIPInfo = *req.UserPlaneIPResourceInformation
// Response with PFCP Association Setup Response
cause := pfcpType.Cause{
CauseValue: pfcpType.CauseRequestAccepted,
}
pfcp_message.SendPfcpAssociationSetupResponse(msg.RemoteAddr, cause)
}
func HandlePfcpAssociationSetupResponse(msg *pfcpUdp.Message) {
req := msg.PfcpMessage.Body.(pfcp.PFCPAssociationSetupResponse)
nodeID := req.NodeID
if req.Cause.CauseValue == pfcpType.CauseRequestAccepted {
if nodeID == nil {
logger.PfcpLog.Errorln("pfcp association needs NodeID")
return
}
upf := smf_context.RetrieveUPFNodeByNodeID(*req.NodeID)
upf.UPFStatus = smf_context.AssociatedSetUpSuccess
if req.UserPlaneIPResourceInformation != nil {
upf.UPIPInfo = *req.UserPlaneIPResourceInformation
logger.PfcpLog.Infof("UPF(%s)[%s] setup association", upf.NodeID.ResolveNodeIdToIp().String(), upf.UPIPInfo.NetworkInstance)
} else {
logger.PfcpLog.Errorln("pfcp association setup response has no UserPlane IP Resource Information")
}
}
}
func HandlePfcpAssociationUpdateRequest(msg *pfcpUdp.Message) {
logger.PfcpLog.Warnf("PFCP Association Update Request handling is not implemented")
}
func HandlePfcpAssociationUpdateResponse(msg *pfcpUdp.Message) {
logger.PfcpLog.Warnf("PFCP Association Update Response handling is not implemented")
}
// Deprecated: PFCP Association Release Request should be initiated by the CP function
func HandlePfcpAssociationReleaseRequest(msg *pfcpUdp.Message) {
pfcpMsg := msg.PfcpMessage.Body.(pfcp.PFCPAssociationReleaseRequest)
var cause pfcpType.Cause
upfNode := smf_context.RetrieveUPFNodeByNodeID(*pfcpMsg.NodeID)
if upfNode != nil {
smf_context.RemoveUPFNodeByNodeId(*pfcpMsg.NodeID)
cause.CauseValue = pfcpType.CauseRequestAccepted
} else {
cause.CauseValue = pfcpType.CauseNoEstablishedPfcpAssociation
}
pfcp_message.SendPfcpAssociationReleaseResponse(msg.RemoteAddr, cause)
}
func HandlePfcpAssociationReleaseResponse(msg *pfcpUdp.Message) {
pfcpMsg := msg.PfcpMessage.Body.(pfcp.PFCPAssociationReleaseResponse)
if pfcpMsg.Cause.CauseValue == pfcpType.CauseRequestAccepted {
smf_context.RemoveUPFNodeByNodeId(*pfcpMsg.NodeID)
}
}
func HandlePfcpVersionNotSupportedResponse(msg *pfcpUdp.Message) {
logger.PfcpLog.Warnf("PFCP Version Not Support Response handling is not implemented")
}
func HandlePfcpNodeReportRequest(msg *pfcpUdp.Message) {
logger.PfcpLog.Warnf("PFCP Node Report Request handling is not implemented")
}
func HandlePfcpNodeReportResponse(msg *pfcpUdp.Message) {
logger.PfcpLog.Warnf("PFCP Node Report Response handling is not implemented")
}
func HandlePfcpSessionSetDeletionRequest(msg *pfcpUdp.Message) {
logger.PfcpLog.Warnf("PFCP Session Set Deletion Request handling is not implemented")
}
func HandlePfcpSessionSetDeletionResponse(msg *pfcpUdp.Message) {
logger.PfcpLog.Warnf("PFCP Session Set Deletion Response handling is not implemented")
}
func HandlePfcpSessionEstablishmentResponse(msg *pfcpUdp.Message) {
rsp := msg.PfcpMessage.Body.(pfcp.PFCPSessionEstablishmentResponse)
logger.PfcpLog.Infoln("In HandlePfcpSessionEstablishmentResponse")
SEID := msg.PfcpMessage.Header.SEID
smContext := smf_context.GetSMContextBySEID(SEID)
if rsp.UPFSEID != nil {
NodeIDtoIP := rsp.NodeID.ResolveNodeIdToIp().String()
pfcpSessionCtx := smContext.PFCPContext[NodeIDtoIP]
pfcpSessionCtx.RemoteSEID = rsp.UPFSEID.Seid
}
ANUPF := smContext.Tunnel.DataPathPool.GetDefaultPath().FirstDPNode
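	// ANUPF: the UPF directly facing the (R)AN, i.e. the first node on the default data path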
if rsp.Cause.CauseValue == pfcpType.CauseRequestAccepted && ANUPF.UPF.NodeID.ResolveNodeIdToIp().Equal(rsp.NodeID.ResolveNodeIdToIp()) {
smNasBuf, _ := smf_context.BuildGSMPDUSessionEstablishmentAccept(smContext)
n2Pdu, _ := smf_context.BuildPDUSessionResourceSetupRequestTransfer(smContext)
n1n2Request := models.N1N2MessageTransferRequest{}
n1n2Request.JsonData = &models.N1N2MessageTransferReqData{
PduSessionId: smContext.PDUSessionID,
N1MessageContainer: &models.N1MessageContainer{
N1MessageClass: "SM",
N1MessageContent: &models.RefToBinaryData{ContentId: "GSM_NAS"},
},
N2InfoContainer: &models.N2InfoContainer{
N2InformationClass: models.N2InformationClass_SM,
SmInfo: &models.N2SmInformation{
PduSessionId: smContext.PDUSessionID,
N2InfoContent: &models.N2InfoContent{
NgapIeType: models.NgapIeType_PDU_RES_SETUP_REQ,
NgapData: &models.RefToBinaryData{
ContentId: "N2SmInformation",
},
},
SNssai: smContext.Snssai,
},
},
}
n1n2Request.BinaryDataN1Message = smNasBuf
n1n2Request.BinaryDataN2Information = n2Pdu
rspData, _, err := smContext.CommunicationClient.N1N2MessageCollectionDocumentApi.N1N2MessageTransfer(context.Background(), smContext.Supi, n1n2Request)
if err != nil {
logger.PfcpLog.Warnf("Send N1N2Transfer failed")
}
if rspData.Cause == models.N1N2MessageTransferCause_N1_MSG_NOT_TRANSFERRED {
logger.PfcpLog.Warnf("%v", rspData.Cause)
}
}
if smf_context.SMF_Self().ULCLSupport && smContext.BPManager != nil {
if smContext.BPManager.BPStatus == smf_context.AddingPSA {
logger.PfcpLog.Infoln("Keep Adding PSAndULCL")
producer.AddPDUSessionAnchorAndULCL(smContext, *rsp.NodeID)
smContext.BPManager.BPStatus = smf_context.AddingPSA
}
}
}
func HandlePfcpSessionModificationResponse(msg *pfcpUdp.Message) {
pfcpRsp := msg.PfcpMessage.Body.(pfcp.PFCPSessionModificationResponse)
SEID := msg.PfcpMessage.Header.SEID
seqNum := msg.PfcpMessage.Header.SequenceNumber
smContext := smf_context.GetSMContextBySEID(SEID)
logger.PfcpLog.Infoln("In HandlePfcpSessionModificationResponse")
HttpResponseQueue := smf_message.RspQueue
if smf_context.SMF_Self().ULCLSupport && smContext.BPManager != nil {
if smContext.BPManager.BPStatus == smf_context.AddingPSA {
logger.PfcpLog.Infoln("Keep Adding PSAAndULCL")
upfNodeID := smContext.GetNodeIDByLocalSEID(SEID)
producer.AddPDUSessionAnchorAndULCL(smContext, upfNodeID)
}
}
if HttpResponseQueue.CheckItemExist(seqNum) {
if pfcpRsp.Cause.CauseValue == pfcpType.CauseRequestAccepted {
resQueueItem := HttpResponseQueue.GetItem(seqNum)
logger.PduSessLog.Infoln("[SMF] Send Update SMContext Response")
resQueueItem.RspChan <- smf_message.HandlerResponseMessage{HTTPResponse: &resQueueItem.Response}
if smf_context.SMF_Self().ULCLSupport && smContext.BPManager != nil {
if smContext.BPManager.BPStatus == smf_context.UnInitialized {
logger.PfcpLog.Infoln("Add PSAAndULCL")
upfNodeID := smContext.GetNodeIDByLocalSEID(SEID)
producer.AddPDUSessionAnchorAndULCL(smContext, upfNodeID)
smContext.BPManager.BPStatus = smf_context.AddingPSA
}
}
HttpResponseQueue.DeleteItem(seqNum)
//if smContext.SMState == smf_context.PDUSessionInactive {
// smNasBuf, _ := smf_context.BuildGSMPDUSessionEstablishmentAccept(smContext)
// n1n2Request := models.N1N2MessageTransferRequest{}
// n1n2Request.JsonData = &models.N1N2MessageTransferReqData{
// N1MessageContainer: &models.N1MessageContainer{
// N1MessageClass: "SM",
// N1MessageContent: &models.RefToBinaryData{ContentId: "GSM_NAS"},
// },
// }
// n1n2Request.BinaryDataN1Message = smNasBuf
// logger.PfcpLog.Warnf("N1N2 Transfer")
//rspData, _, err := smContext.CommunicationClient.N1N2MessageCollectionDocumentApi.N1N2MessageTransfer(context.Background(), smContext.Supi, n1n2Request)
//if err != nil {
// logger.PfcpLog.Warnf("Send N1N2Transfer failed")
// }
// if rspData.Cause == models.N1N2MessageTransferCause_N1_MSG_NOT_TRANSFERRED {
// logger.PfcpLog.Warnf("%v", rspData.Cause)
// }
// smContext.SMState = smf_context.PDUSessionActive
// }
logger.PfcpLog.Infof("PFCP Session Modification Success[%d]\n", SEID)
} else {
logger.PfcpLog.Infof("PFCP Session Modification Failed[%d]\n", SEID)
}
}
logger.CtxLog.Traceln("PFCP Session Context")
for _, ctx := range smContext.PFCPContext {
logger.CtxLog.Traceln(ctx.ToString())
}
}
func HandlePfcpSessionDeletionResponse(msg *pfcpUdp.Message) {
logger.PfcpLog.Infof("Handle PFCP Session Deletion Response")
pfcpRsp := msg.PfcpMessage.Body.(pfcp.PFCPSessionDeletionResponse)
SEID := msg.PfcpMessage.Header.SEID
seqNum := msg.PfcpMessage.Header.SequenceNumber
HttpResponseQueue := smf_message.RspQueue
smContext := smf_context.GetSMContextBySEID(SEID)
if HttpResponseQueue.CheckItemExist(seqNum) {
resQueueItem := HttpResponseQueue.GetItem(seqNum)
if pfcpRsp.Cause.CauseValue == pfcpType.CauseRequestAccepted {
if smContext == nil {
logger.PfcpLog.Warnf("PFCP Session Deletion Response Found SM Context NULL, Request Rejected")
// TODO fix: SEID should be the value sent by UPF but now the SEID value is from sm context
} else {
resQueueItem.RspChan <- smf_message.HandlerResponseMessage{HTTPResponse: &resQueueItem.Response}
HttpResponseQueue.DeleteItem(seqNum)
logger.PfcpLog.Infof("PFCP Session Deletion Success[%d]\n", SEID)
return
}
}
problemDetail := models.ProblemDetails{
Status: http.StatusInternalServerError,
			Cause:  "SYSTEM_FAILURE",
}
response := http_wrapper.Response{
Status: int(problemDetail.Status),
}
if resQueueItem.Response.Status == http.StatusOK {
// Update SmContext Request(N1 PDU Session Release Request)
// Send PDU Session Release Reject
errResponse := models.UpdateSmContextErrorResponse{
JsonData: &models.SmContextUpdateError{
Error: &problemDetail,
},
}
buf, _ := smf_context.BuildGSMPDUSessionReleaseReject(smContext)
errResponse.BinaryDataN1SmMessage = buf
errResponse.JsonData.N1SmMsg = &models.RefToBinaryData{ContentId: "PDUSessionReleaseReject"}
response.Body = errResponse
} else {
// Release SmContext Request
response.Body = problemDetail
}
resQueueItem.RspChan <- smf_message.HandlerResponseMessage{HTTPResponse: &response}
logger.PfcpLog.Infof("PFCP Session Deletion Failed[%d]\n", SEID)
} else {
logger.PfcpLog.Infof("[PFCP Deletion RSP] Can't find corresponding seq num[%d]\n", seqNum)
}
}
func HandlePfcpSessionReportRequest(msg *pfcpUdp.Message) {
req := msg.PfcpMessage.Body.(pfcp.PFCPSessionReportRequest)
SEID := msg.PfcpMessage.Header.SEID
smContext := smf_context.GetSMContextBySEID(SEID)
seqFromUPF := msg.PfcpMessage.Header.SequenceNumber
var cause pfcpType.Cause
if smContext == nil {
logger.PfcpLog.Warnf("PFCP Session Report Request Found SM Context NULL, Request Rejected")
cause.CauseValue = pfcpType.CauseRequestRejected
// TODO fix: SEID should be the value sent by UPF but now the SEID value is from sm context
pfcp_message.SendPfcpSessionReportResponse(msg.RemoteAddr, cause, seqFromUPF, SEID)
return
}
if req.ReportType.Dldr {
downlinkDataReport := req.DownlinkDataReport
pdrID := downlinkDataReport.PDRID.RuleId
if downlinkDataReport.DownlinkDataServiceInformation != nil {
logger.PfcpLog.Warnf("PFCP Session Report Request DownlinkDataServiceInformation handling is not implemented")
}
ANUPF := smContext.Tunnel.DataPathPool.GetDefaultPath().FirstDPNode
DLPDR := ANUPF.DownLinkTunnel.PDR
if DLPDR.PDRID == pdrID {
// TS 23.502 4.2.3.3 2b. Send Data Notification Ack, SMF->UPF
cause.CauseValue = pfcpType.CauseRequestAccepted
// TODO fix: SEID should be the value sent by UPF but now the SEID value is from sm context
pfcp_message.SendPfcpSessionReportResponse(msg.RemoteAddr, cause, seqFromUPF, SEID)
// TS 23.502 4.2.3.3 3a. Send Namf_Communication_N1N2MessageTransfer Request, SMF->AMF
n2SmBuf, _ := smf_context.BuildPDUSessionResourceSetupRequestTransfer(smContext)
n1n2Request := models.N1N2MessageTransferRequest{}
n1n2Request.JsonData = &models.N1N2MessageTransferReqData{
PduSessionId: smContext.PDUSessionID,
// Temporarily assign SMF itself, TODO: TS 23.502 4.2.3.3 5. Namf_Communication_N1N2TransferFailureNotification
N1n2FailureTxfNotifURI: fmt.Sprintf("%s://%s:%d", smf_context.SMF_Self().URIScheme, smf_context.SMF_Self().HTTPAddress, smf_context.SMF_Self().HTTPPort),
N2InfoContainer: &models.N2InfoContainer{
N2InformationClass: models.N2InformationClass_SM,
SmInfo: &models.N2SmInformation{
PduSessionId: smContext.PDUSessionID,
N2InfoContent: &models.N2InfoContent{
NgapIeType: models.NgapIeType_PDU_RES_SETUP_REQ,
NgapData: &models.RefToBinaryData{
ContentId: "N2SmInformation",
},
},
SNssai: smContext.Snssai,
},
},
}
n1n2Request.BinaryDataN2Information = n2SmBuf
rspData, _, err := smContext.CommunicationClient.N1N2MessageCollectionDocumentApi.N1N2MessageTransfer(context.Background(), smContext.Supi, n1n2Request)
if err != nil {
    logger.PfcpLog.Warnf("Send N1N2Transfer failed: %+v", err)
}
if rspData.Cause == models.N1N2MessageTransferCause_ATTEMPTING_TO_REACH_UE {
logger.PfcpLog.Infof("Receive %v, AMF is able to page the UE", rspData.Cause)
}
if rspData.Cause == models.N1N2MessageTransferCause_UE_NOT_RESPONDING {
logger.PfcpLog.Warnf("%v", rspData.Cause)
// TODO: TS 23.502 4.2.3.3 3c. Failure indication
}
}
}
}
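// Hedged summary of the paging flow implemented above (step numbers per the
// inline TS 23.502 §4.2.3.3 references; documentation only, no extra behavior):
//   2a. UPF -> SMF : PFCP Session Report Request (Downlink Data Report)
//   2b. SMF -> UPF : PFCP Session Report Response (Data Notification Ack)
//   3a. SMF -> AMF : Namf_Communication_N1N2MessageTransfer (PDU Session Resource Setup Request)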
func HandlePfcpSessionReportResponse(msg *pfcpUdp.Message) {
logger.PfcpLog.Warnf("PFCP Session Report Response handling is not implemented")
}
| {
logger.PfcpLog.Warnf("PFCP PFD Management Request handling is not implemented")
} |
WorkspaceEngine.ts | import {WorkspacePanelFactory} from "./WorkspacePanelFactory";
import {WorkspacePanelModel} from "./models/WorkspacePanelModel";
import {AbstractWorkspaceModel} from "./models/AbstractWorkspaceModel";
export class | {
factories: { [type: string]: WorkspacePanelFactory };
listeners: {[id: string]: () => any};
draggingNode: AbstractWorkspaceModel;
fullscreenModel: WorkspacePanelModel;
constructor() {
this.factories = {};
this.listeners = {};
this.draggingNode = null;
this.fullscreenModel = null;
}
setFullscreenModel(model: WorkspacePanelModel | null) {
this.fullscreenModel = model;
this.fireRepaintListeners();
}
registerRepaintListener(listener: () => any): string{
let id = WorkspaceEngine.generateID();
this.listeners[id] = listener;
return id;
}
removeRepaintListener(id: string){
delete this.listeners[id];
}
fireRepaintListeners(){
for(let i in this.listeners){
this.listeners[i]();
}
}
registerFactory(factory: WorkspacePanelFactory) {
this.factories[factory.type] = factory;
}
getFactory(model: WorkspacePanelModel) : WorkspacePanelFactory {
if (!this.factories[model.factory]) {
throw "Cannot find Workspace factory for model with type: [" + model.factory + "]";
}
return this.factories[model.factory];
}
setDraggingNode(node: AbstractWorkspaceModel | null){
this.draggingNode = node;
this.fireRepaintListeners();
}
static generateID(){
return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, function(c) {
var r = Math.random() * 16 | 0, v = c == 'x' ? r : (r & 0x3 | 0x8);
return v.toString(16);
});
}
}
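// Usage sketch (the listener body is illustrative; only methods defined above are used):
//
//   const engine = new WorkspaceEngine();
//   const id = engine.registerRepaintListener(() => console.log("repaint"));
//   engine.fireRepaintListeners();   // logs "repaint"
//   engine.removeRepaintListener(id);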
| WorkspaceEngine |
fakex.go | // Package fakex provides extensions for counterfeiter fakes.
package fakex
import (
"reflect"
"strings"
"testing"
)
// StubNotImplemented stubs out unset stubs in the provided fake with functions
// that will fail the test if called. This is useful to ensure that no function
// other than the expected ones are called, without manually updating things when
// the interface changes.
//
// Do not use this function if the fake's "Returns" feature is used or the default
// zero return is wanted, as the stub will take precedence.
//
// If passed bad arguments, this function may panic.
func StubNotImplemented(t testing.TB, fake interface{}) | {
fakeValue := reflect.ValueOf(fake)
fakeName := fakeValue.Type().String()
fakeValue = fakeValue.Elem()
fakeType := fakeValue.Type()
for i := 0; i < fakeValue.NumField(); i++ {
f := fakeType.Field(i)
if f.Type.Kind() != reflect.Func || !strings.HasSuffix(f.Name, "Stub") {
continue
}
fv := fakeValue.Field(i)
if !fv.IsZero() {
continue
}
out := make([]reflect.Value, f.Type.NumOut())
for j := range out {
out[j] = reflect.Zero(f.Type.Out(j))
}
fn := reflect.MakeFunc(f.Type, func(args []reflect.Value) (results []reflect.Value) {
t.Helper()
t.Fatalf("(%s).%s not implemented", fakeName, f.Name)
return out
})
fv.Set(fn)
}
} |
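// Usage sketch (FakeStore is a hypothetical counterfeiter-generated fake):
//
//	fake := &fakes.FakeStore{}
//	fake.GetStub = func(key string) (string, error) { return "value", nil }
//	fakex.StubNotImplemented(t, fake)
//	// Calling any method other than Get now fails the test with
//	// "(*fakes.FakeStore).<Name>Stub not implemented".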
|
service_util.py | """
Utilities and base functions for Services.
"""
import abc
import datetime
from typing import Any, Dict, List, Optional, Set, Tuple
from pydantic import validator
from qcelemental.models import ComputeError
from ..interface.models import ObjectId, ProtoModel
from ..interface.models.rest_models import TaskQueuePOSTBody
from ..interface.models.task_models import PriorityEnum
from ..procedures import get_procedure_parser
class TaskManager(ProtoModel):
storage_socket: Optional[Any] = None
logger: Optional[Any] = None
required_tasks: Dict[str, str] = {}
tag: Optional[str] = None
priority: PriorityEnum = PriorityEnum.HIGH
class Config(ProtoModel.Config):
allow_mutation = True
serialize_default_excludes = {"storage_socket", "logger"}
def done(self) -> bool:
"""
Check if requested tasks are complete.
"""
if len(self.required_tasks) == 0:
return True
task_query = self.storage_socket.get_procedures(
id=list(self.required_tasks.values()), include=["status", "error"]
)
status_values = set(x["status"] for x in task_query["data"])
if status_values == {"COMPLETE"}:
return True
elif "ERROR" in status_values:
for x in task_query["data"]:
if x["status"] != "ERROR":
continue
self.logger.error("Error in service compute as follows:")
tasks = self.storage_socket.get_queue()["data"]
for x in tasks:
if "error" not in x:
continue
self.logger.error(x["error"]["error_message"])
raise KeyError("All tasks did not execute successfully.")
else:
return False
def get_tasks(self) -> Dict[str, Any]:
"""
Pulls currently held tasks.
"""
ret = {}
for k, id in self.required_tasks.items():
ret[k] = self.storage_socket.get_procedures(id=id)["data"][0]
return ret
def submit_tasks(self, procedure_type: str, tasks: Dict[str, Any]) -> bool:
|
class BaseService(ProtoModel, abc.ABC):
# Excluded fields
storage_socket: Optional[Any]
logger: Optional[Any]
# Base identification
id: Optional[ObjectId] = None
hash_index: str
service: str
program: str
procedure: str
# Output data
output: Any
# Links
task_id: Optional[ObjectId] = None
procedure_id: Optional[ObjectId] = None
# Task manager
task_tag: Optional[str] = None
task_priority: PriorityEnum
task_manager: TaskManager = TaskManager()
status: str = "WAITING"
error: Optional[ComputeError] = None
tag: Optional[str] = None
# Sorting and priority
priority: PriorityEnum = PriorityEnum.NORMAL
modified_on: datetime.datetime = None
created_on: datetime.datetime = None
class Config(ProtoModel.Config):
allow_mutation = True
serialize_default_excludes = {"storage_socket", "logger"}
def __init__(self, **data):
dt = datetime.datetime.utcnow()
data.setdefault("modified_on", dt)
data.setdefault("created_on", dt)
super().__init__(**data)
self.task_manager.logger = self.logger
self.task_manager.storage_socket = self.storage_socket
self.task_manager.tag = self.task_tag
self.task_manager.priority = self.task_priority
@validator("task_priority", pre=True)
def munge_priority(cls, v):
if isinstance(v, str):
v = PriorityEnum[v.upper()]
elif v is None:
v = PriorityEnum.HIGH
return v
@classmethod
@abc.abstractmethod
def initialize_from_api(cls, storage_socket, meta, molecule, tag=None, priority=None):
"""
Initializes a Service from the API.
"""
@abc.abstractmethod
def iterate(self):
"""
Takes a "step" of the service. Should return False if not finished.
"""
def expand_ndimensional_grid(
dimensions: Tuple[int, ...], seeds: Set[Tuple[int, ...]], complete: Set[Tuple[int, ...]]
) -> List[Tuple[Tuple[int, ...], Tuple[int, ...]]]:
"""
Expands an n-dimensional key/value grid.
Example
-------
>>> expand_ndimensional_grid((3, 3), {(1, 1)}, set())
[((1, 1), (0, 1)), ((1, 1), (2, 1)), ((1, 1), (1, 0)), ((1, 1), (1, 2))]
"""
dimensions = tuple(dimensions)
compute = set()
connections = []
for d in range(len(dimensions)):
# Loop over all compute seeds
for seed in seeds:
# Iterate both directions
for disp in [-1, 1]:
new_dim = seed[d] + disp
# Bound check
if new_dim >= dimensions[d]:
continue
if new_dim < 0:
continue
new = list(seed)
new[d] = new_dim
new = tuple(new)
# Filter out duplicates from both new compute and complete
if new in compute:
continue
if new in complete:
continue
compute |= {new}
connections.append((seed, new))
return connections
| """
Submits new tasks to the queue and provides a waiter until there are done.
"""
procedure_parser = get_procedure_parser(procedure_type, self.storage_socket, self.logger)
required_tasks = {}
# Add in all new tasks
for key, packet in tasks.items():
packet["meta"].update({"tag": self.tag, "priority": self.priority})
# print("Check tag and priority:", packet)
packet = TaskQueuePOSTBody(**packet)
# Turn packet into a full task, if there are duplicates, get the ID
r = procedure_parser.submit_tasks(packet)
if len(r["meta"]["errors"]):
raise KeyError("Problem submitting task: {}.".format(errors))
# print("Submission:", r["data"])
required_tasks[key] = r["data"]["ids"][0]
self.required_tasks = required_tasks
return True |
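# Usage sketch for TaskManager (storage socket, logger, and packets are assumed
# to exist; "opt1"/"opt2" and "optimization" are illustrative names):
#
#   manager = TaskManager(storage_socket=socket, logger=logger)
#   manager.submit_tasks("optimization", {"opt1": packet1, "opt2": packet2})
#   if manager.done():
#       results = manager.get_tasks()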
validation.rs | //! FIXME: write short doc here
mod block;
use rustc_lexer::unescape;
use crate::{
ast, match_ast, AstNode, SyntaxError,
SyntaxKind::{BYTE, BYTE_STRING, CHAR, CONST_DEF, FN_DEF, INT_NUMBER, STRING, TYPE_ALIAS_DEF},
SyntaxNode, SyntaxToken, TextUnit, T,
};
fn rustc_unescape_error_to_string(err: unescape::EscapeError) -> &'static str {
use unescape::EscapeError as EE;
#[rustfmt::skip]
let err_message = match err {
EE::ZeroChars => {
"Literal must not be empty"
}
EE::MoreThanOneChar => {
"Literal must be one character long"
}
EE::LoneSlash => {
"Character must be escaped: `\\`"
}
EE::InvalidEscape => {
"Invalid escape"
}
EE::BareCarriageReturn | EE::BareCarriageReturnInRawString => {
"Character must be escaped: `\r`"
}
EE::EscapeOnlyChar => {
"Escape character `\\` must be escaped itself"
}
EE::TooShortHexEscape => {
"ASCII hex escape code must have exactly two digits"
}
EE::InvalidCharInHexEscape => {
"ASCII hex escape code must contain only hex characters"
}
EE::OutOfRangeHexEscape => {
"ASCII hex escape code must be at most 0x7F"
}
EE::NoBraceInUnicodeEscape => {
"Missing `{` to begin the unicode escape"
}
EE::InvalidCharInUnicodeEscape => {
"Unicode escape must contain only hex characters and underscores"
}
EE::EmptyUnicodeEscape => {
"Unicode escape must not be empty"
}
EE::UnclosedUnicodeEscape => {
"Missing '}' to terminate the unicode escape"
}
EE::LeadingUnderscoreUnicodeEscape => {
"Unicode escape code must not begin with an underscore"
}
EE::OverlongUnicodeEscape => {
"Unicode escape code must have at most 6 digits"
}
EE::LoneSurrogateUnicodeEscape => {
"Unicode escape code must not be a surrogate"
}
EE::OutOfRangeUnicodeEscape => {
"Unicode escape code must be at most 0x10FFFF"
}
EE::UnicodeEscapeInByte => {
"Byte literals must not contain unicode escapes"
}
EE::NonAsciiCharInByte | EE::NonAsciiCharInByteString => {
"Byte literals must not contain non-ASCII characters"
}
};
err_message
}
pub(crate) fn validate(root: &SyntaxNode) -> Vec<SyntaxError> {
// FIXME:
// * Add validation of character literal containing only a single char
// * Add validation of `crate` keyword not appearing in the middle of the symbol path
// * Add validation of doc comments are being attached to nodes
// * Remove validation of unterminated literals (it is already implemented in `tokenize()`)
let mut errors = Vec::new();
for node in root.descendants() {
match_ast! {
match node {
ast::Literal(it) => { validate_literal(it, &mut errors) },
ast::BlockExpr(it) => { block::validate_block_expr(it, &mut errors) },
ast::FieldExpr(it) => { validate_numeric_name(it.name_ref(), &mut errors) },
ast::RecordField(it) => { validate_numeric_name(it.name_ref(), &mut errors) },
ast::Visibility(it) => { validate_visibility(it, &mut errors) },
ast::RangeExpr(it) => { validate_range_expr(it, &mut errors) },
_ => (),
}
}
}
errors
}
fn validate_literal(literal: ast::Literal, acc: &mut Vec<SyntaxError>) {
// FIXME: move this function to outer scope (https://github.com/rust-analyzer/rust-analyzer/pull/2834#discussion_r366196658)
fn unquote(text: &str, prefix_len: usize, end_delimiter: char) -> Option<&str> {
text.rfind(end_delimiter).and_then(|end| text.get(prefix_len..end))
}
let token = literal.token();
let text = token.text().as_str();
// FIXME: lift this lambda refactor to `fn` (https://github.com/rust-analyzer/rust-analyzer/pull/2834#discussion_r366199205)
let mut push_err = |prefix_len, (off, err): (usize, unescape::EscapeError)| {
let off = token.text_range().start() + TextUnit::from_usize(off + prefix_len);
acc.push(SyntaxError::new_at_offset(rustc_unescape_error_to_string(err), off));
};
match token.kind() {
BYTE => {
if let Some(Err(e)) = unquote(text, 2, '\'').map(unescape::unescape_byte) {
push_err(2, e);
}
}
CHAR => {
if let Some(Err(e)) = unquote(text, 1, '\'').map(unescape::unescape_char) {
push_err(1, e);
}
}
BYTE_STRING => {
if let Some(without_quotes) = unquote(text, 2, '"') {
unescape::unescape_byte_str(without_quotes, &mut |range, char| {
if let Err(err) = char {
push_err(2, (range.start, err));
}
})
}
}
STRING => {
if let Some(without_quotes) = unquote(text, 1, '"') {
unescape::unescape_str(without_quotes, &mut |range, char| {
if let Err(err) = char {
push_err(1, (range.start, err));
}
})
}
}
_ => (),
}
}
pub(crate) fn validate_block_structure(root: &SyntaxNode) {
let mut stack = Vec::new();
for node in root.descendants() {
match node.kind() {
T!['{'] => stack.push(node),
T!['}'] => {
if let Some(pair) = stack.pop() {
assert_eq!(
node.parent(),
pair.parent(),
"\nunpaired curleys:\n{}\n{:#?}\n",
root.text(),
root,
);
assert!(
node.next_sibling().is_none() && pair.prev_sibling().is_none(),
"\nfloating curlys at {:?}\nfile:\n{}\nerror:\n{}\n",
node,
root.text(),
node.text(),
);
}
}
_ => (),
}
}
}
fn validate_numeric_name(name_ref: Option<ast::NameRef>, errors: &mut Vec<SyntaxError>) {
if let Some(int_token) = int_token(name_ref) {
if int_token.text().chars().any(|c| !c.is_digit(10)) {
errors.push(SyntaxError::new(
"Tuple (struct) field access is only allowed through \
decimal integers with no underscores or suffix",
int_token.text_range(),
));
}
}
fn int_token(name_ref: Option<ast::NameRef>) -> Option<SyntaxToken> |
}
fn validate_visibility(vis: ast::Visibility, errors: &mut Vec<SyntaxError>) {
let parent = match vis.syntax().parent() {
Some(it) => it,
None => return,
};
match parent.kind() {
FN_DEF | CONST_DEF | TYPE_ALIAS_DEF => (),
_ => return,
}
let impl_def = match parent.parent().and_then(|it| it.parent()).and_then(ast::ImplDef::cast) {
Some(it) => it,
None => return,
};
if impl_def.target_trait().is_some() {
errors.push(SyntaxError::new("Unnecessary visibility qualifier", vis.syntax.text_range()));
}
}
fn validate_range_expr(expr: ast::RangeExpr, errors: &mut Vec<SyntaxError>) {
if expr.op_kind() == Some(ast::RangeOp::Inclusive) && expr.end().is_none() {
errors.push(SyntaxError::new(
"An inclusive range must have an end expression",
expr.syntax().text_range(),
));
}
}
| {
name_ref?.syntax().first_child_or_token()?.into_token().filter(|it| it.kind() == INT_NUMBER)
} |
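// Illustrative inputs the validators above would flag (hypothetical snippets):
//   let r = 1..=;      // validate_range_expr: inclusive range with no end
//   let x = t.1_0;     // validate_numeric_name: underscore in a tuple-field index
//   b"é";              // validate_literal: non-ASCII character in a byte string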
map.js | /**
* echarts chart class: map
*
* @desc echarts is a pure-JavaScript chart library based on Canvas, providing intuitive, vivid, interactive and customizable statistical charts.
* @author Kener (@Kener-林峰, [email protected])
*
*/
define(function (require) {
var ChartBase = require('./base');
// shape dependencies
var TextShape = require('../../zrender/shape/Text');
var PathShape = require('../../zrender/shape/Path');
var CircleShape = require('../../zrender/shape/Circle');
var RectangleShape = require('../../zrender/shape/Rectangle');
var LineShape = require('../../zrender/shape/Line');
var PolygonShape = require('../../zrender/shape/Polygon');
var EllipseShape = require('../../zrender/shape/Ellipse');
// component dependencies
require('../component/dataRange');
require('../component/roamController');
var ecConfig = require('../config');
// default map options
ecConfig.map = {
    zlevel: 0,                  // first-level layering (canvas layer)
    z: 2,                       // second-level layering (z order within the layer)
    mapType: 'china',           // per-province mapTypes currently use Chinese names
    //mapLocation: {
    //    x: 'center' | 'left' | 'right' | 'x%' | {number},
    //    y: 'center' | 'top' | 'bottom' | 'x%' | {number}
    //    width                 // auto-sized
    //    height                // auto-sized
    //},
    // mapValueCalculation: 'sum', // value aggregation method, defaults to sum; options:
    //                             // 'sum' | 'average' | 'max' | 'min'
    mapValuePrecision: 0,       // decimal precision of computed map values
    showLegendSymbol: true,     // show legend color markers (small series dots); effective when a legend exists
    // selectedMode: false,     // selection mode, off by default; options: single, multiple
dataRangeHoverLink: true,
hoverable: true,
clickable: true,
    // roam: false,            // whether zoom-and-roam mode is enabled
// scaleLimit: null,
itemStyle: {
normal: {
            // color: varies,
borderColor: 'rgba(0,0,0,0)',
borderWidth: 1,
areaStyle: {
color: '#ccc'
},
label: {
show: false,
textStyle: {
color: 'rgb(139,69,19)'
}
}
},
        emphasis: {             // also the selected style
            // color: varies,
borderColor: 'rgba(0,0,0,0)',
borderWidth: 1,
areaStyle: {
color: 'rgba(255,215,0,0.8)'
},
label: {
show: false,
textStyle: {
color: 'rgb(100,0,0)'
}
}
}
}
};
var ecData = require('../util/ecData');
var zrUtil = require('../../zrender/tool/util');
var zrConfig = require('../../zrender/config');
var zrEvent = require('../../zrender/tool/event');
var _mapParams = require('../util/mapData/params').params;
var _textFixed = require('../util/mapData/textFixed');
var _geoCoord = require('../util/mapData/geoCoord');
/**
* Constructor
* @param {Object} messageCenter echarts message center
* @param {ZRender} zr zrender instance
* @param {Object} series series data
* @param {Object} component components
*/
function Map(ecTheme, messageCenter, zr, option, myChart){
// chart base class
ChartBase.call(this, ecTheme, messageCenter, zr, option, myChart);
var self = this;
self._onmousewheel = function(params) {
return self.__onmousewheel(params);
};
self._onmousedown = function(params) {
return self.__onmousedown(params);
};
self._onmousemove = function(params) {
return self.__onmousemove(params);
};
self._onmouseup = function(params) {
return self.__onmouseup(params);
};
self._onroamcontroller = function(params) {
return self.__onroamcontroller(params);
};
self._ondrhoverlink = function(params) {
return self.__ondrhoverlink(params);
};
this._isAlive = true;             // alive flag
this._selectedMode = {};          // selection mode
this._activeMapType = {};         // currently active map types
this._clickable = {};             // clickable flag, indexed by map type
this._hoverable = {};             // hover-highlight flag, indexed by map type
this._showLegendSymbol = {};      // show legend color markers
this._selected = {};              // map selection state
this._mapTypeMap = {};            // map type, indexed by area name
this._mapDataMap = {};            // bbox, transform and path, indexed by map type
this._nameMap = {};               // customized place names
this._specialArea = {};           // special areas
this._refreshDelayTicket;         // lets refresh wait a moment while wheel-zooming
this._mapDataRequireCounter;      // async callback counter
this._markAnimation = false;
this._hoverLinkMap = {};
// roaming-related state
this._roamMap = {};
this._scaleLimitMap = {};
this._mx;
this._my;
this._mousedown;
this._justMove;                   // avoid a drag being treated as a click
this._curMapType;                 // map type currently being dragged
this.refresh(option);
this.zr.on(zrConfig.EVENT.MOUSEWHEEL, this._onmousewheel);
this.zr.on(zrConfig.EVENT.MOUSEDOWN, this._onmousedown);
messageCenter.bind(ecConfig.EVENT.ROAMCONTROLLER, this._onroamcontroller);
messageCenter.bind(ecConfig.EVENT.DATA_RANGE_HOVERLINK, this._ondrhoverlink);
}
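// Illustrative series option consumed by this chart (names and values are
// examples, not taken from this file):
// series: [{
//     type: 'map',
//     mapType: 'china',
//     roam: true,
//     data: [
//         {name: '北京', value: 100},
//         {name: '上海', value: 200}
//     ]
// }]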
Map.prototype = {
type : ecConfig.CHART_TYPE_MAP,
/**
* Build the shapes
*/
_buildShape : function () {
var series = this.series;
this.selectedMap = {};      // series selection
this._activeMapType = {};   // currently active map types
var legend = this.component.legend;
var seriesName;
var valueData = {};
var mapType;
var data;
var name;
var mapSeries = {};
var mapValuePrecision = {};
var valueCalculation = {};
for (var i = 0, l = series.length; i < l; i++) {
if (series[i].type == ecConfig.CHART_TYPE_MAP) { // map
series[i] = this.reformOption(series[i]);
mapType = series[i].mapType;
mapSeries[mapType] = mapSeries[mapType] || {};
mapSeries[mapType][i] = true;
mapValuePrecision[mapType] = mapValuePrecision[mapType]
|| series[i].mapValuePrecision;
this._scaleLimitMap[mapType] = this._scaleLimitMap[mapType] || {};
series[i].scaleLimit
&& zrUtil.merge(this._scaleLimitMap[mapType], series[i].scaleLimit, true);
this._roamMap[mapType] = series[i].roam || this._roamMap[mapType];
if (this._hoverLinkMap[mapType] == null || this._hoverLinkMap[mapType]) {
// a single false vetoes
this._hoverLinkMap[mapType] = series[i].dataRangeHoverLink;
}
this._nameMap[mapType] = this._nameMap[mapType] || {};
series[i].nameMap
&& zrUtil.merge(this._nameMap[mapType], series[i].nameMap, true);
this._activeMapType[mapType] = true;
if (series[i].textFixed) {
zrUtil.merge(
_textFixed, series[i].textFixed, true
);
}
if (series[i].geoCoord) {
zrUtil.merge(
_geoCoord, series[i].geoCoord, true
);
}
this._selectedMode[mapType] = this._selectedMode[mapType]
|| series[i].selectedMode;
if (this._hoverable[mapType] == null || this._hoverable[mapType]) {
    // a single false vetoes
    this._hoverable[mapType] = series[i].hoverable;
}
if (this._clickable[mapType] == null || this._clickable[mapType]) {
    // a single false vetoes
    this._clickable[mapType] = series[i].clickable;
}
if (this._showLegendSymbol[mapType] == null
    || this._showLegendSymbol[mapType]
) {
    // a single false vetoes
    this._showLegendSymbol[mapType] = series[i].showLegendSymbol;
}
valueCalculation[mapType] = valueCalculation[mapType]
|| series[i].mapValueCalculation;
seriesName = series[i].name;
this.selectedMap[seriesName] = legend
? legend.isSelected(seriesName)
: true;
if (this.selectedMap[seriesName]) {
valueData[mapType] = valueData[mapType] || {};
data = series[i].data;
for (var j = 0, k = data.length; j < k; j++) {
name = this._nameChange(mapType, data[j].name);
valueData[mapType][name] = valueData[mapType][name]
|| {
seriesIndex : [],
valueMap: {}
};
for (var key in data[j]) {
if (key != 'value') {
valueData[mapType][name][key] =
data[j][key];
}
else if (!isNaN(data[j].value)) {
// value
valueData[mapType][name].value == null
&& (valueData[mapType][name].value = 0);
valueData[mapType][name].value += (+data[j].value);
valueData[mapType][name].valueMap[i] = +data[j].value;
}
}
// record the series indices that style this area
valueData[mapType][name].seriesIndex.push(i);
}
}
}
}
this._mapDataRequireCounter = 0;
for (var mt in valueData) {
this._mapDataRequireCounter++;
}
// clear stale selection state
this._clearSelected();
if (this._mapDataRequireCounter === 0) {
this.clear();
this.zr && this.zr.delShape(this.lastShapeList);
this.lastShapeList = [];
}
for (var mt in valueData) {
if (valueCalculation[mt] && valueCalculation[mt] == 'average') {
for (var k in valueData[mt]) {
valueData[mt][k].value =
(valueData[mt][k].value / valueData[mt][k].seriesIndex.length)
.toFixed(
mapValuePrecision[mt]
) - 0;
}
}
this._mapDataMap[mt] = this._mapDataMap[mt] || {};
if (this._mapDataMap[mt].mapData) {
// already cached, use it directly
this._mapDataCallback(mt, valueData[mt], mapSeries[mt])(
this._mapDataMap[mt].mapData
);
}
else if (_mapParams[mt.replace(/\|.*/, '')].getGeoJson) {
// special areas
this._specialArea[mt] =
_mapParams[mt.replace(/\|.*/, '')].specialArea
|| this._specialArea[mt];
_mapParams[mt.replace(/\|.*/, '')].getGeoJson(
this._mapDataCallback(mt, valueData[mt], mapSeries[mt])
);
}
}
},
/**
* @param {string} mt mapType
* @param {Object} vd valueData
* @param {Object} ms mapSeries
*/
_mapDataCallback : function (mt, vd, ms) {
var self = this;
return function (md) {
if (!self._isAlive || self._activeMapType[mt] == null) {
// the instance may have been disposed by the time the async map data callback fires
return;
}
// cache this data
if (mt.indexOf('|') != -1) {
// sub-map: derive a fresh mapData
md = self._getSubMapData(mt, md);
}
self._mapDataMap[mt].mapData = md;
if (md.firstChild) {
self._mapDataMap[mt].rate = 1;
self._mapDataMap[mt].projection = require('../util/projection/svg');
}
else {
self._mapDataMap[mt].rate = 0.75;
self._mapDataMap[mt].projection = require('../util/projection/normal');
}
self._buildMap(
mt,                                  // map type
self._getProjectionData(mt, md, ms), // map data
vd,                                  // user data
ms                                   // series
);
self._buildMark(mt, ms);
if (--self._mapDataRequireCounter <= 0) {
self.addShapeList();
self.zr.refreshNextFrame();
}
};
},
_clearSelected : function() {
for (var k in this._selected) {
if (!this._activeMapType[this._mapTypeMap[k]]) {
delete this._selected[k];
delete this._mapTypeMap[k];
}
}
},
_getSubMapData : function (mapType, mapData) {
var subType = mapType.replace(/^.*\|/, '');
var features = mapData.features;
for (var i = 0, l = features.length; i < l; i++) {
if (features[i].properties
&& features[i].properties.name == subType
) {
features = features[i];
if (subType == 'United States of America'
&& features.geometry.coordinates.length > 1 // 未被简化
) {
features = {
geometry: {
coordinates: features.geometry
.coordinates.slice(5,6),
type: features.geometry.type
},
id: features.id,
properties: features.properties,
type: features.type
};
}
break;
}
}
return {
'type' : 'FeatureCollection',
'features':[
features
]
};
},
/**
* Load the required maps on demand
*/
_getProjectionData : function (mapType, mapData, mapSeries) {
var normalProjection = this._mapDataMap[mapType].projection;
var province = [];
// the bbox never changes
var bbox = this._mapDataMap[mapType].bbox
|| normalProjection.getBbox(
mapData, this._specialArea[mapType]
);
//console.log(bbox)
var transform;
//console.log(1111,transform)
if (!this._mapDataMap[mapType].hasRoam) {
// first build or after a resize: needs recomputing
transform = this._getTransform(
bbox,
mapSeries,
this._mapDataMap[mapType].rate
);
}
else {
// after user roaming, resize no longer applies
transform = this._mapDataMap[mapType].transform;
}
//console.log(bbox,transform)
var lastTransform = this._mapDataMap[mapType].lastTransform
|| {scale:{}};
var pathArray;
if (transform.left != lastTransform.left
|| transform.top != lastTransform.top
|| transform.scale.x != lastTransform.scale.x
|| transform.scale.y != lastTransform.scale.y
) {
// something changed, regenerate pathArray
// normal projection
//console.log(transform)
pathArray = normalProjection.geoJson2Path(
mapData, transform, this._specialArea[mapType]
);
lastTransform = zrUtil.clone(transform);
}
else {
transform = this._mapDataMap[mapType].transform;
pathArray = this._mapDataMap[mapType].pathArray;
}
this._mapDataMap[mapType].bbox = bbox;
this._mapDataMap[mapType].transform = transform;
this._mapDataMap[mapType].lastTransform = lastTransform;
this._mapDataMap[mapType].pathArray = pathArray;
//console.log(pathArray)
var position = [transform.left, transform.top];
for (var i = 0, l = pathArray.length; i < l; i++) {
/* for test
console.log(
    mapData.features[i].properties.cp, // lon/lat
    pathArray[i].cp // plane coordinates
);
console.log(
    this.pos2geo(mapType, pathArray[i].cp), // plane coordinates back to lon/lat
    this.geo2pos(mapType, mapData.features[i].properties.cp)
)
*/
province.push(this._getSingleProvince(
mapType, pathArray[i], position
));
}
if (this._specialArea[mapType]) {
for (var area in this._specialArea[mapType]) {
province.push(this._getSpecialProjectionData(
mapType, mapData,
area, this._specialArea[mapType][area],
position
));
}
}
// add the South China Sea Islands to the 'china' map
if (mapType == 'china') {
var leftTop = this.geo2pos(
mapType,
_geoCoord['南海诸岛'] || _mapParams['南海诸岛'].textCoord
);
// scale.x : width = 10.51 : 64
var scale = transform.scale.x / 10.5;
var textPosition = [
32 * scale + leftTop[0],
83 * scale + leftTop[1]
];
if (_textFixed['南海诸岛']) {
textPosition[0] += _textFixed['南海诸岛'][0];
textPosition[1] += _textFixed['南海诸岛'][1];
}
province.push({
name : this._nameChange(mapType, '南海诸岛'),
path : _mapParams['南海诸岛'].getPath(leftTop, scale),
position : position,
textX : textPosition[0],
textY : textPosition[1]
});
}
//console.log(JSON.stringify(province));
//console.log(JSON.stringify(this._mapDataMap[mapType].transform));
return province;
},
/**
* Projection data for special areas
*/
_getSpecialProjectionData : function (mapType, mapData, areaName, mapSize, position) {
//console.log('_getSpecialProjectionData--------------')
// build standalone geoJson map data
mapData = this._getSubMapData('x|' + areaName, mapData);
// bbox
var normalProjection = require('../util/projection/normal');
var bbox = normalProjection.getBbox(mapData);
//console.log('bbox', bbox)
// transform
var leftTop = this.geo2pos(
mapType,
[mapSize.left, mapSize.top]
);
var rightBottom = this.geo2pos(
mapType,
[mapSize.left + mapSize.width, mapSize.top + mapSize.height]
);
//console.log('leftright' , leftTop, rightBottom);
var width = Math.abs(rightBottom[0] - leftTop[0]);
var height = Math.abs(rightBottom[1] - leftTop[1]);
var mapWidth = bbox.width;
var mapHeight = bbox.height;
//var minScale;
var xScale = (width / 0.75) / mapWidth;
var yScale = height / mapHeight;
if (xScale > yScale) {
xScale = yScale * 0.75;
width = mapWidth * xScale;
}
else {
yScale = xScale;
xScale = yScale * 0.75;
height = mapHeight * yScale;
}
var transform = {
OffsetLeft : leftTop[0],
OffsetTop : leftTop[1],
//width: width,
//height: height,
scale : {
x : xScale,
y : yScale
}
};
//console.log('**',areaName, transform)
var pathArray = normalProjection.geoJson2Path(
mapData, transform
);
//console.log(pathArray)
return this._getSingleProvince(
mapType, pathArray[0], position
);
},
_getSingleProvince : function (mapType, path, position) {
var textPosition;
var name = path.properties.name;
var textFixed = _textFixed[name] || [0, 0];
if (_geoCoord[name]) {
// positioned directly by lon/lat; textFixed not applied
textPosition = this.geo2pos(
mapType,
_geoCoord[name]
);
}
else if (path.cp) {
textPosition = [
path.cp[0] + textFixed[0],
path.cp[1] + textFixed[1]
];
}
else {
var bbox = this._mapDataMap[mapType].bbox;
textPosition = this.geo2pos(
mapType,
[bbox.left + bbox.width / 2, bbox.top + bbox.height / 2]
);
textPosition[0] += textFixed[0];
textPosition[1] += textFixed[1];
}
//console.log(textPosition)
path.name = this._nameChange(mapType, name);
path.position = position;
path.textX = textPosition[0];
path.textY = textPosition[1];
return path;
},
/**
* Compute the transform (position and scale)
*/
_getTransform : function (bbox, mapSeries, rate) {
var series = this.series;
var mapLocation;
var x;
var cusX;
var y;
var cusY;
var width;
var height;
var zrWidth = this.zr.getWidth();
var zrHeight = this.zr.getHeight();
// leave padding on all four sides
var padding = Math.round(Math.min(zrWidth, zrHeight) * 0.02);
for (var key in mapSeries) {
mapLocation = series[key].mapLocation || {};
cusX = mapLocation.x || cusX;
cusY = mapLocation.y || cusY;
width = mapLocation.width || width;
height = mapLocation.height || height;
}
//x = isNaN(cusX) ? padding : cusX;
x = this.parsePercent(cusX, zrWidth);
x = isNaN(x) ? padding : x;
//y = isNaN(cusY) ? padding : cusY;
y = this.parsePercent(cusY, zrHeight);
y = isNaN(y) ? padding : y;
width = width == null
? (zrWidth - x - 2 * padding)
: (this.parsePercent(width, zrWidth));
height = height == null
? (zrHeight - y - 2 * padding)
: (this.parsePercent(height, zrHeight));
var mapWidth = bbox.width;
var mapHeight = bbox.height;
//var minScale;
var xScale = (width / rate) / mapWidth;
var yScale = height / mapHeight;
if (xScale > yScale) {
//minScale = yScale;
xScale = yScale * rate;
width = mapWidth * xScale;
}
else {
//minScale = xScale;
yScale = xScale;
xScale = yScale * rate;
height = mapHeight * yScale;
}
//console.log(minScale)
//width = mapWidth * minScale;
//height = mapHeight * minScale;
if (isNaN(cusX)) {
cusX = cusX || 'center';
switch (cusX + '') {
case 'center' :
x = Math.floor((zrWidth - width) / 2);
break;
case 'right' :
x = zrWidth - width;
break;
//case 'left' :
//x = padding;
}
}
//console.log(cusX,x,zrWidth,width,'kener')
if (isNaN(cusY)) {
cusY = cusY || 'center';
switch (cusY + '') {
case 'center' :
y = Math.floor((zrHeight - height) / 2);
break;
case 'bottom' :
y = zrHeight - height;
break;
//case 'top' :
//y = padding;
}
}
//console.log(x,y,width,height)
return {
left : x,
top : y,
width: width,
height: height,
//scale : minScale * 50, // wtf 50
baseScale : 1,
scale : {
x : xScale,
y : yScale
}
//translate : [x + width / 2, y + height / 2]
};
},
/**
* Build the map
* @param {Object} mapData shape data
* @param {Object} valueData user data
*/
_buildMap : function (mapType, mapData, valueData, mapSeries) {
var series = this.series;
var legend = this.component.legend;
var dataRange = this.component.dataRange;
var seriesName;
var name;
var data;
var value;
var queryTarget;
var color;
var font;
var style;
var highlightStyle;
var shape;
var textShape;
for (var i = 0, l = mapData.length; i < l; i++) {
style = zrUtil.clone(mapData[i]);
highlightStyle = {
name : style.name,
path : style.path,
position : zrUtil.clone(style.position)
};
name = style.name;
data = valueData[name]; // data merged across series
if (data) {
queryTarget = [data]; // level 3
seriesName = '';
for (var j = 0, k = data.seriesIndex.length; j < k; j++) {
// level 2
queryTarget.push(series[data.seriesIndex[j]]);
seriesName += series[data.seriesIndex[j]].name + ' ';
if (legend
&& this._showLegendSymbol[mapType]
&& legend.hasColor(series[data.seriesIndex[j]].name)
) {
this.shapeList.push(new CircleShape({
zlevel : this.getZlevelBase(),
z : this.getZBase() + 1,
position : zrUtil.clone(style.position),
_mapType : mapType,
/*
_geo : this.pos2geo(
mapType, [style.textX + 3 + j * 7, style.textY - 10]
),
*/
style : {
x : style.textX + 3 + j * 7,
y : style.textY - 10,
r : 3,
color : legend.getColor(
series[data.seriesIndex[j]].name
)
},
hoverable : false
}));
}
}
value = data.value;
}
else {
data = {
name: name,
value: '-'
};
seriesName = '';
queryTarget = [];
for (var key in mapSeries) {
queryTarget.push(series[key]);
}
value = '-';
}
this.ecTheme.map && queryTarget.push(this.ecTheme.map); // level 1
queryTarget.push(ecConfig.map); // level 1
// color controlled by the dataRange component
color = (dataRange && !isNaN(value))
? dataRange.getColor(value)
: null;
// normal style
style.color = style.color
|| color
|| this.getItemStyleColor(
this.deepQuery(queryTarget, 'itemStyle.normal.color'),
data.seriesIndex, -1, data
)
|| this.deepQuery(
queryTarget, 'itemStyle.normal.areaStyle.color'
);
style.strokeColor = style.strokeColor
|| this.deepQuery(queryTarget, 'itemStyle.normal.borderColor');
style.lineWidth = style.lineWidth
|| this.deepQuery(queryTarget, 'itemStyle.normal.borderWidth');
// highlight style
highlightStyle.color = this.getItemStyleColor(
this.deepQuery(queryTarget, 'itemStyle.emphasis.color'),
data.seriesIndex, -1, data
)
|| this.deepQuery(
queryTarget, 'itemStyle.emphasis.areaStyle.color'
)
|| style.color;
highlightStyle.strokeColor = this.deepQuery(
queryTarget, 'itemStyle.emphasis.borderColor'
)
|| style.strokeColor;
highlightStyle.lineWidth = this.deepQuery(
queryTarget, 'itemStyle.emphasis.borderWidth'
)
|| style.lineWidth;
style.brushType = highlightStyle.brushType = style.brushType || 'both';
style.lineJoin = highlightStyle.lineJoin = 'round';
style._name = highlightStyle._name = name;
font = this.deepQuery(queryTarget, 'itemStyle.normal.label.textStyle');
// the text label is a separate shape so it is not covered
textShape = {
zlevel : this.getZlevelBase(),
z : this.getZBase() + 1,
//hoverable: this._hoverable[mapType],
//clickable: this._clickable[mapType],
position : zrUtil.clone(style.position),
_mapType : mapType,
_geo : this.pos2geo(
mapType, [style.textX, style.textY]
),
style : {
brushType : 'fill',
x : style.textX,
y : style.textY,
text : this.getLabelText(name, value, queryTarget, 'normal'),
_name : name,
textAlign : 'center',
color : this.deepQuery(queryTarget, 'itemStyle.normal.label.show')
? this.deepQuery(
queryTarget,
'itemStyle.normal.label.textStyle.color'
)
: 'rgba(0,0,0,0)',
textFont : this.getFont(font)
}
}; | if (this.deepQuery(queryTarget, 'itemStyle.emphasis.label.show')) {
textShape.highlightStyle.text = this.getLabelText(
name, value, queryTarget, 'emphasis'
);
textShape.highlightStyle.color = this.deepQuery(
queryTarget,
'itemStyle.emphasis.label.textStyle.color'
) || textShape.style.color;
font = this.deepQuery(
queryTarget,
'itemStyle.emphasis.label.textStyle'
) || font;
textShape.highlightStyle.textFont = this.getFont(font);
}
else {
textShape.highlightStyle.color = 'rgba(0,0,0,0)';
}
shape = {
zlevel : this.getZlevelBase(),
z : this.getZBase(),
//hoverable: this._hoverable[mapType],
//clickable: this._clickable[mapType],
position : zrUtil.clone(style.position),
style : style,
highlightStyle : highlightStyle,
_style: zrUtil.clone(style),
_mapType: mapType
};
if (style.scale != null) {
shape.scale = zrUtil.clone(style.scale);
}
textShape = new TextShape(textShape);
switch (shape.style.shapeType) {
case 'rectangle' :
shape = new RectangleShape(shape);
break;
case 'line' :
shape = new LineShape(shape);
break;
case 'circle' :
shape = new CircleShape(shape);
break;
case 'polygon' :
shape = new PolygonShape(shape);
break;
case 'ellipse':
shape = new EllipseShape(shape);
break;
default :
shape = new PathShape(shape);
if (shape.buildPathArray) {
shape.style.pathArray = shape.buildPathArray(shape.style.path);
}
break;
}
if (this._selectedMode[mapType] &&
this._selected[name]
|| (data.selected && this._selected[name] !== false)
) {
textShape.style = textShape.highlightStyle;
shape.style = shape.highlightStyle;
}
textShape.clickable = shape.clickable =
this._clickable[mapType]
&& (data.clickable == null || data.clickable);
if (this._selectedMode[mapType]) {
this._selected[name] = this._selected[name] != null
? this._selected[name]
: data.selected;
this._mapTypeMap[name] = mapType;
if (data.selectable == null || data.selectable) {
shape.clickable = textShape.clickable = true;
shape.onclick = textShape.onclick = this.shapeHandler.onclick;
}
}
if (this._hoverable[mapType]
&& (data.hoverable == null || data.hoverable)
) {
textShape.hoverable = shape.hoverable = true;
shape.hoverConnect = textShape.id;
textShape.hoverConnect = shape.id;
}
else {
textShape.hoverable = shape.hoverable = false;
}
// console.log(name,shape);
ecData.pack(
textShape,
{
name: seriesName,
tooltip: this.deepQuery(queryTarget, 'tooltip')
},
0,
data, 0,
name
);
this.shapeList.push(textShape);
ecData.pack(
shape,
{
name: seriesName,
tooltip: this.deepQuery(queryTarget, 'tooltip')
},
0,
data, 0,
name
);
this.shapeList.push(shape);
}
//console.log(this._selected);
},
// add marks
_buildMark : function (mapType, mapSeries) {
this._seriesIndexToMapType = this._seriesIndexToMapType || {};
this.markAttachStyle = this.markAttachStyle || {};
var position = [
this._mapDataMap[mapType].transform.left,
this._mapDataMap[mapType].transform.top
];
if (mapType == 'none') {
position = [0, 0];
}
for (var sIdx in mapSeries) {
this._seriesIndexToMapType[sIdx] = mapType;
this.markAttachStyle[sIdx] = {
position : position,
_mapType : mapType
};
this.buildMark(sIdx);
}
},
// coordinate conversion
getMarkCoord : function (seriesIndex, mpData) {
return (mpData.geoCoord || _geoCoord[mpData.name])
? this.geo2pos(
this._seriesIndexToMapType[seriesIndex],
mpData.geoCoord || _geoCoord[mpData.name]
)
: [0, 0];
},
getMarkGeo : function(mpData) {
return mpData.geoCoord || _geoCoord[mpData.name];
},
_nameChange : function (mapType, name) {
return this._nameMap[mapType][name] || name;
},
/**
* Compute the label text from label.formatter
*/
getLabelText : function (name, value, queryTarget, status) {
var formatter = this.deepQuery(
queryTarget,
'itemStyle.' + status + '.label.formatter'
);
if (formatter) {
if (typeof formatter == 'function') {
return formatter.call(
this.myChart,
name,
value
);
}
else if (typeof formatter == 'string') {
formatter = formatter.replace('{a}','{a0}')
.replace('{b}','{b0}');
formatter = formatter.replace('{a0}', name)
.replace('{b0}', value);
return formatter;
}
}
else {
return name;
}
},
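// e.g. a string formatter '{a}: {b}' with name '广东' and value 123 yields
// '广东: 123'; {a} maps to the area name and {b} to the value (illustrative data).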
_findMapTypeByPos : function (mx, my) {
var transform;
var left;
var top;
var width;
var height;
for (var mapType in this._mapDataMap) {
transform = this._mapDataMap[mapType].transform;
if (!transform || !this._roamMap[mapType] || !this._activeMapType[mapType]) {
continue;
}
left = transform.left;
top = transform.top;
width = transform.width;
height = transform.height;
if (mx >= left
&& mx <= (left + width)
&& my >= top
&& my <= (top + height)
) {
return mapType;
}
}
return;
},
/**
* Mouse-wheel zoom
*/
__onmousewheel : function (params) {
if (this.shapeList.length <= 0) {
return;
}
for (var i = 0, l = this.shapeList.length; i < l; i++) {
var shape = this.shapeList[i];
// If any shape is still animating
if (shape.__animating) {
return;
}
}
var event = params.event;
var mx = zrEvent.getX(event);
var my = zrEvent.getY(event);
var delta;
var eventDelta = zrEvent.getDelta(event);
//eventDelta = eventDelta > 0 ? (-1) : 1;
var mapType;
var mapTypeControl = params.mapTypeControl;
if (!mapTypeControl) {
mapTypeControl = {};
mapType = this._findMapTypeByPos(mx, my);
if (mapType && this._roamMap[mapType] && this._roamMap[mapType] != 'move') {
mapTypeControl[mapType] = true;
}
}
function scalePolyline(shapeStyle, delta) {
for (var i = 0; i < shapeStyle.pointList.length; i++) {
var point = shapeStyle.pointList[i];
point[0] *= delta;
point[1] *= delta;
}
//If smoothness > 0
var controlPointList = shapeStyle.controlPointList;
if (controlPointList) {
for (var i = 0; i < controlPointList.length; i++) {
var point = controlPointList[i];
point[0] *= delta;
point[1] *= delta;
}
}
}
function scaleMarkline(shapeStyle, delta) {
shapeStyle.xStart *= delta;
shapeStyle.yStart *= delta;
shapeStyle.xEnd *= delta;
shapeStyle.yEnd *= delta;
if (shapeStyle.cpX1 != null) {
shapeStyle.cpX1 *= delta;
shapeStyle.cpY1 *= delta;
}
}
var haveScale = false;
for (mapType in mapTypeControl) {
if (mapTypeControl[mapType]) {
haveScale = true;
var transform = this._mapDataMap[mapType].transform;
var left = transform.left;
var top = transform.top;
var width = transform.width;
var height = transform.height;
// position to lon/lat
var geoAndPos = this.pos2geo(mapType, [mx - left, my - top]);
if (eventDelta > 0) {
    delta = 1.2; // zoom in
if (this._scaleLimitMap[mapType].max != null
&& transform.baseScale >= this._scaleLimitMap[mapType].max
) {
continue; // zoom limit reached
}
}
else {
delta = 1 / 1.2; // zoom out
if (this._scaleLimitMap[mapType].min != null
&& transform.baseScale <= this._scaleLimitMap[mapType].min
) {
continue; // zoom limit reached
}
}
transform.baseScale *= delta;
transform.scale.x *= delta;
transform.scale.y *= delta;
transform.width = width * delta;
transform.height = height * delta;
this._mapDataMap[mapType].hasRoam = true;
this._mapDataMap[mapType].transform = transform;
// lon/lat back to position
geoAndPos = this.geo2pos(mapType, geoAndPos);
// keep the point under the cursor visually fixed
transform.left -= geoAndPos[0] - (mx - left);
transform.top -= geoAndPos[1] - (my - top);
this._mapDataMap[mapType].transform = transform;
this.clearEffectShape(true);
for (var i = 0, l = this.shapeList.length; i < l; i++) {
var shape = this.shapeList[i];
if(shape._mapType == mapType) {
var shapeType = shape.type;
var shapeStyle = shape.style;
shape.position[0] = transform.left;
shape.position[1] = transform.top;
switch (shapeType) {
case 'path':
case 'symbol':
case 'circle':
case 'rectangle':
case 'polygon':
case 'line':
case 'ellipse':
shape.scale[0] *= delta;
shape.scale[1] *= delta;
break;
case 'mark-line':
scaleMarkline(shapeStyle, delta);
break;
case 'polyline':
scalePolyline(shapeStyle, delta);
break;
case 'shape-bundle':
for (var j = 0; j < shapeStyle.shapeList.length; j++) {
var subShape = shapeStyle.shapeList[j];
if (subShape.type == 'mark-line') {
scaleMarkline(subShape.style, delta);
}
else if (subShape.type == 'polyline') {
scalePolyline(subShape.style, delta);
}
}
break;
case 'icon':
case 'image':
geoAndPos = this.geo2pos(mapType, shape._geo);
shapeStyle.x = shapeStyle._x =
geoAndPos[0] - shapeStyle.width / 2;
shapeStyle.y = shapeStyle._y =
geoAndPos[1] - shapeStyle.height / 2;
break;
default:
geoAndPos = this.geo2pos(mapType, shape._geo);
shapeStyle.x = geoAndPos[0];
shapeStyle.y = geoAndPos[1];
if (shapeType == 'text') {
shape._style.x = shape.highlightStyle.x
= geoAndPos[0];
shape._style.y = shape.highlightStyle.y
= geoAndPos[1];
}
}
this.zr.modShape(shape.id);
}
}
}
}
if (haveScale) {
zrEvent.stop(event);
this.zr.refreshNextFrame();
var self = this;
clearTimeout(this._refreshDelayTicket);
this._refreshDelayTicket = setTimeout(
function(){
self && self.shapeList && self.animationEffect();
},
100
);
this.messageCenter.dispatch(
ecConfig.EVENT.MAP_ROAM,
params.event,
{type : 'scale'},
this.myChart
);
}
},
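// Worked example of the zoom anchoring above (illustrative numbers):
// cursor at (mx - left, my - top) = (100, 80), delta = 1.2
//   geoAndPos = pos2geo(..., [100, 80])  // remember the geo point under the cursor
//   scale *= 1.2                         // that geo point now maps to (120, 96)
//   left -= 120 - 100; top -= 96 - 80    // shift back so it stays at (100, 80)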
__onmousedown : function (params) {
if (this.shapeList.length <= 0) {
return;
}
var target = params.target;
if (target && target.draggable) {
return;
}
var event = params.event;
var mx = zrEvent.getX(event);
var my = zrEvent.getY(event);
var mapType = this._findMapTypeByPos(mx, my);
if (mapType && this._roamMap[mapType] && this._roamMap[mapType] != 'scale') {
this._mousedown = true;
this._mx = mx;
this._my = my;
this._curMapType = mapType;
this.zr.on(zrConfig.EVENT.MOUSEUP, this._onmouseup);
var self = this;
setTimeout(function (){
self.zr.on(zrConfig.EVENT.MOUSEMOVE, self._onmousemove);
},100);
}
},
__onmousemove : function (params) {
if (!this._mousedown || !this._isAlive) {
return;
}
var event = params.event;
var mx = zrEvent.getX(event);
var my = zrEvent.getY(event);
var transform = this._mapDataMap[this._curMapType].transform;
transform.hasRoam = true;
transform.left -= this._mx - mx;
transform.top -= this._my - my;
this._mx = mx;
this._my = my;
this._mapDataMap[this._curMapType].transform = transform;
for (var i = 0, l = this.shapeList.length; i < l; i++) {
if(this.shapeList[i]._mapType == this._curMapType) {
this.shapeList[i].position[0] = transform.left;
this.shapeList[i].position[1] = transform.top;
this.zr.modShape(this.shapeList[i].id);
}
}
this.messageCenter.dispatch(
ecConfig.EVENT.MAP_ROAM,
params.event,
{type : 'move'},
this.myChart
);
this.clearEffectShape(true);
this.zr.refreshNextFrame();
this._justMove = true;
zrEvent.stop(event);
},
__onmouseup : function (params) {
var event = params.event;
this._mx = zrEvent.getX(event);
this._my = zrEvent.getY(event);
this._mousedown = false;
var self = this;
setTimeout(function (){
self._justMove && self.animationEffect();
self._justMove = false;
self.zr.un(zrConfig.EVENT.MOUSEMOVE, self._onmousemove);
self.zr.un(zrConfig.EVENT.MOUSEUP, self._onmouseup);
},120);
},
/**
* Roam controller event handler
*/
__onroamcontroller: function(params) {
var event = params.event;
event.zrenderX = this.zr.getWidth() / 2;
event.zrenderY = this.zr.getHeight() / 2;
var mapTypeControl = params.mapTypeControl;
var top = 0;
var left = 0;
var step = params.step;
switch(params.roamType) {
case 'scaleUp':
event.zrenderDelta = 1;
this.__onmousewheel({
event: event,
mapTypeControl: mapTypeControl
});
return;
case 'scaleDown':
event.zrenderDelta = -1;
this.__onmousewheel({
event: event,
mapTypeControl: mapTypeControl
});
return;
case 'up':
top = -step;
break;
case 'down':
top = step;
break;
case 'left':
left = -step;
break;
case 'right':
left = step;
break;
}
var transform;
var curMapType;
for (curMapType in mapTypeControl) {
if (!this._mapDataMap[curMapType] || !this._activeMapType[curMapType]) {
continue;
}
transform = this._mapDataMap[curMapType].transform;
transform.hasRoam = true;
transform.left -= left;
transform.top -= top;
this._mapDataMap[curMapType].transform = transform;
}
for (var i = 0, l = this.shapeList.length; i < l; i++) {
curMapType = this.shapeList[i]._mapType;
if (!mapTypeControl[curMapType] || !this._activeMapType[curMapType]) {
continue;
}
transform = this._mapDataMap[curMapType].transform;
this.shapeList[i].position[0] = transform.left;
this.shapeList[i].position[1] = transform.top;
this.zr.modShape(this.shapeList[i].id);
}
this.messageCenter.dispatch(
ecConfig.EVENT.MAP_ROAM,
params.event,
{type : 'move'},
this.myChart
);
this.clearEffectShape(true);
this.zr.refreshNextFrame();
clearTimeout(this.directionTimer);
var self = this;
this.directionTimer = setTimeout(function() {
self.animationEffect();
}, 150);
},
/**
* dataRange hoverlink event handler
*/
__ondrhoverlink : function(param) {
var curMapType;
var value;
for (var i = 0, l = this.shapeList.length; i < l; i++) {
curMapType = this.shapeList[i]._mapType;
if (!this._hoverLinkMap[curMapType] || !this._activeMapType[curMapType]) {
continue;
}
value = ecData.get(this.shapeList[i], 'value');
if (value != null && value >= param.valueMin && value <= param.valueMax) {
this.zr.addHoverShape(this.shapeList[i]);
}
}
},
/**
* Click handler
*/
onclick : function (params) {
if (!this.isClick || !params.target || this._justMove || params.target.type == 'icon') {
// the click did not happen on this instance; return immediately
return;
}
this.isClick = false;
var target = params.target;
var name = target.style._name;
var len = this.shapeList.length;
var mapType = target._mapType || '';
if (this._selectedMode[mapType] == 'single') {
for (var p in this._selected) {
// same map type
if (this._selected[p] && this._mapTypeMap[p] == mapType) {
// reset the highlighted shapes (including text)
for (var i = 0; i < len; i++) {
if (this.shapeList[i].style._name == p
&& this.shapeList[i]._mapType == mapType
) {
this.shapeList[i].style = this.shapeList[i]._style;
this.zr.modShape(this.shapeList[i].id);
}
}
p != name && (this._selected[p] = false);
}
}
}
this._selected[name] = !this._selected[name];
// update the clicked shape (including text)
for (var i = 0; i < len; i++) {
if (this.shapeList[i].style._name == name
&& this.shapeList[i]._mapType == mapType
) {
if (this._selected[name]) {
this.shapeList[i].style = this.shapeList[i].highlightStyle;
}
else {
this.shapeList[i].style = this.shapeList[i]._style;
}
this.zr.modShape(this.shapeList[i].id);
}
}
this.messageCenter.dispatch(
ecConfig.EVENT.MAP_SELECTED,
params.event,
{
selected : this._selected,
target : name
},
this.myChart
);
this.zr.refreshNextFrame();
var self = this;
setTimeout(function(){
self.zr.trigger(
zrConfig.EVENT.MOUSEMOVE,
params.event
);
},100);
},
/**
* Refresh
*/
refresh : function (newOption) {
if (newOption) {
this.option = newOption;
this.series = newOption.series;
}
if (this._mapDataRequireCounter > 0) {
this.clear();
}
else {
this.backupShapeList();
}
this._buildShape();
this.zr.refreshHover();
},
/**
* dataRange response
* @param {Object} param
* @param {Object} status
*/
ondataRange : function (param, status) {
if (this.component.dataRange) {
this.refresh();
status.needRefresh = true;
}
return;
},
/**
* Plane coordinates to lon/lat
*/
pos2geo : function (mapType, p) {
if (!this._mapDataMap[mapType].transform) {
return null;
}
return this._mapDataMap[mapType].projection.pos2geo(
this._mapDataMap[mapType].transform, p
);
},
/**
* Public API: plane coordinates to lon/lat
*/
getGeoByPos : function (mapType, p) {
if (!this._mapDataMap[mapType].transform) {
return null;
}
var position = [
this._mapDataMap[mapType].transform.left,
this._mapDataMap[mapType].transform.top
];
if (p instanceof Array) {
p[0] -= position[0];
p[1] -= position[1];
}
else {
p.x -= position[0];
p.y -= position[1];
}
return this.pos2geo(mapType, p);
},
/**
* Lon/lat to plane coordinates
* @param {Object} p
*/
geo2pos : function (mapType, p) {
if (!this._mapDataMap[mapType].transform) {
return null;
}
return this._mapDataMap[mapType].projection.geo2pos(
this._mapDataMap[mapType].transform, p
);
},
/**
* Public API: lon/lat to plane coordinates
*/
getPosByGeo : function (mapType, p) {
if (!this._mapDataMap[mapType].transform) {
return null;
}
var pos = this.geo2pos(mapType, p);
pos[0] += this._mapDataMap[mapType].transform.left;
pos[1] += this._mapDataMap[mapType].transform.top;
return pos;
},
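// Illustrative round trip (assuming access to this map chart instance as `map`
// and that the 'china' map has been laid out):
//   var p = map.getPosByGeo('china', [116.46, 39.92]); // lon/lat -> pixels
//   var g = map.getGeoByPos('china', p);               // back to ~[116.46, 39.92]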
/**
* Public API: map reference position
*/
getMapPosition : function (mapType) {
if (!this._mapDataMap[mapType].transform) {
return null;
}
return [
this._mapDataMap[mapType].transform.left,
this._mapDataMap[mapType].transform.top
];
},
/*
appendShape : function (mapType, shapeList) {
shapeList = shapeList instanceof Array
? shapeList : [shapeList];
for (var i = 0, l = shapeList.length; i < l; i++) {
if (typeof shapeList[i].zlevel == 'undefined') {
shapeList[i].zlevel = this.getZlevelBase();
shapeList[i].z = this.getZBase() + 1;
}
shapeList[i]._mapType = mapType;
this.shapeList.push(shapeList[i]);
this.zr.addShape(shapeList[i]);
}
this.zr.refresh();
},
*/
/**
* After disposal the instance must not be used
*/
onbeforDispose : function () {
this._isAlive = false;
this.zr.un(zrConfig.EVENT.MOUSEWHEEL, this._onmousewheel);
this.zr.un(zrConfig.EVENT.MOUSEDOWN, this._onmousedown);
this.messageCenter.unbind(
ecConfig.EVENT.ROAMCONTROLLER, this._onroamcontroller
);
this.messageCenter.unbind(
ecConfig.EVENT.DATA_RANGE_HOVERLINK, this._ondrhoverlink
);
}
};
zrUtil.inherits(Map, ChartBase);
// chart registration
require('../chart').define('map', Map);
return Map;
}); | textShape._style = zrUtil.clone(textShape.style);
textShape.highlightStyle = zrUtil.clone(textShape.style); |
_version.py | # This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = ""
cfg.parentdir_prefix = "None"
cfg.versionfile_source = "plipify/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
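# Illustrative: with a parentdir_prefix of "plipify-" and a source tree unpacked
# to ".../plipify-1.2.0", this would return
#   {"version": "1.2.0", "full-revisionid": None, "dirty": False,
#    "error": None, "date": None}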
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
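# Parsing sketch (hypothetical describe output): "v1.2-3-g0123abc-dirty" with
# tag_prefix "v" yields pieces = {"closest-tag": "1.2", "distance": 3,
# "short": "0123abc", "dirty": True, "long": <full hex>, "error": None, ...}.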
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
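# Worked example: the pieces sketched above ("closest-tag": "1.2",
# distance 3, short "0123abc", dirty) render as "1.2+3.g0123abc.dirty";
# with no tag at all the result is "0+untagged.3.g0123abc.dirty".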
| Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None} | def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
|
db.rs | use crate::leveldb::LDBKey;
use crate::traits::KVStore;
use db_key::Key;
use log::trace;
use std::path::Path;
use leveldb::database::{
batch::{Batch, Writebatch},
kv::KV,
Database,
};
use leveldb::options::{Options, ReadOptions, WriteOptions};
/// An implementation of `KVStore` trait against `leveldb`.
pub struct LDBStore {
pub(crate) db: Database<LDBKey>,
}
impl LDBStore {
pub fn new(path: &Path) -> Self {
let mut opts = Options::new();
opts.create_if_missing = true;
let db = match Database::open(path, opts) {
Ok(db) => db,
Err(e) => panic!("failed to open database: {:?}", e),
};
Self { db }
}
}
impl KVStore for LDBStore {
fn | (&self, key: &[u8]) -> Option<Vec<u8>> {
let ldb_key = LDBKey(key.to_vec());
let read_opts = ReadOptions::new();
let res = self.db.get(read_opts, ldb_key);
match res {
Ok(data) => data,
Err(_) => panic!("failed reading data"),
}
}
fn store(&mut self, changes: &[(&[u8], &[u8])]) {
let mut batch = Writebatch::<LDBKey>::new();
for (k, v) in changes {
let k = LDBKey::from_u8(k);
batch.put(k, v);
}
let res = self.db.write(WriteOptions::new(), &batch);
if res.is_err() {
panic!("failed writing data");
}
}
fn close(&mut self) {
trace!("dropping `LDBStore`");
drop(self)
}
}
impl Drop for LDBStore {
fn drop(&mut self) {
self.close();
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn leveldb_sanity() {
let mut db = LDBStore::new(Path::new("leveldb-tests"));
db.store(&[(&[10, 20, 30], &[40, 50, 60])]);
let v = db.get(&[10, 20, 30]).unwrap();
assert_eq!(vec![40, 50, 60], v);
drop(db); // release the LevelDB lock before reopening the same path
let mut db = LDBStore::new(Path::new("leveldb-tests"));
let v = db.get(&[10, 20, 30]).unwrap();
assert_eq!(vec![40, 50, 60], v);
}
}
| get |
ipv6.go | package column
import (
"net"
"github.com/loloxiaoz/clickhouse-driver/lib/binary"
)
type IPv6 struct {
base
}
func (*IPv6) Read(decoder *binary.Decoder) (interface{}, error) {
v, err := decoder.Fixed(16)
if err != nil {
return nil, err
}
return net.IP(v), nil
}
func (ip *IPv6) Write(encoder *binary.Encoder, v interface{}) error {
netIP, ok := v.(net.IP) | if !ok {
return &ErrUnexpectedType{
T: v,
Column: ip,
}
}
if _, err := encoder.Write([]byte(netIP.To16())); err != nil {
return err
}
return nil
} | |
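// Usage sketch (assumed encoder/decoder wiring): Write expects a net.IP and
// emits its 16-byte form, e.g.
//   err := col.Write(enc, net.ParseIP("2001:db8::1"))
// while Read returns the stored 16 bytes back as a net.IP.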
variables_c.js | var searchData= | ['pi_702',['Pi',['../structmackey_1_1SmithNormalForm.html#acc749b38a76d7473e878a4d476d24714',1,'mackey::SmithNormalForm']]],
['power_703',['power',['../structmackey_1_1C2Power_3_01N_00_01__rank_00_01__diff_01_4.html#a78af5ea16108df3eb587678f02444029',1,'mackey::C2Power< N, _rank, _diff >::power()'],['../structmackey_1_1C__4.html#a68a283d62407da5b56b99c1d00b8639b',1,'mackey::C_4::power()']]],
['previous_704',['previous',['../classmackey_1_1ShortestPaths.html#a185f0cc2c5e3e954d48a113ba30721ec',1,'mackey::ShortestPaths']]],
['prime_705',['prime',['../structmackey_1_1C2Power_3_01N_00_01__rank_00_01__diff_01_4.html#a75c204ff3ad0380f817cdae8fd5085a0',1,'mackey::C2Power< N, _rank, _diff >::prime()'],['../structmackey_1_1C__4.html#a494fd70fe3ba8ac3ed4946c39de3bbe8',1,'mackey::C_4::prime()']]]
]; | [
['p_700',['P',['../structmackey_1_1SmithNormalForm.html#ac2b49916aa07d1560c360da334522dca',1,'mackey::SmithNormalForm']]],
['paths_701',['paths',['../classmackey_1_1ShortestPaths.html#a70466929b4e446bd1fd0d04deadd7970',1,'mackey::ShortestPaths']]], |
juego.component.ts | import { Component, OnInit, ViewChild } from '@angular/core';
import { ThemePalette } from '@angular/material/core';
import { SelectionModel } from '@angular/cdk/collections';
import { MatDialog, MatTabGroup } from '@angular/material';
import { Location } from '@angular/common';
import { MatTableDataSource } from '@angular/material/table';
import { Form, FormBuilder, FormGroup, Validators } from '@angular/forms';
// tslint:disable-next-line:max-line-length
import {
Nivel, Alumno, Equipo, Juego, JuegoDeCompeticion, Punto, TablaPuntosFormulaUno,
AlumnoJuegoDePuntos, EquipoJuegoDePuntos, Grupo, AlumnoJuegoDeCompeticionLiga,
EquipoJuegoDeCompeticionLiga, Jornada, AlumnoJuegoDeCompeticionFormulaUno,
EquipoJuegoDeCompeticionFormulaUno, Cuestionario, JuegoDeAvatar, FamiliaAvatares,
AlumnoJuegoDeAvatar, AsignacionPuntosJuego, Coleccion, AlumnoJuegoDeColeccion,
EquipoJuegoDeColeccion, Escenario, JuegoDeGeocaching, AlumnoJuegoDeGeocaching, PuntoGeolocalizable,
JuegoDeVotacionUnoATodos, AlumnoJuegoDeVotacionUnoATodos, Profesor,
JuegoDeVotacionTodosAUno, AlumnoJuegoDeVotacionTodosAUno, CuestionarioSatisfaccion,
JuegoDeCuestionarioSatisfaccion, AlumnoJuegoDeCuestionarioSatisfaccion, Rubrica, FamiliaDeImagenesDePerfil
} from '../../clases/index';
// Services
import { SesionService, CalculosService, PeticionesAPIService, ComServerService } from '../../servicios/index';
import { Observable, of } from 'rxjs';
import 'rxjs';
import { DialogoConfirmacionComponent } from '../COMPARTIDO/dialogo-confirmacion/dialogo-confirmacion.component';
import Swal from 'sweetalert2';
import * as URL from 'src/app/URLs/urls';
import { AsignaCuestionarioComponent } from './asigna-cuestionario/asigna-cuestionario.component';
import { JuegoDeCuestionario } from 'src/app/clases/JuegoDeCuestionario';
import { AlumnoJuegoDeCuestionario } from 'src/app/clases/AlumnoJuegoDeCuestionario';
import { Router } from '@angular/router';
import { AsignaEscenarioComponent } from './asigna-escenario/asigna-escenario.component';
import { AsignaPreguntasComponent } from './asigna-preguntas/asigna-preguntas.component';
import { JuegoDeEvaluacion } from '../../clases/JuegoDeEvaluacion';
import { log } from 'util';
import { EquipoJuegoEvaluado } from '../../clases/EquipoJuegoEvaluado';
import { AlumnoJuegoEvaluado } from '../../clases/AlumnoJuegoEvaluado';
import { JuegoDeEscapeRoom } from 'src/app/clases/JuegoDeEscapeRoom';
import { AlumnoJuegoEscapeRoom } from 'src/app/clases/AlumnoJuegoEscapeRoom';
import { stringify } from '@angular/core/src/util';
import { ObjetoEscape } from 'src/app/clases/ObjetoEscape';
import { Mochila } from 'src/app/clases/Mochila';
import { ObjetoEnigma } from 'src/app/clases/ObjetoEnigma';
import { EscenarioEscapeRoom } from 'src/app/clases/EscenarioEscapeRoom';
import { NgbModal } from '@ng-bootstrap/ng-bootstrap';
import { ObjetoGlobalEscape } from 'src/app/clases/ObjetoGlobalEscape';
import { ObjetoJuego } from 'src/app/clases/ObjetoJuego';
import { PartidaEscape } from 'src/app/clases/PartidaEscape';
import { EscenaDeJuego } from 'src/app/clases/EscenaDeJuego';
import { partialRefresh } from '@syncfusion/ej2-grids';
import { THIS_EXPR } from '@angular/compiler/src/output/output_ast';
import { ImagenEscenario } from 'src/app/clases/ImagenEscenario';
export interface OpcionSeleccionada {
nombre: string;
id: string;
}
export interface ChipColor {
nombre: string;
color: ThemePalette;
}
@Component({
selector: 'app-juego',
templateUrl: './juego.component.html',
styleUrls: ['./juego.component.scss']
})
export class JuegoComponent implements OnInit {
///////////////////////////////////// GENERAL VARIABLES FOR THE COMPONENT ///////////////////////////////////
profesorId: number;
profesor: Profesor;
grupo: Grupo;
alumnosGrupo: Alumno[];
equiposGrupo: Equipo[];
@ViewChild('stepper') stepper;
@ViewChild('tabs') tabGroup: MatTabGroup;
// tslint:disable-next-line:ban-types
juegoCreado: Boolean = false;
// This variable is used to determine whether we must warn the user that
// they are abandoning the game-creation process
creandoJuego = false;
private host = URL.host;
objetoEnigma: ObjetoEnigma;
juego: any;
juegoDeCuestionario: JuegoDeCuestionario;
juegoDeCompeticion: JuegoDeCompeticion;
juegoDeAvatar: JuegoDeAvatar;
juegoDeGeocaching: JuegoDeGeocaching;
escenariosSecundariosProfesor: boolean = false;
varHelper: string;
objeto1: ObjetoEscape;
objeto2: ObjetoEscape;
objetosProfesor: boolean = false;
objetoModificadoEnigma: ObjetoEnigma;
// Information common to all games
myForm: FormGroup;
tipoDeEscenarioSeleccionado: string;
nombreDelJuego: string;
tipoDeJuegoSeleccionado: string;
modoDeJuegoSeleccionado: string;
tengoNombre = false;
tengoTipo = false;
tengoModo = false;
objetoPista: ObjetoEscape;
seleccionTipoJuego: ChipColor[] = [
{ nombre: 'Juego De Puntos', color: 'primary' },
{ nombre: 'Juego De Colección', color: 'accent' },
{ nombre: 'Juego De Competición', color: 'warn' },
{ nombre: 'Juego De Avatar', color: 'primary' },
{ nombre: 'Juego De Cuestionario', color: 'accent' },
{ nombre: 'Juego De Geocaching', color: 'warn' },
{ nombre: 'Juego De Votación', color: 'primary' },
{ nombre: 'Juego De Cuestionario de Satisfacción', color: 'accent' },
{ nombre: 'Juego De Evaluación', color: 'accent' },
{ nombre: 'Juego De Escape Room', color: 'warn' }
];
seleccionModoJuego: ChipColor[] = [
{ nombre: 'Individual', color: 'primary' },
{ nombre: 'Equipos', color: 'accent' }
];
seleccionEscenario: ChipColor[] = [
{ nombre: 'Habitación', color: 'primary' },
{ nombre: 'Cocina', color: 'accent' },
{ nombre: 'Baño', color: 'warn' },
];
// information for creating a points game
puntosDelJuego: Punto[] = [];
nivelesDelJuego: Nivel[] = [];
logosNiveles: FormData[] = [];
// information for creating a collection game
coleccionSeleccionada: Coleccion;
tengoColeccion = false;
modoAsignacion;
configuradoEscenarioPrincipal: boolean = false;
// information for creating a quiz game
cuestionario: Cuestionario;
tengoCuestionario = false;
puntuacionCorrecta: number;
puntuacionIncorrecta: number;
modoPresentacion: string;
tengoModoPresentacion = false;
seleccionModoPresentacion: string[] = ['Mismo orden para todos',
'Preguntas desordenadas',
'Preguntas y respuestas desordenadas'];
tiempoLimite: number;
tipoDeJuegoDeCuestionarioSeleccionado: string;
tengoTipoJuegoCuestionario = false;
seleccionTipoDeJuegoDeCuestionario: ChipColor[] = [
{ nombre: 'Test clásico', color: 'primary' },
{ nombre: 'Kahoot', color: 'accent' },
];
// information for creating an avatar game
familiasElegidas: number[];
tengoFamilias = false;
// Information for creating an evaluation game
seleccionTipoDeEvaluacion: ChipColor[] = [
{ nombre: '1 a N', color: 'primary' },
{ nombre: 'Todos con todos', color: 'warn' }
];
tipoDeEvaluacionSeleccionado: string;
tengoTipoDeEvaluacion = false;
//
seleccionRelacionesEvaluacion: ChipColor[] = [
{ nombre: 'A elegir', color: 'primary' },
{ nombre: 'Aleatorio', color: 'warn' }
];
relacionesEvaluacionSeleccionado: string;
tengoRelacionEvaluacion = false;
relacionesMap = new Map();
numeroDeMiembros = 1;
//
profesorEvalua = false;
profesorEvaluaModo = 'normal';
autoevaluacion = false;
//
tengoRubrica = false;
rubricaElegida: Rubrica;
rubricas: Rubrica[];
//
seleccionCriterioEvaluacion: ChipColor[] = [
{ nombre: 'Por pesos', color: 'primary' },
{ nombre: 'Por penalización', color: 'warn' }
];
criterioEvaluacionSeleccionado: string;
tengoCriterioEvaluacion = false;
//
pesosArray = [];
pesosSuman100 = true;
penalizacionArray = [];
//
seleccionEquiposEvaluacion: ChipColor[] = [
{ nombre: 'Individualmente', color: 'primary' },
{ nombre: 'Por Equipos', color: 'warn' }
];
equiposEvaluacionSeleccionado: string;
tengoEquipoEvaluacion = false;
//
relacionAlumnosEquipos = [];
comprobacionDeN = false;
todosTienenEvaluador = false;
// Information for creating a competition game
tipoDeCompeticionSeleccionado: string;
seleccionTipoDeCompeticion: ChipColor[] = [
{ nombre: 'Liga', color: 'primary' },
{ nombre: 'Fórmula Uno', color: 'warn' },
{ nombre: 'Torneo', color: 'accent' }
];
tengoTipoDeCompeticion = false;
numeroDeJornadas: number;
tengoNumeroDeJornadas = false;
jornadasLiga: Jornada[];
jornadasFormulaUno: Jornada[];
objetosEnigmaModal: boolean = false;
nuevaPuntuacion: number;
tengoNuevaPuntuacion = false;
Puntuacion: number[] = [];
selection = new SelectionModel<any>(true, []);
dataSource: any;
TablaPuntuacion: TablaPuntosFormulaUno[];
displayedColumnsTablaPuntuacion: string[] = ['select', 'Posicion', 'Puntos'];
displayedColumnsEscenarioSecundario: string[] = ['mapa', 'descripcion', 'añadir'];
displayedColumns: string[] = ['mapa', 'descripcion', 'verObjetos', 'añadir'];
displayedColumnsObjetos: string[] = ['nombre', 'tipoDeObjeto', 'añadir', 'ver'];
displayedColumnsObjetosEnigma: string[] = ['nombre', 'pregunta', 'respuesta', 'escoger'];
displayedColumnsObjetosEscapePeso: string[] = ['nombre', 'peso', 'añadir']
objetosProfesorModal: boolean = false;
mensaje: string = 'Estás seguro/a de que quieres eliminar el escenario llamado: ';
escenariosProfesor: EscenarioEscapeRoom[];
objetoEnigmaModificarGlobal: ObjetoGlobalEscape;
// Information for the geocaching game
escenario: Escenario;
tengoEscenario = false;
puntosgeolocalizablesEscenario: PuntoGeolocalizable[];
numeroDePuntosGeolocalizables: number;
idescenario: number;
PreguntasBasicas: number[];
PreguntasBonus: number[];
tengoPreguntas = false;
puntuacionCorrectaGeo: number;
puntuacionIncorrectaGeo: number;
puntuacionCorrectaGeoBonus: number;
puntuacionIncorrectaGeoBonus: number;
objetosEnigma: ObjetoEnigma[] = [];
editarObjetoEnigmaVar: boolean = false;
// information for creating a voting game
tipoDeVotacionSeleccionado: string;
seleccionTipoDeVotacion: ChipColor[] = [
{ nombre: 'Uno A Todos', color: 'primary' },
{ nombre: 'Todos A Uno', color: 'warn' }
];
modoDeRepartoSeleccionado: string;
seleccionModoReparto: ChipColor[] = [
{ nombre: 'Reparto fijo según posición', color: 'primary' },
{ nombre: 'Reparto libre', color: 'warn' }
];
tengoModoReparto = false;
puntosARepartir = 0;
tengoTipoDeVotacion = false;
conceptos: string[];
listaConceptos: any[] = [];
dataSourceConceptos;
nombreConcepto;
pesoConcepto;
pesos: number[];
totalPesos: number;
conceptosAsignados = false;
displayedColumnsConceptos: string[] = ['nombreConcepto', 'pesoConcepto', ' '];
displayedColumnsEscenarios: string[] = ['mapa', 'descripcion', 'imagenes', 'escoger'];
displayedColumnsPosiciones: string[] = ['nombre', 'tipoDeObjeto', 'escogerPosicion'];
displayedColumnsPos: string[] = ['posicion', 'escogerPosicion'];
objetosEscapePeso: ObjetoEscape[] = [];
// Information for the satisfaction questionnaire game
cuestionarioSatisfaccion: CuestionarioSatisfaccion;
tengoCuestionarioSatisfaccion = false;
descripcionCuestionarioSatisfaccion: string;
tituloObjetoEnigma: string;
final = false;
// WE BUILD LISTS OF THE ACTIVE, INACTIVE AND PREPARED GAMES
// Logically these would be lists of type Juego, but we will store objects
// of several types (for example, Juego and JuegoDeCuestionario)
juegosActivos: any[];
juegosInactivos: any[];
juegosPreparados: any[];
varTitulo: string;
varTituloColumnaTabla: string;
varLineaDivisoria: string;
// tslint:disable-next-line:no-inferrable-types
opcionSeleccionada: string = 'todosLosJuegos';
escenarioEscapeRoom: EscenarioEscapeRoom;
escenarioSecundarioEscapeRoom: EscenarioEscapeRoom;
escogidoEscenarioSecundario: boolean = false;
objetosEscapePrimerEscenario: ObjetoEscape[] = [];
objetosEscapePrimerEscenarioVar: boolean = false;
varBool: boolean = false;
// criterioComplemento1: string;
//////////////////////////////////// PARAMETERS FOR THE CREATE-GAME PAGE //////////////////////////////////////
editObject: FormGroup;
editPista: FormGroup;
escenaYaescogida: boolean = false;
juegoEscape: JuegoDeEscapeRoom;
numeroDeEscenasSeleccionadas: number;
objetosJuego: ObjetoJuego[] = [];
objetosGlobal: ObjetoGlobalEscape[] = [];
objetosEscogidos: ObjetoGlobalEscape[] = [];
objetosEscogidosModal: boolean = false;
objetosJuegoEscogidos: ObjetoJuego[] = [];
countEscape: number = 3;
countEnigma: number = 2;
objetoGlobalEscogido: ObjetoGlobalEscape;
posicionesEscape: number[] = [1, 2, 3];
posicionesEnigma: number[] = [4, 5];
dataSourcePosicion;
posicionesVar2: boolean = false;
numeroEscena: number = 1;
partidasEscape: PartidaEscape[] = [];
escenasEscape: Map<number, EscenaDeJuego> = new Map<number, EscenaDeJuego>();
listaEscenasEscape: EscenaDeJuego[] = [];
mapEscenaPorPosicion: Map<number, number> = new Map<number, number>();
objetosEnigmaEscogidos: ObjetoJuego[] = [];
objetosEnigmaEscogidosGlobal: ObjetoGlobalEscape[] = [];
objetosEnigmaEscogidosModal: boolean = false;
displayedColumnsEnigmaPrincipal: string[] = ['nombre', 'escogerPrincipal', 'modificarObjeto'];
objetosRequisitadosModal: boolean = false;
displayedColumnsRequisitos: string[] = ['nombre', 'escoger'];
objetosRequisitos: ObjetoGlobalEscape[] = [];
objetosSinRequisitos: ObjetoGlobalEscape[] = [];
mapRequisitosEscena: Map<number, ObjetoGlobalEscape> = new Map<number, ObjetoGlobalEscape>();
pista: ObjetoJuego;
imagenes: ImagenEscenario [] = [];
constructor(
public dialog: MatDialog,
private calculos: CalculosService,
private sesion: SesionService,
private comService: ComServerService,
private location: Location,
private peticionesAPI: PeticionesAPIService,
// tslint:disable-next-line:variable-name
private _formBuilder: FormBuilder,
private router: Router,
private modal: NgbModal
) { }
ngOnInit() {
this.grupo = this.sesion.DameGrupo();
console.log(' Grupo ' + this.grupo);
this.alumnosGrupo = this.sesion.DameAlumnosGrupo();
this.profesor = this.sesion.DameProfesor();
this.profesorId = this.sesion.DameProfesor().id;
this.varTitulo = 'titulo' + this.profesor.estacion;
this.varLineaDivisoria = 'lineaDivisoria' + this.profesor.estacion;
console.log("this.lineaDiv: ", this.varLineaDivisoria);
this.editPista = this._formBuilder.group({
texto: ['', Validators.required]
});
this.editObject = this._formBuilder.group({
pregunta: ['', Validators.required],
respuesta: ['', Validators.required],
imagen: ['', Validators.required]
});
// The group's team list is not in the session service, so we have to
// go and fetch it
this.peticionesAPI.DameEquiposDelGrupo(this.grupo.id)
.subscribe(equipos => {
if (equipos[0] !== undefined) {
console.log('Hay equipos', equipos[0]);
this.equiposGrupo = equipos;
console.log(this.equiposGrupo);
for (const equipo of equipos) {
this.peticionesAPI.DameAlumnosEquipo(equipo.id).subscribe((alumnos: Alumno[]) => {
this.relacionAlumnosEquipos.push({ equipoId: equipo.id, alumnos });
console.log('relacion alumnos equipos', this.relacionAlumnosEquipos);
});
}
} else {
// message to the user
console.log('Este grupo aun no tiene equipos');
this.equiposGrupo = undefined;
}
});
// Now we fetch the list of games
// this operation is complicated, which is why it lives in calculos
this.calculos.DameListaJuegos(this.grupo.id)
.subscribe(listas => {
console.log('He recibido los juegos');
console.log(listas);
this.juegosActivos = listas.activos;
// Si la lista aun esta vacia la dejo como indefinida para que me
// salga el mensaje de que aun no hay juegos
if (listas.activos[0] === undefined) {
this.juegosActivos = undefined;
console.log('No hay inactivos');
} else {
this.juegosActivos = listas.activos;
console.log('hay activos');
}
if (listas.inactivos[0] === undefined) {
this.juegosInactivos = undefined;
console.log('No hay inactivos');
} else {
this.juegosInactivos = listas.inactivos;
console.log('hay inactivos');
}
if (listas.preparados[0] === undefined) {
this.juegosPreparados = undefined;
} else {
this.juegosPreparados = listas.preparados;
}
});
// API request for the evaluation game
this.peticionesAPI.DameRubricasProfesor(this.profesorId).subscribe(rubricas => {
console.log('Tengo rubricas', rubricas);
this.rubricas = rubricas;
});
// End of API request for the evaluation game
//
// In this form we will collect the information the user enters
// depending on the game type
this.myForm = this._formBuilder.group({
NombreDelJuego: ['', Validators.required],
PuntuacionCorrecta: ['', Validators.required],
PuntuacionIncorrecta: ['', Validators.required],
NumeroDeJornadas: ['', Validators.required],
criterioPrivilegioComplemento1: ['', Validators.required],
criterioPrivilegioComplemento2: ['', Validators.required],
criterioPrivilegioComplemento3: ['', Validators.required],
criterioPrivilegioComplemento4: ['', Validators.required],
criterioPrivilegioVoz: ['', Validators.required],
criterioPrivilegioVerTodos: ['', Validators.required],
NuevaPuntuacion: ['', Validators.required],
PuntuacionCorrectaGeo: ['', Validators.required],
PuntuacionIncorrectaGeo: ['', Validators.required],
PuntuacionCorrectaGeoBonus: ['', Validators.required],
PuntuacionIncorrectaGeoBonus: ['', Validators.required],
NombreDelConcepto: ['', Validators.required],
PesoDelConcepto: ['', Validators.required],
TiempoLimite: ['', Validators.required],
EscenasDelJuego: ['', Validators.required]
});
this.TablaPuntuacion = [];
this.TablaPuntuacion[0] = new TablaPuntosFormulaUno(1, 10);
this.dataSource = new MatTableDataSource(this.TablaPuntuacion);
this.Puntuacion[0] = 10;
this.listaConceptos = [];
this.totalPesos = 0;
console.log("variable TipoDeEscenarioSeleccionado: ", this.tipoDeEscenarioSeleccionado);
this.tipoDeEscenarioSeleccionado = null;
}
//// ESCAPE ROOM
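// Opens the requested modal; `number` selects which helper runs first
// (1: the professor's scenarios, 2: global objects, 3: chosen objects,
// 4: chosen enigma objects, 5: prerequisite objects, 6: hint creation).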
openModal(contenido, number: number) {
if (number == 1) {
this.modal.open(contenido, { centered: true, size: "lg" });
this.TraeEscenariosDelProfesor();
} else if (number == 2) {
this.verObjetos(contenido);
} else if (number == 3) {
this.verObjetosEscogidos(contenido);
} else if (number == 4) {
this.verObjetosEnigmaEscogidos(contenido);
} else if (number == 5) {
this.verObjetosRequisito(contenido);
} else if (number == 6) {
this.verPistaParaCrear(contenido);
}
}
escogerNumeroEscenas() {
console.clear();
let number: Number;
this.juegoEscape = new JuegoDeEscapeRoom(this.modoDeJuegoSeleccionado, this.grupo.id, this.myForm.value.NombreDelJuego, true, "Juego De Escape Room");
this.numeroDeEscenasSeleccionadas = this.myForm.value.EscenasDelJuego;
this.escenaYaescogida = true;
console.log("Número de escenas:", this.numeroDeEscenasSeleccionadas);
// Earlier validation attempt, kept for reference:
// number = Number(this.myForm.value.EscenasDelJuego);
// // `number == NaN` is always false because NaN compares unequal to
// // everything, itself included; use Number.isNaN(number) instead.
// if (Number.isNaN(number)) {
// Swal.fire("¡Tienes que escribir un número!", "", "error");
// } else {
// this.numeroDeEscenasSeleccionadas = this.myForm.value.EscenasDelJuego;
// this.escenaYaescogida = true;
// }
}
TraeEscenariosDelProfesor() {
this.peticionesAPI.DameEscenariosDelProfesorEscapeRoom(this.profesorId)
.subscribe(escenarios => {
if (escenarios[0] != undefined) {
console.log('Voy a dar la lista de escenarios');
this.escenariosProfesor = escenarios;
this.escenariosProfesor.forEach(esc => {
console.log("esc: ", esc);
this.peticionesAPI.DameImagenDelEscenario(this.profesorId, esc.imagenId).subscribe(imagen =>{
console.log("imagen", imagen);
this.imagenes.push(imagen)
});
})
this.dataSource = new MatTableDataSource(this.escenariosProfesor);
console.log(this.escenariosProfesor);
} else {
Swal.fire("No tienes escenarios!", "", "error");
this.escenariosProfesor = undefined;
}
});
}
escogerEscenarioEscape(escenario: EscenarioEscapeRoom) {
Swal.fire({
title: escenario.mapa,
text: '¿Estás seguro que quieres escoger este escenario?',
confirmButtonText: 'Sí',
showCancelButton: true,
cancelButtonText: 'No'
}).then((result) => {
if (result.value == true) {
this.escenarioEscapeRoom = escenario;
}
});
}
verEscenarioEscape(escenario: EscenarioEscapeRoom){
let imagenEsc: ImagenEscenario = new ImagenEscenario ();
this.imagenes.forEach(imagen => {
if(imagen.id == escenario.imagenId){
imagenEsc = imagen;
}
});
Swal.fire({
title: escenario.mapa,
text:'La imagen se llama: ' + imagenEsc.nombre,
imageUrl: this.host+':3000/api/Imagenes/ImagenesEscenarioEscape/download/'+imagenEsc.nombreDelFichero,
imageWidth: 400,
imageHeight: 200,
confirmButtonText: 'Volver',
}).then((result) => { });
}
verObjetos(objetos) {
this.objetosProfesorModal = true;
this.peticionesAPI.DameObjetosGlobalesDelProfesorEscapeRoom(this.profesor.id).subscribe(objetosGlobales => {
if (objetosGlobales != null) {
this.objetosGlobal = objetosGlobales;
this.dataSource = new MatTableDataSource(this.objetosGlobal);
} else {
Swal.fire("¡No tienes objetos guardados!", "", "error");
}
});
this.modal.open(objetos, { centered: true, size: "lg" });
}
anadirObjeto(objeto: ObjetoGlobalEscape) {
let bool: boolean = false;
this.objetosJuegoEscogidos.forEach(obj => {
if (obj.objetoId == objeto.id) {
Swal.fire("¡Ya has usado este objeto, no se puede repetir!", "", "error");
bool = true;
}
}); | text: '¿Estás seguro que quieres escoger este objeto?',
confirmButtonText: 'Sí',
showCancelButton: true,
cancelButtonText: 'No'
}).then((result) => {
if (result.value == true) {
this.objetosEscogidos.push(objeto);
this.objetosSinRequisitos.push(objeto);
this.countEscape = this.countEscape - 1;
}
});
} else {
if (objeto.tipo == "objetoEscape") {
Swal.fire("Ya has cogido 3 objetos escape!", "", "error");
} else {
if (this.countEnigma > 0 && objeto.tipo == "objetoEnigma") {
Swal.fire({
text: '¿Estás seguro que quieres escoger este objeto?',
confirmButtonText: 'Sí',
showCancelButton: true,
cancelButtonText: 'No'
}).then((result) => {
if (result.value == true) {
this.objetosEscogidos.push(objeto);
this.countEnigma = this.countEnigma - 1;
}
});
} else {
Swal.fire("Ya has cogido 2 objetos enigma!", "", "error");
}
}
}
}
}
verObj(objeto: ObjetoGlobalEscape) {
Swal.fire({
title: objeto.nombre,
imageUrl: this.host+':3000/api/Imagenes/ImagenesObjetoGlobalEscape/download/'+objeto.imagen,
imageWidth: 400,
imageHeight: 200,
confirmButtonText: 'Volver',
}).then((result) => { });
}
verObjetosEscogidos(objetos) {
this.objetosEscogidosModal = true;
this.dataSource = new MatTableDataSource(this.objetosEscogidos);
this.modal.open(objetos, { centered: true, size: "lg" });
}
verObjetosEnigmaEscogidos(objetos) {
this.objetosEnigmaEscogidosModal = true;
this.dataSource = new MatTableDataSource(this.objetosEnigmaEscogidosGlobal);
this.modal.open(objetos, { centered: true, size: "lg" });
}
verPosiciones(objeto: ObjetoGlobalEscape, contenido) {
this.objetoGlobalEscogido = objeto;
this.posicionesVar2 = true;
if (objeto.tipo == "objetoEscape") {
this.dataSourcePosicion = new MatTableDataSource(this.posicionesEscape);
}
if (objeto.tipo == "objetoEnigma") {
this.dataSourcePosicion = new MatTableDataSource(this.posicionesEnigma);
}
this.modal.open(contenido, { centered: true, size: "lg" });
}
escogerPosicion(posicion: number) {
let objetoJuego;
if (this.objetoGlobalEscogido.tipo == "objetoEscape") {
objetoJuego = new ObjetoJuego(this.objetoGlobalEscogido.id, this.escenarioEscapeRoom.id, "string", "string", false, false, false, posicion, this.juegoEscape.id, this.numeroEscena, false, false, 1);
this.objetosJuegoEscogidos.push(objetoJuego);
this.posicionesEscape.forEach((value, index) => {
if (value == posicion) {
this.posicionesEscape.splice(index, 1);
}
});
} else {
this.objetosEnigmaEscogidosGlobal.push(this.objetoGlobalEscogido);
objetoJuego = new ObjetoJuego(this.objetoGlobalEscogido.id, this.escenarioEscapeRoom.id, "string", "string", false, false, false, posicion, this.juegoEscape.id, this.numeroEscena, false, false, 1);
this.objetosJuegoEscogidos.push(objetoJuego);
this.objetosEnigmaEscogidos.push(objetoJuego);
this.posicionesEnigma.forEach((value, index) => {
if (value == posicion) {
this.posicionesEnigma.splice(index, 1);
}
});
}
Swal.fire("¡Listo!", "", 'success');
}
escogerPrincipal(objeto: ObjetoGlobalEscape) {
let objetoJu: ObjetoJuego;
this.objetosEnigmaEscogidos.forEach(objetoJuego => {
if (objetoJuego.objetoId == objeto.id) {
objetoJuego.principal = true;
objetoJu = objetoJuego;
}
});
this.objetosJuegoEscogidos.forEach(objetoJuegoEscogido => {
if (objetoJuegoEscogido.objetoId == objetoJu.objetoId && objetoJuegoEscogido.escenaId == objetoJu.escenaId) {
objetoJuegoEscogido.principal = true;
}
});
Swal.fire("Has escogido el objeto " + objeto.nombre + " como principal.", "", "success");
this.objetosEnigmaEscogidos = [];
}
editarEnigma(objeto: ObjetoGlobalEscape, objetoModal) {
this.tituloObjetoEnigma = objeto.nombre;
this.objetoEnigmaModificarGlobal = objeto;
if (objeto.tipo == "objetoEscape") {
Swal.fire("No se puede editar un objeto escape", "", "info");
} else {
this.editarObjetoEnigmaVar = true;
this.modal.open(objetoModal, { centered: true, size: "lg" });
}
}
modificarObjeto() {
Swal.fire({
title: this.objetoEnigmaModificarGlobal.nombre,
text: '¿Estás seguro de la configuración del objeto?',
confirmButtonText: 'Sí',
showCancelButton: true,
cancelButtonText: 'No'
}).then((result) => {
if (result.value == true) {
this.objetosJuegoEscogidos.forEach(objeto => {
if (objeto.objetoId == this.objetoEnigmaModificarGlobal.id) {
objeto.pregunta = this.editObject.value.pregunta;
objeto.respuesta = this.editObject.value.respuesta;
}
});
}
});
}
verObjetosRequisito(objetos) {
this.objetosRequisitadosModal = true;
this.dataSource = new MatTableDataSource(this.objetosSinRequisitos);
this.modal.open(objetos, { centered: true, size: "lg" });
}
escogerObjetoRequisito(objeto: ObjetoGlobalEscape) {
let bool: boolean = false;
this.objetosSinRequisitos.forEach((value, index) => {
if (value == objeto) {
this.objetosSinRequisitos.splice(index, 1);
bool = true;
}
});
if (!bool) {
Swal.fire("Este objeto ya esta escogido.", "", "warning");
} else {
Swal.fire({
title: objeto.nombre,
text: '¿Estás seguro de la configuración del objeto?',
confirmButtonText: 'Sí',
showCancelButton: true,
cancelButtonText: 'No'
}).then((result) => {
if (result.value == true) {
this.objetosJuegoEscogidos.forEach(objetoJ => {
if (objetoJ.objetoId == objeto.id) {
objetoJ.requerido = true;
objetoJ.requeridoEscenaId = this.numeroEscena;
}
});
}
});
}
}
verPistaParaCrear(objetoModal) {
this.modal.open(objetoModal, { centered: true, size: "lg" });
}
crearPista() {
this.pista = new ObjetoJuego(1, 1, this.editPista.value.texto, "string", false, false, false, 1, 1, 1, false, true, 1);
Swal.fire("¡Creada!", "", "success");
}
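// crearJuegoDeEscapeRoom confirms the current scene and stores it; on the
// last scene it creates the game, enrolls every student of the group and,
// for each stored scene, reuses or creates the scene record plus its match
// (partida), key (llave), hint (pista) and chosen objects through nested
// API subscriptions.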
crearJuegoDeEscapeRoom() {
Swal.fire({
title: "Escena " + this.numeroEscena,
text: '¿Estás seguro de la configuración de la escena?',
confirmButtonText: 'Sí',
showCancelButton: true,
cancelButtonText: 'No'
}).then((result) => {
if (result.value == true) {
let escenaEscape: EscenaDeJuego;
// console.clear();
escenaEscape = new EscenaDeJuego(this.escenarioEscapeRoom.id, this.numeroEscena);
console.log("Numero de escena: ", this.numeroEscena);
console.log("Escena: ", escenaEscape);
console.log("ObjetosJuego para esta escena: ", this.objetosJuegoEscogidos);
this.listaEscenasEscape.push(escenaEscape);
if (this.numeroEscena < this.numeroDeEscenasSeleccionadas) {
this.posicionesEscape = [1, 2, 3];
this.posicionesEnigma = [4, 5];
this.objetosEscogidos = [];
this.objetosEnigmaEscogidosGlobal = [];
this.countEnigma = 2;
this.countEscape = 3;
this.numeroEscena = this.numeroEscena + 1;
Swal.fire("Escena configurada!", "", "success");
}
else {
// CREATE THE ESCAPE ROOM
console.log("This objetos juego escogidos: ", this.objetosJuegoEscogidos);
this.peticionesAPI.CreaJuegoDeEscapeRoom(this.juegoEscape, this.grupo.id).subscribe({
next: data => {
this.juegoEscape = data;
console.log("This.juego: ", this.juegoEscape);
// CREATE STUDENT RECORDS
this.alumnosGrupo.forEach(alumno => {
let alumnoEscape: AlumnoJuegoEscapeRoom = new AlumnoJuegoEscapeRoom(alumno.id, "Sin escoger", this.juegoEscape.id, 1);
this.peticionesAPI.InscribeAlumnoJuegoEscapeRoom(alumnoEscape).subscribe({
next: data => {
console.log("Bien!", data);
},
error: error => {
console.log("Error!", error);
}
});
});
let partidaEscape: PartidaEscape;
console.log("Lista escenas: ", this.listaEscenasEscape);
// CREATE SCENES
this.listaEscenasEscape.forEach(escena => {
console.log("Escena: ", escena);
// CHECK WHETHER A SCENE WITH THAT POSITION AND SCENARIO ALREADY EXISTS
this.peticionesAPI.DameEscenasEscapeConEsteEscenario(escena.escenarioId, escena.posicion).subscribe({
next: data => {
let escenaEscapeDevuelto: EscenaDeJuego = new EscenaDeJuego();
escenaEscapeDevuelto = data;
console.log("Escena devuelta: ", escenaEscapeDevuelto);
// IF IT EXISTS
if (escenaEscapeDevuelto[0] != null) {
// CREATE A MATCH (PARTIDA) FOR THIS SCENE
partidaEscape = new PartidaEscape(this.juegoEscape.id, escenaEscapeDevuelto[0].id, escenaEscapeDevuelto[0].posicion);
this.peticionesAPI.CrearPartidaEscape(partidaEscape).subscribe({
next: data => {
console.log("Partida creada correctamente (Partida): ", data);
},
error: error => {
console.log("Error al crear partida: ", error);
Swal.fire("¡Error al crear la partida!", "", "error");
}
});
// FETCH THE GLOBAL KEY OBJECT SO THE KEY CAN BE CREATED
this.peticionesAPI.DameObjetoLlaveGlobal("Llave").subscribe({
next: data => {
console.log("Llave devuelta 1: ", data);
let llave: ObjetoJuego = new ObjetoJuego(data[0].id, escenaEscapeDevuelto[0].escenarioId, "string", "string", false, false, false, 1, this.juegoEscape.id, escenaEscapeDevuelto[0].id, false, true, escenaEscapeDevuelto[0].id);
// CREATE THE KEY
console.log("LLAVE: ", llave);
this.peticionesAPI.CrearObjetoJuego(llave).subscribe({
next: data => {
console.log("Llave creada: ", data);
// FETCH THE GLOBAL HINT SO IT CAN BE CREATED
this.peticionesAPI.DameObjetoPistaGlobal("Pista").subscribe({
next: data => {
let pistaDevueltaGlobal: ObjetoJuego;
pistaDevueltaGlobal = data[0];
console.log("Pista devuelta: ", pistaDevueltaGlobal);
let pista: ObjetoJuego = new ObjetoJuego(pistaDevueltaGlobal.id, escenaEscapeDevuelto[0].escenarioId, this.pista.pregunta, "string", false, false, false, 1, this.juegoEscape.id, escenaEscapeDevuelto[0].id, false, true, escenaEscapeDevuelto[0].id);
// CREATE THE HINT
console.log("PISTA: ", pista);
this.peticionesAPI.CrearObjetoJuego(pista).subscribe({
next: data => {
console.log("Pista creada: ", data);
// ONCE THE HINT AND THE KEY ARE CREATED, CREATE THE GAME OBJECTS
this.objetosJuegoEscogidos.forEach(objeto => {
console.log("Objeto dentro de los objetos juego escogidos: ", objeto);
if (objeto.escenaId == escenaEscapeDevuelto[0].posicion) {
objeto.juegoDeEscapeRoomId = this.juegoEscape.id;
console.log("Objeto con el juego: ", objeto);
this.peticionesAPI.CrearObjetoJuego(objeto).subscribe({
next: data => {
console.log("Creado correctamente el objeto: ", data);
}, error: error => {
console.log("Error al crear objeto: ", error);
Swal.fire("Error al crear objeto.", "", "error");
}
});
}
});
},
error: error => {
console.log("Error al crear pista: ", error);
Swal.fire("Error al crear pista.", "", error);
}
});
},
error: error => {
console.log("Error al coger llave: ", error);
Swal.fire("Error al coger objeto pista.", "", "error");
}
});
},
error: error => {
console.log("Error al crear lleva: ", error);
Swal.fire("¡Error al crear objeto llave!", "", "error");
}
});
},
error: error => {
console.log("Error al coger la llave: ", error);
Swal.fire("Error al coger la llave.", "", "error");
}
});
}
else {
// CREATE THE SCENE WHEN NO MATCHING ONE EXISTS
this.peticionesAPI.CrearEscenaEscapeRoom(escena).subscribe({
next: data => {
let escenaDevuelta: EscenaDeJuego;
escenaDevuelta = data;
console.log("Escena devuelta cuando la creas: ", escenaDevuelta);
// CREATE MATCH
partidaEscape = new PartidaEscape(this.juegoEscape.id, escenaDevuelta.id, escenaDevuelta.posicion);
this.peticionesAPI.CrearPartidaEscape(partidaEscape).subscribe({
next: data => {
console.log("Data al crear partida: ", data);
},
error: error => {
console.log("Error al crear partida: ", error);
Swal.fire("Error al crear partida.", "", "error");
}
});
// FETCH KEY
this.peticionesAPI.DameObjetoLlaveGlobal("Llave").subscribe({
next: data => {
console.log("Llave devuelta: ", data[0]);
let llaveDevueltaGlobal: ObjetoGlobalEscape;
llaveDevueltaGlobal = data[0];
// CREATE KEY
let llave: ObjetoJuego = new ObjetoJuego(llaveDevueltaGlobal.id, escenaDevuelta.escenarioId, "string", "string", false, false, false, 1, this.juegoEscape.id, escenaDevuelta.id, false, true, escenaDevuelta.id);
console.log("Llavee: ", llave);
this.peticionesAPI.CrearObjetoJuego(llave).subscribe({
next: data => {
console.log("Data: ", data);
// FETCH HINT
this.peticionesAPI.DameObjetoPistaGlobal("Pista").subscribe({
next: data => {
console.log("Data pista: ", data[0]);
let pistaDevueltaGlobal: ObjetoGlobalEscape;
pistaDevueltaGlobal = data[0];
let pista: ObjetoJuego = new ObjetoJuego(pistaDevueltaGlobal.id, escenaDevuelta.escenarioId, this.pista.pregunta, "string", false, false, false, 1, this.juegoEscape.id, escenaDevuelta.id, false, true, escenaDevuelta.id);
// CREATE HINT
console.log("Pistaa: ", pista);
this.peticionesAPI.CrearObjetoJuego(pista).subscribe({
next: data => {
console.log("Data en crear pista: ", data);
// CREATE GAME OBJECTS
this.objetosJuegoEscogidos.forEach(objeto => {
console.log("Escena id: ", objeto.escenaId);
console.log("Escena id devuelta", escenaDevuelta.posicion);
if (objeto.escenaId == escenaDevuelta.posicion) {
objeto.juegoDeEscapeRoomId = this.juegoEscape.id;
this.peticionesAPI.CrearObjetoJuego(objeto).subscribe({
next: data => {
console.log("Creado objeto correctamente: ", data);
}, error: error => {
console.log("Error al crear objeto: ", error);
Swal.fire("Error al crear objeto.", "", "error");
}
});
}
});
}, error: error => {
console.log("Error crear pista: ", error),
Swal.fire("Error al crear pista,", "", "error");
}
});
}, error: error => {
console.log("Error al coger pista.", error);
Swal.fire("Error al coger pista!", "", "error");
}
});
},
error: error => {
console.log("Error al crear objeto llave.", error);
Swal.fire("Error al crear objeto llave.", "", "error");
}
});
}, error: error => {
console.log("Error al coger llave: ", error);
Swal.fire("Error al coger la llave: ", "", "error");
}
});
},
error: error => {
console.log("Error al crear escena: ", error);
Swal.fire("Error al crear escena.", "", "error");
}
});
Swal.fire("Juego creado correctamente!", "", "success");
}
},
error: error => {
console.log("Error escena: ", error);
Swal.fire("Error a la función DameEscena()", "", "error");
}
});
});
},
error: error => {
console.log("Error juego: ", error);
Swal.fire("Error al crear el juego.", "", "error");
}
});
}
}
});
}
verImagen(escenario: EscenarioEscapeRoom) {
// fetch the image from the API
Swal.fire({
title: escenario.mapa,
text: 'La imagen se llama: ' + escenario.imagenId,
imageUrl: '../../../assets/' + escenario.imagenId,
imageWidth: 400,
imageHeight: 200,
confirmButtonText: 'Volver',
}).then((result) => { });
}
//
//
escogerObjetoPrincipalModal(objetosEnigma) {
this.objetosEnigmaModal = true;
console.log("this.objetosEnigma: ", this.objetosEnigma);
this.modal.open(objetosEnigma, { centered: true, size: "lg" });
this.dataSource = new MatTableDataSource(this.objetosEnigma);
}
escogerObjetoPrincipal(objetoEnigma: ObjetoEnigma) {
let objetosEnigmaVariable: ObjetoEnigma[] = [];
let objetoEnigmaVariable: ObjetoEnigma;
console.log("ObjetoEnigma: ", objetoEnigma);
this.objetosEnigma.forEach(elemento => {
if (elemento[0].id == objetoEnigma[0].id) {
elemento[0].principal = true;
objetoEnigmaVariable = new ObjetoEnigma(elemento[0].nombre, elemento[0].pregunta, elemento[0].respuesta, elemento[0].profesorId, elemento[0].principal, elemento[0].objetoId, "Principal");
objetoEnigmaVariable.id = elemento[0].id;
objetosEnigmaVariable.push(objetoEnigmaVariable);
console.log("objeto que envio: ", objetoEnigmaVariable);
this.peticionesAPI.EditaObjetoEnigma(objetoEnigmaVariable).subscribe(res => {
console.log("res: ", res);
Swal.fire("Perfect", "", "success");
});
}
else {
elemento[0].principal = false;
objetoEnigmaVariable = new ObjetoEnigma(elemento[0].nombre, elemento[0].pregunta, elemento[0].respuesta, elemento[0].profesorId, elemento[0].principal, elemento[0].objetoId, "Principal");
objetosEnigmaVariable.push(objetoEnigmaVariable);
objetoEnigmaVariable.id = elemento[0].id;
this.peticionesAPI.EditaObjetoEnigma(objetoEnigmaVariable).subscribe(res => {
console.log("res: ", res);
Swal.fire("Perfect", "", "success");
});
}
});
console.log("objetosEnigma: ", objetosEnigmaVariable);
this.configuradoEscenarioPrincipal = true;
this.escenariosSecundariosProfesor = true;
}
applyFilter(filterValue: string) {
this.dataSource.filter = filterValue.trim().toLowerCase();
}
verEscenario(imagen) {
console.log("imagen: ", imagen);
if (imagen == "Habitación") {
this.varHelper = "habitacion";
}
if (imagen == "Cocina") {
this.varHelper = "cocina";
}
if (imagen == "Baño") {
this.varHelper = "baño";
}
// test override: force the base image for now
this.varHelper = "imagenBase";
imagen = "imagenBase";
Swal.fire({
title: imagen,
imageUrl: '../../../assets/' + this.varHelper + '.jpg',
imageWidth: 400,
imageHeight: 200,
showCancelButton: true,
confirmButtonText: 'Asignar',
cancelButtonText: 'Volver'
}).then((result) => {
if (result.value == true) {
if (this.tipoDeEscenarioSeleccionado != null) {
if (this.tipoDeEscenarioSeleccionado == imagen) {
Swal.fire({
title: 'Ya tiene seleccionado este escenario',
confirmButtonText: 'Volver'
})
} else {
Swal.fire({
title: 'Ya hay un escenario seleccionado, ¿Desea cambiarlo?',
showCancelButton: true,
confirmButtonText: 'Sí',
cancelButtonText: 'Volver'
}).then((result) => {
if (result.value == true) {
this.TipoDeEscenarioSeleccionado2(imagen);
console.log("SELECCIONADO: ", this.tipoDeEscenarioSeleccionado);
Swal.fire('Guardado!', '', 'success');
} else if (result.value == undefined) {
Swal.fire('No se han guardado los cambios', '', 'info')
}
})
}
}
else if (this.tipoDeEscenarioSeleccionado == null) {
this.TipoDeEscenarioSeleccionado2(imagen);
console.log("SELECCIONADO: ", this.tipoDeEscenarioSeleccionado);
Swal.fire('Guardado!', '', 'success');
}
} else if (result.value == undefined) {
Swal.fire('No se han guardado los cambios', '', 'info')
}
});
}
TipoDeEscenarioSeleccionado2(tipo: string) {
this.tipoDeEscenarioSeleccionado = tipo;
}
TipoDeEscenarioSeleccionado(tipo: ChipColor) {
this.tipoDeEscenarioSeleccionado = tipo.nombre;
}
//////////////////////////////////////// FUNCTIONS FOR LISTING GAMES ///////////////////////////////////////////////
// Fetches the points-game list, classifies it as active or inactive, and triggers the ListaJuegosDeColeccion function
// Function we will use when clicking on a game to enter it;
// we send the game to the session
JuegoSeleccionado(juego: Juego) {
console.log('**************guardo juego en la sesion');
console.log(juego);
this.sesion.TomaJuego(juego);
// if (juego.Tipo === 'Juego De Geocaching') {
// this.router.navigateByUrl ('juegoSeleccionadoPreparado');
// }
}
///////////////////////////////////////// FUNCTIONS FOR CREATING A GAME ///////////////////////////////////////////////
// RETRIEVES THE GROUP'S TEAMS
TraeEquiposDelGrupo() {
this.peticionesAPI.DameEquiposDelGrupo(this.grupo.id)
.subscribe(equipos => {
if (equipos[0] !== undefined) {
console.log('Hay equipos');
this.equiposGrupo = equipos;
console.log(this.equiposGrupo);
} else {
// message to the user
console.log('Este grupo aun no tiene equipos');
}
});
}
GuardaNombreDelJuego() {
this.nombreDelJuego = this.myForm.value.NombreDelJuego;
console.log('Entro en guardar nombre');
console.log(this.nombreDelJuego);
if (this.nombreDelJuego === undefined) {
this.tengoNombre = false;
} else {
this.tengoNombre = true;
this.creandoJuego = true; // empiezo el proceso de creacion del juego
console.log('tengo nombre ' + this.nombreDelJuego);
}
}
TipoDeJuegoSeleccionado(tipo: ChipColor) {
this.tipoDeJuegoSeleccionado = tipo.nombre;
console.log(' tengo tipo ' + this.tipoDeJuegoSeleccionado);
this.tengoTipo = true;
// if (this.tipoDeJuegoSeleccionado === 'Juego De Competición') {
// this.NumeroDeVueltas();
// }
}
// Stores the selected game mode in the variable (modoDeJuegoSeleccionado),
// which will be used later for the game POST
ModoDeJuegoSeleccionado(modo: ChipColor) {
this.modoDeJuegoSeleccionado = modo.nombre;
console.log(' tengo modo ' + this.modoDeJuegoSeleccionado);
console.log(' tengo tipo ' + this.tipoDeJuegoSeleccionado);
if ((this.tipoDeJuegoSeleccionado === 'Juego De Cuestionario') && (this.modoDeJuegoSeleccionado === 'Equipos')) {
Swal.fire('Alerta', 'Aún no es posible el juego de cuestionario en equipo', 'warning');
} else if ((this.tipoDeJuegoSeleccionado === 'Juego De Avatar') && (this.modoDeJuegoSeleccionado === 'Equipos')) {
Swal.fire('Alerta', 'Aún no es posible el juego de avatares en equipo', 'warning');
} else if ((this.tipoDeJuegoSeleccionado === 'Juego De Geocaching') && (this.modoDeJuegoSeleccionado === 'Equipos')) {
Swal.fire('Alerta', 'Aún no es posible el juego de geocaching en equipo', 'warning');
} else if ((this.tipoDeJuegoSeleccionado === 'Juego De Votación') && (this.modoDeJuegoSeleccionado === 'Equipos')) {
Swal.fire('Alerta', 'Aún no es posible el juego de votación en equipo', 'warning');
} else if ((this.tipoDeJuegoSeleccionado === 'Juego De Cuestionario de Satisfacción') && (this.modoDeJuegoSeleccionado === 'Equipos')) {
Swal.fire('Alerta', 'No existe el juego de cuestionario de satisfacción en equipo', 'warning');
} else {
if (this.modoDeJuegoSeleccionado === 'Individual') {
if (this.alumnosGrupo === undefined) {
Swal.fire('Alerta', 'No hay ningún alumno en este grupo', 'warning');
console.log('No Hay alumnos, no puedo crear el juego');
} else {
console.log('Hay alumnos, puedo crear');
this.tengoModo = true;
}
} else {
if (this.equiposGrupo === undefined) {
Swal.fire('Alerta', 'No hay ningún equipo en este grupo', 'warning');
console.log('No se puede crear juego pq no hay equipos');
} else {
this.tengoModo = true;
console.log('Hay equipos, puedo crear');
}
}
}
}
// FUNCTIONS FOR CREATING THE EVALUATION GAME
TipoDeEvaluacionSeleccionado(tipoEvaluacion: ChipColor) {
this.tipoDeEvaluacionSeleccionado = tipoEvaluacion.nombre;
if (this.tipoDeEvaluacionSeleccionado === 'Todos con todos') {
this.numeroDeMiembros = this.DameMaxSlider();
this.HacerRelaciones(true);
}
this.tengoTipoDeEvaluacion = true;
}
RelacionDeEvaluacionSeleccionado(relacionEvaluacion: ChipColor) {
this.relacionesEvaluacionSeleccionado = relacionEvaluacion.nombre;
if (relacionEvaluacion.nombre === 'Aleatorio') {
this.HacerRelaciones(true);
} else {
this.HacerRelaciones(false);
}
this.tengoRelacionEvaluacion = true;
}
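// Fisher-Yates shuffle: swaps each element with a uniformly chosen earlier one,
// producing an unbiased in-place permutation.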
Shuffle(a) {
let j, x, i;
for (i = a.length - 1; i > 0; i--) {
j = Math.floor(Math.random() * (i + 1));
x = a[i];
a[i] = a[j];
a[j] = x;
}
return a;
}
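// Builds the evaluado -> evaluadores map. With fill, each evaluated participant
// receives a shuffled list of candidate evaluators (team mates are excluded when
// teams are evaluated individually), truncated to the required size; the draw is
// repeated until every participant ends up with at least one evaluator.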
HacerRelaciones(fill: boolean) {
const evaluados = this.DameEvaluados().map(item => item.id);
this.relacionesMap = new Map();
do {
for (let i = 0; i < evaluados.length; i++) {
if (!fill) {
this.relacionesMap.set(evaluados[i], []);
} else {
let evaluadores = [];
if (this.modoDeJuegoSeleccionado === 'Equipos' && this.equiposEvaluacionSeleccionado === 'Individualmente') {
for (const equipo of this.relacionAlumnosEquipos) {
if (equipo.equipoId === evaluados[i]) {
evaluadores = this.DameEvaluadores()
.filter(({ id: id1 }) => !equipo.alumnos.some(({ id: id2 }) => id1 === id2))
.map(item => item.id);
}
}
} else {
evaluadores = this.DameEvaluadores().filter(item => item.id !== evaluados[i]).map(item => item.id);
}
evaluadores = this.Shuffle(evaluadores);
if (this.modoDeJuegoSeleccionado === 'Equipos'
&& this.equiposEvaluacionSeleccionado === 'Individualmente'
&& this.tipoDeEvaluacionSeleccionado === 'Todos con todos') {
evaluadores.length = this.alumnosGrupo.length;
} else {
evaluadores.length = this.numeroDeMiembros;
}
this.relacionesMap.set(evaluados[i], evaluadores.filter(item => !isNaN(item)));
}
}
} while (this.ComprobarSiTodosTienenEvaluadores() === false && fill === true);
console.log('Relaciones object', this.relacionesMap);
console.log('Todos tienen evaluadores', this.todosTienenEvaluador);
}
RelacionChanged(id: number, value: string[]) {
console.log('Relaciones changed', id, value);
this.relacionesMap.set(id, value);
console.log('Relaciones object', this.relacionesMap);
this.ComprobarSiTodosTienenEvaluadores();
}
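// Checks two invariants over relacionesMap: every participant is evaluated by at
// least one evaluator (todosTienenEvaluador) and every evaluator list reaches the
// requested size (comprobacionDeN).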
ComprobarSiTodosTienenEvaluadores() {
let encontrado1 = false;
let encontrado2 = false;
if (this.modoDeJuegoSeleccionado === 'Equipos' && this.equiposEvaluacionSeleccionado === 'Individualmente') {
this.relacionesMap.forEach((value, key) => {
if (value.length < this.numeroDeMiembros) {
this.comprobacionDeN = false;
encontrado2 = true;
}
value.forEach(item => {
if (this.ContarEvaluadores(item) === 0) {
this.todosTienenEvaluador = false;
encontrado1 = true;
}
});
});
} else {
this.relacionesMap.forEach((value, key) => {
if (this.ContarEvaluadores(key) === 0) {
this.todosTienenEvaluador = false;
encontrado1 = true;
}
if (value.length < this.numeroDeMiembros) {
this.comprobacionDeN = false;
encontrado2 = true;
}
});
}
if (!encontrado1) {
this.todosTienenEvaluador = true;
}
if (!encontrado2) {
this.comprobacionDeN = true;
}
return this.todosTienenEvaluador;
}
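// Counts in how many evaluator lists of relacionesMap the given id appears.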
ContarEvaluadores(idEvaluado: number): number {
let suma = 0;
this.relacionesMap.forEach((value, key) => {
if (value.includes(idEvaluado)) {
suma++;
}
});
return suma;
}
CriterioDeEvaluacionSeleccionado(criterioEvaluacion: ChipColor) {
this.criterioEvaluacionSeleccionado = criterioEvaluacion.nombre;
this.tengoCriterioEvaluacion = true;
if (this.criterioEvaluacionSeleccionado === 'Por pesos') {
this.pesosArray = [];
for (let i = 0; i < this.rubricaElegida.criterios.length; i++) {
this.pesosArray.push([]);
this.pesosArray[i].push(this.PesoPorDefecto(this.rubricaElegida.criterios.length));
for (let j = 0; j < this.rubricaElegida.criterios[i].elementos.length; j++) {
this.pesosArray[i].push(this.PesoPorDefecto(this.rubricaElegida.criterios[i].elementos.length));
}
}
console.log('pesos array', this.pesosArray);
} else {
this.penalizacionArray = [];
for (let i = 0; i < this.rubricaElegida.criterios.length; i++) {
this.penalizacionArray.push([]);
if (this.rubricaElegida.criterios[i].elementos.length >= 1) {
this.penalizacionArray[i].push({ num: 1, p: 75 });
}
if (this.rubricaElegida.criterios[i].elementos.length >= 2) {
this.penalizacionArray[i].push({ num: 2, p: 50 });
}
if (this.rubricaElegida.criterios[i].elementos.length >= 3) {
this.penalizacionArray[i].push({ num: 3, p: 0 });
}
}
console.log('penalizacion array', this.penalizacionArray);
}
}
EquipoDeEvaluacionSeleccionado(equipoEvaluacion: ChipColor) {
this.equiposEvaluacionSeleccionado = equipoEvaluacion.nombre;
this.tengoEquipoEvaluacion = true;
}
AutoevaluacionChange(isChecked: boolean) {
this.autoevaluacion = isChecked;
}
ProfesorEvaluaChange(isChecked: boolean) {
this.profesorEvalua = isChecked;
}
ProfesorEvaluaModoChange(value: string) {
this.profesorEvaluaModo = value;
}
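// Maximum value for the evaluators slider: all the other students in individual
// mode, all the other teams in team mode, or the size of the smallest team when
// teams are evaluated individually.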
DameMaxSlider(): number {
if (this.modoDeJuegoSeleccionado === 'Individual') {
return this.alumnosGrupo.length - 1;
} else if (this.modoDeJuegoSeleccionado === 'Equipos') {
if (this.equiposEvaluacionSeleccionado === 'Por Equipos') {
return this.equiposGrupo.length - 1;
} else if (this.equiposEvaluacionSeleccionado === 'Individualmente') {
let min = this.alumnosGrupo.length;
for (let i = 0; i < this.relacionAlumnosEquipos.length; i++) {
if (this.relacionAlumnosEquipos[i].alumnos.length < min) {
min = this.relacionAlumnosEquipos[i].alumnos.length;
}
}
return min;
}
}
}
DameEvaluados(): any {
if (this.modoDeJuegoSeleccionado === 'Individual') {
return this.alumnosGrupo;
} else {
return this.equiposGrupo;
}
}
DameEvaluadores(): any {
if (this.equiposEvaluacionSeleccionado === 'Por Equipos') {
return this.equiposGrupo;
} else {
return this.alumnosGrupo;
}
}
public DameRelacionesAlumnoEquipos() {
return this.relacionAlumnosEquipos;
}
SliderChanged(value: number) {
console.log('Slider changed to', value);
this.numeroDeMiembros = value;
}
RubricaSeleccionChange(index: number) {
console.log('Rubrica seleccionada', this.rubricas[index]);
this.rubricaElegida = this.rubricas[index];
this.tengoRubrica = true;
}
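// Default weight: an even split of 100% among `total` items, to two decimals.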
PesoPorDefecto(total: number): number {
return parseFloat((100 / total).toFixed(2));
}
PesosChanged(name: string, value: string): void {
console.log('Pesos changed', name, value);
const criterio = name.split('-')[0];
const elemento = name.split('-')[1];
this.pesosArray[criterio][elemento] = parseFloat(value);
console.log('pesos array changed', this.pesosArray);
this.pesosSuman100 = this.PesosSuman100();
}
PesosParentChanged(name: string, value: string): void {
console.log('Pesos parent changed', name, value);
this.pesosArray[name][0] = parseFloat(value);
console.log('pesos array changed', this.pesosArray);
this.pesosSuman100 = this.PesosSuman100();
}
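// Index 0 of each row holds the criterion weight and the remaining entries the
// element weights of that criterion; both the per-criterion sums and the sum of
// criterion weights must round to 100 (one decimal, EPSILON absorbs FP noise).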
PesosSuman100(): boolean {
let c = 0;
for (let i = 0; i < this.pesosArray.length; i++) {
let p = 0;
for (let j = 0; j < this.pesosArray[i].length; j++) {
if (j === 0) {
c += this.pesosArray[i][j];
} else {
p += this.pesosArray[i][j];
}
}
if (Math.round((p + Number.EPSILON) * 10) / 10 !== 100) {
return false;
}
}
return Math.round((c + Number.EPSILON) * 10) / 10 === 100;
}
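// name is encoded as 'criterio-elemento-tipo', where tipo is 'num' (number of
// failed elements) or 'p' (resulting percentage for the criterion).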
PenalizacionChanged(name: string, value: string): void {
console.log('Penalizacion changed', name, value);
const criterio = name.split('-')[0];
const elemento = name.split('-')[1];
const tipo = name.split('-')[2];
if (tipo === 'num') {
const tmp = this.penalizacionArray[criterio][elemento].p;
this.penalizacionArray[criterio][elemento] = { num: parseInt(value, 10), p: tmp };
} else if (tipo === 'p') {
const tmp = this.penalizacionArray[criterio][elemento].num;
this.penalizacionArray[criterio][elemento] = { num: tmp, p: parseInt(value, 10) };
}
console.log('penalizacion array', this.penalizacionArray);
}
CrearJuegoEvaluacion() {
let evaluadores: number;
if (this.tipoDeEvaluacionSeleccionado === 'Todos con todos') {
evaluadores = 0;
} else {
evaluadores = this.numeroDeMiembros;
}
const juego: JuegoDeEvaluacion = new JuegoDeEvaluacion(
null,
this.nombreDelJuego,
'Evaluacion',
this.modoDeJuegoSeleccionado,
true,
false,
this.profesorEvalua,
this.profesorEvaluaModo === 'normal',
this.autoevaluacion,
evaluadores,
this.pesosArray,
this.criterioEvaluacionSeleccionado === 'Por pesos',
this.penalizacionArray,
this.rubricaElegida.id,
this.profesorId,
this.grupo.id
);
console.log('Creando Juego de Evaluacion', juego);
this.peticionesAPI.CrearJuegoDeEvaluacion(juego).subscribe(res => {
console.log('JuegoDeEvaluacionCreado', res);
Swal.fire('Juego de Evaluación creado correctamente', ' ', 'success');
this.juego = res;
this.sesion.TomaJuego(this.juego);
this.juegoCreado = true;
this.relacionesMap.forEach((value: number[], key: number) => {
if (this.modoDeJuegoSeleccionado === 'Equipos' && this.equiposEvaluacionSeleccionado === 'Por Equipos') {
const equipo: EquipoJuegoEvaluado = new EquipoJuegoEvaluado(
null,
res.id,
key,
value,
null,
null
);
this.peticionesAPI.CrearEquipoJuegoDeEvaluacion(equipo).subscribe(equipores => console.log('EquipoJuegoEvaluado', equipores));
} else if (this.modoDeJuegoSeleccionado === 'Equipos' && this.equiposEvaluacionSeleccionado === 'Individualmente') {
const equipo: EquipoJuegoEvaluado = new EquipoJuegoEvaluado(
null,
res.id,
key,
null,
value,
null
);
this.peticionesAPI.CrearEquipoJuegoDeEvaluacion(equipo).subscribe(equipores => console.log('EquipoJuegoEvaluado', equipores));
} else if (this.modoDeJuegoSeleccionado === 'Individual') {
const alumno: AlumnoJuegoEvaluado = new AlumnoJuegoEvaluado(
null,
res.id,
key,
value,
null
);
this.peticionesAPI.CrearAlumnoJuegoDeEvaluacion(alumno).subscribe(alumnosres => console.log('AlumnoJuegoEvaluado', alumnosres));
}
});
// The game was created as active. We add it to the corresponding list
if (this.juegosActivos === undefined) {
// If the list has not been created yet, push would fail
this.juegosActivos = [];
}
this.juegosActivos.push(this.juego);
this.Limpiar();
// Go back to the team list (mat-tab with index 0)
this.tabGroup.selectedIndex = 0;
});
}
// FUNCTIONS FOR CREATING A POINTS GAME
RecibeTiposDePuntos($event) {
this.puntosDelJuego = $event;
console.log('ya tengo los puntos');
console.log(this.puntosDelJuego);
}
RecibeNivel($event) {
this.nivelesDelJuego.push($event.n);
if ($event.l !== undefined) {
this.logosNiveles.push($event.l);
}
console.log('ya tengo los niveles');
console.log(this.nivelesDelJuego);
console.log(this.logosNiveles);
}
// Function used to create a points game.
CrearJuegoDePuntos() {
// first we create the game
this.peticionesAPI.CreaJuegoDePuntos(new Juego(this.tipoDeJuegoSeleccionado, this.modoDeJuegoSeleccionado,
undefined, undefined, undefined, undefined, undefined, undefined, undefined, this.nombreDelJuego), this.grupo.id)
.subscribe(juegoCreado => {
this.juego = juegoCreado;
this.sesion.TomaJuego(this.juego);
this.juegoCreado = true;
// Now we assign the points
// tslint:disable-next-line:max-line-length
this.puntosDelJuego.forEach(punto =>
this.peticionesAPI.AsignaPuntoJuego(new AsignacionPuntosJuego(punto.id, this.juego.id))
.subscribe()
);
// assign the levels
if (this.nivelesDelJuego !== undefined) {
this.nivelesDelJuego.forEach(nivel =>
this.peticionesAPI.CreaNivel(nivel, this.juego.id)
.subscribe()
);
// Save the level logos
this.logosNiveles.forEach(logo =>
this.peticionesAPI.PonImagenNivel(logo)
.subscribe()
);
}
// Enroll the participants in the game
if (this.modoDeJuegoSeleccionado === 'Individual') {
console.log('Voy a inscribir a los alumnos del grupo 1');
// tslint:disable-next-line:prefer-for-of
for (let i = 0; i < this.alumnosGrupo.length; i++) {
console.log(this.alumnosGrupo[i]);
this.peticionesAPI.InscribeAlumnoJuegoDePuntos(new AlumnoJuegoDePuntos(this.alumnosGrupo[i].id, this.juego.id))
.subscribe();
}
} else {
console.log('Voy a inscribir los equipos del grupo');
// tslint:disable-next-line:prefer-for-of
for (let i = 0; i < this.equiposGrupo.length; i++) {
console.log(this.equiposGrupo[i]);
this.peticionesAPI.InscribeEquipoJuegoDePuntos(new EquipoJuegoDePuntos(this.equiposGrupo[i].id, this.juego.id))
.subscribe();
}
}
Swal.fire('Juego de puntos creado correctamente', ' ', 'success');
// The game was created as active. We add it to the corresponding list
if (this.juegosActivos === undefined) {
// If the list has not been created yet, push would fail
this.juegosActivos = [];
}
this.juegosActivos.push(this.juego);
this.Limpiar();
// Go back to the team list (mat-tab with index 0)
this.tabGroup.selectedIndex = 0;
});
}
/// FUNCTIONS FOR CREATING A COLLECTION GAME
// Receive the name of the collection chosen in the child component
RecibeColeccion($event) {
this.coleccionSeleccionada = $event;
this.tengoColeccion = true;
}
CrearJuegoDeColeccion() {
this.peticionesAPI.CreaJuegoDeColeccion(new Juego(this.tipoDeJuegoSeleccionado, this.modoDeJuegoSeleccionado, this.modoAsignacion,
this.coleccionSeleccionada.id, undefined, undefined, undefined, undefined, undefined, this.nombreDelJuego), this.grupo.id)
.subscribe(juegoCreado => {
this.juego = juegoCreado;
console.log(juegoCreado);
console.log('Juego creado correctamente');
this.sesion.TomaJuego(this.juego);
this.juegoCreado = true;
// Enroll the participants in the game
if (this.modoDeJuegoSeleccionado === 'Individual') {
// tslint:disable-next-line:prefer-for-of
for (let i = 0; i < this.alumnosGrupo.length; i++) {
this.peticionesAPI.InscribeAlumnoJuegoDeColeccion(new AlumnoJuegoDeColeccion(this.alumnosGrupo[i].id, this.juego.id))
.subscribe();
}
} else {
// tslint:disable-next-line:prefer-for-of
for (let i = 0; i < this.equiposGrupo.length; i++) {
this.peticionesAPI.InscribeEquipoJuegoDeColeccion(new EquipoJuegoDeColeccion(this.equiposGrupo[i].id, this.juego.id))
.subscribe();
}
}
Swal.fire('Juego de colección creado correctamente', ' ', 'success');
// The game was created as active. We add it to the corresponding list
if (this.juegosActivos === undefined) {
// If the list has not been created yet, push would fail
this.juegosActivos = [];
}
this.juegosActivos.push(this.juego);
this.Limpiar();
// Go back to the team list (mat-tab with index 0)
this.tabGroup.selectedIndex = 0;
// Notification for the group members
// console.log ('envio notificación los miembros del grupo');
// this.comService.EnviarNotificacionGrupo (
// this.grupo.id,
// 'Nuevo juego de colección para el grupo ' + this.grupo.Nombre
// );
console.log('envio notificación los miembros del grupo');
this.comService.EnviarNotificacionGrupo(
this.grupo.id,
'Nuevo juego de colección para el grupo ' + this.grupo.nombre
);
});
}
escogerEnigma() {
this.objetoEnigma = new ObjetoEnigma('cajaFuerte', '', '');
Swal.fire({
title: 'Pregunta',
text: '¿Cual es la pregunta que tiene que responder el alumno para obtener el código?',
input: 'text',
inputAttributes: {
autocapitalize: 'off'
},
showCancelButton: true,
confirmButtonText: 'Asignar',
showLoaderOnConfirm: true,
cancelButtonText: 'Volver'
}).then((result) => {
if (result.value !== undefined) {
this.objetoEnigma.pregunta = result.value;
Swal.fire({
title: 'Respuesta',
text: '¿Cual es la respuesta correcta que tiene que dar el alumno para obtener el enigma?',
input: 'text',
inputAttributes: {
autocapitalize: 'off'
},
showCancelButton: true,
confirmButtonText: 'Asignar',
showLoaderOnConfirm: true,
cancelButtonText: 'Volver'
}).then((result) => {
if (result.value !== undefined) {
this.objetoEnigma.respuesta = result.value;
Swal.fire('Enigma creado correctamente', ' ', 'success');
} else {
Swal.fire('No se ha creado el enigma', ' ', 'info');
}
});
} else {
Swal.fire('No se ha creado el enigma', ' ', 'info');
}
});
this.sesion.TomaObjetoEnigma(this.objetoEnigma);
console.log('Dame objeto enigma: ', this.sesion.DameObjetoEnigma());
}
//// FUNCTIONS FOR CREATING A QUESTIONNAIRE GAME
AbrirDialogoAgregarCuestionario(): void {
const dialogRef = this.dialog.open(AsignaCuestionarioComponent, {
width: '70%',
height: '80%',
position: {
top: '0%'
},
// Pass the necessary parameters
data: {
profesorId: this.profesorId
}
});
dialogRef.afterClosed().subscribe(() => {
this.cuestionario = this.sesion.DameCuestionario();
this.tengoCuestionario = true;
console.log('CUESTIONARIO SELECCIONADO --->' + this.cuestionario.titulo);
});
}
// Enables the save-scores button
TengoPuntuaciones() {
return this.myForm.value.PuntuacionCorrecta !== '' && this.myForm.value.PuntuacionIncorrecta !== '';
}
GuardarPuntuacion() {
this.puntuacionCorrecta = this.myForm.value.PuntuacionCorrecta;
this.puntuacionIncorrecta = this.myForm.value.PuntuacionIncorrecta;
}
GuardarModoPresentacion(modoPresentacion) {
this.modoPresentacion = modoPresentacion;
this.tengoModoPresentacion = true;
}
GuardarTiempoLimite() {
this.tiempoLimite = this.myForm.value.TiempoLimite;
if (this.tiempoLimite === undefined) {
this.tiempoLimite = 0;
}
}
TipoDeJuegoDeCuestionarioSeleccionado(tipoJuegoCuestionario: ChipColor) {
this.tipoDeJuegoDeCuestionarioSeleccionado = tipoJuegoCuestionario.nombre;
this.tengoTipoJuegoCuestionario = true;
}
CrearJuegoDeCuestionario() {
// We have to create a JuegoDeCuestionario object rather than a generic Juego, as in the
// previous cases, because the different game models are not well organized around the
// generic Juego base model. It stays like this for now.
// tslint:disable-next-line:max-line-length
this.peticionesAPI.CreaJuegoDeCuestionario(new JuegoDeCuestionario(this.nombreDelJuego, this.tipoDeJuegoSeleccionado, this.tipoDeJuegoDeCuestionarioSeleccionado, this.puntuacionCorrecta,
this.puntuacionIncorrecta, this.modoPresentacion,
false, false, this.profesorId, this.grupo.id, this.cuestionario.id, this.tiempoLimite), this.grupo.id)
.subscribe(juegoCreado => {
this.juegoDeCuestionario = juegoCreado;
// Enroll the students (for now there is no team questionnaire game)
// tslint:disable-next-line:prefer-for-of
for (let i = 0; i < this.alumnosGrupo.length; i++) {
// tslint:disable-next-line:max-line-length
this.peticionesAPI.InscribeAlumnoJuegoDeCuestionario(new AlumnoJuegoDeCuestionario(0, false, this.juegoDeCuestionario.id, this.alumnosGrupo[i].id))
.subscribe();
}
Swal.fire('Juego de cuestionario creado correctamente', ' ', 'success');
// The game starts out as prepared. We add it to the corresponding list
if (this.juegosPreparados === undefined) {
// If the list has not been created yet, push would fail
this.juegosPreparados = [];
}
this.juegosPreparados.push(this.juegoDeCuestionario);
this.Limpiar();
// Go back to the team list (mat-tab with index 0)
this.tabGroup.selectedIndex = 0;
});
}
//// FUNCTIONS FOR CREATING AN AVATAR GAME
RecibeFamiliasElegidas($event) {
this.familiasElegidas = $event;
this.tengoFamilias = true;
}
CrearJuegoDeAvatar() {
const juego = new JuegoDeAvatar(this.nombreDelJuego,
this.tipoDeJuegoSeleccionado,
this.modoDeJuegoSeleccionado,
true);
juego.familias = this.familiasElegidas;
juego.criteriosPrivilegioComplemento1 = this.myForm.value.criterioPrivilegioComplemento1;
juego.criteriosPrivilegioComplemento2 = this.myForm.value.criterioPrivilegioComplemento2;
juego.criteriosPrivilegioComplemento3 = this.myForm.value.criterioPrivilegioComplemento3;
juego.criteriosPrivilegioComplemento4 = this.myForm.value.criterioPrivilegioComplemento4;
juego.criteriosPrivilegioVoz = this.myForm.value.criterioPrivilegioVoz;
juego.criteriosPrivilegioVerTodos = this.myForm.value.criterioPrivilegioVerTodos;
this.peticionesAPI.CreaJuegoDeAvatar(juego, this.grupo.id)
.subscribe(nuevoJuego => {
this.juegoDeAvatar = nuevoJuego;
// Now we enroll the participants in the game. The original code nested the same
// 'Individual' check twice and left the success/cleanup steps inside it, so team
// games never reached them; the redundant check is removed here.
if (this.modoDeJuegoSeleccionado === 'Individual') {
console.log('Voy a inscribir a los alumnos del grupo');
// tslint:disable-next-line:prefer-for-of
for (let i = 0; i < this.alumnosGrupo.length; i++) {
console.log('inscribo');
// tslint:disable-next-line:max-line-length
this.peticionesAPI.InscribeAlumnoJuegoDeAvatar(new AlumnoJuegoDeAvatar(this.alumnosGrupo[i].id, this.juegoDeAvatar.id))
.subscribe();
}
} else {
// Enroll the teams (pending)
}
Swal.fire('Juego de avatares creado correctamente', ' ', 'success');
// The game was created as active. We add it to the corresponding list
if (this.juegosActivos === undefined) {
// If the list has not been created yet, push would fail
this.juegosActivos = [];
}
this.juegosActivos.push(this.juegoDeAvatar);
this.Limpiar();
// Go back to the team list (mat-tab with index 0)
this.tabGroup.selectedIndex = 0;
});
}
// FUNCTIONS FOR CREATING A COMPETITION GAME
TipoDeCompeticionSeleccionado(tipoCompeticion: ChipColor) {
this.tipoDeCompeticionSeleccionado = tipoCompeticion.nombre;
this.tengoTipoDeCompeticion = true;
}
GuardarNumeroDeJornadas() {
this.numeroDeJornadas = this.myForm.value.NumeroDeJornadas;
if (this.numeroDeJornadas === undefined || isNaN(this.numeroDeJornadas)) {
this.tengoNumeroDeJornadas = false;
Swal.fire('Introduzca un número de jornadas válido', 'Le recordamos que debe ser un número', 'error');
} else {
console.log('tengo numero');
this.tengoNumeroDeJornadas = true;
}
}
GuardarNuevaPuntuacion() {
this.nuevaPuntuacion = this.myForm.value.NuevaPuntuacion;
console.log('tengo nueva puntuacion ' + this.nuevaPuntuacion);
this.tengoNuevaPuntuacion = true;
}
Preparado() {
return this.tengoNuevaPuntuacion && this.selection.selected.length > 0;
}
AnadirPuntos() {
console.log('nueva puntuacion');
console.log(this.nuevaPuntuacion);
if (!isNaN(this.nuevaPuntuacion)) {
for (let i = 0; i < this.dataSource.data.length; i++) {
// Find the students we selected
if (this.selection.isSelected(this.dataSource.data[i])) {
this.Puntuacion[i] = this.nuevaPuntuacion;
this.TablaPuntuacion[i].puntuacion = this.nuevaPuntuacion;
}
}
} else {
Swal.fire('Introduzca una puntuación válida', 'Le recordamos que debe ser un Número', 'error');
}
this.dataSource = new MatTableDataSource(this.TablaPuntuacion);
this.selection.clear();
this.tengoNuevaPuntuacion = false;
}
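// The Formula One score table holds one row per scoring position: rows can be
// added up to the number of participants and removed down to a single one.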
AnadirFila() {
let i: number;
let NumeroParticipantes: number;
i = this.Puntuacion.length;
console.log(i);
console.log(this.Puntuacion);
if (this.modoDeJuegoSeleccionado === 'Individual') {
NumeroParticipantes = this.alumnosGrupo.length;
} else {
NumeroParticipantes = this.equiposGrupo.length;
}
if (i < NumeroParticipantes) {
this.TablaPuntuacion[i] = new TablaPuntosFormulaUno(i + 1, 1);
this.Puntuacion[i] = this.TablaPuntuacion[i].puntuacion;
console.log(this.TablaPuntuacion[i]);
this.dataSource = new MatTableDataSource(this.TablaPuntuacion);
} else {
Swal.fire('No es posible añadir otra fila', 'Ya puntuan todos los participantes', 'error');
}
}
EliminarFila() {
let i: number;
i = this.Puntuacion.length;
console.log(i);
console.log(this.Puntuacion);
if (i > 1) {
this.TablaPuntuacion = this.TablaPuntuacion.slice(0, i - 1);
this.Puntuacion = this.Puntuacion.slice(0, i - 1);
console.log(this.TablaPuntuacion);
console.log(this.Puntuacion);
this.dataSource = new MatTableDataSource(this.TablaPuntuacion);
} else {
Swal.fire('No es posible eliminar otra fila', 'Como mínimo debe puntuar un participante', 'error');
}
}
CrearJuegoDeCompeticionLiga() {
// tslint:disable-next-line:max-line-length
this.peticionesAPI.CreaJuegoDeCompeticionLiga(new Juego(this.tipoDeJuegoSeleccionado + ' ' + this.tipoDeCompeticionSeleccionado,
this.modoDeJuegoSeleccionado, undefined, undefined, true, this.numeroDeJornadas,
this.tipoDeCompeticionSeleccionado,
undefined, undefined, this.nombreDelJuego), this.grupo.id)
.subscribe(juegoCreado => {
this.juego = juegoCreado;
this.sesion.TomaJuego(this.juego);
this.juegoCreado = true;
// Create the matchdays
console.log('voy a crear jornadas');
this.calculos.CrearJornadasLiga(this.numeroDeJornadas, this.juego.id)
.subscribe(jornadas => {
this.jornadasLiga = jornadas;
console.log('Jornadas creadas correctamente');
console.log(this.jornadasLiga);
console.log(this.jornadasLiga.length);
if (this.modoDeJuegoSeleccionado === 'Individual') {
// tslint:disable-next-line:max-line-length
this.calculos.calcularLiga(this.alumnosGrupo.length, this.jornadasLiga.length, this.alumnosGrupo, this.grupo.id, this.jornadasLiga);
// tslint:disable-next-line:prefer-for-of
for (let i = 0; i < this.alumnosGrupo.length; i++) {
// tslint:disable-next-line:max-line-length
this.peticionesAPI.InscribeAlumnoJuegoDeCompeticionLiga(new AlumnoJuegoDeCompeticionLiga(this.alumnosGrupo[i].id, this.juego.id))
.subscribe();
}
} else {
// tslint:disable-next-line:max-line-length
this.calculos.calcularLiga(this.equiposGrupo.length, this.jornadasLiga.length, this.equiposGrupo, this.grupo.id, this.jornadasLiga);
// tslint:disable-next-line:prefer-for-of
for (let i = 0; i < this.equiposGrupo.length; i++) {
// tslint:disable-next-line:max-line-length
this.peticionesAPI.InscribeEquipoJuegoDeCompeticionLiga(new EquipoJuegoDeCompeticionLiga(this.equiposGrupo[i].id, this.juego.id))
.subscribe();
}
}
Swal.fire('Juego de competición tipo liga creado correctamente', ' ', 'success');
// The game was created as active. We add it to the corresponding list
if (this.juegosActivos === undefined) {
// If the list has not been created yet, push would fail
this.juegosActivos = [];
}
this.juegosActivos.push(this.juego);
this.Limpiar();
// Go back to the team list (mat-tab with index 0)
this.tabGroup.selectedIndex = 0;
});
});
}
CrearJuegoDeCompeticionFormulaUno() {
// tslint:disable-next-line:max-line-length
this.peticionesAPI.CreaJuegoDeCompeticionFormulaUno(new Juego(this.tipoDeJuegoSeleccionado + ' ' + this.tipoDeCompeticionSeleccionado,
this.modoDeJuegoSeleccionado, undefined, undefined, true, this.numeroDeJornadas,
undefined, this.Puntuacion.length,
this.Puntuacion, this.nombreDelJuego), this.grupo.id)
.subscribe(juegoCreado => {
this.juego = juegoCreado;
this.sesion.TomaJuego(this.juego);
this.juegoCreado = true;
this.calculos.CrearJornadasFormulaUno(this.numeroDeJornadas, this.juego.id)
.subscribe(jornadas => {
this.jornadasFormulaUno = jornadas;
this.sesion.TomaDatosJornadasJuegoComponent(this.jornadasFormulaUno);
// enroll the participants
if (this.modoDeJuegoSeleccionado === 'Individual') {
// tslint:disable-next-line:prefer-for-of
for (let i = 0; i < this.alumnosGrupo.length; i++) {
// tslint:disable-next-line:max-line-length
this.peticionesAPI.InscribeAlumnoJuegoDeCompeticionFormulaUno(new AlumnoJuegoDeCompeticionFormulaUno(this.alumnosGrupo[i].id, this.juego.id))
.subscribe();
}
} else {
// tslint:disable-next-line:prefer-for-of
for (let i = 0; i < this.equiposGrupo.length; i++) {
// tslint:disable-next-line:max-line-length
this.peticionesAPI.InscribeEquipoJuegoDeCompeticionFormulaUno(new EquipoJuegoDeCompeticionFormulaUno(this.equiposGrupo[i].id, this.juego.id))
.subscribe();
}
}
Swal.fire('Juego de competición tipo fórmula uno creado correctamente', ' ', 'success');
// The game was created as active. We add it to the corresponding list
if (this.juegosActivos === undefined) {
// If the list has not been created yet, push would fail
this.juegosActivos = [];
}
this.juegosActivos.push(this.juego);
// When the finish button is pressed we clear the form and reset the stepper
this.Limpiar();
// Go back to the team list (mat-tab with index 0)
this.tabGroup.selectedIndex = 0;
});
});
}
/// Functions to create the Geocaching game
AbrirDialogoAgregarEscenario(): void {
const dialogRef = this.dialog.open(AsignaEscenarioComponent, {
width: '70%',
height: '80%',
position: {
top: '0%'
},
// Pass the necessary parameters
data: {
profesorId: this.profesorId
}
});
dialogRef.afterClosed().subscribe(() => {
this.escenario = this.sesion.DameEscenario();
console.log('ESCENARIO SELECCIONADO --->' + this.escenario.mapa);
this.DamePuntosGeolocalizablesDelEscenario(this.escenario);
console.log(this.numeroDePuntosGeolocalizables);
console.log(this.puntosgeolocalizablesEscenario);
});
}
DamePuntosGeolocalizablesDelEscenario(escenario: Escenario) {
console.log('voy a mostrar los puntosgeolocalizables del escenario ' + escenario.id);
this.peticionesAPI.DamePuntosGeolocalizablesEscenario(escenario.id)
.subscribe(res => {
if (res[0] !== undefined) {
this.puntosgeolocalizablesEscenario = res;
console.log(res);
this.numeroDePuntosGeolocalizables = this.puntosgeolocalizablesEscenario.length;
console.log(this.numeroDePuntosGeolocalizables);
this.tengoEscenario = true;
} else {
console.log('No hay puntosgeolocalizables en el escenario');
this.puntosgeolocalizablesEscenario = undefined;
this.numeroDePuntosGeolocalizables = 0;
}
});
}
AbrirDialogoAgregarPreguntas(): void {
const dialogRef = this.dialog.open(AsignaPreguntasComponent, {
width: '70%',
height: '80%',
position: {
top: '0%'
},
// Pass the necessary parameters
data: {
profesorId: this.profesorId,
numeroDePuntosGeolocalizables: this.numeroDePuntosGeolocalizables
}
});
dialogRef.afterClosed().subscribe(() => {
this.PreguntasBasicas = this.sesion.DameIdPreguntasBasicas();
this.PreguntasBonus = this.sesion.DameIdPreguntasBonus();
this.tengoPreguntas = true;
console.log('comprobacion de que se reciben los id de las preguntas');
console.log(this.PreguntasBasicas);
console.log(this.PreguntasBonus);
});
}
// Enables the save-scores button
TengoPuntuacionesGeocatching() {
return this.myForm.value.PuntuacionCorrectaGeo !== '' &&
this.myForm.value.PuntuacionIncorrectaGeo !== '' &&
this.myForm.value.PuntuacionCorrectaGeoBonus !== '' &&
this.myForm.value.PuntuacionIncorrectaGeoBonus !== '';
}
GuardarPuntuacionGeocaching() {
this.puntuacionCorrectaGeo = this.myForm.value.PuntuacionCorrectaGeo;
this.puntuacionIncorrectaGeo = this.myForm.value.PuntuacionIncorrectaGeo;
this.puntuacionCorrectaGeoBonus = this.myForm.value.PuntuacionCorrectaGeoBonus;
this.puntuacionIncorrectaGeoBonus = this.myForm.value.PuntuacionIncorrectaGeoBonus;
}
CrearJuegoDeGeocaching() {
// tslint:disable-next-line:max-line-length
this.peticionesAPI.CreaJuegoDeGeocaching(new JuegoDeGeocaching(this.nombreDelJuego, this.tipoDeJuegoSeleccionado, this.puntuacionCorrectaGeo, this.puntuacionIncorrectaGeo, this.puntuacionCorrectaGeoBonus, this.puntuacionIncorrectaGeoBonus, this.PreguntasBasicas, this.PreguntasBonus,
false, false, this.profesorId, this.grupo.id, this.escenario.id), this.grupo.id)
.subscribe(juegoCreado => {
this.juegoDeGeocaching = juegoCreado;
this.juegoCreado = true;
// Enroll the students in the game
// tslint:disable-next-line:prefer-for-of
for (let i = 0; i < this.alumnosGrupo.length; i++) {
// tslint:disable-next-line:max-line-length
this.peticionesAPI.InscribeAlumnoJuegoDeGeocaching(new AlumnoJuegoDeGeocaching(0, 0, this.alumnosGrupo[i].id, this.juegoDeGeocaching.id))
.subscribe();
}
Swal.fire('Juego de geocaching creado correctamente', ' ', 'success');
// The game starts out as prepared. We add it to the corresponding list
if (this.juegosPreparados === undefined) {
// If the list has not been created yet, push would fail
this.juegosPreparados = [];
}
this.juegosPreparados.push(this.juegoDeGeocaching);
// When the finish button is pressed we clear the form and reset the stepper
this.Limpiar();
// Go back to the team list (mat-tab with index 0)
this.tabGroup.selectedIndex = 0;
});
}
// Functions to create a voting game
// The One To All voting game reuses the score-assignment table already used for
// the Formula One competition, together with its helper functions, because the
// required behaviour is exactly the same
TipoDeVotacionSeleccionado(tipoVotacion: ChipColor) {
this.tipoDeVotacionSeleccionado = tipoVotacion.nombre;
this.tengoTipoDeVotacion = true;
}
ModoDeRepartoSeleccionado(modoReparto: ChipColor) {
this.modoDeRepartoSeleccionado = modoReparto.nombre;
this.tengoModoReparto = true;
}
// formatLabel(value: number) {
// // if (value >= 1000) {
// // return Math.round(value / 1000) + 'k';
// // }
// this.puntosARepartir = value;
// console.log ('aaaa: ' + value);
// console.log ('bbb: ' + this.puntosARepartir);
// return value;
// }
GuardaValor(event) {
this.puntosARepartir = event.value;
this.Puntuacion[0] = this.puntosARepartir;
}
CrearJuegoDeVotacionUnoATodos() {
const juegoDeVotacion = new JuegoDeVotacionUnoATodos(
this.tipoDeJuegoSeleccionado + ' ' + this.tipoDeVotacionSeleccionado,
this.modoDeJuegoSeleccionado,
this.modoDeRepartoSeleccionado,
true,
this.Puntuacion,
this.nombreDelJuego,
false,
this.grupo.id);
this.peticionesAPI.CreaJuegoDeVotacionUnoATodos(juegoDeVotacion, this.grupo.id)
.subscribe(juegoCreado => {
this.juego = juegoCreado;
this.sesion.TomaJuego(this.juego);
this.juegoCreado = true;
if (this.modoDeJuegoSeleccionado === 'Individual') {
// tslint:disable-next-line:prefer-for-of
for (let i = 0; i < this.alumnosGrupo.length; i++) {
// tslint:disable-next-line:max-line-length
this.peticionesAPI.InscribeAlumnoJuegoDeVotacionUnoATodos(
// tslint:disable-next-line:indent
new AlumnoJuegoDeVotacionUnoATodos(this.alumnosGrupo[i].id, this.juego.id))
.subscribe();
}
}
Swal.fire('Juego de votación tipo Uno A Todos creado correctamente', ' ', 'success');
// The game was created as active. We add it to the corresponding list
if (this.juegosActivos === undefined) {
// If the list has not been created yet, push would fail
this.juegosActivos = [];
}
this.juegosActivos.push(this.juego);
// When the finish button is pressed we clear the form and reset the stepper
this.Limpiar();
// Go back to the team list (mat-tab with index 0)
this.tabGroup.selectedIndex = 0;
});
}
PonConcepto() {
this.listaConceptos.push({ nombre: this.myForm.value.NombreDelConcepto, peso: this.myForm.value.PesoDelConcepto });
this.dataSourceConceptos = new MatTableDataSource(this.listaConceptos);
let peso: number;
peso = Number(this.myForm.value.PesoDelConcepto);
this.totalPesos = this.totalPesos + peso;
console.log('total ' + this.totalPesos);
this.myForm.reset();
}
BorraConcepto(nombre) {
// tslint:disable-next-line:prefer-for-of
for (let i = 0; i < this.listaConceptos.length; i++) {
if (this.listaConceptos[i]['nombre'] === nombre) {
this.totalPesos = this.totalPesos - Number(this.listaConceptos[i]['peso']);
this.listaConceptos.splice(i, 1);
}
}
this.dataSourceConceptos = new MatTableDataSource(this.listaConceptos);
}
AsignarConceptos() {
this.conceptos = [];
this.pesos = [];
if (this.totalPesos !== 100) {
Swal.fire('Los pesos no suman el 100%', ' ', 'error');
} else {
this.listaConceptos.forEach(concepto => {
this.conceptos.push(concepto['nombre']);
this.pesos.push(concepto['peso']);
});
this.conceptosAsignados = true;
}
}
CrearJuegoDeVotacionTodosAUno() {
const juegoDeVotacion = new JuegoDeVotacionTodosAUno(
this.tipoDeJuegoSeleccionado + ' ' + this.tipoDeVotacionSeleccionado,
this.modoDeJuegoSeleccionado,
true,
this.conceptos,
this.pesos,
this.nombreDelJuego,
false,
this.grupo.id);
console.log('voy a crear juego');
console.log(juegoDeVotacion);
this.peticionesAPI.CreaJuegoDeVotacionTodosAUno(juegoDeVotacion, this.grupo.id)
.subscribe(juegoCreado => {
this.juego = juegoCreado;
this.sesion.TomaJuego(this.juego);
this.juegoCreado = true;
if (this.modoDeJuegoSeleccionado === 'Individual') {
// tslint:disable-next-line:prefer-for-of
for (let i = 0; i < this.alumnosGrupo.length; i++) {
// tslint:disable-next-line:max-line-length
this.peticionesAPI.InscribeAlumnoJuegoDeVotacionTodosAUno(
new AlumnoJuegoDeVotacionTodosAUno(this.alumnosGrupo[i].id, this.juego.id))
.subscribe();
}
}
Swal.fire('Juego de votación tipo Todos A Uno creado correctamente', ' ', 'success');
// The game was created as active. We add it to the corresponding list
if (this.juegosActivos === undefined) {
// If the list has not been created yet, push would fail
this.juegosActivos = [];
}
this.juegosActivos.push(this.juego);
// When the finish button is pressed we clear the form and reset the stepper
this.Limpiar();
// Go back to the team list (mat-tab with index 0)
this.tabGroup.selectedIndex = 0;
});
}
///////////////// FUNCTIONS FOR CREATING A SATISFACTION QUESTIONNAIRE GAME /////////////
RecibeCuestionarioSatisfaccionElegido($event) {
this.cuestionarioSatisfaccion = $event;
this.tengoCuestionarioSatisfaccion = true;
console.log('tengo cuestionario: ' + this.cuestionarioSatisfaccion.titulo);
}
GuardaDescripcionCuestionarioSatisfaccion(ev) {
this.cuestionarioSatisfaccion.descripcion = ev.target.value;
}
CrearJuegoDeCuestionarioDeSatisfaccion() {
console.log('voy a crear el juego');
console.log('cuestionario: ' + this.cuestionarioSatisfaccion.titulo);
console.log('Descripcion: ' + this.cuestionarioSatisfaccion.descripcion);
const juegoDeCuestionarioSatisfaccion = new JuegoDeCuestionarioSatisfaccion(
this.nombreDelJuego,
this.tipoDeJuegoSeleccionado,
this.cuestionarioSatisfaccion.descripcion,
true,
false,
this.profesorId,
this.grupo.id,
this.cuestionarioSatisfaccion.id);
console.log('voy a crear juego');
console.log(juegoDeCuestionarioSatisfaccion);
this.peticionesAPI.CreaJuegoDeCuestionarioSatisfaccion(juegoDeCuestionarioSatisfaccion, this.grupo.id)
.subscribe(juegoCreado => {
this.juego = juegoCreado;
this.sesion.TomaJuego(this.juego);
this.juegoCreado = true;
if (this.modoDeJuegoSeleccionado === 'Individual') {
// tslint:disable-next-line:prefer-for-of
for (let i = 0; i < this.alumnosGrupo.length; i++) {
// tslint:disable-next-line:max-line-length
this.peticionesAPI.InscribeAlumnoJuegoDeCuestionarioSatisfaccion(
new AlumnoJuegoDeCuestionarioSatisfaccion(false, this.juego.id, this.alumnosGrupo[i].id))
.subscribe();
}
}
Swal.fire('Juego de cuestionario de satisfacción creado correctamente', ' ', 'success');
// The game was created as active. We add it to the corresponding list
if (this.juegosActivos === undefined) {
// If the list has not been created yet, push would fail
this.juegosActivos = [];
}
this.juegosActivos.push(this.juego);
// When the finish button is pressed we clear the form and reset the stepper
this.Limpiar();
// Go back to the team list (mat-tab with index 0)
this.tabGroup.selectedIndex = 0;
});
}
goBack() {
this.location.back();
}
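// CanDeactivate guard: while a game is being created, leaving the route asks for
// confirmation and, if accepted, resets the wizard state via Limpiar().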
canExit(): Observable<boolean> {
console.log('voy a salir');
console.log(this.creandoJuego);
if (!this.creandoJuego) {
return of(true);
} else {
const confirmacionObservable = new Observable<boolean>(obs => {
const dialogRef = this.dialog.open(DialogoConfirmacionComponent, {
height: '150px',
data: {
mensaje: 'Confirma que quieres abandonar el proceso de creación del juego',
}
});
dialogRef.afterClosed().subscribe((confirmed: boolean) => {
if (confirmed) {
this.Limpiar();
}
obs.next(confirmed);
});
});
return confirmacionObservable;
}
}
// Functions for the creation of the Formula One competition
// Checks whether all rows are selected
IsAllSelected() {
const numSelected = this.selection.selected.length;
const numRows = this.dataSource.data.length;
return numSelected === numRows;
}
/* When the header checkbox is clicked we check whether all the checkboxes
 * are active, in which case they are all cleared, or whether any is
 * inactive, in which case they are all selected */
MasterToggle() {
if (this.IsAllSelected()) {
this.selection.clear(); // Clear all
} else {
// select all
this.dataSource.data.forEach(row => this.selection.select(row));
}
}
Limpiar() {
// When the finish button is pressed we clear the form and reset the stepper
this.stepper.reset();
this.myForm.reset();
this.tengoNombre = false;
this.tengoTipo = false;
this.tengoModo = false;
this.puntosDelJuego = [];
this.nivelesDelJuego = [];
this.logosNiveles = [];
this.coleccionSeleccionada = undefined;
this.tengoColeccion = false;
this.creandoJuego = false;
this.juegoCreado = false;
this.modoPresentacion = undefined;
this.puntuacionCorrecta = undefined;
this.puntuacionIncorrecta = undefined;
this.cuestionario = undefined;
this.tengoCuestionario = false;
this.tengoModoPresentacion = false;
this.familiasElegidas = undefined;
this.tengoFamilias = false;
this.tengoNumeroDeJornadas = false;
this.tengoTipoDeCompeticion = false;
this.tengoNuevaPuntuacion = false;
this.puntuacionCorrectaGeo = undefined;
this.puntuacionIncorrectaGeo = undefined;
this.puntuacionCorrectaGeoBonus = undefined;
this.puntuacionIncorrectaGeoBonus = undefined;
this.escenario = undefined;
this.tengoEscenario = false;
this.puntosgeolocalizablesEscenario = undefined;
this.PreguntasBasicas = undefined;
this.PreguntasBonus = undefined;
this.tengoPreguntas = false;
this.conceptosAsignados = false;
this.listaConceptos = [];
this.totalPesos = 0;
this.tengoModoReparto = true;
}
} | if (!bool) {
if (this.countEscape > 0 && objeto.tipo == "objetoEscape") {
Swal.fire({ |
other_profile.js | function displayFeed() {
$("#FEED").show();
$("#WELLNESS").hide();
$("#feed_button").css("opacity", 1);
$("#wellness_button").css("opacity", 0.3);
}
function displayWellness() {
$("#WELLNESS").show();
$("#wellness_button").css("opacity", 1);
$("#feed_button").css("opacity", 0.3);
$("#FEED").hide();
$("#pol").hide();
}
$(document).ready(function() {
displayFeed();
updateProfile();
updateCount();
checkFriend();
$(".ringBell").click(function() {
if (notif_container == null) Self.notifications(localStorage.getItem("token")).then(readNotifs);
else hideNotifs();
});
$('html').click(function(e) {
if (e.target.id != 'notif_container' && notif_container != null) hideNotifs();
});
});
//NOTIFICATIONS//
let notif_container = null;
function updateProfile() {
$("#name").html(localStorage.getItem('Name'));
$("#bio").html(localStorage.getItem('Bio'));
}
function updateCount() {
Self.notifications(localStorage.getItem("token")).then(function(e) {
if(e.data.comments.length + e.data.friend_requests.length > 0) $("span.-count").html('!');
else $("span.-count").hide();
});
}
function | () {
if(notif_container) notif_container.remove();
notif_container = null;
updateCount();
}
function readNotifs(n) {
notif_container = document.createElement('div');
notif_container.setAttribute('id', 'notif_container');
document.getElementById('notif_parent_container').appendChild(notif_container);
for (let i = 0; i < n.data.comments.length; i++) createNewCommentNotif(n.data.comments[i]);
for (let i = 0; i < n.data.friend_requests.length; i++) createNewFriendRequestNotif(n.data.friend_requests[i]);
}
function createNewCommentNotif(text) {
let notif = document.createElement('div');
notif_container.appendChild(notif);
notif.appendChild(document.createTextNode("Someone commented: " + text));
notif.setAttribute("class", "notif_object");
}
function createNewFriendRequestNotif(text) {
let notif = document.createElement('div');
notif_container.appendChild(notif);
notif.appendChild(document.createTextNode(text + " sent you a friend request!"));
let acceptButton = document.createElement("button");
acceptButton.innerHTML = "Y";
// Bind via a closure: an onclick attribute string would look up global
// `text`/`notif` at click time, which do not exist.
acceptButton.addEventListener('click', () => respondToRequest(text, true, notif));
let rejectButton = document.createElement("button");
rejectButton.innerHTML = "N";
rejectButton.addEventListener('click', () => respondToRequest(text, false, notif));
notif.appendChild(document.createElement("br"));
notif.appendChild(acceptButton);
notif.appendChild(rejectButton);
notif.setAttribute("class", "notif_object");
}
function respondToRequest(user, bool, obj) {
Self.friend_request(user, bool, localStorage.getItem("token")).then(function() {
hideNotifs();
});
}
//FRIEND REQUEST
function checkFriend() {
$('#addfr').hide()
$('#alreadyfr').hide()
$('#alreadysent').hide()
Self.read(localStorage.getItem("token")).then(function(a) {
var friends = false;
for (var i = 0; i < a.data.friends.length; i++){
if (a.data.friends[i] === 'another-id'){
friends = true;
}
}
if(friends) {
$('#alreadyfr').show();
} else {
Self.notifications(localStorage.getItem("token")).then(function(b) {
var sent = false;
for (let i = 0; i < b.data.friend_requests.length; i++) {
if (b.data.friend_requests[i] === 'user-id'){
sent = true;
}
}
// The request list arrives asynchronously, so this check has to run
// inside the callback rather than after it.
if (sent) {
$('#alreadysent').show();
} else {
$('#addfr').show();
}
});
}
});
}
function sendFrReq() {
Users.request_friend(localStorage.getItem("token"),localStorage.getItem("token"));
$('#addfr').hide()
$('#alreadysent').show()
}
//backend people: change the following function for pollution detection!
function chngimg() {
if (true) { //In particular, change this!!! if pollution low.
$("#tree").css("opacity", 1);
$("#house").css("opacity", 0.3);
$("#pol").hide();
$("#nopol").show();
} else {
$("#tree").css("opacity", 0.3);
$("#house").css("opacity", 1);
$("#pol").show();
$("#nopol").hide();
}
}
function goHome() {
window.location.href = "../index.html";
}
//LOCATION SCRIPTS//
function getLocation() {
if (navigator.geolocation) navigator.geolocation.getCurrentPosition(showPosition, showError);
else alert("Geolocation is not supported by this browser.");
}
function showPosition(position) { //change later
// GeolocationCoordinates is not directly serializable; storing the object
// itself would persist "[object Object]".
localStorage.setItem('pos', JSON.stringify({
latitude: position.coords.latitude,
longitude: position.coords.longitude
}));
$("#location").css("opacity", 1);
let posString = "(" + Math.round(position.coords.latitude) +
"," + Math.round(position.coords.longitude) + ")";
$("#location p").html(posString);
}
function showError(error) {
switch (error.code) {
case error.PERMISSION_DENIED:
console.error("User denied the request for Geolocation.");
break;
case error.POSITION_UNAVAILABLE:
console.error("Location information is unavailable.");
break;
case error.TIMEOUT:
console.error("The request to get user location timed out.");
break;
case error.UNKNOWN_ERROR:
console.error(`An unknown error occurred: ${error}`);
break;
}
}
//BACK END//
const url = "https://us-central1-covid19-spaceapps.cloudfunctions.net/api";
// Helper: performs the fetch against the API and normalizes the response into { code, success, data | reason }
async function sendRequest(method, path, token, body = null) {
// Build request
let options = {
method: method,
headers: {
Authorization: token,
}
}
// Add options for POST/PUT requests
if ((method === "POST" || method === "PUT") && body !== null) options.headers["Content-Type"] = "application/json";
if (body !== null) options.body = JSON.stringify(body);
// Send & parse requests
let response = await fetch(`${url}${path}`, options);
let json = await response.json();
// Generate return data
let base = {
code: response.status,
success: response.ok
}
if (response.ok) base.data = json.data;
else base.reason = json.reason;
return base;
}
class Self {
// Get the current user's basic information
static async read(token) {
return await sendRequest("GET", "/users/self", token);
}
// Get the current user's notifications
// This includes comments and friend requests
static async notifications(token) {
return await sendRequest("GET", "/users/self/notifications", token);
}
// Send a friend request
static async friend_request(user_id, accept, token) {
return await sendRequest("PUT", "/users/self/friends", token, {
friend: user_id,
accept: accept
});
}
}
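// Minimal usage sketch (the id below is a placeholder, not a real user):
// Self.read(localStorage.getItem("token"))
//     .then(res => console.log(res.success ? res.data.friends : res.reason));
// Users.read("another-id", localStorage.getItem("token"))
//     .then(res => console.log(res.success ? res.data : res.reason));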
// Interact with other users
class Users {
// Get another user's basic information
static async read(user_id, token) {
return await sendRequest("GET", `/users/${user_id}`, token);
}
// Make a friend request
static async request_friend(user_id, token) {
return await sendRequest("POST", `/users/${user_id}/friend`, token);
}
}
| hideNotifs |
test__spectral.py | from __future__ import division, absolute_import, print_function
import itertools
import numpy as np
from numpy import exp
from numpy.testing import assert_, assert_equal
from scipy.optimize import root
def test_performance():
# Compare performance results to those listed in
# [Cheng & Li, IMA J. Num. An. 29, 814 (2008)]
# and
# [W. La Cruz, J.M. Martinez, M. Raydan, Math. Comp. 75, 1429 (2006)].
# and those produced by dfsane.f from M. Raydan's website.
#
# Where the results disagree, the largest limits are taken.
|
def test_complex():
def func(z):
return z**2 - 1 + 2j
x0 = 2.0j
ftol = 1e-4
sol = root(func, x0, tol=ftol, method='DF-SANE')
assert_(sol.success)
f0 = np.linalg.norm(func(x0))
fx = np.linalg.norm(func(sol.x))
assert_(fx <= ftol*f0)
def test_linear_definite():
# The DF-SANE paper proves convergence for "strongly isolated"
# solutions.
#
# For linear systems F(x) = A x - b = 0, with A positive or
# negative definite, the solution is strongly isolated.
def check_solvability(A, b, line_search='cruz'):
func = lambda x: A.dot(x) - b
xp = np.linalg.solve(A, b)
eps = np.linalg.norm(func(xp)) * 1e3
sol = root(func, b, options=dict(fatol=eps, ftol=0, maxfev=17523, line_search=line_search),
method='DF-SANE')
assert_(sol.success)
assert_(np.linalg.norm(func(sol.x)) <= eps)
n = 90
# Test linear pos.def. system
np.random.seed(1234)
A = np.arange(n*n).reshape(n, n)
A = A + n*n * np.diag(1 + np.arange(n))
assert_(np.linalg.eigvals(A).min() > 0)
b = np.arange(n) * 1.0
check_solvability(A, b, 'cruz')
check_solvability(A, b, 'cheng')
# Test linear neg.def. system
check_solvability(-A, b, 'cruz')
check_solvability(-A, b, 'cheng')
def test_shape():
def f(x, arg):
return x - arg
for dt in [float, complex]:
x = np.zeros([2,2])
arg = np.ones([2,2], dtype=dt)
sol = root(f, x, args=(arg,), method='DF-SANE')
assert_(sol.success)
assert_equal(sol.x.shape, x.shape)
# Some of the test functions and initial guesses listed in
# [W. La Cruz, M. Raydan. Optimization Methods and Software, 18, 583 (2003)]
def F_1(x, n):
g = np.zeros([n])
i = np.arange(2, n+1)
g[0] = exp(x[0] - 1) - 1
g[1:] = i*(exp(x[1:] - 1) - x[1:])
return g
def x0_1(n):
x0 = np.empty([n])
x0.fill(n/(n-1))
return x0
def F_2(x, n):
g = np.zeros([n])
i = np.arange(2, n+1)
g[0] = exp(x[0]) - 1
g[1:] = 0.1*i*(exp(x[1:]) + x[:-1] - 1)
return g
def x0_2(n):
x0 = np.empty([n])
x0.fill(1/n**2)
return x0
def F_4(x, n):
assert_equal(n % 3, 0)
g = np.zeros([n])
# Note: the first line is typoed in some of the references;
# correct in original [Gasparo, Optimization Meth. 13, 79 (2000)]
g[::3] = 0.6 * x[::3] + 1.6 * x[1::3]**3 - 7.2 * x[1::3]**2 + 9.6 * x[1::3] - 4.8
g[1::3] = 0.48 * x[::3] - 0.72 * x[1::3]**3 + 3.24 * x[1::3]**2 - 4.32 * x[1::3] - x[2::3] + 0.2 * x[2::3]**3 + 2.16
g[2::3] = 1.25 * x[2::3] - 0.25*x[2::3]**3
return g
def x0_4(n):
assert_equal(n % 3, 0)
x0 = np.array([-1, 1/2, -1] * (n//3))
return x0
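# F_6 is a discretized Chandrasekhar H-equation with c = 0.9, a classic
# strongly nonlinear test problem.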
def F_6(x, n):
c = 0.9
mu = (np.arange(1, n+1) - 0.5)/n
return x - 1/(1 - c/(2*n) * (mu[:,None]*x / (mu[:,None] + mu)).sum(axis=1))
def x0_6(n):
return np.ones([n])
def F_7(x, n):
assert_equal(n % 3, 0)
def phi(t):
v = 0.5*t - 2
v[t > -1] = ((-592*t**3 + 888*t**2 + 4551*t - 1924)/1998)[t > -1]
v[t >= 2] = (0.5*t + 2)[t >= 2]
return v
g = np.zeros([n])
g[::3] = 1e4 * x[1::3]**2 - 1
g[1::3] = exp(-x[::3]) + exp(-x[1::3]) - 1.0001
g[2::3] = phi(x[2::3])
return g
def x0_7(n):
assert_equal(n % 3, 0)
return np.array([1e-3, 18, 1] * (n//3))
def F_9(x, n):
g = np.zeros([n])
i = np.arange(2, n)
g[0] = x[0]**3/3 + x[1]**2/2
g[1:-1] = -x[1:-1]**2/2 + i*x[1:-1]**3/3 + x[2:]**2/2
g[-1] = -x[-1]**2/2 + n*x[-1]**3/3
return g
def x0_9(n):
return np.ones([n])
def F_10(x, n):
return np.log(1 + x) - x/n
def x0_10(n):
return np.ones([n])
| e_a = 1e-5
e_r = 1e-4
table_1 = [
dict(F=F_1, x0=x0_1, n=1000, nit=5, nfev=5),
dict(F=F_1, x0=x0_1, n=10000, nit=2, nfev=2),
dict(F=F_2, x0=x0_2, n=500, nit=11, nfev=11),
dict(F=F_2, x0=x0_2, n=2000, nit=11, nfev=11),
# dict(F=F_4, x0=x0_4, n=999, nit=243, nfev=1188), removed: too sensitive to rounding errors
dict(F=F_6, x0=x0_6, n=100, nit=6, nfev=6), # Results from dfsane.f; papers list nit=3, nfev=3
dict(F=F_7, x0=x0_7, n=99, nit=23, nfev=29), # Must have n%3==0, typo in papers?
dict(F=F_7, x0=x0_7, n=999, nit=23, nfev=29), # Must have n%3==0, typo in papers?
dict(F=F_9, x0=x0_9, n=100, nit=12, nfev=18), # Results from dfsane.f; papers list nit=nfev=6?
dict(F=F_9, x0=x0_9, n=1000, nit=12, nfev=18),
dict(F=F_10, x0=x0_10, n=1000, nit=5, nfev=5), # Results from dfsane.f; papers list nit=2, nfev=12
]
# Check also scaling invariance
for xscale, yscale, line_search in itertools.product([1.0, 1e-10, 1e10], [1.0, 1e-10, 1e10],
['cruz', 'cheng']):
for problem in table_1:
n = problem['n']
func = lambda x, n: yscale*problem['F'](x/xscale, n)
args = (n,)
x0 = problem['x0'](n) * xscale
fatol = np.sqrt(n) * e_a * yscale + e_r * np.linalg.norm(func(x0, n))
sigma_eps = 1e-10 * min(yscale/xscale, xscale/yscale)
sigma_0 = xscale/yscale
with np.errstate(over='ignore'):
sol = root(func, x0, args=args,
options=dict(ftol=0, fatol=fatol, maxfev=problem['nfev'] + 1,
sigma_0=sigma_0, sigma_eps=sigma_eps,
line_search=line_search),
method='DF-SANE')
err_msg = repr([xscale, yscale, line_search, problem, np.linalg.norm(func(sol.x, n)),
fatol, sol.success, sol.nit, sol.nfev])
assert_(sol.success, err_msg)
assert_(sol.nfev <= problem['nfev'] + 1, err_msg) # nfev+1: dfsane.f doesn't count first eval
assert_(sol.nit <= problem['nit'], err_msg)
assert_(np.linalg.norm(func(sol.x, n)) <= fatol, err_msg) |
octet_stream_test.go | package integration
import (
"encoding/json"
"fmt"
cls "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cls/v20201016"
"github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common"
"github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/errors"
"github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/profile"
"github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/regions"
"io/ioutil"
"testing"
)
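// TestOctetStreamAction exercises an action whose request body is raw
// application/octet-stream data instead of JSON; error codes that only mean
// the fixture topic does not exist in the test account are tolerated.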
func TestOctetStreamAction(t *testing.T) | {
cr := getCredential()
cpf := profile.NewClientProfile()
cpf.HttpProfile.Endpoint = "cls.tencentcloudapi.com"
cpf.HttpProfile.ReqMethod = "POST"
client, _ := cls.NewClient(cr, regions.Guangzhou, cpf)
request := cls.NewUploadLogRequest()
request.TopicId = common.StringPtr("f6c4fa6f-367a-4f14-8289-1ff6f77ed975")
request.HashKey = common.StringPtr("0fffffffffffffffffffffffffffffff")
request.CompressType = common.StringPtr("")
data, _ := ioutil.ReadFile("./binary.data")
response, err := client.UploadLog(request, data)
if terr, ok := err.(*errors.TencentCloudSDKError); ok {
if terr.GetCode() == "OperationDenied" || terr.GetCode() == "ResourceNotFound.TopicNotExist" {
return
} else {
t.Errorf(fmt.Sprintf("fail to invoke api: %v", err))
}
}
if err != nil {
t.Errorf(fmt.Sprintf("fail to invoke api: %v", err))
}
b, _ := json.Marshal(response.Response)
t.Log(string(b))
} |
|
CustomDrawer.js | import React from 'react';
import PropTypes from 'prop-types';
import { withStyles } from '@material-ui/core/styles';
import MiniDrawer from './MiniDrawer';
import CssBaseline from '@material-ui/core/CssBaseline';
import { withRouter } from 'react-router';
const styles = theme => ({
root: {
display: 'flex',
overflow: 'hidden'
},
mainContent: {
display: 'flex',
flexDirection: 'column',
width: '100%',
height: '100vh',
overflow: 'hidden'
},
content: {
flexGrow: 1,
backgroundColor: '#555d6f',
// padding: '0 24px',
overflow: 'hidden'
},
beforeRoot: {
width: '100%',
height: '100%',
overflow: 'hidden'
}
});
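// Layout shell: a persistent MiniDrawer on the left and the routed page body on
// the right; the body component receives changeTitle so it can set the toolbar
// title shown by the drawer.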
class CustomDrawer extends React.Component {
state = {
toolbarTitle: 'No title',
};
_changeTitle = (newTitle) => {
this.setState({
toolbarTitle: newTitle
})
}
_setAdditionalComponent = component => {
this.setState({
additional: component
})
}
render() {
const { classes } = this.props;
const Body = this.props.body;
return (
<div className={[classes.beforeRoot, "app-wrapper-web"].join(' ')}>
<div className={[classes.root, '_3dqpi'].join(' ')}>
<CssBaseline />
<MiniDrawer changeTitle={this._changeTitle} />
<div className={classes.mainContent}> | <main className={classes.content}>
<Body changeTitle={this._changeTitle} />
</main>
</div>
</div>
</div>
);
}
}
CustomDrawer.propTypes = {
classes: PropTypes.object.isRequired,
theme: PropTypes.object.isRequired,
};
export default withRouter(withStyles(styles, { withTheme: true })(CustomDrawer)); | |
leaks.spec.js | /******************************************************************************
*
* Copyright (c) 2017, the Perspective Authors.
*
* This file is part of the Perspective library, distributed under the terms of
* the Apache License 2.0. The full license can be found in the LICENSE file.
*
*/ |
const utils = require("@finos/perspective-test");
const path = require("path");
utils.with_server({}, () => {
describe.page(
"superstore.html",
() => {
// must specify timeout AND viewport
test.capture(
"doesn't leak tables.",
async page => {
const viewer = await page.$("perspective-viewer");
await page.shadow_click("perspective-viewer", "#config_button");
for (var i = 0; i < 100; i++) {
await page.evaluate(element => element.load(window.__CSV__), viewer);
await page.waitForSelector("perspective-viewer:not([updating])");
}
await page.evaluate(
element =>
element.load(
window.__CSV__
.split("\n")
.slice(0, 10)
.join("\n")
),
viewer
);
await page.waitForSelector("perspective-viewer:not([updating])");
},
{timeout: 60000}
);
test.capture(
"doesn't leak elements.",
async page => {
let viewer = await page.$("perspective-viewer");
//await page.shadow_click("perspective-viewer", "#config_button");
for (var i = 0; i < 100; i++) {
viewer = await page.$("perspective-viewer");
await page.evaluate(element => {
element.delete();
document.body.innerHTML = "<perspective-viewer></perspective-viewer>";
document.getElementsByTagName("perspective-viewer")[0].load(window.__CSV__);
}, viewer);
await page.waitForSelector("perspective-viewer:not([updating])");
}
await page.shadow_click("perspective-viewer", "#config_button");
await page.evaluate(
element =>
element.load(
window.__CSV__
.split("\n")
.slice(0, 10)
.join("\n")
),
viewer
);
await page.waitForSelector("perspective-viewer:not([updating])");
},
{timeout: 60000}
);
test.capture(
"doesn't leak views when setting row pivots.",
async page => {
const viewer = await page.$("perspective-viewer");
await page.shadow_click("perspective-viewer", "#config_button");
for (var i = 0; i < 100; i++) {
await page.evaluate(element => {
let pivots = ["State", "City", "Segment", "Ship Mode", "Region", "Category"];
let start = Math.floor(Math.random() * pivots.length);
let length = Math.ceil(Math.random() * (pivots.length - start));
element.setAttribute("row-pivots", JSON.stringify(pivots.slice(start, length)));
}, viewer);
await page.waitForSelector("perspective-viewer:not([updating])");
}
await page.evaluate(element => element.setAttribute("row-pivots", '["Category"]'), viewer);
await page.waitForSelector("perspective-viewer:not([updating])");
},
{timeout: 60000}
);
test.capture(
"doesn't leak views when setting filters.",
async page => {
const viewer = await page.$("perspective-viewer");
await page.shadow_click("perspective-viewer", "#config_button");
for (var i = 0; i < 100; i++) {
await page.evaluate(element => {
element.setAttribute("filters", JSON.stringify([["Sales", ">", Math.random() * 100 + 100]]));
}, viewer);
await page.waitForSelector("perspective-viewer:not([updating])");
}
await page.evaluate(element => element.setAttribute("filters", '[["Sales", "<", 10]]'), viewer);
await page.waitForSelector("perspective-viewer:not([updating])");
},
{timeout: 60000}
);
},
{root: path.join(__dirname, "..", "..")}
);
}); | |
lang.rs | use super::*;
/// Wrapper for creating language objects.
/// ```
/// use extendr_api::prelude::*;
/// test! {
/// let call_to_xyz = r!(Language::from_values(&[sym!(xyz), r!(1), r!(2)]));
/// assert_eq!(call_to_xyz.is_language(), true);
/// assert_eq!(call_to_xyz.len(), 3);
/// }
/// ```
///
/// Note: You can use the [lang!] macro for this.
#[derive(PartialEq, Clone)]
pub struct Language {
pub(crate) robj: Robj,
}
impl Language {
pub fn | <T>(values: T) -> Self
where
T: IntoIterator,
T::IntoIter: DoubleEndedIterator,
T::Item: Into<Robj>,
{
single_threaded(|| unsafe {
let mut res = R_NilValue;
let mut num_protected = 0;
for val in values.into_iter().rev() {
let val = Rf_protect(val.into().get());
res = Rf_protect(Rf_lcons(val, res));
num_protected += 2;
}
let robj = Robj::from_sexp(res);
Rf_unprotect(num_protected);
Language { robj }
})
}
pub fn iter(&self) -> PairlistIter {
unsafe {
PairlistIter {
robj: self.robj.clone(),
list_elem: self.robj.get(),
}
}
}
pub fn names(&self) -> impl Iterator<Item = &'static str> {
self.iter().map(|(tag, _)| tag)
}
pub fn values(&self) -> impl Iterator<Item = Robj> {
self.iter().map(|(_, robj)| robj)
}
}
impl std::fmt::Debug for Language {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"lang!({})",
self.iter()
.map(|(k, v)| if k.is_empty() {
format!("{:?}", v)
} else {
format!("{}={:?}", k, v)
})
.collect::<Vec<_>>()
.join(", ")
)?;
Ok(())
}
}
| from_values |
Train.py | from panda3d.core import *
from direct.showbase.DirectObject import DirectObject
from direct.interval.IntervalGlobal import *
from direct.distributed.ClockDelta import globalClockDelta
from direct.distributed.ClockDelta import NetworkTimePrecision
import random
from direct.task.Task import Task
from direct.directnotify.DirectNotifyGlobal import directNotify
from direct.fsm import ClassicFSM
from direct.fsm import State
from direct.directutil import Mopath
from toontown.toonbase import ToontownGlobals
from direct.actor import Actor
class Train(DirectObject):
notify = directNotify.newCategory('Train') | Sfx_TrainPass = 'phase_10/audio/sfx/CBHQ_TRAIN_pass.ogg'
Sfx_TrainStopStart = 'phase_10/audio/sfx/CBHQ_TRAIN_stopstart.ogg'
LocomotiveFile = 'phase_10/models/cogHQ/CashBotLocomotive'
CarFiles = ['phase_10/models/cogHQ/CashBotBoxCar', 'phase_10/models/cogHQ/CashBotTankCar', 'phase_10/models/cogHQ/CashBotFlatCar']
CarLength = 88
MarkDelta = 15
def __init__(self, trackStartPos, trackEndPos, trackNum, numTotalTracks):
self.trackStartPos = trackStartPos
self.trackEndPos = trackEndPos
self.numCars = len(self.CarFiles)
self.locomotive = loader.loadModel(self.LocomotiveFile)
self.cars = []
self.trainPassingSfx = base.loader.loadSfx(self.Sfx_TrainPass)
self.trainStopStartSfx = base.loader.loadSfx(self.Sfx_TrainStopStart)
self.trainId = trackNum
self.bFlipped = False
if trackStartPos[0] < trackEndPos[0]:
self.locomotive.setHpr(180, 0, 0)
self.bFlipped = True
self.collNodeName = 'CollNode-%s' % self.trainId
self.firstMark = self.MarkDelta / numTotalTracks * trackNum
currentTime = self.__networkTimeInSeconds()
currentRun = int((currentTime - self.firstMark) / self.MarkDelta)
self.lastMark = currentRun * self.MarkDelta + self.firstMark
self.doNextRun(True)
self.hide()
def hide(self):
if self.locomotive:
self.locomotive.reparentTo(hidden)
def show(self):
if self.locomotive:
self.locomotive.reparentTo(render)
def __networkTimeInSeconds(self):
time = globalClockDelta.getRealNetworkTime(bits=32) / NetworkTimePrecision
return time
def doNextRun(self, bFirstRun = False):
if self.locomotive:
if bFirstRun:
nextMark = self.lastMark
else:
nextMark = self.lastMark + self.MarkDelta
self.nextRun.finish()
self.notify.debug('Next mark %s' % nextMark)
currentTime = self.__networkTimeInSeconds()
timeTillNextMark = nextMark - currentTime
self.notify.debug('Time diff %s' % timeTillNextMark)
runNumber = int((nextMark - self.firstMark) / self.MarkDelta)
S = random.getstate()
random.seed(self.trainId + runNumber)
self.nextRun = self.__getNextRun()
random.setstate(S)
self.__startNextRun(timeTillNextMark)
self.lastMark = nextMark
return Task.done
def __startNextRun(self, timeTillMark):
if self.locomotive:
self.__disableCollisions()
if timeTillMark > 0:
self.nextRun = Sequence(Wait(timeTillMark), self.nextRun)
self.nextRun.start()
else:
self.nextRun.start(-1 * timeTillMark)
self.__enableCollisions()
return Task.done
def __cleanupCars(self):
self.__disableCollisions()
for car in self.cars:
car.removeNode()
self.cars = []
def __getCars(self):
self.__cleanupCars()
numCarsThisRun = random.randrange(1, 10)
for nCar in xrange(numCarsThisRun):
carType = random.randrange(0, self.numCars)
car = loader.loadModel(self.CarFiles[carType])
car.reparentTo(self.locomotive)
car.setPos(self.CarLength * (nCar + 1), 0, 0)
self.cars.append(car)
def __showStart(self):
self.notify.debug('Starting train %s at %s.' % (self.trainId, self.__networkTimeInSeconds()))
def __getNextRun(self):
self.__getCars()
trainShouldStop = random.randrange(0, 4)
nextRun = Sequence(Func(self.__showStart))
        if trainShouldStop == 0:
waitTime = 3
totalTime = random.randrange(4, (self.MarkDelta - waitTime) / 2)
sfxStopTime = 4.3
halfway = (self.trackStartPos + self.trackEndPos) / 2
halfway.setX(150)
nextRun.append(Parallel(Sequence(Wait(totalTime - sfxStopTime), SoundInterval(self.trainStopStartSfx, volume=0.5)), Sequence(LerpPosInterval(self.locomotive, totalTime, halfway, self.trackStartPos, blendType='easeInOut'), WaitInterval(waitTime), LerpPosInterval(self.locomotive, totalTime, self.trackEndPos, halfway, blendType='easeIn'))))
else:
totalTime = random.randrange(6, self.MarkDelta - 1)
sfxTime = 7
sfxStartTime = totalTime / 2 - sfxTime / 2
if self.bFlipped:
sfxStartTime -= 1
else:
sfxStartTime += 1
nextRun.append(Parallel(Sequence(Wait(sfxStartTime), SoundInterval(self.trainPassingSfx, volume=0.5)), LerpPosInterval(self.locomotive, totalTime, self.trackEndPos, self.trackStartPos)))
nextRun.append(Func(self.doNextRun))
return nextRun
def delete(self):
self.__cleanupCars()
self.locomotive.removeNode()
self.locomotive = None
self.nextRun.finish()
self.nextRun = None
del self.trainPassingSfx
del self.trainStopStartSfx
return
def uniqueName(self, name):
Train.nameId += 1
return name + '-%d' % Train.nameId
def __enableCollisions(self):
allColls = self.locomotive.findAllMatches('**/+CollisionNode')
for car in self.cars:
carColls = car.findAllMatches('**/+CollisionNode')
allColls += carColls
for collNode in allColls:
collNode.setName(self.collNodeName)
collNode.setCollideMask(ToontownGlobals.WallBitmask)
self.accept('enter' + self.collNodeName, self.__handleCollisionSphereEnter)
def __disableCollisions(self):
self.ignore('enter' + self.collNodeName)
def __handleCollisionSphereEnter(self, collEntry = None):
base.localAvatar.b_squish(10) | nameId = 0 |
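# Worked example of the mark scheduling above (illustrative numbers, not part
# of the original file): with MarkDelta = 15 and numTotalTracks = 3,
# firstMark = 15 / 3 * trackNum gives marks at t = 0, 5 and 10 for tracks
# 0, 1 and 2, so every train starts a run each 15 seconds, staggered 5
# seconds apart; lastMark then snaps the current network time down to the
# most recent such mark for this track.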
204.go | package p204
/**
Count the number of prime numbers less than a non-negative number, n
*/
func countPrimes(n int) int {
if n <= 1 {
return 0
}
nums := make([]bool, n)
nums[0] = true
nums[1] = true
	// Sieve of Eratosthenes: nums[i] == true marks i as composite.
	cur := 2
	for cur*cur < n && !nums[cur] {
		// Mark every multiple of the prime cur, starting at 2*cur.
		tmp := cur * 2
		for tmp < n {
			nums[tmp] = true
			tmp += cur
		}
		// Advance to the next unmarked (prime) candidate.
		cur++
		for nums[cur] && cur*cur < n {
			cur++
		}
	}
cnt := 0
for _, v := range nums {
if v == false |
}
return cnt
}
| {
cnt++
} |
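// Usage sketch (hypothetical, not part of the original file; assumes "fmt"
// is imported and this lives in a _test.go file). countPrimes implements
// the Sieve of Eratosthenes:
func Example_countPrimes() {
	fmt.Println(countPrimes(10)) // the primes below 10 are 2, 3, 5 and 7
	// Output: 4
}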
forward.rs | // Copyright 2018 TiKV Project Authors. Licensed under Apache-2.0.
use std::cmp::Ordering;
use engine::CF_DEFAULT;
use kvproto::kvrpcpb::IsolationLevel;
use crate::storage::kv::SEEK_BOUND;
use crate::storage::mvcc::write::{Write, WriteType};
use crate::storage::mvcc::Result;
use crate::storage::{Cursor, Key, Lock, Snapshot, Statistics, Value};
use super::util::CheckLockResult;
use super::ScannerConfig;
/// This struct can be used to scan keys starting from the given user key (greater than or equal).
///
/// Internally, for each key, rollbacks are ignored and smaller version will be tried. If the
/// isolation level is SI, locks will be checked first.
///
/// Use `ScannerBuilder` to build `ForwardScanner`.
pub struct ForwardScanner<S: Snapshot> {
cfg: ScannerConfig<S>,
lock_cursor: Cursor<S::Iter>,
write_cursor: Cursor<S::Iter>,
/// `default cursor` is lazy created only when it's needed.
default_cursor: Option<Cursor<S::Iter>>,
/// Is iteration started
is_started: bool,
statistics: Statistics,
}
impl<S: Snapshot> ForwardScanner<S> {
pub fn new(
cfg: ScannerConfig<S>,
lock_cursor: Cursor<S::Iter>,
write_cursor: Cursor<S::Iter>,
) -> ForwardScanner<S> {
ForwardScanner {
cfg,
lock_cursor,
write_cursor,
statistics: Statistics::default(),
default_cursor: None,
is_started: false,
}
}
/// Take out and reset the statistics collected so far.
pub fn take_statistics(&mut self) -> Statistics {
std::mem::replace(&mut self.statistics, Statistics::default())
}
/// Get the next key-value pair, in forward order.
pub fn read_next(&mut self) -> Result<Option<(Key, Value)>> {
if !self.is_started {
if self.cfg.lower_bound.is_some() {
// TODO: `seek_to_first` is better, however it has performance issues currently.
self.write_cursor.seek(
self.cfg.lower_bound.as_ref().unwrap(),
&mut self.statistics.write,
)?;
self.lock_cursor.seek(
self.cfg.lower_bound.as_ref().unwrap(),
&mut self.statistics.lock,
)?;
} else {
self.write_cursor.seek_to_first(&mut self.statistics.write);
self.lock_cursor.seek_to_first(&mut self.statistics.lock);
}
self.is_started = true;
}
// The general idea is to simultaneously step write cursor and lock cursor.
// TODO: We don't need to seek lock CF if isolation level is RC.
loop {
// `current_user_key` is `min(user_key(write_cursor), lock_cursor)`, indicating
// the encoded user key we are currently dealing with. It may not have a write, or
// may not have a lock. It is not a slice to avoid data being invalidated after
// cursor moving.
//
// `has_write` indicates whether `current_user_key` has at least one corresponding
            // `write`. If there is one, it is what the write cursor is currently pointing to. The pointed
// `write` must be the most recent (i.e. largest `commit_ts`) write of
// `current_user_key`.
//
// `has_lock` indicates whether `current_user_key` has a corresponding `lock`. If
            // there is one, it is what the lock cursor is currently pointing to.
let (current_user_key, has_write, has_lock) = {
let w_key = if self.write_cursor.valid() {
Some(self.write_cursor.key(&mut self.statistics.write))
} else {
None
};
let l_key = if self.lock_cursor.valid() {
Some(self.lock_cursor.key(&mut self.statistics.lock))
} else {
None
};
// `res` is `(current_user_key_slice, has_write, has_lock)`
let res = match (w_key, l_key) {
(None, None) => {
// Both cursors yield `None`: we know that there is nothing remaining.
return Ok(None);
}
(None, Some(k)) => {
// Write cursor yields `None` but lock cursor yields something:
// In RC, it means we got nothing.
// In SI, we need to check if the lock will cause conflict.
(k, false, true)
}
(Some(k), None) => {
// Write cursor yields something but lock cursor yields `None`:
// We need to further step write cursor to our desired version
(Key::truncate_ts_for(k)?, true, false)
}
(Some(wk), Some(lk)) => {
let write_user_key = Key::truncate_ts_for(wk)?;
match write_user_key.cmp(lk) {
Ordering::Less => {
                            // Write cursor user key < lock cursor user key: the lock of the
                            // key that the write cursor is pointing to does not exist.
(write_user_key, true, false)
}
Ordering::Greater => {
                            // Write cursor user key > lock cursor user key: we got a lock on a
                            // key that does not have a write. In SI, we need to check if the
// lock will cause conflict.
(lk, false, true)
}
Ordering::Equal => {
                            // Write cursor user key == lock cursor user key: the lock of the
                            // key that the write cursor is pointing to *exists*.
(lk, true, true)
}
}
}
};
// Use `from_encoded_slice` to reserve space for ts, so later we can append ts to
// the key or its clones without reallocation.
(Key::from_encoded_slice(res.0), res.1, res.2)
};
            // `result` stores intermediate values, including KeyLocked errors (but not other
            // kinds of errors). If there is a KeyLocked error, we should be able to continue scanning.
let mut result = Ok(None);
// `get_ts` is the real used timestamp. If user specifies `MaxInt64` as the timestamp,
// we need to change it to a most recently available one.
let mut get_ts = self.cfg.ts;
            // `met_next_user_key` stores whether the write cursor is already pointing to
            // the next user key. If so, we don't need to compare it again when trying to step
// to the next user key later.
let mut met_next_user_key = false;
if has_lock {
match self.cfg.isolation_level {
IsolationLevel::SI => {
// Only needs to check lock in SI
let lock = {
let lock_value = self.lock_cursor.value(&mut self.statistics.lock);
Lock::parse(lock_value)?
};
                        match super::util::check_lock(&current_user_key, self.cfg.ts, &lock)? {
CheckLockResult::NotLocked => {}
CheckLockResult::Locked(e) => result = Err(e),
CheckLockResult::Ignored(ts) => get_ts = ts,
}
}
IsolationLevel::RC => {}
}
self.lock_cursor.next(&mut self.statistics.lock);
}
if has_write {
// We don't need to read version if there is a lock error already.
if result.is_ok() {
                    // Attempt to read the specified version of the key. Note that we may get `None`,
                    // indicating that no desired version was found, or that a DELETE version was found.
                    result = self.get(&current_user_key, get_ts, &mut met_next_user_key);
}
// Even if there is a lock error, we still need to step the cursor for future
                // calls. However, if we are already pointing at the next user key, we don't need
                // to move it any more. `met_next_user_key` eliminates a key compare.
if !met_next_user_key {
                    self.move_write_cursor_to_next_user_key(&current_user_key)?;
}
}
// If we got something, it can be just used as the return value. Otherwise, we need
// to continue stepping the cursor.
if let Some(v) = result? {
return Ok(Some((current_user_key, v)));
}
}
}
/// Attempt to get the value of a key specified by `user_key` and `self.cfg.ts`. This function
/// requires that the write cursor is currently pointing to the latest version of `user_key`.
#[inline]
fn get(
&mut self,
user_key: &Key,
ts: u64,
met_next_user_key: &mut bool,
) -> Result<Option<Value>> {
assert!(self.write_cursor.valid());
// The logic starting from here is similar to `PointGetter`.
// Try to iterate to `${user_key}_${ts}`. We first `next()` for a few times,
// and if we have not reached where we want, we use `seek()`.
// Whether we have *not* reached where we want by `next()`.
let mut needs_seek = true;
for i in 0..SEEK_BOUND {
if i > 0 {
self.write_cursor.next(&mut self.statistics.write);
if !self.write_cursor.valid() {
// Key space ended.
return Ok(None);
}
}
{
let current_key = self.write_cursor.key(&mut self.statistics.write);
if !Key::is_user_key_eq(current_key, user_key.as_encoded().as_slice()) {
// Meet another key.
*met_next_user_key = true;
return Ok(None);
}
if Key::decode_ts_from(current_key)? <= ts {
                    // Found it; no need to seek again.
needs_seek = false;
break;
}
}
}
// If we have not found `${user_key}_${ts}` in a few `next()`, directly `seek()`.
if needs_seek {
// `user_key` must have reserved space here, so its clone has reserved space too. So no
// reallocation happens in `append_ts`.
self.write_cursor
.seek(&user_key.clone().append_ts(ts), &mut self.statistics.write)?;
if !self.write_cursor.valid() {
// Key space ended.
return Ok(None);
}
let current_key = self.write_cursor.key(&mut self.statistics.write);
if !Key::is_user_key_eq(current_key, user_key.as_encoded().as_slice()) {
// Meet another key.
*met_next_user_key = true;
return Ok(None);
}
}
// Now we must have reached the first key >= `${user_key}_${ts}`. However, we may
        // meet `Lock` or `Rollback`. In this case, more versions need to be looked up.
loop {
let write = Write::parse(self.write_cursor.value(&mut self.statistics.write))?;
self.statistics.write.processed += 1;
match write.write_type {
WriteType::Put => return Ok(Some(self.load_data_by_write(write, user_key)?)),
WriteType::Delete => return Ok(None),
WriteType::Lock | WriteType::Rollback => {
// Continue iterate next `write`.
}
}
self.write_cursor.next(&mut self.statistics.write);
if !self.write_cursor.valid() {
// Key space ended.
return Ok(None);
}
let current_key = self.write_cursor.key(&mut self.statistics.write);
if !Key::is_user_key_eq(current_key, user_key.as_encoded().as_slice()) {
// Meet another key.
*met_next_user_key = true;
return Ok(None);
}
}
}
/// Load the value by the given `write`. If value is carried in `write`, it will be returned
/// directly. Otherwise there will be a default CF look up.
///
/// The implementation is the same as `PointGetter::load_data_by_write`.
#[inline]
fn load_data_by_write(&mut self, write: Write, user_key: &Key) -> Result<Value> {
if self.cfg.omit_value {
return Ok(vec![]);
}
match write.short_value {
Some(value) => {
// Value is carried in `write`.
Ok(value)
}
None => {
// Value is in the default CF.
self.ensure_default_cursor()?;
let value = super::util::near_load_data_by_write(
&mut self.default_cursor.as_mut().unwrap(),
user_key,
write,
&mut self.statistics,
)?;
Ok(value)
}
}
}
    /// After `self.get()`, our write cursor may be pointing to the current user key (if we
    /// found a desired version), to the next user key (if there is no desired version), or
/// out of bound.
///
    /// If it is pointing to the current user key, we need to step it until we meet a new
/// key. We first try to `next()` a few times. If still not reaching another user
/// key, we `seek()`.
#[inline]
fn move_write_cursor_to_next_user_key(&mut self, current_user_key: &Key) -> Result<()> {
for i in 0..SEEK_BOUND {
if i > 0 {
self.write_cursor.next(&mut self.statistics.write);
}
if !self.write_cursor.valid() {
// Key space ended. We are done here.
return Ok(());
}
{
let current_key = self.write_cursor.key(&mut self.statistics.write);
if !Key::is_user_key_eq(current_key, current_user_key.as_encoded().as_slice()) {
// Found another user key. We are done here.
return Ok(());
}
}
}
// We have not found another user key for now, so we directly `seek()`.
        // After that, we must be pointing to another key, or out of bound.
// `current_user_key` must have reserved space here, so its clone has reserved space too.
// So no reallocation happens in `append_ts`.
self.write_cursor.internal_seek(
            &current_user_key.clone().append_ts(0),
&mut self.statistics.write,
)?;
Ok(())
}
/// Create the default cursor if it doesn't exist.
#[inline]
fn ensure_default_cursor(&mut self) -> Result<()> {
if self.default_cursor.is_some() {
return Ok(());
}
self.default_cursor = Some(self.cfg.create_cf_cursor(CF_DEFAULT)?);
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::super::ScannerBuilder;
use super::*;
use crate::storage::mvcc::tests::*;
use crate::storage::Scanner;
use crate::storage::{Engine, Key, TestEngineBuilder};
use kvproto::kvrpcpb::Context;
/// Check whether everything works as usual when `ForwardScanner::get()` goes out of bound.
#[test]
fn test_get_out_of_bound() {
let engine = TestEngineBuilder::new().build().unwrap();
// Generate 1 put for [a].
must_prewrite_put(&engine, b"a", b"value", b"a", 7);
must_commit(&engine, b"a", 7, 7);
        // Generate 5 rollbacks for [b].
for ts in 0..5 {
must_rollback(&engine, b"b", ts);
}
let snapshot = engine.snapshot(&Context::new()).unwrap();
let mut scanner = ScannerBuilder::new(snapshot, 10, false)
.range(None, None)
.build()
.unwrap();
// Initial position: 1 seek_to_first:
// a_7 b_4 b_3 b_2 b_1 b_0
// ^cursor
        // After getting the value, use 1 next to reach the next user key:
// a_7 b_4 b_3 b_2 b_1 b_0
// ^cursor
assert_eq!(
scanner.next().unwrap(),
Some((Key::from_raw(b"a"), b"value".to_vec())),
);
let statistics = scanner.take_statistics();
assert_eq!(statistics.write.seek, 1);
assert_eq!(statistics.write.next, 1);
// Use 5 next and reach out of bound:
// a_7 b_4 b_3 b_2 b_1 b_0
// ^cursor
assert_eq!(scanner.next().unwrap(), None);
let statistics = scanner.take_statistics();
assert_eq!(statistics.write.seek, 0);
assert_eq!(statistics.write.next, 5);
// Cursor remains invalid, so nothing should happen.
assert_eq!(scanner.next().unwrap(), None);
let statistics = scanner.take_statistics();
assert_eq!(statistics.write.seek, 0);
assert_eq!(statistics.write.next, 0);
}
/// Check whether everything works as usual when
/// `ForwardScanner::move_write_cursor_to_next_user_key()` goes out of bound.
///
/// Case 1. next() out of bound
#[test]
fn test_move_next_user_key_out_of_bound_1() {
let engine = TestEngineBuilder::new().build().unwrap();
// Generate 1 put for [a].
must_prewrite_put(&engine, b"a", b"a_value", b"a", SEEK_BOUND * 2);
must_commit(&engine, b"a", SEEK_BOUND * 2, SEEK_BOUND * 2);
        // Generate SEEK_BOUND / 2 rollbacks and 1 put for [b].
for ts in 0..SEEK_BOUND / 2 {
must_rollback(&engine, b"b", ts as u64);
}
must_prewrite_put(&engine, b"b", b"b_value", b"a", SEEK_BOUND / 2);
must_commit(&engine, b"b", SEEK_BOUND / 2, SEEK_BOUND / 2);
let snapshot = engine.snapshot(&Context::new()).unwrap();
let mut scanner = ScannerBuilder::new(snapshot, SEEK_BOUND * 2, false)
.range(None, None)
.build()
.unwrap();
// The following illustration comments assume that SEEK_BOUND = 4.
// Initial position: 1 seek_to_first:
// a_8 b_2 b_1 b_0
// ^cursor
        // After getting the value, use 1 next to reach the next user key:
// a_8 b_2 b_1 b_0
// ^cursor
assert_eq!(
scanner.next().unwrap(),
Some((Key::from_raw(b"a"), b"a_value".to_vec())),
);
let statistics = scanner.take_statistics();
assert_eq!(statistics.write.seek, 1);
assert_eq!(statistics.write.next, 1);
// Before:
// a_8 b_2 b_1 b_0
// ^cursor
        // We should be able to get the wanted value without any operation.
        // After getting the value, use SEEK_BOUND / 2 + 1 next to reach the next user key and stop:
// a_8 b_2 b_1 b_0
// ^cursor
assert_eq!(
scanner.next().unwrap(),
Some((Key::from_raw(b"b"), b"b_value".to_vec())),
);
let statistics = scanner.take_statistics();
assert_eq!(statistics.write.seek, 0);
assert_eq!(statistics.write.next, (SEEK_BOUND / 2 + 1) as usize);
// Next we should get nothing.
assert_eq!(scanner.next().unwrap(), None);
let statistics = scanner.take_statistics();
assert_eq!(statistics.write.seek, 0);
assert_eq!(statistics.write.next, 0);
}
/// Check whether everything works as usual when
/// `ForwardScanner::move_write_cursor_to_next_user_key()` goes out of bound.
///
/// Case 2. seek() out of bound
#[test]
fn test_move_next_user_key_out_of_bound_2() {
let engine = TestEngineBuilder::new().build().unwrap();
// Generate 1 put for [a].
must_prewrite_put(&engine, b"a", b"a_value", b"a", SEEK_BOUND * 2);
must_commit(&engine, b"a", SEEK_BOUND * 2, SEEK_BOUND * 2);
        // Generate SEEK_BOUND-1 rollbacks and 1 put for [b].
for ts in 1..SEEK_BOUND {
must_rollback(&engine, b"b", ts as u64);
}
must_prewrite_put(&engine, b"b", b"b_value", b"a", SEEK_BOUND);
must_commit(&engine, b"b", SEEK_BOUND, SEEK_BOUND);
let snapshot = engine.snapshot(&Context::new()).unwrap();
let mut scanner = ScannerBuilder::new(snapshot, SEEK_BOUND * 2, false)
.range(None, None)
.build()
.unwrap();
// The following illustration comments assume that SEEK_BOUND = 4.
// Initial position: 1 seek_to_first:
// a_8 b_4 b_3 b_2 b_1
// ^cursor
        // After getting the value, use 1 next to reach the next user key:
// a_8 b_4 b_3 b_2 b_1
// ^cursor
assert_eq!(
scanner.next().unwrap(),
Some((Key::from_raw(b"a"), b"a_value".to_vec())),
);
let statistics = scanner.take_statistics();
assert_eq!(statistics.write.seek, 1);
assert_eq!(statistics.write.next, 1);
// Before:
// a_8 b_4 b_3 b_2 b_1
// ^cursor
        // We should be able to get the wanted value without any operation.
        // After getting the value, use SEEK_BOUND-1 next: (TODO: fix it to SEEK_BOUND)
// a_8 b_4 b_3 b_2 b_1
// ^cursor
        // We are still pointing at the current user key, so a seek is needed:
// a_8 b_4 b_3 b_2 b_1
// ^cursor
assert_eq!(
scanner.next().unwrap(),
Some((Key::from_raw(b"b"), b"b_value".to_vec())),
);
let statistics = scanner.take_statistics();
assert_eq!(statistics.write.seek, 1);
assert_eq!(statistics.write.next, (SEEK_BOUND - 1) as usize);
// Next we should get nothing.
assert_eq!(scanner.next().unwrap(), None);
let statistics = scanner.take_statistics();
assert_eq!(statistics.write.seek, 0);
assert_eq!(statistics.write.next, 0);
}
    /// Range is left-closed, right-open: the lower bound is included, the upper bound excluded.
#[test]
fn | () {
let engine = TestEngineBuilder::new().build().unwrap();
// Generate 1 put for [1], [2] ... [6].
for i in 1..7 {
// ts = 1: value = []
must_prewrite_put(&engine, &[i], &[], &[i], 1);
must_commit(&engine, &[i], 1, 1);
// ts = 7: value = [ts]
must_prewrite_put(&engine, &[i], &[i], &[i], 7);
must_commit(&engine, &[i], 7, 7);
// ts = 14: value = []
must_prewrite_put(&engine, &[i], &[], &[i], 14);
must_commit(&engine, &[i], 14, 14);
}
let snapshot = engine.snapshot(&Context::new()).unwrap();
// Test both bound specified.
let mut scanner = ScannerBuilder::new(snapshot.clone(), 10, false)
.range(Some(Key::from_raw(&[3u8])), Some(Key::from_raw(&[5u8])))
.build()
.unwrap();
assert_eq!(
scanner.next().unwrap(),
Some((Key::from_raw(&[3u8]), vec![3u8]))
);
assert_eq!(
scanner.next().unwrap(),
Some((Key::from_raw(&[4u8]), vec![4u8]))
);
assert_eq!(scanner.next().unwrap(), None);
// Test left bound not specified.
let mut scanner = ScannerBuilder::new(snapshot.clone(), 10, false)
.range(None, Some(Key::from_raw(&[3u8])))
.build()
.unwrap();
assert_eq!(
scanner.next().unwrap(),
Some((Key::from_raw(&[1u8]), vec![1u8]))
);
assert_eq!(
scanner.next().unwrap(),
Some((Key::from_raw(&[2u8]), vec![2u8]))
);
assert_eq!(scanner.next().unwrap(), None);
// Test right bound not specified.
let mut scanner = ScannerBuilder::new(snapshot.clone(), 10, false)
.range(Some(Key::from_raw(&[5u8])), None)
.build()
.unwrap();
assert_eq!(
scanner.next().unwrap(),
Some((Key::from_raw(&[5u8]), vec![5u8]))
);
assert_eq!(
scanner.next().unwrap(),
Some((Key::from_raw(&[6u8]), vec![6u8]))
);
assert_eq!(scanner.next().unwrap(), None);
// Test both bound not specified.
let mut scanner = ScannerBuilder::new(snapshot.clone(), 10, false)
.range(None, None)
.build()
.unwrap();
assert_eq!(
scanner.next().unwrap(),
Some((Key::from_raw(&[1u8]), vec![1u8]))
);
assert_eq!(
scanner.next().unwrap(),
Some((Key::from_raw(&[2u8]), vec![2u8]))
);
assert_eq!(
scanner.next().unwrap(),
Some((Key::from_raw(&[3u8]), vec![3u8]))
);
assert_eq!(
scanner.next().unwrap(),
Some((Key::from_raw(&[4u8]), vec![4u8]))
);
assert_eq!(
scanner.next().unwrap(),
Some((Key::from_raw(&[5u8]), vec![5u8]))
);
assert_eq!(
scanner.next().unwrap(),
Some((Key::from_raw(&[6u8]), vec![6u8]))
);
assert_eq!(scanner.next().unwrap(), None);
}
}
| test_range |
list_api_sessions_responses.go | // Code generated by go-swagger; DO NOT EDIT.
//
// Copyright NetFoundry Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// __ __ _
// \ \ / / (_)
// \ \ /\ / /_ _ _ __ _ __ _ _ __ __ _
// \ \/ \/ / _` | '__| '_ \| | '_ \ / _` |
// \ /\ / (_| | | | | | | | | | | (_| | : This file is generated, do not edit it.
// \/ \/ \__,_|_| |_| |_|_|_| |_|\__, |
// __/ |
// |___/
package api_session
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/openziti/edge/rest_model"
)
// ListAPISessionsReader is a Reader for the ListAPISessions structure.
type ListAPISessionsReader struct {
formats strfmt.Registry
}
// ReadResponse reads a server response into the received o.
func (o *ListAPISessionsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
switch response.Code() {
case 200:
result := NewListAPISessionsOK()
if err := result.readResponse(response, consumer, o.formats); err != nil |
return result, nil
case 400:
result := NewListAPISessionsBadRequest()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
case 401:
result := NewListAPISessionsUnauthorized()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
default:
return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
}
}
// NewListAPISessionsOK creates a ListAPISessionsOK with default headers values
func NewListAPISessionsOK() *ListAPISessionsOK {
return &ListAPISessionsOK{}
}
/* ListAPISessionsOK describes a response with status code 200, with default header values.
A list of active API Sessions
*/
type ListAPISessionsOK struct {
Payload *rest_model.ListAPISessionsEnvelope
}
func (o *ListAPISessionsOK) Error() string {
return fmt.Sprintf("[GET /api-sessions][%d] listApiSessionsOK %+v", 200, o.Payload)
}
func (o *ListAPISessionsOK) GetPayload() *rest_model.ListAPISessionsEnvelope {
return o.Payload
}
func (o *ListAPISessionsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(rest_model.ListAPISessionsEnvelope)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewListAPISessionsBadRequest creates a ListAPISessionsBadRequest with default headers values
func NewListAPISessionsBadRequest() *ListAPISessionsBadRequest {
return &ListAPISessionsBadRequest{}
}
/* ListAPISessionsBadRequest describes a response with status code 400, with default header values.
The supplied request contains invalid fields or could not be parsed (json and non-json bodies). The error's code, message, and cause fields can be inspected for further information
*/
type ListAPISessionsBadRequest struct {
Payload *rest_model.APIErrorEnvelope
}
func (o *ListAPISessionsBadRequest) Error() string {
return fmt.Sprintf("[GET /api-sessions][%d] listApiSessionsBadRequest %+v", 400, o.Payload)
}
func (o *ListAPISessionsBadRequest) GetPayload() *rest_model.APIErrorEnvelope {
return o.Payload
}
func (o *ListAPISessionsBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(rest_model.APIErrorEnvelope)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewListAPISessionsUnauthorized creates a ListAPISessionsUnauthorized with default headers values
func NewListAPISessionsUnauthorized() *ListAPISessionsUnauthorized {
return &ListAPISessionsUnauthorized{}
}
/* ListAPISessionsUnauthorized describes a response with status code 401, with default header values.
The currently supplied session does not have the correct access rights to request this resource
*/
type ListAPISessionsUnauthorized struct {
Payload *rest_model.APIErrorEnvelope
}
func (o *ListAPISessionsUnauthorized) Error() string {
return fmt.Sprintf("[GET /api-sessions][%d] listApiSessionsUnauthorized %+v", 401, o.Payload)
}
func (o *ListAPISessionsUnauthorized) GetPayload() *rest_model.APIErrorEnvelope {
return o.Payload
}
func (o *ListAPISessionsUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(rest_model.APIErrorEnvelope)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
| {
return nil, err
} |
asm_generic_ioctl.py | of the published Quick2Wire API.
#
# Converted from <asm-generic/ioctl.h>
# ioctl command encoding: 32 bits total, command in lower 16 bits,
# size of the parameter structure in the lower 14 bits of the
# upper 16 bits.
#
# Encoding the size of the parameter structure in the ioctl request
# is useful for catching programs compiled with old versions
# and to avoid overwriting user space outside the user buffer area.
# The highest 2 bits are reserved for indicating the ``access mode''.
#
# NOTE: This limits the max parameter size to 16kB -1 !
# The following is for compatibility across the various Linux
# platforms. The generic ioctl numbering scheme doesn't really enforce
# a type field. De facto, however, the top 8 bits of the lower 16
# bits are indeed used as a type field, so we might just as well make
| # this explicit here. Please be sure to use the decoding macros
# below from now on.
import ctypes
_IOC_NRBITS = 8
_IOC_TYPEBITS = 8
_IOC_SIZEBITS = 14
_IOC_DIRBITS = 2
_IOC_NRMASK = (1 << _IOC_NRBITS) - 1
_IOC_TYPEMASK = (1 << _IOC_TYPEBITS) - 1
_IOC_SIZEMASK = (1 << _IOC_SIZEBITS) - 1
_IOC_DIRMASK = (1 << _IOC_DIRBITS) - 1
_IOC_NRSHIFT = 0
_IOC_TYPESHIFT = _IOC_NRSHIFT + _IOC_NRBITS
_IOC_SIZESHIFT = _IOC_TYPESHIFT + _IOC_TYPEBITS
_IOC_DIRSHIFT = _IOC_SIZESHIFT + _IOC_SIZEBITS
# Direction bits
_IOC_NONE = 0
_IOC_WRITE = 1
_IOC_READ = 2
def _IOC(dir, type, nr, size):
return (dir << _IOC_DIRSHIFT) | \
(type << _IOC_TYPESHIFT) | \
(nr << _IOC_NRSHIFT) | \
(size << _IOC_SIZESHIFT)
def _IOC_TYPECHECK(t):
return ctypes.sizeof(t)
# used to create ioctl numbers
def _IO(type, nr):
return _IOC(_IOC_NONE, type, nr, 0)
def _IOR(type, nr, size):
return _IOC(_IOC_READ, type, nr, _IOC_TYPECHECK(size))
def _IOW(type, nr, size):
return _IOC(_IOC_WRITE, type, nr, _IOC_TYPECHECK(size))
def _IOWR(type, nr, size):
    return _IOC(_IOC_READ | _IOC_WRITE, type, nr, _IOC_TYPECHECK(size))
def _IOR_BAD(type, nr, size):
    # bare sizeof() would be a NameError in Python; use ctypes.sizeof
    return _IOC(_IOC_READ, type, nr, ctypes.sizeof(size))
def _IOW_BAD(type, nr, size):
    return _IOC(_IOC_WRITE, type, nr, ctypes.sizeof(size))
def _IOWR_BAD(type, nr, size):
    return _IOC(_IOC_READ | _IOC_WRITE, type, nr, ctypes.sizeof(size))
# ...and for the drivers/sound files...
IOC_IN = _IOC_WRITE << _IOC_DIRSHIFT
IOC_OUT = _IOC_READ << _IOC_DIRSHIFT
IOC_INOUT = (_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT
IOCSIZE_MASK = _IOC_SIZEMASK << _IOC_SIZESHIFT
IOCSIZE_SHIFT = _IOC_SIZESHIFT | |
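# Worked example of the encoding described in the header comment above
# (illustrative values, not part of the original module): a read-direction
# ioctl with type 0x12, command number 114, carrying a c_uint parameter.
if __name__ == '__main__':
    req = _IOR(0x12, 114, ctypes.c_uint)
    assert (req >> _IOC_DIRSHIFT) & _IOC_DIRMASK == _IOC_READ
    assert (req >> _IOC_TYPESHIFT) & _IOC_TYPEMASK == 0x12
    assert (req >> _IOC_NRSHIFT) & _IOC_NRMASK == 114
    assert (req >> _IOC_SIZESHIFT) & _IOC_SIZEMASK == ctypes.sizeof(ctypes.c_uint)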
lib.rs | use anchor_lang::prelude::*;
declare_id!("Fg6PaFpoGXkYsidMpWTK6W2BeZ7FEfcYkg476zPFsLnS");
#[program]
mod system_accounts {
use super::*;
pub fn initialize(_ctx: Context<Initialize>) -> ProgramResult {
Ok(())
}
}
#[derive(Accounts)]
pub struct | <'info> {
pub authority: Signer<'info>,
pub wallet: SystemAccount<'info>,
}
| Initialize |
sparse_tensor_test.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.sparse_tensor."""
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import googletest
class SparseTensorTest(test_util.TensorFlowTestCase):
def | (self):
indices = [[1, 2], [2, 0], [3, 4]]
values = [b"a", b"b", b"c"]
shape = [4, 5]
sp_value = sparse_tensor.SparseTensorValue(indices, values, shape)
for sp in [
sparse_tensor.SparseTensor(indices, values, shape),
sparse_tensor.SparseTensor.from_value(sp_value),
sparse_tensor.SparseTensor.from_value(
sparse_tensor.SparseTensor(indices, values, shape))]:
self.assertEqual(sp.indices.dtype, dtypes.int64)
self.assertEqual(sp.values.dtype, dtypes.string)
self.assertEqual(sp.dense_shape.dtype, dtypes.int64)
self.assertEqual(sp.get_shape(), (4, 5))
value = self.evaluate(sp)
self.assertAllEqual(indices, value.indices)
self.assertAllEqual(values, value.values)
self.assertAllEqual(shape, value.dense_shape)
sp_value = self.evaluate(sp)
self.assertAllEqual(sp_value.indices, value.indices)
self.assertAllEqual(sp_value.values, value.values)
self.assertAllEqual(sp_value.dense_shape, value.dense_shape)
def testShape(self):
@def_function.function
def test_fn(tensor):
tensor = sparse_ops.sparse_transpose(tensor)
self.assertEqual(tensor.shape.rank, 2)
return tensor
tensor = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1., 2], dense_shape=[3, 4])
test_fn(tensor)
def testIsSparse(self):
self.assertFalse(sparse_tensor.is_sparse(3))
self.assertFalse(sparse_tensor.is_sparse("foo"))
self.assertFalse(sparse_tensor.is_sparse(np.array(3)))
self.assertTrue(
sparse_tensor.is_sparse(sparse_tensor.SparseTensor([[0]], [0], [1])))
self.assertTrue(
sparse_tensor.is_sparse(
sparse_tensor.SparseTensorValue([[0]], [0], [1])))
def testConsumers(self):
with context.graph_mode():
sp = sparse_tensor.SparseTensor([[0, 0], [1, 2]], [1.0, 3.0], [3, 4])
w = ops.convert_to_tensor(np.ones([4, 1], np.float32))
out = sparse_ops.sparse_tensor_dense_matmul(sp, w)
self.assertEqual(len(sp.consumers()), 1)
self.assertEqual(sp.consumers()[0], out.op)
dense = sparse_ops.sparse_tensor_to_dense(sp)
self.assertEqual(len(sp.consumers()), 2)
self.assertIn(dense.op, sp.consumers())
self.assertIn(out.op, sp.consumers())
def testWithValues(self):
source = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1., 2], dense_shape=[3, 4])
new_tensor = source.with_values([5.0, 1.0])
self.assertAllEqual(new_tensor.indices, source.indices)
self.assertAllEqual(new_tensor.values, [5.0, 1.0])
self.assertAllEqual(new_tensor.dense_shape, source.dense_shape)
# ensure new value's shape is checked
with self.assertRaises((errors.InvalidArgumentError, ValueError)):
source.with_values([[5.0, 1.0]])
class ConvertToTensorOrSparseTensorTest(test_util.TensorFlowTestCase):
def test_convert_dense(self):
value = [42, 43]
from_value = sparse_tensor.convert_to_tensor_or_sparse_tensor(
value)
self.assertAllEqual(value, self.evaluate(from_value))
def test_convert_sparse(self):
indices = [[0, 1], [1, 0]]
values = [42, 43]
shape = [2, 2]
sparse_tensor_value = sparse_tensor.SparseTensorValue(
indices, values, shape)
st = sparse_tensor.SparseTensor.from_value(sparse_tensor_value)
from_value = self.evaluate(
sparse_tensor.convert_to_tensor_or_sparse_tensor(sparse_tensor_value))
from_tensor = self.evaluate(
sparse_tensor.convert_to_tensor_or_sparse_tensor(st))
for convertee in [from_value, from_tensor]:
self.assertAllEqual(sparse_tensor_value.indices, convertee.indices)
self.assertAllEqual(sparse_tensor_value.values, convertee.values)
self.assertAllEqual(
sparse_tensor_value.dense_shape, convertee.dense_shape)
class SparseTensorShapeTest(test_util.TensorFlowTestCase):
def test_simple(self):
indices = [[0, 2]]
values = [1]
dense_shape = [5, 5]
sp = sparse_tensor.SparseTensor(indices, values, dense_shape)
self.assertIsInstance(sp.shape, tensor_shape.TensorShape)
self.assertIsInstance(sp.dense_shape, ops.Tensor)
self.assertEqual(sp.shape.as_list(), [5, 5])
def test_unknown_shape(self):
@def_function.function
def my_func(dense_shape):
indices = [[0, 2]]
values = [1]
sp = sparse_tensor.SparseTensor(indices, values, dense_shape)
self.assertEqual(sp.shape.as_list(), [None, None])
return sp
my_func.get_concrete_function(
dense_shape=tensor_spec.TensorSpec(
dtype=dtypes.int64, shape=[2,]))
def test_partial_shape(self):
@def_function.function
def my_func(x):
indices = [[0, 2]]
values = [1]
y = ops.convert_to_tensor(3, dtype=dtypes.int64)
dense_shape = [x, y]
sp = sparse_tensor.SparseTensor(indices, values, dense_shape)
self.assertEqual(sp.shape.as_list(), [None, 3])
return sp
my_func.get_concrete_function(
x=tensor_spec.TensorSpec(dtype=dtypes.int64, shape=[]))
def test_neg_shape(self):
indices = [[0, 2]]
values = [1]
dense_shape = [-1, 5]
sp = sparse_tensor.SparseTensor(indices, values, dense_shape)
self.assertEqual(sp.shape.as_list(), [None, 5])
def test_unknown_tensor_shape(self):
@def_function.function
def my_func(x):
indices = [[0, 0]]
values = [1]
dense_shape = array_ops.shape(x)
dense_shape = math_ops.cast(dense_shape, dtypes.int64)
sp = sparse_tensor.SparseTensor(indices, values, dense_shape)
self.assertEqual(sp.shape.as_list(), [None, None])
return sp
my_func.get_concrete_function(
x=tensor_spec.TensorSpec(dtype=dtypes.int64, shape=[None, None]))
def test_unknown_rank(self):
@def_function.function
def my_func(dense_shape):
indices = [[0, 0]]
values = [1]
sp = sparse_tensor.SparseTensor(indices, values, dense_shape)
self.assertEqual(sp.shape.rank, None)
return sp
my_func.get_concrete_function(
dense_shape=tensor_spec.TensorSpec(dtype=dtypes.int64, shape=[None]))
@test_util.run_all_in_graph_and_eager_modes
class SparseTensorSpecTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
def assertAllTensorsEqual(self, list1, list2):
self.assertLen(list1, len(list2))
for (t1, t2) in zip(list1, list2):
self.assertAllEqual(t1, t2)
def testConstruction(self):
spec1 = sparse_tensor.SparseTensorSpec()
self.assertEqual(spec1.shape.rank, None)
self.assertEqual(spec1.dtype, dtypes.float32)
spec2 = sparse_tensor.SparseTensorSpec([None, None], dtypes.string)
self.assertEqual(spec2.shape.as_list(), [None, None])
self.assertEqual(spec2.dtype, dtypes.string)
def testValueType(self):
spec1 = sparse_tensor.SparseTensorSpec()
self.assertEqual(spec1.value_type, sparse_tensor.SparseTensor)
@parameterized.parameters([
(sparse_tensor.SparseTensorSpec(),
(tensor_shape.TensorShape(None), dtypes.float32)),
(sparse_tensor.SparseTensorSpec(shape=[5, None, None]),
(tensor_shape.TensorShape([5, None, None]), dtypes.float32)),
(sparse_tensor.SparseTensorSpec(dtype=dtypes.int32),
(tensor_shape.TensorShape(None), dtypes.int32)),
]) # pyformat: disable
def testSerialize(self, st_spec, expected):
serialization = st_spec._serialize()
# TensorShape has an unconventional definition of equality, so we can't use
# assertEqual directly here. But repr() is deterministic and lossless for
# the expected values, so we can use that instead.
self.assertEqual(repr(serialization), repr(expected))
@parameterized.parameters([
(sparse_tensor.SparseTensorSpec(dtype=dtypes.string), [
tensor_spec.TensorSpec([None, None], dtypes.int64),
tensor_spec.TensorSpec([None], dtypes.string),
tensor_spec.TensorSpec([None], dtypes.int64)
]),
(sparse_tensor.SparseTensorSpec(shape=[5, None, None]), [
tensor_spec.TensorSpec([None, 3], dtypes.int64),
tensor_spec.TensorSpec([None], dtypes.float32),
tensor_spec.TensorSpec([3], dtypes.int64)
]),
])
def testComponentSpecs(self, st_spec, expected):
self.assertEqual(st_spec._component_specs, expected)
@parameterized.parameters([
{
"st_spec": sparse_tensor.SparseTensorSpec(),
"indices": [[0, 1], [10, 8]],
"values": [3.0, 5.0],
"dense_shape": [100, 100]
},
{
"st_spec": sparse_tensor.SparseTensorSpec([100, None, None]),
"indices": [[0, 1, 3], [10, 8, 2]],
"values": [3.0, 5.0],
"dense_shape": [100, 20, 20]
},
])
def testToFromComponents(self, st_spec, indices, values, dense_shape):
st = sparse_tensor.SparseTensor(indices, values, dense_shape)
actual_components = st_spec._to_components(st)
self.assertAllTensorsEqual(actual_components,
[indices, values, dense_shape])
st_reconstructed = st_spec._from_components(actual_components)
self.assertAllEqual(st.indices, st_reconstructed.indices)
self.assertAllEqual(st.values, st_reconstructed.values)
self.assertAllEqual(st.dense_shape, st_reconstructed.dense_shape)
@test_util.run_v1_only("SparseTensorValue is deprecated in v2")
def testFromNumpyComponents(self):
indices = np.array([[0], [8]])
values = np.array([1.0, 9.0])
dense_shape = np.array([100])
spec = sparse_tensor.SparseTensorSpec()
st = spec._from_components([indices, values, dense_shape])
self.assertIsInstance(st, sparse_tensor.SparseTensorValue)
self.assertAllEqual(st.indices, indices)
self.assertAllEqual(st.values, values)
self.assertAllEqual(st.dense_shape, dense_shape)
@parameterized.parameters([
sparse_tensor.SparseTensorSpec(dtype=dtypes.string),
sparse_tensor.SparseTensorSpec(shape=[5, None, None]),
])
def testFlatTensorSpecs(self, st_spec):
self.assertEqual(st_spec._flat_tensor_specs,
[tensor_spec.TensorSpec(None, dtypes.variant)])
@parameterized.parameters([
{
"st_spec": sparse_tensor.SparseTensorSpec(),
"indices": [[0, 1], [10, 8]],
"values": [3.0, 5.0],
"dense_shape": [100, 100]
},
{
"st_spec": sparse_tensor.SparseTensorSpec([100, None, None]),
"indices": [[0, 1, 3], [10, 8, 2]],
"values": [3.0, 5.0],
"dense_shape": [100, 20, 20]
},
])
def testToFromTensorList(self, st_spec, indices, values, dense_shape):
st = sparse_tensor.SparseTensor(indices, values, dense_shape)
tensor_list = st_spec._to_tensor_list(st)
st_reconstructed = st_spec._from_tensor_list(tensor_list)
self.assertAllEqual(st.indices, st_reconstructed.indices)
self.assertAllEqual(st.values, st_reconstructed.values)
self.assertAllEqual(st.dense_shape, st_reconstructed.dense_shape)
@parameterized.parameters([
(sparse_tensor.SparseTensorSpec([2, None], dtypes.float32), 32,
sparse_tensor.SparseTensorSpec([32, 2, None], dtypes.float32)),
(sparse_tensor.SparseTensorSpec([4, None], dtypes.float32), None,
sparse_tensor.SparseTensorSpec([None, 4, None], dtypes.float32)),
(sparse_tensor.SparseTensorSpec([2], dtypes.float32), 32,
sparse_tensor.SparseTensorSpec([32, 2], dtypes.float32)),
])
def testBatch(self, spec, batch_size, expected):
self.assertEqual(spec._batch(batch_size), expected)
@parameterized.parameters([
(sparse_tensor.SparseTensorSpec([32, None, None], dtypes.float32),
sparse_tensor.SparseTensorSpec([None, None], dtypes.float32)),
(sparse_tensor.SparseTensorSpec([None, None, None], dtypes.float32),
sparse_tensor.SparseTensorSpec([None, None], dtypes.float32)),
(sparse_tensor.SparseTensorSpec([32, 2], dtypes.float32),
sparse_tensor.SparseTensorSpec([2], dtypes.float32)),
])
def testUnbatch(self, spec, expected):
self.assertEqual(spec._unbatch(), expected)
if __name__ == "__main__":
googletest.main()
| testPythonConstruction |
connect.dto.ts | import {
IsOptional, IsString, Length,
MaxLength,
} from 'class-validator';
export class | {
@MaxLength(30)
@IsString()
@IsOptional()
nick: string;
@Length(6, 64)
@IsString()
password: string;
}
| ConnectDto |
kustomize_test.go | package cert_manager_kube_system_resources
| import (
"github.com/kubeflow/manifests/tests"
"testing"
)
func TestKustomize(t *testing.T) {
testCase := &tests.KustomizeTestCase{
Package: "../../../../../stacks/azure/application/cert-manager-kube-system-resources",
Expected: "test_data/expected",
}
tests.RunTestCase(t, testCase)
} | |
model_get_dogma_effects_effect_id_not_found.go | /*
* EVE Swagger Interface
*
* An OpenAPI for EVE Online
*
* OpenAPI spec version: 0.8.6
*
* Generated by: https://github.com/swagger-api/swagger-codegen.git
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package esi
/* A list of GetDogmaEffectsEffectIdNotFound. */
//easyjson:json
type GetDogmaEffectsEffectIdNotFoundList []GetDogmaEffectsEffectIdNotFound
/* Not found */
//easyjson:json | type GetDogmaEffectsEffectIdNotFound struct {
Error_ string `json:"error,omitempty"` /* Not found message */
} | |
nodes.go | package protocol
import (
"fmt"
"io"
)
func | (conn *Conn, cmd string, arg []string, ofp io.Writer) error {
fmt.Fprintln(ofp, conn.Srv.Name)
fmt.Fprintln(ofp, ".")
return nil
}
| Nodes |
concept_discriminator.py | """
discriminator model
"""
import torch
import torch.nn as nn
import torchvision.models as models
import json
from easydict import EasyDict as edict
from graphs.weights_initializer import weights_init
class EncoderModel(nn.Module):
def __init__(self,config):
super(EncoderModel, self).__init__()
self.config = config
self.num_classes = self.config.num_classes
self.progress = 0.0
self.encoder = nn.Sequential(
nn.Conv2d(in_channels=3,out_channels=32, kernel_size=3, stride=1, padding=1), # b, 32, 224, 224
nn.ReLU(True),
nn.MaxPool2d(2, stride=None), # b, 32, 112, 112
nn.Conv2d(in_channels=32,out_channels=64, kernel_size=3, stride=1, padding=1), # b, 64, 112, 112
nn.ReLU(True),
nn.MaxPool2d(2, stride=None), # b, 64, 56, 56
nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1), # b, 128, 56, 56
nn.ReLU(True),
nn.MaxPool2d(2, stride=None), # b, 128, 28, 28
)
        self.linear_layers = nn.Sequential(
            # 128 channels * (image_size/8)^2 positions after three 2x poolings;
            # for image_size == 224 this equals 2 * image_size * image_size.
            nn.Linear(2*self.config.image_size*self.config.image_size, out_features=128),
            nn.Linear(128, out_features=self.config.num_ways),
        )
def forward(self, x):
#x = self.encoder(x)
#print(x.size())
#self.discriminator = nn.Sequential(self.encoder, self.fc())
|
class ConceptDiscriminatorModel(torch.nn.Module): #new model
def __init__(self, pretrained_model):
super(ConceptDiscriminatorModel, self).__init__()
self.new_model = nn.Sequential(
nn.Linear(in_features=512, out_features=30))
self.pretrained_model = pretrained_model
    def forward(self, x):
        x = self.pretrained_model(x)
        # project the pretrained features onto the new 30-way output head,
        # which was defined above but never applied in the original forward
        x = self.new_model(x)
        return x
| x = self.encoder(x)
x = x.view(x.size(0), -1)
x = self.linear_layers(x)
#print(x.size())
#x = x.view(1, -1)
#x = self.fc(x)
return x |
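# Shape sanity check for EncoderModel above (a sketch; the `edict` config keys
# and num_ways value are assumptions, not part of the original file): three 2x
# poolings give image_size 224 -> 112 -> 56 -> 28, so the flattened encoder
# output is 128 * 28 * 28 == 100352 == 2 * 224 * 224, matching the first
# Linear layer's in_features.
#   cfg = edict({'num_classes': 30, 'num_ways': 5, 'image_size': 224})
#   x = torch.randn(1, 3, 224, 224)
#   assert EncoderModel(cfg).encoder(x).flatten(1).shape[1] == 2 * 224 * 224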
arm64.rs | mod arm64simd;
use crate::Ops;
use crate::frame::MatMatMulImpl;
use crate::frame::SigmoidImpl;
use crate::frame::TanhImpl;
use tract_data::internal::DimLike;
fn is_cortex_a53() -> std::io::Result<bool> {
let cpu_info = std::fs::read_to_string("/proc/cpuinfo")?;
let a53 =
cpu_info.split("\n").any(|line| line.starts_with("CPU part") && line.contains("0xd03"));
Ok(a53)
}
pub fn | (ops: &mut Ops) {
if is_cortex_a53().unwrap_or(false) {
log::info!("arm64simd activated for smmm (cortex A53)");
ops.mmm_f32 = Box::new(|m, k, n| {
if n == 1 {
Box::new(MatMatMulImpl::<arm64simd::MatMatMulF32x64x1A53, f32, f32>::new(m, k, 1))
} else if m >= 128 || m.div_ceil(12) * 12 <= m.div_ceil(8) * 8 {
Box::new(MatMatMulImpl::<arm64simd::MatMatMulF32x12x8A53, f32, f32>::new(m, k, n))
} else {
Box::new(MatMatMulImpl::<arm64simd::MatMatMulF32x8x8A53, f32, f32>::new(m, k, n))
}
})
} else {
log::info!("arm64simd activated for smmm (generic)");
ops.mmm_f32 = Box::new(|m, k, n| {
if n == 1 {
Box::new(MatMatMulImpl::<arm64simd::MatMatMulF32x64x1, f32, f32>::new(m, k, 1))
} else if m >= 128 || m.div_ceil(12) * 12 <= m.div_ceil(8) * 8 {
Box::new(MatMatMulImpl::<arm64simd::MatMatMulF32x12x8, f32, f32>::new(m, k, n))
} else {
Box::new(MatMatMulImpl::<arm64simd::MatMatMulF32x8x8, f32, f32>::new(m, k, n))
}
})
}
ops.qmmm_i8_i8 = Box::new(|m, k, n| {
Box::new(MatMatMulImpl::<arm64simd::MatMatMulI8x8x8, i8, i32>::new(m, k, n))
});
ops.qmmm_i8_i32 = Box::new(|m, k, n| {
Box::new(MatMatMulImpl::<arm64simd::MatMatMulI8xI32x8x8, i32, i32>::new(m, k, n))
});
ops.sigmoid_f32 = Box::new(|| Box::new(SigmoidImpl::<arm64simd::SigmoidF32x4n, f32>::new()));
ops.tanh_f32 = Box::new(|| Box::new(TanhImpl::<arm64simd::TanhF32x4n, f32>::new()));
}
| plug |
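// Worked example of the tile-waste test above (illustrative, not part of the
// original file): rows are padded up to the kernel's tile height, so
// `m.div_ceil(12) * 12 <= m.div_ceil(8) * 8` picks the wider 12x8 kernel
// whenever it wastes no more padded rows than 8x8 would. For m = 20 both
// pad to 24 rows, so 12x8 wins the tie; for m = 9, 12x8 pads to 12 rows
// while 8x8 pads to 16, so 12x8 is selected there too.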
conformer-tiny-ctc.py | import os
os.environ['CUDA_VISIBLE_DEVICES'] = '3'
import pyroomacoustics as pra
import numpy as np
from pydub import AudioSegment
from sklearn.utils import shuffle
from glob import glob
import random
import json
from malaya_speech.train.model.conformer.model import Model as ConformerModel
from malaya_speech.train.model import hubert, ctc
import malaya_speech.train as train
import malaya_speech.config
import malaya_speech.augmentation.waveform as augmentation
import malaya_speech
import tensorflow as tf
import string
sr = 16000
maxlen = 18
minlen_text = 1
prob_aug = 0.95
unique_vocab = [''] + list(string.ascii_lowercase + string.digits) + [' ']
def augment_room(y, scale=1.0):
corners = np.array(
[[0, 0], [0, 5 * scale], [3 * scale, 5 * scale], [3 * scale, 0]]
).T
room = pra.Room.from_corners(
corners,
fs=sr,
materials=pra.Material(0.2, 0.15),
ray_tracing=True,
air_absorption=True,
)
room.extrude(3.5, materials=pra.Material(0.2, 0.15))
room.set_ray_tracing(
receiver_radius=0.5, n_rays=1000, energy_thres=1e-5
)
room.add_source([1.5 * scale, 4 * scale, 0.5], signal=y)
R = np.array([[1.5 * scale], [0.5 * scale], [0.5]])
room.add_microphone(R)
room.simulate()
return room.mic_array.signals[0]
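# Usage sketch for augment_room above (hypothetical, not part of the original
# script): reverberate one second of noise through the simulated
# 3 m x 5 m x 3.5 m room.
#   y = np.random.randn(sr).astype(np.float32)
#   y_wet = augment_room(y)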
def random_amplitude_threshold(sample, low=1, high=2, threshold=0.4):
y_aug = sample.copy()
dyn_change = np.random.uniform(low=low, high=high)
y_aug[np.abs(y_aug) >= threshold] = (
y_aug[np.abs(y_aug) >= threshold] * dyn_change
)
return np.clip(y_aug, -1, 1)
def add_uniform_noise(
sample, power=0.01, return_noise=False, scale=False
):
y_noise = sample.copy()
noise_amp = power * np.random.uniform() * np.amax(y_noise)
noise = noise_amp * np.random.normal(size=y_noise.shape[0])
y_noise = y_noise + noise
if scale:
y_noise = y_noise / (np.max(np.abs(y_noise)) + 1e-9)
if return_noise:
if scale:
noise = noise / (np.max(np.abs(y_noise)) + 1e-9)
return y_noise, noise
else:
return y_noise
def calc(signal, add_uniform=True):
choice = random.randint(0, 10)
print('choice', choice)
if choice == 0:
x = augmentation.sox_augment_high(
signal,
min_bass_gain=random.randint(25, 50),
reverberance=random.randint(0, 80),
hf_damping=10,
room_scale=random.randint(0, 50), | x = augmentation.sox_augment_high(
signal,
min_bass_gain=random.randint(25, 70),
reverberance=random.randint(0, 80),
hf_damping=10,
room_scale=random.randint(0, 50),
negate=0,
)
if choice == 2:
x = augmentation.sox_augment_low(
signal,
min_bass_gain=random.randint(5, 30),
reverberance=random.randint(0, 80),
hf_damping=10,
room_scale=random.randint(0, 50),
negate=random.randint(0, 1),
)
if choice == 3:
x = augmentation.sox_augment_combine(
signal,
min_bass_gain_high=random.randint(25, 70),
min_bass_gain_low=random.randint(5, 30),
reverberance=random.randint(0, 80),
hf_damping=10,
room_scale=random.randint(0, 90),
)
if choice == 4:
x = augmentation.sox_reverb(
signal,
reverberance=random.randint(10, 80),
hf_damping=10,
room_scale=random.randint(10, 90),
)
if choice == 5:
x = random_amplitude_threshold(
signal, threshold=random.uniform(0.35, 0.8)
)
if choice == 6:
x = augmentation.lowpass_filter(
signal, sr=sr, cutoff=random.randint(200, 551)
)
if choice == 7:
x = augmentation.highpass_filter(
signal, sr=sr, cutoff=random.randint(551, 1653)
)
if choice == 8:
x = augmentation.bandpass_filter(
signal,
sr=sr,
cutoff_low=random.randint(200, 551),
cutoff_high=random.randint(551, 1653),
)
if choice == 9:
x = augment_room(signal)
if choice == 10:
x = signal
if choice not in [5] and random.gauss(0.5, 0.14) > 0.6:
x = random_amplitude_threshold(
x, low=1.0, high=2.0, threshold=random.uniform(0.6, 0.9)
)
if random.gauss(0.5, 0.14) > 0.6 and add_uniform:
x = add_uniform_noise(x, power=random.uniform(0.005, 0.015))
return x
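# Augmentation policy: one of the 11 branches above is chosen uniformly (choice 10 is
# a passthrough), then amplitude thresholding and uniform noise are each stacked on
# top independently whenever random.gauss(0.5, 0.14) > 0.6, i.e. with probability
# P(Z > (0.6 - 0.5) / 0.14) ~= 0.24 per gate.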
def mp3_to_wav(file, sr=sr):
audio = AudioSegment.from_file(file)
audio = audio.set_frame_rate(sr).set_channels(1)
sample = np.array(audio.get_array_of_samples())
return malaya_speech.astype.int_to_float(sample), sr
def generate(file):
with open(file) as fopen:
dataset = json.load(fopen)
audios, cleaned_texts = dataset['X'], dataset['Y']
while True:
audios, cleaned_texts = shuffle(audios, cleaned_texts)
for i in range(len(audios)):
try:
if audios[i].endswith('.mp3'):
# print('found mp3', audios[i])
wav_data, _ = mp3_to_wav(audios[i])
else:
wav_data, _ = malaya_speech.load(audios[i], sr=sr)
if len(cleaned_texts[i]) < minlen_text:
# print(f'skipped text too short {audios[i]}')
continue
if (len(wav_data) / sr) > maxlen:
continue
t = [unique_vocab.index(c) for c in cleaned_texts[i]]
yield {
'waveforms': wav_data,
'waveforms_length': [len(wav_data)],
'targets': t,
'targets_length': [len(t)],
}
except Exception as e:
print(e)
def get_dataset(
file,
batch_size=12,
shuffle_size=20,
thread_count=24,
maxlen_feature=1800,
):
def get():
dataset = tf.data.Dataset.from_generator(
generate,
{
'waveforms': tf.float32,
'waveforms_length': tf.int32,
'targets': tf.int32,
'targets_length': tf.int32,
},
output_shapes={
'waveforms': tf.TensorShape([None]),
'waveforms_length': tf.TensorShape([None]),
'targets': tf.TensorShape([None]),
'targets_length': tf.TensorShape([None]),
},
args=(file,),
)
dataset = dataset.prefetch(tf.contrib.data.AUTOTUNE)
dataset = dataset.padded_batch(
batch_size,
padded_shapes={
'waveforms': tf.TensorShape([None]),
'waveforms_length': tf.TensorShape([None]),
'targets': tf.TensorShape([None]),
'targets_length': tf.TensorShape([None]),
},
padding_values={
'waveforms': tf.constant(0, dtype=tf.float32),
'waveforms_length': tf.constant(0, dtype=tf.int32),
'targets': tf.constant(0, dtype=tf.int32),
'targets_length': tf.constant(0, dtype=tf.int32),
},
)
return dataset
return get
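# Minimal consumption sketch (TF1 graph mode, assumed file path): each element is a
# dict of dense tensors padded to the longest item in the batch.
#     batch_fn = get_dataset('bahasa-asr-test.json')
#     features = tf.data.make_one_shot_iterator(batch_fn()).get_next()
#     # features['waveforms'] has shape (batch_size, max_waveform_len_in_batch)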
class Encoder:
def __init__(self, config):
self.config = config
self.encoder = ConformerModel(**self.config)
def __call__(self, x, input_mask, training=True):
return self.encoder(x, training=training)
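# NOTE: `input_mask` is accepted for interface compatibility with the HuBERT wrapper
# below, but it is not forwarded; only `x` and `training` reach the Conformer call.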
total_steps = 2000000
def model_fn(features, labels, mode, params):
config_conformer = malaya_speech.config.conformer_tiny_encoder_config
config_conformer['subsampling']['type'] = 'none'
config_conformer['dropout'] = 0.0
encoder = Encoder(config_conformer)
cfg = hubert.HuBERTConfig(
extractor_mode='layer_norm',
dropout=0.0,
attention_dropout=0.0,
encoder_layerdrop=0.0,
dropout_input=0.0,
dropout_features=0.0,
final_dim=128,
)
model = hubert.Model(cfg, encoder, ['pad', 'eos', 'unk'] + [str(i) for i in range(100)])
X = features['waveforms']
X_len = features['waveforms_length'][:, 0]
targets = features['targets']
targets_int32 = tf.cast(targets, tf.int32)
targets_length = features['targets_length'][:, 0]
r = model(X, padding_mask=X_len, features_only=True, mask=False)
logits = tf.layers.dense(r['x'], len(unique_vocab) + 1)
seq_lens = tf.reduce_sum(
tf.cast(tf.logical_not(r['padding_mask']), tf.int32), axis=1
)
mean_error, sum_error, sum_weight = ctc.loss.ctc_loss(
logits, seq_lens, targets_int32, targets_length
)
loss = mean_error
accuracy = ctc.metrics.ctc_sequence_accuracy(
logits, seq_lens, targets_int32, targets_length,
)
tf.identity(loss, 'train_loss')
tf.identity(accuracy, name='train_accuracy')
tf.summary.scalar('train_accuracy', accuracy)
variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
init_checkpoint = 'hubert-conformer-tiny/model.ckpt-1000000'
assignment_map, initialized_variable_names = train.get_assignment_map_from_checkpoint(
variables, init_checkpoint
)
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = train.optimizer.adamw.create_optimizer(
loss,
init_lr=5e-5,
num_train_steps=total_steps,
num_warmup_steps=100000,
end_learning_rate=0.0,
weight_decay_rate=0.01,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-6,
clip_norm=1.0,
)
estimator_spec = tf.estimator.EstimatorSpec(
mode=mode, loss=loss, train_op=train_op
)
elif mode == tf.estimator.ModeKeys.EVAL:
estimator_spec = tf.estimator.EstimatorSpec(
mode=tf.estimator.ModeKeys.EVAL,
loss=loss,
eval_metric_ops={
'accuracy': ctc.metrics.ctc_sequence_accuracy_estimator(
logits, seq_lens, targets_int32, targets_length
)
},
)
return estimator_spec
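# The dense projection inside model_fn emits len(unique_vocab) + 1 logits per frame:
# one per output character plus one extra class, conventionally the CTC blank.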
train_hooks = [
tf.train.LoggingTensorHook(
['train_accuracy', 'train_loss'], every_n_iter=1
)
]
train_dataset = get_dataset('bahasa-asr-train-combined.json')
dev_dataset = get_dataset('bahasa-asr-test.json')
train.run_training(
train_fn=train_dataset,
model_fn=model_fn,
model_dir='hubert-conformer-tiny-ctc-char',
num_gpus=1,
log_step=1,
save_checkpoint_step=20000,
max_steps=total_steps,
eval_fn=dev_dataset,
train_hooks=train_hooks,
) | negate=1,
)
if choice == 1: |
main.rs | // Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
anyhow,
cm_rust::{FidlIntoNative, NativeIntoFidl},
fidl::endpoints::{ProtocolMarker, RequestStream},
fidl_fuchsia_component as fcomponent, fidl_fuchsia_data as fdata,
fidl_fuchsia_io::DirectoryProxy,
fidl_fuchsia_realm_builder as frealmbuilder, fidl_fuchsia_sys2 as fsys,
fuchsia_async as fasync,
fuchsia_component::server as fserver,
fuchsia_syslog as syslog,
futures::{future::BoxFuture, FutureExt, StreamExt, TryStreamExt},
io_util,
lazy_static::lazy_static,
log::*,
std::{
collections::HashMap,
convert::{TryFrom, TryInto},
fmt::{self, Display},
sync::Arc,
},
thiserror::{self, Error},
url::Url,
};
mod resolver;
mod runner;
lazy_static! {
pub static ref BINDER_PROTOCOL_CAPABILITY: frealmbuilder::Capability =
frealmbuilder::Capability::Protocol(frealmbuilder::ProtocolCapability {
name: Some(fcomponent::BinderMarker::DEBUG_NAME.to_owned()),
..frealmbuilder::ProtocolCapability::EMPTY
});
}
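// This capability is routed from the root to above-root in `RealmNode::commit`, so
// clients can connect to fuchsia.component.Binder to manually start the realm they
// constructed.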
#[fasync::run_singlethreaded()]
async fn main() {
syslog::init_with_tags(&["fuchsia_component_test_framework_intermediary"])
.expect("failed to init logging");
info!("started");
let mut fs = fserver::ServiceFs::new_local();
let registry = resolver::Registry::new();
let runner = runner::Runner::new();
let registry_clone = registry.clone();
fs.dir("svc").add_fidl_service(move |stream| registry_clone.run_resolver_service(stream));
let runner_clone = runner.clone();
fs.dir("svc").add_fidl_service(move |stream| runner_clone.run_runner_service(stream));
fs.dir("svc").add_fidl_service(move |stream| {
let registry = registry.clone();
let runner = runner.clone();
fasync::Task::local(async move {
if let Err(e) = handle_framework_intermediary_stream(stream, registry, runner).await {
error!("error encountered while running framework intermediary service: {:?}", e);
}
})
.detach();
});
fs.take_and_serve_directory_handle().expect("did not receive directory handle");
fs.collect::<()>().await;
}
async fn handle_framework_intermediary_stream(
mut stream: frealmbuilder::FrameworkIntermediaryRequestStream,
registry: Arc<resolver::Registry>,
runner: Arc<runner::Runner>,
) -> Result<(), anyhow::Error> {
let mut realm_tree = RealmNode::default();
let mut test_pkg_dir = None;
while let Some(req) = stream.try_next().await? {
match req {
frealmbuilder::FrameworkIntermediaryRequest::Init { pkg_dir_handle, responder } => {
if test_pkg_dir.is_some() {
responder.send(&mut Err(Error::PkgDirAlreadySet.log_and_convert()))?;
} else {
test_pkg_dir = Some(
pkg_dir_handle.into_proxy().expect("failed to convert ClientEnd to proxy"),
);
responder.send(&mut Ok(()))?;
}
}
frealmbuilder::FrameworkIntermediaryRequest::SetComponent {
moniker,
component,
responder,
} => {
match realm_tree
.set_component(moniker.clone().into(), component.clone(), &test_pkg_dir)
.await
{
Ok(()) => responder.send(&mut Ok(()))?,
Err(e) => {
warn!(
"error occurred when setting component {:?} to {:?}",
moniker, component
);
responder.send(&mut Err(e.log_and_convert()))?;
}
}
}
frealmbuilder::FrameworkIntermediaryRequest::GetComponentDecl {
moniker,
responder,
} => match realm_tree.get_component_decl(moniker.clone().into()) {
Ok(decl) => responder.send(&mut Ok(decl.native_into_fidl()))?,
Err(e) => {
warn!("error occurred when getting decl for component {:?}", moniker);
responder.send(&mut Err(e.log_and_convert()))?;
}
},
frealmbuilder::FrameworkIntermediaryRequest::RouteCapability { route, responder } => {
match realm_tree.route_capability(route.clone()) {
Ok(()) => responder.send(&mut Ok(()))?,
Err(e) => {
warn!("error occurred when routing capability: {:?}", route);
responder.send(&mut Err(e.log_and_convert()))?
}
}
}
frealmbuilder::FrameworkIntermediaryRequest::MarkAsEager { moniker, responder } => {
match realm_tree.mark_as_eager(moniker.clone().into()) {
Ok(()) => responder.send(&mut Ok(()))?,
Err(e) => {
warn!("error occurred when marking {:?} as eager", moniker);
responder.send(&mut Err(e.log_and_convert()))?;
}
}
}
frealmbuilder::FrameworkIntermediaryRequest::Contains { moniker, responder } => {
responder.send(realm_tree.contains(moniker.clone().into()))?;
}
frealmbuilder::FrameworkIntermediaryRequest::Commit { responder } => {
match realm_tree
.clone()
.commit(registry.clone(), vec![], test_pkg_dir.clone())
.await
{
Ok(url) => responder.send(&mut Ok(url))?,
Err(e) => {
warn!("error occurred when committing");
responder.send(&mut Err(e.log_and_convert()))?;
}
}
}
frealmbuilder::FrameworkIntermediaryRequest::NewMockId { responder } => {
let mock_id = runner.register_mock(stream.control_handle()).await;
responder.send(mock_id.as_str())?;
}
}
}
Ok(())
}
#[derive(Debug, Error)]
enum Error {
#[error("unable to access components behind ChildDecls: {0}")]
NodeBehindChildDecl(Moniker),
#[error("component child doesn't exist: {0}")]
NoSuchChild(String),
#[error("unable to set the root component to a URL")]
RootCannotBeSetToUrl,
#[error("unable to set the root component as eager")]
RootCannotBeEager,
#[error("received malformed FIDL")]
BadFidl,
#[error("bad request: missing field {0}")]
MissingField(&'static str),
#[error("route targets cannot be empty")]
RouteTargetsEmpty,
#[error("the route source does not exist: {0}")]
MissingRouteSource(Moniker),
#[error("the route target does not exist: {0}")]
MissingRouteTarget(Moniker),
#[error("a route's target cannot be equal to its source: {0:?}")]
RouteSourceAndTargetMatch(frealmbuilder::RouteEndpoint),
#[error("can only use protocols from debug: {0:?}")]
InvalidCapabilityFromDebug(Moniker),
#[error("the component decl for {0} failed validation: {1:?}")]
ValidationError(Moniker, cm_fidl_validator::ErrorList),
#[error("{0} capabilities cannot be exposed")]
UnableToExpose(&'static str),
#[error("storage capabilities must come from above root")]
StorageSourceInvalid,
#[error("component with moniker {0} does not exist")]
MonikerNotFound(Moniker),
#[error("the package directory has already been set for this connection")]
PkgDirAlreadySet,
#[error("unable to load component from package, the package dir is not set")]
PkgDirNotSet,
#[error("failed to load component from package due to IO error")]
PkgDirIoError(io_util::node::OpenError),
#[error("failed to load component decl")]
FailedToLoadComponentDecl(anyhow::Error),
}
impl Error {
fn log_and_convert(self) -> frealmbuilder::RealmBuilderError {
warn!("sending error to client: {:?}", self);
match self {
Error::NodeBehindChildDecl(_) => frealmbuilder::RealmBuilderError::NodeBehindChildDecl,
Error::NoSuchChild(_) => frealmbuilder::RealmBuilderError::NoSuchChild,
Error::RootCannotBeSetToUrl => frealmbuilder::RealmBuilderError::RootCannotBeSetToUrl,
Error::RootCannotBeEager => frealmbuilder::RealmBuilderError::RootCannotBeEager,
Error::BadFidl => frealmbuilder::RealmBuilderError::BadFidl,
Error::MissingField(_) => frealmbuilder::RealmBuilderError::MissingField,
Error::RouteTargetsEmpty => frealmbuilder::RealmBuilderError::RouteTargetsEmpty,
Error::MissingRouteSource(_) => frealmbuilder::RealmBuilderError::MissingRouteSource,
Error::MissingRouteTarget(_) => frealmbuilder::RealmBuilderError::MissingRouteTarget,
Error::RouteSourceAndTargetMatch(_) => {
frealmbuilder::RealmBuilderError::RouteSourceAndTargetMatch
}
Error::ValidationError(_, _) => frealmbuilder::RealmBuilderError::ValidationError,
Error::UnableToExpose(_) => frealmbuilder::RealmBuilderError::UnableToExpose,
Error::StorageSourceInvalid => frealmbuilder::RealmBuilderError::StorageSourceInvalid,
Error::MonikerNotFound(_) => frealmbuilder::RealmBuilderError::MonikerNotFound,
Error::PkgDirAlreadySet => frealmbuilder::RealmBuilderError::PkgDirAlreadySet,
Error::PkgDirNotSet => frealmbuilder::RealmBuilderError::PkgDirNotSet,
Error::PkgDirIoError(_) => frealmbuilder::RealmBuilderError::PkgDirIoError,
Error::FailedToLoadComponentDecl(_) => {
frealmbuilder::RealmBuilderError::FailedToLoadComponentDecl
}
Error::InvalidCapabilityFromDebug(_) => {
frealmbuilder::RealmBuilderError::InvalidCapabilityFromDebug
}
}
}
}
#[derive(Debug, Clone, Default, PartialEq)]
struct RealmNode {
decl: cm_rust::ComponentDecl,
eager: bool,
environment: Option<String>,
/// When a component decl comes directly from the test package directory, we should check the
/// component's manifest during route generation to see if it matches our expectations, instead
/// of blindly pushing things into it. This way we can detect common issues like "the source
/// component doesn't declare that capability".
component_loaded_from_pkg: bool,
/// Children stored in this HashMap can be mutated. Children stored in `decl.children` can not.
/// Any children stored in `mutable_children` do NOT have a corresponding `ChildDecl` stored in
/// `decl.children`, the two should be fully mutually exclusive.
///
/// Suitable `ChildDecl`s for the contents of `mutable_children` are generated and added to
/// `decl.children` when `commit()` is called.
mutable_children: HashMap<String, RealmNode>,
}
#[derive(PartialEq)]
enum GetBehavior {
CreateIfMissing,
ErrorIfMissing,
}
impl RealmNode {
fn child<'a>(&'a mut self, child_name: &String) -> Result<&'a mut Self, Error> {
self.mutable_children.get_mut(child_name).ok_or(Error::NoSuchChild(child_name.clone()))
}
fn child_create_if_missing<'a>(&'a mut self, child_name: &String) -> &'a mut Self {
if !self.mutable_children.contains_key(child_name) {
self.mutable_children.insert(child_name.clone(), RealmNode::default());
}
self.child(child_name).unwrap()
}
/// Calls `cm_fidl_validator` on this node's decl, filtering out any errors caused by
/// missing ChildDecls, as these children may be added to the mutable_children list at a later
/// point. These decls are re-validated (without filtering out errors) during `commit()`.
/// `moniker` is used for error reporting.
fn validate(&self, moniker: &Moniker) -> Result<(), Error> {
if let Err(mut e) = cm_fidl_validator::validate(&self.decl.clone().native_into_fidl()) {
e.errs = e
.errs
.into_iter()
.filter(|e| match e {
cm_fidl_validator::Error::InvalidChild(_, _) => false,
_ => true,
})
.collect();
if !e.errs.is_empty() {
return Err(Error::ValidationError(moniker.clone(), e));
}
}
Ok(())
}
fn get_node_mut<'a>(
&'a mut self,
moniker: &Moniker,
behavior: GetBehavior,
) -> Result<&'a mut RealmNode, Error> {
let mut current_node = self;
for part in moniker.path() {
if current_node.decl.children.iter().any(|c| c.name == part.to_string()) {
return Err(Error::NodeBehindChildDecl(moniker.clone()));
}
current_node = match behavior {
GetBehavior::CreateIfMissing => current_node.child_create_if_missing(part),
GetBehavior::ErrorIfMissing => current_node.child(part)?,
}
}
Ok(current_node)
}
/// Returns true if the component exists in this realm.
fn contains(&mut self, moniker: Moniker) -> bool {
// The root node is an edge case. If the client hasn't set or modified the root
// component in any way it should expect the intermediary to state that the root
// component doesn't exist yet, but in this implementation the root node _always_
// exists. If we're checking for the root component and we're equal to the default
// RealmNode (aka there are no children and our decl is empty), then we return false.
if moniker.is_root() && self == &mut RealmNode::default() {
return false;
}
if let Ok(_) = self.get_node_mut(&moniker, GetBehavior::ErrorIfMissing) {
return true;
}
// `get_node_mut` only returns `Ok` for mutable nodes. This node could still be in our
// realm but be immutable, so let's check for that.
if let Some(parent_moniker) = moniker.parent() {
if let Ok(parent_node) = self.get_node_mut(&parent_moniker, GetBehavior::ErrorIfMissing)
{
let child_name = moniker.child_name().unwrap().to_string();
let res = parent_node.decl.children.iter().any(|c| c.name == child_name);
return res;
}
// If the parent node doesn't exist, then the component itself obviously does not
// either.
return false;
} else {
// The root component always exists
return true;
}
}
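    // Sketch of the root edge case above (hypothetical realm): a completely
    // untouched root reports false, while anything explicitly set reports true.
    //     let mut realm = RealmNode::default();
    //     assert!(!realm.contains(Moniker::root()));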
/// Sets the component to the provided component source. If the source is
/// a `Component::decl` then a new node is added to the internal tree
/// structure maintained for this connection. If the source is a
/// `Component::url` then a new ChildDecl is added to the parent of the
/// moniker. If any parents for the component do not exist then they are
/// added. If a different component already exists under this moniker,
/// then it is replaced.
async fn set_component(
&mut self,
moniker: Moniker,
component: frealmbuilder::Component,
test_pkg_dir: &Option<DirectoryProxy>,
) -> Result<(), Error> {
match component {
frealmbuilder::Component::Decl(decl) => {
if let Some(parent_moniker) = moniker.parent() {
let parent_node =
self.get_node_mut(&parent_moniker, GetBehavior::CreateIfMissing)?;
let child_name = moniker.child_name().unwrap().to_string();
parent_node.decl.children = parent_node
.decl
.children
.iter()
.filter(|c| c.name != child_name)
.cloned()
.collect();
}
let node = self.get_node_mut(&moniker, GetBehavior::CreateIfMissing)?;
node.decl = decl.fidl_into_native();
node.validate(&moniker)?;
}
frealmbuilder::Component::Url(url) => {
if is_relative_url(&url) {
return self
.load_decl_from_pkg(
moniker,
url,
test_pkg_dir.as_ref().cloned().ok_or(Error::PkgDirNotSet)?,
)
.await;
}
if moniker.is_root() {
return Err(Error::RootCannotBeSetToUrl);
}
let parent_node =
self.get_node_mut(&moniker.parent().unwrap(), GetBehavior::CreateIfMissing)?;
let child_name = moniker.child_name().unwrap().to_string();
parent_node.mutable_children.remove(&child_name);
parent_node.decl.children = parent_node
.decl
.children
.iter()
.filter(|c| c.name != child_name)
.cloned()
.collect();
parent_node.decl.children.push(cm_rust::ChildDecl {
name: child_name,
url,
startup: fsys::StartupMode::Lazy,
environment: None,
on_terminate: None,
});
}
frealmbuilder::Component::LegacyUrl(url) => {
if let Some(parent_moniker) = moniker.parent() {
let parent_node =
self.get_node_mut(&parent_moniker, GetBehavior::CreateIfMissing)?;
let child_name = moniker.child_name().unwrap().to_string();
parent_node.decl.children = parent_node
.decl
.children
.iter()
.filter(|c| c.name != child_name)
.cloned()
.collect();
}
let node = self.get_node_mut(&moniker, GetBehavior::CreateIfMissing)?;
node.decl = cm_rust::ComponentDecl {
program: Some(cm_rust::ProgramDecl {
runner: Some(crate::runner::RUNNER_NAME.try_into().unwrap()),
info: fdata::Dictionary {
entries: Some(vec![fdata::DictionaryEntry {
key: runner::LEGACY_URL_KEY.to_string(),
value: Some(Box::new(fdata::DictionaryValue::Str(url))),
}]),
..fdata::Dictionary::EMPTY
},
}),
..cm_rust::ComponentDecl::default()
};
node.validate(&moniker)?;
}
_ => return Err(Error::BadFidl),
}
Ok(())
}
/// Loads the file referenced by the relative url `url` from `test_pkg_dir`, and sets it as the
/// decl for the component referred to by `moniker`. Also loads in the declarations for any
/// additional relative URLs in the new decl in the same manner, and so forth until all
/// relative URLs have been processed.
async fn load_decl_from_pkg(
&mut self,
moniker: Moniker,
url: String,
test_pkg_dir: DirectoryProxy,
) -> Result<(), Error> {
// This can't be written recursively, because we need async here and the resulting
// BoxFuture would have to hold on to `&mut self`, which isn't possible because the
// reference is not `'static`.
//
// This is also written somewhat inefficiently, because holding a reference to the current
        // working node in the stack would result in multiple mutable references from `&mut self`
// being held at the same time, which is disallowed. As a result, this re-fetches the
// current working node from the root of the tree on each iteration.
let mut relative_urls_to_process = vec![(moniker, url)];
while let Some((current_moniker, relative_url)) = relative_urls_to_process.pop() {
let current_node = self.get_node_mut(¤t_moniker, GetBehavior::CreateIfMissing)?;
// Load the decl and validate it
let path = relative_url.trim_start_matches('#');
let file_proxy =
io_util::directory::open_file(&test_pkg_dir, &path, io_util::OPEN_RIGHT_READABLE)
.await
.map_err(Error::PkgDirIoError)?;
let fidl_decl = io_util::read_file_fidl::<fsys::ComponentDecl>(&file_proxy)
.await
.map_err(Error::FailedToLoadComponentDecl)?;
current_node.decl = fidl_decl.fidl_into_native();
current_node.component_loaded_from_pkg = true;
current_node.validate(¤t_moniker)?;
// Look through the new decl's children. If there are any relative URLs, we need to
// handle those too.
let mut child_decls_to_keep = vec![];
let mut child_decls_to_load = vec![];
for child in current_node.decl.children.drain(..) {
if is_relative_url(&child.url) {
child_decls_to_load.push(child);
} else {
child_decls_to_keep.push(child);
}
}
current_node.decl.children = child_decls_to_keep;
for child in child_decls_to_load {
let child_node = current_node.child_create_if_missing(&child.name);
let child_moniker = current_moniker.child(child.name.clone());
if child.startup == fsys::StartupMode::Eager {
child_node.eager = true;
}
child_node.environment = child.environment;
relative_urls_to_process.push((child_moniker, child.url));
}
}
Ok(())
}
/// Returns the current value of a component decl in the realm being
/// constructed. Note that this cannot retrieve decls through external
/// URLs, so for example if `SetComponent` is called with `Component::url`
/// and then `GetComponentDecl` is called with the same moniker, an error
/// will be returned.
fn get_component_decl(&mut self, moniker: Moniker) -> Result<cm_rust::ComponentDecl, Error> {
Ok(self.get_node_mut(&moniker, GetBehavior::ErrorIfMissing)?.decl.clone())
}
/// Marks the component and any ancestors of it as eager, ensuring that the
/// component is started immediately once the realm is bound to.
fn mark_as_eager(&mut self, moniker: Moniker) -> Result<(), Error> {
if moniker.is_root() {
return Err(Error::RootCannotBeEager);
}
if !self.contains(moniker.clone()) {
return Err(Error::MonikerNotFound(moniker.clone()));
}
// The component we want to mark as eager could be either mutable or immutable. Mutable
// components are retrievable with `self.get_node_mut`, whereas immutable components are
// found in a ChildDecl in the decl of the node's parent.
if let Ok(node) = self.get_node_mut(&moniker, GetBehavior::ErrorIfMissing) {
node.eager = true;
}
let parent_node =
self.get_node_mut(&moniker.parent().unwrap(), GetBehavior::ErrorIfMissing)?;
if let Some(child_decl) =
parent_node.decl.children.iter_mut().find(|c| &c.name == moniker.child_name().unwrap())
{
child_decl.startup = fsys::StartupMode::Eager;
}
for ancestor in moniker.ancestry() {
let ancestor_node = self.get_node_mut(&ancestor, GetBehavior::ErrorIfMissing)?;
ancestor_node.eager = true;
}
Ok(())
}
/// Adds a capability route to the realm being constructed, adding any
/// necessary offers, exposes, uses, and capability declarations to any
/// component involved in the route. Note that components added with
/// `Component::url` can not be modified, and they are presumed to already
/// have the declarations needed for the route to be valid. If an error is
/// returned some of the components in the route may have been updated while
/// others were not.
fn route_capability(&mut self, route: frealmbuilder::CapabilityRoute) -> Result<(), Error> {
let capability = route.capability.ok_or(Error::MissingField("capability"))?;
let source = route.source.ok_or(Error::MissingField("source"))?;
let targets = route.targets.ok_or(Error::MissingField("targets"))?;
if targets.is_empty() {
return Err(Error::RouteTargetsEmpty);
}
if let frealmbuilder::RouteEndpoint::Component(moniker) = &source {
let moniker: Moniker = moniker.clone().into();
if !self.contains(moniker.clone()) {
return Err(Error::MissingRouteSource(moniker.clone()));
}
}
for target in &targets {
if &source == target {
return Err(Error::RouteSourceAndTargetMatch(source));
}
if let frealmbuilder::RouteEndpoint::Component(target_moniker) = target {
let target_moniker: Moniker = target_moniker.clone().into();
if !self.contains(target_moniker.clone()) {
return Err(Error::MissingRouteTarget(target_moniker));
}
}
}
let force_route = route.force_route.unwrap_or(false);
for target in targets {
if let frealmbuilder::RouteEndpoint::AboveRoot(_) = target {
                // We're routing a capability from a component within our constructed realm to
                // somewhere above it
self.route_capability_to_above_root(
&capability,
source.clone().try_into()?,
force_route,
)?;
} else if let frealmbuilder::RouteEndpoint::AboveRoot(_) = &source {
// We're routing a capability from above our constructed realm to a component
// within it
self.route_capability_from_above_root(
&capability,
target.try_into()?,
force_route,
)?;
} else if let frealmbuilder::RouteEndpoint::Debug(_) = &source {
// We're routing a capability from the debug section of the component's environment.
self.route_capability_from_debug(&capability, target.try_into()?, force_route)?;
} else {
// We're routing a capability from one component within our constructed realm to
// another
let source_moniker = source.clone().try_into()?;
let target_moniker: Moniker = target.try_into()?;
if target_moniker.is_ancestor_of(&source_moniker) {
// The target is an ancestor of the source, so this is a "use from child"
// scenario
self.route_capability_use_from_child(
&capability,
source_moniker,
target_moniker,
force_route,
)?;
} else {
// The target is _not_ an ancestor of the source, so this is a classic "routing
// between two components" scenario, where the target uses the capability from
// its parent.
self.route_capability_between_components(
&capability,
source_moniker,
target_moniker,
force_route,
)?;
}
}
}
Ok(())
}
fn route_capability_to_above_root(
&mut self,
capability: &frealmbuilder::Capability,
source_moniker: Moniker,
force_route: bool,
) -> Result<(), Error> {
let mut current_ancestor = self.get_node_mut(&Moniker::root(), GetBehavior::ErrorIfMissing);
let mut current_moniker = Moniker::root();
for child_name in source_moniker.path() {
let current = current_ancestor?;
current.add_expose_for_capability(
&capability,
cm_rust::ExposeSource::Child(child_name.to_string()),
force_route,
)?;
current_ancestor = current.child(&child_name);
current_moniker = current_moniker.child(child_name.clone());
}
if let Ok(source_node) = self.get_node_mut(&source_moniker, GetBehavior::ErrorIfMissing) {
source_node.add_expose_for_capability(
&capability,
cm_rust::ExposeSource::Self_,
force_route,
)?;
source_node.add_capability_decl(&capability, force_route)?;
// TODO(fxbug.dev/74977): eagerly validate decls once weak routes are supported
//source_node.validate(&source_moniker)?;
} else {
// `get_node_mut` only returns `Ok` for mutable nodes. If this node is immutable
// (located behind a ChildDecl) we have to presume that the component already declares
            // and exposes the capability.
}
Ok(())
}
fn route_capability_from_above_root(
&mut self,
capability: &frealmbuilder::Capability,
target_moniker: Moniker,
force_route: bool,
) -> Result<(), Error> {
let mut current_ancestor = self.get_node_mut(&Moniker::root(), GetBehavior::ErrorIfMissing);
let mut current_moniker = Moniker::root();
for child_name in target_moniker.path() {
let current = current_ancestor?;
current.add_offer_for_capability(
&capability,
cm_rust::OfferSource::Parent,
&child_name,
force_route,
)?;
// TODO(fxbug.dev/74977): eagerly validate decls once weak routes are supported
//current.validate(¤t_moniker)?;
current_ancestor = current.child(&child_name);
current_moniker = current_moniker.child(child_name.clone());
}
if let Ok(target_node) = self.get_node_mut(&target_moniker, GetBehavior::ErrorIfMissing) {
target_node.add_use_for_capability(
&capability,
cm_rust::UseSource::Parent,
force_route,
)?;
// TODO(fxbug.dev/74977): eagerly validate decls once weak routes are supported
//target_node.validate(&target_moniker)?;
} else {
// `get_node_mut` only returns `Ok` for mutable nodes. If this node is immutable
// (located behind a ChildDecl) we have to presume that the component already uses
// the capability.
}
Ok(())
}
fn route_capability_from_debug(
&mut self,
capability: &frealmbuilder::Capability,
target_moniker: Moniker,
force_route: bool,
) -> Result<(), Error> {
match &capability {
            frealmbuilder::Capability::Protocol(_) => { /* only protocols are supported from debug */ }
_ => return Err(Error::InvalidCapabilityFromDebug(target_moniker)),
}
if let Ok(target_node) = self.get_node_mut(&target_moniker, GetBehavior::ErrorIfMissing) {
target_node.add_use_for_capability(
&capability,
cm_rust::UseSource::Debug,
force_route,
)?;
// TODO(fxbug.dev/74977): eagerly validate decls once weak routes are supported
//target_node.validate(&target_moniker)?;
} else {
// `get_node_mut` only returns `Ok` for mutable nodes. If this node is immutable
// (located behind a ChildDecl) we have to presume that the component already uses
// the capability.
}
Ok(())
}
// This will panic if `target_moniker.is_ancestor_of(source_moniker)` returns false
fn route_capability_use_from_child(
&mut self,
capability: &frealmbuilder::Capability,
source_moniker: Moniker,
target_moniker: Moniker,
force_route: bool,
) -> Result<(), Error> {
let target_node = self.get_node_mut(&target_moniker, GetBehavior::ErrorIfMissing)?;
let child_source = target_moniker.downward_path_to(&source_moniker).get(0).unwrap().clone();
target_node.add_use_for_capability(
&capability,
cm_rust::UseSource::Child(child_source),
force_route,
)?;
// TODO(fxbug.dev/74977): eagerly validate decls once weak routes are supported
//target_node.validate(&target_moniker)?;
let mut path_to_source = target_moniker.downward_path_to(&source_moniker);
let first_expose_name = path_to_source.remove(0);
let mut current_moniker = target_moniker.child(first_expose_name.clone());
let mut current_node = target_node.child(&first_expose_name);
for child_name in path_to_source {
let current = current_node?;
current.add_expose_for_capability(
&capability,
cm_rust::ExposeSource::Child(child_name.to_string()),
force_route,
)?;
// TODO(fxbug.dev/74977): eagerly validate decls once weak routes are supported
//current.validate(¤t_moniker)?;
current_node = current.child(&child_name);
current_moniker = current_moniker.child(child_name);
}
if let Ok(source_node) = current_node {
source_node.add_capability_decl(&capability, force_route)?;
source_node.add_expose_for_capability(
&capability,
cm_rust::ExposeSource::Self_,
force_route,
)?;
// TODO(fxbug.dev/74977): eagerly validate decls once weak routes are supported
//source_node.validate(¤t_moniker)?;
} else {
// `RealmNode::child` only returns `Ok` for mutable nodes. If this node is immutable
// (located behind a ChildDecl) we have to presume that the component already declares
// the capability.
}
Ok(())
}
fn route_capability_between_components(
&mut self,
capability: &frealmbuilder::Capability,
source_moniker: Moniker,
target_moniker: Moniker,
force_route: bool,
) -> Result<(), Error> |
/// Assembles the realm being constructed and returns the URL for the root
/// component in the realm, which may then be used to create a new component
/// in any collection where fuchsia-test-component is properly set up.
fn commit(
mut self,
registry: Arc<resolver::Registry>,
walked_path: Vec<String>,
package_dir: Option<DirectoryProxy>,
) -> BoxFuture<'static, Result<String, Error>> {
// This function is much cleaner written recursively, but we can't construct recursive
// futures as the size isn't knowable to rustc at compile time. Put the recursive call
        // into a boxed future, as the indirection makes this possible
async move {
// Expose the fuchsia.component.Binder protocol from root in order to give users the ability to manually
// start the realm.
if walked_path.is_empty() {
let () = self.route_capability_to_above_root(
&*BINDER_PROTOCOL_CAPABILITY,
Moniker::root(),
true,
)?;
}
let mut mutable_children = self.mutable_children.into_iter().collect::<Vec<_>>();
mutable_children.sort_unstable_by_key(|t| t.0.clone());
for (name, node) in mutable_children {
let mut new_path = walked_path.clone();
new_path.push(name.clone());
let startup =
if node.eager { fsys::StartupMode::Eager } else { fsys::StartupMode::Lazy };
let environment = node.environment.clone();
let url = node.commit(registry.clone(), new_path, package_dir.clone()).await?;
self.decl.children.push(cm_rust::ChildDecl {
name,
url,
startup,
environment,
on_terminate: None,
});
}
let decl = self.decl.native_into_fidl();
registry
.validate_and_register(decl, package_dir.clone())
.await
.map_err(|e| Error::ValidationError(walked_path.into(), e))
}
.boxed()
}
/// This call ensures that an expose for the given capability exists in this component's decl.
/// If `self.component_loaded_from_pkg && !force_route` is true, we don't do anything.
fn add_expose_for_capability(
&mut self,
capability: &frealmbuilder::Capability,
source: cm_rust::ExposeSource,
force_route: bool,
) -> Result<(), Error> {
if self.component_loaded_from_pkg && !force_route {
// We don't modify package-local components unless force_route is true
return Ok(());
}
let capability_name = get_capability_name(&capability)?;
let new_decl = {
match &capability {
frealmbuilder::Capability::Protocol(_) => {
cm_rust::ExposeDecl::Protocol(cm_rust::ExposeProtocolDecl {
source,
source_name: capability_name.clone().into(),
target: cm_rust::ExposeTarget::Parent,
target_name: capability_name.into(),
})
}
frealmbuilder::Capability::Directory(_) => {
cm_rust::ExposeDecl::Directory(cm_rust::ExposeDirectoryDecl {
source,
source_name: capability_name.clone().into(),
target: cm_rust::ExposeTarget::Parent,
target_name: capability_name.into(),
rights: None,
subdir: None,
})
}
frealmbuilder::Capability::Storage(frealmbuilder::StorageCapability { .. }) => {
return Err(Error::UnableToExpose("storage"));
}
_ => return Err(Error::BadFidl),
}
};
// A decl with the same source and name but different options will be caught during decl
// validation later
if !self.decl.exposes.contains(&new_decl) {
self.decl.exposes.push(new_decl);
}
Ok(())
}
/// This call ensures that a declaration for the given capability and source exists in this
/// component's decl. If `self.component_loaded_from_pkg && !force_route` is true, we don't do
/// anything.
fn add_capability_decl(
&mut self,
capability: &frealmbuilder::Capability,
force_route: bool,
) -> Result<(), Error> {
if self.component_loaded_from_pkg && !force_route {
// We don't modify package-local components unless force_route is true
return Ok(());
}
let capability_name = get_capability_name(&capability)?;
let capability_decl = match capability {
frealmbuilder::Capability::Protocol(_) => {
Some(cm_rust::CapabilityDecl::Protocol(cm_rust::ProtocolDecl {
name: capability_name.as_str().try_into().unwrap(),
source_path: Some(
format!("/svc/{}", capability_name).as_str().try_into().unwrap(),
),
}))
}
frealmbuilder::Capability::Directory(frealmbuilder::DirectoryCapability {
path,
rights,
..
}) => Some(cm_rust::CapabilityDecl::Directory(cm_rust::DirectoryDecl {
name: capability_name.as_str().try_into().unwrap(),
source_path: Some(path.as_ref().unwrap().as_str().try_into().unwrap()),
rights: rights.as_ref().unwrap().clone(),
})),
frealmbuilder::Capability::Storage(_) => {
return Err(Error::StorageSourceInvalid);
}
_ => return Err(Error::BadFidl),
};
if let Some(decl) = capability_decl {
// A decl with the same source and name but different options will be caught during
// decl validation later
if !self.decl.capabilities.contains(&decl) {
self.decl.capabilities.push(decl);
}
}
Ok(())
}
/// This call ensures that a use for the given capability exists in this component's decl. If
/// `self.component_loaded_from_pkg && !force_route` is true, we don't do anything.
fn add_use_for_capability(
&mut self,
capability: &frealmbuilder::Capability,
use_source: cm_rust::UseSource,
force_route: bool,
) -> Result<(), Error> {
if self.component_loaded_from_pkg && !force_route {
// We don't modify package-local components unless force_route is true
return Ok(());
}
let capability_name = get_capability_name(&capability)?;
let use_decl = match capability {
frealmbuilder::Capability::Protocol(_) => {
cm_rust::UseDecl::Protocol(cm_rust::UseProtocolDecl {
source: use_source,
source_name: capability_name.as_str().try_into().unwrap(),
target_path: format!("/svc/{}", capability_name).as_str().try_into().unwrap(),
dependency_type: cm_rust::DependencyType::Strong,
})
}
frealmbuilder::Capability::Directory(frealmbuilder::DirectoryCapability {
path,
rights,
..
}) => cm_rust::UseDecl::Directory(cm_rust::UseDirectoryDecl {
source: use_source,
source_name: capability_name.as_str().try_into().unwrap(),
target_path: path.as_ref().unwrap().as_str().try_into().unwrap(),
rights: rights.as_ref().unwrap().clone(),
subdir: None,
dependency_type: cm_rust::DependencyType::Strong,
}),
frealmbuilder::Capability::Storage(frealmbuilder::StorageCapability {
path, ..
}) => {
if use_source != cm_rust::UseSource::Parent {
return Err(Error::UnableToExpose("storage"));
}
cm_rust::UseDecl::Storage(cm_rust::UseStorageDecl {
source_name: capability_name.as_str().try_into().unwrap(),
target_path: path.as_ref().unwrap().as_str().try_into().unwrap(),
})
}
_ => return Err(Error::BadFidl),
};
if !self.decl.uses.contains(&use_decl) {
self.decl.uses.push(use_decl);
}
Ok(())
}
/// This call ensures that a given offer for the given capability exists in this component's
/// decl. If `self.component_loaded_from_pkg && !force_route` is true, we don't do anything.
fn add_offer_for_capability(
&mut self,
capability: &frealmbuilder::Capability,
offer_source: cm_rust::OfferSource,
target_name: &str,
force_route: bool,
) -> Result<(), Error> {
if self.component_loaded_from_pkg && !force_route {
// We don't modify package-local components unless force_route is true
return Ok(());
}
if let cm_rust::OfferSource::Child(_) = &offer_source {
if let frealmbuilder::Capability::Storage(_) = capability {
return Err(Error::UnableToExpose("storage"));
}
}
let capability_name = get_capability_name(&capability)?;
let offer_decl = match &capability {
frealmbuilder::Capability::Protocol(_) => {
cm_rust::OfferDecl::Protocol(cm_rust::OfferProtocolDecl {
source: offer_source,
source_name: capability_name.clone().into(),
target: cm_rust::OfferTarget::Child(target_name.to_string()),
target_name: capability_name.into(),
dependency_type: cm_rust::DependencyType::Strong,
})
}
frealmbuilder::Capability::Directory(_) => {
cm_rust::OfferDecl::Directory(cm_rust::OfferDirectoryDecl {
source: offer_source,
source_name: capability_name.clone().into(),
target: cm_rust::OfferTarget::Child(target_name.to_string()),
target_name: capability_name.into(),
rights: None,
subdir: None,
dependency_type: cm_rust::DependencyType::Strong,
})
}
frealmbuilder::Capability::Storage(_) => {
cm_rust::OfferDecl::Storage(cm_rust::OfferStorageDecl {
source: offer_source,
source_name: capability_name.clone().into(),
target: cm_rust::OfferTarget::Child(target_name.to_string()),
target_name: capability_name.into(),
})
}
_ => return Err(Error::BadFidl),
};
if !self.decl.offers.contains(&offer_decl) {
self.decl.offers.push(offer_decl);
}
Ok(())
}
}
// TODO(fxbug.dev/77771): use the moniker crate once there's an id-free version of it.
#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Default)]
struct Moniker {
path: Vec<String>,
}
impl From<&str> for Moniker {
fn from(s: &str) -> Self {
Moniker {
path: match s {
"" => vec![],
_ => s.split('/').map(|s| s.to_string()).collect(),
},
}
}
}
impl From<String> for Moniker {
fn from(s: String) -> Self {
s.as_str().into()
}
}
impl From<Vec<String>> for Moniker {
fn from(path: Vec<String>) -> Self {
Moniker { path }
}
}
impl TryFrom<frealmbuilder::RouteEndpoint> for Moniker {
type Error = Error;
fn try_from(route_endpoint: frealmbuilder::RouteEndpoint) -> Result<Self, Error> {
match route_endpoint {
frealmbuilder::RouteEndpoint::AboveRoot(_) => {
panic!("tried to convert RouteEndpoint::AboveRoot into a moniker")
}
frealmbuilder::RouteEndpoint::Component(moniker) => Ok(moniker.into()),
_ => Err(Error::BadFidl),
}
}
}
impl Display for Moniker {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if self.is_root() {
write!(f, "<root of test realm>")
} else {
write!(f, "{}", self.path.join("/"))
}
}
}
impl Moniker {
pub fn root() -> Self {
Moniker { path: vec![] }
}
fn is_root(&self) -> bool {
return self.path.is_empty();
}
fn child_name(&self) -> Option<&String> {
self.path.last()
}
fn path(&self) -> &Vec<String> {
&self.path
}
    // If self is an ancestor of other_moniker, then returns the path to reach other_moniker from
    // self. Panics if self is not an ancestor of other_moniker.
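    // For example (hypothetical monikers), "a/b" downward to "a/b/c/d" yields
    // ["c", "d"]:
    //     let parent: Moniker = "a/b".into();
    //     let descendant: Moniker = "a/b/c/d".into();
    //     assert_eq!(parent.downward_path_to(&descendant), vec!["c", "d"]);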
fn downward_path_to(&self, other_moniker: &Moniker) -> Vec<String> {
let our_path = self.path.clone();
let mut their_path = other_moniker.path.clone();
for item in our_path {
if Some(&item) != their_path.get(0) {
panic!("downward_path_to called on non-ancestor moniker");
}
their_path.remove(0);
}
their_path
}
/// Returns the list of components comprised of this component's parent, then that component's
/// parent, and so on. This list does not include the root component.
///
/// For example, `"a/b/c/d".into().ancestry()` would return `vec!["a/b/c".into(), "a/b".into(),
/// "a".into()]`
fn ancestry(&self) -> Vec<Moniker> {
let mut current_moniker = Moniker { path: vec![] };
let mut res = vec![];
let mut parent_path = self.path.clone();
parent_path.pop();
for part in parent_path {
current_moniker.path.push(part.clone());
res.push(current_moniker.clone());
}
res
}
fn parent(&self) -> Option<Self> {
let mut path = self.path.clone();
path.pop()?;
Some(Moniker { path })
}
fn child(&self, child_name: String) -> Self {
let mut path = self.path.clone();
path.push(child_name);
Moniker { path }
}
fn is_ancestor_of(&self, other_moniker: &Moniker) -> bool {
if self.path.len() >= other_moniker.path.len() {
return false;
}
for (element_from_us, element_from_them) in self.path.iter().zip(other_moniker.path.iter())
{
if element_from_us != element_from_them {
return false;
}
}
return true;
}
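    // For example (hypothetical monikers): a strict prefix is an ancestor, but a
    // moniker is never an ancestor of itself.
    //     assert!(Moniker::from("a").is_ancestor_of(&"a/b".into()));
    //     assert!(!Moniker::from("a").is_ancestor_of(&"a".into()));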
}
fn is_relative_url(url: &str) -> bool {
    if !url.starts_with('#') {
        return false;
    }
if Url::parse(url) != Err(url::ParseError::RelativeUrlWithoutBase) {
return false;
}
true
}
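// Examples (hypothetical URLs): only fragment-only references, which name a file
// inside the test package, count as relative here.
//     assert!(is_relative_url("#meta/echo_server.cm"));
//     assert!(!is_relative_url("fuchsia-pkg://fuchsia.com/echo#meta/echo_server.cm"));
//     assert!(!is_relative_url("meta/echo_server.cm"));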
fn get_capability_name(capability: &frealmbuilder::Capability) -> Result<String, Error> {
match &capability {
frealmbuilder::Capability::Protocol(frealmbuilder::ProtocolCapability { name, .. }) => {
Ok(name.as_ref().unwrap().clone())
}
frealmbuilder::Capability::Directory(frealmbuilder::DirectoryCapability {
name, ..
}) => Ok(name.as_ref().unwrap().clone()),
frealmbuilder::Capability::Storage(frealmbuilder::StorageCapability { name, .. }) => {
Ok(name.as_ref().unwrap().clone())
}
_ => Err(Error::BadFidl),
}
}
#[cfg(test)]
mod tests {
use super::*;
use fidl_fuchsia_io2 as fio2;
#[fasync::run_singlethreaded(test)]
async fn set_component() {
let mut realm = RealmNode::default();
let root_decl = cm_rust::ComponentDecl {
offers: vec![cm_rust::OfferDecl::Protocol(cm_rust::OfferProtocolDecl {
source: cm_rust::OfferSource::Parent,
source_name: "fidl.examples.routing.echo.Echo".try_into().unwrap(),
target: cm_rust::OfferTarget::Child("a".to_string()),
target_name: "fidl.examples.routing.echo.Echo".try_into().unwrap(),
dependency_type: cm_rust::DependencyType::Strong,
})],
..cm_rust::ComponentDecl::default()
};
let mut a_decl = cm_rust::ComponentDecl {
offers: vec![cm_rust::OfferDecl::Protocol(cm_rust::OfferProtocolDecl {
source: cm_rust::OfferSource::Parent,
source_name: "fidl.examples.routing.echo.Echo".try_into().unwrap(),
target: cm_rust::OfferTarget::Child("b".to_string()),
target_name: "fidl.examples.routing.echo.Echo".try_into().unwrap(),
dependency_type: cm_rust::DependencyType::Strong,
})],
..cm_rust::ComponentDecl::default()
};
realm
.set_component(
Moniker::default(),
frealmbuilder::Component::Decl(root_decl.clone().native_into_fidl()),
&None,
)
.await
.unwrap();
realm
.set_component(
"a".into(),
frealmbuilder::Component::Decl(a_decl.clone().native_into_fidl()),
&None,
)
.await
.unwrap();
realm
.set_component(
"a/b".into(),
frealmbuilder::Component::Url("fuchsia-pkg://b".to_string()),
&None,
)
.await
.unwrap();
a_decl.children.push(cm_rust::ChildDecl {
name: "b".to_string(),
url: "fuchsia-pkg://b".to_string(),
startup: fsys::StartupMode::Lazy,
environment: None,
on_terminate: None,
});
assert_eq!(
realm.get_node_mut(&Moniker::default(), GetBehavior::ErrorIfMissing).unwrap().decl,
root_decl
);
assert_eq!(
realm.get_node_mut(&"a".into(), GetBehavior::ErrorIfMissing).unwrap().decl,
a_decl
);
}
#[fasync::run_singlethreaded(test)]
async fn contains_component() {
let mut realm = RealmNode::default();
let root_decl = cm_rust::ComponentDecl {
offers: vec![cm_rust::OfferDecl::Protocol(cm_rust::OfferProtocolDecl {
source: cm_rust::OfferSource::Parent,
source_name: "fidl.examples.routing.echo.Echo".try_into().unwrap(),
target: cm_rust::OfferTarget::Child("a".to_string()),
target_name: "fidl.examples.routing.echo.Echo".try_into().unwrap(),
dependency_type: cm_rust::DependencyType::Strong,
})],
..cm_rust::ComponentDecl::default()
};
let a_decl = cm_rust::ComponentDecl {
offers: vec![cm_rust::OfferDecl::Protocol(cm_rust::OfferProtocolDecl {
source: cm_rust::OfferSource::Parent,
source_name: "fidl.examples.routing.echo.Echo".try_into().unwrap(),
target: cm_rust::OfferTarget::Child("b".to_string()),
target_name: "fidl.examples.routing.echo.Echo".try_into().unwrap(),
dependency_type: cm_rust::DependencyType::Strong,
})],
children: vec![cm_rust::ChildDecl {
name: "b".to_string(),
url: "fuchsia-pkg://b".to_string(),
startup: fsys::StartupMode::Lazy,
environment: None,
on_terminate: None,
}],
..cm_rust::ComponentDecl::default()
};
realm
.set_component(
Moniker::default(),
frealmbuilder::Component::Decl(root_decl.clone().native_into_fidl()),
&None,
)
.await
.unwrap();
realm
.set_component(
"a".into(),
frealmbuilder::Component::Decl(a_decl.clone().native_into_fidl()),
&None,
)
.await
.unwrap();
assert_eq!(true, realm.contains(Moniker::default()));
assert_eq!(true, realm.contains("a".into()));
assert_eq!(true, realm.contains("a/b".into()));
assert_eq!(false, realm.contains("a/a".into()));
assert_eq!(false, realm.contains("b".into()));
}
#[fasync::run_singlethreaded(test)]
async fn mark_as_eager() {
let mut realm = RealmNode::default();
let root_decl = cm_rust::ComponentDecl {
offers: vec![cm_rust::OfferDecl::Protocol(cm_rust::OfferProtocolDecl {
source: cm_rust::OfferSource::Parent,
source_name: "fidl.examples.routing.echo.Echo".try_into().unwrap(),
target: cm_rust::OfferTarget::Child("a".to_string()),
target_name: "fidl.examples.routing.echo.Echo".try_into().unwrap(),
dependency_type: cm_rust::DependencyType::Strong,
})],
..cm_rust::ComponentDecl::default()
};
let a_decl = cm_rust::ComponentDecl {
offers: vec![cm_rust::OfferDecl::Protocol(cm_rust::OfferProtocolDecl {
source: cm_rust::OfferSource::Parent,
source_name: "fidl.examples.routing.echo.Echo".try_into().unwrap(),
target: cm_rust::OfferTarget::Child("b".to_string()),
target_name: "fidl.examples.routing.echo.Echo".try_into().unwrap(),
dependency_type: cm_rust::DependencyType::Strong,
})],
..cm_rust::ComponentDecl::default()
};
let b_decl = cm_rust::ComponentDecl {
offers: vec![cm_rust::OfferDecl::Protocol(cm_rust::OfferProtocolDecl {
source: cm_rust::OfferSource::Parent,
source_name: "fidl.examples.routing.echo.Echo".try_into().unwrap(),
target: cm_rust::OfferTarget::Child("c".to_string()),
target_name: "fidl.examples.routing.echo.Echo".try_into().unwrap(),
dependency_type: cm_rust::DependencyType::Strong,
})],
children: vec![cm_rust::ChildDecl {
name: "c".to_string(),
url: "fuchsia-pkg://c".to_string(),
startup: fsys::StartupMode::Lazy,
environment: None,
on_terminate: None,
}],
..cm_rust::ComponentDecl::default()
};
realm
.set_component(
Moniker::default(),
frealmbuilder::Component::Decl(root_decl.clone().native_into_fidl()),
&None,
)
.await
.unwrap();
realm
.set_component(
"a".into(),
frealmbuilder::Component::Decl(a_decl.clone().native_into_fidl()),
&None,
)
.await
.unwrap();
realm
.set_component(
"a/b".into(),
frealmbuilder::Component::Decl(b_decl.clone().native_into_fidl()),
&None,
)
.await
.unwrap();
realm.mark_as_eager("a/b/c".into()).unwrap();
assert_eq!(
realm.get_node_mut(&"a".into(), GetBehavior::ErrorIfMissing).unwrap().eager,
true
);
assert_eq!(
realm.get_node_mut(&"a/b".into(), GetBehavior::ErrorIfMissing).unwrap().decl.children,
vec![cm_rust::ChildDecl {
name: "c".to_string(),
url: "fuchsia-pkg://c".to_string(),
startup: fsys::StartupMode::Eager,
environment: None,
on_terminate: None,
}]
);
}
fn check_results(
mut realm: RealmNode,
expected_results: Vec<(&'static str, cm_rust::ComponentDecl)>,
) {
assert!(!expected_results.is_empty(), "can't build an empty realm");
for (component, decl) in expected_results {
assert_eq!(
realm
.get_node_mut(&component.into(), GetBehavior::ErrorIfMissing)
.expect("component is missing from realm")
.decl,
decl,
"decl in realm doesn't match expectations for component {:?}",
component
);
}
}
#[fasync::run_singlethreaded(test)]
async fn missing_route_source_error() {
let mut realm = RealmNode::default();
realm
.set_component(
"a".into(),
frealmbuilder::Component::Url("fuchsia-pkg://a".to_string()),
&None,
)
.await
.unwrap();
let res = realm.route_capability(frealmbuilder::CapabilityRoute {
capability: Some(frealmbuilder::Capability::Protocol(
frealmbuilder::ProtocolCapability {
name: Some("fidl.examples.routing.echo.Echo".to_string()),
..frealmbuilder::ProtocolCapability::EMPTY
},
)),
source: Some(frealmbuilder::RouteEndpoint::Component("b".to_string())),
targets: Some(vec![frealmbuilder::RouteEndpoint::Component("a".to_string())]),
..frealmbuilder::CapabilityRoute::EMPTY
});
match res {
Ok(_) => panic!("builder commands should have errored"),
Err(Error::MissingRouteSource(m)) if m == "b".into() => (),
Err(e) => panic!("unexpected error: {:?}", e),
}
}
#[fasync::run_singlethreaded(test)]
async fn empty_route_targets() {
let mut realm = RealmNode::default();
realm
.set_component(
"a".into(),
frealmbuilder::Component::Url("fuchsia-pkg://a".to_string()),
&None,
)
.await
.unwrap();
let res = realm.route_capability(frealmbuilder::CapabilityRoute {
capability: Some(frealmbuilder::Capability::Protocol(
frealmbuilder::ProtocolCapability {
name: Some("fidl.examples.routing.echo.Echo".to_string()),
..frealmbuilder::ProtocolCapability::EMPTY
},
)),
source: Some(frealmbuilder::RouteEndpoint::Component("a".to_string())),
targets: Some(vec![]),
..frealmbuilder::CapabilityRoute::EMPTY
});
match res {
Ok(_) => panic!("builder commands should have errored"),
Err(e) => {
if let Error::RouteTargetsEmpty = e {
()
} else {
panic!("unexpected error: {:?}", e);
}
}
}
}
#[fasync::run_singlethreaded(test)]
async fn multiple_offer_same_source() {
let mut realm = RealmNode::default();
realm
.set_component(
"1/src".into(),
frealmbuilder::Component::Url("fuchsia-pkg://a".to_string()),
&None,
)
.await
.unwrap();
realm
.set_component(
"2/target_1".into(),
frealmbuilder::Component::Url("fuchsia-pkg://b".to_string()),
&None,
)
.await
.unwrap();
realm
.set_component(
"2/target_2".into(),
frealmbuilder::Component::Url("fuchsia-pkg://c".to_string()),
&None,
)
.await
.unwrap();
realm
.route_capability(frealmbuilder::CapabilityRoute {
capability: Some(frealmbuilder::Capability::Protocol(
frealmbuilder::ProtocolCapability {
name: Some("fidl.examples.routing.echo.Echo".to_string()),
..frealmbuilder::ProtocolCapability::EMPTY
},
)),
source: Some(frealmbuilder::RouteEndpoint::Component("1/src".to_string())),
targets: Some(vec![
frealmbuilder::RouteEndpoint::Component("2/target_1".to_string()),
frealmbuilder::RouteEndpoint::Component("2/target_2".to_string()),
]),
..frealmbuilder::CapabilityRoute::EMPTY
})
.unwrap();
}
#[fasync::run_singlethreaded(test)]
async fn same_capability_from_different_sources_in_same_node_error() {
{
let mut realm = RealmNode::default();
realm
.set_component(
"1/a".into(),
frealmbuilder::Component::Url("fuchsia-pkg://a".to_string()),
&None,
)
.await
.unwrap();
realm
.set_component(
"1/b".into(),
frealmbuilder::Component::Url("fuchsia-pkg://b".to_string()),
&None,
)
.await
.unwrap();
realm
.set_component(
"2/c".into(),
frealmbuilder::Component::Url("fuchsia-pkg://c".to_string()),
&None,
)
.await
.unwrap();
realm
.set_component(
"2/d".into(),
frealmbuilder::Component::Url("fuchsia-pkg://d".to_string()),
&None,
)
.await
.unwrap();
realm
.route_capability(frealmbuilder::CapabilityRoute {
capability: Some(frealmbuilder::Capability::Protocol(
frealmbuilder::ProtocolCapability {
name: Some("fidl.examples.routing.echo.Echo".to_string()),
..frealmbuilder::ProtocolCapability::EMPTY
},
)),
source: Some(frealmbuilder::RouteEndpoint::Component("1/a".to_string())),
targets: Some(vec![frealmbuilder::RouteEndpoint::Component("2/c".to_string())]),
..frealmbuilder::CapabilityRoute::EMPTY
})
.unwrap();
realm
.route_capability(frealmbuilder::CapabilityRoute {
capability: Some(frealmbuilder::Capability::Protocol(
frealmbuilder::ProtocolCapability {
name: Some("fidl.examples.routing.echo.Echo".to_string()),
..frealmbuilder::ProtocolCapability::EMPTY
},
)),
source: Some(frealmbuilder::RouteEndpoint::Component("1/b".to_string())),
targets: Some(vec![frealmbuilder::RouteEndpoint::Component("2/d".to_string())]),
..frealmbuilder::CapabilityRoute::EMPTY
})
.unwrap();
// get and set this component, to confirm that `set_component` runs `validate`
let decl = realm.get_component_decl("1".into()).unwrap().native_into_fidl();
let res =
realm.set_component("1".into(), frealmbuilder::Component::Decl(decl), &None).await;
match res {
Err(Error::ValidationError(_, e)) => {
assert_eq!(
e,
cm_fidl_validator::ErrorList {
errs: vec![cm_fidl_validator::Error::DuplicateField(
cm_fidl_validator::DeclField {
decl: "ExposeProtocolDecl".to_string(),
field: "target_name".to_string()
},
"fidl.examples.routing.echo.Echo".to_string()
)]
}
);
}
Err(e) => panic!("unexpected error: {:?}", e),
Ok(_) => panic!("builder commands should have errored"),
}
}
{
let mut realm = RealmNode::default();
realm
.set_component(
"1/a".into(),
frealmbuilder::Component::Url("fuchsia-pkg://a".to_string()),
&None,
)
.await
.unwrap();
realm
.set_component(
"1/b".into(),
frealmbuilder::Component::Url("fuchsia-pkg://b".to_string()),
&None,
)
.await
.unwrap();
realm
.set_component(
"2/c".into(),
frealmbuilder::Component::Url("fuchsia-pkg://c".to_string()),
&None,
)
.await
.unwrap();
realm
.set_component(
"2/d".into(),
frealmbuilder::Component::Url("fuchsia-pkg://d".to_string()),
&None,
)
.await
.unwrap();
realm
.route_capability(frealmbuilder::CapabilityRoute {
capability: Some(frealmbuilder::Capability::Protocol(
frealmbuilder::ProtocolCapability {
name: Some("fidl.examples.routing.echo.Echo".to_string()),
..frealmbuilder::ProtocolCapability::EMPTY
},
)),
source: Some(frealmbuilder::RouteEndpoint::Component("1/a".to_string())),
targets: Some(vec![frealmbuilder::RouteEndpoint::Component("1/b".to_string())]),
..frealmbuilder::CapabilityRoute::EMPTY
})
.unwrap();
realm
.route_capability(frealmbuilder::CapabilityRoute {
capability: Some(frealmbuilder::Capability::Protocol(
frealmbuilder::ProtocolCapability {
name: Some("fidl.examples.routing.echo.Echo".to_string()),
..frealmbuilder::ProtocolCapability::EMPTY
},
)),
source: Some(frealmbuilder::RouteEndpoint::Component("2/c".to_string())),
targets: Some(vec![frealmbuilder::RouteEndpoint::Component("2/d".to_string())]),
..frealmbuilder::CapabilityRoute::EMPTY
})
.unwrap();
}
}
#[fasync::run_singlethreaded(test)]
async fn missing_route_target_error() {
let mut realm = RealmNode::default();
realm
.set_component(
"a".into(),
frealmbuilder::Component::Url("fuchsia-pkg://a".to_string()),
&None,
)
.await
.unwrap();
let res = realm.route_capability(frealmbuilder::CapabilityRoute {
capability: Some(frealmbuilder::Capability::Protocol(
frealmbuilder::ProtocolCapability {
name: Some("fidl.examples.routing.echo.Echo".to_string()),
..frealmbuilder::ProtocolCapability::EMPTY
},
)),
source: Some(frealmbuilder::RouteEndpoint::Component("a".to_string())),
targets: Some(vec![frealmbuilder::RouteEndpoint::Component("b".to_string())]),
..frealmbuilder::CapabilityRoute::EMPTY
});
match res {
Ok(_) => panic!("builder commands should have errored"),
Err(Error::MissingRouteTarget(m)) => {
assert_eq!(m, "b".into());
}
Err(e) => panic!("unexpected error: {:?}", e),
}
}
#[test]
fn route_source_and_target_both_above_root_error() {
let mut realm = RealmNode::default();
let res = realm.route_capability(frealmbuilder::CapabilityRoute {
capability: Some(frealmbuilder::Capability::Protocol(
frealmbuilder::ProtocolCapability {
name: Some("fidl.examples.routing.echo.Echo".to_string()),
..frealmbuilder::ProtocolCapability::EMPTY
},
)),
source: Some(frealmbuilder::RouteEndpoint::AboveRoot(frealmbuilder::AboveRoot {})),
targets: Some(vec![frealmbuilder::RouteEndpoint::AboveRoot(
frealmbuilder::AboveRoot {},
)]),
..frealmbuilder::CapabilityRoute::EMPTY
});
match res {
Err(Error::RouteSourceAndTargetMatch(frealmbuilder::RouteEndpoint::AboveRoot(
frealmbuilder::AboveRoot {},
))) => (),
Ok(_) => panic!("builder commands should have errored"),
Err(e) => panic!("unexpected error: {:?}", e),
}
}
#[fasync::run_singlethreaded(test)]
async fn expose_storage_from_child_error() {
let mut realm = RealmNode::default();
realm
.set_component(
"a".into(),
frealmbuilder::Component::Url("fuchsia-pkg://a".to_string()),
&None,
)
.await
.unwrap();
let res = realm.route_capability(frealmbuilder::CapabilityRoute {
capability: Some(frealmbuilder::Capability::Storage(
frealmbuilder::StorageCapability {
name: Some("foo".to_string()),
path: Some("foo".to_string()),
..frealmbuilder::StorageCapability::EMPTY
},
)),
source: Some(frealmbuilder::RouteEndpoint::Component("a".to_string())),
targets: Some(vec![frealmbuilder::RouteEndpoint::AboveRoot(
frealmbuilder::AboveRoot {},
)]),
..frealmbuilder::CapabilityRoute::EMPTY
});
match res {
Ok(_) => panic!("builder commands should have errored"),
Err(Error::UnableToExpose("storage")) => (),
Err(e) => panic!("unexpected error: {:?}", e),
}
}
#[fasync::run_singlethreaded(test)]
async fn offer_storage_from_child_error() {
let mut realm = RealmNode::default();
realm
.set_component(
"a".into(),
frealmbuilder::Component::Url("fuchsia-pkg://a".to_string()),
&None,
)
.await
.unwrap();
realm
.set_component(
"b".into(),
frealmbuilder::Component::Url("fuchsia-pkg://b".to_string()),
&None,
)
.await
.unwrap();
let res = realm.route_capability(frealmbuilder::CapabilityRoute {
capability: Some(frealmbuilder::Capability::Storage(
frealmbuilder::StorageCapability {
name: Some("foo".to_string()),
path: Some("/foo".to_string()),
..frealmbuilder::StorageCapability::EMPTY
},
)),
source: Some(frealmbuilder::RouteEndpoint::Component("a".to_string())),
targets: Some(vec![frealmbuilder::RouteEndpoint::Component("b".to_string())]),
..frealmbuilder::CapabilityRoute::EMPTY
});
match res {
Ok(_) => panic!("builder commands should have errored"),
Err(Error::UnableToExpose("storage")) => (),
Err(e) => panic!("unexpected error: {:?}", e),
}
}
#[fasync::run_singlethreaded(test)]
async fn verify_storage_routing() {
let mut realm = RealmNode::default();
realm
.set_component(
"a".into(),
frealmbuilder::Component::Decl(
cm_rust::ComponentDecl::default().native_into_fidl(),
),
&None,
)
.await
.unwrap();
realm
.route_capability(frealmbuilder::CapabilityRoute {
capability: Some(frealmbuilder::Capability::Storage(
frealmbuilder::StorageCapability {
name: Some("foo".to_string()),
path: Some("/bar".to_string()),
..frealmbuilder::StorageCapability::EMPTY
},
)),
source: Some(frealmbuilder::RouteEndpoint::AboveRoot(frealmbuilder::AboveRoot {})),
targets: Some(vec![frealmbuilder::RouteEndpoint::Component("a".to_string())]),
..frealmbuilder::CapabilityRoute::EMPTY
})
.unwrap();
check_results(
realm,
vec![
(
"",
cm_rust::ComponentDecl {
offers: vec![cm_rust::OfferDecl::Storage(cm_rust::OfferStorageDecl {
source: cm_rust::OfferSource::Parent,
source_name: "foo".into(),
target: cm_rust::OfferTarget::Child("a".to_string()),
target_name: "foo".into(),
})],
children: vec![
// Mock children aren't inserted into the decls at this point, as their
// URLs are unknown until registration with the framework intermediary,
// and that happens during Realm::create
],
..cm_rust::ComponentDecl::default()
},
),
(
"a",
cm_rust::ComponentDecl {
uses: vec![cm_rust::UseDecl::Storage(cm_rust::UseStorageDecl {
source_name: "foo".into(),
target_path: "/bar".try_into().unwrap(),
})],
..cm_rust::ComponentDecl::default()
},
),
],
);
}
#[fasync::run_singlethreaded(test)]
async fn two_sibling_realm_no_mocks() {
let mut realm = RealmNode::default();
realm
.set_component(
"a".into(),
frealmbuilder::Component::Url("fuchsia-pkg://a".to_string()),
&None,
)
.await
.unwrap();
realm
.set_component(
"b".into(),
frealmbuilder::Component::Url("fuchsia-pkg://b".to_string()),
&None,
)
.await
.unwrap();
realm.mark_as_eager("b".into()).unwrap();
realm
.route_capability(frealmbuilder::CapabilityRoute {
capability: Some(frealmbuilder::Capability::Protocol(
frealmbuilder::ProtocolCapability {
name: Some("fidl.examples.routing.echo.Echo".to_string()),
..frealmbuilder::ProtocolCapability::EMPTY
},
)),
source: Some(frealmbuilder::RouteEndpoint::Component("a".to_string())),
targets: Some(vec![frealmbuilder::RouteEndpoint::Component("b".to_string())]),
..frealmbuilder::CapabilityRoute::EMPTY
})
.unwrap();
check_results(
realm,
vec![(
"",
cm_rust::ComponentDecl {
offers: vec![cm_rust::OfferDecl::Protocol(cm_rust::OfferProtocolDecl {
source: cm_rust::OfferSource::Child("a".to_string()),
source_name: "fidl.examples.routing.echo.Echo".try_into().unwrap(),
target: cm_rust::OfferTarget::Child("b".to_string()),
target_name: "fidl.examples.routing.echo.Echo".try_into().unwrap(),
dependency_type: cm_rust::DependencyType::Strong,
})],
children: vec![
cm_rust::ChildDecl {
name: "a".to_string(),
url: "fuchsia-pkg://a".to_string(),
startup: fsys::StartupMode::Lazy,
environment: None,
on_terminate: None,
},
cm_rust::ChildDecl {
name: "b".to_string(),
url: "fuchsia-pkg://b".to_string(),
startup: fsys::StartupMode::Eager,
environment: None,
on_terminate: None,
},
],
..cm_rust::ComponentDecl::default()
},
)],
);
}
#[fasync::run_singlethreaded(test)]
async fn two_sibling_realm_both_mocks() {
let mut realm = RealmNode::default();
realm
.set_component(
"a".into(),
frealmbuilder::Component::Decl(
cm_rust::ComponentDecl::default().native_into_fidl(),
),
&None,
)
.await
.unwrap();
realm
.set_component(
"b".into(),
frealmbuilder::Component::Decl(
cm_rust::ComponentDecl::default().native_into_fidl(),
),
&None,
)
.await
.unwrap();
realm
.route_capability(frealmbuilder::CapabilityRoute {
capability: Some(frealmbuilder::Capability::Protocol(
frealmbuilder::ProtocolCapability {
name: Some("fidl.examples.routing.echo.Echo".to_string()),
..frealmbuilder::ProtocolCapability::EMPTY
},
)),
source: Some(frealmbuilder::RouteEndpoint::Component("a".to_string())),
targets: Some(vec![frealmbuilder::RouteEndpoint::Component("b".to_string())]),
..frealmbuilder::CapabilityRoute::EMPTY
})
.unwrap();
check_results(
realm,
vec![
(
"",
cm_rust::ComponentDecl {
offers: vec![cm_rust::OfferDecl::Protocol(cm_rust::OfferProtocolDecl {
source: cm_rust::OfferSource::Child("a".to_string()),
source_name: "fidl.examples.routing.echo.Echo".try_into().unwrap(),
target: cm_rust::OfferTarget::Child("b".to_string()),
target_name: "fidl.examples.routing.echo.Echo".try_into().unwrap(),
dependency_type: cm_rust::DependencyType::Strong,
})],
children: vec![
// Mock children aren't inserted into the decls at this point, as their
// URLs are unknown until registration with the framework intermediary,
// and that happens during Realm::create
],
..cm_rust::ComponentDecl::default()
},
),
(
"a",
cm_rust::ComponentDecl {
capabilities: vec![cm_rust::CapabilityDecl::Protocol(
cm_rust::ProtocolDecl {
name: "fidl.examples.routing.echo.Echo".try_into().unwrap(),
source_path: Some(
"/svc/fidl.examples.routing.echo.Echo".try_into().unwrap(),
),
},
)],
exposes: vec![cm_rust::ExposeDecl::Protocol(cm_rust::ExposeProtocolDecl {
source: cm_rust::ExposeSource::Self_,
source_name: "fidl.examples.routing.echo.Echo".try_into().unwrap(),
target: cm_rust::ExposeTarget::Parent,
target_name: "fidl.examples.routing.echo.Echo".try_into().unwrap(),
})],
..cm_rust::ComponentDecl::default()
},
),
(
"b",
cm_rust::ComponentDecl {
uses: vec![cm_rust::UseDecl::Protocol(cm_rust::UseProtocolDecl {
source: cm_rust::UseSource::Parent,
source_name: "fidl.examples.routing.echo.Echo".try_into().unwrap(),
target_path: "/svc/fidl.examples.routing.echo.Echo".try_into().unwrap(),
dependency_type: cm_rust::DependencyType::Strong,
})],
..cm_rust::ComponentDecl::default()
},
),
],
);
}
#[fasync::run_singlethreaded(test)]
async fn mock_with_child() {
let mut realm = RealmNode::default();
realm
.set_component(
"a".into(),
frealmbuilder::Component::Decl(
cm_rust::ComponentDecl::default().native_into_fidl(),
),
&None,
)
.await
.unwrap();
realm
.set_component(
"a/b".into(),
frealmbuilder::Component::Url("fuchsia-pkg://b".to_string()),
&None,
)
.await
.unwrap();
realm
.route_capability(frealmbuilder::CapabilityRoute {
capability: Some(frealmbuilder::Capability::Protocol(
frealmbuilder::ProtocolCapability {
name: Some("fidl.examples.routing.echo.Echo".to_string()),
..frealmbuilder::ProtocolCapability::EMPTY
},
)),
source: Some(frealmbuilder::RouteEndpoint::Component("a".to_string())),
targets: Some(vec![frealmbuilder::RouteEndpoint::Component("a/b".to_string())]),
..frealmbuilder::CapabilityRoute::EMPTY
})
.unwrap();
check_results(
realm,
vec![
(
"",
cm_rust::ComponentDecl {
children: vec![
// Mock children aren't inserted into the decls at this point, as their
// URLs are unknown until registration with the framework intermediary,
// and that happens during Realm::create
],
..cm_rust::ComponentDecl::default()
},
),
(
"a",
cm_rust::ComponentDecl {
capabilities: vec![cm_rust::CapabilityDecl::Protocol(
cm_rust::ProtocolDecl {
name: "fidl.examples.routing.echo.Echo".try_into().unwrap(),
source_path: Some(
"/svc/fidl.examples.routing.echo.Echo".try_into().unwrap(),
),
},
)],
offers: vec![cm_rust::OfferDecl::Protocol(cm_rust::OfferProtocolDecl {
source: cm_rust::OfferSource::Self_,
source_name: "fidl.examples.routing.echo.Echo".try_into().unwrap(),
target: cm_rust::OfferTarget::Child("b".to_string()),
target_name: "fidl.examples.routing.echo.Echo".try_into().unwrap(),
dependency_type: cm_rust::DependencyType::Strong,
})],
children: vec![cm_rust::ChildDecl {
name: "b".to_string(),
url: "fuchsia-pkg://b".to_string(),
startup: fsys::StartupMode::Lazy,
environment: None,
on_terminate: None,
}],
..cm_rust::ComponentDecl::default()
},
),
],
);
}
#[fasync::run_singlethreaded(test)]
async fn three_sibling_realm_one_mock() {
let mut realm = RealmNode::default();
realm
.set_component(
"a".into(),
frealmbuilder::Component::Url("fuchsia-pkg://a".to_string()),
&None,
)
.await
.unwrap();
realm
.set_component(
"b".into(),
frealmbuilder::Component::Decl(
cm_rust::ComponentDecl::default().native_into_fidl(),
),
&None,
)
.await
.unwrap();
realm
.set_component(
"c".into(),
frealmbuilder::Component::Url("fuchsia-pkg://c".to_string()),
&None,
)
.await
.unwrap();
realm.mark_as_eager("c".into()).unwrap();
realm
.route_capability(frealmbuilder::CapabilityRoute {
capability: Some(frealmbuilder::Capability::Protocol(
frealmbuilder::ProtocolCapability {
name: Some("fidl.examples.routing.echo.Echo".to_string()),
..frealmbuilder::ProtocolCapability::EMPTY
},
)),
source: Some(frealmbuilder::RouteEndpoint::Component("a".to_string())),
targets: Some(vec![frealmbuilder::RouteEndpoint::Component("b".to_string())]),
..frealmbuilder::CapabilityRoute::EMPTY
})
.unwrap();
realm
.route_capability(frealmbuilder::CapabilityRoute {
capability: Some(frealmbuilder::Capability::Directory(
frealmbuilder::DirectoryCapability {
name: Some("example-dir".to_string()),
path: Some("/example".to_string()),
rights: Some(fio2::RW_STAR_DIR),
..frealmbuilder::DirectoryCapability::EMPTY
},
)),
source: Some(frealmbuilder::RouteEndpoint::Component("b".to_string())),
targets: Some(vec![frealmbuilder::RouteEndpoint::Component("c".to_string())]),
..frealmbuilder::CapabilityRoute::EMPTY
})
.unwrap();
check_results(
realm,
vec![
(
"",
cm_rust::ComponentDecl {
offers: vec![
cm_rust::OfferDecl::Protocol(cm_rust::OfferProtocolDecl {
source: cm_rust::OfferSource::Child("a".to_string()),
source_name: "fidl.examples.routing.echo.Echo".try_into().unwrap(),
target: cm_rust::OfferTarget::Child("b".to_string()),
target_name: "fidl.examples.routing.echo.Echo".try_into().unwrap(),
dependency_type: cm_rust::DependencyType::Strong,
}),
cm_rust::OfferDecl::Directory(cm_rust::OfferDirectoryDecl {
source: cm_rust::OfferSource::Child("b".to_string()),
source_name: "example-dir".try_into().unwrap(),
target: cm_rust::OfferTarget::Child("c".to_string()),
target_name: "example-dir".try_into().unwrap(),
dependency_type: cm_rust::DependencyType::Strong,
rights: None,
subdir: None,
}),
],
children: vec![
// Mock children aren't inserted into the decls at this point, as their
// URLs are unknown until registration with the framework intermediary,
// and that happens during Realm::create
cm_rust::ChildDecl {
name: "a".to_string(),
url: "fuchsia-pkg://a".to_string(),
startup: fsys::StartupMode::Lazy,
environment: None,
on_terminate: None,
},
cm_rust::ChildDecl {
name: "c".to_string(),
url: "fuchsia-pkg://c".to_string(),
startup: fsys::StartupMode::Eager,
environment: None,
on_terminate: None,
},
],
..cm_rust::ComponentDecl::default()
},
),
(
"b",
cm_rust::ComponentDecl {
uses: vec![cm_rust::UseDecl::Protocol(cm_rust::UseProtocolDecl {
source: cm_rust::UseSource::Parent,
source_name: "fidl.examples.routing.echo.Echo".try_into().unwrap(),
target_path: "/svc/fidl.examples.routing.echo.Echo".try_into().unwrap(),
dependency_type: cm_rust::DependencyType::Strong,
})],
capabilities: vec![cm_rust::CapabilityDecl::Directory(
cm_rust::DirectoryDecl {
name: "example-dir".try_into().unwrap(),
source_path: Some("/example".try_into().unwrap()),
rights: fio2::RW_STAR_DIR,
},
)],
exposes: vec![cm_rust::ExposeDecl::Directory(
cm_rust::ExposeDirectoryDecl {
source: cm_rust::ExposeSource::Self_,
source_name: "example-dir".try_into().unwrap(),
target: cm_rust::ExposeTarget::Parent,
target_name: "example-dir".try_into().unwrap(),
rights: None,
subdir: None,
},
)],
..cm_rust::ComponentDecl::default()
},
),
],
);
}
#[fasync::run_singlethreaded(test)]
async fn three_siblings_two_targets() {
let mut realm = RealmNode::default();
realm
.set_component(
"a".into(),
frealmbuilder::Component::Url("fuchsia-pkg://a".to_string()),
&None,
)
.await
.unwrap();
realm
.set_component(
"b".into(),
frealmbuilder::Component::Url("fuchsia-pkg://b".to_string()),
&None,
)
.await
.unwrap();
realm
.set_component(
"c".into(),
frealmbuilder::Component::Url("fuchsia-pkg://c".to_string()),
&None,
)
.await
.unwrap();
realm.mark_as_eager("a".into()).unwrap();
realm.mark_as_eager("c".into()).unwrap();
realm
.route_capability(frealmbuilder::CapabilityRoute {
capability: Some(frealmbuilder::Capability::Protocol(
frealmbuilder::ProtocolCapability {
name: Some("fidl.examples.routing.echo.Echo".to_string()),
..frealmbuilder::ProtocolCapability::EMPTY
},
)),
source: Some(frealmbuilder::RouteEndpoint::Component("b".to_string())),
targets: Some(vec![
frealmbuilder::RouteEndpoint::Component("a".to_string()),
frealmbuilder::RouteEndpoint::Component("c".to_string()),
]),
..frealmbuilder::CapabilityRoute::EMPTY
})
.unwrap();
realm
.route_capability(frealmbuilder::CapabilityRoute {
capability: Some(frealmbuilder::Capability::Directory(
frealmbuilder::DirectoryCapability {
name: Some("example-dir".to_string()),
path: Some("/example".to_string()),
rights: Some(fio2::RW_STAR_DIR),
..frealmbuilder::DirectoryCapability::EMPTY
},
)),
source: Some(frealmbuilder::RouteEndpoint::Component("b".to_string())),
targets: Some(vec![
frealmbuilder::RouteEndpoint::Component("a".to_string()),
frealmbuilder::RouteEndpoint::Component("c".to_string()),
]),
..frealmbuilder::CapabilityRoute::EMPTY
})
.unwrap();
check_results(
realm,
vec![(
"",
cm_rust::ComponentDecl {
offers: vec![
cm_rust::OfferDecl::Protocol(cm_rust::OfferProtocolDecl {
source: cm_rust::OfferSource::Child("b".to_string()),
source_name: "fidl.examples.routing.echo.Echo".try_into().unwrap(),
target: cm_rust::OfferTarget::Child("a".to_string()),
target_name: "fidl.examples.routing.echo.Echo".try_into().unwrap(),
dependency_type: cm_rust::DependencyType::Strong,
}),
cm_rust::OfferDecl::Protocol(cm_rust::OfferProtocolDecl {
source: cm_rust::OfferSource::Child("b".to_string()),
source_name: "fidl.examples.routing.echo.Echo".try_into().unwrap(),
target: cm_rust::OfferTarget::Child("c".to_string()),
target_name: "fidl.examples.routing.echo.Echo".try_into().unwrap(),
dependency_type: cm_rust::DependencyType::Strong,
}),
cm_rust::OfferDecl::Directory(cm_rust::OfferDirectoryDecl {
source: cm_rust::OfferSource::Child("b".to_string()),
source_name: "example-dir".try_into().unwrap(),
target: cm_rust::OfferTarget::Child("a".to_string()),
target_name: "example-dir".try_into().unwrap(),
dependency_type: cm_rust::DependencyType::Strong,
rights: None,
subdir: None,
}),
cm_rust::OfferDecl::Directory(cm_rust::OfferDirectoryDecl {
source: cm_rust::OfferSource::Child("b".to_string()),
source_name: "example-dir".try_into().unwrap(),
target: cm_rust::OfferTarget::Child("c".to_string()),
target_name: "example-dir".try_into().unwrap(),
dependency_type: cm_rust::DependencyType::Strong,
rights: None,
subdir: None,
}),
],
children: vec![
cm_rust::ChildDecl {
name: "a".to_string(),
url: "fuchsia-pkg://a".to_string(),
startup: fsys::StartupMode::Eager,
environment: None,
on_terminate: None,
},
cm_rust::ChildDecl {
name: "b".to_string(),
url: "fuchsia-pkg://b".to_string(),
startup: fsys::StartupMode::Lazy,
environment: None,
on_terminate: None,
},
cm_rust::ChildDecl {
name: "c".to_string(),
url: "fuchsia-pkg://c".to_string(),
startup: fsys::StartupMode::Eager,
environment: None,
on_terminate: None,
},
],
..cm_rust::ComponentDecl::default()
},
)],
);
}
#[fasync::run_singlethreaded(test)]
async fn two_cousins_realm_one_mock() {
let mut realm = RealmNode::default();
realm
.set_component(
"a/b".into(),
frealmbuilder::Component::Url("fuchsia-pkg://a-b".to_string()),
&None,
)
.await
.unwrap();
realm
.set_component(
"c/d".into(),
frealmbuilder::Component::Decl(
cm_rust::ComponentDecl::default().native_into_fidl(),
),
&None,
)
.await
.unwrap();
realm
.route_capability(frealmbuilder::CapabilityRoute {
capability: Some(frealmbuilder::Capability::Protocol(
frealmbuilder::ProtocolCapability {
name: Some("fidl.examples.routing.echo.Echo".to_string()),
..frealmbuilder::ProtocolCapability::EMPTY
},
)),
source: Some(frealmbuilder::RouteEndpoint::Component("a/b".to_string())),
targets: Some(vec![frealmbuilder::RouteEndpoint::Component("c/d".to_string())]),
..frealmbuilder::CapabilityRoute::EMPTY
})
.unwrap();
realm
.route_capability(frealmbuilder::CapabilityRoute {
capability: Some(frealmbuilder::Capability::Directory(
frealmbuilder::DirectoryCapability {
name: Some("example-dir".to_string()),
path: Some("/example".to_string()),
rights: Some(fio2::RW_STAR_DIR),
..frealmbuilder::DirectoryCapability::EMPTY
},
)),
source: Some(frealmbuilder::RouteEndpoint::Component("a/b".to_string())),
targets: Some(vec![frealmbuilder::RouteEndpoint::Component("c/d".to_string())]),
..frealmbuilder::CapabilityRoute::EMPTY
})
.unwrap();
check_results(
realm,
vec![
(
"",
cm_rust::ComponentDecl {
offers: vec![
cm_rust::OfferDecl::Protocol(cm_rust::OfferProtocolDecl {
source: cm_rust::OfferSource::Child("a".to_string()),
source_name: "fidl.examples.routing.echo.Echo".try_into().unwrap(),
target: cm_rust::OfferTarget::Child("c".to_string()),
target_name: "fidl.examples.routing.echo.Echo".try_into().unwrap(),
dependency_type: cm_rust::DependencyType::Strong,
}),
cm_rust::OfferDecl::Directory(cm_rust::OfferDirectoryDecl {
source: cm_rust::OfferSource::Child("a".to_string()),
source_name: "example-dir".try_into().unwrap(),
target: cm_rust::OfferTarget::Child("c".to_string()),
target_name: "example-dir".try_into().unwrap(),
dependency_type: cm_rust::DependencyType::Strong,
rights: None,
subdir: None,
}),
],
children: vec![
// Generated children aren't inserted into the decls at this point, as
// their URLs are unknown until registration with the framework
// intermediary, and that happens during Realm::create
],
..cm_rust::ComponentDecl::default()
},
),
(
"a",
cm_rust::ComponentDecl {
exposes: vec![
cm_rust::ExposeDecl::Protocol(cm_rust::ExposeProtocolDecl {
source: cm_rust::ExposeSource::Child("b".to_string()),
source_name: "fidl.examples.routing.echo.Echo".try_into().unwrap(),
target: cm_rust::ExposeTarget::Parent,
target_name: "fidl.examples.routing.echo.Echo".try_into().unwrap(),
}),
cm_rust::ExposeDecl::Directory(cm_rust::ExposeDirectoryDecl {
source: cm_rust::ExposeSource::Child("b".to_string()),
source_name: "example-dir".try_into().unwrap(),
target: cm_rust::ExposeTarget::Parent,
target_name: "example-dir".try_into().unwrap(),
rights: None,
subdir: None,
}),
],
children: vec![cm_rust::ChildDecl {
name: "b".to_string(),
url: "fuchsia-pkg://a-b".to_string(),
startup: fsys::StartupMode::Lazy,
environment: None,
on_terminate: None,
}],
..cm_rust::ComponentDecl::default()
},
),
(
"c",
cm_rust::ComponentDecl {
offers: vec![
cm_rust::OfferDecl::Protocol(cm_rust::OfferProtocolDecl {
source: cm_rust::OfferSource::Parent,
source_name: "fidl.examples.routing.echo.Echo".try_into().unwrap(),
target: cm_rust::OfferTarget::Child("d".to_string()),
target_name: "fidl.examples.routing.echo.Echo".try_into().unwrap(),
dependency_type: cm_rust::DependencyType::Strong,
}),
cm_rust::OfferDecl::Directory(cm_rust::OfferDirectoryDecl {
source: cm_rust::OfferSource::Parent,
source_name: "example-dir".try_into().unwrap(),
target: cm_rust::OfferTarget::Child("d".to_string()),
target_name: "example-dir".try_into().unwrap(),
dependency_type: cm_rust::DependencyType::Strong,
rights: None,
subdir: None,
}),
],
..cm_rust::ComponentDecl::default()
},
),
(
"c/d",
cm_rust::ComponentDecl {
uses: vec![
cm_rust::UseDecl::Protocol(cm_rust::UseProtocolDecl {
source: cm_rust::UseSource::Parent,
source_name: "fidl.examples.routing.echo.Echo".try_into().unwrap(),
target_path: "/svc/fidl.examples.routing.echo.Echo"
.try_into()
.unwrap(),
dependency_type: cm_rust::DependencyType::Strong,
}),
cm_rust::UseDecl::Directory(cm_rust::UseDirectoryDecl {
source: cm_rust::UseSource::Parent,
source_name: "example-dir".try_into().unwrap(),
target_path: "/example".try_into().unwrap(),
rights: fio2::RW_STAR_DIR,
subdir: None,
dependency_type: cm_rust::DependencyType::Strong,
}),
],
..cm_rust::ComponentDecl::default()
},
),
],
);
}
#[fasync::run_singlethreaded(test)]
async fn parent_use_from_url_child() {
let mut realm = RealmNode::default();
realm
.set_component(
"a".into(),
frealmbuilder::Component::Decl(
cm_rust::ComponentDecl::default().native_into_fidl(),
),
&None,
)
.await
.unwrap();
realm
.set_component(
"a/b".into(),
frealmbuilder::Component::Url("fuchsia-pkg://b".to_string()),
&None,
)
.await
.unwrap();
realm.mark_as_eager("a/b".into()).unwrap();
realm
.route_capability(frealmbuilder::CapabilityRoute {
capability: Some(frealmbuilder::Capability::Protocol(
frealmbuilder::ProtocolCapability {
name: Some("fidl.examples.routing.echo.Echo".to_string()),
..frealmbuilder::ProtocolCapability::EMPTY
},
)),
source: Some(frealmbuilder::RouteEndpoint::Component("a/b".to_string())),
targets: Some(vec![frealmbuilder::RouteEndpoint::Component("a".to_string())]),
..frealmbuilder::CapabilityRoute::EMPTY
})
.unwrap();
check_results(
realm,
vec![
(
"",
cm_rust::ComponentDecl {
children: vec![
// Mock children aren't inserted into the decls at this point, as their
// URLs are unknown until registration with the framework intermediary,
// and that happens during Realm::create
],
..cm_rust::ComponentDecl::default()
},
),
(
"a",
cm_rust::ComponentDecl {
uses: vec![cm_rust::UseDecl::Protocol(cm_rust::UseProtocolDecl {
source: cm_rust::UseSource::Child("b".to_string()),
source_name: "fidl.examples.routing.echo.Echo".try_into().unwrap(),
target_path: "/svc/fidl.examples.routing.echo.Echo".try_into().unwrap(),
dependency_type: cm_rust::DependencyType::Strong,
})],
children: vec![cm_rust::ChildDecl {
name: "b".to_string(),
url: "fuchsia-pkg://b".to_string(),
startup: fsys::StartupMode::Eager,
environment: None,
on_terminate: None,
}],
..cm_rust::ComponentDecl::default()
},
),
],
);
}
#[fasync::run_singlethreaded(test)]
async fn parent_use_from_mock_child() {
let mut realm = RealmNode::default();
realm
.set_component(
"a".into(),
frealmbuilder::Component::Decl(
cm_rust::ComponentDecl::default().native_into_fidl(),
),
&None,
)
.await
.unwrap();
realm
.set_component(
"a/b".into(),
frealmbuilder::Component::Decl(
cm_rust::ComponentDecl::default().native_into_fidl(),
),
&None,
)
.await
.unwrap();
realm.mark_as_eager("a/b".into()).unwrap();
realm
.route_capability(frealmbuilder::CapabilityRoute {
capability: Some(frealmbuilder::Capability::Protocol(
frealmbuilder::ProtocolCapability {
name: Some("fidl.examples.routing.echo.Echo".to_string()),
..frealmbuilder::ProtocolCapability::EMPTY
},
)),
source: Some(frealmbuilder::RouteEndpoint::Component("a/b".to_string())),
targets: Some(vec![frealmbuilder::RouteEndpoint::Component("a".to_string())]),
..frealmbuilder::CapabilityRoute::EMPTY
})
.unwrap();
check_results(
realm,
vec![
(
"",
cm_rust::ComponentDecl {
children: vec![
// Mock children aren't inserted into the decls at this point, as their
// URLs are unknown until registration with the framework intermediary,
// and that happens during Realm::create
],
..cm_rust::ComponentDecl::default()
},
),
(
"a",
cm_rust::ComponentDecl {
uses: vec![cm_rust::UseDecl::Protocol(cm_rust::UseProtocolDecl {
source: cm_rust::UseSource::Child("b".to_string()),
source_name: "fidl.examples.routing.echo.Echo".try_into().unwrap(),
target_path: "/svc/fidl.examples.routing.echo.Echo".try_into().unwrap(),
dependency_type: cm_rust::DependencyType::Strong,
})],
children: vec![
// Mock children aren't inserted into the decls at this point, as their
// URLs are unknown until registration with the framework intermediary,
// and that happens during Realm::create
],
..cm_rust::ComponentDecl::default()
},
),
(
"a/b",
cm_rust::ComponentDecl {
capabilities: vec![cm_rust::CapabilityDecl::Protocol(
cm_rust::ProtocolDecl {
name: "fidl.examples.routing.echo.Echo".try_into().unwrap(),
source_path: Some(
"/svc/fidl.examples.routing.echo.Echo".try_into().unwrap(),
),
},
)],
exposes: vec![cm_rust::ExposeDecl::Protocol(cm_rust::ExposeProtocolDecl {
source: cm_rust::ExposeSource::Self_,
source_name: "fidl.examples.routing.echo.Echo".try_into().unwrap(),
target: cm_rust::ExposeTarget::Parent,
target_name: "fidl.examples.routing.echo.Echo".try_into().unwrap(),
})],
..cm_rust::ComponentDecl::default()
},
),
],
);
}
#[fasync::run_singlethreaded(test)]
async fn grandparent_use_from_mock_child() {
let mut realm = RealmNode::default();
realm
.set_component(
"a/b/c".into(),
frealmbuilder::Component::Decl(
cm_rust::ComponentDecl::default().native_into_fidl(),
),
&None,
)
.await
.unwrap();
realm.mark_as_eager("a/b/c".into()).unwrap();
realm
.route_capability(frealmbuilder::CapabilityRoute {
capability: Some(frealmbuilder::Capability::Protocol(
frealmbuilder::ProtocolCapability {
name: Some("fidl.examples.routing.echo.Echo".to_string()),
..frealmbuilder::ProtocolCapability::EMPTY
},
)),
source: Some(frealmbuilder::RouteEndpoint::Component("a/b/c".to_string())),
targets: Some(vec![frealmbuilder::RouteEndpoint::Component("a".to_string())]),
..frealmbuilder::CapabilityRoute::EMPTY
})
.unwrap();
check_results(
realm,
vec![
(
"",
cm_rust::ComponentDecl {
children: vec![
// Mock children aren't inserted into the decls at this point, as their
// URLs are unknown until registration with the framework intermediary,
// and that happens during Realm::create
],
..cm_rust::ComponentDecl::default()
},
),
(
"a",
cm_rust::ComponentDecl {
uses: vec![cm_rust::UseDecl::Protocol(cm_rust::UseProtocolDecl {
source: cm_rust::UseSource::Child("b".to_string()),
source_name: "fidl.examples.routing.echo.Echo".try_into().unwrap(),
target_path: "/svc/fidl.examples.routing.echo.Echo".try_into().unwrap(),
dependency_type: cm_rust::DependencyType::Strong,
})],
children: vec![
// Mock children aren't inserted into the decls at this point, as their
// URLs are unknown until registration with the framework intermediary,
// and that happens during Realm::create
],
..cm_rust::ComponentDecl::default()
},
),
(
"a/b",
cm_rust::ComponentDecl {
exposes: vec![cm_rust::ExposeDecl::Protocol(cm_rust::ExposeProtocolDecl {
source: cm_rust::ExposeSource::Child("c".to_string()),
source_name: "fidl.examples.routing.echo.Echo".try_into().unwrap(),
target: cm_rust::ExposeTarget::Parent,
target_name: "fidl.examples.routing.echo.Echo".try_into().unwrap(),
})],
children: vec![
// Mock children aren't inserted into the decls at this point, as their
// URLs are unknown until registration with the framework intermediary,
// and that happens during Realm::create
],
..cm_rust::ComponentDecl::default()
},
),
(
"a/b/c",
cm_rust::ComponentDecl {
capabilities: vec![cm_rust::CapabilityDecl::Protocol(
cm_rust::ProtocolDecl {
name: "fidl.examples.routing.echo.Echo".try_into().unwrap(),
source_path: Some(
"/svc/fidl.examples.routing.echo.Echo".try_into().unwrap(),
),
},
)],
exposes: vec![cm_rust::ExposeDecl::Protocol(cm_rust::ExposeProtocolDecl {
source: cm_rust::ExposeSource::Self_,
source_name: "fidl.examples.routing.echo.Echo".try_into().unwrap(),
target: cm_rust::ExposeTarget::Parent,
target_name: "fidl.examples.routing.echo.Echo".try_into().unwrap(),
})],
..cm_rust::ComponentDecl::default()
},
),
],
);
}
}
| {
if let Ok(target_node) = self.get_node_mut(&target_moniker, GetBehavior::ErrorIfMissing) {
target_node.add_use_for_capability(
&capability,
cm_rust::UseSource::Parent,
force_route,
)?;
// TODO(fxbug.dev/74977): eagerly validate decls once weak routes are supported
//target_node.validate(&target_moniker)?;
} else {
// `get_node_mut` only returns `Ok` for mutable nodes. If this node is immutable
// (located behind a ChildDecl) we have to presume that the component already uses
// the capability.
}
if let Ok(source_node) = self.get_node_mut(&source_moniker, GetBehavior::ErrorIfMissing) {
source_node.add_capability_decl(&capability, force_route)?;
// TODO(fxbug.dev/74977): eagerly validate decls once weak routes are supported
//target_node.validate(&target_moniker)?;
} else {
// `get_node_mut` only returns `Ok` for mutable nodes. If this node is immutable
// (located behind a ChildDecl) we have to presume that the component already uses
// the capability.
}
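        // Find the closest common ancestor of the source and target by walking up from
        // the target's parent until the moniker equals or contains the source.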
let mut common_ancestor_moniker = target_moniker.parent().unwrap();
while common_ancestor_moniker != source_moniker
&& !common_ancestor_moniker.is_ancestor_of(&source_moniker)
{
common_ancestor_moniker = common_ancestor_moniker.parent().unwrap();
}
let common_ancestor =
self.get_node_mut(&common_ancestor_moniker, GetBehavior::ErrorIfMissing)?;
let mut path_to_target = common_ancestor_moniker.downward_path_to(&target_moniker);
let first_offer_name = path_to_target.remove(0);
let mut current_ancestor_moniker = common_ancestor_moniker.child(first_offer_name.clone());
let mut current_node = common_ancestor.child(&first_offer_name);
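        // Walk down from the common ancestor toward the target, offering the capability
        // from the parent at each hop so it is plumbed down the realm tree.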
for child_name in path_to_target {
let current = current_node?;
current.add_offer_for_capability(
&capability,
cm_rust::OfferSource::Parent,
&child_name,
force_route,
)?;
// TODO(fxbug.dev/74977): eagerly validate decls once weak routes are supported
            //current.validate(&current_ancestor_moniker)?;
current_node = current.child(&child_name);
current_ancestor_moniker = current_ancestor_moniker.child(child_name.clone());
}
if common_ancestor_moniker == source_moniker {
// We don't need to add an expose chain, we reached the source moniker solely
// by walking up the tree
let common_ancestor =
self.get_node_mut(&common_ancestor_moniker, GetBehavior::ErrorIfMissing)?;
common_ancestor.add_offer_for_capability(
&capability,
cm_rust::OfferSource::Self_,
&first_offer_name,
force_route,
)?;
// TODO(fxbug.dev/74977): eagerly validate decls once weak routes are supported
//common_ancestor.validate(&common_ancestor_moniker)?;
return Ok(());
}
// We need an expose chain to descend down the tree to our source.
let mut path_to_target = common_ancestor_moniker.downward_path_to(&source_moniker);
let first_expose_name = path_to_target.remove(0);
let mut current_ancestor_moniker = common_ancestor_moniker.child(first_expose_name.clone());
let mut current_node = common_ancestor.child(&first_expose_name);
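        // Walk down from the common ancestor toward the source, exposing the capability
        // from the relevant child at each hop so it is plumbed back up the tree.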
for child_name in path_to_target {
let current = current_node?;
current.add_expose_for_capability(
&capability,
cm_rust::ExposeSource::Child(child_name.to_string()),
force_route,
)?;
// TODO(fxbug.dev/74977): eagerly validate decls once weak routes are supported
            //current.validate(&current_ancestor_moniker)?;
current_node = current.child(&child_name);
current_ancestor_moniker = current_ancestor_moniker.child(child_name.clone());
}
if let Ok(source_node) = current_node {
source_node.add_expose_for_capability(
&capability,
cm_rust::ExposeSource::Self_,
force_route,
)?;
// TODO(fxbug.dev/74977): eagerly validate decls once weak routes are supported
            //source_node.validate(&current_ancestor_moniker)?;
} else {
// `RealmNode::child` only returns `Ok` for mutable nodes. If this node is immutable
// (located behind a ChildDecl) we have to presume that the component already exposes
// the capability.
}
common_ancestor.add_offer_for_capability(
&capability,
cm_rust::OfferSource::Child(first_expose_name.clone()),
&first_offer_name,
force_route,
)?;
// TODO(fxbug.dev/74977): eagerly validate decls once weak routes are supported
//common_ancestor.validate(&common_ancestor_moniker)?;
Ok(())
} |
unix_time.rs | use crate::protocol;
use std::{self, time};
/// The number of seconds from 1st January 1900 UTC to the start of the Unix epoch.
pub const EPOCH_DELTA: i64 = 2_208_988_800;
// The NTP fractional scale.
const NTP_SCALE: f64 = std::u32::MAX as f64 + 1.0;
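// For example, a raw NTP fraction of 0x8000_0000 is half of NTP_SCALE, i.e. 0.5 seconds
// or 500_000_000 ns after conversion.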
/// Describes an instant relative to the `UNIX_EPOCH` - 00:00:00 Coordinated Universal Time (UTC),
/// Thursday, 1 January 1970, in seconds with the fractional part in nanoseconds.
///
/// If the **Instant** describes some moment prior to `UNIX_EPOCH`, both the `secs` and
/// `subsec_nanos` components will be negative.
///
/// The sole purpose of this type is for retrieving the "current" time using the `std::time` module
/// and for converting between the NTP timestamp formats. If you are interested in converting from
/// Unix time to some other, more human-readable format, perhaps see the [chrono
/// crate](https://crates.io/crates/chrono).
///
/// ## Example
///
/// Here is a demonstration of displaying the **Instant** in local time using the chrono crate:
///
/// ```
/// extern crate chrono;
/// extern crate ntp;
///
/// use chrono::TimeZone;
///
/// fn main() {
/// let unix_time = ntp::unix_time::Instant::now();
/// let local_time = chrono::Local.timestamp(unix_time.secs(), unix_time.subsec_nanos() as _);
/// println!("{}", local_time);
/// }
/// ```
#[derive(Copy, Clone, Debug)]
pub struct Instant {
secs: i64,
subsec_nanos: i32,
}
impl Instant {
/// Create a new **Instant** given its `secs` and `subsec_nanos` components.
///
/// To indicate a time following `UNIX_EPOCH`, both `secs` and `subsec_nanos` must be positive.
/// To indicate a time prior to `UNIX_EPOCH`, both `secs` and `subsec_nanos` must be negative.
/// Violating these invariants will result in a **panic!**.
pub fn new(secs: i64, subsec_nanos: i32) -> Instant {
if secs > 0 && subsec_nanos < 0 {
panic!("invalid instant: secs was positive but subsec_nanos was negative");
}
if secs < 0 && subsec_nanos > 0 {
panic!("invalid instant: secs was negative but subsec_nanos was positive");
}
Instant { secs, subsec_nanos } | /// Uses `std::time::SystemTime::now` and `std::time::UNIX_EPOCH` to determine the current
/// **Instant**.
///
/// ## Example
///
/// ```
/// extern crate ntp;
///
/// fn main() {
/// println!("{:?}", ntp::unix_time::Instant::now());
/// }
/// ```
pub fn now() -> Self {
match time::SystemTime::now().duration_since(time::UNIX_EPOCH) {
Ok(duration) => {
let secs = duration.as_secs() as i64;
let subsec_nanos = duration.subsec_nanos() as i32;
Instant::new(secs, subsec_nanos)
}
Err(sys_time_err) => {
let duration_pre_unix_epoch = sys_time_err.duration();
let secs = -(duration_pre_unix_epoch.as_secs() as i64);
let subsec_nanos = -(duration_pre_unix_epoch.subsec_nanos() as i32);
Instant::new(secs, subsec_nanos)
}
}
}
/// The "seconds" component of the **Instant**.
pub fn secs(&self) -> i64 {
self.secs
}
/// The fractional component of the **Instant** in nanoseconds.
pub fn subsec_nanos(&self) -> i32 {
self.subsec_nanos
}
}
// Conversion implementations.
impl From<protocol::ShortFormat> for Instant {
fn from(t: protocol::ShortFormat) -> Self {
let secs = i64::from(t.seconds) - EPOCH_DELTA;
let subsec_nanos = (f64::from(t.fraction) / NTP_SCALE * 1e9) as i32;
Instant::new(secs, subsec_nanos)
}
}
impl From<protocol::TimestampFormat> for Instant {
fn from(t: protocol::TimestampFormat) -> Self {
let secs = i64::from(t.seconds) - EPOCH_DELTA;
let subsec_nanos = (f64::from(t.fraction) / NTP_SCALE * 1e9) as i32;
Instant::new(secs, subsec_nanos)
}
}
impl From<Instant> for protocol::ShortFormat {
fn from(t: Instant) -> Self {
let sec = t.secs() + EPOCH_DELTA;
let frac = f64::from(t.subsec_nanos()) * NTP_SCALE / 1e9;
protocol::ShortFormat {
seconds: sec as u16,
fraction: frac as u16,
}
}
}
impl From<Instant> for protocol::TimestampFormat {
fn from(t: Instant) -> Self {
let sec = t.secs() + EPOCH_DELTA;
let frac = f64::from(t.subsec_nanos()) * NTP_SCALE / 1e9;
protocol::TimestampFormat {
seconds: sec as u32,
fraction: frac as u32,
}
}
} | }
|
app.js | /**
* First we will load all of this project's JavaScript dependencies which
* includes Vue and other libraries. It is a great starting point when
* building robust, powerful web applications using Vue and Laravel.
*/
require('./bootstrap');
window.Vue = require('vue');
// ES6
import Vue from 'vue'
// scroll
import VueChatScroll from 'vue-chat-scroll'
Vue.use(VueChatScroll)
// toaster
import Toaster from 'v-toaster'
// You need a specific loader for CSS files like https://github.com/webpack/css-loader
import 'v-toaster/dist/v-toaster.css'
// optionally set the default timeout; the default is 10000 (10 seconds).
Vue.use(Toaster, {timeout: 5000})
/**
* The following block of code may be used to automatically register your
* Vue components. It will recursively scan this directory for the Vue
* components and automatically register them with their "basename".
*
* Eg. ./components/ExampleComponent.vue -> <example-component></example-component>
*/
// const files = require.context('./', true, /\.vue$/i)
// files.keys().map(key => Vue.component(key.split('/').pop().split('.')[0], files(key).default))
Vue.component('message', require('./components/message.vue').default);
/**
* Next, we will create a fresh Vue application instance and attach it to
* the page. Then, you may begin adding components to this application
* or customize the JavaScript scaffolding to fit your unique needs.
*/
const app = new Vue({
el: '#app',
data:{
message:'',
chat:{
message:[],
user:[],
color:[],
time:[]
},
typing:'',
numOfUsers:''
},
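    // Whisper events are broadcast directly to the other subscribers on the private
    // channel; they are not sent to the Laravel application itself.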
watch:{
message(){
Echo.private('chat')
.whisper('typing', {
name: this.message
});
}
},
methods:{
send(){
if (this.message.length!=0) {
this.chat.message.push(this.message);
this.chat.color.push('success');
this.chat.user.push('you');
this.chat.time.push(this.getTime());
axios.post('/send', {
message:this.message,
chat:this.chat
})
.then(response=>{
console.log(response);
this.message=''
})
.catch(error=>{
console.log(error);
});
}
},
getOldMessages(){
axios.post('/getOldMessages').then(response=>{
if (response.data!='') {
this.chat = response.data ;
}
});
},
deleteSession(){
axios.post('/deleteSession').then(response=>{
                this.$toaster.error('Chat history deleted.');
                // reset to an empty chat object so later pushes keep working
                this.chat = {message:[], user:[], color:[], time:[]};
// axios.get('/chat').then(response=>{});
});
},
getTime(){
            let time = new Date();
            // zero-pad each part so times render as HH:MM:SS
            const pad = n => String(n).padStart(2, '0');
            return pad(time.getHours())+":"+pad(time.getMinutes())+":"+pad(time.getSeconds());
}
},
mounted(){
this.getOldMessages();
Echo.private('chat')
.listen('ChatEvent', (e) => {
this.chat.message.push(e.message);
this.chat.user.push(e.user.name);
this.chat.color.push('info');
this.chat.time.push(this.getTime());
axios.post('/saveToSession' , {
chat : this.chat
}).then(response=>{
});
},
)
.listenForWhisper('typing', (e) => {
                if (e.name != '') {
                    this.typing = "t y p i n g - n o w . . .";
                } else {
                    this.typing = '';
                }
});
// joining & leaving
Echo.join('chat')
.here((users) => {
this.numOfUsers = users.length ; | })
.joining((user) => {
this.numOfUsers +=1;
// console.log(user.name);
this.$toaster.success(user.name+' | Join Now .');
})
.leaving((user) => {
this.numOfUsers-=1;
// console.log(user.name);
this.$toaster.warning(user.name+' | Leave .');
});
},
}); |
// console.log(users); |
treemaps.py | # Squarified Treemap Layout
# Implements the algorithm from Bruls, Huizing, and van Wijk, "Squarified Treemaps",
# with some modifications following Laserson's squarify implementation
# (but not using their pseudocode)
# INTERNAL FUNCTIONS not meant to be used by the user
def pad_rectangle(rect):
|
def layoutrow(sizes, x, y, dx, dy):
# generate rects for each size in sizes
# dx >= dy
# they will fill up height dy, and width will be determined by their area
# sizes should be pre-normalized wrt dx * dy (i.e., they should be same units)
covered_area = sum(sizes)
width = covered_area / dy
rects = []
for size in sizes:
rects.append({"x": x, "y": y, "dx": width, "dy": size / width})
y += size / width
return rects
def layoutcol(sizes, x, y, dx, dy):
# generate rects for each size in sizes
# dx < dy
# they will fill up width dx, and height will be determined by their area
# sizes should be pre-normalized wrt dx * dy (i.e., they should be same units)
covered_area = sum(sizes)
height = covered_area / dx
rects = []
for size in sizes:
rects.append({"x": x, "y": y, "dx": size / height, "dy": height})
x += size / height
return rects
def layout(sizes, x, y, dx, dy):
return (
layoutrow(sizes, x, y, dx, dy) if dx >= dy else layoutcol(sizes, x, y, dx, dy)
)
def leftoverrow(sizes, x, y, dx, dy):
# compute remaining area when dx >= dy
covered_area = sum(sizes)
width = covered_area / dy
leftover_x = x + width
leftover_y = y
leftover_dx = dx - width
leftover_dy = dy
return (leftover_x, leftover_y, leftover_dx, leftover_dy)
def leftovercol(sizes, x, y, dx, dy):
# compute remaining area when dx >= dy
covered_area = sum(sizes)
height = covered_area / dx
leftover_x = x
leftover_y = y + height
leftover_dx = dx
leftover_dy = dy - height
return (leftover_x, leftover_y, leftover_dx, leftover_dy)
def leftover(sizes, x, y, dx, dy):
return (
leftoverrow(sizes, x, y, dx, dy)
if dx >= dy
else leftovercol(sizes, x, y, dx, dy)
)
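# worst_ratio returns the highest aspect ratio (long side / short side) among the
# rectangles the candidate layout would produce; squarify uses it to decide when adding
# another size to the current row/column stops improving squareness.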
def worst_ratio(sizes, x, y, dx, dy):
return max(
[
max(rect["dx"] / rect["dy"], rect["dy"] / rect["dx"])
for rect in layout(sizes, x, y, dx, dy)
]
)
# PUBLIC API
def squarify(sizes, x, y, dx, dy):
"""Compute treemap rectangles.
Given a set of values, computes a treemap layout in the specified geometry
using an algorithm based on Bruls, Huizing, van Wijk, "Squarified Treemaps".
See README for example usage.
Parameters
----------
sizes : list-like of numeric values
The set of values to compute a treemap for. `sizes` must be positive
values sorted in descending order and they should be normalized to the
total area (i.e., `dx * dy == sum(sizes)`)
x, y : numeric
The coordinates of the "origin".
dx, dy : numeric
The full width (`dx`) and height (`dy`) of the treemap.
Returns
-------
list[dict]
Each dict in the returned list represents a single rectangle in the
treemap. The order corresponds to the input order.
"""
sizes = list(map(float, sizes))
if len(sizes) == 0:
return []
if len(sizes) == 1:
return layout(sizes, x, y, dx, dy)
# figure out where 'split' should be
i = 1
while i < len(sizes) and worst_ratio(sizes[:i], x, y, dx, dy) >= worst_ratio(
sizes[: (i + 1)], x, y, dx, dy
):
i += 1
current = sizes[:i]
remaining = sizes[i:]
(leftover_x, leftover_y, leftover_dx, leftover_dy) = leftover(current, x, y, dx, dy)
return layout(current, x, y, dx, dy) + squarify(
remaining, leftover_x, leftover_y, leftover_dx, leftover_dy
)
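# A minimal usage sketch (the values and the 700x433 geometry are illustrative only):
#   values = normalize_sizes([500, 433, 78, 25, 25, 7], 700, 433)
#   rects = squarify(values, 0, 0, 700, 433)
#   # rects is a list of {"x", "y", "dx", "dy"} dicts, in input order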
def padded_squarify(sizes, x, y, dx, dy):
"""Compute padded treemap rectangles.
See `squarify` docstring for details. The only difference is that the
returned rectangles have been "padded" to allow for a visible border.
"""
rects = squarify(sizes, x, y, dx, dy)
for rect in rects:
pad_rectangle(rect)
return rects
def normalize_sizes(sizes, dx, dy):
"""Normalize list of values.
Normalizes a list of numeric values so that `sum(sizes) == dx * dy`.
Parameters
----------
sizes : list-like of numeric values
Input list of numeric values to normalize.
dx, dy : numeric
The dimensions of the full rectangle to normalize total values to.
Returns
-------
list[numeric]
The normalized values.
"""
total_size = sum(sizes)
total_area = dx * dy
sizes = map(float, sizes)
sizes = map(lambda size: size * total_area / total_size, sizes)
return list(sizes)
def plot(
sizes,
norm_x=100,
norm_y=100,
color=None,
label=None,
value=None,
ax=None,
pad=False,
bar_kwargs=None,
text_kwargs=None,
**kwargs
):
"""Plotting with Matplotlib.
Parameters
----------
sizes
input for squarify
norm_x, norm_y
x and y values for normalization
color
color string or list-like (see Matplotlib documentation for details)
label
list-like used as label text
value
list-like used as value text (in most cases identical with sizes argument)
ax
Matplotlib Axes instance
pad
draw rectangles with a small gap between them
bar_kwargs : dict
keyword arguments passed to matplotlib.Axes.bar
text_kwargs : dict
keyword arguments passed to matplotlib.Axes.text
**kwargs
Any additional kwargs are merged into `bar_kwargs`. Explicitly provided
kwargs here will take precedence.
Returns
-------
matplotlib.axes.Axes
Matplotlib Axes
"""
import matplotlib.pyplot as plt
from math import ceil
if ax is None:
ax = plt.gca()
if color is None:
color_cycle = plt.rcParams['axes.prop_cycle'].by_key()['color']
if len(sizes) > len(color_cycle):
            color_cycle = color_cycle * ceil(len(sizes) / len(color_cycle))
color = color_cycle[:len(sizes)]
if bar_kwargs is None:
bar_kwargs = {}
if text_kwargs is None:
text_kwargs = {}
if len(kwargs) > 0:
bar_kwargs.update(kwargs)
normed = normalize_sizes(sizes, norm_x, norm_y)
if pad:
rects = padded_squarify(normed, 0, 0, norm_x, norm_y)
else:
rects = squarify(normed, 0, 0, norm_x, norm_y)
x = [rect["x"] for rect in rects]
y = [rect["y"] for rect in rects]
dx = [rect["dx"] for rect in rects]
dy = [rect["dy"] for rect in rects]
ax.bar(
x, dy, width=dx, bottom=y, color=color, label=label, align="edge", **bar_kwargs
)
if value is not None:
va = "center" if label is None else "top"
for v, r in zip(value, rects):
x, y, dx, dy = r["x"], r["y"], r["dx"], r["dy"]
ax.text(x + dx / 2, y + dy / 2, v, va=va, ha="center", **text_kwargs)
if label is not None:
va = "center" if value is None else "bottom"
for l, r in zip(label, rects):
x, y, dx, dy = r["x"], r["y"], r["dx"], r["dy"]
ax.text(x + dx / 2, y + dy / 2, l, va=va, ha="center", **text_kwargs)
ax.set_xlim(0, norm_x)
ax.set_ylim(0, norm_y)
ax.axis('off')
return ax | if rect["dx"] > 2:
rect["x"] += 1
rect["dx"] -= 2
if rect["dy"] > 2:
rect["y"] += 1
rect["dy"] -= 2 |
client.go | package main
import (
"fmt"
zmq "github.com/pebbe/zmq4"
"strings"
)
func | () {
zctx, _ := zmq.NewContext()
requester, _ := zctx.NewSocket(zmq.REQ)
defer requester.Close()
err := requester.Connect("tcp://localhost:5559")
if err != nil {
fmt.Print("very sad no connect")
return
}
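	// REQ sockets enforce a strict send/recv lockstep: each SendMessage must be
	// answered by a RecvMessage before the next send, which this loop respects.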
for i := 0; i < 1000000; i++ {
requester.SendMessage("Hello")
reply, _ := requester.RecvMessage(0)
fmt.Printf("Received reply %d [%s]\n", i, strings.Join(reply, " "))
}
}
| main |
entry_test.go | package logrus
import (
"bytes"
"context"
"fmt"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func TestEntryWithError(t *testing.T) {
assert := assert.New(t)
defer func() {
ErrorKey = "error"
}()
err := fmt.Errorf("kaboom at layer %d", 4711)
assert.Equal(err, WithError(err).Data["error"])
logger := New()
logger.Out = &bytes.Buffer{}
entry := NewEntry(logger)
assert.Equal(err, entry.WithError(err).Data["error"])
ErrorKey = "err"
assert.Equal(err, entry.WithError(err).Data["err"])
}
func TestEntryWithContext(t *testing.T) {
assert := assert.New(t)
ctx := context.WithValue(context.Background(), "foo", "bar")
assert.Equal(ctx, WithContext(ctx).Context)
logger := New()
logger.Out = &bytes.Buffer{}
entry := NewEntry(logger)
assert.Equal(ctx, entry.WithContext(ctx).Context)
}
func TestEntryPanicln(t *testing.T) {
errBoom := fmt.Errorf("boom time")
defer func() {
p := recover()
assert.NotNil(t, p)
switch pVal := p.(type) {
case *Entry:
assert.Equal(t, "kaboom", pVal.Message)
assert.Equal(t, errBoom, pVal.Data["err"])
default:
t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal)
}
}()
logger := New()
logger.Out = &bytes.Buffer{}
entry := NewEntry(logger)
entry.WithField("err", errBoom).Panicln("kaboom")
}
func TestEntryPanicf(t *testing.T) {
errBoom := fmt.Errorf("boom again")
defer func() {
p := recover()
assert.NotNil(t, p)
switch pVal := p.(type) {
case *Entry:
assert.Equal(t, "kaboom true", pVal.Message)
assert.Equal(t, errBoom, pVal.Data["err"])
default:
t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal)
}
}()
logger := New()
logger.Out = &bytes.Buffer{}
entry := NewEntry(logger)
entry.WithField("err", errBoom).Panicf("kaboom %v", true)
}
const (
badMessage = "this is going to panic"
panicMessage = "this is broken"
)
type panickyHook struct{}
func (p *panickyHook) Levels() []Level {
return []Level{InfoLevel}
}
func (p *panickyHook) Fire(entry *Entry) error {
if entry.Message == badMessage {
panic(panicMessage)
}
return nil
}
func TestEntryHooksPanic(t *testing.T) {
logger := New()
logger.Out = &bytes.Buffer{}
logger.Level = InfoLevel
logger.Hooks.Add(&panickyHook{})
defer func() {
p := recover()
assert.NotNil(t, p)
assert.Equal(t, panicMessage, p)
entry := NewEntry(logger)
entry.Info("another message")
}()
entry := NewEntry(logger)
entry.Info(badMessage)
}
func TestEntryWithIncorrectField(t *testing.T) {
assert := assert.New(t)
fn := func() {}
e := Entry{}
eWithFunc := e.WithFields(Fields{"func": fn})
eWithFuncPtr := e.WithFields(Fields{"funcPtr": &fn})
assert.Equal(eWithFunc.err, `can not add field "func"`)
assert.Equal(eWithFuncPtr.err, `can not add field "funcPtr"`)
eWithFunc = eWithFunc.WithField("not_a_func", "it is a string")
eWithFuncPtr = eWithFuncPtr.WithField("not_a_func", "it is a string")
assert.Equal(eWithFunc.err, `can not add field "func"`)
assert.Equal(eWithFuncPtr.err, `can not add field "funcPtr"`)
eWithFunc = eWithFunc.WithTime(time.Now())
eWithFuncPtr = eWithFuncPtr.WithTime(time.Now())
assert.Equal(eWithFunc.err, `can not add field "func"`)
assert.Equal(eWithFuncPtr.err, `can not add field "funcPtr"`)
}
func TestEntryLogfLevel(t *testing.T) | {
logger := New()
buffer := &bytes.Buffer{}
logger.Out = buffer
logger.SetLevel(InfoLevel)
entry := NewEntry(logger)
entry.Logf(DebugLevel, "%s", "debug")
	assert.NotContains(t, buffer.String(), "debug")
entry.Logf(WarnLevel, "%s", "warn")
	assert.Contains(t, buffer.String(), "warn")
} |
|
deps.ts | import * as oak from "https://deno.land/x/oak/mod.ts";
import * as sqlite from "https://deno.land/x/sqlite/mod.ts";
export {
oak, | sqlite
} |
|
open_test.py | import torch
from torch import nn
from torch.nn import Sequential, PReLU
#model definition
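# The (1,3) kernels and (1,2) pooling act along the detector axis only, so each
# projection angle (sinogram row) is filtered independently -- hence "Unet1D".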
class Unet1D(nn.Module):
def | (self):
super(Unet1D, self).__init__()
ch = 32
self.maxpool = nn.MaxPool2d((1,2))
self.unpool = nn.Upsample(scale_factor=(1,2))
self.startLayer = nn.Conv2d(1, ch, (1,3), padding=(0,1))
self.endLayer = nn.Conv2d(ch, 1, (1,1))
self.tb1 = Sequential(nn.Conv2d(ch, ch, (1,3), padding=(0,1), bias=False), PReLU())
self.tb2 = Sequential(nn.Conv2d(ch, ch, (1,3), padding=(0,1), bias=False), PReLU())
self.tb3 = Sequential(nn.Conv2d(ch, ch, (1,3), padding=(0,1), bias=False), PReLU())
self.tb4 = Sequential(nn.Conv2d(ch, ch, (1,3), padding=(0,1), bias=False), PReLU())
self.tb5 = Sequential(nn.Conv2d(ch, ch, (1,3), padding=(0,1), bias=False), PReLU())
self.db1 = Sequential(nn.Conv2d(ch * 2, ch, (1,3), padding=(0,1), bias=False), PReLU())
self.db2 = Sequential(nn.Conv2d(ch * 2, ch, (1,3), padding=(0,1), bias=False), PReLU())
self.db3 = Sequential(nn.Conv2d(ch * 2, ch, (1,3), padding=(0,1), bias=False), PReLU())
self.db4 = Sequential(nn.Conv2d(ch * 2, ch, (1,3), padding=(0,1), bias=False), PReLU())
self.db5 = Sequential(nn.Conv2d(ch, ch, (1,3), padding=(0,1), bias=False), PReLU())
def forward(self, x):
data = self.startLayer(x)
data1 = self.tb1(data)
data2 = self.tb2(self.maxpool(data1))
data3 = self.tb3(self.maxpool(data2))
data4 = self.tb4(self.maxpool(data3))
data5 = self.tb5(self.maxpool(data4))
data5 = self.db5(data5)
data4 = self.db4(torch.cat([data4, nn.Upsample(size=(data4.shape[2], data4.shape[3]))(data5)], dim=1))
data3 = self.db3(torch.cat([data3, nn.Upsample(size=(data3.shape[2], data3.shape[3]))(data4)], dim=1))
data2 = self.db2(torch.cat([data2, nn.Upsample(size=(data2.shape[2], data2.shape[3]))(data3)], dim=1))
data1 = self.db1(torch.cat([data1, nn.Upsample(size=(data1.shape[2], data1.shape[3]))(data2)], dim=1))
return self.endLayer(data1)
#load the trained model and move it to the CPU for inference
model = torch.load("model_unet1d.pkl").cpu()
import numpy as np
#load train and val data
#input sinograms with noise
noised_sin = torch.from_numpy(np.load("data/noised_sin.npy")).unsqueeze(1)
#filtered sinograms without noise
filtered_sin = torch.from_numpy(np.load("data/clear_sin.npy")).unsqueeze(1)
#groundtruth phantoms
phantoms = torch.from_numpy(np.load("data/phantoms.npy")).unsqueeze(1)
import odl
#define radon scheme
detectors = 183
angles = 128
angles_parallel = np.linspace(0, 180, angles, False)
reco_space = odl.uniform_discr(min_pt=[-20,-20], max_pt=[20,20], shape=[128, 128], dtype='float32')
phantom = odl.phantom.shepp_logan(reco_space, modified=True)
import math
l = 40 * math.sqrt(2)
angle_partition = odl.uniform_partition(-np.pi / 2, np.pi / 2, angles)
detector_partition = odl.uniform_partition(-l / 2, l / 2, detectors)
geometry = odl.tomo.Parallel2dGeometry(angle_partition, detector_partition)
ray_trafo = odl.tomo.RayTransform(reco_space, geometry, impl="astra_cuda")
def ramp_op(ray_trafo):
fourier = odl.trafos.FourierTransform(ray_trafo.range, axes=[1])
# Create ramp in the detector direction
ramp_function = fourier.range.element(lambda x: np.abs(x[1]) / (2 * np.pi))
# Create ramp filter via the convolution formula with fourier transforms
ramp_filter = fourier.inverse * ramp_function * fourier
return ramp_filter
ramp = ramp_op(ray_trafo)
test_data_idx = 1000
inp = noised_sin[test_data_idx:test_data_idx+1]
f_sin = filtered_sin[test_data_idx]
groundtruth = phantoms[test_data_idx, 0].numpy()
#plot and measure experiments
import matplotlib.pyplot as plt
fig, axs = plt.subplots(2, 3)
fig.set_figheight(15)
fig.set_figwidth(15)
proposed_rec = ray_trafo.adjoint(model(inp).detach().numpy()[0,0]).data
proposed_rec *= (proposed_rec > 0)
fbp_rec = ray_trafo.adjoint(ramp(inp[0,0])).data
fbp_rec *= (fbp_rec > 0)
proposed_diff = np.abs(proposed_rec - groundtruth)
fbp_diff = np.abs(fbp_rec - groundtruth)
# diff_max = max(np.max(proposed_diff), np.max(fbp_diff))
# proposed_diff /= diff_max
# fbp_diff /= diff_max
#show phantom
im_ph = axs[0,0].imshow(groundtruth, cmap='gray')
axs[0,0].set_title('a) Phantom')
#show fbp reconstruction
axs[0,1].imshow(fbp_rec, cmap='gray')
axs[0,1].set_title('b) FBP')
axs[0,1].axhline(y=64, color='orange', ls='--')
#show reconstruction of proposed models
axs[0,2].imshow(proposed_rec, cmap='gray')
axs[0,2].set_title('c) UNet1D')
axs[0,2].axhline(y=64, color='blue', ls='--')
#show diff slice
# axs[1, 2].plot(groundtruth[64], label='Phantom')
axs[1, 0].plot(proposed_rec[64], '-', label='UNet1D', color='blue')
axs[1, 0].plot(fbp_rec[64], '--', label='FBP', color='orange')
axs[1, 0].set_title('d) Reconstruction slice from FBP and UNet1D')
axs[1, 0].grid()
axs[1, 0].legend()
#diff fbp to groundtruth
axs[1,1].imshow(fbp_diff, vmax=groundtruth.max(), vmin=0, cmap='gray')
axs[1,1].set_title('e) Difference between FBP and the phantom')
#diff proposed to groundtruth
axs[1,2].imshow(proposed_diff, vmax=groundtruth.max(), vmin=0, cmap='gray')
axs[1,2].set_title('f) Difference between UNet1D and the phantom')
fig.subplots_adjust(right=0.9)
cbar_ax = fig.add_axes([0.91, 0.53, 0.02, 0.35])
fig.colorbar(im_ph, cax=cbar_ax)
plt.show() | __init__ |
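# --- measurement sketch ---
# The figure above only visualises the reconstructions; the snippet below is a
# minimal, hedged sketch that quantifies them with MSE/PSNR against the
# groundtruth phantom. The helper name `psnr` and the peak value taken from the
# phantom's dynamic range are assumptions, not part of the original pipeline.
def psnr(rec, ref):
    mse = np.mean((rec - ref) ** 2)
    if mse == 0:
        return float("inf")
    peak = ref.max() - ref.min()
    return 10 * np.log10(peak ** 2 / mse)

print("FBP    PSNR: %.2f dB" % psnr(fbp_rec, groundtruth))
print("UNet1D PSNR: %.2f dB" % psnr(proposed_rec, groundtruth))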
retrylayer.go | // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
// Code generated by "make store-layers"
// DO NOT EDIT
package retrylayer
import (
"context"
"github.com/go-sql-driver/mysql"
"github.com/lib/pq"
"github.com/mattermost/mattermost-server/v6/model"
"github.com/mattermost/mattermost-server/v6/store"
"github.com/pkg/errors"
)
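// MySQL error 1213 (ER_LOCK_DEADLOCK): "Deadlock found when trying to get lock"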
const mySQLDeadlockCode = uint16(1213)
type RetryLayer struct {
store.Store
AuditStore store.AuditStore
BotStore store.BotStore
ChannelStore store.ChannelStore
ChannelMemberHistoryStore store.ChannelMemberHistoryStore
ClusterDiscoveryStore store.ClusterDiscoveryStore
CommandStore store.CommandStore
CommandWebhookStore store.CommandWebhookStore
ComplianceStore store.ComplianceStore
EmojiStore store.EmojiStore
FileInfoStore store.FileInfoStore
GroupStore store.GroupStore
JobStore store.JobStore
LicenseStore store.LicenseStore
LinkMetadataStore store.LinkMetadataStore
OAuthStore store.OAuthStore
PluginStore store.PluginStore
PostStore store.PostStore
PreferenceStore store.PreferenceStore
ProductNoticesStore store.ProductNoticesStore
ReactionStore store.ReactionStore
RemoteClusterStore store.RemoteClusterStore
RetentionPolicyStore store.RetentionPolicyStore
RoleStore store.RoleStore
SchemeStore store.SchemeStore
SessionStore store.SessionStore
SharedChannelStore store.SharedChannelStore
StatusStore store.StatusStore
SystemStore store.SystemStore
TeamStore store.TeamStore
TermsOfServiceStore store.TermsOfServiceStore
ThreadStore store.ThreadStore
TokenStore store.TokenStore
UploadSessionStore store.UploadSessionStore
UserStore store.UserStore
UserAccessTokenStore store.UserAccessTokenStore
UserTermsOfServiceStore store.UserTermsOfServiceStore
WebhookStore store.WebhookStore
}
func (s *RetryLayer) Audit() store.AuditStore {
return s.AuditStore
}
func (s *RetryLayer) Bot() store.BotStore {
return s.BotStore
}
func (s *RetryLayer) Channel() store.ChannelStore {
return s.ChannelStore
}
func (s *RetryLayer) ChannelMemberHistory() store.ChannelMemberHistoryStore {
return s.ChannelMemberHistoryStore
}
func (s *RetryLayer) ClusterDiscovery() store.ClusterDiscoveryStore {
return s.ClusterDiscoveryStore
}
func (s *RetryLayer) Command() store.CommandStore {
return s.CommandStore
}
func (s *RetryLayer) CommandWebhook() store.CommandWebhookStore {
return s.CommandWebhookStore
}
func (s *RetryLayer) Compliance() store.ComplianceStore {
return s.ComplianceStore
}
func (s *RetryLayer) Emoji() store.EmojiStore {
return s.EmojiStore
}
func (s *RetryLayer) FileInfo() store.FileInfoStore {
return s.FileInfoStore
}
func (s *RetryLayer) Group() store.GroupStore {
return s.GroupStore
}
func (s *RetryLayer) Job() store.JobStore {
return s.JobStore
}
func (s *RetryLayer) License() store.LicenseStore {
return s.LicenseStore
}
func (s *RetryLayer) LinkMetadata() store.LinkMetadataStore {
return s.LinkMetadataStore
}
func (s *RetryLayer) OAuth() store.OAuthStore {
return s.OAuthStore
}
func (s *RetryLayer) Plugin() store.PluginStore {
return s.PluginStore
}
func (s *RetryLayer) Post() store.PostStore {
return s.PostStore
}
func (s *RetryLayer) Preference() store.PreferenceStore {
return s.PreferenceStore
}
func (s *RetryLayer) ProductNotices() store.ProductNoticesStore {
return s.ProductNoticesStore
}
func (s *RetryLayer) Reaction() store.ReactionStore {
return s.ReactionStore
}
func (s *RetryLayer) RemoteCluster() store.RemoteClusterStore {
return s.RemoteClusterStore
}
func (s *RetryLayer) RetentionPolicy() store.RetentionPolicyStore {
return s.RetentionPolicyStore
}
func (s *RetryLayer) Role() store.RoleStore {
return s.RoleStore
}
func (s *RetryLayer) Scheme() store.SchemeStore {
return s.SchemeStore
}
func (s *RetryLayer) Session() store.SessionStore {
return s.SessionStore
}
func (s *RetryLayer) SharedChannel() store.SharedChannelStore {
return s.SharedChannelStore
}
func (s *RetryLayer) Status() store.StatusStore {
return s.StatusStore
}
func (s *RetryLayer) System() store.SystemStore {
return s.SystemStore
}
func (s *RetryLayer) Team() store.TeamStore {
return s.TeamStore
}
func (s *RetryLayer) TermsOfService() store.TermsOfServiceStore {
return s.TermsOfServiceStore
}
func (s *RetryLayer) Thread() store.ThreadStore {
return s.ThreadStore
}
func (s *RetryLayer) Token() store.TokenStore {
return s.TokenStore
}
func (s *RetryLayer) UploadSession() store.UploadSessionStore {
return s.UploadSessionStore
}
func (s *RetryLayer) User() store.UserStore {
return s.UserStore
}
func (s *RetryLayer) UserAccessToken() store.UserAccessTokenStore {
return s.UserAccessTokenStore
}
func (s *RetryLayer) UserTermsOfService() store.UserTermsOfServiceStore {
return s.UserTermsOfServiceStore
}
func (s *RetryLayer) Webhook() store.WebhookStore {
return s.WebhookStore
}
type RetryLayerAuditStore struct {
store.AuditStore
Root *RetryLayer
}
type RetryLayerBotStore struct {
store.BotStore
Root *RetryLayer
}
type RetryLayerChannelStore struct {
store.ChannelStore
Root *RetryLayer
}
type RetryLayerChannelMemberHistoryStore struct {
store.ChannelMemberHistoryStore
Root *RetryLayer
}
type RetryLayerClusterDiscoveryStore struct {
store.ClusterDiscoveryStore
Root *RetryLayer
}
type RetryLayerCommandStore struct {
store.CommandStore
Root *RetryLayer
}
type RetryLayerCommandWebhookStore struct {
store.CommandWebhookStore
Root *RetryLayer
}
type RetryLayerComplianceStore struct {
store.ComplianceStore
Root *RetryLayer
}
type RetryLayerEmojiStore struct {
store.EmojiStore
Root *RetryLayer
}
type RetryLayerFileInfoStore struct {
store.FileInfoStore
Root *RetryLayer
}
type RetryLayerGroupStore struct {
store.GroupStore
Root *RetryLayer
}
type RetryLayerJobStore struct {
store.JobStore
Root *RetryLayer
}
type RetryLayerLicenseStore struct {
store.LicenseStore
Root *RetryLayer
}
type RetryLayerLinkMetadataStore struct {
store.LinkMetadataStore
Root *RetryLayer
}
type RetryLayerOAuthStore struct {
store.OAuthStore
Root *RetryLayer
}
type RetryLayerPluginStore struct {
store.PluginStore
Root *RetryLayer
}
type RetryLayerPostStore struct {
store.PostStore
Root *RetryLayer
}
type RetryLayerPreferenceStore struct {
store.PreferenceStore
Root *RetryLayer
}
type RetryLayerProductNoticesStore struct {
store.ProductNoticesStore
Root *RetryLayer
}
type RetryLayerReactionStore struct {
store.ReactionStore
Root *RetryLayer
}
type RetryLayerRemoteClusterStore struct {
store.RemoteClusterStore
Root *RetryLayer
}
type RetryLayerRetentionPolicyStore struct {
store.RetentionPolicyStore
Root *RetryLayer
}
type RetryLayerRoleStore struct {
store.RoleStore
Root *RetryLayer
}
type RetryLayerSchemeStore struct {
store.SchemeStore
Root *RetryLayer
}
type RetryLayerSessionStore struct {
store.SessionStore
Root *RetryLayer
}
type RetryLayerSharedChannelStore struct {
store.SharedChannelStore
Root *RetryLayer
}
type RetryLayerStatusStore struct {
store.StatusStore
Root *RetryLayer
}
type RetryLayerSystemStore struct {
store.SystemStore
Root *RetryLayer
}
type RetryLayerTeamStore struct {
store.TeamStore
Root *RetryLayer
}
type RetryLayerTermsOfServiceStore struct {
store.TermsOfServiceStore
Root *RetryLayer
}
type RetryLayerThreadStore struct {
store.ThreadStore
Root *RetryLayer
}
type RetryLayerTokenStore struct {
store.TokenStore
Root *RetryLayer
}
type RetryLayerUploadSessionStore struct {
store.UploadSessionStore
Root *RetryLayer
}
type RetryLayerUserStore struct {
store.UserStore
Root *RetryLayer
}
type RetryLayerUserAccessTokenStore struct {
store.UserAccessTokenStore
Root *RetryLayer
}
type RetryLayerUserTermsOfServiceStore struct {
store.UserTermsOfServiceStore
Root *RetryLayer
}
type RetryLayerWebhookStore struct {
store.WebhookStore
Root *RetryLayer
}
func isRepeatableError(err error) bool {
var pqErr *pq.Error
var mysqlErr *mysql.MySQLError
switch {
case errors.As(errors.Cause(err), &pqErr):
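		// PostgreSQL class 40 errors: 40001 = serialization_failure, 40P01 = deadlock_detected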
if pqErr.Code == "40001" || pqErr.Code == "40P01" {
return true
}
case errors.As(errors.Cause(err), &mysqlErr):
if mysqlErr.Number == mySQLDeadlockCode {
return true
}
}
return false
}
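// Every wrapper below repeats the same retry contract: call the underlying
// store, return immediately on success or on a non-repeatable error, and give
// up after three consecutive repeatable (serialization/deadlock) failures.
// As a hedged sketch only, not part of this generated file, the loop could be
// factored into a helper such as:
//
//	func withRetry(op func() error) error {
//		tries := 0
//		for {
//			err := op()
//			if err == nil || !isRepeatableError(err) {
//				return err
//			}
//			tries++
//			if tries >= 3 {
//				return errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
//			}
//		}
//	}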
func (s *RetryLayerAuditStore) Get(user_id string, offset int, limit int) (model.Audits, error) {
tries := 0
for {
result, err := s.AuditStore.Get(user_id, offset, limit)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerAuditStore) PermanentDeleteByUser(userID string) error {
tries := 0
for {
err := s.AuditStore.PermanentDeleteByUser(userID)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerAuditStore) Save(audit *model.Audit) error {
tries := 0
for {
err := s.AuditStore.Save(audit)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerBotStore) Get(userID string, includeDeleted bool) (*model.Bot, error) {
tries := 0
for {
result, err := s.BotStore.Get(userID, includeDeleted)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerBotStore) GetAll(options *model.BotGetOptions) ([]*model.Bot, error) {
tries := 0
for {
result, err := s.BotStore.GetAll(options)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerBotStore) PermanentDelete(userID string) error {
tries := 0
for {
err := s.BotStore.PermanentDelete(userID)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerBotStore) Save(bot *model.Bot) (*model.Bot, error) {
tries := 0
for {
result, err := s.BotStore.Save(bot)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerBotStore) Update(bot *model.Bot) (*model.Bot, error) {
tries := 0
for {
result, err := s.BotStore.Update(bot)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) AnalyticsDeletedTypeCount(teamID string, channelType string) (int64, error) {
tries := 0
for {
result, err := s.ChannelStore.AnalyticsDeletedTypeCount(teamID, channelType)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) AnalyticsTypeCount(teamID string, channelType model.ChannelType) (int64, error) {
tries := 0
for {
result, err := s.ChannelStore.AnalyticsTypeCount(teamID, channelType)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) AutocompleteInTeam(teamID string, term string, includeDeleted bool) (*model.ChannelList, error) {
tries := 0
for {
result, err := s.ChannelStore.AutocompleteInTeam(teamID, term, includeDeleted)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) AutocompleteInTeamForSearch(teamID string, userID string, term string, includeDeleted bool) (*model.ChannelList, error) {
tries := 0
for {
result, err := s.ChannelStore.AutocompleteInTeamForSearch(teamID, userID, term, includeDeleted)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) ClearAllCustomRoleAssignments() error {
tries := 0
for {
err := s.ChannelStore.ClearAllCustomRoleAssignments()
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerChannelStore) ClearCaches() {
s.ChannelStore.ClearCaches()
}
func (s *RetryLayerChannelStore) ClearSidebarOnTeamLeave(userID string, teamID string) error {
tries := 0
for {
err := s.ChannelStore.ClearSidebarOnTeamLeave(userID, teamID)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerChannelStore) CountPostsAfter(channelID string, timestamp int64, userID string) (int, int, error) {
tries := 0
for {
result, resultVar1, err := s.ChannelStore.CountPostsAfter(channelID, timestamp, userID)
if err == nil {
return result, resultVar1, nil
}
if !isRepeatableError(err) {
return result, resultVar1, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, resultVar1, err
}
}
}
func (s *RetryLayerChannelStore) CreateDirectChannel(userID *model.User, otherUserID *model.User, channelOptions ...model.ChannelOption) (*model.Channel, error) {
tries := 0
for {
result, err := s.ChannelStore.CreateDirectChannel(userID, otherUserID, channelOptions...)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) CreateInitialSidebarCategories(userID string, teamID string) (*model.OrderedSidebarCategories, error) {
tries := 0
for {
result, err := s.ChannelStore.CreateInitialSidebarCategories(userID, teamID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) CreateSidebarCategory(userID string, teamID string, newCategory *model.SidebarCategoryWithChannels) (*model.SidebarCategoryWithChannels, error) {
tries := 0
for {
result, err := s.ChannelStore.CreateSidebarCategory(userID, teamID, newCategory)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) Delete(channelID string, time int64) error {
tries := 0
for {
err := s.ChannelStore.Delete(channelID, time)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerChannelStore) DeleteSidebarCategory(categoryID string) error {
tries := 0
for {
err := s.ChannelStore.DeleteSidebarCategory(categoryID)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerChannelStore) DeleteSidebarChannelsByPreferences(preferences *model.Preferences) error {
tries := 0
for {
err := s.ChannelStore.DeleteSidebarChannelsByPreferences(preferences)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerChannelStore) Get(id string, allowFromCache bool) (*model.Channel, error) {
tries := 0
for {
result, err := s.ChannelStore.Get(id, allowFromCache)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) GetAll(teamID string) ([]*model.Channel, error) {
tries := 0
for {
result, err := s.ChannelStore.GetAll(teamID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) GetAllChannelMembersForUser(userID string, allowFromCache bool, includeDeleted bool) (map[string]string, error) {
tries := 0
for {
result, err := s.ChannelStore.GetAllChannelMembersForUser(userID, allowFromCache, includeDeleted)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) GetAllChannelMembersNotifyPropsForChannel(channelID string, allowFromCache bool) (map[string]model.StringMap, error) {
tries := 0
for {
result, err := s.ChannelStore.GetAllChannelMembersNotifyPropsForChannel(channelID, allowFromCache)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) GetAllChannels(page int, perPage int, opts store.ChannelSearchOpts) (*model.ChannelListWithTeamData, error) {
tries := 0
for {
result, err := s.ChannelStore.GetAllChannels(page, perPage, opts)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) GetAllChannelsCount(opts store.ChannelSearchOpts) (int64, error) {
tries := 0
for {
result, err := s.ChannelStore.GetAllChannelsCount(opts)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) GetAllChannelsForExportAfter(limit int, afterID string) ([]*model.ChannelForExport, error) {
tries := 0
for {
result, err := s.ChannelStore.GetAllChannelsForExportAfter(limit, afterID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) GetAllDirectChannelsForExportAfter(limit int, afterID string) ([]*model.DirectChannelForExport, error) {
tries := 0
for {
result, err := s.ChannelStore.GetAllDirectChannelsForExportAfter(limit, afterID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) GetByName(team_id string, name string, allowFromCache bool) (*model.Channel, error) {
tries := 0
for {
result, err := s.ChannelStore.GetByName(team_id, name, allowFromCache)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) GetByNameIncludeDeleted(team_id string, name string, allowFromCache bool) (*model.Channel, error) {
tries := 0
for {
result, err := s.ChannelStore.GetByNameIncludeDeleted(team_id, name, allowFromCache)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) GetByNames(team_id string, names []string, allowFromCache bool) ([]*model.Channel, error) {
tries := 0
for {
result, err := s.ChannelStore.GetByNames(team_id, names, allowFromCache)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) GetChannelCounts(teamID string, userID string) (*model.ChannelCounts, error) {
tries := 0
for {
result, err := s.ChannelStore.GetChannelCounts(teamID, userID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) GetChannelMembersForExport(userID string, teamID string) ([]*model.ChannelMemberForExport, error) {
tries := 0
for {
result, err := s.ChannelStore.GetChannelMembersForExport(userID, teamID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) GetChannelMembersTimezones(channelID string) ([]model.StringMap, error) {
tries := 0
for {
result, err := s.ChannelStore.GetChannelMembersTimezones(channelID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
		if tries >= 3 {
			err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
			return result, err
		}
	}
}
func (s *RetryLayerChannelStore) GetChannelUnread(channelID string, userID string) (*model.ChannelUnread, error) {
tries := 0
for {
result, err := s.ChannelStore.GetChannelUnread(channelID, userID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) GetChannels(teamID string, userID string, includeDeleted bool, lastDeleteAt int) (*model.ChannelList, error) {
tries := 0
for {
result, err := s.ChannelStore.GetChannels(teamID, userID, includeDeleted, lastDeleteAt)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) GetChannelsBatchForIndexing(startTime int64, endTime int64, limit int) ([]*model.Channel, error) {
tries := 0
for {
result, err := s.ChannelStore.GetChannelsBatchForIndexing(startTime, endTime, limit)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) GetChannelsByIds(channelIds []string, includeDeleted bool) ([]*model.Channel, error) {
tries := 0
for {
result, err := s.ChannelStore.GetChannelsByIds(channelIds, includeDeleted)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) GetChannelsByScheme(schemeID string, offset int, limit int) (model.ChannelList, error) {
tries := 0
for {
result, err := s.ChannelStore.GetChannelsByScheme(schemeID, offset, limit)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) GetDeleted(team_id string, offset int, limit int, userID string) (*model.ChannelList, error) {
tries := 0
for {
result, err := s.ChannelStore.GetDeleted(team_id, offset, limit, userID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) GetDeletedByName(team_id string, name string) (*model.Channel, error) {
tries := 0
for {
result, err := s.ChannelStore.GetDeletedByName(team_id, name)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) GetForPost(postID string) (*model.Channel, error) {
tries := 0
for {
result, err := s.ChannelStore.GetForPost(postID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) GetFromMaster(id string) (*model.Channel, error) {
tries := 0
for {
result, err := s.ChannelStore.GetFromMaster(id)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) GetGuestCount(channelID string, allowFromCache bool) (int64, error) {
tries := 0
for {
result, err := s.ChannelStore.GetGuestCount(channelID, allowFromCache)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) GetMember(ctx context.Context, channelID string, userID string) (*model.ChannelMember, error) {
tries := 0
for {
result, err := s.ChannelStore.GetMember(ctx, channelID, userID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) GetMemberCount(channelID string, allowFromCache bool) (int64, error) {
tries := 0
for {
result, err := s.ChannelStore.GetMemberCount(channelID, allowFromCache)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) GetMemberCountFromCache(channelID string) int64 {
return s.ChannelStore.GetMemberCountFromCache(channelID)
}
func (s *RetryLayerChannelStore) GetMemberCountsByGroup(ctx context.Context, channelID string, includeTimezones bool) ([]*model.ChannelMemberCountByGroup, error) {
tries := 0
for {
result, err := s.ChannelStore.GetMemberCountsByGroup(ctx, channelID, includeTimezones)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) GetMemberForPost(postID string, userID string) (*model.ChannelMember, error) {
tries := 0
for {
result, err := s.ChannelStore.GetMemberForPost(postID, userID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) GetMembers(channelID string, offset int, limit int) (*model.ChannelMembers, error) {
tries := 0
for {
result, err := s.ChannelStore.GetMembers(channelID, offset, limit)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) GetMembersByChannelIds(channelIds []string, userID string) (*model.ChannelMembers, error) {
tries := 0
for {
result, err := s.ChannelStore.GetMembersByChannelIds(channelIds, userID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) GetMembersByIds(channelID string, userIds []string) (*model.ChannelMembers, error) {
tries := 0
for {
result, err := s.ChannelStore.GetMembersByIds(channelID, userIds)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) GetMembersForUser(teamID string, userID string) (*model.ChannelMembers, error) {
tries := 0
for {
result, err := s.ChannelStore.GetMembersForUser(teamID, userID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) GetMembersForUserWithPagination(teamID string, userID string, page int, perPage int) (*model.ChannelMembers, error) {
tries := 0
for {
result, err := s.ChannelStore.GetMembersForUserWithPagination(teamID, userID, page, perPage)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) GetMoreChannels(teamID string, userID string, offset int, limit int) (*model.ChannelList, error) {
tries := 0
for {
result, err := s.ChannelStore.GetMoreChannels(teamID, userID, offset, limit)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) GetPinnedPostCount(channelID string, allowFromCache bool) (int64, error) {
tries := 0
for {
result, err := s.ChannelStore.GetPinnedPostCount(channelID, allowFromCache)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) GetPinnedPosts(channelID string) (*model.PostList, error) {
tries := 0
for {
result, err := s.ChannelStore.GetPinnedPosts(channelID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) GetPrivateChannelsForTeam(teamID string, offset int, limit int) (*model.ChannelList, error) {
tries := 0
for {
result, err := s.ChannelStore.GetPrivateChannelsForTeam(teamID, offset, limit)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) GetPublicChannelsByIdsForTeam(teamID string, channelIds []string) (*model.ChannelList, error) {
tries := 0
for {
result, err := s.ChannelStore.GetPublicChannelsByIdsForTeam(teamID, channelIds)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) GetPublicChannelsForTeam(teamID string, offset int, limit int) (*model.ChannelList, error) {
tries := 0
for {
result, err := s.ChannelStore.GetPublicChannelsForTeam(teamID, offset, limit)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) GetSidebarCategories(userID string, teamID string) (*model.OrderedSidebarCategories, error) {
tries := 0
for {
result, err := s.ChannelStore.GetSidebarCategories(userID, teamID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) GetSidebarCategory(categoryID string) (*model.SidebarCategoryWithChannels, error) {
tries := 0
for {
result, err := s.ChannelStore.GetSidebarCategory(categoryID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) GetSidebarCategoryOrder(userID string, teamID string) ([]string, error) {
tries := 0
for {
result, err := s.ChannelStore.GetSidebarCategoryOrder(userID, teamID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) GetTeamChannels(teamID string) (*model.ChannelList, error) {
tries := 0
for {
result, err := s.ChannelStore.GetTeamChannels(teamID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) GetTeamForChannel(channelID string) (*model.Team, error) {
tries := 0
for {
result, err := s.ChannelStore.GetTeamForChannel(channelID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) GroupSyncedChannelCount() (int64, error) {
tries := 0
for {
result, err := s.ChannelStore.GroupSyncedChannelCount()
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) IncrementMentionCount(channelID string, userID string, updateThreads bool, isRoot bool) error {
tries := 0
for {
err := s.ChannelStore.IncrementMentionCount(channelID, userID, updateThreads, isRoot)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerChannelStore) InvalidateAllChannelMembersForUser(userID string) {
s.ChannelStore.InvalidateAllChannelMembersForUser(userID)
}
func (s *RetryLayerChannelStore) InvalidateCacheForChannelMembersNotifyProps(channelID string) {
s.ChannelStore.InvalidateCacheForChannelMembersNotifyProps(channelID)
}
func (s *RetryLayerChannelStore) InvalidateChannel(id string) {
s.ChannelStore.InvalidateChannel(id)
}
func (s *RetryLayerChannelStore) InvalidateChannelByName(teamID string, name string) {
s.ChannelStore.InvalidateChannelByName(teamID, name)
}
func (s *RetryLayerChannelStore) InvalidateGuestCount(channelID string) {
s.ChannelStore.InvalidateGuestCount(channelID)
}
func (s *RetryLayerChannelStore) InvalidateMemberCount(channelID string) {
s.ChannelStore.InvalidateMemberCount(channelID)
}
func (s *RetryLayerChannelStore) InvalidatePinnedPostCount(channelID string) {
s.ChannelStore.InvalidatePinnedPostCount(channelID)
}
func (s *RetryLayerChannelStore) IsUserInChannelUseCache(userID string, channelID string) bool {
return s.ChannelStore.IsUserInChannelUseCache(userID, channelID)
}
func (s *RetryLayerChannelStore) MigrateChannelMembers(fromChannelID string, fromUserID string) (map[string]string, error) {
tries := 0
for {
result, err := s.ChannelStore.MigrateChannelMembers(fromChannelID, fromUserID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) MigratePublicChannels() error {
tries := 0
for {
err := s.ChannelStore.MigratePublicChannels()
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerChannelStore) PermanentDelete(channelID string) error {
tries := 0
for {
err := s.ChannelStore.PermanentDelete(channelID)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerChannelStore) PermanentDeleteByTeam(teamID string) error {
tries := 0
for {
err := s.ChannelStore.PermanentDeleteByTeam(teamID)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerChannelStore) PermanentDeleteMembersByChannel(channelID string) error {
tries := 0
for {
err := s.ChannelStore.PermanentDeleteMembersByChannel(channelID)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerChannelStore) PermanentDeleteMembersByUser(userID string) error {
tries := 0
for {
err := s.ChannelStore.PermanentDeleteMembersByUser(userID)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerChannelStore) RemoveAllDeactivatedMembers(channelID string) error {
tries := 0
for {
err := s.ChannelStore.RemoveAllDeactivatedMembers(channelID)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerChannelStore) RemoveMember(channelID string, userID string) error {
tries := 0
for {
err := s.ChannelStore.RemoveMember(channelID, userID)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerChannelStore) RemoveMembers(channelID string, userIds []string) error {
tries := 0
for {
err := s.ChannelStore.RemoveMembers(channelID, userIds)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerChannelStore) ResetAllChannelSchemes() error {
tries := 0
for {
err := s.ChannelStore.ResetAllChannelSchemes()
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerChannelStore) Restore(channelID string, time int64) error {
tries := 0
for {
err := s.ChannelStore.Restore(channelID, time)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerChannelStore) Save(channel *model.Channel, maxChannelsPerTeam int64) (*model.Channel, error) {
tries := 0
for {
result, err := s.ChannelStore.Save(channel, maxChannelsPerTeam)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) SaveDirectChannel(channel *model.Channel, member1 *model.ChannelMember, member2 *model.ChannelMember) (*model.Channel, error) {
tries := 0
for {
result, err := s.ChannelStore.SaveDirectChannel(channel, member1, member2)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) SaveMember(member *model.ChannelMember) (*model.ChannelMember, error) {
tries := 0
for {
result, err := s.ChannelStore.SaveMember(member)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) SaveMultipleMembers(members []*model.ChannelMember) ([]*model.ChannelMember, error) {
tries := 0
for {
result, err := s.ChannelStore.SaveMultipleMembers(members)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) SearchAllChannels(term string, opts store.ChannelSearchOpts) (*model.ChannelListWithTeamData, int64, error) {
tries := 0
for {
result, resultVar1, err := s.ChannelStore.SearchAllChannels(term, opts)
if err == nil {
return result, resultVar1, nil
}
if !isRepeatableError(err) {
return result, resultVar1, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, resultVar1, err
}
}
}
func (s *RetryLayerChannelStore) SearchArchivedInTeam(teamID string, term string, userID string) (*model.ChannelList, error) {
tries := 0
for {
result, err := s.ChannelStore.SearchArchivedInTeam(teamID, term, userID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) SearchForUserInTeam(userID string, teamID string, term string, includeDeleted bool) (*model.ChannelList, error) {
tries := 0
for {
result, err := s.ChannelStore.SearchForUserInTeam(userID, teamID, term, includeDeleted)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) SearchGroupChannels(userID string, term string) (*model.ChannelList, error) {
tries := 0
for {
result, err := s.ChannelStore.SearchGroupChannels(userID, term)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) SearchInTeam(teamID string, term string, includeDeleted bool) (*model.ChannelList, error) {
tries := 0
for {
result, err := s.ChannelStore.SearchInTeam(teamID, term, includeDeleted)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) SearchMore(userID string, teamID string, term string) (*model.ChannelList, error) {
tries := 0
for {
result, err := s.ChannelStore.SearchMore(userID, teamID, term)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) SetDeleteAt(channelID string, deleteAt int64, updateAt int64) error {
tries := 0
for {
err := s.ChannelStore.SetDeleteAt(channelID, deleteAt, updateAt)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerChannelStore) SetShared(channelId string, shared bool) error {
tries := 0
for {
err := s.ChannelStore.SetShared(channelId, shared)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerChannelStore) Update(channel *model.Channel) (*model.Channel, error) {
tries := 0
for {
result, err := s.ChannelStore.Update(channel)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) UpdateLastViewedAt(channelIds []string, userID string, updateThreads bool) (map[string]int64, error) {
tries := 0
for {
result, err := s.ChannelStore.UpdateLastViewedAt(channelIds, userID, updateThreads)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) UpdateLastViewedAtPost(unreadPost *model.Post, userID string, mentionCount int, mentionCountRoot int, updateThreads bool, setUnreadCountRoot bool) (*model.ChannelUnreadAt, error) {
tries := 0
for {
result, err := s.ChannelStore.UpdateLastViewedAtPost(unreadPost, userID, mentionCount, mentionCountRoot, updateThreads, setUnreadCountRoot)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) UpdateMember(member *model.ChannelMember) (*model.ChannelMember, error) {
tries := 0
for {
result, err := s.ChannelStore.UpdateMember(member)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) UpdateMembersRole(channelID string, userIDs []string) error {
tries := 0
for {
err := s.ChannelStore.UpdateMembersRole(channelID, userIDs)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerChannelStore) UpdateMultipleMembers(members []*model.ChannelMember) ([]*model.ChannelMember, error) {
tries := 0
for {
result, err := s.ChannelStore.UpdateMultipleMembers(members)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelStore) UpdateSidebarCategories(userID string, teamID string, categories []*model.SidebarCategoryWithChannels) ([]*model.SidebarCategoryWithChannels, []*model.SidebarCategoryWithChannels, error) {
tries := 0
for {
result, resultVar1, err := s.ChannelStore.UpdateSidebarCategories(userID, teamID, categories)
if err == nil {
return result, resultVar1, nil
}
if !isRepeatableError(err) {
return result, resultVar1, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, resultVar1, err
}
}
}
func (s *RetryLayerChannelStore) UpdateSidebarCategoryOrder(userID string, teamID string, categoryOrder []string) error {
tries := 0
for {
err := s.ChannelStore.UpdateSidebarCategoryOrder(userID, teamID, categoryOrder)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerChannelStore) UpdateSidebarChannelCategoryOnMove(channel *model.Channel, newTeamID string) error {
tries := 0
for {
err := s.ChannelStore.UpdateSidebarChannelCategoryOnMove(channel, newTeamID)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerChannelStore) UpdateSidebarChannelsByPreferences(preferences *model.Preferences) error {
tries := 0
for {
err := s.ChannelStore.UpdateSidebarChannelsByPreferences(preferences)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerChannelStore) UserBelongsToChannels(userID string, channelIds []string) (bool, error) {
tries := 0
for {
result, err := s.ChannelStore.UserBelongsToChannels(userID, channelIds)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelMemberHistoryStore) DeleteOrphanedRows(limit int) (int64, error) {
tries := 0
for {
result, err := s.ChannelMemberHistoryStore.DeleteOrphanedRows(limit)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelMemberHistoryStore) GetUsersInChannelDuring(startTime int64, endTime int64, channelID string) ([]*model.ChannelMemberHistoryResult, error) {
tries := 0
for {
result, err := s.ChannelMemberHistoryStore.GetUsersInChannelDuring(startTime, endTime, channelID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelMemberHistoryStore) LogJoinEvent(userID string, channelID string, joinTime int64) error {
tries := 0
for {
err := s.ChannelMemberHistoryStore.LogJoinEvent(userID, channelID, joinTime)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerChannelMemberHistoryStore) LogLeaveEvent(userID string, channelID string, leaveTime int64) error {
tries := 0
for {
err := s.ChannelMemberHistoryStore.LogLeaveEvent(userID, channelID, leaveTime)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerChannelMemberHistoryStore) PermanentDeleteBatch(endTime int64, limit int64) (int64, error) {
tries := 0
for {
result, err := s.ChannelMemberHistoryStore.PermanentDeleteBatch(endTime, limit)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerChannelMemberHistoryStore) PermanentDeleteBatchForRetentionPolicies(now int64, globalPolicyEndTime int64, limit int64, cursor model.RetentionPolicyCursor) (int64, model.RetentionPolicyCursor, error) {
tries := 0
for {
result, resultVar1, err := s.ChannelMemberHistoryStore.PermanentDeleteBatchForRetentionPolicies(now, globalPolicyEndTime, limit, cursor)
if err == nil {
return result, resultVar1, nil
}
if !isRepeatableError(err) {
return result, resultVar1, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, resultVar1, err
}
}
}
func (s *RetryLayerClusterDiscoveryStore) Cleanup() error {
tries := 0
for {
err := s.ClusterDiscoveryStore.Cleanup()
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerClusterDiscoveryStore) Delete(discovery *model.ClusterDiscovery) (bool, error) {
tries := 0
for {
result, err := s.ClusterDiscoveryStore.Delete(discovery)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerClusterDiscoveryStore) Exists(discovery *model.ClusterDiscovery) (bool, error) {
tries := 0
for {
result, err := s.ClusterDiscoveryStore.Exists(discovery)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerClusterDiscoveryStore) GetAll(discoveryType string, clusterName string) ([]*model.ClusterDiscovery, error) {
tries := 0
for {
result, err := s.ClusterDiscoveryStore.GetAll(discoveryType, clusterName)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerClusterDiscoveryStore) Save(discovery *model.ClusterDiscovery) error {
tries := 0
for {
err := s.ClusterDiscoveryStore.Save(discovery)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerClusterDiscoveryStore) SetLastPingAt(discovery *model.ClusterDiscovery) error {
tries := 0
for {
err := s.ClusterDiscoveryStore.SetLastPingAt(discovery)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerCommandStore) AnalyticsCommandCount(teamID string) (int64, error) {
tries := 0
for {
result, err := s.CommandStore.AnalyticsCommandCount(teamID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerCommandStore) Delete(commandID string, time int64) error {
tries := 0
for {
err := s.CommandStore.Delete(commandID, time)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerCommandStore) Get(id string) (*model.Command, error) {
tries := 0
for {
result, err := s.CommandStore.Get(id)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerCommandStore) GetByTeam(teamID string) ([]*model.Command, error) {
tries := 0
for {
result, err := s.CommandStore.GetByTeam(teamID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerCommandStore) GetByTrigger(teamID string, trigger string) (*model.Command, error) {
tries := 0
for {
result, err := s.CommandStore.GetByTrigger(teamID, trigger)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerCommandStore) PermanentDeleteByTeam(teamID string) error {
tries := 0
for {
err := s.CommandStore.PermanentDeleteByTeam(teamID)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerCommandStore) PermanentDeleteByUser(userID string) error {
tries := 0
for {
err := s.CommandStore.PermanentDeleteByUser(userID)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerCommandStore) Save(webhook *model.Command) (*model.Command, error) {
tries := 0
for {
result, err := s.CommandStore.Save(webhook)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerCommandStore) Update(hook *model.Command) (*model.Command, error) {
tries := 0
for {
result, err := s.CommandStore.Update(hook)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
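// Cleanup returns no error, so there is nothing to inspect or retry; the
// call is delegated directly to the underlying store.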
func (s *RetryLayerCommandWebhookStore) Cleanup() {
s.CommandWebhookStore.Cleanup()
}
func (s *RetryLayerCommandWebhookStore) Get(id string) (*model.CommandWebhook, error) {
tries := 0
for {
result, err := s.CommandWebhookStore.Get(id)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerCommandWebhookStore) Save(webhook *model.CommandWebhook) (*model.CommandWebhook, error) {
tries := 0
for {
result, err := s.CommandWebhookStore.Save(webhook)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
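// TryUse retries only on repeatable transaction errors; any other error
// (for example, the webhook having reached its usage limit) is returned
// immediately.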
func (s *RetryLayerCommandWebhookStore) TryUse(id string, limit int) error {
tries := 0
for {
err := s.CommandWebhookStore.TryUse(id, limit)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerComplianceStore) ComplianceExport(compliance *model.Compliance, cursor model.ComplianceExportCursor, limit int) ([]*model.CompliancePost, model.ComplianceExportCursor, error) {
tries := 0
for {
result, resultVar1, err := s.ComplianceStore.ComplianceExport(compliance, cursor, limit)
if err == nil {
return result, resultVar1, nil
}
if !isRepeatableError(err) {
return result, resultVar1, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, resultVar1, err
}
}
}
func (s *RetryLayerComplianceStore) Get(id string) (*model.Compliance, error) {
tries := 0
for {
result, err := s.ComplianceStore.Get(id)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerComplianceStore) GetAll(offset int, limit int) (model.Compliances, error) {
tries := 0
for {
result, err := s.ComplianceStore.GetAll(offset, limit)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerComplianceStore) MessageExport(cursor model.MessageExportCursor, limit int) ([]*model.MessageExport, model.MessageExportCursor, error) {
tries := 0
for {
result, resultVar1, err := s.ComplianceStore.MessageExport(cursor, limit)
if err == nil {
return result, resultVar1, nil
}
if !isRepeatableError(err) {
return result, resultVar1, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, resultVar1, err
}
}
}
func (s *RetryLayerComplianceStore) Save(compliance *model.Compliance) (*model.Compliance, error) {
tries := 0
for {
result, err := s.ComplianceStore.Save(compliance)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerComplianceStore) Update(compliance *model.Compliance) (*model.Compliance, error) {
tries := 0
for {
result, err := s.ComplianceStore.Update(compliance)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerEmojiStore) Delete(emoji *model.Emoji, time int64) error {
tries := 0
for {
err := s.EmojiStore.Delete(emoji, time)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerEmojiStore) Get(ctx context.Context, id string, allowFromCache bool) (*model.Emoji, error) {
tries := 0
for {
result, err := s.EmojiStore.Get(ctx, id, allowFromCache)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerEmojiStore) GetByName(ctx context.Context, name string, allowFromCache bool) (*model.Emoji, error) {
tries := 0
for {
result, err := s.EmojiStore.GetByName(ctx, name, allowFromCache)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerEmojiStore) GetList(offset int, limit int, sort string) ([]*model.Emoji, error) {
tries := 0
for {
result, err := s.EmojiStore.GetList(offset, limit, sort)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerEmojiStore) GetMultipleByName(names []string) ([]*model.Emoji, error) {
tries := 0
for {
result, err := s.EmojiStore.GetMultipleByName(names)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerEmojiStore) Save(emoji *model.Emoji) (*model.Emoji, error) {
tries := 0
for {
result, err := s.EmojiStore.Save(emoji)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerEmojiStore) Search(name string, prefixOnly bool, limit int) ([]*model.Emoji, error) {
tries := 0
for {
result, err := s.EmojiStore.Search(name, prefixOnly, limit)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerFileInfoStore) AttachToPost(fileID string, postID string, creatorID string) error {
tries := 0
for {
err := s.FileInfoStore.AttachToPost(fileID, postID, creatorID)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
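// ClearCaches returns no error, so it bypasses the retry loop.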
func (s *RetryLayerFileInfoStore) ClearCaches() {
s.FileInfoStore.ClearCaches()
}
func (s *RetryLayerFileInfoStore) CountAll() (int64, error) {
tries := 0
for {
result, err := s.FileInfoStore.CountAll()
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerFileInfoStore) DeleteForPost(postID string) (string, error) {
tries := 0
for {
result, err := s.FileInfoStore.DeleteForPost(postID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerFileInfoStore) Get(id string) (*model.FileInfo, error) {
tries := 0
for {
result, err := s.FileInfoStore.Get(id)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerFileInfoStore) GetByIds(ids []string) ([]*model.FileInfo, error) {
tries := 0
for {
result, err := s.FileInfoStore.GetByIds(ids)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerFileInfoStore) GetByPath(path string) (*model.FileInfo, error) {
tries := 0
for {
result, err := s.FileInfoStore.GetByPath(path)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerFileInfoStore) GetFilesBatchForIndexing(startTime int64, endTime int64, limit int) ([]*model.FileForIndexing, error) {
tries := 0
for {
result, err := s.FileInfoStore.GetFilesBatchForIndexing(startTime, endTime, limit)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerFileInfoStore) GetForPost(postID string, readFromMaster bool, includeDeleted bool, allowFromCache bool) ([]*model.FileInfo, error) {
tries := 0
for {
result, err := s.FileInfoStore.GetForPost(postID, readFromMaster, includeDeleted, allowFromCache)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerFileInfoStore) GetForUser(userID string) ([]*model.FileInfo, error) {
tries := 0
for {
result, err := s.FileInfoStore.GetForUser(userID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerFileInfoStore) GetFromMaster(id string) (*model.FileInfo, error) {
tries := 0
for {
result, err := s.FileInfoStore.GetFromMaster(id)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerFileInfoStore) GetWithOptions(page int, perPage int, opt *model.GetFileInfosOptions) ([]*model.FileInfo, error) {
tries := 0
for {
result, err := s.FileInfoStore.GetWithOptions(page, perPage, opt)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
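// InvalidateFileInfosForPostCache returns nothing, so it is delegated
// without a retry loop.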
func (s *RetryLayerFileInfoStore) InvalidateFileInfosForPostCache(postID string, deleted bool) {
s.FileInfoStore.InvalidateFileInfosForPostCache(postID, deleted)
}
func (s *RetryLayerFileInfoStore) PermanentDelete(fileID string) error {
tries := 0
for {
err := s.FileInfoStore.PermanentDelete(fileID)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerFileInfoStore) PermanentDeleteBatch(endTime int64, limit int64) (int64, error) {
tries := 0
for {
result, err := s.FileInfoStore.PermanentDeleteBatch(endTime, limit)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerFileInfoStore) PermanentDeleteByUser(userID string) (int64, error) {
tries := 0
for {
result, err := s.FileInfoStore.PermanentDeleteByUser(userID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerFileInfoStore) Save(info *model.FileInfo) (*model.FileInfo, error) {
tries := 0
for {
result, err := s.FileInfoStore.Save(info)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerFileInfoStore) Search(paramsList []*model.SearchParams, userID string, teamID string, page int, perPage int) (*model.FileInfoList, error) {
tries := 0
for {
result, err := s.FileInfoStore.Search(paramsList, userID, teamID, page, perPage)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerFileInfoStore) SetContent(fileID string, content string) error {
tries := 0
for {
err := s.FileInfoStore.SetContent(fileID, content)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerFileInfoStore) Upsert(info *model.FileInfo) (*model.FileInfo, error) {
tries := 0
for {
result, err := s.FileInfoStore.Upsert(info)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerGroupStore) AdminRoleGroupsForSyncableMember(userID string, syncableID string, syncableType model.GroupSyncableType) ([]string, error) {
tries := 0
for {
result, err := s.GroupStore.AdminRoleGroupsForSyncableMember(userID, syncableID, syncableType)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerGroupStore) ChannelMembersMinusGroupMembers(channelID string, groupIDs []string, page int, perPage int) ([]*model.UserWithGroups, error) {
tries := 0
for {
result, err := s.GroupStore.ChannelMembersMinusGroupMembers(channelID, groupIDs, page, perPage)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerGroupStore) ChannelMembersToAdd(since int64, channelID *string, includeRemovedMembers bool) ([]*model.UserChannelIDPair, error) {
tries := 0
for {
result, err := s.GroupStore.ChannelMembersToAdd(since, channelID, includeRemovedMembers)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerGroupStore) ChannelMembersToRemove(channelID *string) ([]*model.ChannelMember, error) {
tries := 0
for {
result, err := s.GroupStore.ChannelMembersToRemove(channelID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerGroupStore) CountChannelMembersMinusGroupMembers(channelID string, groupIDs []string) (int64, error) {
tries := 0
for {
result, err := s.GroupStore.CountChannelMembersMinusGroupMembers(channelID, groupIDs)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerGroupStore) CountGroupsByChannel(channelID string, opts model.GroupSearchOpts) (int64, error) {
tries := 0
for {
result, err := s.GroupStore.CountGroupsByChannel(channelID, opts)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerGroupStore) CountGroupsByTeam(teamID string, opts model.GroupSearchOpts) (int64, error) {
tries := 0
for {
result, err := s.GroupStore.CountGroupsByTeam(teamID, opts)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerGroupStore) CountTeamMembersMinusGroupMembers(teamID string, groupIDs []string) (int64, error) {
tries := 0
for {
result, err := s.GroupStore.CountTeamMembersMinusGroupMembers(teamID, groupIDs)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerGroupStore) Create(group *model.Group) (*model.Group, error) {
tries := 0
for {
result, err := s.GroupStore.Create(group)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerGroupStore) CreateGroupSyncable(groupSyncable *model.GroupSyncable) (*model.GroupSyncable, error) {
tries := 0
for {
result, err := s.GroupStore.CreateGroupSyncable(groupSyncable)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerGroupStore) Delete(groupID string) (*model.Group, error) {
tries := 0
for {
result, err := s.GroupStore.Delete(groupID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerGroupStore) DeleteGroupSyncable(groupID string, syncableID string, syncableType model.GroupSyncableType) (*model.GroupSyncable, error) {
tries := 0
for {
result, err := s.GroupStore.DeleteGroupSyncable(groupID, syncableID, syncableType)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerGroupStore) DeleteMember(groupID string, userID string) (*model.GroupMember, error) {
tries := 0
for {
result, err := s.GroupStore.DeleteMember(groupID, userID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerGroupStore) DistinctGroupMemberCount() (int64, error) {
tries := 0
for {
result, err := s.GroupStore.DistinctGroupMemberCount()
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerGroupStore) Get(groupID string) (*model.Group, error) {
tries := 0
for {
result, err := s.GroupStore.Get(groupID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerGroupStore) GetAllBySource(groupSource model.GroupSource) ([]*model.Group, error) {
tries := 0
for {
result, err := s.GroupStore.GetAllBySource(groupSource)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerGroupStore) GetAllGroupSyncablesByGroupId(groupID string, syncableType model.GroupSyncableType) ([]*model.GroupSyncable, error) {
tries := 0
for {
result, err := s.GroupStore.GetAllGroupSyncablesByGroupId(groupID, syncableType)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerGroupStore) GetByIDs(groupIDs []string) ([]*model.Group, error) {
tries := 0
for {
result, err := s.GroupStore.GetByIDs(groupIDs)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerGroupStore) GetByName(name string, opts model.GroupSearchOpts) (*model.Group, error) {
tries := 0
for {
result, err := s.GroupStore.GetByName(name, opts)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerGroupStore) GetByRemoteID(remoteID string, groupSource model.GroupSource) (*model.Group, error) {
tries := 0
for {
result, err := s.GroupStore.GetByRemoteID(remoteID, groupSource)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerGroupStore) GetByUser(userID string) ([]*model.Group, error) {
tries := 0
for {
result, err := s.GroupStore.GetByUser(userID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerGroupStore) GetGroupSyncable(groupID string, syncableID string, syncableType model.GroupSyncableType) (*model.GroupSyncable, error) {
tries := 0
for {
result, err := s.GroupStore.GetGroupSyncable(groupID, syncableID, syncableType)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerGroupStore) GetGroups(page int, perPage int, opts model.GroupSearchOpts) ([]*model.Group, error) {
tries := 0
for {
result, err := s.GroupStore.GetGroups(page, perPage, opts)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerGroupStore) GetGroupsAssociatedToChannelsByTeam(teamID string, opts model.GroupSearchOpts) (map[string][]*model.GroupWithSchemeAdmin, error) {
tries := 0
for {
result, err := s.GroupStore.GetGroupsAssociatedToChannelsByTeam(teamID, opts)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerGroupStore) GetGroupsByChannel(channelID string, opts model.GroupSearchOpts) ([]*model.GroupWithSchemeAdmin, error) {
tries := 0
for {
result, err := s.GroupStore.GetGroupsByChannel(channelID, opts)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerGroupStore) GetGroupsByTeam(teamID string, opts model.GroupSearchOpts) ([]*model.GroupWithSchemeAdmin, error) {
tries := 0
for {
result, err := s.GroupStore.GetGroupsByTeam(teamID, opts)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerGroupStore) GetMemberCount(groupID string) (int64, error) {
tries := 0
for {
result, err := s.GroupStore.GetMemberCount(groupID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerGroupStore) GetMemberUsers(groupID string) ([]*model.User, error) {
tries := 0
for {
result, err := s.GroupStore.GetMemberUsers(groupID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerGroupStore) GetMemberUsersInTeam(groupID string, teamID string) ([]*model.User, error) {
tries := 0
for {
result, err := s.GroupStore.GetMemberUsersInTeam(groupID, teamID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerGroupStore) GetMemberUsersNotInChannel(groupID string, channelID string) ([]*model.User, error) {
tries := 0
for {
result, err := s.GroupStore.GetMemberUsersNotInChannel(groupID, channelID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerGroupStore) GetMemberUsersPage(groupID string, page int, perPage int) ([]*model.User, error) {
tries := 0
for {
result, err := s.GroupStore.GetMemberUsersPage(groupID, page, perPage)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerGroupStore) GroupChannelCount() (int64, error) {
tries := 0
for {
result, err := s.GroupStore.GroupChannelCount()
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerGroupStore) GroupCount() (int64, error) {
tries := 0
for {
result, err := s.GroupStore.GroupCount()
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerGroupStore) GroupCountWithAllowReference() (int64, error) {
tries := 0
for {
result, err := s.GroupStore.GroupCountWithAllowReference()
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerGroupStore) GroupMemberCount() (int64, error) {
tries := 0
for {
result, err := s.GroupStore.GroupMemberCount()
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerGroupStore) GroupTeamCount() (int64, error) {
tries := 0
for {
result, err := s.GroupStore.GroupTeamCount()
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerGroupStore) PermanentDeleteMembersByUser(userID string) error {
tries := 0
for {
err := s.GroupStore.PermanentDeleteMembersByUser(userID)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerGroupStore) PermittedSyncableAdmins(syncableID string, syncableType model.GroupSyncableType) ([]string, error) {
tries := 0
for {
result, err := s.GroupStore.PermittedSyncableAdmins(syncableID, syncableType)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerGroupStore) TeamMembersMinusGroupMembers(teamID string, groupIDs []string, page int, perPage int) ([]*model.UserWithGroups, error) {
tries := 0
for {
result, err := s.GroupStore.TeamMembersMinusGroupMembers(teamID, groupIDs, page, perPage)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerGroupStore) TeamMembersToAdd(since int64, teamID *string, includeRemovedMembers bool) ([]*model.UserTeamIDPair, error) {
tries := 0
for {
result, err := s.GroupStore.TeamMembersToAdd(since, teamID, includeRemovedMembers)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerGroupStore) TeamMembersToRemove(teamID *string) ([]*model.TeamMember, error) {
tries := 0
for {
result, err := s.GroupStore.TeamMembersToRemove(teamID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerGroupStore) Update(group *model.Group) (*model.Group, error) {
tries := 0
for {
result, err := s.GroupStore.Update(group)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerGroupStore) UpdateGroupSyncable(groupSyncable *model.GroupSyncable) (*model.GroupSyncable, error) {
tries := 0
for {
result, err := s.GroupStore.UpdateGroupSyncable(groupSyncable)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerGroupStore) UpsertMember(groupID string, userID string) (*model.GroupMember, error) {
tries := 0
for {
result, err := s.GroupStore.UpsertMember(groupID, userID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerJobStore) Delete(id string) (string, error) {
tries := 0
for {
result, err := s.JobStore.Delete(id)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerJobStore) Get(id string) (*model.Job, error) {
tries := 0
for {
result, err := s.JobStore.Get(id)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerJobStore) GetAllByStatus(status string) ([]*model.Job, error) {
tries := 0
for {
result, err := s.JobStore.GetAllByStatus(status)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerJobStore) GetAllByType(jobType string) ([]*model.Job, error) {
tries := 0
for {
result, err := s.JobStore.GetAllByType(jobType)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerJobStore) GetAllByTypePage(jobType string, offset int, limit int) ([]*model.Job, error) {
tries := 0
for {
result, err := s.JobStore.GetAllByTypePage(jobType, offset, limit)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerJobStore) GetAllByTypesPage(jobTypes []string, offset int, limit int) ([]*model.Job, error) {
tries := 0
for {
result, err := s.JobStore.GetAllByTypesPage(jobTypes, offset, limit)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerJobStore) GetAllPage(offset int, limit int) ([]*model.Job, error) {
tries := 0
for {
result, err := s.JobStore.GetAllPage(offset, limit)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerJobStore) GetCountByStatusAndType(status string, jobType string) (int64, error) {
tries := 0
for {
result, err := s.JobStore.GetCountByStatusAndType(status, jobType)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerJobStore) GetNewestJobByStatusAndType(status string, jobType string) (*model.Job, error) {
tries := 0
for {
result, err := s.JobStore.GetNewestJobByStatusAndType(status, jobType)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerJobStore) GetNewestJobByStatusesAndType(statuses []string, jobType string) (*model.Job, error) {
tries := 0
for {
result, err := s.JobStore.GetNewestJobByStatusesAndType(statuses, jobType)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerJobStore) Save(job *model.Job) (*model.Job, error) {
tries := 0
for {
result, err := s.JobStore.Save(job)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
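// UpdateOptimistically (like UpdateStatusOptimistically below) treats a
// failed optimistic check as a successful call: (false, nil) is returned
// as-is, and only repeatable transaction errors trigger a retry.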
func (s *RetryLayerJobStore) UpdateOptimistically(job *model.Job, currentStatus string) (bool, error) {
tries := 0
for {
result, err := s.JobStore.UpdateOptimistically(job, currentStatus)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerJobStore) UpdateStatus(id string, status string) (*model.Job, error) {
tries := 0
for {
result, err := s.JobStore.UpdateStatus(id, status)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerJobStore) UpdateStatusOptimistically(id string, currentStatus string, newStatus string) (bool, error) {
tries := 0
for {
result, err := s.JobStore.UpdateStatusOptimistically(id, currentStatus, newStatus)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerLicenseStore) Get(id string) (*model.LicenseRecord, error) {
tries := 0
for {
result, err := s.LicenseStore.Get(id)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerLicenseStore) GetAll() ([]*model.LicenseRecord, error) {
tries := 0
for {
result, err := s.LicenseStore.GetAll()
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerLicenseStore) Save(license *model.LicenseRecord) (*model.LicenseRecord, error) {
tries := 0
for {
result, err := s.LicenseStore.Save(license)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerLinkMetadataStore) Get(url string, timestamp int64) (*model.LinkMetadata, error) {
tries := 0
for {
result, err := s.LinkMetadataStore.Get(url, timestamp)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerLinkMetadataStore) Save(linkMetadata *model.LinkMetadata) (*model.LinkMetadata, error) {
tries := 0
for {
result, err := s.LinkMetadataStore.Save(linkMetadata)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerOAuthStore) DeleteApp(id string) error {
tries := 0
for {
err := s.OAuthStore.DeleteApp(id)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerOAuthStore) GetAccessData(token string) (*model.AccessData, error) {
tries := 0
for {
result, err := s.OAuthStore.GetAccessData(token)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerOAuthStore) GetAccessDataByRefreshToken(token string) (*model.AccessData, error) {
tries := 0
for {
result, err := s.OAuthStore.GetAccessDataByRefreshToken(token)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerOAuthStore) GetAccessDataByUserForApp(userID string, clientID string) ([]*model.AccessData, error) {
	tries := 0
	for {
		result, err := s.OAuthStore.GetAccessDataByUserForApp(userID, clientID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerOAuthStore) GetApp(id string) (*model.OAuthApp, error) {
tries := 0
for {
result, err := s.OAuthStore.GetApp(id)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerOAuthStore) GetAppByUser(userID string, offset int, limit int) ([]*model.OAuthApp, error) {
tries := 0
for {
result, err := s.OAuthStore.GetAppByUser(userID, offset, limit)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerOAuthStore) GetApps(offset int, limit int) ([]*model.OAuthApp, error) {
tries := 0
for {
result, err := s.OAuthStore.GetApps(offset, limit)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerOAuthStore) GetAuthData(code string) (*model.AuthData, error) {
tries := 0
for {
result, err := s.OAuthStore.GetAuthData(code)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerOAuthStore) GetAuthorizedApps(userID string, offset int, limit int) ([]*model.OAuthApp, error) {
tries := 0
for {
result, err := s.OAuthStore.GetAuthorizedApps(userID, offset, limit)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerOAuthStore) GetPreviousAccessData(userID string, clientID string) (*model.AccessData, error) {
	tries := 0
	for {
		result, err := s.OAuthStore.GetPreviousAccessData(userID, clientID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerOAuthStore) PermanentDeleteAuthDataByUser(userID string) error {
tries := 0
for {
err := s.OAuthStore.PermanentDeleteAuthDataByUser(userID)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerOAuthStore) RemoveAccessData(token string) error {
tries := 0
for {
err := s.OAuthStore.RemoveAccessData(token)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerOAuthStore) RemoveAllAccessData() error {
tries := 0
for {
err := s.OAuthStore.RemoveAllAccessData()
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerOAuthStore) RemoveAuthData(code string) error {
tries := 0
for {
err := s.OAuthStore.RemoveAuthData(code)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerOAuthStore) SaveAccessData(accessData *model.AccessData) (*model.AccessData, error) {
tries := 0
for {
result, err := s.OAuthStore.SaveAccessData(accessData)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerOAuthStore) SaveApp(app *model.OAuthApp) (*model.OAuthApp, error) {
tries := 0
for {
result, err := s.OAuthStore.SaveApp(app)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerOAuthStore) SaveAuthData(authData *model.AuthData) (*model.AuthData, error) {
tries := 0
for {
result, err := s.OAuthStore.SaveAuthData(authData)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerOAuthStore) UpdateAccessData(accessData *model.AccessData) (*model.AccessData, error) {
tries := 0
for {
result, err := s.OAuthStore.UpdateAccessData(accessData)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerOAuthStore) UpdateApp(app *model.OAuthApp) (*model.OAuthApp, error) {
tries := 0
for {
result, err := s.OAuthStore.UpdateApp(app)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
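// CompareAndDelete and CompareAndSet return (false, nil) when the
// comparison fails; that is a successful call, so it is not retried.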
func (s *RetryLayerPluginStore) CompareAndDelete(keyVal *model.PluginKeyValue, oldValue []byte) (bool, error) {
tries := 0
for {
result, err := s.PluginStore.CompareAndDelete(keyVal, oldValue)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerPluginStore) CompareAndSet(keyVal *model.PluginKeyValue, oldValue []byte) (bool, error) {
tries := 0
for {
result, err := s.PluginStore.CompareAndSet(keyVal, oldValue)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerPluginStore) Delete(pluginID string, key string) error {
tries := 0
for {
err := s.PluginStore.Delete(pluginID, key)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerPluginStore) DeleteAllExpired() error {
tries := 0
for {
err := s.PluginStore.DeleteAllExpired()
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerPluginStore) DeleteAllForPlugin(pluginID string) error {
	tries := 0
	for {
		err := s.PluginStore.DeleteAllForPlugin(pluginID)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerPluginStore) Get(pluginID string, key string) (*model.PluginKeyValue, error) {
tries := 0
for {
result, err := s.PluginStore.Get(pluginID, key)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerPluginStore) List(pluginID string, page int, perPage int) ([]string, error) {
tries := 0
for {
result, err := s.PluginStore.List(pluginID, page, perPage)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerPluginStore) SaveOrUpdate(keyVal *model.PluginKeyValue) (*model.PluginKeyValue, error) {
tries := 0
for {
result, err := s.PluginStore.SaveOrUpdate(keyVal)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerPluginStore) SetWithOptions(pluginID string, key string, value []byte, options model.PluginKVSetOptions) (bool, error) {
tries := 0
for {
result, err := s.PluginStore.SetWithOptions(pluginID, key, value, options)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerPostStore) AnalyticsPostCount(teamID string, mustHaveFile bool, mustHaveHashtag bool) (int64, error) {
tries := 0
for {
result, err := s.PostStore.AnalyticsPostCount(teamID, mustHaveFile, mustHaveHashtag)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerPostStore) AnalyticsPostCountsByDay(options *model.AnalyticsPostCountsOptions) (model.AnalyticsRows, error) {
tries := 0
for {
result, err := s.PostStore.AnalyticsPostCountsByDay(options)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerPostStore) AnalyticsUserCountsWithPostsByDay(teamID string) (model.AnalyticsRows, error) {
tries := 0
for {
result, err := s.PostStore.AnalyticsUserCountsWithPostsByDay(teamID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
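// ClearCaches is a cache-only operation with no error to observe, so it
// skips the retry loop.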
func (s *RetryLayerPostStore) ClearCaches() {
s.PostStore.ClearCaches()
}
func (s *RetryLayerPostStore) Delete(postID string, time int64, deleteByID string) error {
tries := 0
for {
err := s.PostStore.Delete(postID, time, deleteByID)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerPostStore) DeleteOrphanedRows(limit int) (int64, error) {
tries := 0
for {
result, err := s.PostStore.DeleteOrphanedRows(limit)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerPostStore) Get(ctx context.Context, id string, skipFetchThreads bool, collapsedThreads bool, collapsedThreadsExtended bool, userID string) (*model.PostList, error) {
tries := 0
for {
result, err := s.PostStore.Get(ctx, id, skipFetchThreads, collapsedThreads, collapsedThreadsExtended, userID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerPostStore) GetDirectPostParentsForExportAfter(limit int, afterID string) ([]*model.DirectPostForExport, error) {
tries := 0
for {
result, err := s.PostStore.GetDirectPostParentsForExportAfter(limit, afterID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
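// GetEtag returns only a string, so failures cannot be observed here and
// the call is passed straight through.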
func (s *RetryLayerPostStore) GetEtag(channelID string, allowFromCache bool, collapsedThreads bool) string {
return s.PostStore.GetEtag(channelID, allowFromCache, collapsedThreads)
}
func (s *RetryLayerPostStore) GetFlaggedPosts(userID string, offset int, limit int) (*model.PostList, error) {
tries := 0
for {
result, err := s.PostStore.GetFlaggedPosts(userID, offset, limit)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerPostStore) GetFlaggedPostsForChannel(userID string, channelID string, offset int, limit int) (*model.PostList, error) {
tries := 0
for {
result, err := s.PostStore.GetFlaggedPostsForChannel(userID, channelID, offset, limit)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerPostStore) GetFlaggedPostsForTeam(userID string, teamID string, offset int, limit int) (*model.PostList, error) {
tries := 0
for {
result, err := s.PostStore.GetFlaggedPostsForTeam(userID, teamID, offset, limit)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
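// GetMaxPostSize delegates directly to the underlying PostStore; it returns
// no error, so the retry loop does not apply.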
func (s *RetryLayerPostStore) GetMaxPostSize() int {
return s.PostStore.GetMaxPostSize()
}
func (s *RetryLayerPostStore) GetOldest() (*model.Post, error) {
tries := 0
for {
result, err := s.PostStore.GetOldest()
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerPostStore) GetOldestEntityCreationTime() (int64, error) {
tries := 0
for {
result, err := s.PostStore.GetOldestEntityCreationTime()
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerPostStore) GetParentsForExportAfter(limit int, afterID string) ([]*model.PostForExport, error) {
tries := 0
for {
result, err := s.PostStore.GetParentsForExportAfter(limit, afterID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerPostStore) GetPostAfterTime(channelID string, time int64, collapsedThreads bool) (*model.Post, error) {
tries := 0
for {
result, err := s.PostStore.GetPostAfterTime(channelID, time, collapsedThreads)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerPostStore) GetPostIdAfterTime(channelID string, time int64, collapsedThreads bool) (string, error) {
tries := 0
for {
result, err := s.PostStore.GetPostIdAfterTime(channelID, time, collapsedThreads)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerPostStore) GetPostIdBeforeTime(channelID string, time int64, collapsedThreads bool) (string, error) {
tries := 0
for {
result, err := s.PostStore.GetPostIdBeforeTime(channelID, time, collapsedThreads)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerPostStore) GetPosts(options model.GetPostsOptions, allowFromCache bool) (*model.PostList, error) {
tries := 0
for {
result, err := s.PostStore.GetPosts(options, allowFromCache)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerPostStore) GetPostsAfter(options model.GetPostsOptions) (*model.PostList, error) {
tries := 0
for {
result, err := s.PostStore.GetPostsAfter(options)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerPostStore) GetPostsBatchForIndexing(startTime int64, endTime int64, limit int) ([]*model.PostForIndexing, error) {
tries := 0
for {
result, err := s.PostStore.GetPostsBatchForIndexing(startTime, endTime, limit)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerPostStore) GetPostsBefore(options model.GetPostsOptions) (*model.PostList, error) {
tries := 0
for {
result, err := s.PostStore.GetPostsBefore(options)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerPostStore) GetPostsByIds(postIds []string) ([]*model.Post, error) {
tries := 0
for {
result, err := s.PostStore.GetPostsByIds(postIds)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerPostStore) GetPostsCreatedAt(channelID string, time int64) ([]*model.Post, error) {
tries := 0
for {
result, err := s.PostStore.GetPostsCreatedAt(channelID, time)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerPostStore) GetPostsSince(options model.GetPostsSinceOptions, allowFromCache bool) (*model.PostList, error) {
tries := 0
for {
result, err := s.PostStore.GetPostsSince(options, allowFromCache)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerPostStore) GetPostsSinceForSync(options model.GetPostsSinceForSyncOptions, cursor model.GetPostsSinceForSyncCursor, limit int) ([]*model.Post, model.GetPostsSinceForSyncCursor, error) {
tries := 0
for {
result, resultVar1, err := s.PostStore.GetPostsSinceForSync(options, cursor, limit)
if err == nil {
return result, resultVar1, nil
}
if !isRepeatableError(err) {
return result, resultVar1, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, resultVar1, err
}
}
}
func (s *RetryLayerPostStore) GetRepliesForExport(parentID string) ([]*model.ReplyForExport, error) {
tries := 0
for {
result, err := s.PostStore.GetRepliesForExport(parentID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerPostStore) GetSingle(id string, inclDeleted bool) (*model.Post, error) {
tries := 0
for {
result, err := s.PostStore.GetSingle(id, inclDeleted)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerPostStore) HasAutoResponsePostByUserSince(options model.GetPostsSinceOptions, userId string) (bool, error) {
tries := 0
for {
result, err := s.PostStore.HasAutoResponsePostByUserSince(options, userId)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
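// InvalidateLastPostTimeCache delegates directly to the underlying PostStore;
// it returns no error, so the retry loop does not apply.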
func (s *RetryLayerPostStore) InvalidateLastPostTimeCache(channelID string) {
s.PostStore.InvalidateLastPostTimeCache(channelID)
}
func (s *RetryLayerPostStore) Overwrite(post *model.Post) (*model.Post, error) {
tries := 0
for {
result, err := s.PostStore.Overwrite(post)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerPostStore) OverwriteMultiple(posts []*model.Post) ([]*model.Post, int, error) {
tries := 0
for {
result, resultVar1, err := s.PostStore.OverwriteMultiple(posts)
if err == nil {
return result, resultVar1, nil
}
if !isRepeatableError(err) {
return result, resultVar1, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, resultVar1, err
}
}
}
func (s *RetryLayerPostStore) PermanentDeleteBatch(endTime int64, limit int64) (int64, error) {
tries := 0
for {
result, err := s.PostStore.PermanentDeleteBatch(endTime, limit)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerPostStore) PermanentDeleteBatchForRetentionPolicies(now int64, globalPolicyEndTime int64, limit int64, cursor model.RetentionPolicyCursor) (int64, model.RetentionPolicyCursor, error) {
tries := 0
for {
result, resultVar1, err := s.PostStore.PermanentDeleteBatchForRetentionPolicies(now, globalPolicyEndTime, limit, cursor)
if err == nil {
return result, resultVar1, nil
}
if !isRepeatableError(err) {
return result, resultVar1, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, resultVar1, err
}
}
}
func (s *RetryLayerPostStore) PermanentDeleteByChannel(channelID string) error {
tries := 0
for {
err := s.PostStore.PermanentDeleteByChannel(channelID)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerPostStore) PermanentDeleteByUser(userID string) error {
tries := 0
for {
err := s.PostStore.PermanentDeleteByUser(userID)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerPostStore) Save(post *model.Post) (*model.Post, error) {
tries := 0
for {
result, err := s.PostStore.Save(post)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerPostStore) SaveMultiple(posts []*model.Post) ([]*model.Post, int, error) {
tries := 0
for {
result, resultVar1, err := s.PostStore.SaveMultiple(posts)
if err == nil {
return result, resultVar1, nil
}
if !isRepeatableError(err) {
return result, resultVar1, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, resultVar1, err
}
}
}
func (s *RetryLayerPostStore) Search(teamID string, userID string, params *model.SearchParams) (*model.PostList, error) {
tries := 0
for {
result, err := s.PostStore.Search(teamID, userID, params)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerPostStore) SearchPostsInTeamForUser(paramsList []*model.SearchParams, userID string, teamID string, page int, perPage int) (*model.PostSearchResults, error) {
tries := 0
for {
result, err := s.PostStore.SearchPostsInTeamForUser(paramsList, userID, teamID, page, perPage)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerPostStore) Update(newPost *model.Post, oldPost *model.Post) (*model.Post, error) {
tries := 0
for {
result, err := s.PostStore.Update(newPost, oldPost)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
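// RetryLayerPreferenceStore wraps PreferenceStore. Each method retries a call
// that fails with a repeatable error (as classified by isRepeatableError) up
// to three times before wrapping and returning the error.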
func (s *RetryLayerPreferenceStore) CleanupFlagsBatch(limit int64) (int64, error) {
tries := 0
for {
result, err := s.PreferenceStore.CleanupFlagsBatch(limit)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerPreferenceStore) Delete(userID string, category string, name string) error {
tries := 0
for {
err := s.PreferenceStore.Delete(userID, category, name)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerPreferenceStore) DeleteCategory(userID string, category string) error {
tries := 0
for {
err := s.PreferenceStore.DeleteCategory(userID, category)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerPreferenceStore) DeleteCategoryAndName(category string, name string) error {
tries := 0
for {
err := s.PreferenceStore.DeleteCategoryAndName(category, name)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerPreferenceStore) DeleteOrphanedRows(limit int) (int64, error) {
tries := 0
for {
result, err := s.PreferenceStore.DeleteOrphanedRows(limit)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerPreferenceStore) Get(userID string, category string, name string) (*model.Preference, error) {
tries := 0
for {
result, err := s.PreferenceStore.Get(userID, category, name)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerPreferenceStore) GetAll(userID string) (model.Preferences, error) {
tries := 0
for {
result, err := s.PreferenceStore.GetAll(userID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerPreferenceStore) GetCategory(userID string, category string) (model.Preferences, error) {
tries := 0
for {
result, err := s.PreferenceStore.GetCategory(userID, category)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerPreferenceStore) PermanentDeleteByUser(userID string) error {
tries := 0
for {
err := s.PreferenceStore.PermanentDeleteByUser(userID)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerPreferenceStore) Save(preferences *model.Preferences) error {
tries := 0
for {
err := s.PreferenceStore.Save(preferences)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
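// RetryLayerProductNoticesStore wraps ProductNoticesStore with the same
// three-attempt retry policy.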
func (s *RetryLayerProductNoticesStore) Clear(notices []string) error {
tries := 0
for {
err := s.ProductNoticesStore.Clear(notices)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerProductNoticesStore) ClearOldNotices(currentNotices *model.ProductNotices) error {
tries := 0
for {
err := s.ProductNoticesStore.ClearOldNotices(currentNotices)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerProductNoticesStore) GetViews(userID string) ([]model.ProductNoticeViewState, error) {
tries := 0
for {
result, err := s.ProductNoticesStore.GetViews(userID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerProductNoticesStore) View(userID string, notices []string) error {
tries := 0
for {
err := s.ProductNoticesStore.View(userID, notices)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
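// RetryLayerReactionStore wraps ReactionStore with the same three-attempt
// retry policy.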
func (s *RetryLayerReactionStore) BulkGetForPosts(postIds []string) ([]*model.Reaction, error) {
tries := 0
for {
result, err := s.ReactionStore.BulkGetForPosts(postIds)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerReactionStore) Delete(reaction *model.Reaction) (*model.Reaction, error) {
tries := 0
for {
result, err := s.ReactionStore.Delete(reaction)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerReactionStore) DeleteAllWithEmojiName(emojiName string) error {
tries := 0
for {
err := s.ReactionStore.DeleteAllWithEmojiName(emojiName)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerReactionStore) DeleteOrphanedRows(limit int) (int64, error) {
tries := 0
for {
result, err := s.ReactionStore.DeleteOrphanedRows(limit)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerReactionStore) GetForPost(postID string, allowFromCache bool) ([]*model.Reaction, error) {
tries := 0
for {
result, err := s.ReactionStore.GetForPost(postID, allowFromCache)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerReactionStore) GetForPostSince(postId string, since int64, excludeRemoteId string, inclDeleted bool) ([]*model.Reaction, error) {
tries := 0
for {
result, err := s.ReactionStore.GetForPostSince(postId, since, excludeRemoteId, inclDeleted)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerReactionStore) PermanentDeleteBatch(endTime int64, limit int64) (int64, error) {
tries := 0
for {
result, err := s.ReactionStore.PermanentDeleteBatch(endTime, limit)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerReactionStore) Save(reaction *model.Reaction) (*model.Reaction, error) {
tries := 0
for {
result, err := s.ReactionStore.Save(reaction)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
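// RetryLayerRemoteClusterStore wraps RemoteClusterStore with the same
// three-attempt retry policy.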
func (s *RetryLayerRemoteClusterStore) Delete(remoteClusterId string) (bool, error) {
tries := 0
for {
result, err := s.RemoteClusterStore.Delete(remoteClusterId)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerRemoteClusterStore) Get(remoteClusterId string) (*model.RemoteCluster, error) {
tries := 0
for {
result, err := s.RemoteClusterStore.Get(remoteClusterId)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerRemoteClusterStore) GetAll(filter model.RemoteClusterQueryFilter) ([]*model.RemoteCluster, error) {
tries := 0
for {
result, err := s.RemoteClusterStore.GetAll(filter)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerRemoteClusterStore) Save(rc *model.RemoteCluster) (*model.RemoteCluster, error) {
tries := 0
for {
result, err := s.RemoteClusterStore.Save(rc)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerRemoteClusterStore) SetLastPingAt(remoteClusterId string) error {
tries := 0
for {
err := s.RemoteClusterStore.SetLastPingAt(remoteClusterId)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerRemoteClusterStore) Update(rc *model.RemoteCluster) (*model.RemoteCluster, error) {
tries := 0
for {
result, err := s.RemoteClusterStore.Update(rc)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerRemoteClusterStore) UpdateTopics(remoteClusterId string, topics string) (*model.RemoteCluster, error) {
tries := 0
for {
result, err := s.RemoteClusterStore.UpdateTopics(remoteClusterId, topics)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
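// RetryLayerRetentionPolicyStore wraps RetentionPolicyStore with the same
// three-attempt retry policy.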
func (s *RetryLayerRetentionPolicyStore) AddChannels(policyId string, channelIds []string) error {
tries := 0
for {
err := s.RetentionPolicyStore.AddChannels(policyId, channelIds)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerRetentionPolicyStore) AddTeams(policyId string, teamIds []string) error {
tries := 0
for {
err := s.RetentionPolicyStore.AddTeams(policyId, teamIds)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerRetentionPolicyStore) Delete(id string) error {
tries := 0
for {
err := s.RetentionPolicyStore.Delete(id)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerRetentionPolicyStore) DeleteOrphanedRows(limit int) (int64, error) {
tries := 0
for {
result, err := s.RetentionPolicyStore.DeleteOrphanedRows(limit)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerRetentionPolicyStore) Get(id string) (*model.RetentionPolicyWithTeamAndChannelCounts, error) {
tries := 0
for {
result, err := s.RetentionPolicyStore.Get(id)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerRetentionPolicyStore) GetAll(offset int, limit int) ([]*model.RetentionPolicyWithTeamAndChannelCounts, error) {
tries := 0
for {
result, err := s.RetentionPolicyStore.GetAll(offset, limit)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerRetentionPolicyStore) GetChannelPoliciesCountForUser(userID string) (int64, error) {
tries := 0
for {
result, err := s.RetentionPolicyStore.GetChannelPoliciesCountForUser(userID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerRetentionPolicyStore) GetChannelPoliciesForUser(userID string, offset int, limit int) ([]*model.RetentionPolicyForChannel, error) {
tries := 0
for {
result, err := s.RetentionPolicyStore.GetChannelPoliciesForUser(userID, offset, limit)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerRetentionPolicyStore) GetChannels(policyId string, offset int, limit int) (model.ChannelListWithTeamData, error) {
tries := 0
for {
result, err := s.RetentionPolicyStore.GetChannels(policyId, offset, limit)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerRetentionPolicyStore) GetChannelsCount(policyId string) (int64, error) {
tries := 0
for {
result, err := s.RetentionPolicyStore.GetChannelsCount(policyId)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerRetentionPolicyStore) GetCount() (int64, error) {
tries := 0
for {
result, err := s.RetentionPolicyStore.GetCount()
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerRetentionPolicyStore) GetTeamPoliciesCountForUser(userID string) (int64, error) {
tries := 0
for {
result, err := s.RetentionPolicyStore.GetTeamPoliciesCountForUser(userID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerRetentionPolicyStore) GetTeamPoliciesForUser(userID string, offset int, limit int) ([]*model.RetentionPolicyForTeam, error) {
tries := 0
for {
result, err := s.RetentionPolicyStore.GetTeamPoliciesForUser(userID, offset, limit)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerRetentionPolicyStore) GetTeams(policyId string, offset int, limit int) ([]*model.Team, error) {
tries := 0
for {
result, err := s.RetentionPolicyStore.GetTeams(policyId, offset, limit)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerRetentionPolicyStore) GetTeamsCount(policyId string) (int64, error) {
tries := 0
for {
result, err := s.RetentionPolicyStore.GetTeamsCount(policyId)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerRetentionPolicyStore) Patch(patch *model.RetentionPolicyWithTeamAndChannelIDs) (*model.RetentionPolicyWithTeamAndChannelCounts, error) {
tries := 0
for {
result, err := s.RetentionPolicyStore.Patch(patch)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerRetentionPolicyStore) RemoveChannels(policyId string, channelIds []string) error {
tries := 0
for {
err := s.RetentionPolicyStore.RemoveChannels(policyId, channelIds)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerRetentionPolicyStore) RemoveTeams(policyId string, teamIds []string) error {
tries := 0
for {
err := s.RetentionPolicyStore.RemoveTeams(policyId, teamIds)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerRetentionPolicyStore) Save(policy *model.RetentionPolicyWithTeamAndChannelIDs) (*model.RetentionPolicyWithTeamAndChannelCounts, error) {
tries := 0
for {
result, err := s.RetentionPolicyStore.Save(policy)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
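// RetryLayerRoleStore wraps RoleStore with the same three-attempt retry
// policy.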
func (s *RetryLayerRoleStore) AllChannelSchemeRoles() ([]*model.Role, error) {
tries := 0
for {
result, err := s.RoleStore.AllChannelSchemeRoles()
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerRoleStore) ChannelHigherScopedPermissions(roleNames []string) (map[string]*model.RolePermissions, error) {
tries := 0
for {
result, err := s.RoleStore.ChannelHigherScopedPermissions(roleNames)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerRoleStore) ChannelRolesUnderTeamRole(roleName string) ([]*model.Role, error) {
tries := 0
for {
result, err := s.RoleStore.ChannelRolesUnderTeamRole(roleName)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerRoleStore) Delete(roleID string) (*model.Role, error) {
tries := 0
for {
result, err := s.RoleStore.Delete(roleID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerRoleStore) Get(roleID string) (*model.Role, error) {
tries := 0
for {
result, err := s.RoleStore.Get(roleID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerRoleStore) GetAll() ([]*model.Role, error) {
tries := 0
for {
result, err := s.RoleStore.GetAll()
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerRoleStore) GetByName(ctx context.Context, name string) (*model.Role, error) {
tries := 0
for {
result, err := s.RoleStore.GetByName(ctx, name)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerRoleStore) GetByNames(names []string) ([]*model.Role, error) {
tries := 0
for {
result, err := s.RoleStore.GetByNames(names)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerRoleStore) PermanentDeleteAll() error {
tries := 0
for {
err := s.RoleStore.PermanentDeleteAll()
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerRoleStore) Save(role *model.Role) (*model.Role, error) {
tries := 0
for {
result, err := s.RoleStore.Save(role)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
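// RetryLayerSchemeStore wraps SchemeStore with the same three-attempt retry
// policy.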
func (s *RetryLayerSchemeStore) CountByScope(scope string) (int64, error) {
tries := 0
for {
result, err := s.SchemeStore.CountByScope(scope)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerSchemeStore) CountWithoutPermission(scope string, permissionID string, roleScope model.RoleScope, roleType model.RoleType) (int64, error) {
tries := 0
for {
result, err := s.SchemeStore.CountWithoutPermission(scope, permissionID, roleScope, roleType)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerSchemeStore) Delete(schemeID string) (*model.Scheme, error) {
tries := 0
for {
result, err := s.SchemeStore.Delete(schemeID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerSchemeStore) Get(schemeID string) (*model.Scheme, error) {
tries := 0
for {
result, err := s.SchemeStore.Get(schemeID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerSchemeStore) GetAllPage(scope string, offset int, limit int) ([]*model.Scheme, error) {
tries := 0
for {
result, err := s.SchemeStore.GetAllPage(scope, offset, limit)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerSchemeStore) GetByName(schemeName string) (*model.Scheme, error) {
tries := 0
for {
result, err := s.SchemeStore.GetByName(schemeName)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerSchemeStore) PermanentDeleteAll() error {
tries := 0
for {
err := s.SchemeStore.PermanentDeleteAll()
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerSchemeStore) Save(scheme *model.Scheme) (*model.Scheme, error) {
tries := 0
for {
result, err := s.SchemeStore.Save(scheme)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
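// RetryLayerSessionStore wraps SessionStore with the same three-attempt retry
// policy.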
func (s *RetryLayerSessionStore) AnalyticsSessionCount() (int64, error) {
tries := 0
for {
result, err := s.SessionStore.AnalyticsSessionCount()
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
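// Cleanup delegates directly to the underlying SessionStore; it returns no
// error, so the retry loop does not apply.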
func (s *RetryLayerSessionStore) Cleanup(expiryTime int64, batchSize int64) {
s.SessionStore.Cleanup(expiryTime, batchSize)
}
func (s *RetryLayerSessionStore) Get(ctx context.Context, sessionIDOrToken string) (*model.Session, error) {
tries := 0
for {
result, err := s.SessionStore.Get(ctx, sessionIDOrToken)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerSessionStore) GetSessions(userID string) ([]*model.Session, error) {
tries := 0
for {
result, err := s.SessionStore.GetSessions(userID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerSessionStore) GetSessionsExpired(thresholdMillis int64, mobileOnly bool, unnotifiedOnly bool) ([]*model.Session, error) {
tries := 0
for {
result, err := s.SessionStore.GetSessionsExpired(thresholdMillis, mobileOnly, unnotifiedOnly)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerSessionStore) GetSessionsWithActiveDeviceIds(userID string) ([]*model.Session, error) {
tries := 0
for {
result, err := s.SessionStore.GetSessionsWithActiveDeviceIds(userID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerSessionStore) PermanentDeleteSessionsByUser(userID string) error {
	tries := 0
	for {
		err := s.SessionStore.PermanentDeleteSessionsByUser(userID)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerSessionStore) Remove(sessionIDOrToken string) error {
tries := 0
for {
err := s.SessionStore.Remove(sessionIDOrToken)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerSessionStore) RemoveAllSessions() error {
tries := 0
for {
err := s.SessionStore.RemoveAllSessions()
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerSessionStore) Save(session *model.Session) (*model.Session, error) {
tries := 0
for {
result, err := s.SessionStore.Save(session)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerSessionStore) UpdateDeviceId(id string, deviceID string, expiresAt int64) (string, error) {
tries := 0
for {
result, err := s.SessionStore.UpdateDeviceId(id, deviceID, expiresAt)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerSessionStore) UpdateExpiredNotify(sessionID string, notified bool) error {
	tries := 0
	for {
		err := s.SessionStore.UpdateExpiredNotify(sessionID, notified)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerSessionStore) UpdateExpiresAt(sessionID string, time int64) error {
tries := 0
for {
err := s.SessionStore.UpdateExpiresAt(sessionID, time)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerSessionStore) UpdateLastActivityAt(sessionID string, time int64) error {
tries := 0
for {
err := s.SessionStore.UpdateLastActivityAt(sessionID, time)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerSessionStore) UpdateProps(session *model.Session) error {
tries := 0
for {
err := s.SessionStore.UpdateProps(session)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerSessionStore) UpdateRoles(userID string, roles string) (string, error) {
tries := 0
for {
result, err := s.SessionStore.UpdateRoles(userID, roles)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
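// RetryLayerSharedChannelStore wraps SharedChannelStore with the same
// three-attempt retry policy.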
func (s *RetryLayerSharedChannelStore) Delete(channelId string) (bool, error) {
tries := 0
for {
result, err := s.SharedChannelStore.Delete(channelId)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerSharedChannelStore) DeleteRemote(remoteId string) (bool, error) {
tries := 0
for {
result, err := s.SharedChannelStore.DeleteRemote(remoteId)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerSharedChannelStore) Get(channelId string) (*model.SharedChannel, error) {
tries := 0
for {
result, err := s.SharedChannelStore.Get(channelId)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerSharedChannelStore) GetAll(offset int, limit int, opts model.SharedChannelFilterOpts) ([]*model.SharedChannel, error) {
tries := 0
for {
result, err := s.SharedChannelStore.GetAll(offset, limit, opts)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerSharedChannelStore) GetAllCount(opts model.SharedChannelFilterOpts) (int64, error) {
tries := 0
for {
result, err := s.SharedChannelStore.GetAllCount(opts)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerSharedChannelStore) GetAttachment(fileId string, remoteId string) (*model.SharedChannelAttachment, error) {
tries := 0
for {
result, err := s.SharedChannelStore.GetAttachment(fileId, remoteId)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerSharedChannelStore) GetRemote(id string) (*model.SharedChannelRemote, error) {
tries := 0
for {
result, err := s.SharedChannelStore.GetRemote(id)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerSharedChannelStore) GetRemoteByIds(channelId string, remoteId string) (*model.SharedChannelRemote, error) {
tries := 0
for {
result, err := s.SharedChannelStore.GetRemoteByIds(channelId, remoteId)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerSharedChannelStore) GetRemoteForUser(remoteId string, userId string) (*model.RemoteCluster, error) {
tries := 0
for {
result, err := s.SharedChannelStore.GetRemoteForUser(remoteId, userId)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerSharedChannelStore) GetRemotes(opts model.SharedChannelRemoteFilterOpts) ([]*model.SharedChannelRemote, error) {
tries := 0
for {
result, err := s.SharedChannelStore.GetRemotes(opts)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerSharedChannelStore) GetRemotesStatus(channelId string) ([]*model.SharedChannelRemoteStatus, error) {
tries := 0
for {
result, err := s.SharedChannelStore.GetRemotesStatus(channelId)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerSharedChannelStore) GetSingleUser(userID string, channelID string, remoteID string) (*model.SharedChannelUser, error) {
tries := 0
for {
result, err := s.SharedChannelStore.GetSingleUser(userID, channelID, remoteID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerSharedChannelStore) GetUsersForSync(filter model.GetUsersForSyncFilter) ([]*model.User, error) {
tries := 0
for {
result, err := s.SharedChannelStore.GetUsersForSync(filter)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerSharedChannelStore) GetUsersForUser(userID string) ([]*model.SharedChannelUser, error) {
tries := 0
for {
result, err := s.SharedChannelStore.GetUsersForUser(userID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerSharedChannelStore) HasChannel(channelID string) (bool, error) {
tries := 0
for {
result, err := s.SharedChannelStore.HasChannel(channelID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerSharedChannelStore) HasRemote(channelID string, remoteId string) (bool, error) {
tries := 0
for {
result, err := s.SharedChannelStore.HasRemote(channelID, remoteId)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerSharedChannelStore) Save(sc *model.SharedChannel) (*model.SharedChannel, error) {
tries := 0
for {
result, err := s.SharedChannelStore.Save(sc)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerSharedChannelStore) SaveAttachment(remote *model.SharedChannelAttachment) (*model.SharedChannelAttachment, error) {
tries := 0
for {
result, err := s.SharedChannelStore.SaveAttachment(remote)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerSharedChannelStore) SaveRemote(remote *model.SharedChannelRemote) (*model.SharedChannelRemote, error) {
tries := 0
for {
result, err := s.SharedChannelStore.SaveRemote(remote)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerSharedChannelStore) SaveUser(remote *model.SharedChannelUser) (*model.SharedChannelUser, error) {
tries := 0
for {
result, err := s.SharedChannelStore.SaveUser(remote)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerSharedChannelStore) Update(sc *model.SharedChannel) (*model.SharedChannel, error) {
tries := 0
for {
result, err := s.SharedChannelStore.Update(sc)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerSharedChannelStore) UpdateAttachmentLastSyncAt(id string, syncTime int64) error {
tries := 0
for {
err := s.SharedChannelStore.UpdateAttachmentLastSyncAt(id, syncTime)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerSharedChannelStore) UpdateRemote(remote *model.SharedChannelRemote) (*model.SharedChannelRemote, error) {
tries := 0
for {
result, err := s.SharedChannelStore.UpdateRemote(remote)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerSharedChannelStore) UpdateRemoteCursor(id string, cursor model.GetPostsSinceForSyncCursor) error {
tries := 0
for {
err := s.SharedChannelStore.UpdateRemoteCursor(id, cursor)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerSharedChannelStore) UpdateUserLastSyncAt(userID string, channelID string, remoteID string) error {
tries := 0
for {
err := s.SharedChannelStore.UpdateUserLastSyncAt(userID, channelID, remoteID)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerSharedChannelStore) UpsertAttachment(remote *model.SharedChannelAttachment) (string, error) {
tries := 0
for {
result, err := s.SharedChannelStore.UpsertAttachment(remote)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
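// RetryLayerStatusStore wraps StatusStore with the same three-attempt retry
// policy.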
func (s *RetryLayerStatusStore) Get(userID string) (*model.Status, error) {
tries := 0
for {
result, err := s.StatusStore.Get(userID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerStatusStore) GetByIds(userIds []string) ([]*model.Status, error) {
tries := 0
for {
result, err := s.StatusStore.GetByIds(userIds)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerStatusStore) GetTotalActiveUsersCount() (int64, error) {
tries := 0
for {
result, err := s.StatusStore.GetTotalActiveUsersCount()
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerStatusStore) ResetAll() error {
tries := 0
for {
err := s.StatusStore.ResetAll()
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerStatusStore) SaveOrUpdate(status *model.Status) error {
tries := 0
for {
err := s.StatusStore.SaveOrUpdate(status)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerStatusStore) UpdateExpiredDNDStatuses() ([]*model.Status, error) {
tries := 0
for {
result, err := s.StatusStore.UpdateExpiredDNDStatuses()
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerStatusStore) UpdateLastActivityAt(userID string, lastActivityAt int64) error {
tries := 0
for {
err := s.StatusStore.UpdateLastActivityAt(userID, lastActivityAt)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerSystemStore) Get() (model.StringMap, error) {
tries := 0
for {
result, err := s.SystemStore.Get()
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerSystemStore) GetByName(name string) (*model.System, error) {
tries := 0
for {
result, err := s.SystemStore.GetByName(name)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerSystemStore) InsertIfExists(system *model.System) (*model.System, error) {
tries := 0
for {
result, err := s.SystemStore.InsertIfExists(system)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerSystemStore) PermanentDeleteByName(name string) (*model.System, error) {
tries := 0
for {
result, err := s.SystemStore.PermanentDeleteByName(name)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerSystemStore) Save(system *model.System) error {
tries := 0
for {
err := s.SystemStore.Save(system)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerSystemStore) SaveOrUpdate(system *model.System) error {
tries := 0
for {
err := s.SystemStore.SaveOrUpdate(system)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerSystemStore) SaveOrUpdateWithWarnMetricHandling(system *model.System) error {
tries := 0
for {
err := s.SystemStore.SaveOrUpdateWithWarnMetricHandling(system)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerSystemStore) Update(system *model.System) error {
tries := 0
for {
err := s.SystemStore.Update(system)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerTeamStore) AnalyticsGetTeamCountForScheme(schemeID string) (int64, error) {
tries := 0
for {
result, err := s.TeamStore.AnalyticsGetTeamCountForScheme(schemeID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerTeamStore) AnalyticsTeamCount(opts *model.TeamSearch) (int64, error) {
tries := 0
for {
result, err := s.TeamStore.AnalyticsTeamCount(opts)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerTeamStore) ClearAllCustomRoleAssignments() error {
tries := 0
for {
err := s.TeamStore.ClearAllCustomRoleAssignments()
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
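
// Methods that return no error, such as the cache helpers below, are passed
// straight through: with no error to inspect, the retry loop has nothing to
// act on.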
func (s *RetryLayerTeamStore) ClearCaches() {
s.TeamStore.ClearCaches()
}
func (s *RetryLayerTeamStore) Get(id string) (*model.Team, error) {
tries := 0
for {
result, err := s.TeamStore.Get(id)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerTeamStore) GetActiveMemberCount(teamID string, restrictions *model.ViewUsersRestrictions) (int64, error) {
tries := 0
for {
result, err := s.TeamStore.GetActiveMemberCount(teamID, restrictions)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerTeamStore) GetAll() ([]*model.Team, error) {
tries := 0
for {
result, err := s.TeamStore.GetAll()
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerTeamStore) GetAllForExportAfter(limit int, afterID string) ([]*model.TeamForExport, error) {
tries := 0
for {
result, err := s.TeamStore.GetAllForExportAfter(limit, afterID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerTeamStore) GetAllPage(offset int, limit int, opts *model.TeamSearch) ([]*model.Team, error) {
tries := 0
for {
result, err := s.TeamStore.GetAllPage(offset, limit, opts)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerTeamStore) GetAllPrivateTeamListing() ([]*model.Team, error) {
tries := 0
for {
result, err := s.TeamStore.GetAllPrivateTeamListing()
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerTeamStore) GetAllTeamListing() ([]*model.Team, error) {
tries := 0
for {
result, err := s.TeamStore.GetAllTeamListing()
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerTeamStore) GetByInviteId(inviteID string) (*model.Team, error) {
tries := 0
for {
result, err := s.TeamStore.GetByInviteId(inviteID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerTeamStore) GetByName(name string) (*model.Team, error) {
tries := 0
for {
result, err := s.TeamStore.GetByName(name)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerTeamStore) GetByNames(name []string) ([]*model.Team, error) {
tries := 0
for {
result, err := s.TeamStore.GetByNames(name)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerTeamStore) GetChannelUnreadsForAllTeams(excludeTeamID string, userID string) ([]*model.ChannelUnread, error) {
tries := 0
for {
result, err := s.TeamStore.GetChannelUnreadsForAllTeams(excludeTeamID, userID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerTeamStore) GetChannelUnreadsForTeam(teamID string, userID string) ([]*model.ChannelUnread, error) {
tries := 0
for {
result, err := s.TeamStore.GetChannelUnreadsForTeam(teamID, userID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerTeamStore) GetCommonTeamIDsForTwoUsers(userID string, otherUserID string) ([]string, error) {
tries := 0
for {
result, err := s.TeamStore.GetCommonTeamIDsForTwoUsers(userID, otherUserID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerTeamStore) GetMember(ctx context.Context, teamID string, userID string) (*model.TeamMember, error) {
tries := 0
for {
result, err := s.TeamStore.GetMember(ctx, teamID, userID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerTeamStore) GetMembers(teamID string, offset int, limit int, teamMembersGetOptions *model.TeamMembersGetOptions) ([]*model.TeamMember, error) {
tries := 0
for {
result, err := s.TeamStore.GetMembers(teamID, offset, limit, teamMembersGetOptions)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerTeamStore) GetMembersByIds(teamID string, userIds []string, restrictions *model.ViewUsersRestrictions) ([]*model.TeamMember, error) {
tries := 0
for {
result, err := s.TeamStore.GetMembersByIds(teamID, userIds, restrictions)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerTeamStore) GetTeamMembersForExport(userID string) ([]*model.TeamMemberForExport, error) {
tries := 0
for {
result, err := s.TeamStore.GetTeamMembersForExport(userID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerTeamStore) GetTeamsByScheme(schemeID string, offset int, limit int) ([]*model.Team, error) {
tries := 0
for {
result, err := s.TeamStore.GetTeamsByScheme(schemeID, offset, limit)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerTeamStore) GetTeamsByUserId(userID string) ([]*model.Team, error) {
tries := 0
for {
result, err := s.TeamStore.GetTeamsByUserId(userID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerTeamStore) GetTeamsForUser(ctx context.Context, userID string) ([]*model.TeamMember, error) {
tries := 0
for {
result, err := s.TeamStore.GetTeamsForUser(ctx, userID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerTeamStore) GetTeamsForUserWithPagination(userID string, page int, perPage int) ([]*model.TeamMember, error) {
tries := 0
for {
result, err := s.TeamStore.GetTeamsForUserWithPagination(userID, page, perPage)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerTeamStore) GetTotalMemberCount(teamID string, restrictions *model.ViewUsersRestrictions) (int64, error) {
tries := 0
for {
result, err := s.TeamStore.GetTotalMemberCount(teamID, restrictions)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerTeamStore) GetUserTeamIds(userID string, allowFromCache bool) ([]string, error) {
tries := 0
for {
result, err := s.TeamStore.GetUserTeamIds(userID, allowFromCache)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerTeamStore) GroupSyncedTeamCount() (int64, error) {
tries := 0
for {
result, err := s.TeamStore.GroupSyncedTeamCount()
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerTeamStore) InvalidateAllTeamIdsForUser(userID string) {
s.TeamStore.InvalidateAllTeamIdsForUser(userID)
}
func (s *RetryLayerTeamStore) MigrateTeamMembers(fromTeamID string, fromUserID string) (map[string]string, error) {
tries := 0
for {
result, err := s.TeamStore.MigrateTeamMembers(fromTeamID, fromUserID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerTeamStore) PermanentDelete(teamID string) error {
tries := 0
for {
err := s.TeamStore.PermanentDelete(teamID)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerTeamStore) RemoveAllMembersByTeam(teamID string) error {
tries := 0
for {
err := s.TeamStore.RemoveAllMembersByTeam(teamID)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerTeamStore) RemoveAllMembersByUser(userID string) error {
tries := 0
for {
err := s.TeamStore.RemoveAllMembersByUser(userID)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerTeamStore) RemoveMember(teamID string, userID string) error {
tries := 0
for {
err := s.TeamStore.RemoveMember(teamID, userID)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerTeamStore) RemoveMembers(teamID string, userIds []string) error {
tries := 0
for {
err := s.TeamStore.RemoveMembers(teamID, userIds)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerTeamStore) ResetAllTeamSchemes() error {
tries := 0
for {
err := s.TeamStore.ResetAllTeamSchemes()
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerTeamStore) Save(team *model.Team) (*model.Team, error) {
tries := 0
for {
result, err := s.TeamStore.Save(team)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerTeamStore) SaveMember(member *model.TeamMember, maxUsersPerTeam int) (*model.TeamMember, error) {
tries := 0
for {
result, err := s.TeamStore.SaveMember(member, maxUsersPerTeam)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerTeamStore) SaveMultipleMembers(members []*model.TeamMember, maxUsersPerTeam int) ([]*model.TeamMember, error) {
tries := 0
for {
result, err := s.TeamStore.SaveMultipleMembers(members, maxUsersPerTeam)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerTeamStore) SearchAll(opts *model.TeamSearch) ([]*model.Team, error) {
tries := 0
for {
result, err := s.TeamStore.SearchAll(opts)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
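
// Methods with more than one result (here a page of teams plus the total
// count) follow the same pattern; every return value is forwarded alongside
// the error on each exit path.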
func (s *RetryLayerTeamStore) SearchAllPaged(opts *model.TeamSearch) ([]*model.Team, int64, error) {
tries := 0
for {
result, resultVar1, err := s.TeamStore.SearchAllPaged(opts)
if err == nil {
return result, resultVar1, nil
}
if !isRepeatableError(err) {
return result, resultVar1, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, resultVar1, err
}
}
}
func (s *RetryLayerTeamStore) SearchOpen(opts *model.TeamSearch) ([]*model.Team, error) {
tries := 0
for {
result, err := s.TeamStore.SearchOpen(opts)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerTeamStore) SearchPrivate(opts *model.TeamSearch) ([]*model.Team, error) {
tries := 0
for {
result, err := s.TeamStore.SearchPrivate(opts)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerTeamStore) Update(team *model.Team) (*model.Team, error) {
tries := 0
for {
result, err := s.TeamStore.Update(team)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerTeamStore) UpdateLastTeamIconUpdate(teamID string, curTime int64) error {
tries := 0
for {
err := s.TeamStore.UpdateLastTeamIconUpdate(teamID, curTime)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerTeamStore) UpdateMember(member *model.TeamMember) (*model.TeamMember, error) {
tries := 0
for {
result, err := s.TeamStore.UpdateMember(member)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerTeamStore) UpdateMembersRole(teamID string, userIDs []string) error {
tries := 0
for {
err := s.TeamStore.UpdateMembersRole(teamID, userIDs)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerTeamStore) UpdateMultipleMembers(members []*model.TeamMember) ([]*model.TeamMember, error) {
tries := 0
for {
result, err := s.TeamStore.UpdateMultipleMembers(members)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerTeamStore) UserBelongsToTeams(userID string, teamIds []string) (bool, error) {
tries := 0
for {
result, err := s.TeamStore.UserBelongsToTeams(userID, teamIds)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
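
// From the caller's side the retry layer is transparent: it satisfies the
// same store interfaces as the layer it wraps, so call sites are unchanged.
// A usage sketch, assuming a wrapped store s that exposes Team() the way the
// other store layers do:
//
//	team, err := s.Team().Save(&model.Team{DisplayName: "Example"})
//	if err != nil {
//		// err is either a non-repeatable store error or one wrapped with
//		// "giving up after 3 consecutive repeatable transaction failures".
//	}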
func (s *RetryLayerTermsOfServiceStore) Get(id string, allowFromCache bool) (*model.TermsOfService, error) {
tries := 0
for {
result, err := s.TermsOfServiceStore.Get(id, allowFromCache)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerTermsOfServiceStore) GetLatest(allowFromCache bool) (*model.TermsOfService, error) {
tries := 0
for {
result, err := s.TermsOfServiceStore.GetLatest(allowFromCache)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerTermsOfServiceStore) Save(termsOfService *model.TermsOfService) (*model.TermsOfService, error) {
tries := 0
for {
result, err := s.TermsOfServiceStore.Save(termsOfService)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerThreadStore) CollectThreadsWithNewerReplies(userId string, channelIds []string, timestamp int64) ([]string, error) {
tries := 0
for {
result, err := s.ThreadStore.CollectThreadsWithNewerReplies(userId, channelIds, timestamp)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerThreadStore) Delete(postID string) error {
tries := 0
for {
err := s.ThreadStore.Delete(postID)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerThreadStore) DeleteMembershipForUser(userId string, postID string) error {
tries := 0
for {
err := s.ThreadStore.DeleteMembershipForUser(userId, postID)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerThreadStore) DeleteOrphanedRows(limit int) (int64, error) {
tries := 0
for {
result, err := s.ThreadStore.DeleteOrphanedRows(limit)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerThreadStore) Get(id string) (*model.Thread, error) {
tries := 0
for {
result, err := s.ThreadStore.Get(id)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerThreadStore) GetMembershipForUser(userId string, postID string) (*model.ThreadMembership, error) {
tries := 0
for {
result, err := s.ThreadStore.GetMembershipForUser(userId, postID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerThreadStore) GetMembershipsForUser(userId string, teamID string) ([]*model.ThreadMembership, error) {
tries := 0
for {
result, err := s.ThreadStore.GetMembershipsForUser(userId, teamID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerThreadStore) GetPosts(threadID string, since int64) ([]*model.Post, error) {
tries := 0
for {
result, err := s.ThreadStore.GetPosts(threadID, since)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerThreadStore) GetThreadFollowers(threadID string) ([]string, error) {
tries := 0
for {
result, err := s.ThreadStore.GetThreadFollowers(threadID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerThreadStore) GetThreadForUser(teamID string, threadMembership *model.ThreadMembership, extended bool) (*model.ThreadResponse, error) {
tries := 0
for {
result, err := s.ThreadStore.GetThreadForUser(teamID, threadMembership, extended)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerThreadStore) GetThreadsForUser(userId string, teamID string, opts model.GetUserThreadsOpts) (*model.Threads, error) {
tries := 0
for {
result, err := s.ThreadStore.GetThreadsForUser(userId, teamID, opts)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerThreadStore) MaintainMembership(userID string, postID string, opts store.ThreadMembershipOpts) (*model.ThreadMembership, error) {
tries := 0
for {
result, err := s.ThreadStore.MaintainMembership(userID, postID, opts)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerThreadStore) MarkAllAsRead(userID string, teamID string) error {
tries := 0
for {
err := s.ThreadStore.MarkAllAsRead(userID, teamID)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerThreadStore) MarkAllAsReadInChannels(userID string, channelIDs []string) error {
tries := 0
for {
err := s.ThreadStore.MarkAllAsReadInChannels(userID, channelIDs)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerThreadStore) MarkAsRead(userID string, threadID string, timestamp int64) error {
tries := 0
for {
err := s.ThreadStore.MarkAsRead(userID, threadID, timestamp)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerThreadStore) PermanentDeleteBatchForRetentionPolicies(now int64, globalPolicyEndTime int64, limit int64, cursor model.RetentionPolicyCursor) (int64, model.RetentionPolicyCursor, error) {
tries := 0
for {
result, resultVar1, err := s.ThreadStore.PermanentDeleteBatchForRetentionPolicies(now, globalPolicyEndTime, limit, cursor)
if err == nil {
return result, resultVar1, nil
}
if !isRepeatableError(err) {
return result, resultVar1, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, resultVar1, err
}
}
}
func (s *RetryLayerThreadStore) PermanentDeleteBatchThreadMembershipsForRetentionPolicies(now int64, globalPolicyEndTime int64, limit int64, cursor model.RetentionPolicyCursor) (int64, model.RetentionPolicyCursor, error) {
tries := 0
for {
result, resultVar1, err := s.ThreadStore.PermanentDeleteBatchThreadMembershipsForRetentionPolicies(now, globalPolicyEndTime, limit, cursor)
if err == nil {
return result, resultVar1, nil
}
if !isRepeatableError(err) {
return result, resultVar1, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, resultVar1, err
}
}
}
func (s *RetryLayerThreadStore) Save(thread *model.Thread) (*model.Thread, error) {
tries := 0
for {
result, err := s.ThreadStore.Save(thread)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerThreadStore) SaveMembership(membership *model.ThreadMembership) (*model.ThreadMembership, error) {
tries := 0
for {
result, err := s.ThreadStore.SaveMembership(membership)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerThreadStore) SaveMultiple(thread []*model.Thread) ([]*model.Thread, int, error) {
tries := 0
for {
result, resultVar1, err := s.ThreadStore.SaveMultiple(thread)
if err == nil {
return result, resultVar1, nil
}
if !isRepeatableError(err) {
return result, resultVar1, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, resultVar1, err
}
}
}
func (s *RetryLayerThreadStore) Update(thread *model.Thread) (*model.Thread, error) {
tries := 0
for {
result, err := s.ThreadStore.Update(thread)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerThreadStore) UpdateMembership(membership *model.ThreadMembership) (*model.ThreadMembership, error) {
tries := 0
for {
result, err := s.ThreadStore.UpdateMembership(membership)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerThreadStore) UpdateUnreadsByChannel(userId string, changedThreads []string, timestamp int64, updateViewedTimestamp bool) error {
tries := 0
for {
err := s.ThreadStore.UpdateUnreadsByChannel(userId, changedThreads, timestamp, updateViewedTimestamp)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerTokenStore) Cleanup() {
s.TokenStore.Cleanup()
}
func (s *RetryLayerTokenStore) Delete(token string) error {
tries := 0
for {
err := s.TokenStore.Delete(token)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerTokenStore) GetAllTokensByType(tokenType string) ([]*model.Token, error) {
tries := 0
for {
result, err := s.TokenStore.GetAllTokensByType(tokenType)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerTokenStore) GetByToken(token string) (*model.Token, error) {
tries := 0
for {
result, err := s.TokenStore.GetByToken(token)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerTokenStore) RemoveAllTokensByType(tokenType string) error {
tries := 0
for {
err := s.TokenStore.RemoveAllTokensByType(tokenType)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerTokenStore) Save(recovery *model.Token) error {
tries := 0
for {
err := s.TokenStore.Save(recovery)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerUploadSessionStore) Delete(id string) error {
tries := 0
for {
err := s.UploadSessionStore.Delete(id)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerUploadSessionStore) Get(id string) (*model.UploadSession, error) {
tries := 0
for {
result, err := s.UploadSessionStore.Get(id)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUploadSessionStore) GetForUser(userID string) ([]*model.UploadSession, error) {
tries := 0
for {
result, err := s.UploadSessionStore.GetForUser(userID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUploadSessionStore) Save(session *model.UploadSession) (*model.UploadSession, error) {
tries := 0
for {
result, err := s.UploadSessionStore.Save(session)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUploadSessionStore) Update(session *model.UploadSession) error {
tries := 0
for {
err := s.UploadSessionStore.Update(session)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerUserStore) AnalyticsActiveCount(time int64, options model.UserCountOptions) (int64, error) {
tries := 0
for {
result, err := s.UserStore.AnalyticsActiveCount(time, options)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserStore) AnalyticsActiveCountForPeriod(startTime int64, endTime int64, options model.UserCountOptions) (int64, error) {
tries := 0
for {
result, err := s.UserStore.AnalyticsActiveCountForPeriod(startTime, endTime, options)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserStore) AnalyticsGetExternalUsers(hostDomain string) (bool, error) {
tries := 0
for {
result, err := s.UserStore.AnalyticsGetExternalUsers(hostDomain)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserStore) AnalyticsGetGuestCount() (int64, error) {
tries := 0
for {
result, err := s.UserStore.AnalyticsGetGuestCount()
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserStore) AnalyticsGetInactiveUsersCount() (int64, error) {
tries := 0
for {
result, err := s.UserStore.AnalyticsGetInactiveUsersCount()
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserStore) AnalyticsGetSystemAdminCount() (int64, error) {
tries := 0
for {
result, err := s.UserStore.AnalyticsGetSystemAdminCount()
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserStore) AutocompleteUsersInChannel(teamID string, channelID string, term string, options *model.UserSearchOptions) (*model.UserAutocompleteInChannel, error) {
tries := 0
for {
result, err := s.UserStore.AutocompleteUsersInChannel(teamID, channelID, term, options)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserStore) ClearAllCustomRoleAssignments() error {
tries := 0
for {
err := s.UserStore.ClearAllCustomRoleAssignments()
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerUserStore) ClearCaches() {
s.UserStore.ClearCaches()
}
func (s *RetryLayerUserStore) Count(options model.UserCountOptions) (int64, error) {
tries := 0
for {
result, err := s.UserStore.Count(options)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserStore) DeactivateGuests() ([]string, error) {
tries := 0
for {
result, err := s.UserStore.DeactivateGuests()
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserStore) DemoteUserToGuest(userID string) (*model.User, error) {
tries := 0
for {
result, err := s.UserStore.DemoteUserToGuest(userID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserStore) Get(ctx context.Context, id string) (*model.User, error) {
tries := 0
for {
result, err := s.UserStore.Get(ctx, id)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserStore) GetAll() ([]*model.User, error) {
tries := 0
for {
result, err := s.UserStore.GetAll()
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserStore) GetAllAfter(limit int, afterID string) ([]*model.User, error) {
tries := 0
for {
result, err := s.UserStore.GetAllAfter(limit, afterID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserStore) GetAllNotInAuthService(authServices []string) ([]*model.User, error) {
tries := 0
for {
result, err := s.UserStore.GetAllNotInAuthService(authServices)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserStore) GetAllProfiles(options *model.UserGetOptions) ([]*model.User, error) {
tries := 0
for {
result, err := s.UserStore.GetAllProfiles(options)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserStore) GetAllProfilesInChannel(ctx context.Context, channelID string, allowFromCache bool) (map[string]*model.User, error) {
tries := 0
for {
result, err := s.UserStore.GetAllProfilesInChannel(ctx, channelID, allowFromCache)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserStore) GetAllUsingAuthService(authService string) ([]*model.User, error) {
tries := 0
for {
result, err := s.UserStore.GetAllUsingAuthService(authService)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserStore) GetAnyUnreadPostCountForChannel(userID string, channelID string) (int64, error) {
tries := 0
for {
result, err := s.UserStore.GetAnyUnreadPostCountForChannel(userID, channelID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserStore) GetByAuth(authData *string, authService string) (*model.User, error) {
tries := 0
for {
result, err := s.UserStore.GetByAuth(authData, authService)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserStore) GetByEmail(email string) (*model.User, error) {
tries := 0
for {
result, err := s.UserStore.GetByEmail(email)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserStore) GetByUsername(username string) (*model.User, error) {
tries := 0
for {
result, err := s.UserStore.GetByUsername(username)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserStore) GetChannelGroupUsers(channelID string) ([]*model.User, error) {
tries := 0
for {
result, err := s.UserStore.GetChannelGroupUsers(channelID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserStore) GetEtagForAllProfiles() string {
return s.UserStore.GetEtagForAllProfiles()
}
func (s *RetryLayerUserStore) GetEtagForProfiles(teamID string) string {
return s.UserStore.GetEtagForProfiles(teamID)
}
func (s *RetryLayerUserStore) GetEtagForProfilesNotInTeam(teamID string) string {
return s.UserStore.GetEtagForProfilesNotInTeam(teamID)
}
func (s *RetryLayerUserStore) GetForLogin(loginID string, allowSignInWithUsername bool, allowSignInWithEmail bool) (*model.User, error) {
tries := 0
for {
result, err := s.UserStore.GetForLogin(loginID, allowSignInWithUsername, allowSignInWithEmail)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserStore) GetKnownUsers(userID string) ([]string, error) {
tries := 0
for {
result, err := s.UserStore.GetKnownUsers(userID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserStore) GetMany(ctx context.Context, ids []string) ([]*model.User, error) {
tries := 0
for {
result, err := s.UserStore.GetMany(ctx, ids)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserStore) GetNewUsersForTeam(teamID string, offset int, limit int, viewRestrictions *model.ViewUsersRestrictions) ([]*model.User, error) {
tries := 0
for {
result, err := s.UserStore.GetNewUsersForTeam(teamID, offset, limit, viewRestrictions)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserStore) GetProfileByGroupChannelIdsForUser(userID string, channelIds []string) (map[string][]*model.User, error) {
tries := 0
for {
result, err := s.UserStore.GetProfileByGroupChannelIdsForUser(userID, channelIds)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserStore) GetProfileByIds(ctx context.Context, userIds []string, options *store.UserGetByIdsOpts, allowFromCache bool) ([]*model.User, error) {
tries := 0
for {
result, err := s.UserStore.GetProfileByIds(ctx, userIds, options, allowFromCache)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserStore) GetProfiles(options *model.UserGetOptions) ([]*model.User, error) {
tries := 0
for {
result, err := s.UserStore.GetProfiles(options)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserStore) GetProfilesByUsernames(usernames []string, viewRestrictions *model.ViewUsersRestrictions) ([]*model.User, error) {
tries := 0
for {
result, err := s.UserStore.GetProfilesByUsernames(usernames, viewRestrictions)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserStore) GetProfilesInChannel(options *model.UserGetOptions) ([]*model.User, error) {
tries := 0
for {
result, err := s.UserStore.GetProfilesInChannel(options)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserStore) GetProfilesInChannelByStatus(options *model.UserGetOptions) ([]*model.User, error) {
tries := 0
for {
result, err := s.UserStore.GetProfilesInChannelByStatus(options)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserStore) GetProfilesNotInChannel(teamID string, channelId string, groupConstrained bool, offset int, limit int, viewRestrictions *model.ViewUsersRestrictions) ([]*model.User, error) {
tries := 0
for {
result, err := s.UserStore.GetProfilesNotInChannel(teamID, channelId, groupConstrained, offset, limit, viewRestrictions)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserStore) GetProfilesNotInTeam(teamID string, groupConstrained bool, offset int, limit int, viewRestrictions *model.ViewUsersRestrictions) ([]*model.User, error) {
tries := 0
for {
result, err := s.UserStore.GetProfilesNotInTeam(teamID, groupConstrained, offset, limit, viewRestrictions)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserStore) GetProfilesWithoutTeam(options *model.UserGetOptions) ([]*model.User, error) {
tries := 0
for {
result, err := s.UserStore.GetProfilesWithoutTeam(options)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserStore) GetRecentlyActiveUsersForTeam(teamID string, offset int, limit int, viewRestrictions *model.ViewUsersRestrictions) ([]*model.User, error) {
tries := 0
for {
result, err := s.UserStore.GetRecentlyActiveUsersForTeam(teamID, offset, limit, viewRestrictions)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserStore) GetSystemAdminProfiles() (map[string]*model.User, error) {
tries := 0
for {
result, err := s.UserStore.GetSystemAdminProfiles()
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserStore) GetTeamGroupUsers(teamID string) ([]*model.User, error) {
tries := 0
for {
result, err := s.UserStore.GetTeamGroupUsers(teamID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserStore) GetUnreadCount(userID string) (int64, error) {
tries := 0
for {
result, err := s.UserStore.GetUnreadCount(userID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserStore) GetUnreadCountForChannel(userID string, channelID string) (int64, error) {
tries := 0
for {
result, err := s.UserStore.GetUnreadCountForChannel(userID, channelID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserStore) GetUsersBatchForIndexing(startTime int64, endTime int64, limit int) ([]*model.UserForIndexing, error) {
tries := 0
for {
result, err := s.UserStore.GetUsersBatchForIndexing(startTime, endTime, limit)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserStore) InferSystemInstallDate() (int64, error) {
tries := 0
for {
result, err := s.UserStore.InferSystemInstallDate()
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserStore) InvalidateProfileCacheForUser(userID string) {
s.UserStore.InvalidateProfileCacheForUser(userID)
}
func (s *RetryLayerUserStore) InvalidateProfilesInChannelCache(channelID string) {
s.UserStore.InvalidateProfilesInChannelCache(channelID)
}
func (s *RetryLayerUserStore) InvalidateProfilesInChannelCacheByUser(userID string) {
s.UserStore.InvalidateProfilesInChannelCacheByUser(userID)
}
func (s *RetryLayerUserStore) PermanentDelete(userID string) error {
tries := 0
for {
err := s.UserStore.PermanentDelete(userID)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerUserStore) PromoteGuestToUser(userID string) error {
tries := 0
for {
err := s.UserStore.PromoteGuestToUser(userID)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerUserStore) ResetAuthDataToEmailForUsers(service string, userIDs []string, includeDeleted bool, dryRun bool) (int, error) {
tries := 0
for {
result, err := s.UserStore.ResetAuthDataToEmailForUsers(service, userIDs, includeDeleted, dryRun)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserStore) ResetLastPictureUpdate(userID string) error {
tries := 0
for {
err := s.UserStore.ResetLastPictureUpdate(userID)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerUserStore) Save(user *model.User) (*model.User, error) {
tries := 0
for {
result, err := s.UserStore.Save(user)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserStore) Search(teamID string, term string, options *model.UserSearchOptions) ([]*model.User, error) {
tries := 0
for {
result, err := s.UserStore.Search(teamID, term, options)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserStore) SearchInChannel(channelID string, term string, options *model.UserSearchOptions) ([]*model.User, error) {
tries := 0
for {
result, err := s.UserStore.SearchInChannel(channelID, term, options)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserStore) SearchInGroup(groupID string, term string, options *model.UserSearchOptions) ([]*model.User, error) {
tries := 0
for {
result, err := s.UserStore.SearchInGroup(groupID, term, options)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserStore) SearchNotInChannel(teamID string, channelID string, term string, options *model.UserSearchOptions) ([]*model.User, error) {
tries := 0
for {
result, err := s.UserStore.SearchNotInChannel(teamID, channelID, term, options)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserStore) SearchNotInTeam(notInTeamID string, term string, options *model.UserSearchOptions) ([]*model.User, error) {
tries := 0
for {
result, err := s.UserStore.SearchNotInTeam(notInTeamID, term, options)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserStore) SearchWithoutTeam(term string, options *model.UserSearchOptions) ([]*model.User, error) {
tries := 0
for {
result, err := s.UserStore.SearchWithoutTeam(term, options)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserStore) Update(user *model.User, allowRoleUpdate bool) (*model.UserUpdate, error) {
tries := 0
for {
result, err := s.UserStore.Update(user, allowRoleUpdate)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserStore) UpdateAuthData(userID string, service string, authData *string, email string, resetMfa bool) (string, error) {
tries := 0
for {
result, err := s.UserStore.UpdateAuthData(userID, service, authData, email, resetMfa)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserStore) UpdateFailedPasswordAttempts(userID string, attempts int) error {
tries := 0
for {
err := s.UserStore.UpdateFailedPasswordAttempts(userID, attempts)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerUserStore) UpdateLastPictureUpdate(userID string) error {
tries := 0
for {
err := s.UserStore.UpdateLastPictureUpdate(userID)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerUserStore) UpdateMfaActive(userID string, active bool) error {
tries := 0
for {
err := s.UserStore.UpdateMfaActive(userID, active)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerUserStore) UpdateMfaSecret(userID string, secret string) error {
tries := 0
for {
err := s.UserStore.UpdateMfaSecret(userID, secret)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerUserStore) UpdatePassword(userID string, newPassword string) error {
tries := 0
for {
err := s.UserStore.UpdatePassword(userID, newPassword)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerUserStore) UpdateUpdateAt(userID string) (int64, error) {
tries := 0
for {
result, err := s.UserStore.UpdateUpdateAt(userID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserStore) VerifyEmail(userID string, email string) (string, error) {
tries := 0
for {
result, err := s.UserStore.VerifyEmail(userID, email)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserAccessTokenStore) Delete(tokenID string) error {
tries := 0
for {
err := s.UserAccessTokenStore.Delete(tokenID)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerUserAccessTokenStore) DeleteAllForUser(userID string) error {
tries := 0
for {
err := s.UserAccessTokenStore.DeleteAllForUser(userID)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerUserAccessTokenStore) Get(tokenID string) (*model.UserAccessToken, error) {
tries := 0
for {
result, err := s.UserAccessTokenStore.Get(tokenID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserAccessTokenStore) GetAll(offset int, limit int) ([]*model.UserAccessToken, error) {
tries := 0
for {
result, err := s.UserAccessTokenStore.GetAll(offset, limit)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserAccessTokenStore) GetByToken(tokenString string) (*model.UserAccessToken, error) {
tries := 0
for {
result, err := s.UserAccessTokenStore.GetByToken(tokenString)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserAccessTokenStore) GetByUser(userID string, page int, perPage int) ([]*model.UserAccessToken, error) {
tries := 0
for {
result, err := s.UserAccessTokenStore.GetByUser(userID, page, perPage)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserAccessTokenStore) Save(token *model.UserAccessToken) (*model.UserAccessToken, error) {
tries := 0
for {
result, err := s.UserAccessTokenStore.Save(token)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserAccessTokenStore) Search(term string) ([]*model.UserAccessToken, error) {
tries := 0
for {
result, err := s.UserAccessTokenStore.Search(term)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserAccessTokenStore) UpdateTokenDisable(tokenID string) error {
tries := 0
for {
err := s.UserAccessTokenStore.UpdateTokenDisable(tokenID)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerUserAccessTokenStore) UpdateTokenEnable(tokenID string) error {
tries := 0
for {
err := s.UserAccessTokenStore.UpdateTokenEnable(tokenID)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerUserTermsOfServiceStore) Delete(userID string, termsOfServiceId string) error {
tries := 0
for {
err := s.UserTermsOfServiceStore.Delete(userID, termsOfServiceId)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerUserTermsOfServiceStore) GetByUser(userID string) (*model.UserTermsOfService, error) {
tries := 0
for {
result, err := s.UserTermsOfServiceStore.GetByUser(userID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerUserTermsOfServiceStore) Save(userTermsOfService *model.UserTermsOfService) (*model.UserTermsOfService, error) {
tries := 0
for {
result, err := s.UserTermsOfServiceStore.Save(userTermsOfService)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerWebhookStore) AnalyticsIncomingCount(teamID string) (int64, error) {
tries := 0
for {
result, err := s.WebhookStore.AnalyticsIncomingCount(teamID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerWebhookStore) AnalyticsOutgoingCount(teamID string) (int64, error) {
tries := 0
for {
result, err := s.WebhookStore.AnalyticsOutgoingCount(teamID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerWebhookStore) ClearCaches() {
s.WebhookStore.ClearCaches()
}
func (s *RetryLayerWebhookStore) DeleteIncoming(webhookID string, time int64) error {
tries := 0
for {
err := s.WebhookStore.DeleteIncoming(webhookID, time)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerWebhookStore) DeleteOutgoing(webhookID string, time int64) error {
tries := 0
for {
err := s.WebhookStore.DeleteOutgoing(webhookID, time)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerWebhookStore) GetIncoming(id string, allowFromCache bool) (*model.IncomingWebhook, error) {
tries := 0
for {
result, err := s.WebhookStore.GetIncoming(id, allowFromCache)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerWebhookStore) GetIncomingByChannel(channelID string) ([]*model.IncomingWebhook, error) {
tries := 0
for {
result, err := s.WebhookStore.GetIncomingByChannel(channelID)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerWebhookStore) GetIncomingByTeam(teamID string, offset int, limit int) ([]*model.IncomingWebhook, error) {
tries := 0
for {
result, err := s.WebhookStore.GetIncomingByTeam(teamID, offset, limit)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerWebhookStore) GetIncomingByTeamByUser(teamID string, userID string, offset int, limit int) ([]*model.IncomingWebhook, error) {
tries := 0
for {
result, err := s.WebhookStore.GetIncomingByTeamByUser(teamID, userID, offset, limit)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerWebhookStore) GetIncomingList(offset int, limit int) ([]*model.IncomingWebhook, error) {
tries := 0
for {
result, err := s.WebhookStore.GetIncomingList(offset, limit)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerWebhookStore) GetIncomingListByUser(userID string, offset int, limit int) ([]*model.IncomingWebhook, error) {
tries := 0
for {
result, err := s.WebhookStore.GetIncomingListByUser(userID, offset, limit)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerWebhookStore) GetOutgoing(id string) (*model.OutgoingWebhook, error) {
tries := 0
for {
result, err := s.WebhookStore.GetOutgoing(id)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerWebhookStore) GetOutgoingByChannel(channelID string, offset int, limit int) ([]*model.OutgoingWebhook, error) {
tries := 0
for {
result, err := s.WebhookStore.GetOutgoingByChannel(channelID, offset, limit)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerWebhookStore) GetOutgoingByChannelByUser(channelID string, userID string, offset int, limit int) ([]*model.OutgoingWebhook, error) {
tries := 0
for {
result, err := s.WebhookStore.GetOutgoingByChannelByUser(channelID, userID, offset, limit)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerWebhookStore) GetOutgoingByTeam(teamID string, offset int, limit int) ([]*model.OutgoingWebhook, error) {
tries := 0
for {
result, err := s.WebhookStore.GetOutgoingByTeam(teamID, offset, limit)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerWebhookStore) GetOutgoingByTeamByUser(teamID string, userID string, offset int, limit int) ([]*model.OutgoingWebhook, error) {
tries := 0
for {
result, err := s.WebhookStore.GetOutgoingByTeamByUser(teamID, userID, offset, limit)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerWebhookStore) GetOutgoingList(offset int, limit int) ([]*model.OutgoingWebhook, error) {
tries := 0
for {
result, err := s.WebhookStore.GetOutgoingList(offset, limit)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerWebhookStore) GetOutgoingListByUser(userID string, offset int, limit int) ([]*model.OutgoingWebhook, error) {
tries := 0
for {
result, err := s.WebhookStore.GetOutgoingListByUser(userID, offset, limit)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerWebhookStore) InvalidateWebhookCache(webhook string) {
s.WebhookStore.InvalidateWebhookCache(webhook)
}
func (s *RetryLayerWebhookStore) PermanentDeleteIncomingByChannel(channelID string) error {
tries := 0
for {
err := s.WebhookStore.PermanentDeleteIncomingByChannel(channelID)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerWebhookStore) PermanentDeleteIncomingByUser(userID string) error {
tries := 0
for {
err := s.WebhookStore.PermanentDeleteIncomingByUser(userID)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerWebhookStore) PermanentDeleteOutgoingByChannel(channelID string) error {
tries := 0
for {
err := s.WebhookStore.PermanentDeleteOutgoingByChannel(channelID)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerWebhookStore) PermanentDeleteOutgoingByUser(userID string) error {
tries := 0
for {
err := s.WebhookStore.PermanentDeleteOutgoingByUser(userID)
if err == nil {
return nil
}
if !isRepeatableError(err) {
return err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return err
}
}
}
func (s *RetryLayerWebhookStore) SaveIncoming(webhook *model.IncomingWebhook) (*model.IncomingWebhook, error) {
tries := 0
for {
result, err := s.WebhookStore.SaveIncoming(webhook)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerWebhookStore) SaveOutgoing(webhook *model.OutgoingWebhook) (*model.OutgoingWebhook, error) {
tries := 0
for {
result, err := s.WebhookStore.SaveOutgoing(webhook)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerWebhookStore) UpdateIncoming(webhook *model.IncomingWebhook) (*model.IncomingWebhook, error) {
tries := 0
for {
result, err := s.WebhookStore.UpdateIncoming(webhook)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayerWebhookStore) UpdateOutgoing(hook *model.OutgoingWebhook) (*model.OutgoingWebhook, error) {
tries := 0
for {
result, err := s.WebhookStore.UpdateOutgoing(hook)
if err == nil {
return result, nil
}
if !isRepeatableError(err) {
return result, err
}
tries++
if tries >= 3 {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
}
}
}
func (s *RetryLayer) Close() {
s.Store.Close()
}
func (s *RetryLayer) DropAllTables() {
s.Store.DropAllTables()
}
func (s *RetryLayer) GetCurrentSchemaVersion() string {
return s.Store.GetCurrentSchemaVersion()
}
func (s *RetryLayer) LockToMaster() {
s.Store.LockToMaster()
}
func (s *RetryLayer) MarkSystemRanUnitTests() {
s.Store.MarkSystemRanUnitTests()
}
func (s *RetryLayer) SetContext(context context.Context) {
s.Store.SetContext(context)
}
func (s *RetryLayer) TotalMasterDbConnections() int {
return s.Store.TotalMasterDbConnections()
}
func (s *RetryLayer) TotalReadDbConnections() int {
return s.Store.TotalReadDbConnections()
}
func (s *RetryLayer) TotalSearchDbConnections() int {
return s.Store.TotalSearchDbConnections()
}
func (s *RetryLayer) UnlockFromMaster() {
s.Store.UnlockFromMaster()
}
func New(childStore store.Store) *RetryLayer {
newStore := RetryLayer{
Store: childStore,
}
newStore.AuditStore = &RetryLayerAuditStore{AuditStore: childStore.Audit(), Root: &newStore}
newStore.BotStore = &RetryLayerBotStore{BotStore: childStore.Bot(), Root: &newStore}
newStore.ChannelStore = &RetryLayerChannelStore{ChannelStore: childStore.Channel(), Root: &newStore}
newStore.ChannelMemberHistoryStore = &RetryLayerChannelMemberHistoryStore{ChannelMemberHistoryStore: childStore.ChannelMemberHistory(), Root: &newStore}
newStore.ClusterDiscoveryStore = &RetryLayerClusterDiscoveryStore{ClusterDiscoveryStore: childStore.ClusterDiscovery(), Root: &newStore}
newStore.CommandStore = &RetryLayerCommandStore{CommandStore: childStore.Command(), Root: &newStore}
newStore.CommandWebhookStore = &RetryLayerCommandWebhookStore{CommandWebhookStore: childStore.CommandWebhook(), Root: &newStore}
newStore.ComplianceStore = &RetryLayerComplianceStore{ComplianceStore: childStore.Compliance(), Root: &newStore}
newStore.EmojiStore = &RetryLayerEmojiStore{EmojiStore: childStore.Emoji(), Root: &newStore}
newStore.FileInfoStore = &RetryLayerFileInfoStore{FileInfoStore: childStore.FileInfo(), Root: &newStore}
newStore.GroupStore = &RetryLayerGroupStore{GroupStore: childStore.Group(), Root: &newStore}
newStore.JobStore = &RetryLayerJobStore{JobStore: childStore.Job(), Root: &newStore}
newStore.LicenseStore = &RetryLayerLicenseStore{LicenseStore: childStore.License(), Root: &newStore}
newStore.LinkMetadataStore = &RetryLayerLinkMetadataStore{LinkMetadataStore: childStore.LinkMetadata(), Root: &newStore}
newStore.OAuthStore = &RetryLayerOAuthStore{OAuthStore: childStore.OAuth(), Root: &newStore}
newStore.PluginStore = &RetryLayerPluginStore{PluginStore: childStore.Plugin(), Root: &newStore}
newStore.PostStore = &RetryLayerPostStore{PostStore: childStore.Post(), Root: &newStore}
newStore.PreferenceStore = &RetryLayerPreferenceStore{PreferenceStore: childStore.Preference(), Root: &newStore}
newStore.ProductNoticesStore = &RetryLayerProductNoticesStore{ProductNoticesStore: childStore.ProductNotices(), Root: &newStore}
newStore.ReactionStore = &RetryLayerReactionStore{ReactionStore: childStore.Reaction(), Root: &newStore}
newStore.RemoteClusterStore = &RetryLayerRemoteClusterStore{RemoteClusterStore: childStore.RemoteCluster(), Root: &newStore}
newStore.RetentionPolicyStore = &RetryLayerRetentionPolicyStore{RetentionPolicyStore: childStore.RetentionPolicy(), Root: &newStore}
newStore.RoleStore = &RetryLayerRoleStore{RoleStore: childStore.Role(), Root: &newStore}
newStore.SchemeStore = &RetryLayerSchemeStore{SchemeStore: childStore.Scheme(), Root: &newStore}
newStore.SessionStore = &RetryLayerSessionStore{SessionStore: childStore.Session(), Root: &newStore}
newStore.SharedChannelStore = &RetryLayerSharedChannelStore{SharedChannelStore: childStore.SharedChannel(), Root: &newStore}
newStore.StatusStore = &RetryLayerStatusStore{StatusStore: childStore.Status(), Root: &newStore}
newStore.SystemStore = &RetryLayerSystemStore{SystemStore: childStore.System(), Root: &newStore}
newStore.TeamStore = &RetryLayerTeamStore{TeamStore: childStore.Team(), Root: &newStore}
newStore.TermsOfServiceStore = &RetryLayerTermsOfServiceStore{TermsOfServiceStore: childStore.TermsOfService(), Root: &newStore}
newStore.ThreadStore = &RetryLayerThreadStore{ThreadStore: childStore.Thread(), Root: &newStore}
newStore.TokenStore = &RetryLayerTokenStore{TokenStore: childStore.Token(), Root: &newStore}
newStore.UploadSessionStore = &RetryLayerUploadSessionStore{UploadSessionStore: childStore.UploadSession(), Root: &newStore}
newStore.UserStore = &RetryLayerUserStore{UserStore: childStore.User(), Root: &newStore}
newStore.UserAccessTokenStore = &RetryLayerUserAccessTokenStore{UserAccessTokenStore: childStore.UserAccessToken(), Root: &newStore}
newStore.UserTermsOfServiceStore = &RetryLayerUserTermsOfServiceStore{UserTermsOfServiceStore: childStore.UserTermsOfService(), Root: &newStore}
newStore.WebhookStore = &RetryLayerWebhookStore{WebhookStore: childStore.Webhook(), Root: &newStore}
return &newStore
}
| {
err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
return result, err
} |
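Every generated wrapper above follows the same retry shape: call through to the underlying store, return immediately on success or on a non-repeatable error, and give up after three consecutive repeatable failures. A minimal sketch of that pattern as a standalone helper (the helper and parameter names here are hypothetical; isRepeatableError is the predicate the generated code actually uses):

# Minimal sketch of the retry pattern used by the generated wrappers above.
# `is_repeatable_error` is a hypothetical stand-in for the package's
# isRepeatableError predicate; any other error is re-raised immediately.
def with_retries(call, is_repeatable_error, max_tries=3):
    tries = 0
    while True:
        try:
            return call()
        except Exception as err:
            if not is_repeatable_error(err):
                raise
            tries += 1
            if tries >= max_tries:
                raise RuntimeError(
                    "giving up after 3 consecutive repeatable transaction failures"
                ) from err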
transform_selection_groupby_test.go | // Copyright 2020 The VectorSQL Authors.
//
// Code is licensed under Apache License, Version 2.0.
package transforms
import (
"context"
"testing"
"columns"
"datablocks"
"datatypes"
"mocks"
"planners"
"processors"
"github.com/stretchr/testify/assert"
)
func TestSelectionGroupByTransform(t *testing.T) {
tests := []struct {
name string
plan planners.IPlan
source []interface{}
expect *datablocks.DataBlock
}{
{
name: "simple",
plan: planners.NewSelectionPlan(
planners.NewMapPlan(
planners.NewVariablePlan("name"),
planners.NewUnaryExpressionPlan("sum", planners.NewVariablePlan("age")),
),
planners.NewMapPlan(
planners.NewVariablePlan("name"),
),
),
source: mocks.NewSourceFromSlice(
mocks.NewBlockFromSlice(
[]*columns.Column{
{Name: "name", DataType: datatypes.NewStringDataType()},
{Name: "age", DataType: datatypes.NewInt32DataType()},
},
[]interface{}{"x", 11},
[]interface{}{"z", 13},
[]interface{}{"y", 12},
[]interface{}{"y", 13},
),
mocks.NewBlockFromSlice(
[]*columns.Column{
{Name: "name", DataType: datatypes.NewStringDataType()},
{Name: "age", DataType: datatypes.NewInt32DataType()},
},
[]interface{}{"x", 11},
[]interface{}{"y", 13},
),
),
expect: mocks.NewBlockFromSlice(
[]*columns.Column{
{Name: "name", DataType: datatypes.NewStringDataType()},
{Name: "SUM(age)", DataType: datatypes.NewInt32DataType()},
},
[]interface{}{"x", 22},
[]interface{}{"y", 38},
[]interface{}{"z", 13},
),
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
mock, cleanup := mocks.NewMock()
defer cleanup()
ctx := NewTransformContext(mock.Ctx, mock.Log, mock.Conf)
stream := mocks.NewMockBlockInputStream(test.source)
datasource := NewDataSourceTransform(ctx, stream)
orderby := NewOrderByTransform(ctx,
planners.NewOrderByPlan(
planners.Order{
Expression: planners.NewVariablePlan("name"),
Direction: "asc",
},
))
selection := NewGroupBySelectionTransform(ctx, test.plan.(*planners.SelectionPlan))
sink := processors.NewSink("sink")
pipeline := processors.NewPipeline(context.Background())
pipeline.Add(datasource)
pipeline.Add(selection)
pipeline.Add(orderby)
pipeline.Add(sink)
pipeline.Run()
var actual *datablocks.DataBlock
err := pipeline.Wait(func(x interface{}) error {
y := x.(*datablocks.DataBlock)
if actual == nil {
actual = y
} else { | }
return nil
})
assert.Nil(t, err)
expect := test.expect
assert.True(t, mocks.DataBlockEqual(actual, expect))
stats := selection.(*GroupBySelectionTransform).Stats()
assert.Equal(t, stats.TotalRowsToRead.Get(), int64(6))
})
}
} | actual.Append(y) |
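As a cross-check of the expectation in the test above: the plan computes SUM(age) grouped by name over both mock blocks, and the ORDER BY stage then sorts the groups by name ascending. A quick sketch of that arithmetic:

# Cross-check of the GROUP BY expectation in the test above: sum `age`
# per `name` across both mock blocks, then order the groups by name.
from collections import defaultdict

rows = [("x", 11), ("z", 13), ("y", 12), ("y", 13), ("x", 11), ("y", 13)]
sums = defaultdict(int)
for name, age in rows:
    sums[name] += age
assert sorted(sums.items()) == [("x", 22), ("y", 38), ("z", 13)]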
bidiagonal.rs | #[cfg(feature = "serde-serialize")]
use serde::{Deserialize, Serialize};
use alga::general::ComplexField;
use crate::allocator::Allocator;
use crate::base::{DefaultAllocator, Matrix, MatrixMN, MatrixN, Unit, VectorN};
use crate::dimension::{Dim, DimDiff, DimMin, DimMinimum, DimSub, U1};
use crate::storage::Storage;
use crate::geometry::Reflection;
use crate::linalg::householder;
/// The bidiagonalization of a general matrix.
#[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))]
#[cfg_attr(
feature = "serde-serialize",
serde(bound(
serialize = "DimMinimum<R, C>: DimSub<U1>,
DefaultAllocator: Allocator<N, R, C> +
Allocator<N, DimMinimum<R, C>> +
Allocator<N, DimDiff<DimMinimum<R, C>, U1>>,
MatrixMN<N, R, C>: Serialize,
VectorN<N, DimMinimum<R, C>>: Serialize,
VectorN<N, DimDiff<DimMinimum<R, C>, U1>>: Serialize"
))
)]
#[cfg_attr(
feature = "serde-serialize",
serde(bound(
deserialize = "DimMinimum<R, C>: DimSub<U1>,
DefaultAllocator: Allocator<N, R, C> +
Allocator<N, DimMinimum<R, C>> +
Allocator<N, DimDiff<DimMinimum<R, C>, U1>>,
MatrixMN<N, R, C>: Deserialize<'de>,
VectorN<N, DimMinimum<R, C>>: Deserialize<'de>,
VectorN<N, DimDiff<DimMinimum<R, C>, U1>>: Deserialize<'de>"
))
)]
#[derive(Clone, Debug)]
pub struct Bidiagonal<N: ComplexField, R: DimMin<C>, C: Dim>
where
DimMinimum<R, C>: DimSub<U1>,
DefaultAllocator: Allocator<N, R, C>
+ Allocator<N, DimMinimum<R, C>>
+ Allocator<N, DimDiff<DimMinimum<R, C>, U1>>,
{
// FIXME: perhaps we should pack the axes into different vectors so that axes for `v_t` are
// contiguous. This would avoid some useless copies.
uv: MatrixMN<N, R, C>,
/// The diagonal elements of the decomposed matrix.
diagonal: VectorN<N, DimMinimum<R, C>>,
/// The off-diagonal elements of the decomposed matrix.
off_diagonal: VectorN<N, DimDiff<DimMinimum<R, C>, U1>>,
upper_diagonal: bool,
}
impl<N: ComplexField, R: DimMin<C>, C: Dim> Copy for Bidiagonal<N, R, C>
where
DimMinimum<R, C>: DimSub<U1>,
DefaultAllocator: Allocator<N, R, C>
+ Allocator<N, DimMinimum<R, C>>
+ Allocator<N, DimDiff<DimMinimum<R, C>, U1>>,
MatrixMN<N, R, C>: Copy,
VectorN<N, DimMinimum<R, C>>: Copy,
VectorN<N, DimDiff<DimMinimum<R, C>, U1>>: Copy,
{}
impl<N: ComplexField, R: DimMin<C>, C: Dim> Bidiagonal<N, R, C>
where
DimMinimum<R, C>: DimSub<U1>,
DefaultAllocator: Allocator<N, R, C>
+ Allocator<N, C>
+ Allocator<N, R>
+ Allocator<N, DimMinimum<R, C>>
+ Allocator<N, DimDiff<DimMinimum<R, C>, U1>>,
{
/// Computes the Bidiagonal decomposition using householder reflections.
pub fn new(mut matrix: MatrixMN<N, R, C>) -> Self {
let (nrows, ncols) = matrix.data.shape();
let min_nrows_ncols = nrows.min(ncols);
let dim = min_nrows_ncols.value();
assert!(
dim != 0,
"Cannot compute the bidiagonalization of an empty matrix."
);
let mut diagonal = unsafe { MatrixMN::new_uninitialized_generic(min_nrows_ncols, U1) };
let mut off_diagonal =
unsafe { MatrixMN::new_uninitialized_generic(min_nrows_ncols.sub(U1), U1) };
let mut axis_packed = unsafe { MatrixMN::new_uninitialized_generic(ncols, U1) };
let mut work = unsafe { MatrixMN::new_uninitialized_generic(nrows, U1) };
let upper_diagonal = nrows.value() >= ncols.value();
if upper_diagonal {
for ite in 0..dim - 1 {
householder::clear_column_unchecked(&mut matrix, &mut diagonal[ite], ite, 0, None);
householder::clear_row_unchecked(
&mut matrix,
&mut off_diagonal[ite],
&mut axis_packed,
&mut work,
ite,
1,
);
}
householder::clear_column_unchecked(
&mut matrix,
&mut diagonal[dim - 1],
dim - 1,
0,
None,
);
} else {
for ite in 0..dim - 1 {
householder::clear_row_unchecked(
&mut matrix,
&mut diagonal[ite],
&mut axis_packed,
&mut work,
ite,
0,
);
householder::clear_column_unchecked(
&mut matrix,
&mut off_diagonal[ite],
ite,
1,
None,
);
}
householder::clear_row_unchecked(
&mut matrix,
&mut diagonal[dim - 1],
&mut axis_packed,
&mut work,
dim - 1,
0,
);
}
Bidiagonal {
uv: matrix,
diagonal,
off_diagonal,
upper_diagonal,
}
}
/// Indicates whether this decomposition contains an upper-diagonal matrix.
#[inline]
pub fn is_upper_diagonal(&self) -> bool {
self.upper_diagonal
}
#[inline]
fn axis_shift(&self) -> (usize, usize) {
if self.upper_diagonal {
(0, 1)
} else {
(1, 0)
}
}
/// Unpacks this decomposition into its three matrix factors `(U, D, V^t)`.
///
/// The decomposed matrix `M` is equal to `U * D * V^t`.
#[inline]
pub fn unpack(
self,
) -> (
MatrixMN<N, R, DimMinimum<R, C>>,
MatrixN<N, DimMinimum<R, C>>,
MatrixMN<N, DimMinimum<R, C>, C>,
)
where
DefaultAllocator: Allocator<N, DimMinimum<R, C>, DimMinimum<R, C>>
+ Allocator<N, R, DimMinimum<R, C>>
+ Allocator<N, DimMinimum<R, C>, C>,
{
// FIXME: optimize by calling a reallocator.
(self.u(), self.d(), self.v_t())
}
/// Retrieves the bidiagonal matrix `D` of this decomposition.
#[inline]
pub fn d(&self) -> MatrixN<N, DimMinimum<R, C>>
where
DefaultAllocator: Allocator<N, DimMinimum<R, C>, DimMinimum<R, C>>,
{
let (nrows, ncols) = self.uv.data.shape();
let d = nrows.min(ncols);
let mut res = MatrixN::identity_generic(d, d);
res.set_partial_diagonal(self.diagonal.iter().map(|e| N::from_real(e.modulus())));
let start = self.axis_shift();
res.slice_mut(start, (d.value() - 1, d.value() - 1))
.set_partial_diagonal(self.off_diagonal.iter().map(|e| N::from_real(e.modulus())));
res
}
/// Computes the orthogonal matrix `U` of this `U * D * V^t` decomposition.
// FIXME: code duplication with householder::assemble_q.
// Except that we are returning a rectangular matrix here.
pub fn u(&self) -> MatrixMN<N, R, DimMinimum<R, C>>
where DefaultAllocator: Allocator<N, R, DimMinimum<R, C>> |
/// Computes the orthogonal matrix `V_t` of this `U * D * V_t` decomposition.
pub fn v_t(&self) -> MatrixMN<N, DimMinimum<R, C>, C>
where DefaultAllocator: Allocator<N, DimMinimum<R, C>, C> {
let (nrows, ncols) = self.uv.data.shape();
let min_nrows_ncols = nrows.min(ncols);
let mut res = Matrix::identity_generic(min_nrows_ncols, ncols);
let mut work = unsafe { MatrixMN::new_uninitialized_generic(min_nrows_ncols, U1) };
let mut axis_packed = unsafe { MatrixMN::new_uninitialized_generic(ncols, U1) };
let shift = self.axis_shift().1;
for i in (0..min_nrows_ncols.value() - shift).rev() {
let axis = self.uv.slice_range(i, i + shift..);
let mut axis_packed = axis_packed.rows_range_mut(i + shift..);
axis_packed.tr_copy_from(&axis);
// FIXME: sometimes, the axis might have a zero magnitude.
let refl = Reflection::new(Unit::new_unchecked(axis_packed), N::zero());
let mut res_rows = res.slice_range_mut(i.., i + shift..);
let sign = if self.upper_diagonal {
self.off_diagonal[i].signum()
} else {
self.diagonal[i].signum()
};
refl.reflect_rows_with_sign(&mut res_rows, &mut work.rows_range_mut(i..), sign);
}
res
}
/// The diagonal part of this decomposed matrix.
pub fn diagonal(&self) -> VectorN<N::RealField, DimMinimum<R, C>>
where DefaultAllocator: Allocator<N::RealField, DimMinimum<R, C>> {
self.diagonal.map(|e| e.modulus())
}
/// The off-diagonal part of this decomposed matrix.
pub fn off_diagonal(&self) -> VectorN<N::RealField, DimDiff<DimMinimum<R, C>, U1>>
where DefaultAllocator: Allocator<N::RealField, DimDiff<DimMinimum<R, C>, U1>> {
self.off_diagonal.map(|e| e.modulus())
}
#[doc(hidden)]
pub fn uv_internal(&self) -> &MatrixMN<N, R, C> {
&self.uv
}
}
// impl<N: ComplexField, D: DimMin<D, Output = D> + DimSub<Dynamic>> Bidiagonal<N, D, D>
// where DefaultAllocator: Allocator<N, D, D> +
// Allocator<N, D> {
// /// Solves the linear system `self * x = b`, where `x` is the unknown to be determined.
// pub fn solve<R2: Dim, C2: Dim, S2>(&self, b: &Matrix<N, R2, C2, S2>) -> MatrixMN<N, R2, C2>
// where S2: StorageMut<N, R2, C2>,
// ShapeConstraint: SameNumberOfRows<R2, D>,
// DefaultAllocator: Allocator<N, R2, C2> {
// let mut res = b.clone_owned();
// self.solve_mut(&mut res);
// res
// }
//
// /// Solves the linear system `self * x = b`, where `x` is the unknown to be determined.
// pub fn solve_mut<R2: Dim, C2: Dim, S2>(&self, b: &mut Matrix<N, R2, C2, S2>)
// where S2: StorageMut<N, R2, C2>,
// ShapeConstraint: SameNumberOfRows<R2, D> {
//
// assert_eq!(self.uv.nrows(), b.nrows(), "Bidiagonal solve matrix dimension mismatch.");
// assert!(self.uv.is_square(), "Bidiagonal solve: unable to solve a non-square system.");
//
// self.q_tr_mul(b);
// self.solve_upper_triangular_mut(b);
// }
//
// // FIXME: duplicate code from the `solve` module.
// fn solve_upper_triangular_mut<R2: Dim, C2: Dim, S2>(&self, b: &mut Matrix<N, R2, C2, S2>)
// where S2: StorageMut<N, R2, C2>,
// ShapeConstraint: SameNumberOfRows<R2, D> {
//
// let dim = self.uv.nrows();
//
// for k in 0 .. b.ncols() {
// let mut b = b.column_mut(k);
// for i in (0 .. dim).rev() {
// let coeff;
//
// unsafe {
// let diag = *self.diag.vget_unchecked(i);
// coeff = *b.vget_unchecked(i) / diag;
// *b.vget_unchecked_mut(i) = coeff;
// }
//
// b.rows_range_mut(.. i).axpy(-coeff, &self.uv.slice_range(.. i, i), N::one());
// }
// }
// }
//
// /// Computes the inverse of the decomposed matrix.
// pub fn inverse(&self) -> MatrixN<N, D> {
// assert!(self.uv.is_square(), "Bidiagonal inverse: unable to compute the inverse of a non-square matrix.");
//
// // FIXME: is there a less naive method ?
// let (nrows, ncols) = self.uv.data.shape();
// let mut res = MatrixN::identity_generic(nrows, ncols);
// self.solve_mut(&mut res);
// res
// }
//
// // /// Computes the determinant of the decomposed matrix.
// // pub fn determinant(&self) -> N {
// // let dim = self.uv.nrows();
// // assert!(self.uv.is_square(), "Bidiagonal determinant: unable to compute the determinant of a non-square matrix.");
//
// // let mut res = N::one();
// // for i in 0 .. dim {
// // res *= unsafe { *self.diag.vget_unchecked(i) };
// // }
//
// // res * self.q_determinant()
// // }
// }
impl<N: ComplexField, R: DimMin<C>, C: Dim, S: Storage<N, R, C>> Matrix<N, R, C, S>
where
DimMinimum<R, C>: DimSub<U1>,
DefaultAllocator: Allocator<N, R, C>
+ Allocator<N, C>
+ Allocator<N, R>
+ Allocator<N, DimMinimum<R, C>>
+ Allocator<N, DimDiff<DimMinimum<R, C>, U1>>,
{
/// Computes the bidiagonalization using householder reflections.
pub fn bidiagonalize(self) -> Bidiagonal<N, R, C> {
Bidiagonal::new(self.into_owned())
}
}
| {
let (nrows, ncols) = self.uv.data.shape();
let mut res = Matrix::identity_generic(nrows, nrows.min(ncols));
let dim = self.diagonal.len();
let shift = self.axis_shift().0;
for i in (0..dim - shift).rev() {
let axis = self.uv.slice_range(i + shift.., i);
// FIXME: sometimes, the axis might have a zero magnitude.
let refl = Reflection::new(Unit::new_unchecked(axis), N::zero());
let mut res_rows = res.slice_range_mut(i + shift.., i..);
let sign = if self.upper_diagonal {
self.diagonal[i].signum()
} else {
self.off_diagonal[i].signum()
};
refl.reflect_with_sign(&mut res_rows, sign);
}
res
} |
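For reference, the factorization this module computes can be summarized as follows (shown for the m >= n, upper-bidiagonal case; the d_i are stored in `diagonal`, the e_i in `off_diagonal`, and when m < n the roles flip and B is lower bidiagonal, i.e. `upper_diagonal == false`):

% M is m x n; U and V have orthonormal columns; B is upper bidiagonal.
\[
M = U \, B \, V^{\mathsf{T}}, \qquad
B = \begin{pmatrix}
d_1 & e_1 &        &         \\
    & d_2 & \ddots &         \\
    &     & \ddots & e_{n-1} \\
    &     &        & d_n
\end{pmatrix}
\]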
parser.go | package main
func Parse() | {
} |
|
custom_models.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
class CustomResnetV1(nn.Module):
def __init__(self):
super(CustomResnetV1, self).__init__()
self.resnet = torchvision.models.resnet18(pretrained=True)
self.resnet.conv1 = nn.Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(0, 0), bias=False)
self.resnet.fc = nn.Linear(512, 256)
self.bn1a = nn.BatchNorm1d(256)
self.fc11 = nn.Linear(256, 256)
self.fc12 = nn.Linear(256, 256)
self.bn1b = nn.BatchNorm1d(256)
self.fc13 = nn.Linear(256, 256)
self.fc14 = nn.Linear(256, 256)
self.bn1c = nn.BatchNorm1d(256)
self.fc15 = nn.Linear(256, 256)
self.fc16 = nn.Linear(256, 256)
self.fc_down1 = nn.Linear(256, 128)
self.bn2a = nn.BatchNorm1d(128)
self.fc21 = nn.Linear(128, 128)
self.fc22 = nn.Linear(128, 128)
self.bn2b = nn.BatchNorm1d(128)
self.fc23 = nn.Linear(128, 128)
self.fc24 = nn.Linear(128, 128)
self.bn2c = nn.BatchNorm1d(128)
self.fc25 = nn.Linear(128, 128)
self.fc26 = nn.Linear(128, 128)
self.fc_down2 = nn.Linear(128, 64)
self.bn3a = nn.BatchNorm1d(64)
self.fc31 = nn.Linear(64, 64)
self.fc32 = nn.Linear(64, 64)
self.bn3b = nn.BatchNorm1d(64)
self.fc33 = nn.Linear(64, 64)
self.fc34 = nn.Linear(64, 64)
self.bn3c = nn.BatchNorm1d(64)
self.fc35 = nn.Linear(64, 64)
self.fc36 = nn.Linear(64, 64)
self.fc4 = nn.Linear(64, 10)
#self.drop1 = nn.Dropout2d(0.5)
def forward(self, x):
| x_ = F.relu(self.resnet(x))
x = self.bn1a(x_)
x = F.relu(self.fc11(x))
x = F.relu(self.fc12(x))
x_ = torch.add(x, x_)
x = self.bn1b(x_)
x = F.relu(self.fc13(x))
x = F.relu(self.fc14(x))
x_ = torch.add(x, x_)
x = self.bn1c(x_)
x = F.relu(self.fc15(x))
x = F.relu(self.fc16(x))
x_ = self.fc_down1(torch.add(x, x_))
x = self.bn2a(x_)
x = F.relu(self.fc21(x))
x = F.relu(self.fc22(x))
x_ = torch.add(x, x_)
x = self.bn2b(x_)
x = F.relu(self.fc23(x))
x = F.relu(self.fc24(x))
x_ = torch.add(x, x_)
x = self.bn2c(x_)
x = F.relu(self.fc25(x))
x = F.relu(self.fc26(x))
x_ = self.fc_down2(torch.add(x, x_))
x = self.bn3a(x_)
x = F.relu(self.fc31(x))
x = F.relu(self.fc32(x))
x_ = torch.add(x, x_)
x = self.bn3b(x_)
x = F.relu(self.fc33(x))
x = F.relu(self.fc34(x))
x_ = torch.add(x, x_)
x = self.bn3c(x_)
x = F.relu(self.fc35(x))
x = F.relu(self.fc36(x))
x_ = torch.add(x, x_)
x = self.fc4(x_)
return F.log_softmax(x, dim=1) |
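A minimal smoke test for the model above (hypothetical, not part of the original file). The 3x3 stride-1 conv1 suits small inputs such as 32x32 CIFAR images, and the BatchNorm1d layers require a batch size greater than one in training mode:

# Hypothetical smoke test for CustomResnetV1; assumes the class is importable.
import torch

model = CustomResnetV1()
model.eval()  # eval mode also sidesteps BatchNorm1d's batch-size > 1 requirement
with torch.no_grad():
    x = torch.randn(2, 3, 32, 32)  # CIFAR-sized RGB batch
    out = model(x)
print(out.shape)  # torch.Size([2, 10]): log-probabilities over 10 classes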
|
getAllYs.ts |
export default getAllYs; | import { Point } from "../types";
// given collection of {x, y} points, get all y values
const getAllYs = (pts: Point[]): number[] => pts.map((pt) => pt.y); |
|
grpc_flow_limit.go | package grpc_proxy_middleware
import (
"errors"
"fmt"
"github.com/baxiang/go-gateway/dao"
"github.com/baxiang/go-gateway/pkg"
"google.golang.org/grpc"
"google.golang.org/grpc/peer"
"log"
"strings"
)
func | (serviceDetail *dao.ServiceDetail) func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
if serviceDetail.AccessControl.ServiceFlowLimit != 0 {
serviceLimiter, err := pkg.FlowLimiterHandler.GetLimiter(
pkg.FlowServicePrefix+serviceDetail.Info.ServiceName,
float64(serviceDetail.AccessControl.ServiceFlowLimit))
if err != nil {
return err
}
if !serviceLimiter.Allow() {
return fmt.Errorf("service flow limit %v", serviceDetail.AccessControl.ServiceFlowLimit)
}
}
peerCtx, ok := peer.FromContext(ss.Context())
if !ok {
return errors.New("peer not found with context")
}
peerAddr := peerCtx.Addr.String()
addrPos := strings.LastIndex(peerAddr, ":")
clientIP := peerAddr[0:addrPos]
if serviceDetail.AccessControl.ClientIPFlowLimit > 0 {
clientLimiter, err := pkg.FlowLimiterHandler.GetLimiter(
pkg.FlowServicePrefix+serviceDetail.Info.ServiceName+"_"+clientIP,
float64(serviceDetail.AccessControl.ClientIPFlowLimit))
if err != nil {
return err
}
if !clientLimiter.Allow() {
return fmt.Errorf("%v flow limit %v", clientIP, serviceDetail.AccessControl.ClientIPFlowLimit)
}
}
if err := handler(srv, ss); err != nil {
log.Printf("GrpcFlowLimitMiddleware failed with error %v\n", err)
return err
}
return nil
}
} | GrpcFlowLimitMiddleware |
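The two limiters above are looked up by key (service name, or service name plus client IP) and consulted with Allow() on every stream. A minimal token-bucket sketch of that idea (a hypothetical stand-in, not the gateway's actual pkg implementation):

# Minimal token-bucket sketch mirroring the GetLimiter/Allow usage above.
import time

class TokenBucket:
    def __init__(self, rate_per_sec, burst):
        self.rate = float(rate_per_sec)   # tokens refilled per second
        self.capacity = float(burst)      # maximum stored tokens
        self.tokens = float(burst)
        self.updated = time.monotonic()

    def allow(self):
        now = time.monotonic()
        self.tokens = min(self.capacity,
                          self.tokens + (now - self.updated) * self.rate)
        self.updated = now
        if self.tokens >= 1.0:
            self.tokens -= 1.0
            return True
        return False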
gen_type_activitystreams_like.go | // Code generated by astool. DO NOT EDIT.
package typelike
import (
"fmt"
vocab "github.com/go-fed/activity/streams/vocab"
"strings"
)
// Indicates that the actor likes, recommends or endorses the object. The target
// and origin typically have no defined meaning.
//
// Example 22 (https://www.w3.org/TR/activitystreams-vocabulary/#ex20-jsonld):
// {
// "actor": {
// "name": "Sally",
// "type": "Person"
// },
// "object": "http://example.org/notes/1",
// "summary": "Sally liked a note",
// "type": "Like"
// }
type ActivityStreamsLike struct {
ActivityStreamsActor vocab.ActivityStreamsActorProperty
ActivityStreamsAltitude vocab.ActivityStreamsAltitudeProperty
ActivityStreamsAttachment vocab.ActivityStreamsAttachmentProperty
ActivityStreamsAttributedTo vocab.ActivityStreamsAttributedToProperty
ActivityStreamsAudience vocab.ActivityStreamsAudienceProperty
ActivityStreamsBcc vocab.ActivityStreamsBccProperty
ActivityStreamsBto vocab.ActivityStreamsBtoProperty
ActivityStreamsCc vocab.ActivityStreamsCcProperty
ActivityStreamsContent vocab.ActivityStreamsContentProperty
ActivityStreamsContext vocab.ActivityStreamsContextProperty
ActivityStreamsDuration vocab.ActivityStreamsDurationProperty
ActivityStreamsEndTime vocab.ActivityStreamsEndTimeProperty
ActivityStreamsGenerator vocab.ActivityStreamsGeneratorProperty
ActivityStreamsIcon vocab.ActivityStreamsIconProperty
JSONLDId vocab.JSONLDIdProperty
ActivityStreamsImage vocab.ActivityStreamsImageProperty
ActivityStreamsInReplyTo vocab.ActivityStreamsInReplyToProperty
ActivityStreamsInstrument vocab.ActivityStreamsInstrumentProperty
ActivityStreamsLikes vocab.ActivityStreamsLikesProperty
ActivityStreamsLocation vocab.ActivityStreamsLocationProperty
ActivityStreamsMediaType vocab.ActivityStreamsMediaTypeProperty
ActivityStreamsName vocab.ActivityStreamsNameProperty
ActivityStreamsObject vocab.ActivityStreamsObjectProperty
ActivityStreamsOrigin vocab.ActivityStreamsOriginProperty
ActivityStreamsPreview vocab.ActivityStreamsPreviewProperty
ActivityStreamsPublished vocab.ActivityStreamsPublishedProperty
ActivityStreamsReplies vocab.ActivityStreamsRepliesProperty
ActivityStreamsResult vocab.ActivityStreamsResultProperty
ActivityStreamsSensitive vocab.ActivityStreamsSensitiveProperty
ActivityStreamsShares vocab.ActivityStreamsSharesProperty
ActivityStreamsSource vocab.ActivityStreamsSourceProperty
ActivityStreamsStartTime vocab.ActivityStreamsStartTimeProperty
ActivityStreamsSummary vocab.ActivityStreamsSummaryProperty
ActivityStreamsTag vocab.ActivityStreamsTagProperty
ActivityStreamsTarget vocab.ActivityStreamsTargetProperty
ForgeFedTeam vocab.ForgeFedTeamProperty
ForgeFedTicketsTrackedBy vocab.ForgeFedTicketsTrackedByProperty
ActivityStreamsTo vocab.ActivityStreamsToProperty
ForgeFedTracksTicketsFor vocab.ForgeFedTracksTicketsForProperty
JSONLDType vocab.JSONLDTypeProperty
ActivityStreamsUpdated vocab.ActivityStreamsUpdatedProperty
ActivityStreamsUrl vocab.ActivityStreamsUrlProperty
alias string
unknown map[string]interface{}
}
// ActivityStreamsLikeExtends returns true if the Like type extends from the other
// type.
func | (other vocab.Type) bool {
extensions := []string{"Activity", "Object"}
for _, ext := range extensions {
if ext == other.GetTypeName() {
return true
}
}
return false
}
// DeserializeLike creates a Like from a map representation that has been
// unmarshalled from a text or binary format.
func DeserializeLike(m map[string]interface{}, aliasMap map[string]string) (*ActivityStreamsLike, error) {
alias := ""
aliasPrefix := ""
if a, ok := aliasMap["https://www.w3.org/ns/activitystreams"]; ok {
alias = a
aliasPrefix = a + ":"
}
this := &ActivityStreamsLike{
alias: alias,
unknown: make(map[string]interface{}),
}
if typeValue, ok := m["type"]; !ok {
return nil, fmt.Errorf("no \"type\" property in map")
} else if typeString, ok := typeValue.(string); ok {
typeName := strings.TrimPrefix(typeString, aliasPrefix)
if typeName != "Like" {
return nil, fmt.Errorf("\"type\" property is not of %q type: %s", "Like", typeName)
}
// Fall through, success in finding a proper Type
} else if arrType, ok := typeValue.([]interface{}); ok {
found := false
for _, elemVal := range arrType {
if typeString, ok := elemVal.(string); ok && strings.TrimPrefix(typeString, aliasPrefix) == "Like" {
found = true
break
}
}
if !found {
return nil, fmt.Errorf("could not find a \"type\" property of value %q", "Like")
}
// Fall through, success in finding a proper Type
} else {
return nil, fmt.Errorf("\"type\" property is unrecognized type: %T", typeValue)
}
// Begin: Known property deserialization
if p, err := mgr.DeserializeActorPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsActor = p
}
if p, err := mgr.DeserializeAltitudePropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsAltitude = p
}
if p, err := mgr.DeserializeAttachmentPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsAttachment = p
}
if p, err := mgr.DeserializeAttributedToPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsAttributedTo = p
}
if p, err := mgr.DeserializeAudiencePropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsAudience = p
}
if p, err := mgr.DeserializeBccPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsBcc = p
}
if p, err := mgr.DeserializeBtoPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsBto = p
}
if p, err := mgr.DeserializeCcPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsCc = p
}
if p, err := mgr.DeserializeContentPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsContent = p
}
if p, err := mgr.DeserializeContextPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsContext = p
}
if p, err := mgr.DeserializeDurationPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsDuration = p
}
if p, err := mgr.DeserializeEndTimePropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsEndTime = p
}
if p, err := mgr.DeserializeGeneratorPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsGenerator = p
}
if p, err := mgr.DeserializeIconPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsIcon = p
}
if p, err := mgr.DeserializeIdPropertyJSONLD()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.JSONLDId = p
}
if p, err := mgr.DeserializeImagePropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsImage = p
}
if p, err := mgr.DeserializeInReplyToPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsInReplyTo = p
}
if p, err := mgr.DeserializeInstrumentPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsInstrument = p
}
if p, err := mgr.DeserializeLikesPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsLikes = p
}
if p, err := mgr.DeserializeLocationPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsLocation = p
}
if p, err := mgr.DeserializeMediaTypePropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsMediaType = p
}
if p, err := mgr.DeserializeNamePropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsName = p
}
if p, err := mgr.DeserializeObjectPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsObject = p
}
if p, err := mgr.DeserializeOriginPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsOrigin = p
}
if p, err := mgr.DeserializePreviewPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsPreview = p
}
if p, err := mgr.DeserializePublishedPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsPublished = p
}
if p, err := mgr.DeserializeRepliesPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsReplies = p
}
if p, err := mgr.DeserializeResultPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsResult = p
}
if p, err := mgr.DeserializeSensitivePropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsSensitive = p
}
if p, err := mgr.DeserializeSharesPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsShares = p
}
if p, err := mgr.DeserializeSourcePropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsSource = p
}
if p, err := mgr.DeserializeStartTimePropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsStartTime = p
}
if p, err := mgr.DeserializeSummaryPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsSummary = p
}
if p, err := mgr.DeserializeTagPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsTag = p
}
if p, err := mgr.DeserializeTargetPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsTarget = p
}
if p, err := mgr.DeserializeTeamPropertyForgeFed()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ForgeFedTeam = p
}
if p, err := mgr.DeserializeTicketsTrackedByPropertyForgeFed()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ForgeFedTicketsTrackedBy = p
}
if p, err := mgr.DeserializeToPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsTo = p
}
if p, err := mgr.DeserializeTracksTicketsForPropertyForgeFed()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ForgeFedTracksTicketsFor = p
}
if p, err := mgr.DeserializeTypePropertyJSONLD()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.JSONLDType = p
}
if p, err := mgr.DeserializeUpdatedPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsUpdated = p
}
if p, err := mgr.DeserializeUrlPropertyActivityStreams()(m, aliasMap); err != nil {
return nil, err
} else if p != nil {
this.ActivityStreamsUrl = p
}
// End: Known property deserialization
// Begin: Unknown deserialization
for k, v := range m {
// Begin: Code that ensures a property name is unknown
if k == "actor" {
continue
} else if k == "altitude" {
continue
} else if k == "attachment" {
continue
} else if k == "attributedTo" {
continue
} else if k == "audience" {
continue
} else if k == "bcc" {
continue
} else if k == "bto" {
continue
} else if k == "cc" {
continue
} else if k == "content" {
continue
} else if k == "contentMap" {
continue
} else if k == "context" {
continue
} else if k == "duration" {
continue
} else if k == "endTime" {
continue
} else if k == "generator" {
continue
} else if k == "icon" {
continue
} else if k == "id" {
continue
} else if k == "image" {
continue
} else if k == "inReplyTo" {
continue
} else if k == "instrument" {
continue
} else if k == "likes" {
continue
} else if k == "location" {
continue
} else if k == "mediaType" {
continue
} else if k == "name" {
continue
} else if k == "nameMap" {
continue
} else if k == "object" {
continue
} else if k == "origin" {
continue
} else if k == "preview" {
continue
} else if k == "published" {
continue
} else if k == "replies" {
continue
} else if k == "result" {
continue
} else if k == "sensitive" {
continue
} else if k == "shares" {
continue
} else if k == "source" {
continue
} else if k == "startTime" {
continue
} else if k == "summary" {
continue
} else if k == "summaryMap" {
continue
} else if k == "tag" {
continue
} else if k == "target" {
continue
} else if k == "team" {
continue
} else if k == "ticketsTrackedBy" {
continue
} else if k == "to" {
continue
} else if k == "tracksTicketsFor" {
continue
} else if k == "type" {
continue
} else if k == "updated" {
continue
} else if k == "url" {
continue
} // End: Code that ensures a property name is unknown
this.unknown[k] = v
}
// End: Unknown deserialization
return this, nil
}
// IsOrExtendsLike returns true if the other provided type is the Like type or
// extends from the Like type.
func IsOrExtendsLike(other vocab.Type) bool {
if other.GetTypeName() == "Like" {
return true
}
return LikeIsExtendedBy(other)
}
// LikeIsDisjointWith returns true if the other provided type is disjoint with the
// Like type.
func LikeIsDisjointWith(other vocab.Type) bool {
disjointWith := []string{"Link", "Mention"}
for _, disjoint := range disjointWith {
if disjoint == other.GetTypeName() {
return true
}
}
return false
}
// LikeIsExtendedBy returns true if the other provided type extends from the Like
// type. Note that it returns false if the types are the same; see the
// "IsOrExtendsLike" variant instead.
func LikeIsExtendedBy(other vocab.Type) bool {
// Shortcut implementation: is not extended by anything.
return false
}
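// Usage sketch (illustrative, not part of the generated file): these helpers
// answer type-hierarchy questions without type assertions. Here t is any
// vocab.Type obtained elsewhere:
//
//   if IsOrExtendsLike(t) {
//       // handle t as a Like (or an extension of it)
//   }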
// NewActivityStreamsLike creates a new Like type
func NewActivityStreamsLike() *ActivityStreamsLike {
typeProp := typePropertyConstructor()
typeProp.AppendXMLSchemaString("Like")
return &ActivityStreamsLike{
JSONLDType: typeProp,
alias: "",
unknown: make(map[string]interface{}),
}
}
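// Usage sketch (illustrative): the constructor pre-populates the JSON-LD
// "type" property, so a freshly built value already serializes with
// "type": "Like":
//
//   like := NewActivityStreamsLike()
//   _ = like.GetTypeName() // "Like"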
// GetActivityStreamsActor returns the "actor" property if it exists, and nil
// otherwise.
func (this ActivityStreamsLike) GetActivityStreamsActor() vocab.ActivityStreamsActorProperty {
return this.ActivityStreamsActor
}
// GetActivityStreamsAltitude returns the "altitude" property if it exists, and
// nil otherwise.
func (this ActivityStreamsLike) GetActivityStreamsAltitude() vocab.ActivityStreamsAltitudeProperty {
return this.ActivityStreamsAltitude
}
// GetActivityStreamsAttachment returns the "attachment" property if it exists,
// and nil otherwise.
func (this ActivityStreamsLike) GetActivityStreamsAttachment() vocab.ActivityStreamsAttachmentProperty {
return this.ActivityStreamsAttachment
}
// GetActivityStreamsAttributedTo returns the "attributedTo" property if it
// exists, and nil otherwise.
func (this ActivityStreamsLike) GetActivityStreamsAttributedTo() vocab.ActivityStreamsAttributedToProperty {
return this.ActivityStreamsAttributedTo
}
// GetActivityStreamsAudience returns the "audience" property if it exists, and
// nil otherwise.
func (this ActivityStreamsLike) GetActivityStreamsAudience() vocab.ActivityStreamsAudienceProperty {
return this.ActivityStreamsAudience
}
// GetActivityStreamsBcc returns the "bcc" property if it exists, and nil
// otherwise.
func (this ActivityStreamsLike) GetActivityStreamsBcc() vocab.ActivityStreamsBccProperty {
return this.ActivityStreamsBcc
}
// GetActivityStreamsBto returns the "bto" property if it exists, and nil
// otherwise.
func (this ActivityStreamsLike) GetActivityStreamsBto() vocab.ActivityStreamsBtoProperty {
return this.ActivityStreamsBto
}
// GetActivityStreamsCc returns the "cc" property if it exists, and nil otherwise.
func (this ActivityStreamsLike) GetActivityStreamsCc() vocab.ActivityStreamsCcProperty {
return this.ActivityStreamsCc
}
// GetActivityStreamsContent returns the "content" property if it exists, and nil
// otherwise.
func (this ActivityStreamsLike) GetActivityStreamsContent() vocab.ActivityStreamsContentProperty {
return this.ActivityStreamsContent
}
// GetActivityStreamsContext returns the "context" property if it exists, and nil
// otherwise.
func (this ActivityStreamsLike) GetActivityStreamsContext() vocab.ActivityStreamsContextProperty {
return this.ActivityStreamsContext
}
// GetActivityStreamsDuration returns the "duration" property if it exists, and
// nil otherwise.
func (this ActivityStreamsLike) GetActivityStreamsDuration() vocab.ActivityStreamsDurationProperty {
return this.ActivityStreamsDuration
}
// GetActivityStreamsEndTime returns the "endTime" property if it exists, and nil
// otherwise.
func (this ActivityStreamsLike) GetActivityStreamsEndTime() vocab.ActivityStreamsEndTimeProperty {
return this.ActivityStreamsEndTime
}
// GetActivityStreamsGenerator returns the "generator" property if it exists, and
// nil otherwise.
func (this ActivityStreamsLike) GetActivityStreamsGenerator() vocab.ActivityStreamsGeneratorProperty {
return this.ActivityStreamsGenerator
}
// GetActivityStreamsIcon returns the "icon" property if it exists, and nil
// otherwise.
func (this ActivityStreamsLike) GetActivityStreamsIcon() vocab.ActivityStreamsIconProperty {
return this.ActivityStreamsIcon
}
// GetActivityStreamsImage returns the "image" property if it exists, and nil
// otherwise.
func (this ActivityStreamsLike) GetActivityStreamsImage() vocab.ActivityStreamsImageProperty {
return this.ActivityStreamsImage
}
// GetActivityStreamsInReplyTo returns the "inReplyTo" property if it exists, and
// nil otherwise.
func (this ActivityStreamsLike) GetActivityStreamsInReplyTo() vocab.ActivityStreamsInReplyToProperty {
return this.ActivityStreamsInReplyTo
}
// GetActivityStreamsInstrument returns the "instrument" property if it exists,
// and nil otherwise.
func (this ActivityStreamsLike) GetActivityStreamsInstrument() vocab.ActivityStreamsInstrumentProperty {
return this.ActivityStreamsInstrument
}
// GetActivityStreamsLikes returns the "likes" property if it exists, and nil
// otherwise.
func (this ActivityStreamsLike) GetActivityStreamsLikes() vocab.ActivityStreamsLikesProperty {
return this.ActivityStreamsLikes
}
// GetActivityStreamsLocation returns the "location" property if it exists, and
// nil otherwise.
func (this ActivityStreamsLike) GetActivityStreamsLocation() vocab.ActivityStreamsLocationProperty {
return this.ActivityStreamsLocation
}
// GetActivityStreamsMediaType returns the "mediaType" property if it exists, and
// nil otherwise.
func (this ActivityStreamsLike) GetActivityStreamsMediaType() vocab.ActivityStreamsMediaTypeProperty {
return this.ActivityStreamsMediaType
}
// GetActivityStreamsName returns the "name" property if it exists, and nil
// otherwise.
func (this ActivityStreamsLike) GetActivityStreamsName() vocab.ActivityStreamsNameProperty {
return this.ActivityStreamsName
}
// GetActivityStreamsObject returns the "object" property if it exists, and nil
// otherwise.
func (this ActivityStreamsLike) GetActivityStreamsObject() vocab.ActivityStreamsObjectProperty {
return this.ActivityStreamsObject
}
// GetActivityStreamsOrigin returns the "origin" property if it exists, and nil
// otherwise.
func (this ActivityStreamsLike) GetActivityStreamsOrigin() vocab.ActivityStreamsOriginProperty {
return this.ActivityStreamsOrigin
}
// GetActivityStreamsPreview returns the "preview" property if it exists, and nil
// otherwise.
func (this ActivityStreamsLike) GetActivityStreamsPreview() vocab.ActivityStreamsPreviewProperty {
return this.ActivityStreamsPreview
}
// GetActivityStreamsPublished returns the "published" property if it exists, and
// nil otherwise.
func (this ActivityStreamsLike) GetActivityStreamsPublished() vocab.ActivityStreamsPublishedProperty {
return this.ActivityStreamsPublished
}
// GetActivityStreamsReplies returns the "replies" property if it exists, and nil
// otherwise.
func (this ActivityStreamsLike) GetActivityStreamsReplies() vocab.ActivityStreamsRepliesProperty {
return this.ActivityStreamsReplies
}
// GetActivityStreamsResult returns the "result" property if it exists, and nil
// otherwise.
func (this ActivityStreamsLike) GetActivityStreamsResult() vocab.ActivityStreamsResultProperty {
return this.ActivityStreamsResult
}
// GetActivityStreamsSensitive returns the "sensitive" property if it exists, and
// nil otherwise.
func (this ActivityStreamsLike) GetActivityStreamsSensitive() vocab.ActivityStreamsSensitiveProperty {
return this.ActivityStreamsSensitive
}
// GetActivityStreamsShares returns the "shares" property if it exists, and nil
// otherwise.
func (this ActivityStreamsLike) GetActivityStreamsShares() vocab.ActivityStreamsSharesProperty {
return this.ActivityStreamsShares
}
// GetActivityStreamsSource returns the "source" property if it exists, and nil
// otherwise.
func (this ActivityStreamsLike) GetActivityStreamsSource() vocab.ActivityStreamsSourceProperty {
return this.ActivityStreamsSource
}
// GetActivityStreamsStartTime returns the "startTime" property if it exists, and
// nil otherwise.
func (this ActivityStreamsLike) GetActivityStreamsStartTime() vocab.ActivityStreamsStartTimeProperty {
return this.ActivityStreamsStartTime
}
// GetActivityStreamsSummary returns the "summary" property if it exists, and nil
// otherwise.
func (this ActivityStreamsLike) GetActivityStreamsSummary() vocab.ActivityStreamsSummaryProperty {
return this.ActivityStreamsSummary
}
// GetActivityStreamsTag returns the "tag" property if it exists, and nil
// otherwise.
func (this ActivityStreamsLike) GetActivityStreamsTag() vocab.ActivityStreamsTagProperty {
return this.ActivityStreamsTag
}
// GetActivityStreamsTarget returns the "target" property if it exists, and nil
// otherwise.
func (this ActivityStreamsLike) GetActivityStreamsTarget() vocab.ActivityStreamsTargetProperty {
return this.ActivityStreamsTarget
}
// GetActivityStreamsTo returns the "to" property if it exists, and nil otherwise.
func (this ActivityStreamsLike) GetActivityStreamsTo() vocab.ActivityStreamsToProperty {
return this.ActivityStreamsTo
}
// GetActivityStreamsUpdated returns the "updated" property if it exists, and nil
// otherwise.
func (this ActivityStreamsLike) GetActivityStreamsUpdated() vocab.ActivityStreamsUpdatedProperty {
return this.ActivityStreamsUpdated
}
// GetActivityStreamsUrl returns the "url" property if it exists, and nil
// otherwise.
func (this ActivityStreamsLike) GetActivityStreamsUrl() vocab.ActivityStreamsUrlProperty {
return this.ActivityStreamsUrl
}
// GetForgeFedTeam returns the "team" property if it exists, and nil otherwise.
func (this ActivityStreamsLike) GetForgeFedTeam() vocab.ForgeFedTeamProperty {
return this.ForgeFedTeam
}
// GetForgeFedTicketsTrackedBy returns the "ticketsTrackedBy" property if it
// exists, and nil otherwise.
func (this ActivityStreamsLike) GetForgeFedTicketsTrackedBy() vocab.ForgeFedTicketsTrackedByProperty {
return this.ForgeFedTicketsTrackedBy
}
// GetForgeFedTracksTicketsFor returns the "tracksTicketsFor" property if it
// exists, and nil otherwise.
func (this ActivityStreamsLike) GetForgeFedTracksTicketsFor() vocab.ForgeFedTracksTicketsForProperty {
return this.ForgeFedTracksTicketsFor
}
// GetJSONLDId returns the "id" property if it exists, and nil otherwise.
func (this ActivityStreamsLike) GetJSONLDId() vocab.JSONLDIdProperty {
return this.JSONLDId
}
// GetJSONLDType returns the "type" property if it exists, and nil otherwise.
func (this ActivityStreamsLike) GetJSONLDType() vocab.JSONLDTypeProperty {
return this.JSONLDType
}
// GetTypeName returns the name of this type.
func (this ActivityStreamsLike) GetTypeName() string {
return "Like"
}
// GetUnknownProperties returns the unknown properties for the Like type. Note
// that this should not be used by app developers. It is only used to help
// determine which implementation is LessThan the other. Developers who are
// creating a different implementation of this type's interface can use this
// method in their LessThan implementation, but routine ActivityPub
// applications should not use this to bypass the code generation tool.
func (this ActivityStreamsLike) GetUnknownProperties() map[string]interface{} {
return this.unknown
}
// IsExtending returns true if the Like type extends from the other type.
func (this ActivityStreamsLike) IsExtending(other vocab.Type) bool {
return ActivityStreamsLikeExtends(other)
}
// JSONLDContext returns the JSONLD URIs required in the context string for this
// type and the specific properties that are set. The value in the map is the
// alias used to import the type and its properties.
func (this ActivityStreamsLike) JSONLDContext() map[string]string {
m := map[string]string{"https://www.w3.org/ns/activitystreams": this.alias}
m = this.helperJSONLDContext(this.ActivityStreamsActor, m)
m = this.helperJSONLDContext(this.ActivityStreamsAltitude, m)
m = this.helperJSONLDContext(this.ActivityStreamsAttachment, m)
m = this.helperJSONLDContext(this.ActivityStreamsAttributedTo, m)
m = this.helperJSONLDContext(this.ActivityStreamsAudience, m)
m = this.helperJSONLDContext(this.ActivityStreamsBcc, m)
m = this.helperJSONLDContext(this.ActivityStreamsBto, m)
m = this.helperJSONLDContext(this.ActivityStreamsCc, m)
m = this.helperJSONLDContext(this.ActivityStreamsContent, m)
m = this.helperJSONLDContext(this.ActivityStreamsContext, m)
m = this.helperJSONLDContext(this.ActivityStreamsDuration, m)
m = this.helperJSONLDContext(this.ActivityStreamsEndTime, m)
m = this.helperJSONLDContext(this.ActivityStreamsGenerator, m)
m = this.helperJSONLDContext(this.ActivityStreamsIcon, m)
m = this.helperJSONLDContext(this.JSONLDId, m)
m = this.helperJSONLDContext(this.ActivityStreamsImage, m)
m = this.helperJSONLDContext(this.ActivityStreamsInReplyTo, m)
m = this.helperJSONLDContext(this.ActivityStreamsInstrument, m)
m = this.helperJSONLDContext(this.ActivityStreamsLikes, m)
m = this.helperJSONLDContext(this.ActivityStreamsLocation, m)
m = this.helperJSONLDContext(this.ActivityStreamsMediaType, m)
m = this.helperJSONLDContext(this.ActivityStreamsName, m)
m = this.helperJSONLDContext(this.ActivityStreamsObject, m)
m = this.helperJSONLDContext(this.ActivityStreamsOrigin, m)
m = this.helperJSONLDContext(this.ActivityStreamsPreview, m)
m = this.helperJSONLDContext(this.ActivityStreamsPublished, m)
m = this.helperJSONLDContext(this.ActivityStreamsReplies, m)
m = this.helperJSONLDContext(this.ActivityStreamsResult, m)
m = this.helperJSONLDContext(this.ActivityStreamsSensitive, m)
m = this.helperJSONLDContext(this.ActivityStreamsShares, m)
m = this.helperJSONLDContext(this.ActivityStreamsSource, m)
m = this.helperJSONLDContext(this.ActivityStreamsStartTime, m)
m = this.helperJSONLDContext(this.ActivityStreamsSummary, m)
m = this.helperJSONLDContext(this.ActivityStreamsTag, m)
m = this.helperJSONLDContext(this.ActivityStreamsTarget, m)
m = this.helperJSONLDContext(this.ForgeFedTeam, m)
m = this.helperJSONLDContext(this.ForgeFedTicketsTrackedBy, m)
m = this.helperJSONLDContext(this.ActivityStreamsTo, m)
m = this.helperJSONLDContext(this.ForgeFedTracksTicketsFor, m)
m = this.helperJSONLDContext(this.JSONLDType, m)
m = this.helperJSONLDContext(this.ActivityStreamsUpdated, m)
m = this.helperJSONLDContext(this.ActivityStreamsUrl, m)
return m
}
// LessThan computes if this Like is lesser, with an arbitrary but stable
// determination.
func (this ActivityStreamsLike) LessThan(o vocab.ActivityStreamsLike) bool {
// Begin: Compare known properties
// Compare property "actor"
if lhs, rhs := this.ActivityStreamsActor, o.GetActivityStreamsActor(); lhs != nil && rhs != nil {
if lhs.LessThan(rhs) {
return true
} else if rhs.LessThan(lhs) {
return false
}
} else if lhs == nil && rhs != nil {
// Nil is less than anything else
return true
} else if lhs != nil && rhs == nil {
// Anything else is greater than nil
return false
} // Else: Both are nil
// Compare property "altitude"
if lhs, rhs := this.ActivityStreamsAltitude, o.GetActivityStreamsAltitude(); lhs != nil && rhs != nil {
if lhs.LessThan(rhs) {
return true
} else if rhs.LessThan(lhs) {
return false
}
} else if lhs == nil && rhs != nil {
// Nil is less than anything else
return true
} else if lhs != nil && rhs == nil {
// Anything else is greater than nil
return false
} // Else: Both are nil
// Compare property "attachment"
if lhs, rhs := this.ActivityStreamsAttachment, o.GetActivityStreamsAttachment(); lhs != nil && rhs != nil {
if lhs.LessThan(rhs) {
return true
} else if rhs.LessThan(lhs) {
return false
}
} else if lhs == nil && rhs != nil {
// Nil is less than anything else
return true
} else if lhs != nil && rhs == nil {
// Anything else is greater than nil
return false
} // Else: Both are nil
// Compare property "attributedTo"
if lhs, rhs := this.ActivityStreamsAttributedTo, o.GetActivityStreamsAttributedTo(); lhs != nil && rhs != nil {
if lhs.LessThan(rhs) {
return true
} else if rhs.LessThan(lhs) {
return false
}
} else if lhs == nil && rhs != nil {
// Nil is less than anything else
return true
} else if lhs != nil && rhs == nil {
// Anything else is greater than nil
return false
} // Else: Both are nil
// Compare property "audience"
if lhs, rhs := this.ActivityStreamsAudience, o.GetActivityStreamsAudience(); lhs != nil && rhs != nil {
if lhs.LessThan(rhs) {
return true
} else if rhs.LessThan(lhs) {
return false
}
} else if lhs == nil && rhs != nil {
// Nil is less than anything else
return true
} else if lhs != nil && rhs == nil {
// Anything else is greater than nil
return false
} // Else: Both are nil
// Compare property "bcc"
if lhs, rhs := this.ActivityStreamsBcc, o.GetActivityStreamsBcc(); lhs != nil && rhs != nil {
if lhs.LessThan(rhs) {
return true
} else if rhs.LessThan(lhs) {
return false
}
} else if lhs == nil && rhs != nil {
// Nil is less than anything else
return true
} else if lhs != nil && rhs == nil {
// Anything else is greater than nil
return false
} // Else: Both are nil
// Compare property "bto"
if lhs, rhs := this.ActivityStreamsBto, o.GetActivityStreamsBto(); lhs != nil && rhs != nil {
if lhs.LessThan(rhs) {
return true
} else if rhs.LessThan(lhs) {
return false
}
} else if lhs == nil && rhs != nil {
// Nil is less than anything else
return true
} else if lhs != nil && rhs == nil {
// Anything else is greater than nil
return false
} // Else: Both are nil
// Compare property "cc"
if lhs, rhs := this.ActivityStreamsCc, o.GetActivityStreamsCc(); lhs != nil && rhs != nil {
if lhs.LessThan(rhs) {
return true
} else if rhs.LessThan(lhs) {
return false
}
} else if lhs == nil && rhs != nil {
// Nil is less than anything else
return true
} else if lhs != nil && rhs == nil {
// Anything else is greater than nil
return false
} // Else: Both are nil
// Compare property "content"
if lhs, rhs := this.ActivityStreamsContent, o.GetActivityStreamsContent(); lhs != nil && rhs != nil {
if lhs.LessThan(rhs) {
return true
} else if rhs.LessThan(lhs) {
return false
}
} else if lhs == nil && rhs != nil {
// Nil is less than anything else
return true
} else if lhs != nil && rhs == nil {
// Anything else is greater than nil
return false
} // Else: Both are nil
// Compare property "context"
if lhs, rhs := this.ActivityStreamsContext, o.GetActivityStreamsContext(); lhs != nil && rhs != nil {
if lhs.LessThan(rhs) {
return true
} else if rhs.LessThan(lhs) {
return false
}
} else if lhs == nil && rhs != nil {
// Nil is less than anything else
return true
} else if lhs != nil && rhs == nil {
// Anything else is greater than nil
return false
} // Else: Both are nil
// Compare property "duration"
if lhs, rhs := this.ActivityStreamsDuration, o.GetActivityStreamsDuration(); lhs != nil && rhs != nil {
if lhs.LessThan(rhs) {
return true
} else if rhs.LessThan(lhs) {
return false
}
} else if lhs == nil && rhs != nil {
// Nil is less than anything else
return true
} else if lhs != nil && rhs == nil {
// Anything else is greater than nil
return false
} // Else: Both are nil
// Compare property "endTime"
if lhs, rhs := this.ActivityStreamsEndTime, o.GetActivityStreamsEndTime(); lhs != nil && rhs != nil {
if lhs.LessThan(rhs) {
return true
} else if rhs.LessThan(lhs) {
return false
}
} else if lhs == nil && rhs != nil {
// Nil is less than anything else
return true
} else if lhs != nil && rhs == nil {
// Anything else is greater than nil
return false
} // Else: Both are nil
// Compare property "generator"
if lhs, rhs := this.ActivityStreamsGenerator, o.GetActivityStreamsGenerator(); lhs != nil && rhs != nil {
if lhs.LessThan(rhs) {
return true
} else if rhs.LessThan(lhs) {
return false
}
} else if lhs == nil && rhs != nil {
// Nil is less than anything else
return true
} else if lhs != nil && rhs == nil {
// Anything else is greater than nil
return false
} // Else: Both are nil
// Compare property "icon"
if lhs, rhs := this.ActivityStreamsIcon, o.GetActivityStreamsIcon(); lhs != nil && rhs != nil {
if lhs.LessThan(rhs) {
return true
} else if rhs.LessThan(lhs) {
return false
}
} else if lhs == nil && rhs != nil {
// Nil is less than anything else
return true
} else if lhs != nil && rhs == nil {
// Anything else is greater than nil
return false
} // Else: Both are nil
// Compare property "id"
if lhs, rhs := this.JSONLDId, o.GetJSONLDId(); lhs != nil && rhs != nil {
if lhs.LessThan(rhs) {
return true
} else if rhs.LessThan(lhs) {
return false
}
} else if lhs == nil && rhs != nil {
// Nil is less than anything else
return true
} else if lhs != nil && rhs == nil {
// Anything else is greater than nil
return false
} // Else: Both are nil
// Compare property "image"
if lhs, rhs := this.ActivityStreamsImage, o.GetActivityStreamsImage(); lhs != nil && rhs != nil {
if lhs.LessThan(rhs) {
return true
} else if rhs.LessThan(lhs) {
return false
}
} else if lhs == nil && rhs != nil {
// Nil is less than anything else
return true
} else if lhs != nil && rhs == nil {
// Anything else is greater than nil
return false
} // Else: Both are nil
// Compare property "inReplyTo"
if lhs, rhs := this.ActivityStreamsInReplyTo, o.GetActivityStreamsInReplyTo(); lhs != nil && rhs != nil {
if lhs.LessThan(rhs) {
return true
} else if rhs.LessThan(lhs) {
return false
}
} else if lhs == nil && rhs != nil {
// Nil is less than anything else
return true
} else if lhs != nil && rhs == nil {
// Anything else is greater than nil
return false
} // Else: Both are nil
// Compare property "instrument"
if lhs, rhs := this.ActivityStreamsInstrument, o.GetActivityStreamsInstrument(); lhs != nil && rhs != nil {
if lhs.LessThan(rhs) {
return true
} else if rhs.LessThan(lhs) {
return false
}
} else if lhs == nil && rhs != nil {
// Nil is less than anything else
return true
} else if lhs != nil && rhs == nil {
// Anything else is greater than nil
return false
} // Else: Both are nil
// Compare property "likes"
if lhs, rhs := this.ActivityStreamsLikes, o.GetActivityStreamsLikes(); lhs != nil && rhs != nil {
if lhs.LessThan(rhs) {
return true
} else if rhs.LessThan(lhs) {
return false
}
} else if lhs == nil && rhs != nil {
// Nil is less than anything else
return true
} else if lhs != nil && rhs == nil {
// Anything else is greater than nil
return false
} // Else: Both are nil
// Compare property "location"
if lhs, rhs := this.ActivityStreamsLocation, o.GetActivityStreamsLocation(); lhs != nil && rhs != nil {
if lhs.LessThan(rhs) {
return true
} else if rhs.LessThan(lhs) {
return false
}
} else if lhs == nil && rhs != nil {
// Nil is less than anything else
return true
} else if lhs != nil && rhs == nil {
// Anything else is greater than nil
return false
} // Else: Both are nil
// Compare property "mediaType"
if lhs, rhs := this.ActivityStreamsMediaType, o.GetActivityStreamsMediaType(); lhs != nil && rhs != nil {
if lhs.LessThan(rhs) {
return true
} else if rhs.LessThan(lhs) {
return false
}
} else if lhs == nil && rhs != nil {
// Nil is less than anything else
return true
} else if lhs != nil && rhs == nil {
// Anything else is greater than nil
return false
} // Else: Both are nil
// Compare property "name"
if lhs, rhs := this.ActivityStreamsName, o.GetActivityStreamsName(); lhs != nil && rhs != nil {
if lhs.LessThan(rhs) {
return true
} else if rhs.LessThan(lhs) {
return false
}
} else if lhs == nil && rhs != nil {
// Nil is less than anything else
return true
} else if lhs != nil && rhs == nil {
// Anything else is greater than nil
return false
} // Else: Both are nil
// Compare property "object"
if lhs, rhs := this.ActivityStreamsObject, o.GetActivityStreamsObject(); lhs != nil && rhs != nil {
if lhs.LessThan(rhs) {
return true
} else if rhs.LessThan(lhs) {
return false
}
} else if lhs == nil && rhs != nil {
// Nil is less than anything else
return true
} else if lhs != nil && rhs == nil {
// Anything else is greater than nil
return false
} // Else: Both are nil
// Compare property "origin"
if lhs, rhs := this.ActivityStreamsOrigin, o.GetActivityStreamsOrigin(); lhs != nil && rhs != nil {
if lhs.LessThan(rhs) {
return true
} else if rhs.LessThan(lhs) {
return false
}
} else if lhs == nil && rhs != nil {
// Nil is less than anything else
return true
} else if lhs != nil && rhs == nil {
// Anything else is greater than nil
return false
} // Else: Both are nil
// Compare property "preview"
if lhs, rhs := this.ActivityStreamsPreview, o.GetActivityStreamsPreview(); lhs != nil && rhs != nil {
if lhs.LessThan(rhs) {
return true
} else if rhs.LessThan(lhs) {
return false
}
} else if lhs == nil && rhs != nil {
// Nil is less than anything else
return true
} else if lhs != nil && rhs == nil {
// Anything else is greater than nil
return false
} // Else: Both are nil
// Compare property "published"
if lhs, rhs := this.ActivityStreamsPublished, o.GetActivityStreamsPublished(); lhs != nil && rhs != nil {
if lhs.LessThan(rhs) {
return true
} else if rhs.LessThan(lhs) {
return false
}
} else if lhs == nil && rhs != nil {
// Nil is less than anything else
return true
} else if lhs != nil && rhs == nil {
// Anything else is greater than nil
return false
} // Else: Both are nil
// Compare property "replies"
if lhs, rhs := this.ActivityStreamsReplies, o.GetActivityStreamsReplies(); lhs != nil && rhs != nil {
if lhs.LessThan(rhs) {
return true
} else if rhs.LessThan(lhs) {
return false
}
} else if lhs == nil && rhs != nil {
// Nil is less than anything else
return true
} else if lhs != nil && rhs == nil {
// Anything else is greater than nil
return false
} // Else: Both are nil
// Compare property "result"
if lhs, rhs := this.ActivityStreamsResult, o.GetActivityStreamsResult(); lhs != nil && rhs != nil {
if lhs.LessThan(rhs) {
return true
} else if rhs.LessThan(lhs) {
return false
}
} else if lhs == nil && rhs != nil {
// Nil is less than anything else
return true
} else if lhs != nil && rhs == nil {
// Anything else is greater than nil
return false
} // Else: Both are nil
// Compare property "sensitive"
if lhs, rhs := this.ActivityStreamsSensitive, o.GetActivityStreamsSensitive(); lhs != nil && rhs != nil {
if lhs.LessThan(rhs) {
return true
} else if rhs.LessThan(lhs) {
return false
}
} else if lhs == nil && rhs != nil {
// Nil is less than anything else
return true
} else if lhs != nil && rhs == nil {
// Anything else is greater than nil
return false
} // Else: Both are nil
// Compare property "shares"
if lhs, rhs := this.ActivityStreamsShares, o.GetActivityStreamsShares(); lhs != nil && rhs != nil {
if lhs.LessThan(rhs) {
return true
} else if rhs.LessThan(lhs) {
return false
}
} else if lhs == nil && rhs != nil {
// Nil is less than anything else
return true
} else if lhs != nil && rhs == nil {
// Anything else is greater than nil
return false
} // Else: Both are nil
// Compare property "source"
if lhs, rhs := this.ActivityStreamsSource, o.GetActivityStreamsSource(); lhs != nil && rhs != nil {
if lhs.LessThan(rhs) {
return true
} else if rhs.LessThan(lhs) {
return false
}
} else if lhs == nil && rhs != nil {
// Nil is less than anything else
return true
} else if lhs != nil && rhs == nil {
// Anything else is greater than nil
return false
} // Else: Both are nil
// Compare property "startTime"
if lhs, rhs := this.ActivityStreamsStartTime, o.GetActivityStreamsStartTime(); lhs != nil && rhs != nil {
if lhs.LessThan(rhs) {
return true
} else if rhs.LessThan(lhs) {
return false
}
} else if lhs == nil && rhs != nil {
// Nil is less than anything else
return true
} else if lhs != nil && rhs == nil {
// Anything else is greater than nil
return false
} // Else: Both are nil
// Compare property "summary"
if lhs, rhs := this.ActivityStreamsSummary, o.GetActivityStreamsSummary(); lhs != nil && rhs != nil {
if lhs.LessThan(rhs) {
return true
} else if rhs.LessThan(lhs) {
return false
}
} else if lhs == nil && rhs != nil {
// Nil is less than anything else
return true
} else if lhs != nil && rhs == nil {
// Anything else is greater than nil
return false
} // Else: Both are nil
// Compare property "tag"
if lhs, rhs := this.ActivityStreamsTag, o.GetActivityStreamsTag(); lhs != nil && rhs != nil {
if lhs.LessThan(rhs) {
return true
} else if rhs.LessThan(lhs) {
return false
}
} else if lhs == nil && rhs != nil {
// Nil is less than anything else
return true
} else if lhs != nil && rhs == nil {
// Anything else is greater than nil
return false
} // Else: Both are nil
// Compare property "target"
if lhs, rhs := this.ActivityStreamsTarget, o.GetActivityStreamsTarget(); lhs != nil && rhs != nil {
if lhs.LessThan(rhs) {
return true
} else if rhs.LessThan(lhs) {
return false
}
} else if lhs == nil && rhs != nil {
// Nil is less than anything else
return true
} else if lhs != nil && rhs == nil {
// Anything else is greater than nil
return false
} // Else: Both are nil
// Compare property "team"
if lhs, rhs := this.ForgeFedTeam, o.GetForgeFedTeam(); lhs != nil && rhs != nil {
if lhs.LessThan(rhs) {
return true
} else if rhs.LessThan(lhs) {
return false
}
} else if lhs == nil && rhs != nil {
// Nil is less than anything else
return true
} else if lhs != nil && rhs == nil {
// Anything else is greater than nil
return false
} // Else: Both are nil
// Compare property "ticketsTrackedBy"
if lhs, rhs := this.ForgeFedTicketsTrackedBy, o.GetForgeFedTicketsTrackedBy(); lhs != nil && rhs != nil {
if lhs.LessThan(rhs) {
return true
} else if rhs.LessThan(lhs) {
return false
}
} else if lhs == nil && rhs != nil {
// Nil is less than anything else
return true
} else if lhs != nil && rhs == nil {
// Anything else is greater than nil
return false
} // Else: Both are nil
// Compare property "to"
if lhs, rhs := this.ActivityStreamsTo, o.GetActivityStreamsTo(); lhs != nil && rhs != nil {
if lhs.LessThan(rhs) {
return true
} else if rhs.LessThan(lhs) {
return false
}
} else if lhs == nil && rhs != nil {
// Nil is less than anything else
return true
} else if lhs != nil && rhs == nil {
// Anything else is greater than nil
return false
} // Else: Both are nil
// Compare property "tracksTicketsFor"
if lhs, rhs := this.ForgeFedTracksTicketsFor, o.GetForgeFedTracksTicketsFor(); lhs != nil && rhs != nil {
if lhs.LessThan(rhs) {
return true
} else if rhs.LessThan(lhs) {
return false
}
} else if lhs == nil && rhs != nil {
// Nil is less than anything else
return true
} else if lhs != nil && rhs == nil {
// Anything else is greater than nil
return false
} // Else: Both are nil
// Compare property "type"
if lhs, rhs := this.JSONLDType, o.GetJSONLDType(); lhs != nil && rhs != nil {
if lhs.LessThan(rhs) {
return true
} else if rhs.LessThan(lhs) {
return false
}
} else if lhs == nil && rhs != nil {
// Nil is less than anything else
return true
} else if lhs != nil && rhs == nil {
// Anything else is greater than nil
return false
} // Else: Both are nil
// Compare property "updated"
if lhs, rhs := this.ActivityStreamsUpdated, o.GetActivityStreamsUpdated(); lhs != nil && rhs != nil {
if lhs.LessThan(rhs) {
return true
} else if rhs.LessThan(lhs) {
return false
}
} else if lhs == nil && rhs != nil {
// Nil is less than anything else
return true
} else if lhs != nil && rhs == nil {
// Anything else is greater than nil
return false
} // Else: Both are nil
// Compare property "url"
if lhs, rhs := this.ActivityStreamsUrl, o.GetActivityStreamsUrl(); lhs != nil && rhs != nil {
if lhs.LessThan(rhs) {
return true
} else if rhs.LessThan(lhs) {
return false
}
} else if lhs == nil && rhs != nil {
// Nil is less than anything else
return true
} else if lhs != nil && rhs == nil {
// Anything else is greater than nil
return false
} // Else: Both are nil
// End: Compare known properties
// Begin: Compare unknown properties (only by number of them)
if len(this.unknown) < len(o.GetUnknownProperties()) {
return true
} else if len(o.GetUnknownProperties()) < len(this.unknown) {
return false
} // End: Compare unknown properties (only by number of them)
// All properties are the same.
return false
}
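// Usage sketch (illustrative): LessThan defines an arbitrary but stable total
// order, which makes deterministic sorting possible. Here likes is a
// hypothetical []vocab.ActivityStreamsLike:
//
//   sort.Slice(likes, func(i, j int) bool {
//       return likes[i].LessThan(likes[j])
//   })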
// Serialize converts this into an interface representation suitable for
// marshalling into a text or binary format.
func (this ActivityStreamsLike) Serialize() (map[string]interface{}, error) {
m := make(map[string]interface{})
typeName := "Like"
if len(this.alias) > 0 {
typeName = this.alias + ":" + "Like"
}
m["type"] = typeName
// Begin: Serialize known properties
// Maybe serialize property "actor"
if this.ActivityStreamsActor != nil {
if i, err := this.ActivityStreamsActor.Serialize(); err != nil {
return nil, err
} else if i != nil {
m[this.ActivityStreamsActor.Name()] = i
}
}
// Maybe serialize property "altitude"
if this.ActivityStreamsAltitude != nil {
if i, err := this.ActivityStreamsAltitude.Serialize(); err != nil {
return nil, err
} else if i != nil {
m[this.ActivityStreamsAltitude.Name()] = i
}
}
// Maybe serialize property "attachment"
if this.ActivityStreamsAttachment != nil {
if i, err := this.ActivityStreamsAttachment.Serialize(); err != nil {
return nil, err
} else if i != nil {
m[this.ActivityStreamsAttachment.Name()] = i
}
}
// Maybe serialize property "attributedTo"
if this.ActivityStreamsAttributedTo != nil {
if i, err := this.ActivityStreamsAttributedTo.Serialize(); err != nil {
return nil, err
} else if i != nil {
m[this.ActivityStreamsAttributedTo.Name()] = i
}
}
// Maybe serialize property "audience"
if this.ActivityStreamsAudience != nil {
if i, err := this.ActivityStreamsAudience.Serialize(); err != nil {
return nil, err
} else if i != nil {
m[this.ActivityStreamsAudience.Name()] = i
}
}
// Maybe serialize property "bcc"
if this.ActivityStreamsBcc != nil {
if i, err := this.ActivityStreamsBcc.Serialize(); err != nil {
return nil, err
} else if i != nil {
m[this.ActivityStreamsBcc.Name()] = i
}
}
// Maybe serialize property "bto"
if this.ActivityStreamsBto != nil {
if i, err := this.ActivityStreamsBto.Serialize(); err != nil {
return nil, err
} else if i != nil {
m[this.ActivityStreamsBto.Name()] = i
}
}
// Maybe serialize property "cc"
if this.ActivityStreamsCc != nil {
if i, err := this.ActivityStreamsCc.Serialize(); err != nil {
return nil, err
} else if i != nil {
m[this.ActivityStreamsCc.Name()] = i
}
}
// Maybe serialize property "content"
if this.ActivityStreamsContent != nil {
if i, err := this.ActivityStreamsContent.Serialize(); err != nil {
return nil, err
} else if i != nil {
m[this.ActivityStreamsContent.Name()] = i
}
}
// Maybe serialize property "context"
if this.ActivityStreamsContext != nil {
if i, err := this.ActivityStreamsContext.Serialize(); err != nil {
return nil, err
} else if i != nil {
m[this.ActivityStreamsContext.Name()] = i
}
}
// Maybe serialize property "duration"
if this.ActivityStreamsDuration != nil {
if i, err := this.ActivityStreamsDuration.Serialize(); err != nil {
return nil, err
} else if i != nil {
m[this.ActivityStreamsDuration.Name()] = i
}
}
// Maybe serialize property "endTime"
if this.ActivityStreamsEndTime != nil {
if i, err := this.ActivityStreamsEndTime.Serialize(); err != nil {
return nil, err
} else if i != nil {
m[this.ActivityStreamsEndTime.Name()] = i
}
}
// Maybe serialize property "generator"
if this.ActivityStreamsGenerator != nil {
if i, err := this.ActivityStreamsGenerator.Serialize(); err != nil {
return nil, err
} else if i != nil {
m[this.ActivityStreamsGenerator.Name()] = i
}
}
// Maybe serialize property "icon"
if this.ActivityStreamsIcon != nil {
if i, err := this.ActivityStreamsIcon.Serialize(); err != nil {
return nil, err
} else if i != nil {
m[this.ActivityStreamsIcon.Name()] = i
}
}
// Maybe serialize property "id"
if this.JSONLDId != nil {
if i, err := this.JSONLDId.Serialize(); err != nil {
return nil, err
} else if i != nil {
m[this.JSONLDId.Name()] = i
}
}
// Maybe serialize property "image"
if this.ActivityStreamsImage != nil {
if i, err := this.ActivityStreamsImage.Serialize(); err != nil {
return nil, err
} else if i != nil {
m[this.ActivityStreamsImage.Name()] = i
}
}
// Maybe serialize property "inReplyTo"
if this.ActivityStreamsInReplyTo != nil {
if i, err := this.ActivityStreamsInReplyTo.Serialize(); err != nil {
return nil, err
} else if i != nil {
m[this.ActivityStreamsInReplyTo.Name()] = i
}
}
// Maybe serialize property "instrument"
if this.ActivityStreamsInstrument != nil {
if i, err := this.ActivityStreamsInstrument.Serialize(); err != nil {
return nil, err
} else if i != nil {
m[this.ActivityStreamsInstrument.Name()] = i
}
}
// Maybe serialize property "likes"
if this.ActivityStreamsLikes != nil {
if i, err := this.ActivityStreamsLikes.Serialize(); err != nil {
return nil, err
} else if i != nil {
m[this.ActivityStreamsLikes.Name()] = i
}
}
// Maybe serialize property "location"
if this.ActivityStreamsLocation != nil {
if i, err := this.ActivityStreamsLocation.Serialize(); err != nil {
return nil, err
} else if i != nil {
m[this.ActivityStreamsLocation.Name()] = i
}
}
// Maybe serialize property "mediaType"
if this.ActivityStreamsMediaType != nil {
if i, err := this.ActivityStreamsMediaType.Serialize(); err != nil {
return nil, err
} else if i != nil {
m[this.ActivityStreamsMediaType.Name()] = i
}
}
// Maybe serialize property "name"
if this.ActivityStreamsName != nil {
if i, err := this.ActivityStreamsName.Serialize(); err != nil {
return nil, err
} else if i != nil {
m[this.ActivityStreamsName.Name()] = i
}
}
// Maybe serialize property "object"
if this.ActivityStreamsObject != nil {
if i, err := this.ActivityStreamsObject.Serialize(); err != nil {
return nil, err
} else if i != nil {
m[this.ActivityStreamsObject.Name()] = i
}
}
// Maybe serialize property "origin"
if this.ActivityStreamsOrigin != nil {
if i, err := this.ActivityStreamsOrigin.Serialize(); err != nil {
return nil, err
} else if i != nil {
m[this.ActivityStreamsOrigin.Name()] = i
}
}
// Maybe serialize property "preview"
if this.ActivityStreamsPreview != nil {
if i, err := this.ActivityStreamsPreview.Serialize(); err != nil {
return nil, err
} else if i != nil {
m[this.ActivityStreamsPreview.Name()] = i
}
}
// Maybe serialize property "published"
if this.ActivityStreamsPublished != nil {
if i, err := this.ActivityStreamsPublished.Serialize(); err != nil {
return nil, err
} else if i != nil {
m[this.ActivityStreamsPublished.Name()] = i
}
}
// Maybe serialize property "replies"
if this.ActivityStreamsReplies != nil {
if i, err := this.ActivityStreamsReplies.Serialize(); err != nil {
return nil, err
} else if i != nil {
m[this.ActivityStreamsReplies.Name()] = i
}
}
// Maybe serialize property "result"
if this.ActivityStreamsResult != nil {
if i, err := this.ActivityStreamsResult.Serialize(); err != nil {
return nil, err
} else if i != nil {
m[this.ActivityStreamsResult.Name()] = i
}
}
// Maybe serialize property "sensitive"
if this.ActivityStreamsSensitive != nil {
if i, err := this.ActivityStreamsSensitive.Serialize(); err != nil {
return nil, err
} else if i != nil {
m[this.ActivityStreamsSensitive.Name()] = i
}
}
// Maybe serialize property "shares"
if this.ActivityStreamsShares != nil {
if i, err := this.ActivityStreamsShares.Serialize(); err != nil {
return nil, err
} else if i != nil {
m[this.ActivityStreamsShares.Name()] = i
}
}
// Maybe serialize property "source"
if this.ActivityStreamsSource != nil {
if i, err := this.ActivityStreamsSource.Serialize(); err != nil {
return nil, err
} else if i != nil {
m[this.ActivityStreamsSource.Name()] = i
}
}
// Maybe serialize property "startTime"
if this.ActivityStreamsStartTime != nil {
if i, err := this.ActivityStreamsStartTime.Serialize(); err != nil {
return nil, err
} else if i != nil {
m[this.ActivityStreamsStartTime.Name()] = i
}
}
// Maybe serialize property "summary"
if this.ActivityStreamsSummary != nil {
if i, err := this.ActivityStreamsSummary.Serialize(); err != nil {
return nil, err
} else if i != nil {
m[this.ActivityStreamsSummary.Name()] = i
}
}
// Maybe serialize property "tag"
if this.ActivityStreamsTag != nil {
if i, err := this.ActivityStreamsTag.Serialize(); err != nil {
return nil, err
} else if i != nil {
m[this.ActivityStreamsTag.Name()] = i
}
}
// Maybe serialize property "target"
if this.ActivityStreamsTarget != nil {
if i, err := this.ActivityStreamsTarget.Serialize(); err != nil {
return nil, err
} else if i != nil {
m[this.ActivityStreamsTarget.Name()] = i
}
}
// Maybe serialize property "team"
if this.ForgeFedTeam != nil {
if i, err := this.ForgeFedTeam.Serialize(); err != nil {
return nil, err
} else if i != nil {
m[this.ForgeFedTeam.Name()] = i
}
}
// Maybe serialize property "ticketsTrackedBy"
if this.ForgeFedTicketsTrackedBy != nil {
if i, err := this.ForgeFedTicketsTrackedBy.Serialize(); err != nil {
return nil, err
} else if i != nil {
m[this.ForgeFedTicketsTrackedBy.Name()] = i
}
}
// Maybe serialize property "to"
if this.ActivityStreamsTo != nil {
if i, err := this.ActivityStreamsTo.Serialize(); err != nil {
return nil, err
} else if i != nil {
m[this.ActivityStreamsTo.Name()] = i
}
}
// Maybe serialize property "tracksTicketsFor"
if this.ForgeFedTracksTicketsFor != nil {
if i, err := this.ForgeFedTracksTicketsFor.Serialize(); err != nil {
return nil, err
} else if i != nil {
m[this.ForgeFedTracksTicketsFor.Name()] = i
}
}
// Maybe serialize property "type"
if this.JSONLDType != nil {
if i, err := this.JSONLDType.Serialize(); err != nil {
return nil, err
} else if i != nil {
m[this.JSONLDType.Name()] = i
}
}
// Maybe serialize property "updated"
if this.ActivityStreamsUpdated != nil {
if i, err := this.ActivityStreamsUpdated.Serialize(); err != nil {
return nil, err
} else if i != nil {
m[this.ActivityStreamsUpdated.Name()] = i
}
}
// Maybe serialize property "url"
if this.ActivityStreamsUrl != nil {
if i, err := this.ActivityStreamsUrl.Serialize(); err != nil {
return nil, err
} else if i != nil {
m[this.ActivityStreamsUrl.Name()] = i
}
}
// End: Serialize known properties
// Begin: Serialize unknown properties
for k, v := range this.unknown {
// To be safe, ensure we aren't overwriting a known property
if _, has := m[k]; !has {
m[k] = v
}
}
// End: Serialize unknown properties
return m, nil
}
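// Usage sketch (illustrative): the returned map is plain data, ready for
// encoding/json:
//
//   if m, err := like.Serialize(); err == nil {
//       b, _ := json.Marshal(m) // {"type":"Like",...}
//       _ = b
//   }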
// SetActivityStreamsActor sets the "actor" property.
func (this *ActivityStreamsLike) SetActivityStreamsActor(i vocab.ActivityStreamsActorProperty) {
this.ActivityStreamsActor = i
}
// SetActivityStreamsAltitude sets the "altitude" property.
func (this *ActivityStreamsLike) SetActivityStreamsAltitude(i vocab.ActivityStreamsAltitudeProperty) {
this.ActivityStreamsAltitude = i
}
// SetActivityStreamsAttachment sets the "attachment" property.
func (this *ActivityStreamsLike) SetActivityStreamsAttachment(i vocab.ActivityStreamsAttachmentProperty) {
this.ActivityStreamsAttachment = i
}
// SetActivityStreamsAttributedTo sets the "attributedTo" property.
func (this *ActivityStreamsLike) SetActivityStreamsAttributedTo(i vocab.ActivityStreamsAttributedToProperty) {
this.ActivityStreamsAttributedTo = i
}
// SetActivityStreamsAudience sets the "audience" property.
func (this *ActivityStreamsLike) SetActivityStreamsAudience(i vocab.ActivityStreamsAudienceProperty) {
this.ActivityStreamsAudience = i
}
// SetActivityStreamsBcc sets the "bcc" property.
func (this *ActivityStreamsLike) SetActivityStreamsBcc(i vocab.ActivityStreamsBccProperty) {
this.ActivityStreamsBcc = i
}
// SetActivityStreamsBto sets the "bto" property.
func (this *ActivityStreamsLike) SetActivityStreamsBto(i vocab.ActivityStreamsBtoProperty) {
this.ActivityStreamsBto = i
}
// SetActivityStreamsCc sets the "cc" property.
func (this *ActivityStreamsLike) SetActivityStreamsCc(i vocab.ActivityStreamsCcProperty) {
this.ActivityStreamsCc = i
}
// SetActivityStreamsContent sets the "content" property.
func (this *ActivityStreamsLike) SetActivityStreamsContent(i vocab.ActivityStreamsContentProperty) {
this.ActivityStreamsContent = i
}
// SetActivityStreamsContext sets the "context" property.
func (this *ActivityStreamsLike) SetActivityStreamsContext(i vocab.ActivityStreamsContextProperty) {
this.ActivityStreamsContext = i
}
// SetActivityStreamsDuration sets the "duration" property.
func (this *ActivityStreamsLike) SetActivityStreamsDuration(i vocab.ActivityStreamsDurationProperty) {
this.ActivityStreamsDuration = i
}
// SetActivityStreamsEndTime sets the "endTime" property.
func (this *ActivityStreamsLike) SetActivityStreamsEndTime(i vocab.ActivityStreamsEndTimeProperty) {
this.ActivityStreamsEndTime = i
}
// SetActivityStreamsGenerator sets the "generator" property.
func (this *ActivityStreamsLike) SetActivityStreamsGenerator(i vocab.ActivityStreamsGeneratorProperty) {
this.ActivityStreamsGenerator = i
}
// SetActivityStreamsIcon sets the "icon" property.
func (this *ActivityStreamsLike) SetActivityStreamsIcon(i vocab.ActivityStreamsIconProperty) {
this.ActivityStreamsIcon = i
}
// SetActivityStreamsImage sets the "image" property.
func (this *ActivityStreamsLike) SetActivityStreamsImage(i vocab.ActivityStreamsImageProperty) {
this.ActivityStreamsImage = i
}
// SetActivityStreamsInReplyTo sets the "inReplyTo" property.
func (this *ActivityStreamsLike) SetActivityStreamsInReplyTo(i vocab.ActivityStreamsInReplyToProperty) {
this.ActivityStreamsInReplyTo = i
}
// SetActivityStreamsInstrument sets the "instrument" property.
func (this *ActivityStreamsLike) SetActivityStreamsInstrument(i vocab.ActivityStreamsInstrumentProperty) {
this.ActivityStreamsInstrument = i
}
// SetActivityStreamsLikes sets the "likes" property.
func (this *ActivityStreamsLike) SetActivityStreamsLikes(i vocab.ActivityStreamsLikesProperty) {
this.ActivityStreamsLikes = i
}
// SetActivityStreamsLocation sets the "location" property.
func (this *ActivityStreamsLike) SetActivityStreamsLocation(i vocab.ActivityStreamsLocationProperty) {
this.ActivityStreamsLocation = i
}
// SetActivityStreamsMediaType sets the "mediaType" property.
func (this *ActivityStreamsLike) SetActivityStreamsMediaType(i vocab.ActivityStreamsMediaTypeProperty) {
this.ActivityStreamsMediaType = i
}
// SetActivityStreamsName sets the "name" property.
func (this *ActivityStreamsLike) SetActivityStreamsName(i vocab.ActivityStreamsNameProperty) {
this.ActivityStreamsName = i
}
// SetActivityStreamsObject sets the "object" property.
func (this *ActivityStreamsLike) SetActivityStreamsObject(i vocab.ActivityStreamsObjectProperty) {
this.ActivityStreamsObject = i
}
// SetActivityStreamsOrigin sets the "origin" property.
func (this *ActivityStreamsLike) SetActivityStreamsOrigin(i vocab.ActivityStreamsOriginProperty) {
this.ActivityStreamsOrigin = i
}
// SetActivityStreamsPreview sets the "preview" property.
func (this *ActivityStreamsLike) SetActivityStreamsPreview(i vocab.ActivityStreamsPreviewProperty) {
this.ActivityStreamsPreview = i
}
// SetActivityStreamsPublished sets the "published" property.
func (this *ActivityStreamsLike) SetActivityStreamsPublished(i vocab.ActivityStreamsPublishedProperty) {
this.ActivityStreamsPublished = i
}
// SetActivityStreamsReplies sets the "replies" property.
func (this *ActivityStreamsLike) SetActivityStreamsReplies(i vocab.ActivityStreamsRepliesProperty) {
this.ActivityStreamsReplies = i
}
// SetActivityStreamsResult sets the "result" property.
func (this *ActivityStreamsLike) SetActivityStreamsResult(i vocab.ActivityStreamsResultProperty) {
this.ActivityStreamsResult = i
}
// SetActivityStreamsSensitive sets the "sensitive" property.
func (this *ActivityStreamsLike) SetActivityStreamsSensitive(i vocab.ActivityStreamsSensitiveProperty) {
this.ActivityStreamsSensitive = i
}
// SetActivityStreamsShares sets the "shares" property.
func (this *ActivityStreamsLike) SetActivityStreamsShares(i vocab.ActivityStreamsSharesProperty) {
this.ActivityStreamsShares = i
}
// SetActivityStreamsSource sets the "source" property.
func (this *ActivityStreamsLike) SetActivityStreamsSource(i vocab.ActivityStreamsSourceProperty) {
this.ActivityStreamsSource = i
}
// SetActivityStreamsStartTime sets the "startTime" property.
func (this *ActivityStreamsLike) SetActivityStreamsStartTime(i vocab.ActivityStreamsStartTimeProperty) {
this.ActivityStreamsStartTime = i
}
// SetActivityStreamsSummary sets the "summary" property.
func (this *ActivityStreamsLike) SetActivityStreamsSummary(i vocab.ActivityStreamsSummaryProperty) {
this.ActivityStreamsSummary = i
}
// SetActivityStreamsTag sets the "tag" property.
func (this *ActivityStreamsLike) SetActivityStreamsTag(i vocab.ActivityStreamsTagProperty) {
this.ActivityStreamsTag = i
}
// SetActivityStreamsTarget sets the "target" property.
func (this *ActivityStreamsLike) SetActivityStreamsTarget(i vocab.ActivityStreamsTargetProperty) {
this.ActivityStreamsTarget = i
}
// SetActivityStreamsTo sets the "to" property.
func (this *ActivityStreamsLike) SetActivityStreamsTo(i vocab.ActivityStreamsToProperty) {
this.ActivityStreamsTo = i
}
// SetActivityStreamsUpdated sets the "updated" property.
func (this *ActivityStreamsLike) SetActivityStreamsUpdated(i vocab.ActivityStreamsUpdatedProperty) {
this.ActivityStreamsUpdated = i
}
// SetActivityStreamsUrl sets the "url" property.
func (this *ActivityStreamsLike) SetActivityStreamsUrl(i vocab.ActivityStreamsUrlProperty) {
this.ActivityStreamsUrl = i
}
// SetForgeFedTeam sets the "team" property.
func (this *ActivityStreamsLike) SetForgeFedTeam(i vocab.ForgeFedTeamProperty) {
this.ForgeFedTeam = i
}
// SetForgeFedTicketsTrackedBy sets the "ticketsTrackedBy" property.
func (this *ActivityStreamsLike) SetForgeFedTicketsTrackedBy(i vocab.ForgeFedTicketsTrackedByProperty) {
this.ForgeFedTicketsTrackedBy = i
}
// SetForgeFedTracksTicketsFor sets the "tracksTicketsFor" property.
func (this *ActivityStreamsLike) SetForgeFedTracksTicketsFor(i vocab.ForgeFedTracksTicketsForProperty) {
this.ForgeFedTracksTicketsFor = i
}
// SetJSONLDId sets the "id" property.
func (this *ActivityStreamsLike) SetJSONLDId(i vocab.JSONLDIdProperty) {
this.JSONLDId = i
}
// SetJSONLDType sets the "type" property.
func (this *ActivityStreamsLike) SetJSONLDType(i vocab.JSONLDTypeProperty) {
this.JSONLDType = i
}
// VocabularyURI returns the vocabulary's URI as a string.
func (this ActivityStreamsLike) VocabularyURI() string {
return "https://www.w3.org/ns/activitystreams"
}
// helperJSONLDContext obtains the context uris and their aliases from a property,
// if it is not nil.
func (this ActivityStreamsLike) helperJSONLDContext(i jsonldContexter, toMerge map[string]string) map[string]string {
if i == nil {
return toMerge
}
for k, v := range i.JSONLDContext() {
/*
Since the literal maps in this function are determined at
code-generation time, this loop should not overwrite an existing key with a
new value.
*/
toMerge[k] = v
}
return toMerge
}
| ActivityStreamsLikeExtends |
visit_item_fn.rs | use super::*;
impl<'a> UnstableVisitor<'a> {
pub(super) fn | (&mut self, node: &syn::ItemFn) {
if self.feature.is_unstable(&node.attrs, Some(&node.vis)) {
let attrs = self.feature.strip_attrs(&node.attrs);
self.visit_unstable_item(syn::ItemFn {
attrs: attrs.clone(),
block: Box::new(util::empty_block()),
..node.clone()
});
self.feature.assert_stable().visit_item_fn(&syn::ItemFn {
attrs,
..node.clone()
})
} else {
self.feature.assert_stable().visit_item_fn(node)
}
}
}
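// Note on the branch above: for an `fn` the feature marks unstable, the
// visitor records a copy with the feature attributes stripped and the body
// replaced by an empty block, then re-visits the function (stripped
// attributes, original body) under a stable assertion; stable functions are
// visited unchanged.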
| visit_item_fn |
modify_topic_remark.go | package alikafka
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
//
// Code generated by Alibaba Cloud SDK Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"github.com/CRORCR/alibaba-cloud-sdk-go/sdk/requests"
"github.com/CRORCR/alibaba-cloud-sdk-go/sdk/responses"
)
// ModifyTopicRemark invokes the alikafka.ModifyTopicRemark API synchronously
// api document: https://help.aliyun.com/api/alikafka/modifytopicremark.html
func (client *Client) ModifyTopicRemark(request *ModifyTopicRemarkRequest) (response *ModifyTopicRemarkResponse, err error) {
response = CreateModifyTopicRemarkResponse()
err = client.DoAction(request, response)
return
}
// ModifyTopicRemarkWithChan invokes the alikafka.ModifyTopicRemark API asynchronously
// api document: https://help.aliyun.com/api/alikafka/modifytopicremark.html
// asynchronous document: https://help.aliyun.com/document_detail/66220.html
func (client *Client) ModifyTopicRemarkWithChan(request *ModifyTopicRemarkRequest) (<-chan *ModifyTopicRemarkResponse, <-chan error) {
responseChan := make(chan *ModifyTopicRemarkResponse, 1)
errChan := make(chan error, 1)
err := client.AddAsyncTask(func() {
defer close(responseChan)
defer close(errChan)
response, err := client.ModifyTopicRemark(request)
if err != nil {
errChan <- err
} else {
responseChan <- response
}
})
if err != nil {
errChan <- err
close(responseChan)
close(errChan)
}
return responseChan, errChan
}
// ModifyTopicRemarkWithCallback invokes the alikafka.ModifyTopicRemark API asynchronously
// api document: https://help.aliyun.com/api/alikafka/modifytopicremark.html
// asynchronous document: https://help.aliyun.com/document_detail/66220.html
func (client *Client) ModifyTopicRemarkWithCallback(request *ModifyTopicRemarkRequest, callback func(response *ModifyTopicRemarkResponse, err error)) <-chan int {
result := make(chan int, 1)
err := client.AddAsyncTask(func() {
var response *ModifyTopicRemarkResponse
var err error
defer close(result)
response, err = client.ModifyTopicRemark(request)
callback(response, err)
result <- 1
})
if err != nil {
defer close(result)
callback(nil, err)
result <- 0
}
return result
}
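// Usage sketch (illustrative; the region, credentials, and field values below
// are placeholders, and NewClientWithAccessKey is assumed to be the package's
// standard generated constructor):
//
//   client, err := NewClientWithAccessKey("cn-hangzhou", "<accessKeyId>", "<accessKeySecret>")
//   if err == nil {
//       request := CreateModifyTopicRemarkRequest()
//       request.InstanceId = "alikafka_post-xxxx"
//       request.Topic = "my-topic"
//       request.Remark = "updated remark"
//       response, err := client.ModifyTopicRemark(request)
//       _, _ = response, err
//   }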
// ModifyTopicRemarkRequest is the request struct for api ModifyTopicRemark
type ModifyTopicRemarkRequest struct {
*requests.RpcRequest
InstanceId string `position:"Query" name:"InstanceId"`
Topic string `position:"Query" name:"Topic"`
Remark string `position:"Query" name:"Remark"`
}
| // ModifyTopicRemarkResponse is the response struct for api ModifyTopicRemark
type ModifyTopicRemarkResponse struct {
*responses.BaseResponse
Success bool `json:"Success" xml:"Success"`
RequestId string `json:"RequestId" xml:"RequestId"`
Code int `json:"Code" xml:"Code"`
Message string `json:"Message" xml:"Message"`
}
// CreateModifyTopicRemarkRequest creates a request to invoke ModifyTopicRemark API
func CreateModifyTopicRemarkRequest() (request *ModifyTopicRemarkRequest) {
request = &ModifyTopicRemarkRequest{
RpcRequest: &requests.RpcRequest{},
}
request.InitWithApiInfo("alikafka", "2019-09-16", "ModifyTopicRemark", "alikafka", "openAPI")
request.Method = requests.POST
return
}
// CreateModifyTopicRemarkResponse creates a response to parse from ModifyTopicRemark response
func CreateModifyTopicRemarkResponse() (response *ModifyTopicRemarkResponse) {
response = &ModifyTopicRemarkResponse{
BaseResponse: &responses.BaseResponse{},
}
return
} | |
PuppetLaunchError.ts | constructor(message: string, stack: string, readonly isSandboxError: boolean) {
super(message);
this.stack = stack;
this.name = 'PuppetLaunchError';
}
} | import { IPuppetLaunchError } from '@secret-agent/puppet-interfaces/IPuppetLaunchError';
export default class PuppetLaunchError extends Error implements IPuppetLaunchError { |
|
test_fuzzy_completion.py | from __future__ import unicode_literals
import pytest
@pytest.fixture
def completer():
import mssqlcli.mssqlcompleter as mssqlcompleter
return mssqlcompleter.MssqlCompleter()
def test_ranking_ignores_identifier_quotes(completer):
"""When calculating result rank, identifier quotes should be ignored.
The result ranking algorithm ignores identifier quotes. Without this
correction, the match "user", which Postgres requires to be quoted
since it is also a reserved word, would incorrectly fall below the
match user_action because the literal quotation marks in "user"
alter the position of the match.
This test checks that the fuzzy ranking algorithm correctly ignores
quotation marks when computing match ranks.
"""
text = 'user'
collection = ['user_action', '"user"']
matches = completer.find_matches(text, collection)
assert len(matches) == 2
def test_ranking_based_on_shortest_match(completer):
"""Fuzzy result rank should be based on shortest match.
Result ranking in fuzzy searching is partially based on the length
of matches: shorter matches are considered more relevant than
longer ones. When searching for the text 'user', the length
component of the match 'user_group' could be either 4 ('user') or
7 ('user_gr').
This test checks that the fuzzy ranking algorithm uses the shorter
match when calculating result rank.
"""
text = 'user'
collection = ['api_user', 'user_group']
matches = completer.find_matches(text, collection)
assert matches[1].priority > matches[0].priority
@pytest.mark.parametrize('collection', [
['user_action', 'user'],
['user_group', 'user'],
['user_group', 'user_action'],
])
def test_should_break_ties_using_lexical_order(completer, collection):
|
def test_matching_should_be_case_insensitive(completer):
"""Fuzzy matching should keep matches even if letter casing doesn't match.
This test checks that variations of the text which have different casing
are still matched.
"""
text = 'foo'
collection = ['Foo', 'FOO', 'fOO']
matches = completer.find_matches(text, collection)
assert len(matches) == 3
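# Illustrative sketch (not part of the suite): find_matches, as exercised
# above, takes the typed text and any iterable of candidate names, and returns
# completions carrying a fuzzy-rank priority:
#
#   matches = completer.find_matches('user', ['user', 'user_profile'])
#   assert len(matches) == 2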
| """Fuzzy result rank should use lexical order to break ties.
When fuzzy matching, if multiple matches have the same match length and
start position, present them in lexical (rather than arbitrary) order. For
example, if we have tables 'user', 'user_action', and 'user_group', a
search for the text 'user' should present these tables in this order.
The input collections to this test are out of order; each run checks that
the search text 'user' results in the input tables being reordered
lexically.
"""
text = 'user'
matches = completer.find_matches(text, collection)
assert matches[1].priority > matches[0].priority |