prompt
large_stringlengths 70
991k
| completion
large_stringlengths 0
1.02k
|
---|---|
<|file_name|>forms.py<|end_file_name|><|fim▁begin|># ------------------------------------------------------------------------
# coding=utf-8
# ------------------------------------------------------------------------
from __future__ import absolute_import
import os
from django import forms
from django.utils.translation import ugettext_lazy as _
from feincms import settings
from . import logger
from .models import Category, MediaFile
from .fields import AdminFileWithPreviewWidget
# ------------------------------------------------------------------------
class MediaCategoryAdminForm(forms.ModelForm):
class Meta:
model = Category
def clean_parent(self):
data = self.cleaned_data['parent']
if data is not None and self.instance in data.path_list():
raise forms.ValidationError(_("This would create a loop in the hierarchy"))
return data
def __init__(self,* args, **kwargs):
super(MediaCategoryAdminForm, self).__init__(*args, **kwargs)
self.fields['parent'].queryset = self.fields['parent'].queryset.exclude(pk=self.instance.pk)
# ------------------------------------------------------------------------
class MediaFileAdminForm(forms.ModelForm):
class Meta:
model = MediaFile
widgets = { 'file': AdminFileWithPreviewWidget }
def __init__(self, *args, **kwargs):
super(MediaFileAdminForm, self).__init__(*args, **kwargs)
if settings.FEINCMS_MEDIAFILE_OVERWRITE and self.instance.id:
if not hasattr(self.instance.file.field, '_feincms_generate_filename_patched'):
orig_generate_filename = self.instance.file.field.generate_filename
def _gen_fname(instance, filename):<|fim▁hole|> if instance.id and hasattr(instance, 'original_name'):
logger.info("Overwriting file %s with new data" % instance.original_name)
instance.file.storage.delete(instance.original_name)
return instance.original_name
return orig_generate_filename(instance, filename)
self.instance.file.field.generate_filename = _gen_fname
self.instance.file.field._feincms_generate_filename_patched = True
def clean_file(self):
if settings.FEINCMS_MEDIAFILE_OVERWRITE and self.instance.id:
new_base, new_ext = os.path.splitext(self.cleaned_data['file'].name)
old_base, old_ext = os.path.splitext(self.instance.file.name)
if new_ext.lower() != old_ext.lower():
raise forms.ValidationError(_("Cannot overwrite with different file type (attempt to overwrite a %(old_ext)s with a %(new_ext)s)") % { 'old_ext': old_ext, 'new_ext': new_ext })
self.instance.original_name = self.instance.file.name
return self.cleaned_data['file']
# ------------------------------------------------------------------------<|fim▁end|>
| |
<|file_name|>my_elbow_angle_tcr_imgt.py<|end_file_name|><|fim▁begin|>'''
More information at: http://www.pymolwiki.org/index.php/elbow_angle
Calculate the elbow angle of an antibody Fab complex and optionally draw a
graphical representation of the vectors used to determine the angle.
NOTE: There is no automatic checking of the validity of limit_l and limit_h
values or of the assignment of light and heavy chain IDs. If these are entered
incorrectly or omitted, the reported angle will likely be incorrect.
As always with these things, your mileage may vary. Use at your own risk!
REQUIREMENTS
numpy, version 1.6
http://numpy.scipy.org
transformations.py, version 2012.01.01
by Christoph Gohlke
www.lfd.uci.edu/~gohlke/code
May also require an edit to transformations.py:
Changes `1e-8` to `1e-7` in lines 357 & 363 to avoid a numerical error.
com.py
by Jason Vertrees
http://www.pymolwiki.org/index.php/com
'''
__author__ = 'Jared Sampson'
__version__ = '0.1'
from pymol import cmd
import transformations
import com
import numpy
################################################################################
def calc_super_matrix(mobile,static):
'''
DESCRIPTION
Aligns two objects (or selections), returns the transformation matrix,
and resets the matrix of the mobile object.
Uses CEAlign PyMOL function for alignment.
ARGUMENTS
mobile = string: selection describing the mobile object whose rotation
matrix will be reported
static = string: selection describing the static object onto which the
mobile object will be aligned
REQUIRES: numpy
'''
cmd.cealign(static,mobile)
# cmd.super(mobile,static)
T = cmd.get_object_matrix(mobile)
R = numpy.identity(4)
k=0
for i in range (0,4):
for j in range (0,4):
R[i][j] = T[k]
k+=1
return R
################################################################################
#def elbow_angle(obj,light='L',heavy='H',limit_l=110,limit_h=113,draw=1):
# alpha = light, beta = heavy
# def elbow_angle(obj,light,heavy,limit_l=128,limit_h=127,draw=0):
def elbow_angle(obj,heavy,light,limit_h="1001E",limit_l=1001,draw=0):
"""
DESCRIPTION
Calculates the integer elbow angle of an antibody Fab complex and
optionally draws a graphical representation of the vectors used to
determine the angle.
ARGUMENTS
obj = string: object
light/heavy = strings: chain ID of light and heavy chains, respectively
limit_l/limit_h = integers: residue numbers of the last residue in the
light and heavy chain variable domains, respectively
draw = boolean: Choose whether or not to draw the angle visualization<|fim▁hole|>
"""
# store current view
orig_view = cmd.get_view()
#limit_l = int(limit_l)
#limit_h = int(limit_h)
draw = int(draw)
# for temp object names
tmp_prefix = "tmp_elbow_"
prefix = tmp_prefix + obj + '_'
# names
vl = prefix + 'VL'
vh = prefix + 'VH'
cl = prefix + 'CL'
ch = prefix + 'CH'
# selections
vl_sel = 'polymer and %s and chain %s and resi 1-%i' % (obj, light, limit_l)
vh_sel = 'polymer and %s and chain %s and resi 1-%s & !resi 1001D & !resi 1001C & !resi 1001B & !resi 1001A & !resi 1001' % (obj, heavy, limit_h)
cl_sel = 'polymer and %s and chain %s and not resi 1-%i' % (obj, light, limit_l)
#ch_sel = 'polymer and %s and chain %s and not resi 1-%i' % (obj, heavy, limit_h)
ch_sel = 'polymer and %s and chain %s and not resi 1-127 and not resi 1001D and not resi 1001C and not resi 1001B and not resi 1001A and not resi 1001' % (obj, heavy)
v_sel = '(('+vl_sel+') or ('+vh_sel+'))'
c_sel = '(('+cl_sel+') or ('+ch_sel+'))'
# create temp objects
cmd.create(vl,vl_sel)
cmd.create(vh,vh_sel)
cmd.create(cl,cl_sel)
cmd.create(ch,ch_sel)
# superimpose vl onto vh, calculate axis and angle
Rv = calc_super_matrix(vl,vh)
angle_v,direction_v,point_v = transformations.rotation_from_matrix(Rv)
# superimpose cl onto ch, calculate axis and angle
Rc = calc_super_matrix(cl,ch)
angle_c,direction_c,point_c = transformations.rotation_from_matrix(Rc)
# delete temporary objects
cmd.delete(vl)
cmd.delete(vh)
cmd.delete(cl)
cmd.delete(ch)
# if dot product is positive, angle is acute
if (numpy.dot(direction_v,direction_c)>0):
direction_c = direction_c * -1 # ensure angle is > 90 (need to standardize this)
# TODO: make both directions point away from the elbow axis.
elbow = int(numpy.degrees(numpy.arccos(numpy.dot(direction_v,direction_c))))
# while (elbow < 90):
# elbow = 180 - elbow # limit to physically reasonable range
# compare the direction_v and direction_c axes to the vector defined by
# the C-heavy atoms of limit_l and limit_h of the original fab
hinge_l_sel = "%s//%s/%s/CA" % (obj,light,limit_l)
hinge_h_sel = "%s//%s/%s/CA" % (obj,heavy,limit_h)
hinge_l = cmd.get_atom_coords(hinge_l_sel)
hinge_h = cmd.get_atom_coords(hinge_h_sel)
hinge_vec = numpy.array(hinge_h) - numpy.array(hinge_l)
test = numpy.dot(hinge_vec,numpy.cross(direction_v,direction_c))
if (test > 0):
elbow = 360 - elbow
#print " Elbow angle: %i degrees" % elbow
if (draw==1):
# there is probably a more elegant way to do this, but
# it works so I'm not going to mess with it for now
pre = obj+'_elbow_'
# draw hinge vector
cmd.pseudoatom(pre+"hinge_l",pos=hinge_l)
cmd.pseudoatom(pre+"hinge_h",pos=hinge_h)
cmd.distance(pre+"hinge_vec",pre+"hinge_l",pre+"hinge_h")
cmd.set("dash_gap",0)
# draw the variable domain axis
com_v = com.COM(v_sel)
start_v = [a - 10*b for a, b in zip(com_v, direction_v)]
end_v = [a + 10*b for a, b in zip(com_v, direction_v)]
cmd.pseudoatom(pre+"start_v",pos=start_v)
cmd.pseudoatom(pre+"end_v",pos=end_v)
cmd.distance(pre+"v_vec",pre+"start_v",pre+"end_v")
# draw the constant domain axis
com_c = com.COM(c_sel)
start_c = [a - 10*b for a, b in zip(com_c, direction_c)]
end_c = [a + 10*b for a, b in zip(com_c, direction_c)]
cmd.pseudoatom(pre+"start_c",pos=start_c)
cmd.pseudoatom(pre+"end_c",pos=end_c)
cmd.distance(pre+"c_vec",pre+"start_c",pre+"end_c")
# customize appearance
cmd.hide("labels",pre+"hinge_vec");cmd.hide("labels",pre+"v_vec");cmd.hide("labels",pre+"c_vec");
cmd.color("green",pre+"hinge_l");cmd.color("red",pre+"hinge_h");cmd.color("black",pre+"hinge_vec");
cmd.color("black",pre+"start_v");cmd.color("black",pre+"end_v");cmd.color("black",pre+"v_vec");
cmd.color("black",pre+"start_c");cmd.color("black",pre+"end_c");cmd.color("black",pre+"c_vec")
# draw spheres
cmd.show("spheres",pre+"hinge_l or "+pre+"hinge_h")
cmd.show("spheres",pre+"start_v or "+pre+"start_c")
cmd.show("spheres",pre+"end_v or "+pre+"end_c")
cmd.set("sphere_scale",2)
cmd.set("dash_gap",0,pre+"hinge_vec")
cmd.set("dash_width",5)
cmd.set("dash_radius",0.3)
# group drawing objects
cmd.group(pre,pre+"*")
# restore original view
cmd.set_view(orig_view)
return elbow
def setup_antibody():
my_struc = cmd.load("1mhp_ch.pdb")
my_elbow = elbow_angle(my_struc)
print(my_elbow)
return 0<|fim▁end|>
|
REQUIRES: com.py, transformations.py, numpy (see above)
|
<|file_name|>Icon.js<|end_file_name|><|fim▁begin|>let _ = require('underscore'),
React = require('react');
class Icon extends React.Component {
render() {
let className = "icon " + this.props.icon;
let other = _.omit(this.props.icon, "icon");
return (
<span className={className} role="img" {...other}></span>
);
}
}
Icon.propTypes = {
icon: React.PropTypes.string.isRequired
<|fim▁hole|>};
module.exports = Icon;<|fim▁end|>
| |
<|file_name|>bench_setup.rs<|end_file_name|><|fim▁begin|>use froggy::Pointer;
/// Entities with velocity and position component.
pub const N_POS_VEL: usize = 5_000;
/// Entities with position component only.
pub const N_POS: usize = 15_000;
pub struct Position {
pub x: f32,<|fim▁hole|>#[allow(dead_code)]
pub struct Velocity {
pub dx: f32,
pub dy: f32,
pub writes: Pointer<Position>,
}<|fim▁end|>
|
pub y: f32,
}
|
<|file_name|>E0516.rs<|end_file_name|><|fim▁begin|>// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn main() {<|fim▁hole|><|fim▁end|>
|
let x: typeof(92) = 92; //~ ERROR E0516
//~| reserved keyword
}
|
<|file_name|>resources.py<|end_file_name|><|fim▁begin|>from __future__ import with_statement
import sys
import logging
import warnings
import django
from django.conf import settings
try:
from django.conf.urls import patterns, url
except ImportError: # Django < 1.4
from django.conf.urls.defaults import patterns, url
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned, ValidationError
from django.core.urlresolvers import NoReverseMatch, reverse, resolve, Resolver404, get_script_prefix
from django.core.signals import got_request_exception
from django.db import transaction
from django.db.models.sql.constants import QUERY_TERMS
from django.http import HttpResponse, HttpResponseNotFound, Http404
from django.utils.cache import patch_cache_control, patch_vary_headers
from tastypie.authentication import Authentication
from tastypie.authorization import ReadOnlyAuthorization
from tastypie.bundle import Bundle
from tastypie.cache import NoCache
from tastypie.constants import ALL, ALL_WITH_RELATIONS
from tastypie.exceptions import NotFound, BadRequest, InvalidFilterError, HydrationError, InvalidSortError, ImmediateHttpResponse, Unauthorized
from tastypie import fields
from tastypie import http
from tastypie.paginator import Paginator
from tastypie.serializers import Serializer
from tastypie.throttle import BaseThrottle
from tastypie.utils import is_valid_jsonp_callback_value, dict_strip_unicode_keys, trailing_slash
from tastypie.utils.mime import determine_format, build_content_type
from tastypie.validation import Validation
try:
set
except NameError:
from sets import Set as set
# copycompat deprecated in Django 1.5. If python version is at least 2.5, it
# is safe to use the native python copy module.
# The ``copy`` module became function-friendly in Python 2.5 and
# ``copycompat`` was added in post 1.1.1 Django (r11901)..
if sys.version_info >= (2,5):
try:
from copy import deepcopy
except ImportError:
from django.utils.copycompat import deepcopy
else:
# For python older than 2.5, we must be running a version of Django before
# copycompat was deprecated.
try:
from django.utils.copycompat import deepcopy
except ImportError:
from copy import deepcopy
# If ``csrf_exempt`` isn't present, stub it.
try:
from django.views.decorators.csrf import csrf_exempt
except ImportError:
def csrf_exempt(func):
return func
# Django 1.5 has moved this constant up one level.
try:
from django.db.models.constants import LOOKUP_SEP
except ImportError:
from django.db.models.sql.constants import LOOKUP_SEP
class NOT_AVAILABLE:
def __str__(self):
return 'No such data is available.'
class ResourceOptions(object):
"""
A configuration class for ``Resource``.
Provides sane defaults and the logic needed to augment these settings with
the internal ``class Meta`` used on ``Resource`` subclasses.
"""
serializer = Serializer()
authentication = Authentication()
authorization = ReadOnlyAuthorization()
cache = NoCache()
throttle = BaseThrottle()
validation = Validation()
paginator_class = Paginator
allowed_methods = ['get', 'post', 'put', 'delete', 'patch']
list_allowed_methods = None
detail_allowed_methods = None
limit = getattr(settings, 'API_LIMIT_PER_PAGE', 20)
max_limit = 1000
api_name = None
resource_name = None
urlconf_namespace = None
default_format = 'application/json'
filtering = {}
ordering = []
object_class = None
queryset = None
fields = []
excludes = []
include_resource_uri = True
include_absolute_url = False
always_return_data = False
collection_name = 'objects'
detail_uri_name = 'pk'
def __new__(cls, meta=None):
overrides = {}
# Handle overrides.
if meta:
for override_name in dir(meta):
# No internals please.
if not override_name.startswith('_'):
overrides[override_name] = getattr(meta, override_name)
allowed_methods = overrides.get('allowed_methods', ['get', 'post', 'put', 'delete', 'patch'])
if overrides.get('list_allowed_methods', None) is None:
overrides['list_allowed_methods'] = allowed_methods
if overrides.get('detail_allowed_methods', None) is None:
overrides['detail_allowed_methods'] = allowed_methods
return object.__new__(type('ResourceOptions', (cls,), overrides))
class DeclarativeMetaclass(type):
def __new__(cls, name, bases, attrs):
attrs['base_fields'] = {}
declared_fields = {}
# Inherit any fields from parent(s).
try:
parents = [b for b in bases if issubclass(b, Resource)]
# Simulate the MRO.
parents.reverse()
for p in parents:
parent_fields = getattr(p, 'base_fields', {})
for field_name, field_object in parent_fields.items():
attrs['base_fields'][field_name] = deepcopy(field_object)
except NameError:
pass
for field_name, obj in attrs.items():
# Look for ``dehydrated_type`` instead of doing ``isinstance``,
# which can break down if Tastypie is re-namespaced as something
# else.
if hasattr(obj, 'dehydrated_type'):
field = attrs.pop(field_name)
declared_fields[field_name] = field
attrs['base_fields'].update(declared_fields)
attrs['declared_fields'] = declared_fields
new_class = super(DeclarativeMetaclass, cls).__new__(cls, name, bases, attrs)
opts = getattr(new_class, 'Meta', None)
new_class._meta = ResourceOptions(opts)
if not getattr(new_class._meta, 'resource_name', None):
# No ``resource_name`` provided. Attempt to auto-name the resource.
class_name = new_class.__name__
name_bits = [bit for bit in class_name.split('Resource') if bit]
resource_name = ''.join(name_bits).lower()
new_class._meta.resource_name = resource_name
if getattr(new_class._meta, 'include_resource_uri', True):
if not 'resource_uri' in new_class.base_fields:
new_class.base_fields['resource_uri'] = fields.CharField(readonly=True)
elif 'resource_uri' in new_class.base_fields and not 'resource_uri' in attrs:
del(new_class.base_fields['resource_uri'])
for field_name, field_object in new_class.base_fields.items():
if hasattr(field_object, 'contribute_to_class'):
field_object.contribute_to_class(new_class, field_name)
return new_class
class Resource(object):
"""
Handles the data, request dispatch and responding to requests.
Serialization/deserialization is handled "at the edges" (i.e. at the
beginning/end of the request/response cycle) so that everything internally
is Python data structures.
This class tries to be non-model specific, so it can be hooked up to other
data sources, such as search results, files, other data, etc.
"""
__metaclass__ = DeclarativeMetaclass
def __init__(self, api_name=None):
self.fields = deepcopy(self.base_fields)
if not api_name is None:
self._meta.api_name = api_name
def __getattr__(self, name):
if name in self.fields:
return self.fields[name]
raise AttributeError(name)
def wrap_view(self, view):
"""
Wraps methods so they can be called in a more functional way as well
as handling exceptions better.
Note that if ``BadRequest`` or an exception with a ``response`` attr
are seen, there is special handling to either present a message back
to the user or return the response traveling with the exception.
"""
@csrf_exempt
def wrapper(request, *args, **kwargs):
try:
callback = getattr(self, view)
response = callback(request, *args, **kwargs)
# Our response can vary based on a number of factors, use
# the cache class to determine what we should ``Vary`` on so
# caches won't return the wrong (cached) version.
varies = getattr(self._meta.cache, "varies", [])
if varies:
patch_vary_headers(response, varies)
if self._meta.cache.cacheable(request, response):
if self._meta.cache.cache_control():
# If the request is cacheable and we have a
# ``Cache-Control`` available then patch the header.
patch_cache_control(response, **self._meta.cache.cache_control())
if request.is_ajax() and not response.has_header("Cache-Control"):
# IE excessively caches XMLHttpRequests, so we're disabling
# the browser cache here.
# See http://www.enhanceie.com/ie/bugs.asp for details.
patch_cache_control(response, no_cache=True)
return response
except (BadRequest, fields.ApiFieldError), e:
data = {"error": e.args[0] if getattr(e, 'args') else ''}
return self.error_response(request, data, response_class=http.HttpBadRequest)
except ValidationError, e:
data = {"error": e.messages}
return self.error_response(request, data, response_class=http.HttpBadRequest)
except Exception, e:
if hasattr(e, 'response'):
return e.response
# A real, non-expected exception.
# Handle the case where the full traceback is more helpful
# than the serialized error.
if settings.DEBUG and getattr(settings, 'TASTYPIE_FULL_DEBUG', False):
raise
# Re-raise the error to get a proper traceback when the error
# happend during a test case
if request.META.get('SERVER_NAME') == 'testserver':
raise
# Rather than re-raising, we're going to things similar to
# what Django does. The difference is returning a serialized
# error message.
return self._handle_500(request, e)
return wrapper
def _handle_500(self, request, exception):
import traceback
import sys
the_trace = '\n'.join(traceback.format_exception(*(sys.exc_info())))
response_class = http.HttpApplicationError
response_code = 500
NOT_FOUND_EXCEPTIONS = (NotFound, ObjectDoesNotExist, Http404)
if isinstance(exception, NOT_FOUND_EXCEPTIONS):
response_class = HttpResponseNotFound
response_code = 404
if settings.DEBUG:
data = {
"error_message": unicode(exception),
"traceback": the_trace,
}
return self.error_response(request, data, response_class=response_class)
# When DEBUG is False, send an error message to the admins (unless it's
# a 404, in which case we check the setting).
send_broken_links = getattr(settings, 'SEND_BROKEN_LINK_EMAILS', False)
if not response_code == 404 or send_broken_links:
log = logging.getLogger('django.request.tastypie')
log.error('Internal Server Error: %s' % request.path, exc_info=True,
extra={'status_code': response_code, 'request': request})
if django.VERSION < (1, 3, 0):
from django.core.mail import mail_admins
subject = 'Error (%s IP): %s' % ((request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS and 'internal' or 'EXTERNAL'), request.path)
try:
request_repr = repr(request)
except:
request_repr = "Request repr() unavailable"
message = "%s\n\n%s" % (the_trace, request_repr)
mail_admins(subject, message, fail_silently=True)
# Send the signal so other apps are aware of the exception.
got_request_exception.send(self.__class__, request=request)
# Prep the data going out.
data = {
"error_message": getattr(settings, 'TASTYPIE_CANNED_ERROR', "Sorry, this request could not be processed. Please try again later."),
}
return self.error_response(request, data, response_class=response_class)
def _build_reverse_url(self, name, args=None, kwargs=None):
"""
A convenience hook for overriding how URLs are built.
See ``NamespacedModelResource._build_reverse_url`` for an example.
"""
return reverse(name, args=args, kwargs=kwargs)
def base_urls(self):
"""
The standard URLs this ``Resource`` should respond to.
"""
return [
url(r"^(?P<resource_name>%s)%s$" % (self._meta.resource_name, trailing_slash()), self.wrap_view('dispatch_list'), name="api_dispatch_list"),
url(r"^(?P<resource_name>%s)/schema%s$" % (self._meta.resource_name, trailing_slash()), self.wrap_view('get_schema'), name="api_get_schema"),
url(r"^(?P<resource_name>%s)/set/(?P<%s_list>\w[\w/;-]*)%s$" % (self._meta.resource_name, self._meta.detail_uri_name, trailing_slash()), self.wrap_view('get_multiple'), name="api_get_multiple"),
url(r"^(?P<resource_name>%s)/(?P<%s>\w[\w/-]*)%s$" % (self._meta.resource_name, self._meta.detail_uri_name, trailing_slash()), self.wrap_view('dispatch_detail'), name="api_dispatch_detail"),
]
def override_urls(self):
"""
Deprecated. Will be removed by v1.0.0. Please use ``prepend_urls`` instead.
"""
return []
def prepend_urls(self):
"""
A hook for adding your own URLs or matching before the default URLs.
"""
return []
@property
def urls(self):
"""
The endpoints this ``Resource`` responds to.
Mostly a standard URLconf, this is suitable for either automatic use
when registered with an ``Api`` class or for including directly in
a URLconf should you choose to.
"""
urls = self.prepend_urls()
overridden_urls = self.override_urls()
if overridden_urls:
warnings.warn("'override_urls' is a deprecated method & will be removed by v1.0.0. Please rename your method to ``prepend_urls``.")
urls += overridden_urls
urls += self.base_urls()
urlpatterns = patterns('',
*urls
)
return urlpatterns
def determine_format(self, request):
"""
Used to determine the desired format.
Largely relies on ``tastypie.utils.mime.determine_format`` but here
as a point of extension.
"""
return determine_format(request, self._meta.serializer, default_format=self._meta.default_format)
def serialize(self, request, data, format, options=None):
"""
Given a request, data and a desired format, produces a serialized
version suitable for transfer over the wire.
Mostly a hook, this uses the ``Serializer`` from ``Resource._meta``.
"""
options = options or {}
if 'text/javascript' in format:
# get JSONP callback name. default to "callback"
callback = request.GET.get('callback', 'callback')
if not is_valid_jsonp_callback_value(callback):
raise BadRequest('JSONP callback name is invalid.')
options['callback'] = callback
return self._meta.serializer.serialize(data, format, options)
def deserialize(self, request, data, format='application/json'):
"""
Given a request, data and a format, deserializes the given data.
It relies on the request properly sending a ``CONTENT_TYPE`` header,
falling back to ``application/json`` if not provided.
Mostly a hook, this uses the ``Serializer`` from ``Resource._meta``.
"""
deserialized = self._meta.serializer.deserialize(data, format=request.META.get('CONTENT_TYPE', 'application/json'))
return deserialized
def alter_list_data_to_serialize(self, request, data):
"""
A hook to alter list data just before it gets serialized & sent to the user.
Useful for restructuring/renaming aspects of the what's going to be
sent.
Should accommodate for a list of objects, generally also including
meta data.
"""
return data
def alter_detail_data_to_serialize(self, request, data):
"""
A hook to alter detail data just before it gets serialized & sent to the user.
Useful for restructuring/renaming aspects of the what's going to be
sent.
Should accommodate for receiving a single bundle of data.
"""
return data
def alter_deserialized_list_data(self, request, data):
"""
A hook to alter list data just after it has been received from the user &
gets deserialized.
Useful for altering the user data before any hydration is applied.
"""
return data
def alter_deserialized_detail_data(self, request, data):
"""
A hook to alter detail data just after it has been received from the user &
gets deserialized.
Useful for altering the user data before any hydration is applied.
"""
return data
def dispatch_list(self, request, **kwargs):
"""
A view for handling the various HTTP methods (GET/POST/PUT/DELETE) over
the entire list of resources.
Relies on ``Resource.dispatch`` for the heavy-lifting.
"""
return self.dispatch('list', request, **kwargs)
def dispatch_detail(self, request, **kwargs):
"""
A view for handling the various HTTP methods (GET/POST/PUT/DELETE) on
a single resource.
Relies on ``Resource.dispatch`` for the heavy-lifting.
"""
return self.dispatch('detail', request, **kwargs)
def dispatch(self, request_type, request, **kwargs):
"""
Handles the common operations (allowed HTTP method, authentication,
throttling, method lookup) surrounding most CRUD interactions.
"""
allowed_methods = getattr(self._meta, "%s_allowed_methods" % request_type, None)
if 'HTTP_X_HTTP_METHOD_OVERRIDE' in request.META:
request.method = request.META['HTTP_X_HTTP_METHOD_OVERRIDE']
request_method = self.method_check(request, allowed=allowed_methods)
method = getattr(self, "%s_%s" % (request_method, request_type), None)
if method is None:
raise ImmediateHttpResponse(response=http.HttpNotImplemented())
self.is_authenticated(request)
self.throttle_check(request)
# All clear. Process the request.
request = convert_post_to_put(request)
response = method(request, **kwargs)
# Add the throttled request.
self.log_throttled_access(request)
# If what comes back isn't a ``HttpResponse``, assume that the
# request was accepted and that some action occurred. This also
# prevents Django from freaking out.
if not isinstance(response, HttpResponse):
return http.HttpNoContent()
return response
def remove_api_resource_names(self, url_dict):
"""
Given a dictionary of regex matches from a URLconf, removes
``api_name`` and/or ``resource_name`` if found.
This is useful for converting URLconf matches into something suitable
for data lookup. For example::
Model.objects.filter(**self.remove_api_resource_names(matches))
"""
kwargs_subset = url_dict.copy()
for key in ['api_name', 'resource_name']:
try:
del(kwargs_subset[key])<|fim▁hole|>
def method_check(self, request, allowed=None):
"""
Ensures that the HTTP method used on the request is allowed to be
handled by the resource.
Takes an ``allowed`` parameter, which should be a list of lowercase
HTTP methods to check against. Usually, this looks like::
# The most generic lookup.
self.method_check(request, self._meta.allowed_methods)
# A lookup against what's allowed for list-type methods.
self.method_check(request, self._meta.list_allowed_methods)
# A useful check when creating a new endpoint that only handles
# GET.
self.method_check(request, ['get'])
"""
if allowed is None:
allowed = []
request_method = request.method.lower()
allows = ','.join(map(str.upper, allowed))
if request_method == "options":
response = HttpResponse(allows)
response['Allow'] = allows
raise ImmediateHttpResponse(response=response)
if not request_method in allowed:
response = http.HttpMethodNotAllowed(allows)
response['Allow'] = allows
raise ImmediateHttpResponse(response=response)
return request_method
def is_authenticated(self, request):
"""
Handles checking if the user is authenticated and dealing with
unauthenticated users.
Mostly a hook, this uses class assigned to ``authentication`` from
``Resource._meta``.
"""
# Authenticate the request as needed.
auth_result = self._meta.authentication.is_authenticated(request)
if isinstance(auth_result, HttpResponse):
raise ImmediateHttpResponse(response=auth_result)
if not auth_result is True:
raise ImmediateHttpResponse(response=http.HttpUnauthorized())
def throttle_check(self, request):
"""
Handles checking if the user should be throttled.
Mostly a hook, this uses class assigned to ``throttle`` from
``Resource._meta``.
"""
identifier = self._meta.authentication.get_identifier(request)
# Check to see if they should be throttled.
if self._meta.throttle.should_be_throttled(identifier):
# Throttle limit exceeded.
raise ImmediateHttpResponse(response=http.HttpTooManyRequests())
def log_throttled_access(self, request):
"""
Handles the recording of the user's access for throttling purposes.
Mostly a hook, this uses class assigned to ``throttle`` from
``Resource._meta``.
"""
request_method = request.method.lower()
self._meta.throttle.accessed(self._meta.authentication.get_identifier(request), url=request.get_full_path(), request_method=request_method)
def unauthorized_result(self, exception):
raise ImmediateHttpResponse(response=http.HttpUnauthorized())
def authorized_read_list(self, object_list, bundle):
"""
Handles checking of permissions to see if the user has authorization
to GET this resource.
"""
try:
auth_result = self._meta.authorization.read_list(object_list, bundle)
except Unauthorized, e:
self.unauthorized_result(e)
return auth_result
def authorized_read_detail(self, object_list, bundle):
"""
Handles checking of permissions to see if the user has authorization
to GET this resource.
"""
try:
auth_result = self._meta.authorization.read_detail(object_list, bundle)
if not auth_result is True:
raise Unauthorized()
except Unauthorized, e:
self.unauthorized_result(e)
return auth_result
def authorized_create_list(self, object_list, bundle):
"""
Handles checking of permissions to see if the user has authorization
to POST this resource.
"""
try:
auth_result = self._meta.authorization.create_list(object_list, bundle)
except Unauthorized, e:
self.unauthorized_result(e)
return auth_result
def authorized_create_detail(self, object_list, bundle):
"""
Handles checking of permissions to see if the user has authorization
to POST this resource.
"""
try:
auth_result = self._meta.authorization.create_detail(object_list, bundle)
if not auth_result is True:
raise Unauthorized()
except Unauthorized, e:
self.unauthorized_result(e)
return auth_result
def authorized_update_list(self, object_list, bundle):
"""
Handles checking of permissions to see if the user has authorization
to PUT this resource.
"""
try:
auth_result = self._meta.authorization.update_list(object_list, bundle)
except Unauthorized, e:
self.unauthorized_result(e)
return auth_result
def authorized_update_detail(self, object_list, bundle):
"""
Handles checking of permissions to see if the user has authorization
to PUT this resource.
"""
try:
auth_result = self._meta.authorization.update_detail(object_list, bundle)
if not auth_result is True:
raise Unauthorized()
except Unauthorized, e:
self.unauthorized_result(e)
return auth_result
def authorized_delete_list(self, object_list, bundle):
"""
Handles checking of permissions to see if the user has authorization
to DELETE this resource.
"""
try:
auth_result = self._meta.authorization.delete_list(object_list, bundle)
except Unauthorized, e:
self.unauthorized_result(e)
return auth_result
def authorized_delete_detail(self, object_list, bundle):
"""
Handles checking of permissions to see if the user has authorization
to DELETE this resource.
"""
try:
auth_result = self._meta.authorization.delete_detail(object_list, bundle)
if not auth_result:
raise Unauthorized()
except Unauthorized, e:
self.unauthorized_result(e)
return auth_result
def build_bundle(self, obj=None, data=None, request=None, objects_saved=None):
"""
Given either an object, a data dictionary or both, builds a ``Bundle``
for use throughout the ``dehydrate/hydrate`` cycle.
If no object is provided, an empty object from
``Resource._meta.object_class`` is created so that attempts to access
``bundle.obj`` do not fail.
"""
if obj is None:
obj = self._meta.object_class()
return Bundle(
obj=obj,
data=data,
request=request,
objects_saved=objects_saved
)
def build_filters(self, filters=None):
"""
Allows for the filtering of applicable objects.
This needs to be implemented at the user level.'
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
return filters
def apply_sorting(self, obj_list, options=None):
"""
Allows for the sorting of objects being returned.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
return obj_list
def get_bundle_detail_data(self, bundle):
"""
Convenience method to return the ``detail_uri_name`` attribute off
``bundle.obj``.
Usually just accesses ``bundle.obj.pk`` by default.
"""
return getattr(bundle.obj, self._meta.detail_uri_name)
# URL-related methods.
def detail_uri_kwargs(self, bundle_or_obj):
"""
This needs to be implemented at the user level.
Given a ``Bundle`` or an object, it returns the extra kwargs needed to
generate a detail URI.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def resource_uri_kwargs(self, bundle_or_obj=None):
"""
Builds a dictionary of kwargs to help generate URIs.
Automatically provides the ``Resource.Meta.resource_name`` (and
optionally the ``Resource.Meta.api_name`` if populated by an ``Api``
object).
If the ``bundle_or_obj`` argument is provided, it calls
``Resource.detail_uri_kwargs`` for additional bits to create
"""
kwargs = {
'resource_name': self._meta.resource_name,
}
if self._meta.api_name is not None:
kwargs['api_name'] = self._meta.api_name
if bundle_or_obj is not None:
kwargs.update(self.detail_uri_kwargs(bundle_or_obj))
return kwargs
def get_resource_uri(self, bundle_or_obj=None, url_name='api_dispatch_list'):
"""
Handles generating a resource URI.
If the ``bundle_or_obj`` argument is not provided, it builds the URI
for the list endpoint.
If the ``bundle_or_obj`` argument is provided, it builds the URI for
the detail endpoint.
Return the generated URI. If that URI can not be reversed (not found
in the URLconf), it will return an empty string.
"""
if bundle_or_obj is not None:
url_name = 'api_dispatch_detail'
try:
return self._build_reverse_url(url_name, kwargs=self.resource_uri_kwargs(bundle_or_obj))
except NoReverseMatch:
return ''
def get_via_uri(self, uri, request=None):
"""
This pulls apart the salient bits of the URI and populates the
resource via a ``obj_get``.
Optionally accepts a ``request``.
If you need custom behavior based on other portions of the URI,
simply override this method.
"""
prefix = get_script_prefix()
chomped_uri = uri
if prefix and chomped_uri.startswith(prefix):
chomped_uri = chomped_uri[len(prefix)-1:]
try:
view, args, kwargs = resolve(chomped_uri)
except Resolver404:
raise NotFound("The URL provided '%s' was not a link to a valid resource." % uri)
bundle = self.build_bundle(request=request)
return self.obj_get(bundle=bundle, **self.remove_api_resource_names(kwargs))
# Data preparation.
def full_dehydrate(self, bundle, for_list=False):
"""
Given a bundle with an object instance, extract the information from it
to populate the resource.
"""
use_in = ['all', 'list' if for_list else 'detail']
# Dehydrate each field.
for field_name, field_object in self.fields.items():
# If it's not for use in this mode, skip
field_use_in = getattr(field_object, 'use_in', 'all')
if callable(field_use_in):
if not field_use_in(bundle):
continue
else:
if field_use_in not in use_in:
continue
# A touch leaky but it makes URI resolution work.
if getattr(field_object, 'dehydrated_type', None) == 'related':
field_object.api_name = self._meta.api_name
field_object.resource_name = self._meta.resource_name
bundle.data[field_name] = field_object.dehydrate(bundle, for_list=for_list)
# Check for an optional method to do further dehydration.
method = getattr(self, "dehydrate_%s" % field_name, None)
if method:
bundle.data[field_name] = method(bundle)
bundle = self.dehydrate(bundle)
return bundle
def dehydrate(self, bundle):
"""
A hook to allow a final manipulation of data once all fields/methods
have built out the dehydrated data.
Useful if you need to access more than one dehydrated field or want
to annotate on additional data.
Must return the modified bundle.
"""
return bundle
    def full_hydrate(self, bundle):
        """
        Given a populated bundle, distill it and turn it back into
        a full-fledged object instance.

        Runs the whole-bundle ``hydrate`` hook, then per-field hydration
        (honoring ``readonly``), and copies values onto ``bundle.obj``.
        M2M fields are deliberately skipped here (see ``hydrate_m2m``).
        """
        if bundle.obj is None:
            bundle.obj = self._meta.object_class()
        bundle = self.hydrate(bundle)
        for field_name, field_object in self.fields.items():
            # Read-only fields never flow back onto the object.
            if field_object.readonly is True:
                continue
            # Check for an optional method to do further hydration.
            method = getattr(self, "hydrate_%s" % field_name, None)
            if method:
                bundle = method(bundle)
            if field_object.attribute:
                value = field_object.hydrate(bundle)
                # NOTE: We only get back a bundle when it is related field.
                if isinstance(value, Bundle) and value.errors.get(field_name):
                    bundle.errors[field_name] = value.errors[field_name]
                if value is not None or field_object.null:
                    # We need to avoid populating M2M data here as that will
                    # cause things to blow up.
                    if not getattr(field_object, 'is_related', False):
                        setattr(bundle.obj, field_object.attribute, value)
                    elif not getattr(field_object, 'is_m2m', False):
                        if value is not None:
                            # NOTE: A bug fix in Django (ticket #18153) fixes incorrect behavior
                            # which Tastypie was relying on. To fix this, we store value.obj to
                            # be saved later in save_related.
                            try:
                                setattr(bundle.obj, field_object.attribute, value.obj)
                            except (ValueError, ObjectDoesNotExist):
                                # Related object can't be assigned yet (e.g.
                                # unsaved); defer it to ``save_related``.
                                bundle.related_objects_to_save[field_object.attribute] = value.obj
                        elif field_object.blank:
                            continue
                        elif field_object.null:
                            setattr(bundle.obj, field_object.attribute, value)
        return bundle
def hydrate(self, bundle):
"""
A hook to allow an initial manipulation of data before all methods/fields
have built out the hydrated data.
Useful if you need to access more than one hydrated field or want
to annotate on additional data.
Must return the modified bundle.
"""
return bundle
def hydrate_m2m(self, bundle):
"""
Populate the ManyToMany data on the instance.
"""
if bundle.obj is None:
raise HydrationError("You must call 'full_hydrate' before attempting to run 'hydrate_m2m' on %r." % self)
for field_name, field_object in self.fields.items():
if not getattr(field_object, 'is_m2m', False):
continue
if field_object.attribute:
# Note that we only hydrate the data, leaving the instance
# unmodified. It's up to the user's code to handle this.
# The ``ModelResource`` provides a working baseline
# in this regard.
bundle.data[field_name] = field_object.hydrate_m2m(bundle)
for field_name, field_object in self.fields.items():
if not getattr(field_object, 'is_m2m', False):
continue
method = getattr(self, "hydrate_%s" % field_name, None)
if method:
method(bundle)
return bundle
def build_schema(self):
"""
Returns a dictionary of all the fields on the resource and some
properties about those fields.
Used by the ``schema/`` endpoint to describe what will be available.
"""
data = {
'fields': {},
'default_format': self._meta.default_format,
'allowed_list_http_methods': self._meta.list_allowed_methods,
'allowed_detail_http_methods': self._meta.detail_allowed_methods,
'default_limit': self._meta.limit,
}
if self._meta.ordering:
data['ordering'] = self._meta.ordering
if self._meta.filtering:
data['filtering'] = self._meta.filtering
for field_name, field_object in self.fields.items():
data['fields'][field_name] = {
'default': field_object.default,
'type': field_object.dehydrated_type,
'nullable': field_object.null,
'blank': field_object.blank,
'readonly': field_object.readonly,
'help_text': field_object.help_text,
'unique': field_object.unique,
}
if field_object.dehydrated_type == 'related':
if getattr(field_object, 'is_m2m', False):
related_type = 'to_many'
else:
related_type = 'to_one'
data['fields'][field_name]['related_type'] = related_type
return data
def dehydrate_resource_uri(self, bundle):
"""
For the automatically included ``resource_uri`` field, dehydrate
the URI for the given bundle.
Returns empty string if no URI can be generated.
"""
try:
return self.get_resource_uri(bundle)
except NotImplementedError:
return ''
except NoReverseMatch:
return ''
def generate_cache_key(self, *args, **kwargs):
"""
Creates a unique-enough cache key.
This is based off the current api_name/resource_name/args/kwargs.
"""
smooshed = []
for key, value in kwargs.items():
smooshed.append("%s=%s" % (key, value))
# Use a list plus a ``.join()`` because it's faster than concatenation.
return "%s:%s:%s:%s" % (self._meta.api_name, self._meta.resource_name, ':'.join(args), ':'.join(smooshed))
# Data access methods.
def get_object_list(self, request):
"""
A hook to allow making returning the list of available objects.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def apply_authorization_limits(self, request, object_list):
"""
Deprecated.
FIXME: REMOVE BEFORE 1.0
"""
return self._meta.authorization.apply_limits(request, object_list)
def can_create(self):
"""
Checks to ensure ``post`` is within ``allowed_methods``.
"""
allowed = set(self._meta.list_allowed_methods + self._meta.detail_allowed_methods)
return 'post' in allowed
def can_update(self):
"""
Checks to ensure ``put`` is within ``allowed_methods``.
Used when hydrating related data.
"""
allowed = set(self._meta.list_allowed_methods + self._meta.detail_allowed_methods)
return 'put' in allowed
def can_delete(self):
"""
Checks to ensure ``delete`` is within ``allowed_methods``.
"""
allowed = set(self._meta.list_allowed_methods + self._meta.detail_allowed_methods)
return 'delete' in allowed
def apply_filters(self, request, applicable_filters):
"""
A hook to alter how the filters are applied to the object list.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def obj_get_list(self, bundle, **kwargs):
"""
Fetches the list of objects available on the resource.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def cached_obj_get_list(self, bundle, **kwargs):
"""
A version of ``obj_get_list`` that uses the cache as a means to get
commonly-accessed data faster.
"""
cache_key = self.generate_cache_key('list', **kwargs)
obj_list = self._meta.cache.get(cache_key)
if obj_list is None:
obj_list = self.obj_get_list(bundle=bundle, **kwargs)
self._meta.cache.set(cache_key, obj_list)
return obj_list
def obj_get(self, bundle, **kwargs):
"""
Fetches an individual object on the resource.
This needs to be implemented at the user level. If the object can not
be found, this should raise a ``NotFound`` exception.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def cached_obj_get(self, bundle, **kwargs):
"""
A version of ``obj_get`` that uses the cache as a means to get
commonly-accessed data faster.
"""
cache_key = self.generate_cache_key('detail', **kwargs)
cached_bundle = self._meta.cache.get(cache_key)
if cached_bundle is None:
cached_bundle = self.obj_get(bundle=bundle, **kwargs)
self._meta.cache.set(cache_key, cached_bundle)
return cached_bundle
def obj_create(self, bundle, **kwargs):
"""
Creates a new object based on the provided data.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def obj_update(self, bundle, **kwargs):
"""
Updates an existing object (or creates a new object) based on the
provided data.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def obj_delete_list(self, bundle, **kwargs):
"""
Deletes an entire list of objects.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def obj_delete_list_for_update(self, bundle, **kwargs):
"""
Deletes an entire list of objects, specific to PUT list.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def obj_delete(self, bundle, **kwargs):
"""
Deletes a single object.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def create_response(self, request, data, response_class=HttpResponse, **response_kwargs):
"""
Extracts the common "which-format/serialize/return-response" cycle.
Mostly a useful shortcut/hook.
"""
desired_format = self.determine_format(request)
serialized = self.serialize(request, data, desired_format)
return response_class(content=serialized, content_type=build_content_type(desired_format), **response_kwargs)
def error_response(self, request, errors, response_class=None):
"""
Extracts the common "which-format/serialize/return-error-response"
cycle.
Should be used as much as possible to return errors.
"""
if response_class is None:
response_class = http.HttpBadRequest
desired_format = None
if request:
if request.GET.get('callback', None) is None:
try:
desired_format = self.determine_format(request)
except BadRequest:
pass # Fall through to default handler below
else:
# JSONP can cause extra breakage.
desired_format = 'application/json'
if not desired_format:
desired_format = self._meta.default_format
try:
serialized = self.serialize(request, errors, desired_format)
except BadRequest, e:
error = "Additional errors occurred, but serialization of those errors failed."
if settings.DEBUG:
error += " %s" % e
return response_class(content=error, content_type='text/plain')
return response_class(content=serialized, content_type=build_content_type(desired_format))
def is_valid(self, bundle):
"""
Handles checking if the data provided by the user is valid.
Mostly a hook, this uses class assigned to ``validation`` from
``Resource._meta``.
If validation fails, an error is raised with the error messages
serialized inside it.
"""
errors = self._meta.validation.is_valid(bundle, bundle.request)
if errors:
bundle.errors[self._meta.resource_name] = errors
return False
return True
def rollback(self, bundles):
"""
Given the list of bundles, delete all objects pertaining to those
bundles.
This needs to be implemented at the user level. No exceptions should
be raised if possible.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
# Views.
def get_list(self, request, **kwargs):
"""
Returns a serialized list of resources.
Calls ``obj_get_list`` to provide the data, then handles that result
set and serializes it.
Should return a HttpResponse (200 OK).
"""
# TODO: Uncached for now. Invalidation that works for everyone may be
# impossible.
base_bundle = self.build_bundle(request=request)
objects = self.obj_get_list(bundle=base_bundle, **self.remove_api_resource_names(kwargs))
sorted_objects = self.apply_sorting(objects, options=request.GET)
paginator = self._meta.paginator_class(request.GET, sorted_objects, resource_uri=self.get_resource_uri(), limit=self._meta.limit, max_limit=self._meta.max_limit, collection_name=self._meta.collection_name)
to_be_serialized = paginator.page()
# Dehydrate the bundles in preparation for serialization.
bundles = []
for obj in to_be_serialized[self._meta.collection_name]:
bundle = self.build_bundle(obj=obj, request=request)
bundles.append(self.full_dehydrate(bundle, for_list=True))
to_be_serialized[self._meta.collection_name] = bundles
to_be_serialized = self.alter_list_data_to_serialize(request, to_be_serialized)
return self.create_response(request, to_be_serialized)
def get_detail(self, request, **kwargs):
"""
Returns a single serialized resource.
Calls ``cached_obj_get/obj_get`` to provide the data, then handles that result
set and serializes it.
Should return a HttpResponse (200 OK).
"""
basic_bundle = self.build_bundle(request=request)
try:
obj = self.cached_obj_get(bundle=basic_bundle, **self.remove_api_resource_names(kwargs))
except ObjectDoesNotExist:
return http.HttpNotFound()
except MultipleObjectsReturned:
return http.HttpMultipleChoices("More than one resource is found at this URI.")
bundle = self.build_bundle(obj=obj, request=request)
bundle = self.full_dehydrate(bundle)
bundle = self.alter_detail_data_to_serialize(request, bundle)
return self.create_response(request, bundle)
    def post_list(self, request, **kwargs):
        """
        Creates a new resource/object with the provided data.
        Calls ``obj_create`` with the provided data and returns a response
        with the new resource's location.
        If a new resource is created, return ``HttpCreated`` (201 Created).
        If ``Meta.always_return_data = True``, there will be a populated body
        of serialized data.
        """
        # ``HttpRequest.raw_post_data`` was renamed to ``body`` in Django 1.4.
        if django.VERSION >= (1, 4):
            body = request.body
        else:
            body = request.raw_post_data
        deserialized = self.deserialize(request, body, format=request.META.get('CONTENT_TYPE', 'application/json'))
        deserialized = self.alter_deserialized_detail_data(request, deserialized)
        bundle = self.build_bundle(data=dict_strip_unicode_keys(deserialized), request=request)
        updated_bundle = self.obj_create(bundle, **self.remove_api_resource_names(kwargs))
        location = self.get_resource_uri(updated_bundle)
        if not self._meta.always_return_data:
            return http.HttpCreated(location=location)
        else:
            # Re-dehydrate so the response body reflects the saved object.
            updated_bundle = self.full_dehydrate(updated_bundle)
            updated_bundle = self.alter_detail_data_to_serialize(request, updated_bundle)
            return self.create_response(request, updated_bundle, response_class=http.HttpCreated, location=location)
def post_detail(self, request, **kwargs):
"""
Creates a new subcollection of the resource under a resource.
This is not implemented by default because most people's data models
aren't self-referential.
If a new resource is created, return ``HttpCreated`` (201 Created).
"""
return http.HttpNotImplemented()
    def put_list(self, request, **kwargs):
        """
        Replaces a collection of resources with another collection.
        Calls ``delete_list`` to clear out the collection then ``obj_create``
        with the provided the data to create the new collection.
        Return ``HttpNoContent`` (204 No Content) if
        ``Meta.always_return_data = False`` (default).
        Return ``HttpAccepted`` (202 Accepted) if
        ``Meta.always_return_data = True``.
        """
        # ``HttpRequest.raw_post_data`` was renamed to ``body`` in Django 1.4.
        if django.VERSION >= (1, 4):
            body = request.body
        else:
            body = request.raw_post_data
        deserialized = self.deserialize(request, body, format=request.META.get('CONTENT_TYPE', 'application/json'))
        deserialized = self.alter_deserialized_list_data(request, deserialized)
        # The payload must be wrapped in the collection key (``objects`` by
        # default).
        if not self._meta.collection_name in deserialized:
            raise BadRequest("Invalid data sent.")
        basic_bundle = self.build_bundle(request=request)
        # PUT-list semantics: wipe the existing collection first.
        self.obj_delete_list_for_update(bundle=basic_bundle, **self.remove_api_resource_names(kwargs))
        bundles_seen = []
        for object_data in deserialized[self._meta.collection_name]:
            bundle = self.build_bundle(data=dict_strip_unicode_keys(object_data), request=request)
            # Attempt to be transactional, deleting any previously created
            # objects if validation fails.
            try:
                self.obj_create(bundle=bundle, **self.remove_api_resource_names(kwargs))
                bundles_seen.append(bundle)
            except ImmediateHttpResponse:
                self.rollback(bundles_seen)
                raise
        if not self._meta.always_return_data:
            return http.HttpNoContent()
        else:
            to_be_serialized = {}
            to_be_serialized[self._meta.collection_name] = [self.full_dehydrate(bundle, for_list=True) for bundle in bundles_seen]
            to_be_serialized = self.alter_list_data_to_serialize(request, to_be_serialized)
            return self.create_response(request, to_be_serialized, response_class=http.HttpAccepted)
    def put_detail(self, request, **kwargs):
        """
        Either updates an existing resource or creates a new one with the
        provided data.
        Calls ``obj_update`` with the provided data first, but falls back to
        ``obj_create`` if the object does not already exist.
        If a new resource is created, return ``HttpCreated`` (201 Created).
        If ``Meta.always_return_data = True``, there will be a populated body
        of serialized data.
        If an existing resource is modified and
        ``Meta.always_return_data = False`` (default), return ``HttpNoContent``
        (204 No Content).
        If an existing resource is modified and
        ``Meta.always_return_data = True``, return ``HttpAccepted`` (202
        Accepted).
        """
        # ``HttpRequest.raw_post_data`` was renamed to ``body`` in Django 1.4.
        if django.VERSION >= (1, 4):
            body = request.body
        else:
            body = request.raw_post_data
        deserialized = self.deserialize(request, body, format=request.META.get('CONTENT_TYPE', 'application/json'))
        deserialized = self.alter_deserialized_detail_data(request, deserialized)
        bundle = self.build_bundle(data=dict_strip_unicode_keys(deserialized), request=request)
        try:
            # Update path: object exists.
            updated_bundle = self.obj_update(bundle=bundle, **self.remove_api_resource_names(kwargs))
            if not self._meta.always_return_data:
                return http.HttpNoContent()
            else:
                updated_bundle = self.full_dehydrate(updated_bundle)
                updated_bundle = self.alter_detail_data_to_serialize(request, updated_bundle)
                return self.create_response(request, updated_bundle, response_class=http.HttpAccepted)
        except (NotFound, MultipleObjectsReturned):
            # Create path: object missing (or URI ambiguous) — PUT creates it.
            updated_bundle = self.obj_create(bundle=bundle, **self.remove_api_resource_names(kwargs))
            location = self.get_resource_uri(updated_bundle)
            if not self._meta.always_return_data:
                return http.HttpCreated(location=location)
            else:
                updated_bundle = self.full_dehydrate(updated_bundle)
                updated_bundle = self.alter_detail_data_to_serialize(request, updated_bundle)
                return self.create_response(request, updated_bundle, response_class=http.HttpCreated, location=location)
def delete_list(self, request, **kwargs):
"""
Destroys a collection of resources/objects.
Calls ``obj_delete_list``.
If the resources are deleted, return ``HttpNoContent`` (204 No Content).
"""
bundle = self.build_bundle(request=request)
self.obj_delete_list(bundle=bundle, request=request, **self.remove_api_resource_names(kwargs))
return http.HttpNoContent()
def delete_detail(self, request, **kwargs):
"""
Destroys a single resource/object.
Calls ``obj_delete``.
If the resource is deleted, return ``HttpNoContent`` (204 No Content).
If the resource did not exist, return ``Http404`` (404 Not Found).
"""
# Manually construct the bundle here, since we don't want to try to
# delete an empty instance.
bundle = Bundle(request=request)
try:
self.obj_delete(bundle=bundle, **self.remove_api_resource_names(kwargs))
return http.HttpNoContent()
except NotFound:
return http.HttpNotFound()
    def patch_list(self, request, **kwargs):
        """
        Updates a collection in-place.
        The exact behavior of ``PATCH`` to a list resource is still the matter of
        some debate in REST circles, and the ``PATCH`` RFC isn't standard. So the
        behavior this method implements (described below) is something of a
        stab in the dark. It's mostly cribbed from GData, with a smattering
        of ActiveResource-isms and maybe even an original idea or two.
        The ``PATCH`` format is one that's similar to the response returned from
        a ``GET`` on a list resource::
            {
              "objects": [{object}, {object}, ...],
              "deleted_objects": ["URI", "URI", "URI", ...],
            }
        For each object in ``objects``:
            * If the dict does not have a ``resource_uri`` key then the item is
              considered "new" and is handled like a ``POST`` to the resource list.
            * If the dict has a ``resource_uri`` key and the ``resource_uri`` refers
              to an existing resource then the item is an update; it's treated
              like a ``PATCH`` to the corresponding resource detail.
            * If the dict has a ``resource_uri`` but the resource *doesn't* exist,
              then this is considered to be a create-via-``PUT``.
        Each entry in ``deleted_objects`` refers to a resource URI of an existing
        resource to be deleted; each is handled like a ``DELETE`` to the relevant
        resource.
        In any case:
            * If there's a resource URI it *must* refer to a resource of this
              type. It's an error to include a URI of a different resource.
            * ``PATCH`` is all or nothing. If a single sub-operation fails, the
              entire request will fail and all resources will be rolled back.
            * For ``PATCH`` to work, you **must** have ``put`` in your
              :ref:`detail-allowed-methods` setting.
            * To delete objects via ``deleted_objects`` in a ``PATCH`` request you
              **must** have ``delete`` in your :ref:`detail-allowed-methods`
              setting.
        Substitute appropriate names for ``objects`` and
        ``deleted_objects`` if ``Meta.collection_name`` is set to something
        other than ``objects`` (default).
        """
        request = convert_post_to_patch(request)
        # ``HttpRequest.raw_post_data`` was renamed to ``body`` in Django 1.4.
        if django.VERSION >= (1, 4):
            body = request.body
        else:
            body = request.raw_post_data
        deserialized = self.deserialize(request, body, format=request.META.get('CONTENT_TYPE', 'application/json'))
        collection_name = self._meta.collection_name
        deleted_collection_name = 'deleted_%s' % collection_name
        if collection_name not in deserialized:
            raise BadRequest("Invalid data sent: missing '%s'" % collection_name)
        # Updating/creating items requires 'put' to be allowed on detail.
        if len(deserialized[collection_name]) and 'put' not in self._meta.detail_allowed_methods:
            raise ImmediateHttpResponse(response=http.HttpMethodNotAllowed())
        bundles_seen = []
        for data in deserialized[collection_name]:
            # If there's a resource_uri then this is either an
            # update-in-place or a create-via-PUT.
            if "resource_uri" in data:
                uri = data.pop('resource_uri')
                try:
                    obj = self.get_via_uri(uri, request=request)
                    # The object does exist, so this is an update-in-place.
                    bundle = self.build_bundle(obj=obj, request=request)
                    bundle = self.full_dehydrate(bundle, for_list=True)
                    bundle = self.alter_detail_data_to_serialize(request, bundle)
                    self.update_in_place(request, bundle, data)
                except (ObjectDoesNotExist, MultipleObjectsReturned):
                    # The object referenced by resource_uri doesn't exist,
                    # so this is a create-by-PUT equivalent.
                    data = self.alter_deserialized_detail_data(request, data)
                    bundle = self.build_bundle(data=dict_strip_unicode_keys(data), request=request)
                    self.obj_create(bundle=bundle)
            else:
                # There's no resource URI, so this is a create call just
                # like a POST to the list resource.
                data = self.alter_deserialized_detail_data(request, data)
                bundle = self.build_bundle(data=dict_strip_unicode_keys(data), request=request)
                self.obj_create(bundle=bundle)
            bundles_seen.append(bundle)
        deleted_collection = deserialized.get(deleted_collection_name, [])
        if deleted_collection:
            # Deleting items requires 'delete' to be allowed on detail.
            if 'delete' not in self._meta.detail_allowed_methods:
                raise ImmediateHttpResponse(response=http.HttpMethodNotAllowed())
            for uri in deleted_collection:
                obj = self.get_via_uri(uri, request=request)
                bundle = self.build_bundle(obj=obj, request=request)
                self.obj_delete(bundle=bundle)
        if not self._meta.always_return_data:
            return http.HttpAccepted()
        else:
            to_be_serialized = {}
            to_be_serialized['objects'] = [self.full_dehydrate(bundle, for_list=True) for bundle in bundles_seen]
            to_be_serialized = self.alter_list_data_to_serialize(request, to_be_serialized)
            return self.create_response(request, to_be_serialized, response_class=http.HttpAccepted)
    def patch_detail(self, request, **kwargs):
        """
        Updates a resource in-place.
        Calls ``obj_update``.
        If the resource is updated, return ``HttpAccepted`` (202 Accepted).
        If the resource did not exist, return ``HttpNotFound`` (404 Not Found).
        """
        request = convert_post_to_patch(request)
        basic_bundle = self.build_bundle(request=request)
        # We want to be able to validate the update, but we can't just pass
        # the partial data into the validator since all data needs to be
        # present. Instead, we basically simulate a PUT by pulling out the
        # original data and updating it in-place.
        # So first pull out the original object. This is essentially
        # ``get_detail``.
        try:
            obj = self.cached_obj_get(bundle=basic_bundle, **self.remove_api_resource_names(kwargs))
        except ObjectDoesNotExist:
            return http.HttpNotFound()
        except MultipleObjectsReturned:
            return http.HttpMultipleChoices("More than one resource is found at this URI.")
        bundle = self.build_bundle(obj=obj, request=request)
        bundle = self.full_dehydrate(bundle)
        bundle = self.alter_detail_data_to_serialize(request, bundle)
        # Now update the bundle in-place.
        # ``HttpRequest.raw_post_data`` was renamed to ``body`` in Django 1.4.
        if django.VERSION >= (1, 4):
            body = request.body
        else:
            body = request.raw_post_data
        deserialized = self.deserialize(request, body, format=request.META.get('CONTENT_TYPE', 'application/json'))
        self.update_in_place(request, bundle, deserialized)
        if not self._meta.always_return_data:
            return http.HttpAccepted()
        else:
            # Re-dehydrate so the response body reflects the saved object.
            bundle = self.full_dehydrate(bundle)
            bundle = self.alter_detail_data_to_serialize(request, bundle)
            return self.create_response(request, bundle, response_class=http.HttpAccepted)
def update_in_place(self, request, original_bundle, new_data):
"""
Update the object in original_bundle in-place using new_data.
"""
original_bundle.data.update(**dict_strip_unicode_keys(new_data))
# Now we've got a bundle with the new data sitting in it and we're
# we're basically in the same spot as a PUT request. SO the rest of this
# function is cribbed from put_detail.
self.alter_deserialized_detail_data(request, original_bundle.data)
kwargs = {
self._meta.detail_uri_name: self.get_bundle_detail_data(original_bundle),
'request': request,
}
return self.obj_update(bundle=original_bundle, **kwargs)
    def get_schema(self, request, **kwargs):
        """
        Returns a serialized form of the schema of the resource.
        Calls ``build_schema`` to generate the data. This method only responds
        to HTTP GET.
        Should return a HttpResponse (200 OK).
        """
        # Standard per-request gatekeeping: method, auth, throttle.
        self.method_check(request, allowed=['get'])
        self.is_authenticated(request)
        self.throttle_check(request)
        self.log_throttled_access(request)
        bundle = self.build_bundle(request=request)
        # The schema is only exposed to users allowed to read the resource.
        self.authorized_read_detail(self.get_object_list(bundle.request), bundle)
        return self.create_response(request, self.build_schema())
    def get_multiple(self, request, **kwargs):
        """
        Returns a serialized list of resources based on the identifiers
        from the URL.
        Calls ``obj_get`` to fetch only the objects requested. This method
        only responds to HTTP GET.
        Should return a HttpResponse (200 OK).
        """
        self.method_check(request, allowed=['get'])
        self.is_authenticated(request)
        self.throttle_check(request)
        # Rip apart the list then iterate.
        kwarg_name = '%s_list' % self._meta.detail_uri_name
        # Identifiers arrive semicolon-separated in the URL kwarg.
        obj_identifiers = kwargs.get(kwarg_name, '').split(';')
        objects = []
        not_found = []
        base_bundle = self.build_bundle(request=request)
        for identifier in obj_identifiers:
            try:
                obj = self.obj_get(bundle=base_bundle, **{self._meta.detail_uri_name: identifier})
                bundle = self.build_bundle(obj=obj, request=request)
                bundle = self.full_dehydrate(bundle, for_list=True)
                objects.append(bundle)
            except (ObjectDoesNotExist, Unauthorized):
                # Missing/forbidden items are reported, not fatal.
                not_found.append(identifier)
        object_list = {
            self._meta.collection_name: objects,
        }
        if len(not_found):
            object_list['not_found'] = not_found
        self.log_throttled_access(request)
        return self.create_response(request, object_list)
class ModelDeclarativeMetaclass(DeclarativeMetaclass):
    """
    Metaclass for ``ModelResource``.

    Beyond the work done by ``DeclarativeMetaclass``, this infers
    ``object_class`` from ``Meta.queryset``, applies the ``fields``
    whitelist / ``excludes`` blacklist to the auto-built field set &
    optionally adds an ``absolute_url`` field.
    """
    def __new__(cls, name, bases, attrs):
        meta = attrs.get('Meta')

        # If a queryset was supplied, the model it maps to becomes the
        # resource's ``object_class``.
        if meta and hasattr(meta, 'queryset'):
            setattr(meta, 'object_class', meta.queryset.model)

        new_class = super(ModelDeclarativeMetaclass, cls).__new__(cls, name, bases, attrs)
        include_fields = getattr(new_class._meta, 'fields', [])
        excludes = getattr(new_class._meta, 'excludes', [])
        # FIX: snapshot the keys before mutating ``base_fields`` below.
        # Deleting from a dict while iterating a live key view breaks on
        # Python 3 & is fragile in general; ``list()`` is behavior-identical
        # on Python 2, where ``keys()`` already returned a list.
        field_names = list(new_class.base_fields.keys())

        for field_name in field_names:
            if field_name == 'resource_uri':
                continue

            # Fields explicitly declared on the resource always survive.
            if field_name in new_class.declared_fields:
                continue

            if len(include_fields) and not field_name in include_fields:
                del(new_class.base_fields[field_name])

            if len(excludes) and field_name in excludes:
                del(new_class.base_fields[field_name])

        # Add in the new fields.
        new_class.base_fields.update(new_class.get_fields(include_fields, excludes))

        if getattr(new_class._meta, 'include_absolute_url', True):
            if not 'absolute_url' in new_class.base_fields:
                new_class.base_fields['absolute_url'] = fields.CharField(attribute='get_absolute_url', readonly=True)
        elif 'absolute_url' in new_class.base_fields and not 'absolute_url' in attrs:
            del(new_class.base_fields['absolute_url'])

        return new_class
class ModelResource(Resource):
    """
    A subclass of ``Resource`` designed to work with Django's ``Models``.

    This class will introspect a given ``Model`` and build a field list based
    on the fields found on the model (excluding relational fields).

    Given that it is aware of Django's ORM, it also handles the CRUD data
    operations of the resource.
    """
    # Python 2-style metaclass hook; ``ModelDeclarativeMetaclass`` builds
    # ``base_fields`` from the model at class-creation time.
    __metaclass__ = ModelDeclarativeMetaclass
@classmethod
def should_skip_field(cls, field):
    """
    Given a Django model field, return if it should be included in the
    contributed ApiFields.
    """
    # Relational fields carry a ``rel`` descriptor; those are skipped so
    # the user can expose them explicitly via ToOne/ToMany fields.
    return bool(getattr(field, 'rel'))
@classmethod
def api_field_from_django_field(cls, f, default=fields.CharField):
    """
    Returns the field type that would likely be associated with each
    Django type.
    """
    # Dispatch table mapping Django internal types onto their API field
    # equivalents; anything unlisted falls back to ``default``.
    type_map = {
        'DateField': fields.DateTimeField,
        'DateTimeField': fields.DateTimeField,
        'BooleanField': fields.BooleanField,
        'NullBooleanField': fields.BooleanField,
        'FloatField': fields.FloatField,
        'DecimalField': fields.DecimalField,
        'IntegerField': fields.IntegerField,
        'PositiveIntegerField': fields.IntegerField,
        'PositiveSmallIntegerField': fields.IntegerField,
        'SmallIntegerField': fields.IntegerField,
        'AutoField': fields.IntegerField,
        'FileField': fields.FileField,
        'ImageField': fields.FileField,
        'TimeField': fields.TimeField,
        # TODO: Perhaps enable these via introspection. The reason they're
        # not enabled by default is the very different ``__init__`` they
        # have over the other fields.
        # 'ForeignKey': ForeignKey,
        # 'ManyToManyField': ManyToManyField,
    }
    return type_map.get(f.get_internal_type(), default)
@classmethod
def get_fields(cls, fields=None, excludes=None):
    """
    Given any explicit fields to include and fields to exclude, add
    additional fields based on the associated model.

    Returns a dict mapping field name to an instantiated api field for
    every model field that survives the include/exclude/skip checks.
    """
    final_fields = {}
    fields = fields or []
    excludes = excludes or []

    if not cls._meta.object_class:
        return final_fields

    for f in cls._meta.object_class._meta.fields:
        # If the field name is already present, skip
        if f.name in cls.base_fields:
            continue

        # If field is not present in explicit field listing, skip
        if fields and f.name not in fields:
            continue

        # If field is in exclude list, skip
        if excludes and f.name in excludes:
            continue

        # Relational fields & anything else the resource opts out of.
        if cls.should_skip_field(f):
            continue

        api_field_class = cls.api_field_from_django_field(f)

        kwargs = {
            'attribute': f.name,
            'help_text': f.help_text,
        }

        if f.null is True:
            kwargs['null'] = True

        kwargs['unique'] = f.unique

        # NOTE: the order of the default assignments below matters --
        # later checks deliberately override earlier ones.
        # A non-null but blank-able field serializes as an empty string.
        if not f.null and f.blank is True:
            kwargs['default'] = ''
            kwargs['blank'] = True

        if f.get_internal_type() == 'TextField':
            kwargs['default'] = ''

        # A model-declared default wins over the blank/'' defaults above.
        if f.has_default():
            kwargs['default'] = f.default

        # ``auto_now``/``auto_now_add`` flags are passed through as the
        # default, which wins over everything else.
        if getattr(f, 'auto_now', False):
            kwargs['default'] = f.auto_now

        if getattr(f, 'auto_now_add', False):
            kwargs['default'] = f.auto_now_add

        final_fields[f.name] = api_field_class(**kwargs)
        final_fields[f.name].instance_name = f.name

    return final_fields
def check_filtering(self, field_name, filter_type='exact', filter_bits=None):
    """
    Given a field name, a optional filter type and an optional list of
    additional relations, determine if a field can be filtered on.

    If a filter does not meet the needed conditions, it should raise an
    ``InvalidFilterError``.

    If the filter meets the conditions, a list of attribute names (not
    field names) will be returned.
    """
    if filter_bits is None:
        filter_bits = []

    if not field_name in self._meta.filtering:
        raise InvalidFilterError("The '%s' field does not allow filtering." % field_name)

    # Check to see if it's an allowed lookup type.
    if not self._meta.filtering[field_name] in (ALL, ALL_WITH_RELATIONS):
        # Must be an explicit whitelist.
        if not filter_type in self._meta.filtering[field_name]:
            raise InvalidFilterError("'%s' is not an allowed filter on the '%s' field." % (filter_type, field_name))

    if self.fields[field_name].attribute is None:
        raise InvalidFilterError("The '%s' field has no 'attribute' for searching with." % field_name)

    # Check to see if it's a relational lookup and if that's allowed.
    if len(filter_bits):
        if not getattr(self.fields[field_name], 'is_related', False):
            raise InvalidFilterError("The '%s' field does not support relations." % field_name)

        if not self._meta.filtering[field_name] == ALL_WITH_RELATIONS:
            raise InvalidFilterError("Lookups are not allowed more than one level deep on the '%s' field." % field_name)

        # Recursively descend through the remaining lookups in the filter,
        # if any. We should ensure that all along the way, we're allowed
        # to filter on that field by the related resource.
        related_resource = self.fields[field_name].get_related_resource(None)
        return [self.fields[field_name].attribute] + related_resource.check_filtering(filter_bits[0], filter_type, filter_bits[1:])

    return [self.fields[field_name].attribute]
def filter_value_to_python(self, value, field_name, filters, filter_expr,
        filter_type):
    """
    Turn the string ``value`` into a python object.
    """
    # Normalize the common boolean/null spellings that arrive as
    # querystring text.
    if value in ('true', 'True', True):
        value = True
    elif value in ('false', 'False', False):
        value = False
    elif value in ('nil', 'none', 'None', None):
        value = None

    # ``in`` & ``range`` filters take multiple values. Prefer the raw
    # request's ``getlist`` (which handles repeated parameters), falling
    # back to splitting the single string on commas.
    if filter_type in ('in', 'range') and len(value):
        if hasattr(filters, 'getlist'):
            pieces = []

            for piece in filters.getlist(filter_expr):
                pieces.extend(piece.split(','))

            value = pieces
        else:
            value = value.split(',')

    return value
def build_filters(self, filters=None):
    """
    Given a dictionary of filters, create the necessary ORM-level filters.

    Keys should be resource fields, **NOT** model fields.

    Valid values are either a list of Django filter types (i.e.
    ``['startswith', 'exact', 'lte']``), the ``ALL`` constant or the
    ``ALL_WITH_RELATIONS`` constant.
    """
    # At the declarative level:
    #     filtering = {
    #         'resource_field_name': ['exact', 'startswith', 'endswith', 'contains'],
    #         'resource_field_name_2': ['exact', 'gt', 'gte', 'lt', 'lte', 'range'],
    #         'resource_field_name_3': ALL,
    #         'resource_field_name_4': ALL_WITH_RELATIONS,
    #         ...
    #     }
    # Accepts the filters as a dict. None by default, meaning no filters.
    if filters is None:
        filters = {}

    qs_filters = {}

    # Determine the set of legal ORM lookup terms ('exact', 'lt', ...),
    # preferring the queryset's own terms when one is configured.
    if getattr(self._meta, 'queryset', None) is not None:
        # Get the possible query terms from the current QuerySet.
        if hasattr(self._meta.queryset.query.query_terms, 'keys'):
            # Django 1.4 & below compatibility.
            query_terms = self._meta.queryset.query.query_terms.keys()
        else:
            # Django 1.5+.
            query_terms = self._meta.queryset.query.query_terms
    else:
        if hasattr(QUERY_TERMS, 'keys'):
            # Django 1.4 & below compatibility.
            query_terms = QUERY_TERMS.keys()
        else:
            # Django 1.5+.
            query_terms = QUERY_TERMS

    for filter_expr, value in filters.items():
        filter_bits = filter_expr.split(LOOKUP_SEP)
        field_name = filter_bits.pop(0)
        # Default to an exact match unless a trailing lookup
        # (e.g. ``__startswith``) names a recognized query term.
        filter_type = 'exact'

        if not field_name in self.fields:
            # It's not a field we know about. Move along citizen.
            continue

        if len(filter_bits) and filter_bits[-1] in query_terms:
            filter_type = filter_bits.pop()

        # Raises InvalidFilterError when the resource disallows this
        # filter; otherwise returns the model-attribute lookup path.
        lookup_bits = self.check_filtering(field_name, filter_type, filter_bits)
        value = self.filter_value_to_python(value, field_name, filters, filter_expr, filter_type)

        db_field_name = LOOKUP_SEP.join(lookup_bits)
        qs_filter = "%s%s%s" % (db_field_name, LOOKUP_SEP, filter_type)
        qs_filters[qs_filter] = value

    return dict_strip_unicode_keys(qs_filters)
def apply_sorting(self, obj_list, options=None):
    """
    Given a dictionary of options, apply some ORM-level sorting to the
    provided ``QuerySet``.

    Looks for the ``order_by`` key and handles either ascending (just the
    field name) or descending (the field name with a ``-`` in front).

    The field name should be the resource field, **NOT** model field.
    """
    if options is None:
        options = {}

    parameter_name = 'order_by'

    if not 'order_by' in options:
        if not 'sort_by' in options:
            # Nothing to alter the order. Return what we've got.
            return obj_list
        else:
            warnings.warn("'sort_by' is a deprecated parameter. Please use 'order_by' instead.")
            parameter_name = 'sort_by'

    order_by_args = []

    # A QueryDict's ``getlist`` preserves repeated ``order_by`` params;
    # plain dicts fall back to a single (possibly scalar) value.
    if hasattr(options, 'getlist'):
        order_bits = options.getlist(parameter_name)
    else:
        order_bits = options.get(parameter_name)

        if not isinstance(order_bits, (list, tuple)):
            order_bits = [order_bits]

    for order_by in order_bits:
        order_by_bits = order_by.split(LOOKUP_SEP)

        field_name = order_by_bits[0]
        order = ''

        # Leading ``-`` means descending order.
        if order_by_bits[0].startswith('-'):
            field_name = order_by_bits[0][1:]
            order = '-'

        if not field_name in self.fields:
            # It's not a field we know about. Move along citizen.
            raise InvalidSortError("No matching '%s' field for ordering on." % field_name)

        if not field_name in self._meta.ordering:
            raise InvalidSortError("The '%s' field does not allow ordering." % field_name)

        if self.fields[field_name].attribute is None:
            raise InvalidSortError("The '%s' field has no 'attribute' for ordering with." % field_name)

        # Translate the resource field name to the model attribute path,
        # keeping any deeper ``__``-joined lookups intact.
        order_by_args.append("%s%s" % (order, LOOKUP_SEP.join([self.fields[field_name].attribute] + order_by_bits[1:])))

    return obj_list.order_by(*order_by_args)
def apply_filters(self, request, applicable_filters):
    """
    An ORM-specific implementation of ``apply_filters``.

    The default simply applies the ``applicable_filters`` as ``**kwargs``,
    but should make it possible to do more advanced things.
    """
    return self.get_object_list(request).filter(**applicable_filters)
def get_object_list(self, request):
    """
    An ORM-specific implementation of ``get_object_list``.

    Returns a queryset that may have been limited by other overrides.
    """
    # ``_clone`` gives each request a fresh queryset so cached results
    # never leak between requests.
    return self._meta.queryset._clone()
def obj_get_list(self, bundle, **kwargs):
    """
    A ORM-specific implementation of ``obj_get_list``.

    Takes an optional ``request`` object, whose ``GET`` dictionary can be
    used to narrow the query.
    """
    raw_filters = {}

    if hasattr(bundle.request, 'GET'):
        # Grab a mutable copy.
        raw_filters = bundle.request.GET.copy()

    # URI kwargs (e.g. nested lookups) always win over the querystring.
    raw_filters.update(kwargs)
    applicable_filters = self.build_filters(filters=raw_filters)

    try:
        matched = self.apply_filters(bundle.request, applicable_filters)
        return self.authorized_read_list(matched, bundle)
    except ValueError:
        raise BadRequest("Invalid resource lookup data provided (mismatched type).")
def obj_get(self, bundle, **kwargs):
    """
    A ORM-specific implementation of ``obj_get``.

    Takes optional ``kwargs``, which are used to narrow the query to find
    the instance.

    Raises ``DoesNotExist``/``MultipleObjectsReturned`` when the lookup
    does not match exactly one object, and ``NotFound`` when the lookup
    values cannot be coerced to the field types.
    """
    try:
        object_list = self.get_object_list(bundle.request).filter(**kwargs)
        # Human-readable lookup description for the error messages below.
        stringified_kwargs = ', '.join(["%s=%s" % (k, v) for k, v in kwargs.items()])

        if len(object_list) <= 0:
            raise self._meta.object_class.DoesNotExist("Couldn't find an instance of '%s' which matched '%s'." % (self._meta.object_class.__name__, stringified_kwargs))
        elif len(object_list) > 1:
            raise MultipleObjectsReturned("More than '%s' matched '%s'." % (self._meta.object_class.__name__, stringified_kwargs))

        bundle.obj = object_list[0]
        self.authorized_read_detail(object_list, bundle)
        return bundle.obj
    except ValueError:
        raise NotFound("Invalid resource lookup data provided (mismatched type).")
def obj_create(self, bundle, **kwargs):
    """
    A ORM-specific implementation of ``obj_create``.
    """
    bundle.obj = self._meta.object_class()

    # URI kwargs seed the fresh instance before hydration.
    for attr_name, attr_value in kwargs.items():
        setattr(bundle.obj, attr_name, attr_value)

    self.authorized_create_detail(self.get_object_list(bundle.request), bundle)
    bundle = self.full_hydrate(bundle)
    return self.save(bundle)
def lookup_kwargs_with_identifiers(self, bundle, kwargs):
    """
    Kwargs here represent uri identifiers Ex: /repos/<user_id>/<repo_name>/
    We need to turn those identifiers into Python objects for generating
    lookup parameters that can find them in the DB
    """
    lookup_kwargs = {}
    bundle.obj = self.get_object_list(bundle.request).model()
    # Override data values, we rely on uri identifiers
    bundle.data.update(kwargs)
    # We're going to manually hydrate, as opposed to calling
    # ``full_hydrate``, to ensure we don't try to flesh out related
    # resources & keep things speedy.
    bundle = self.hydrate(bundle)

    for identifier in kwargs:
        # The detail identifier (usually ``pk``) is passed through as-is.
        if identifier == self._meta.detail_uri_name:
            lookup_kwargs[identifier] = kwargs[identifier]
            continue

        field_object = self.fields[identifier]

        # Skip readonly or related fields.
        if field_object.readonly is True or getattr(field_object, 'is_related', False):
            continue

        # Check for an optional method to do further hydration.
        method = getattr(self, "hydrate_%s" % identifier, None)

        if method:
            bundle = method(bundle)

        # NOTE(review): ``value`` is only assigned when the field has an
        # attribute; a field without one would raise NameError below.
        # Callers guard against this with a broad except -- confirm.
        if field_object.attribute:
            value = field_object.hydrate(bundle)

        lookup_kwargs[identifier] = value

    return lookup_kwargs
def obj_update(self, bundle, skip_errors=False, **kwargs):
    """
    A ORM-specific implementation of ``obj_update``.

    When the bundle does not already carry a persisted object, the URI
    identifiers are hydrated into lookup parameters and the object is
    fetched; ``NotFound`` is raised when no match exists.
    """
    if not bundle.obj or not self.get_bundle_detail_data(bundle):
        try:
            lookup_kwargs = self.lookup_kwargs_with_identifiers(bundle, kwargs)
        except Exception:
            # FIX: this was a bare ``except:``, which would also swallow
            # ``KeyboardInterrupt``/``SystemExit``. The intended fallback
            # is preserved: if there is trouble hydrating the data, fall
            # back to just using kwargs by itself (usually it only
            # contains a "pk" key and this will work fine).
            lookup_kwargs = kwargs

        try:
            bundle.obj = self.obj_get(bundle=bundle, **lookup_kwargs)
        except ObjectDoesNotExist:
            raise NotFound("A model instance matching the provided arguments could not be found.")

    bundle = self.full_hydrate(bundle)
    return self.save(bundle, skip_errors=skip_errors)
def obj_delete_list(self, bundle, **kwargs):
    """
    A ORM-specific implementation of ``obj_delete_list``.
    """
    candidates = self.obj_get_list(bundle=bundle, **kwargs)
    authorized = self.authorized_delete_list(candidates, bundle)

    if hasattr(authorized, 'delete'):
        # It's likely a ``QuerySet``. Call ``.delete()`` for efficiency.
        authorized.delete()
    else:
        for authed_obj in authorized:
            authed_obj.delete()
def obj_delete_list_for_update(self, bundle, **kwargs):
    """
    A ORM-specific implementation of ``obj_delete_list_for_update``.

    Same as ``obj_delete_list`` but authorized against *update* rather
    than *delete* permissions (used when a PUT replaces a collection).
    """
    candidates = self.obj_get_list(bundle=bundle, **kwargs)
    authorized = self.authorized_update_list(candidates, bundle)

    if hasattr(authorized, 'delete'):
        # It's likely a ``QuerySet``. Call ``.delete()`` for efficiency.
        authorized.delete()
    else:
        for authed_obj in authorized:
            authed_obj.delete()
def obj_delete(self, bundle, **kwargs):
    """
    A ORM-specific implementation of ``obj_delete``.

    Takes optional ``kwargs``, which are used to narrow the query to find
    the instance.
    """
    # Reuse a pre-fetched object on the bundle; otherwise look it up.
    if not hasattr(bundle.obj, 'delete'):
        try:
            bundle.obj = self.obj_get(bundle=bundle, **kwargs)
        except ObjectDoesNotExist:
            raise NotFound("A model instance matching the provided arguments could not be found.")

    self.authorized_delete_detail(self.get_object_list(bundle.request), bundle)
    bundle.obj.delete()
# NOTE(review): ``commit_on_success`` is the pre-Django-1.6 transaction
# decorator (replaced by ``transaction.atomic`` later) -- matches the
# Django versions this module targets.
@transaction.commit_on_success()
def patch_list(self, request, **kwargs):
    """
    An ORM-specific implementation of ``patch_list``.

    Necessary because PATCH should be atomic (all-success or all-fail)
    and the only way to do this neatly is at the database level.
    """
    return super(ModelResource, self).patch_list(request, **kwargs)
def rollback(self, bundles):
    """
    A ORM-specific implementation of ``rollback``.

    Given the list of bundles, delete all models pertaining to those
    bundles.
    """
    for bundle in bundles:
        # Only objects that actually made it to the database (i.e. have
        # detail data such as a pk) need to be removed.
        if not (bundle.obj and self.get_bundle_detail_data(bundle)):
            continue

        bundle.obj.delete()
def create_identifier(self, obj):
    # Globally-unique key ("app.model.pk") used to track which objects
    # have already been persisted during nested save/save_m2m calls.
    # NOTE(review): ``Options.module_name`` is the pre-Django-1.6 spelling
    # of ``model_name`` -- matches this module's target Django versions.
    return u"%s.%s.%s" % (obj._meta.app_label, obj._meta.module_name, obj.pk)
def save(self, bundle, skip_errors=False):
    """
    Validates, authorizes & persists the bundle's object, then its
    related non-M2M & M2M data (in that order).

    Raises ``ImmediateHttpResponse`` carrying the validation errors
    unless ``skip_errors`` is set.
    """
    self.is_valid(bundle)

    if bundle.errors and not skip_errors:
        raise ImmediateHttpResponse(response=self.error_response(bundle.request, bundle.errors))

    # Check if they're authorized.
    if bundle.obj.pk:
        self.authorized_update_detail(self.get_object_list(bundle.request), bundle)
    else:
        self.authorized_create_detail(self.get_object_list(bundle.request), bundle)

    # Save FKs just in case.
    self.save_related(bundle)

    # Save the main object.
    bundle.obj.save()
    # Record the identifier so nested saves don't re-save this object.
    bundle.objects_saved.add(self.create_identifier(bundle.obj))

    # Now pick up the M2M bits.
    m2m_bundle = self.hydrate_m2m(bundle)
    self.save_m2m(m2m_bundle)
    return bundle
def save_related(self, bundle):
    """
    Handles the saving of related non-M2M data.

    Calling assigning ``child.parent = parent`` & then calling
    ``Child.save`` isn't good enough to make sure the ``parent``
    is saved.

    To get around this, we go through all our related fields &
    call ``save`` on them if they have related, non-M2M data.
    M2M data is handled by the ``ModelResource.save_m2m`` method.
    """
    for field_name, field_object in self.fields.items():
        if not getattr(field_object, 'is_related', False):
            continue

        if getattr(field_object, 'is_m2m', False):
            continue

        if not field_object.attribute:
            continue

        if field_object.readonly:
            continue

        # FIX: ``dict.has_key`` is deprecated (& removed in Python 3);
        # the ``in`` operator is the idiomatic, behavior-identical test.
        if field_object.blank and field_name not in bundle.data:
            continue

        # Get the object.
        try:
            related_obj = getattr(bundle.obj, field_object.attribute)
        except ObjectDoesNotExist:
            # Not set on the object yet; it may have been captured during
            # hydration for deferred saving.
            related_obj = bundle.related_objects_to_save.get(field_object.attribute, None)

        # Because sometimes it's ``None`` & that's OK.
        if related_obj:
            if field_object.related_name:
                # Point the reverse relation back at our (possibly
                # just-created) object, persisting it first if needed.
                if not self.get_bundle_detail_data(bundle):
                    bundle.obj.save()

                setattr(related_obj, field_object.related_name, bundle.obj)

            related_resource = field_object.get_related_resource(related_obj)

            # Before we build the bundle & try saving it, let's make sure we
            # haven't already saved it.
            obj_id = self.create_identifier(related_obj)

            if obj_id in bundle.objects_saved:
                # It's already been saved. We're done here.
                continue

            if bundle.data.get(field_name) and hasattr(bundle.data[field_name], 'keys'):
                # Only build & save if there's data, not just a URI.
                related_bundle = related_resource.build_bundle(
                    obj=related_obj,
                    data=bundle.data.get(field_name),
                    request=bundle.request,
                    objects_saved=bundle.objects_saved
                )
                related_resource.save(related_bundle)

            setattr(bundle.obj, field_object.attribute, related_obj)
def save_m2m(self, bundle):
    """
    Handles the saving of related M2M data.

    Due to the way Django works, the M2M data must be handled after the
    main instance, which is why this isn't a part of the main ``save`` bits.

    Currently slightly inefficient in that it will clear out the whole
    relation and recreate the related data as needed.
    """
    for field_name, field_object in self.fields.items():
        if not getattr(field_object, 'is_m2m', False):
            continue

        if not field_object.attribute:
            continue

        if field_object.readonly:
            continue

        # Get the manager.
        related_mngr = None

        # ``attribute`` may be either the attribute name or a callable
        # that resolves the manager from the bundle.
        if isinstance(field_object.attribute, basestring):
            related_mngr = getattr(bundle.obj, field_object.attribute)
        elif callable(field_object.attribute):
            related_mngr = field_object.attribute(bundle)

        if not related_mngr:
            continue

        if hasattr(related_mngr, 'clear'):
            # FIXME: Dupe the original bundle, copy in the new object &
            #        check the perms on that (using the related resource)?

            # Clear it out, just to be safe.
            related_mngr.clear()

        related_objs = []

        for related_bundle in bundle.data[field_name]:
            related_resource = field_object.get_related_resource(bundle.obj)

            # Before we build the bundle & try saving it, let's make sure we
            # haven't already saved it.
            obj_id = self.create_identifier(related_bundle.obj)

            if obj_id in bundle.objects_saved:
                # It's already been saved. We're done here.
                continue

            # Only build & save if there's data, not just a URI.
            updated_related_bundle = related_resource.build_bundle(
                obj=related_bundle.obj,
                data=related_bundle.data,
                request=bundle.request,
                objects_saved=bundle.objects_saved
            )

            # Only save related models if they're newly added.
            if updated_related_bundle.obj._state.adding:
                related_resource.save(updated_related_bundle)
            related_objs.append(updated_related_bundle.obj)

        # Re-attach everything in one go.
        related_mngr.add(*related_objs)
def detail_uri_kwargs(self, bundle_or_obj):
    """
    Given a ``Bundle`` or an object (typically a ``Model`` instance),
    it returns the extra kwargs needed to generate a detail URI.

    By default, it uses the model's ``pk`` in order to create the URI.
    """
    # Accept either a bundle (unwrap it) or a bare model instance.
    if isinstance(bundle_or_obj, Bundle):
        source = bundle_or_obj.obj
    else:
        source = bundle_or_obj

    return {
        self._meta.detail_uri_name: getattr(source, self._meta.detail_uri_name),
    }
class NamespacedModelResource(ModelResource):
    """
    A ModelResource subclass that respects Django namespaces.
    """
    def _build_reverse_url(self, name, args=None, kwargs=None):
        # Qualify the URL name with the configured namespace before
        # reversing, e.g. ``api:resource_detail``.
        namespaced = "%s:%s" % (self._meta.urlconf_namespace, name)
        return reverse(namespaced, args=args, kwargs=kwargs)
# Based off of ``piston.utils.coerce_put_post``. Similarly BSD-licensed.
# And no, the irony is not lost on me.
def convert_post_to_VERB(request, verb):
    """
    Force Django to process the VERB.
    """
    if request.method == verb:
        # Drop any previously-parsed payload so it gets re-read.
        if hasattr(request, '_post'):
            del request._post
            del request._files

        try:
            # Temporarily masquerade as POST so Django's form/file parsing
            # machinery runs, then restore the real verb.
            request.method = "POST"
            request._load_post_and_files()
            request.method = verb
        except AttributeError:
            # Some request classes don't allow assigning ``method``; flip
            # the verb at the WSGI environ level instead.
            request.META['REQUEST_METHOD'] = 'POST'
            request._load_post_and_files()
            request.META['REQUEST_METHOD'] = verb

    # Expose the parsed data as ``request.PUT``/``request.PATCH``/etc.
    setattr(request, verb, request.POST)
    return request
def convert_post_to_put(request):
    """Re-parse the body of a PUT request via Django's POST machinery."""
    return convert_post_to_VERB(request, verb='PUT')


def convert_post_to_patch(request):
    """Re-parse the body of a PATCH request via Django's POST machinery."""
    return convert_post_to_VERB(request, verb='PATCH')
|
except KeyError:
pass
return kwargs_subset
|
import { ServerActions } from './ServerActions';
import { AuthTokenProvider } from './AuthTokenProvider';
import { RepoInfo } from './RepoInfo';
import { Query } from '../api/Query';
/**
* Firebase connection. Abstracts wire protocol and handles reconnecting.
*
* NOTE: All JSON objects sent to the realtime connection must have property names enclosed
* in quotes to make sure the closure compiler does not minify them.
*/
export declare class PersistentConnection extends ServerActions {
private repoInfo_;
private onDataUpdate_;
private onConnectStatus_;
private onServerInfoUpdate_;
private authTokenProvider_;
private authOverride_;
id: number;
private log_;
/** @private {Object} */
private interruptReasons_;
private listens_;
private outstandingPuts_;
private outstandingPutCount_;
private onDisconnectRequestQueue_;
private connected_;
private reconnectDelay_;
private maxReconnectDelay_;
private securityDebugCallback_;
lastSessionId: string | null;
/** @private {number|null} */
private establishConnectionTimer_;
/** @private {boolean} */
private visible_;
private requestCBHash_;
private requestNumber_;
/** @private {?{
* sendRequest(Object),
* close()
* }} */
private realtime_;
/** @private {string|null} */
private authToken_;
private forceTokenRefresh_;
private invalidAuthTokenCount_;
private firstConnection_;
private lastConnectionAttemptTime_;
private lastConnectionEstablishedTime_;
<|fim▁hole|> private static nextPersistentConnectionId_;
/**
* Counter for number of connections created. Mainly used for tagging in the logs
* @type {number}
* @private
*/
private static nextConnectionId_;
/**
* @implements {ServerActions}
* @param {!RepoInfo} repoInfo_ Data about the namespace we are connecting to
* @param {function(string, *, boolean, ?number)} onDataUpdate_ A callback for new data from the server
* @param onConnectStatus_
* @param onServerInfoUpdate_
* @param authTokenProvider_
* @param authOverride_
*/
constructor(repoInfo_: RepoInfo, onDataUpdate_: (a: string, b: any, c: boolean, d: number | null) => void, onConnectStatus_: (a: boolean) => void, onServerInfoUpdate_: (a: any) => void, authTokenProvider_: AuthTokenProvider, authOverride_?: Object | null);
/**
* @param {!string} action
* @param {*} body
* @param {function(*)=} onResponse
* @protected
*/
protected sendRequest(action: string, body: any, onResponse?: (a: any) => void): void;
/**
* @inheritDoc
*/
listen(query: Query, currentHashFn: () => string, tag: number | null, onComplete: (a: string, b: any) => void): void;
/**
* @param {!{onComplete(),
* hashFn():!string,
* query: !Query,
* tag: ?number}} listenSpec
* @private
*/
private sendListen_(listenSpec);
/**
* @param {*} payload
* @param {!Query} query
* @private
*/
private static warnOnListenWarnings_(payload, query);
/**
* @inheritDoc
*/
refreshAuthToken(token: string): void;
/**
* @param {!string} credential
* @private
*/
private reduceReconnectDelayIfAdminCredential_(credential);
/**
* Attempts to authenticate with the given credentials. If the authentication attempt fails, it's triggered like
* a auth revoked (the connection is closed).
*/
tryAuth(): void;
/**
* @inheritDoc
*/
unlisten(query: Query, tag: number | null): void;
private sendUnlisten_(pathString, queryId, queryObj, tag);
/**
* @inheritDoc
*/
onDisconnectPut(pathString: string, data: any, onComplete?: (a: string, b: string) => void): void;
/**
* @inheritDoc
*/
onDisconnectMerge(pathString: string, data: any, onComplete?: (a: string, b: string) => void): void;
/**
* @inheritDoc
*/
onDisconnectCancel(pathString: string, onComplete?: (a: string, b: string) => void): void;
private sendOnDisconnect_(action, pathString, data, onComplete);
/**
* @inheritDoc
*/
put(pathString: string, data: any, onComplete?: (a: string, b: string) => void, hash?: string): void;
/**
* @inheritDoc
*/
merge(pathString: string, data: any, onComplete: (a: string, b: string | null) => void, hash?: string): void;
putInternal(action: string, pathString: string, data: any, onComplete: (a: string, b: string | null) => void, hash?: string): void;
private sendPut_(index);
/**
* @inheritDoc
*/
reportStats(stats: {
[k: string]: any;
}): void;
/**
* @param {*} message
* @private
*/
private onDataMessage_(message);
private onDataPush_(action, body);
private onReady_(timestamp, sessionId);
private scheduleConnect_(timeout);
/**
* @param {boolean} visible
* @private
*/
private onVisible_(visible);
private onOnline_(online);
private onRealtimeDisconnect_();
private establishConnection_();
/**
* @param {string} reason
*/
interrupt(reason: string): void;
/**
* @param {string} reason
*/
resume(reason: string): void;
private handleTimestamp_(timestamp);
private cancelSentTransactions_();
/**
* @param {!string} pathString
* @param {Array.<*>=} query
* @private
*/
private onListenRevoked_(pathString, query?);
/**
* @param {!string} pathString
* @param {!string} queryId
* @return {{queries:Array.<Query>, onComplete:function(string)}}
* @private
*/
private removeListen_(pathString, queryId);
private onAuthRevoked_(statusCode, explanation);
private onSecurityDebugPacket_(body);
private restoreState_();
/**
* Sends client stats for first connection
* @private
*/
private sendConnectStats_();
/**
* @return {boolean}
* @private
*/
private shouldReconnect_();
}<|fim▁end|>
|
/**
* @private
*/
|
# -*- coding: utf-8 -*-
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of Unix-like rm command for cloud storage providers."""
from __future__ import absolute_import
import time
from gslib.cloud_api import BucketNotFoundException
from gslib.cloud_api import NotEmptyException
from gslib.cloud_api import NotFoundException
from gslib.cloud_api import ServiceException
from gslib.command import Command
from gslib.command import DecrementFailureCount
from gslib.command_argument import CommandArgument
from gslib.cs_api_map import ApiSelector
from gslib.exception import CommandException
from gslib.exception import NO_URLS_MATCHED_GENERIC
from gslib.exception import NO_URLS_MATCHED_TARGET
from gslib.name_expansion import NameExpansionIterator
from gslib.name_expansion import SeekAheadNameExpansionIterator
from gslib.parallelism_framework_util import PutToQueueWithTimeout
from gslib.storage_url import StorageUrlFromString
from gslib.thread_message import MetadataMessage
from gslib.translation_helper import PreconditionsFromHeaders
from gslib.util import GetCloudApiInstance
from gslib.util import NO_MAX
from gslib.util import Retry
from gslib.util import StdinIterator
# Usage synopsis, interpolated into the detailed help & the command spec.
_SYNOPSIS = """
gsutil rm [-f] [-r] url...
gsutil rm [-f] [-r] -I
"""

# Full help text surfaced by ``gsutil help rm``. The <B>...</B> markers are
# rendered as bold by gsutil's help formatter.
_DETAILED_HELP_TEXT = ("""
<B>SYNOPSIS</B>
""" + _SYNOPSIS + """
<B>DESCRIPTION</B>
The gsutil rm command removes objects.
For example, the command:
gsutil rm gs://bucket/subdir/*
will remove all objects in gs://bucket/subdir, but not in any of its
sub-directories. In contrast:
gsutil rm gs://bucket/subdir/**
will remove all objects under gs://bucket/subdir or any of its
subdirectories.
You can also use the -r option to specify recursive object deletion. Thus, for
example, either of the following two commands will remove gs://bucket/subdir
and all objects and subdirectories under it:
gsutil rm gs://bucket/subdir**
gsutil rm -r gs://bucket/subdir
The -r option will also delete all object versions in the subdirectory for
versioning-enabled buckets, whereas the ** command will only delete the live
version of each object in the subdirectory.
Running gsutil rm -r on a bucket will delete all versions of all objects in
the bucket, and then delete the bucket:
gsutil rm -r gs://bucket
If you want to delete all objects in the bucket, but not the bucket itself,
this command will work:
gsutil rm gs://bucket/**
If you have a large number of objects to remove you might want to use the
gsutil -m option, to perform parallel (multi-threaded/multi-processing)
removes:
gsutil -m rm -r gs://my_bucket/subdir
You can pass a list of URLs (one per line) to remove on stdin instead of as
command line arguments by using the -I option. This allows you to use gsutil
in a pipeline to remove objects identified by a program, such as:
some_program | gsutil -m rm -I
The contents of stdin can name cloud URLs and wildcards of cloud URLs.
Note that gsutil rm will refuse to remove files from the local
file system. For example this will fail:
gsutil rm *.txt
WARNING: Object removal cannot be undone. Google Cloud Storage is designed
to give developers a high amount of flexibility and control over their data,
and Google maintains strict controls over the processing and purging of
deleted data. To protect yourself from mistakes, you can configure object
versioning on your bucket(s). See 'gsutil help versions' for details.
<B>DATA RESTORATION FROM ACCIDENTAL DELETION OR OVERWRITES</B>
Google Cloud Storage does not provide support for restoring data lost
or overwritten due to customer errors. If you have concerns that your
application software (or your users) may at some point erroneously delete or
overwrite data, you can protect yourself from that risk by enabling Object
Versioning (see "gsutil help versioning"). Doing so increases storage costs,
which can be partially mitigated by configuring Lifecycle Management to delete
older object versions (see "gsutil help lifecycle").
<B>OPTIONS</B>
-f Continues silently (without printing error messages) despite
errors when removing multiple objects. If some of the objects
could not be removed, gsutil's exit status will be non-zero even
if this flag is set. Execution will still halt if an inaccessible
bucket is encountered. This option is implicitly set when running
"gsutil -m rm ...".
-I Causes gsutil to read the list of objects to remove from stdin.
This allows you to run a program that generates the list of
objects to remove.
-R, -r The -R and -r options are synonymous. Causes bucket or bucket
subdirectory contents (all objects and subdirectories that it
contains) to be removed recursively. If used with a bucket-only
URL (like gs://bucket), after deleting objects and subdirectories
gsutil will delete the bucket. This option implies the -a option
and will delete all object versions.
-a Delete all versions of an object.
""")
def _RemoveExceptionHandler(cls, e):
  """Records a failed removal so the run can finish and report status.

  Args:
    cls: The command instance (gsutil Apply convention).
    e: Exception raised while removing an object.
  """
  if not cls.continue_on_error:
    cls.logger.error(str(e))
  # TODO: Use shared state to track missing bucket names when we get a
  # BucketNotFoundException. Then improve bucket removal logic and exception
  # messages.
  if isinstance(e, BucketNotFoundException):
    cls.bucket_not_found_count += 1
    cls.logger.error(str(e))
  elif _ExceptionMatchesBucketToDelete(cls.bucket_strings_to_delete, e):
    DecrementFailureCount()
  else:
    cls.op_failure_count += 1
# pylint: disable=unused-argument
def _RemoveFoldersExceptionHandler(cls, e):
  """Treats "no matching folder objects" as success during folder cleanup."""
  no_urls_matched = (isinstance(e, CommandException) and
                     NO_URLS_MATCHED_GENERIC in e.reason)
  if no_urls_matched or isinstance(e, NotFoundException):
    # Nothing to delete is fine here; undo the failure bookkeeping.
    DecrementFailureCount()
  else:
    raise e
def _RemoveFuncWrapper(cls, name_expansion_result, thread_state=None):
  """Forwards one expanded URL to RemoveFunc on the command instance.

  Note: per gsutil's Apply convention, `cls` is the command *instance*
  (here presumably RmCommand), not a class -- TODO confirm.
  """
  cls.RemoveFunc(name_expansion_result, thread_state=thread_state)
def _ExceptionMatchesBucketToDelete(bucket_strings_to_delete, e):
  """Checks whether `e` is a no-URLs-matched error for a doomed bucket.

  A recursive delete call on an empty bucket will raise an exception when
  listing its objects, but if we plan to delete the bucket that shouldn't
  result in a user-visible error.

  Args:
    bucket_strings_to_delete: Buckets slated for recursive deletion.
    e: Exception to check.

  Returns:
    True if the exception was a no-URLs-matched exception and it matched
    one of bucket_strings_to_delete, None otherwise.
  """
  if not bucket_strings_to_delete:
    return None
  marker = NO_URLS_MATCHED_TARGET % ''
  message = str(e)
  if marker not in message:
    return None
  pieces = message.split(marker)
  return len(pieces) == 2 and pieces[1] in bucket_strings_to_delete
class RmCommand(Command):
"""Implementation of gsutil rm command."""
# Command specification. See base class for documentation.
command_spec = Command.CreateCommandSpec(
'rm',
command_name_aliases=['del', 'delete', 'remove'],
usage_synopsis=_SYNOPSIS,
min_args=0,
max_args=NO_MAX,
supported_sub_args='afIrR',
file_url_ok=False,
provider_url_ok=False,
urls_start_arg=0,
gs_api_support=[ApiSelector.XML, ApiSelector.JSON],
gs_default_api=ApiSelector.JSON,
argparse_arguments=[
CommandArgument.MakeZeroOrMoreCloudURLsArgument()
]
)
# Help specification. See help_provider.py for documentation.
help_spec = Command.HelpSpec(
help_name='rm',
help_name_aliases=['del', 'delete', 'remove'],
help_type='command_help',
help_one_line_summary='Remove objects',
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={},
)
def RunCommand(self):
"""Command entry point for the rm command."""
# self.recursion_requested is initialized in command.py (so it can be
# checked in parent class for all commands).
self.continue_on_error = self.parallel_operations
self.read_args_from_stdin = False
self.all_versions = False
if self.sub_opts:
for o, unused_a in self.sub_opts:
if o == '-a':
self.all_versions = True
elif o == '-f':
self.continue_on_error = True
elif o == '-I':
self.read_args_from_stdin = True
elif o == '-r' or o == '-R':
self.recursion_requested = True
self.all_versions = True
if self.read_args_from_stdin:
if self.args:
raise CommandException('No arguments allowed with the -I flag.')
url_strs = StdinIterator()
else:
if not self.args:
raise CommandException('The rm command (without -I) expects at '
'least one URL.')
url_strs = self.args
# Tracks number of object deletes that failed.
self.op_failure_count = 0
<|fim▁hole|> self.bucket_not_found_count = 0
# Tracks buckets that are slated for recursive deletion.
bucket_urls_to_delete = []
self.bucket_strings_to_delete = []
if self.recursion_requested:
bucket_fields = ['id']
for url_str in url_strs:
url = StorageUrlFromString(url_str)
if url.IsBucket() or url.IsProvider():
for blr in self.WildcardIterator(url_str).IterBuckets(
bucket_fields=bucket_fields):
bucket_urls_to_delete.append(blr.storage_url)
self.bucket_strings_to_delete.append(url_str)
self.preconditions = PreconditionsFromHeaders(self.headers or {})
try:
# Expand wildcards, dirs, buckets, and bucket subdirs in URLs.
name_expansion_iterator = NameExpansionIterator(
self.command_name, self.debug, self.logger, self.gsutil_api,
url_strs, self.recursion_requested, project_id=self.project_id,
all_versions=self.all_versions,
continue_on_error=self.continue_on_error or self.parallel_operations)
seek_ahead_iterator = None
# Cannot seek ahead with stdin args, since we can only iterate them
# once without buffering in memory.
if not self.read_args_from_stdin:
seek_ahead_iterator = SeekAheadNameExpansionIterator(
self.command_name, self.debug, self.GetSeekAheadGsutilApi(),
url_strs, self.recursion_requested,
all_versions=self.all_versions, project_id=self.project_id)
# Perform remove requests in parallel (-m) mode, if requested, using
# configured number of parallel processes and threads. Otherwise,
# perform requests with sequential function calls in current process.
self.Apply(_RemoveFuncWrapper, name_expansion_iterator,
_RemoveExceptionHandler,
fail_on_error=(not self.continue_on_error),
shared_attrs=['op_failure_count', 'bucket_not_found_count'],
seek_ahead_iterator=seek_ahead_iterator)
# Assuming the bucket has versioning enabled, url's that don't map to
# objects should throw an error even with all_versions, since the prior
# round of deletes only sends objects to a history table.
# This assumption that rm -a is only called for versioned buckets should be
# corrected, but the fix is non-trivial.
except CommandException as e:
# Don't raise if there are buckets to delete -- it's valid to say:
# gsutil rm -r gs://some_bucket
# if the bucket is empty.
if _ExceptionMatchesBucketToDelete(self.bucket_strings_to_delete, e):
DecrementFailureCount()
else:
raise
except ServiceException, e:
if not self.continue_on_error:
raise
if self.bucket_not_found_count:
raise CommandException('Encountered non-existent bucket during listing')
if self.op_failure_count and not self.continue_on_error:
raise CommandException('Some files could not be removed.')
# If this was a gsutil rm -r command covering any bucket subdirs,
# remove any dir_$folder$ objects (which are created by various web UI
# tools to simulate folders).
if self.recursion_requested:
folder_object_wildcards = []
for url_str in url_strs:
url = StorageUrlFromString(url_str)
if url.IsObject():
folder_object_wildcards.append('%s**_$folder$' % url_str)
if folder_object_wildcards:
self.continue_on_error = True
try:
name_expansion_iterator = NameExpansionIterator(
self.command_name, self.debug,
self.logger, self.gsutil_api,
folder_object_wildcards, self.recursion_requested,
project_id=self.project_id,
all_versions=self.all_versions)
# When we're removing folder objects, always continue on error
self.Apply(_RemoveFuncWrapper, name_expansion_iterator,
_RemoveFoldersExceptionHandler,
fail_on_error=False)
except CommandException as e:
# Ignore exception from name expansion due to an absent folder file.
if not e.reason.startswith(NO_URLS_MATCHED_GENERIC):
raise
# Now that all data has been deleted, delete any bucket URLs.
for url in bucket_urls_to_delete:
self.logger.info('Removing %s...', url)
@Retry(NotEmptyException, tries=3, timeout_secs=1)
def BucketDeleteWithRetry():
self.gsutil_api.DeleteBucket(url.bucket_name, provider=url.scheme)
BucketDeleteWithRetry()
if self.op_failure_count:
plural_str = 's' if self.op_failure_count else ''
raise CommandException('%d file%s/object%s could not be removed.' % (
self.op_failure_count, plural_str, plural_str))
return 0
def RemoveFunc(self, name_expansion_result, thread_state=None):
gsutil_api = GetCloudApiInstance(self, thread_state=thread_state)
exp_src_url = name_expansion_result.expanded_storage_url
self.logger.info('Removing %s...', exp_src_url)
gsutil_api.DeleteObject(
exp_src_url.bucket_name, exp_src_url.object_name,
preconditions=self.preconditions, generation=exp_src_url.generation,
provider=exp_src_url.scheme)
PutToQueueWithTimeout(gsutil_api.status_queue,
MetadataMessage(message_time=time.time()))<|fim▁end|>
|
# Tracks if any buckets were missing.
|
<|file_name|>images.js<|end_file_name|><|fim▁begin|>var request = require("request");
var AuthDetails = require("../../auth.json");
exports.commands = [
"image", //gives top image from google search
"rimage", //gives random image from google search
"ggif" //gives random gif from google search
];
exports.image = {
usage: "<search query>",
description: "gets the top matching image from google",
process: function(bot, msg, args) {
if (!AuthDetails || !AuthDetails.youtube_api_key || !AuthDetails.google_custom_search) {
bot.sendMessage(msg.channel, "Image search requires both a YouTube API key and a Google Custom Search key!");
return;
}
//gets us a random result in first 5 pages
var page = 1; //we request 10 items
request("https://www.googleapis.com/customsearch/v1?key=" + AuthDetails.youtube_api_key + "&cx=" + AuthDetails.google_custom_search + "&q=" + (args.replace(/\s/g, '+')) + "&searchType=image&alt=json&num=10&start=" + page, function(err, res, body) {
var data, error;
try {
data = JSON.parse(body);
} catch (error) {
console.log(error)
return;
}
if (!data) {
console.log(data);
bot.sendMessage(msg.channel, "Error:\n" + JSON.stringify(data));
return;
} else if (!data.items || data.items.length == 0) {
console.log(data);
bot.sendMessage(msg.channel, "No result for '" + args + "'");
return;
}
var randResult = data.items[0];
bot.sendMessage(msg.channel, randResult.title + '\n' + randResult.link);
});
}<|fim▁hole|>exports.rimage = {
usage: "<search query>",
description: "gets a random image matching tags from google",
process: function(bot, msg, args) {
if (!AuthDetails || !AuthDetails.youtube_api_key || !AuthDetails.google_custom_search) {
bot.sendMessage(msg.channel, "Image search requires both a YouTube API key and a Google Custom Search key!");
return;
}
//gets us a random result in first 5 pages
var page = 1 + Math.floor(Math.random() * 5) * 10; //we request 10 items
request("https://www.googleapis.com/customsearch/v1?key=" + AuthDetails.youtube_api_key + "&cx=" + AuthDetails.google_custom_search + "&q=" + (args.replace(/\s/g, '+')) + "&searchType=image&alt=json&num=10&start=" + page, function(err, res, body) {
var data, error;
try {
data = JSON.parse(body);
} catch (error) {
console.log(error)
return;
}
if (!data) {
console.log(data);
bot.sendMessage(msg.channel, "Error:\n" + JSON.stringify(data));
return;
} else if (!data.items || data.items.length == 0) {
console.log(data);
bot.sendMessage(msg.channel, "No result for '" + args + "'");
return;
}
var randResult = data.items[Math.floor(Math.random() * data.items.length)];
bot.sendMessage(msg.channel, randResult.title + '\n' + randResult.link);
});
}
}
exports.ggif = {
usage: "<search query>",
description: "get random gif matching tags from google",
process: function(bot, msg, args) {
//gets us a random result in first 5 pages
var page = 1 + Math.floor(Math.random() * 5) * 10; //we request 10 items
request("https://www.googleapis.com/customsearch/v1?key=" + AuthDetails.youtube_api_key + "&cx=" + AuthDetails.google_custom_search + "&q=" + (args.replace(/\s/g, '+')) + "&searchType=image&alt=json&num=10&start=" + page + "&fileType=gif", function(err, res, body) {
var data, error;
try {
data = JSON.parse(body);
} catch (error) {
console.log(error)
return;
}
if (!data) {
console.log(data);
bot.sendMessage(msg.channel, "Error:\n" + JSON.stringify(data));
return;
} else if (!data.items || data.items.length == 0) {
console.log(data);
bot.sendMessage(msg.channel, "No result for '" + args + "'");
return;
}
var randResult = data.items[Math.floor(Math.random() * data.items.length)];
bot.sendMessage(msg.channel, randResult.title + '\n' + randResult.link);
});
}
}<|fim▁end|>
|
}
|
<|file_name|>wsgi.py<|end_file_name|><|fim▁begin|>"""
WSGI config for django_webapp project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""<|fim▁hole|>from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()<|fim▁end|>
|
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_webapp.settings")
|
<|file_name|>p1.py<|end_file_name|><|fim▁begin|>f = open('input.txt')
triangles = [map(int,l.split()) for l in f.readlines()]
possible = 0<|fim▁hole|>
print(possible)<|fim▁end|>
|
for t in triangles:
t.sort()
if t[0] + t[1] > t[2]:
possible += 1
|
<|file_name|>UnreachableCodeElimination.java<|end_file_name|><|fim▁begin|>/*
* Copyright 2008 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.javascript.jscomp;
import com.google.common.base.Preconditions;
import com.google.javascript.jscomp.ControlFlowGraph.Branch;
import com.google.javascript.jscomp.NodeTraversal.AbstractPostOrderCallback;
import com.google.javascript.jscomp.NodeTraversal.ScopedCallback;
import com.google.javascript.jscomp.graph.GraphReachability;
import com.google.javascript.jscomp.graph.DiGraph.DiGraphEdge;
import com.google.javascript.jscomp.graph.DiGraph.DiGraphNode;
import com.google.javascript.rhino.Node;
import com.google.javascript.rhino.Token;
import java.util.Deque;
import java.util.LinkedList;
import java.util.List;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* Removes dead code from a parse tree. The kinds of dead code that this pass
* removes are:
* - Any code following a return statement, such as the <code>alert</code>
* call in: <code>if (x) { return; alert('unreachable'); }</code>.
* - Statements that have no side effects, such as:
* <code>a.b.MyClass.prototype.propertyName;</code> or <code>true;</code>.
* That first kind of statement sometimes appears intentionally, so that
* prototype properties can be annotated using JSDoc without actually
* being initialized.
*
*/
class UnreachableCodeElimination extends AbstractPostOrderCallback
implements CompilerPass, ScopedCallback {
private static final Logger logger =
Logger.getLogger(UnreachableCodeElimination.class.getName());
private final AbstractCompiler compiler;
private final boolean removeNoOpStatements;
Deque<ControlFlowGraph<Node>> cfgStack =
new LinkedList<ControlFlowGraph<Node>>();
ControlFlowGraph<Node> curCfg = null;
UnreachableCodeElimination(AbstractCompiler compiler,
boolean removeNoOpStatements) {
this.compiler = compiler;
this.removeNoOpStatements = removeNoOpStatements;
}
  @Override
  public void enterScope(NodeTraversal t) {
    Scope scope = t.getScope();
    // Computes the control flow graph.
    ControlFlowAnalysis cfa = new ControlFlowAnalysis(compiler, false);
    cfa.process(null, scope.getRootNode());
    // Save the enclosing scope's CFG so exitScope() can restore it.
    cfgStack.push(curCfg);
    curCfg = cfa.getCfg();
    // Annotate every CFG node with its reachability from the entry node.
    new GraphReachability<Node, ControlFlowGraph.Branch>(curCfg)
        .compute(curCfg.getEntry().getValue());
  }
  @Override
  public void exitScope(NodeTraversal t) {
    // Restore the CFG of the enclosing scope saved by enterScope().
    curCfg = cfgStack.pop();
  }
  @Override
  public void process(Node externs, Node root) {
    // Only the program AST is traversed; the externs tree is left untouched.
    NodeTraversal.traverse(compiler, root, this);
  }
  @Override
  public void visit(NodeTraversal t, Node n, Node parent) {
    // A node without a parent cannot be spliced out of the tree; skip it.
    if (parent == null) {
      return;
    }
    // Never remove FUNCTION or SCRIPT nodes themselves.
    if (n.getType() == Token.FUNCTION || n.getType() == Token.SCRIPT) {
      return;
    }
    // Removes TRYs that had its CATCH removed and/or empty FINALLY.
    // TODO(dcc): Move the parts of this that don't require a control flow
    // graph to PeepholeRemoveDeadCode
    if (n.getType() == Token.TRY) {
      Node body = n.getFirstChild();
      Node catchOrFinallyBlock = body.getNext();
      Node finallyBlock = catchOrFinallyBlock.getNext();
      if (!catchOrFinallyBlock.hasChildren() &&
          (finallyBlock == null || !finallyBlock.hasChildren())) {
        // Replace the whole TRY with its body, and continue processing the
        // body in its place.
        n.removeChild(body);
        parent.replaceChild(n, body);
        compiler.reportCodeChange();
        n = body;
      }
    }
    DiGraphNode<Node, Branch> gNode = curCfg.getDirectedGraphNode(n);
    if (gNode == null) { // Not in CFG.
      return;
    }
    // Unreachable nodes -- and, optionally, reachable side-effect-free
    // statements -- are deleted; reachable jumps may still be redundant.
    if (gNode.getAnnotation() != GraphReachability.REACHABLE ||
        (removeNoOpStatements && !NodeUtil.mayHaveSideEffects(n))) {
      removeDeadExprStatementSafely(n);
      return;
    }
    tryRemoveUnconditionalBranching(n);
  }
/**
 * Tries to remove n if it is an unconditional branch node (break, continue or
 * return) and the target of n is the same as the follow of n. That is, if
* we remove n, the control flow remains the same. Also if n targets to
* another unconditional branch, this function will recursively try to remove
* the target branch as well. The reason why we want to cascade this removal
* is because we only run this pass once. If we have code such as
*
* break -> break -> break
*
* where all 3 break's are useless. The order of removal matters. When we
* first look at the first break, we see that it branches to the 2nd break.
* However, if we remove the last break, the 2nd break becomes useless and
* finally the first break becomes useless as well.<|fim▁hole|> *
* @return The target of this jump. If the target is also useless jump,
* the target of that useless jump recursively.
*/
@SuppressWarnings("fallthrough")
private Node tryRemoveUnconditionalBranching(Node n) {
/*
* For each of the unconditional branching control flow node, check to see
* if the ControlFlowAnalysis.computeFollowNode of that node is same as
* the branching target. If it is, the branch node is safe to be removed.
*
* This is not as clever as MinimizeExitPoints because it doesn't do any
* if-else conversion but it handles more complicated switch statements
* much nicer.
*/
// If n is null the target is the end of the function, nothing to do.
if (n == null) {
return n;
}
DiGraphNode<Node, Branch> gNode = curCfg.getDirectedGraphNode(n);
if (gNode == null) {
return n;
}
// If the parent is null, this mean whatever node it was there is now
// useless and it has been removed by other logics in this pass. That node
// while no longer exists in the AST, is still in the CFG because we
// never update the graph as nodes are removed.
if (n.getParent() == null) {
List<DiGraphEdge<Node,Branch>> outEdges = gNode.getOutEdges();
if (outEdges.size() == 1) {
return tryRemoveUnconditionalBranching(
outEdges.get(0).getDestination().getValue());
}
}
switch (n.getType()) {
case Token.BLOCK:
if (n.hasChildren()) {
Node first = n.getFirstChild();
return tryRemoveUnconditionalBranching(first);
} else {
return tryRemoveUnconditionalBranching(
ControlFlowAnalysis.computeFollowNode(n));
}
case Token.RETURN:
if (n.hasChildren()) {
break;
}
case Token.BREAK:
case Token.CONTINUE:
// We are looking for a control flow changing statement that always
// branches to the same node. If removing it the control flow still
// branches to that same node. It is safe to remove it.
List<DiGraphEdge<Node,Branch>> outEdges = gNode.getOutEdges();
if (outEdges.size() == 1 &&
// If there is a next node, there is no chance this jump is useless.
(n.getNext() == null || n.getNext().getType() == Token.FUNCTION)) {
Preconditions.checkState(outEdges.get(0).getValue() == Branch.UNCOND);
Node fallThrough = tryRemoveUnconditionalBranching(
ControlFlowAnalysis.computeFollowNode(n));
Node nextCfgNode = outEdges.get(0).getDestination().getValue();
if (nextCfgNode == fallThrough) {
removeDeadExprStatementSafely(n);
return fallThrough;
}
}
}
return n;
}
  private void removeDeadExprStatementSafely(Node n) {
    if (n.getType() == Token.EMPTY ||
        (n.getType() == Token.BLOCK && !n.hasChildren())) {
      // Not always trivial to remove, let FoldContants work its magic later.
      return;
    }
    // Removing an unreachable DO node is messy because it means we still have
    // to execute one iteration. If the DO's body has breaks in the middle, it
    // can get even more trickier and code size might actually increase.
    switch (n.getType()) {
      case Token.DO:
      case Token.TRY:
      case Token.CATCH:
      case Token.FINALLY:
        return;
    }
    // NOTE(review): redeclareVarsInsideBranch presumably preserves `var`
    // declarations from the removed subtree (vars are function-scoped) --
    // confirm against NodeUtil.
    NodeUtil.redeclareVarsInsideBranch(n);
    compiler.reportCodeChange();
    if (logger.isLoggable(Level.FINE)) {
      logger.fine("Removing " + n.toString());
    }
    NodeUtil.removeChild(n.getParent(), n);
  }
}<|fim▁end|>
| |
<|file_name|>serializer.rs<|end_file_name|><|fim▁begin|>use serialize::json;
use object_builder;
/// Provides functionality to create custom JSON presenters for your structs.
///
/// ## Example
///
/// ```
/// use jsonway::{self, Serializer};
///
/// struct Jedi {
/// name: String
/// }
///
/// struct JediSerializer<'a> {
/// jedi: &'a Jedi
/// }
///
/// impl<'a> jsonway::Serializer for JediSerializer<'a> {
/// fn root(&self) -> Option<&str> { Some("jedi") }
/// fn build(&self, json: &mut jsonway::ObjectBuilder) {
/// json.set("name", self.jedi.name.to_string());
/// }
/// }
///
/// let jedi = Jedi { name: "Saes Rrogon".to_string() };
/// let json = JediSerializer{jedi: &jedi}.serialize(true);<|fim▁hole|>/// "jedi",
/// "name",
/// ]).unwrap().as_string().unwrap(),
/// "Saes Rrogon"
/// );
/// ```
pub trait Serializer {
    /// Fills `builder` with the fields of this serializer's JSON object.
    fn build(&self, &mut object_builder::ObjectBuilder);

    /// Optional name under which the object is nested; `None` means no root.
    #[inline]
    fn root(&self) -> Option<&str> {
        None
    }

    /// Builds the JSON value, optionally nesting it under `root()`.
    fn serialize(&mut self, include_root: bool) -> json::Json {
        let mut builder = object_builder::ObjectBuilder::new();
        if include_root {
            if let Some(name) = self.root() {
                builder.root(name)
            }
        }
        self.build(&mut builder);
        builder.unwrap()
    }
}
/// Provides functionality to create custom JSON presenters for your structs.
///
/// ## Example
///
/// ```
/// use jsonway::{self, ObjectSerializer};
///
/// struct Jedi {
/// name: String
/// }
///
/// struct JediSerializer;
///
/// impl jsonway::ObjectSerializer<Jedi> for JediSerializer {
/// fn root(&self) -> Option<&str> { Some("jedi") }
/// fn build(&self, jedi: &Jedi, json: &mut jsonway::ObjectBuilder) {
/// json.set("name", jedi.name.to_string());
/// }
/// }
///
/// let jedi = Jedi { name: "Saes Rrogon".to_string() };
/// let json = JediSerializer.serialize(&jedi, true);
///
/// assert_eq!(
/// json.find_path(&[
/// "jedi",
/// "name",
/// ]).unwrap().as_string().unwrap(),
/// "Saes Rrogon"
/// );
/// ```
pub trait ObjectSerializer<T> {
    /// Fills `builder` with the JSON representation of the given object.
    fn build(&self, &T, &mut object_builder::ObjectBuilder);

    /// Optional name under which the object is nested; `None` means no root.
    #[inline]
    fn root(&self) -> Option<&str> {
        None
    }

    /// Serializes `obj`, optionally nesting it under `root()`.
    fn serialize(&mut self, obj: &T, include_root: bool) -> json::Json {
        let mut builder = object_builder::ObjectBuilder::new();
        if include_root {
            if let Some(name) = self.root() {
                builder.root(name)
            }
        }
        self.build(obj, &mut builder);
        builder.unwrap()
    }
}
/// Provides functionality to create custom JSON presenters for your structs.
///
/// ## Example
///
/// ```rust
/// use jsonway::{self, ObjectScopeSerializer};
///
/// struct User {
/// id: u64,
/// is_admin: bool
/// }
///
/// struct Jedi {
/// name: String,
/// secret: String
/// }
///
/// struct JediSerializer;
///
/// impl jsonway::ObjectScopeSerializer<Jedi, User> for JediSerializer {
/// fn root(&self) -> Option<&str> { Some("jedi") }
/// fn build(&self, jedi: &Jedi, current_user: &User, json: &mut jsonway::ObjectBuilder) {
/// json.set("name", jedi.name.to_string());
///
/// if current_user.is_admin {
/// json.set("secret", jedi.secret.to_string());
/// }
/// }
/// }
///
/// let jedi = Jedi {
/// name: "Palpatine".to_string(),
/// secret: "Dark side".to_string()
/// };
///
/// let current_user = User { id: 1, is_admin: true };
/// let json = JediSerializer.serialize(&jedi, ¤t_user, true);
///
/// assert_eq!(
/// json.find_path(&[
/// "jedi",
/// "name",
/// ]).unwrap().as_string().unwrap(),
/// "Palpatine"
/// );
///
/// assert_eq!(
/// json.find_path(&[
/// "jedi",
/// "secret",
/// ]).unwrap().as_string().unwrap(),
/// "Dark side"
/// );
///
/// ```
pub trait ObjectScopeSerializer<T, S> {
fn build(&self, &T, &S, &mut object_builder::ObjectBuilder);
#[inline]
fn root(&self) -> Option<&str> {
None
}
fn serialize(&mut self, obj: &T, scope: &S, include_root: bool) -> json::Json {
let mut bldr = object_builder::ObjectBuilder::new();
let root = self.root();
if include_root && root.is_some() {
bldr.root(root.unwrap())
}
self.build(obj, scope, &mut bldr);
bldr.unwrap()
}
}<|fim▁end|>
|
///
/// assert_eq!(
/// json.find_path(&[
|
<|file_name|>number.tsx<|end_file_name|><|fim▁begin|>/**
* @author Adam Meadows <[email protected]>
* @copyright 2015 Ciena Corporation. All rights reserved.
*/
import * as React from 'react';
const Demo = require('cy-gh-pages/src/components/demo');
import DemoComponent from './code/number';
const demoSource = require('!!raw!./code/number');
export default class Number extends React.Component<{}, {}> {
public displayName: string = 'NumberInputDemo';
<|fim▁hole|> public render(): React.ReactElement<any> {
return (
<Demo
demoComponent={DemoComponent}
demoSource={demoSource}
fluid={false}
horizontal={true}
/>
);
}
}<|fim▁end|>
|
/* React method */
|
<|file_name|>GetMetricStatisticsRequestUnmarshaller.java<|end_file_name|><|fim▁begin|>package com.transcend.monitor.transform;
import java.util.Date;
import java.util.Map;
import java.util.TimeZone;
import org.slf4j.Logger;
import com.msi.tough.core.Appctx;
import com.msi.tough.core.DateHelper;
import com.msi.tough.monitor.common.MonitorConstants;
import com.msi.tough.query.ErrorResponse;
import com.msi.tough.query.QueryFaults;
import com.transcend.monitor.message.GetMetricStatisticsMessage.GetMetricStatisticsRequest;
import com.transcend.monitor.message.MetricAlarmMessage.Statistic;
import com.transcend.monitor.message.MetricAlarmMessage.Unit;
public class GetMetricStatisticsRequestUnmarshaller extends BaseMonitorUnmarshaller<GetMetricStatisticsRequest>
{
public static final int MAX_DATAPOINTS = 100;
private final static Logger logger = Appctx
.getLogger(GetMetricStatisticsRequestUnmarshaller.class.getName());
private static GetMetricStatisticsRequestUnmarshaller instance;
    public static GetMetricStatisticsRequestUnmarshaller getInstance()
    {
        // Lazily creates the shared unmarshaller on first use. NOTE(review):
        // the null check is not synchronized, so concurrent first calls could
        // each construct an instance -- confirm callers initialize from a
        // single thread.
        if (instance == null)
        {
            instance = new GetMetricStatisticsRequestUnmarshaller();
        }
        return instance;
    }
@Override
public GetMetricStatisticsRequest unmarshall(Map<String, String[]> in)
{
final GetMetricStatisticsRequest.Builder req =
GetMetricStatisticsRequest.newBuilder();
req.setPeriod(MarshallingUtils.unmarshallInteger(in,
MonitorConstants.NODE_PERIOD,
logger));
req.setStartTime(MarshallingUtils.unmarshallString(in,
MonitorConstants.NODE_STARTTIME,<|fim▁hole|> req.setMetricName(MarshallingUtils.unmarshallString(in,
MonitorConstants.NODE_METRICNAME,
logger));
String unit = MarshallingUtils.unmarshallString(in,
MonitorConstants.NODE_UNIT, null,
logger);
req.setUnit(unit == null? Unit.None : Unit.valueOf(unit));
int i = 0;
while (true)
{
i++;
final String n[] = in.get("Statistics.member." + i);
if (n == null)
{
break;
}
try {
req.addStatistic(Statistic.valueOf(n[0]));
} catch (Exception e) {
throw QueryFaults.InvalidParameterValue();
}
}
if (req.getStatisticCount() == 0) {
throw ErrorResponse.missingParameter();
}
req.addAllDimension(unmarshallDimensions(in));
Date start = DateHelper.getCalendarFromISO8601String(req.getStartTime(),
TimeZone.getTimeZone("GMT")).getTime();
Date end = DateHelper.getCalendarFromISO8601String(req.getEndTime(),
TimeZone.getTimeZone("GMT")).getTime();
if (!start.before(end)) {
throw QueryFaults.InvalidParameterValue();
}
if (req.getPeriod() < 60 || req.getPeriod() % 60 != 0) {
throw QueryFaults.InvalidParameterValue();
}
long timeDelta = end.getTime() -
start.getTime();
long numPoints = timeDelta / req.getPeriod() / 1000 / 60;
if (numPoints > MAX_DATAPOINTS) {
throw QueryFaults.InvalidParameterCombination("You have requested" +
" up to "+numPoints+" datapoints, which exceeds the " +
"limit of "+MAX_DATAPOINTS+".");
}
return super.unmarshall(req.buildPartial(), in);
}
}<|fim▁end|>
|
logger));
req.setEndTime(MarshallingUtils.unmarshallString(in,
MonitorConstants.NODE_ENDTIME,
logger));
|
<|file_name|>testcase_units.rs<|end_file_name|><|fim▁begin|>// https://rustbyexample.com/generics/phantom/testcase_units.html
// http://rust-lang-ja.org/rust-by-example/generics/phantom/testcase_units.html
use std::ops::Add;
use std::marker::PhantomData;
/// Create void enumerations to define unit types.
#[derive(Debug, Clone, Copy)]
enum Inch {}
#[derive(Debug, Clone, Copy)]
enum Mm {}
/// `Length` is a type with phantom type parameter `Unit`,
/// and is not generic over the length type (that is `f64`).
///
/// `f64` already implements the `Clone` and `Copy` traits.
#[derive(Debug, Clone, Copy)]
struct Length<Unit>(f64, PhantomData<Unit>);
/// The `Add` trait defines the behaviour of the `+` operator.
impl<Unit> Add for Length<Unit> {
type Output = Length<Unit>;
// add() returns a new `Length` struct containig the sum.<|fim▁hole|> Length(self.0 + rhs.0, PhantomData)
}
}
fn main() {
// Specifies `one_foot` to have phantom type parameter `Inch`.
let one_foot: Length<Inch> = Length(12.0, PhantomData);
// `one_meter` has phantom type parameter `Mm`.
let one_meter: Length<Mm> = Length(1000.0, PhantomData);
// `+` calls the `add()` method we implemented for `Length<Unit>`.
//
// Since `Length` implements `Copy`, `add()` does not consume
// `one_foot` add `one_meter` but copies them into `self` and `rhs`.
let two_feet = one_foot + one_foot;
let two_meters = one_meter + one_meter;
// Addition works.
println!("one foot + one_foot = {:?} in", two_feet.0);
println!("one meter + one_meter = {:?} mm", two_meters.0);
// Nonsensical operations fail as they should:
// Compile-time Error: type mismatch.
//let one_feter = one_foot + one_meter;
// error[E0308]: mismatched types
}<|fim▁end|>
|
fn add(self, rhs: Length<Unit>) -> Length<Unit> {
// `+` calls the `Add` implementation for `f64`.
|
<|file_name|>sequence.js<|end_file_name|><|fim▁begin|><|fim▁hole|>// Copyright 2007 The Closure Library Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS-IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/**
* @fileoverview DOM pattern to match a sequence of other patterns.
*/
goog.provide('goog.dom.pattern.Sequence');
goog.require('goog.dom.NodeType');
goog.require('goog.dom.pattern');
goog.require('goog.dom.pattern.AbstractPattern');
goog.require('goog.dom.pattern.MatchType');
/**
* Pattern object that matches a sequence of other patterns.
*
* @param {Array<goog.dom.pattern.AbstractPattern>} patterns Ordered array of
* patterns to match.
* @param {boolean=} opt_ignoreWhitespace Optional flag to ignore text nodes
* consisting entirely of whitespace. The default is to not ignore them.
* @constructor
* @extends {goog.dom.pattern.AbstractPattern}
* @final
*/
goog.dom.pattern.Sequence = function(patterns, opt_ignoreWhitespace) {
/**
* Ordered array of patterns to match.
*
* @type {Array<goog.dom.pattern.AbstractPattern>}
*/
this.patterns = patterns;
/**
* Whether or not to ignore whitespace only Text nodes.
*
* @private {boolean}
*/
this.ignoreWhitespace_ = !!opt_ignoreWhitespace;
/**
* Position in the patterns array we have reached by successful matches.
*
* @private {number}
*/
this.currentPosition_ = 0;
};
goog.inherits(goog.dom.pattern.Sequence, goog.dom.pattern.AbstractPattern);
/**
* Regular expression for breaking text nodes.
* @private {!RegExp}
*/
goog.dom.pattern.Sequence.BREAKING_TEXTNODE_RE_ = /^\s*$/;
/**
* Test whether the given token starts, continues, or finishes the sequence
* of patterns given in the constructor.
*
* @param {Node} token Token to match against.
* @param {goog.dom.TagWalkType} type The type of token.
* @return {goog.dom.pattern.MatchType} <code>MATCH</code> if the pattern
* matches, <code>MATCHING</code> if the pattern starts a match, and
* <code>NO_MATCH</code> if the pattern does not match.
* @override
*/
goog.dom.pattern.Sequence.prototype.matchToken = function(token, type) {
  // Whitespace-only text nodes are transparent when the option is enabled:
  // report "still matching" without advancing the sequence.
  if (this.ignoreWhitespace_ && token.nodeType == goog.dom.NodeType.TEXT &&
      goog.dom.pattern.Sequence.BREAKING_TEXTNODE_RE_.test(token.nodeValue)) {
    return goog.dom.pattern.MatchType.MATCHING;
  }

  var childResult =
      this.patterns[this.currentPosition_].matchToken(token, type);

  if (childResult == goog.dom.pattern.MatchType.MATCH) {
    // Remember the token that started the whole sequence.
    if (this.currentPosition_ == 0) {
      this.matchedNode = token;
    }
    // Advance to the next pattern; a match of the final pattern completes
    // the sequence.
    this.currentPosition_++;
    if (this.currentPosition_ == this.patterns.length) {
      this.reset();
      return goog.dom.pattern.MatchType.MATCH;
    }
    return goog.dom.pattern.MatchType.MATCHING;
  }

  if (childResult == goog.dom.pattern.MatchType.MATCHING) {
    // Child patterns that are themselves sequences or repetitions may need
    // more tokens before they can decide.
    return goog.dom.pattern.MatchType.MATCHING;
  }

  if (childResult == goog.dom.pattern.MatchType.BACKTRACK_MATCH) {
    // A repetitive child pattern matched one token ago.
    // TODO(robbyw): Backtrack further if necessary.
    this.currentPosition_++;
    if (this.currentPosition_ == this.patterns.length) {
      this.reset();
      return goog.dom.pattern.MatchType.BACKTRACK_MATCH;
    }
    // Re-run the same token against the next pattern in the sequence.
    return this.matchToken(token, type);
  }

  this.reset();
  return goog.dom.pattern.MatchType.NO_MATCH;
};
/**
 * Reset any internal state this pattern keeps.
 * @override
 */
goog.dom.pattern.Sequence.prototype.reset = function() {
  // Also reset the child pattern we were part-way through, if any.
  var activePattern = this.patterns[this.currentPosition_];
  if (activePattern) {
    activePattern.reset();
  }
  this.currentPosition_ = 0;
};
"""Cascade on Delete
Revision ID: 5ca019edf61f
Revises: 469f428604aa
Create Date: 2019-06-23 05:49:26.061932
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "5ca019edf61f"
down_revision = "469f428604aa"
branch_labels = None
depends_on = None
def upgrade():
    """Recreate every association-table foreign key with ON DELETE CASCADE.

    Each table's MySQL-named constraints (``<table>_ibfk_N``) are dropped
    inside a batch operation and then recreated with ``ondelete="CASCADE"``
    so that deleting a team, flag, snapshot, etc. also removes the
    dependent association rows.
    """

    def recreate(table, fks):
        """Drop ``<table>_ibfk_1..N`` and recreate them with CASCADE.

        *fks* is an ordered list of ``(referent_table, local_column)``
        pairs; the Nth pair becomes constraint ``<table>_ibfk_<N+1>``
        referencing ``referent_table.id``.
        """
        with op.batch_alter_table(table) as batch_op:
            for i in range(len(fks)):
                batch_op.drop_constraint(
                    "%s_ibfk_%d" % (table, i + 1), type_="foreignkey"
                )
        for i, (referent, column) in enumerate(fks):
            op.create_foreign_key(
                "%s_ibfk_%d" % (table, i + 1),
                table,
                referent,
                [column],
                ["id"],
                ondelete="CASCADE",
            )

    recreate("penalty", [("team", "team_id"), ("flag", "flag_id")])
    recreate("snapshot_team", [("team", "team_id")])
    recreate(
        "snapshot_to_snapshot_team",
        [("snapshot", "snapshot_id"), ("snapshot_team", "snapshot_team_id")],
    )
    recreate(
        "snapshot_team_to_flag",
        [("snapshot_team", "snapshot_team_id"), ("flag", "flag_id")],
    )
    # NOTE(review): the source column really is spelled "gam_level_id"
    # (missing an "e") in both upgrade() and downgrade(); preserved as-is.
    # Confirm against the actual schema before "fixing" the spelling.
    recreate(
        "snapshot_team_to_game_level",
        [("snapshot_team", "snapshot_team_id"), ("game_level", "gam_level_id")],
    )
    recreate("team_to_box", [("team", "team_id"), ("box", "box_id")])
    recreate("team_to_item", [("team", "team_id"), ("market_item", "item_id")])
    recreate(
        "team_to_source_code",
        [("team", "team_id"), ("source_code", "source_code_id")],
    )
    recreate("team_to_hint", [("team", "team_id"), ("hint", "hint_id")])
    recreate("team_to_flag", [("team", "team_id"), ("flag", "flag_id")])
    recreate(
        "team_to_game_level",
        [("team", "team_id"), ("game_level", "game_level_id")],
    )
def downgrade():
    """Restore the original ON DELETE RESTRICT behavior on every foreign key.

    Mirrors :func:`upgrade` exactly, but recreates each ``<table>_ibfk_N``
    constraint with ``ondelete="RESTRICT"``.
    """

    def recreate(table, fks):
        """Drop ``<table>_ibfk_1..N`` and recreate them with RESTRICT.

        *fks* is an ordered list of ``(referent_table, local_column)``
        pairs; the Nth pair becomes constraint ``<table>_ibfk_<N+1>``
        referencing ``referent_table.id``.
        """
        with op.batch_alter_table(table) as batch_op:
            for i in range(len(fks)):
                batch_op.drop_constraint(
                    "%s_ibfk_%d" % (table, i + 1), type_="foreignkey"
                )
        for i, (referent, column) in enumerate(fks):
            op.create_foreign_key(
                "%s_ibfk_%d" % (table, i + 1),
                table,
                referent,
                [column],
                ["id"],
                ondelete="RESTRICT",
            )

    recreate("penalty", [("team", "team_id"), ("flag", "flag_id")])
    recreate("snapshot_team", [("team", "team_id")])
    recreate(
        "snapshot_to_snapshot_team",
        [("snapshot", "snapshot_id"), ("snapshot_team", "snapshot_team_id")],
    )
    recreate(
        "snapshot_team_to_flag",
        [("snapshot_team", "snapshot_team_id"), ("flag", "flag_id")],
    )
    # NOTE(review): "gam_level_id" spelling intentionally preserved from the
    # original migration -- see the matching note in upgrade().
    recreate(
        "snapshot_team_to_game_level",
        [("snapshot_team", "snapshot_team_id"), ("game_level", "gam_level_id")],
    )
    recreate("team_to_box", [("team", "team_id"), ("box", "box_id")])
    recreate("team_to_item", [("team", "team_id"), ("market_item", "item_id")])
    recreate(
        "team_to_source_code",
        [("team", "team_id"), ("source_code", "source_code_id")],
    )
    recreate("team_to_hint", [("team", "team_id"), ("hint", "hint_id")])
    recreate("team_to_flag", [("team", "team_id"), ("flag", "flag_id")])
    recreate(
        "team_to_game_level",
        [("team", "team_id"), ("game_level", "game_level_id")],
    )
import six

from unittest import TestCase

from dark.reads import Read, Reads
from dark.reads import Read, Reads
from dark.score import HigherIsBetterScore
from dark.hsp import HSP, LSP
from dark.alignments import (
Alignment, bestAlignment, ReadAlignments, ReadsAlignmentsParams,
ReadsAlignments)
class TestAlignment(TestCase):
    """
    Tests for the dark.alignment.Alignment class
    """

    def testExpectedAttrs(self):
        """
        A newly created alignment must carry the title and length it was
        given.
        """
        newAlignment = Alignment(45, 'title')
        self.assertEqual(45, newAlignment.subjectLength)
        self.assertEqual('title', newAlignment.subjectTitle)

    def testNoHspsWhenCreated(self):
        """
        A newly created alignment must contain no HSPs.
        """
        self.assertEqual(0, len(Alignment(45, 'title').hsps))

    def testAddHsp(self):
        """
        An HSP added to an alignment must be retrievable from it.
        """
        newAlignment = Alignment(45, 'title')
        newAlignment.addHsp(HSP(3))
        self.assertEqual(HSP(3), newAlignment.hsps[0])
class TestReadAlignments(TestCase):
    """
    Tests for the dark.alignment.ReadAlignments class
    """

    def testRead(self):
        """
        A ReadAlignments instance must store the read it was given.
        """
        theRead = Read('id', 'ACGT')
        self.assertEqual(theRead, ReadAlignments(theRead).read)

    def testNoAlignments(self):
        """
        A ReadAlignments instance may hold no alignments at all.
        """
        theRead = Read('id', 'ACGT')
        self.assertEqual(0, len(ReadAlignments(theRead)))

    def testAlignments(self):
        """
        A ReadAlignments instance must store the alignments it was given.
        """
        theRead = Read('id', 'ACGT')
        first = Alignment(45, 'title1')
        second = Alignment(55, 'title2')
        self.assertEqual(
            [first, second], ReadAlignments(theRead, [first, second]))
class TestBestAlignmentHSP(TestCase):
    """
    Test the L{dark.hits.bestAlignment} function when HSPs are used.
    """
    # NOTE: the original text here was corrupted by a stray FIM-hole
    # marker that dropped two setup lines in testThreeAlignments; they
    # are restored below (the missing fragment was present verbatim at
    # the end of the source chunk).

    def testOneAlignment(self):
        """
        When one alignment is present that alignment must be returned by
        bestAlignment.
        """
        alignment = Alignment(44, 'Seq 1')
        alignment.addHsp(HSP(10))
        alignment.addHsp(HSP(9))
        alignments = [alignment]
        hit = ReadAlignments(Read('id1', 'aaa'), alignments)
        best = bestAlignment(hit)
        self.assertEqual('Seq 1', best.subjectTitle)
        self.assertEqual(44, best.subjectLength)

    def testThreeAlignments(self):
        """
        When three alignments are present, the one with the highest first HSP
        must be returned by bestAlignment.
        """
        alignment1 = Alignment(33, 'Seq 1')
        alignment1.addHsp(HSP(10))
        alignment1.addHsp(HSP(9))

        alignment2 = Alignment(44, 'Seq 2')
        alignment2.addHsp(HSP(30))
        alignment2.addHsp(HSP(29))

        alignment3 = Alignment(55, 'Seq 3')
        alignment3.addHsp(HSP(20))
        alignment3.addHsp(HSP(19))

        alignments = [alignment1, alignment2, alignment3]
        hit = ReadAlignments(Read('id1', 'aaa'), alignments)
        best = bestAlignment(hit)
        self.assertEqual('Seq 2', best.subjectTitle)
        self.assertEqual(44, best.subjectLength)
class TestBestAlignmentLSP(TestCase):
    """
    Test the L{dark.hits.bestAlignment} function when LSPs are used
    (for LSPs, lower scores are better).
    """

    def testOneAlignment(self):
        """
        When one alignment is present that alignment must be returned by
        bestAlignment.
        """
        only = Alignment(44, 'Seq 1')
        only.addHsp(LSP(10))
        only.addHsp(LSP(9))
        readAlignments = ReadAlignments(Read('id0', 'aaa'), [only])
        best = bestAlignment(readAlignments)
        self.assertEqual(44, best.subjectLength)
        self.assertEqual('Seq 1', best.subjectTitle)

    def testThreeAlignments(self):
        """
        When three alignments are present, the one with the lowest first HSP
        must be returned by bestAlignment.
        """
        first = Alignment(33, 'Seq 1')
        first.addHsp(LSP(10))
        first.addHsp(LSP(9))

        second = Alignment(44, 'Seq 2')
        second.addHsp(LSP(3))
        second.addHsp(LSP(2))

        third = Alignment(55, 'Seq 3')
        third.addHsp(LSP(20))
        third.addHsp(LSP(19))

        readAlignments = ReadAlignments(
            Read('id0', 'aaa'), [first, second, third])
        best = bestAlignment(readAlignments)
        self.assertEqual(44, best.subjectLength)
        self.assertEqual('Seq 2', best.subjectTitle)
"""
Test the L{dark.alignments.ReadsAlignmentsParams} class.
"""
def testExpectedAttrs(self):
"""
A ReadsAlignmentsParams instance must have the expected attributes.
"""
applicationParams = {}
params = ReadsAlignmentsParams('application name', applicationParams,
False, 'Bit score')
self.assertEqual('application name', params.application)
self.assertIs(applicationParams, params.applicationParams)
self.assertFalse(params.subjectIsNucleotides)
self.assertEqual('Bit score', params.scoreTitle)
class TestReadsAlignments(TestCase):
"""
Test the L{dark.alignments.ReadsAlignments} class.
"""
# NOTE: The ReadsAlignments class is a base class for concrete
# implementations, such as BlastReadsAlignments. So it can only be
# tested minimally by itself. For full tests see the
# TestBlastReadsAlignments and TestBlastReadsAlignmentsFiltering
# classes in test/blast/blast_alignments.py
def testExpectedAttrs(self):
"""
A ReadsAlignments instance must have the expected attributes.
"""
reads = Reads()
params = {
'application': 'app name'
}
readsAlignments = ReadsAlignments(reads, params)
self.assertIs(readsAlignments.reads, reads)
self.assertEqual('app name', readsAlignments.params['application'])
self.assertIs(params, readsAlignments.params)
self.assertIs(HigherIsBetterScore, readsAlignments.scoreClass)
def testNotIterable(self):
"""
Iterating an empty ReadsAlignments must result in the empty list.
"""
reads = Reads()
readsAlignments = ReadsAlignments(reads, 'applicationName', None)
self.assertEqual([], list(readsAlignments))
def testGetSubjectSequence(self):
"""
A ReadsAlignments instance will not implement getSubjectSequence.
Subclasses are expected to implement it.
"""
reads = Reads()
readsAlignments = ReadsAlignments(reads, 'applicationName', None)
error = 'getSubjectSequence must be implemented by a subclass'
six.assertRaisesRegex(self, NotImplementedError, error,
readsAlignments.getSubjectSequence, 'title')<|fim▁end|>
#!/usr/bin/env python
"""
conference.py -- Udacity conference server-side Python App Engine API;
uses Google Cloud Endpoints
$Id: conference.py,v 1.25 2014/05/24 23:42:19 wesc Exp wesc $
created by wesc on 2014 apr 21
"""
__author__ = '[email protected] (Wesley Chun)'
from datetime import datetime
import endpoints
from protorpc import messages
from protorpc import message_types
from protorpc import remote
from google.appengine.api import memcache
from google.appengine.api import taskqueue
from google.appengine.ext import ndb
from models import ConflictException
from models import Profile
from models import ProfileMiniForm
from models import ProfileForm
from models import StringMessage
from models import BooleanMessage
from models import Conference
from models import ConferenceForm
from models import ConferenceForms
from models import ConferenceQueryForm
from models import ConferenceQueryForms
from models import TeeShirtSize
#Added
from models import Session
from models import SessionForm
from models import SessionForms
from settings import WEB_CLIENT_ID
from settings import ANDROID_CLIENT_ID
from settings import IOS_CLIENT_ID
from settings import ANDROID_AUDIENCE
from utils import getUserId
EMAIL_SCOPE = endpoints.EMAIL_SCOPE
API_EXPLORER_CLIENT_ID = endpoints.API_EXPLORER_CLIENT_ID
MEMCACHE_ANNOUNCEMENTS_KEY = "RECENT_ANNOUNCEMENTS"
ANNOUNCEMENT_TPL = ('Last chance to attend! The following conferences '
'are nearly sold out: %s')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Default values applied to a new Conference when the request omits them.
DEFAULTS = {
    "city": "Default City",
    "maxAttendees": 0,
    "seatsAvailable": 0,
    "topics": [ "Default", "Topic" ],
}

# Default values applied to a new Session when the request omits them.
SESSION_DEFAULTS = {
    "description": '',
    "highlights": ["Default"],
    "duration": 0.0,
    # "users" holds the ids of users who wishlisted the session.
    "users": []
}

# Maps query-form operator names to NDB filter operator strings.
OPERATORS = {
    'EQ': '=',
    'GT': '>',
    'GTEQ': '>=',
    'LT': '<',
    'LTEQ': '<=',
    'NE': '!='
}

# Maps query-form field names to Conference model property names.
FIELDS = {
    'CITY': 'city',
    'TOPIC': 'topics',
    'MONTH': 'month',
    'MAX_ATTENDEES': 'maxAttendees',
}

# Request container: conference key in the URL path, empty body.
CONF_GET_REQUEST = endpoints.ResourceContainer(
    message_types.VoidMessage,
    websafeConferenceKey=messages.StringField(1),
)

# Request container: conference key in the URL path, ConferenceForm body.
CONF_POST_REQUEST = endpoints.ResourceContainer(
    ConferenceForm,
    websafeConferenceKey=messages.StringField(1),
)

## create Resource container for post request with Sessions
SESSION_POST_REQUEST = endpoints.ResourceContainer(
    SessionForm,
    websafeConferenceKey=messages.StringField(1)
)

## and for a GET Session request
SESSION_GET_REQUEST = endpoints.ResourceContainer(
    message_types.VoidMessage,
    websafeConferenceKey=messages.StringField(1)
)

# Request container: speaker name in the URL path, empty body.
SESSION_GETBYNAME = endpoints.ResourceContainer(
    message_types.VoidMessage,
    speaker=messages.StringField(1)
)

# Request container: session type plus conference key, empty body.
SESSION_GETBYTYPE = endpoints.ResourceContainer(
    message_types.VoidMessage,
    sessionType=messages.StringField(1),
    websafeConferenceKey=messages.StringField(2)
)

# Request container: session key in the URL path, empty body.
USERWISHLIST = endpoints.ResourceContainer(
    message_types.VoidMessage,
    sessionKey = messages.StringField(1)
)

# NOTE(review): unlike the other containers this one passes no request
# message class (e.g. message_types.VoidMessage) before the field --
# confirm endpoints.ResourceContainer accepts this form.
GET_FEATURED_SPEAKER = endpoints.ResourceContainer(
    speaker = messages.StringField(1)
)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@endpoints.api(name='conference', version='v1', audiences=[ANDROID_AUDIENCE],
allowed_client_ids=[WEB_CLIENT_ID, API_EXPLORER_CLIENT_ID, ANDROID_CLIENT_ID, IOS_CLIENT_ID],
scopes=[EMAIL_SCOPE])
class ConferenceApi(remote.Service):
"""Conference API v0.1"""
# Task 1.)
# Sessions
# - - - Conference objects - - - - - - - - - - - - - - - - -
def _copySessionToForm(self, session):
"""Copy relevant fields from Conference to ConferenceForm."""
sf = SessionForm()
for field in sf.all_fields():
if hasattr(session, field.name):
if field.name == 'date':
setattr(sf, field.name, str(getattr(session, field.name)))
else:
setattr(sf, field.name, getattr(session, field.name))
sf.check_initialized()
return sf
def _createSessionObject(self, request):
"""Create or update Session object, returning SessionForm/request."""
# preload necessary data items
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
if not request.name:
raise endpoints.BadRequestException("Session 'name' field required")
# get the conf that the session should be added to
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
if not conf:
raise endpoints.UnauthorizedException("There must be a valid conference to add the sessions to")
if not request.speaker:
raise endpoints.BadRequestException("Session 'speaker' field required")
if not request.speaker:
raise endpoints.BadRequestException("Session 'type' field required")
<|fim▁hole|> data = {field.name: getattr(request, field.name) for field in request.all_fields()}
del data["websafeConferenceKey"]
## Check to see if valid start time. Must be between 1-12 am and 1-12 pm
## The format should be 00:xx ex: 09:am
if data['startTime']:
hour = int(data['startTime'][0:2])
ampm = data['startTime'][3:]
print ampm
if not (hour <= 12 and hour >= 1):
raise endpoints.BadRequestException("Start time must be between 1 and 12")
if not (ampm == 'am' or ampm == 'AM' or ampm == 'pm' or ampm == 'PM'):
raise endpoints.BadRequestException("Start time must be either am or pm")
else:
raise endpoints.BadRequestException("We need to know the start time of the session")
# add default values for those missing (both data model & outbound Message)
# convert dates from strings to Date objects; set month based on start_date
if data['date']:
data['date'] = datetime.strptime(data['date'][:10], "%Y-%m-%d").date()
else:
raise endpoints.BadRequestException("Session start date required")
for df in SESSION_DEFAULTS:
if data[df] in (None, []):
data[df] = SESSION_DEFAULTS[df]
setattr(request, df, SESSION_DEFAULTS[df])
# if there is a refrence to the Conference that the session is for then
# make the session a child of that Conference.
# creating the session key
s_id = Session.allocate_ids(size=1, parent=conf.key)[0]
s_key = ndb.Key(Session, s_id, parent=conf.key)
data["key"] = s_key
Session(**data).put()
## Additions for Task 4
## first get current featured speaker
curr_speaker = data["speaker"]
taskqueue.add(params={'speaker':curr_speaker, 'websafeConferenceKey': conf.key.urlsafe()},
url='/tasks/setFeaturedSpeaker')
return self._copySessionToForm(request)
# Task 4 Endpoint for getting the current featured speaker
@endpoints.method(message_types.VoidMessage,StringMessage,path='featuredspeaker',
http_method='GET', name='getFeaturedSpeaker')
def getFeaturedSpeaker(self,request):
"""Return the featured speaker for the session """
featured_speaker = memcache.get("featured_speaker")
# if there is not speaker then tell the 'user' there is no speaker
if featured_speaker == None:
featured_speaker = "There is no current featured speaker"
# using the string message class from models.py
string_message = StringMessage()
setattr(string_message,"data",featured_speaker)
return string_message
# Task 1 Enpoint for creating a session
@endpoints.method(SESSION_POST_REQUEST,SessionForm,path='session/{websafeConferenceKey}',
http_method='POST', name='createSession')
def createSession(self,request):
"""Create new session """
return self._createSessionObject(request)
# Task 1 Endpoint for fetching a list of all current sessions of a conference
    @endpoints.method(SESSION_GET_REQUEST,SessionForms,path='session/{websafeConferenceKey}',
        http_method='GET', name='getSessions')
    def getSessions(self,request):
        """Return all sessions of the given conference."""
        # (Docstring fixed: it previously said "Create new session".)
        conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
        if not conf:
            raise endpoints.NotFoundException(
                'No conference found with key: %s' % request.websafeConferenceKey)
        # Sessions are child entities of their conference.
        sessions = Session.query(ancestor=conf.key)
        return SessionForms(
            items = [self._copySessionToForm(session) for session in sessions]
        )
# Task 1 Endpoint for getting all sessions of a speaker
@endpoints.method(SESSION_GETBYNAME, SessionForms,
path='session/{speaker}',
http_method='GET', name='getSessionsBySpeaker')
def getSessionsBySpeaker(self, request):
"""Return requested session (by username)."""
# get Conference object from request; bail if not found
if not request.speaker:
raise endpoints.BadRequestException("You must pass the name of the speaker")
# the speaker can have more than one session
sessions = Session.query(Session.speaker == request.speaker)
# return SessionForm
return SessionForms(
items = [self._copySessionToForm(session) for session in sessions]
)
# Task 1 Enpoint for getting all sessions of a given type
@endpoints.method(SESSION_GETBYTYPE, SessionForms,
path='session',
http_method='GET', name='getSessionByType')
def getSessionByType(self, request):
"""Return requested session (by type)."""
# get Conference object from request; bail if not found
if not request.sessionType:
raise endpoints.BadRequestException("You must pass the type of the session")
if not request.websafeConferenceKey:
raise endpoints.BadRequestException("You must pass a conference key")
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' % request.websafeConferenceKey)
sessions = Session.query(Session.sessionType == request.sessionType,
ancestor=conf.key)
# return SessionForm
return SessionForms(
items = [self._copySessionToForm(session) for session in sessions]
)
# Task 2.)
## --- User wish list MEthods
# add a wishlist to a given session for the current user
@endpoints.method(USERWISHLIST,SessionForm,path='wishlist/{sessionKey}',
http_method='POST', name='addToWishList')
def addToWishList(self,request):
if not request.sessionKey:
raise BadRequestException("You must pass a session key")
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
# append the current user to the wishlist property
session_key = ndb.Key(urlsafe=request.sessionKey)
session = session_key.get()
## Only add the user if he does not currently have the session in his wishlist
if user_id in session.users:
raise BadRequestException("You are already in this session")
else:
session.users.append(user_id)
session.put()
return self._copySessionToForm(session)
# Task 2 endpoint delete current user from given wish list
@endpoints.method(USERWISHLIST,SessionForm,path='deleteWishlist/{sessionKey}',
http_method='POST', name='deleteFromWishList')
def deleteFromWishList(self,request):
if not request.sessionKey:
raise BadRequestException("You must pass a session key")
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
session_key = ndb.Key(urlsafe=request.sessionKey)
session = session_key.get()
# refrencing the session users property call the python remove function
# to remove user from the current users list
# only remove from the users list if the user is in it otherwise return a error
if user_id in session.users:
session.users.remove(user_id)
else:
raise BadRequestException("You do not have this session in your wishlist")
session.put()
return self._copySessionToForm(session)
# Task 2 endpoint that returns the full wishlist of the current user
@endpoints.method(message_types.VoidMessage,SessionForms,path='wishlist',
http_method='GET', name='getCurrentWishList')
def getCurrentWishList(self,request):
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
sessions = Session.query(Session.users.IN([user_id]))
return SessionForms(
items = [self._copySessionToForm(session) for session in sessions]
)
# FOR Task 3
# query wishlist for a given conference
@endpoints.method(SESSION_GET_REQUEST,SessionForms,path='wishlistByConference/{websafeConferenceKey}',
http_method='GET',name='getWishListByConference')
def getWishListByConference(self,request):
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' % request.websafeConferenceKey)
sessions = Session.query(ndb.AND(Session.users.IN([user_id]),ancestor=conf.key))
return SessionForms(
items = [self._copySessionToForm(session) for session in sessions]
)
# query Sessions that start at a specific time
@endpoints.method(SESSION_GET_REQUEST,SessionForms,path="sessionsByStartTime",
http_method='GET',name='getSessionsByTime')
def getSessionsByTime(self,request):
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
if not request.startTime:
raise BadRequestException("You must pass a startime in the format 12:am")
# Since we are not quering on a specif
sessions = Session.query(startime=request.startTime).get()
return SessionForms(
items = [self._copySessionToForm(session) for session in sessions]
)
## ------- Conference MEthods
def _copyConferenceToForm(self, conf, displayName):
"""Copy relevant fields from Conference to ConferenceForm."""
cf = ConferenceForm()
for field in cf.all_fields():
if hasattr(conf, field.name):
# convert Date to date string; just copy others
if field.name.endswith('Date'):
setattr(cf, field.name, str(getattr(conf, field.name)))
else:
setattr(cf, field.name, getattr(conf, field.name))
elif field.name == "websafeKey":
setattr(cf, field.name, conf.key.urlsafe())
if displayName:
setattr(cf, 'organizerDisplayName', displayName)
cf.check_initialized()
return cf
def _createConferenceObject(self, request):
    """Create or update Conference object, returning ConferenceForm/request.

    Builds the entity from the request fields, fills in DEFAULTS for
    missing values, converts date strings, and enqueues a confirmation
    email task. Returns the (possibly modified) request form.
    """
    # preload necessary data items
    user = endpoints.get_current_user()
    if not user:
        raise endpoints.UnauthorizedException('Authorization required')
    user_id = getUserId(user)
    if not request.name:
        raise endpoints.BadRequestException("Conference 'name' field required")
    # copy ConferenceForm/ProtoRPC Message into dict
    data = {field.name: getattr(request, field.name) for field in request.all_fields()}
    # websafeKey / organizerDisplayName are output-only; not stored on the entity
    del data['websafeKey']
    del data['organizerDisplayName']
    # add default values for those missing (both data model & outbound Message)
    for df in DEFAULTS:
        if data[df] in (None, []):
            data[df] = DEFAULTS[df]
            setattr(request, df, DEFAULTS[df])
    # convert dates from strings to Date objects; set month based on start_date
    if data['startDate']:
        data['startDate'] = datetime.strptime(data['startDate'][:10], "%Y-%m-%d").date()
        data['month'] = data['startDate'].month
    else:
        data['month'] = 0
    if data['endDate']:
        data['endDate'] = datetime.strptime(data['endDate'][:10], "%Y-%m-%d").date()
    # set seatsAvailable to be same as maxAttendees on creation
    if data["maxAttendees"] > 0:
        data["seatsAvailable"] = data["maxAttendees"]
    # generate Profile Key based on user ID and Conference
    # ID based on Profile key get Conference key from ID
    p_key = ndb.Key(Profile, user_id)
    c_id = Conference.allocate_ids(size=1, parent=p_key)[0]
    c_key = ndb.Key(Conference, c_id, parent=p_key)
    data['key'] = c_key
    data['organizerUserId'] = request.organizerUserId = user_id
    # create Conference, send email to organizer confirming
    # creation of Conference & return (modified) ConferenceForm
    Conference(**data).put()
    # confirmation email is sent asynchronously via the task queue
    taskqueue.add(params={'email': user.email(),
        'conferenceInfo': repr(request)},
        url='/tasks/send_confirmation_email'
    )
    return request
@ndb.transactional()
def _updateConferenceObject(self, request):
    """Update an existing Conference with the fields supplied in request.

    Only the conference owner may update; only fields actually present in
    the request are copied onto the stored entity. Runs transactionally.

    Raises:
        UnauthorizedException: if no user is signed in.
        NotFoundException: if websafeConferenceKey matches no conference.
        ForbiddenException: if the caller is not the conference owner.
    """
    user = endpoints.get_current_user()
    if not user:
        raise endpoints.UnauthorizedException('Authorization required')
    user_id = getUserId(user)
    # update existing conference
    conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
    # check that conference exists
    if not conf:
        raise endpoints.NotFoundException(
            'No conference found with key: %s' % request.websafeConferenceKey)
    # check that user is owner
    if user_id != conf.organizerUserId:
        raise endpoints.ForbiddenException(
            'Only the owner can update the conference.')
    # Not getting all the fields, so don't create a new object; just
    # copy relevant fields from ConferenceForm to Conference object.
    # (The previous version also built a full field dict here and then
    # immediately shadowed it in the loop — that dead code is removed.)
    for field in request.all_fields():
        value = getattr(request, field.name)
        # only copy fields where we get data
        if value not in (None, []):
            # special handling for dates (convert string to Date)
            if field.name in ('startDate', 'endDate'):
                value = datetime.strptime(value, "%Y-%m-%d").date()
                if field.name == 'startDate':
                    conf.month = value.month
            # write to Conference object
            setattr(conf, field.name, value)
    conf.put()
    prof = ndb.Key(Profile, user_id).get()
    return self._copyConferenceToForm(conf, getattr(prof, 'displayName'))
@endpoints.method(ConferenceForm, ConferenceForm, path='conference',
    http_method='POST', name='createConference')
def createConference(self, request):
    """Create new conference."""
    # thin endpoint wrapper; all logic lives in _createConferenceObject
    return self._createConferenceObject(request)
@endpoints.method(CONF_POST_REQUEST, ConferenceForm,
    path='conference/{websafeConferenceKey}',
    http_method='PUT', name='updateConference')
def updateConference(self, request):
    """Update conference w/provided fields & return w/updated info."""
    # thin endpoint wrapper; all logic lives in _updateConferenceObject
    return self._updateConferenceObject(request)
@endpoints.method(CONF_GET_REQUEST, ConferenceForm,
    path='conference/{websafeConferenceKey}',
    http_method='GET', name='getConference')
def getConference(self, request):
    """Return requested conference (by websafeConferenceKey)."""
    # get Conference object from request; bail if not found
    conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
    if not conf:
        raise endpoints.NotFoundException(
            'No conference found with key: %s' % request.websafeConferenceKey)
    # the conference entity's parent key is the organizer's Profile
    prof = conf.key.parent().get()
    # return ConferenceForm
    return self._copyConferenceToForm(conf, getattr(prof, 'displayName'))
@endpoints.method(message_types.VoidMessage, ConferenceForms,
    path='getConferencesCreated',
    http_method='POST', name='getConferencesCreated')
def getConferencesCreated(self, request):
    """Return conferences created by user."""
    # make sure user is authed
    user = endpoints.get_current_user()
    if not user:
        raise endpoints.UnauthorizedException('Authorization required')
    user_id = getUserId(user)
    # create ancestor query for all key matches for this user
    # (conferences are stored as children of their organizer's Profile)
    confs = Conference.query(ancestor=ndb.Key(Profile, user_id))
    prof = ndb.Key(Profile, user_id).get()
    # return set of ConferenceForm objects per Conference
    return ConferenceForms(
        items=[self._copyConferenceToForm(conf, getattr(prof, 'displayName')) for conf in confs]
    )
def _getQuery(self, request):
    """Return formatted query from the submitted filters.

    NDB requires the first sort order to match any inequality-filtered
    property, hence the ordering dance below.
    """
    q = Conference.query()
    inequality_filter, filters = self._formatFilters(request.filters)
    # If exists, sort on inequality filter first
    if not inequality_filter:
        q = q.order(Conference.name)
    else:
        q = q.order(ndb.GenericProperty(inequality_filter))
        q = q.order(Conference.name)
    for filtr in filters:
        # month/maxAttendees are stored as ints; filter values arrive as strings
        if filtr["field"] in ["month", "maxAttendees"]:
            filtr["value"] = int(filtr["value"])
        formatted_query = ndb.query.FilterNode(filtr["field"], filtr["operator"], filtr["value"])
        q = q.filter(formatted_query)
    return q
def _formatFilters(self, filters):
    """Parse, check validity and format user supplied filters.

    Returns:
        (inequality_field, formatted_filters): the single property (if
        any) that carries an inequality operator, plus the normalized
        filter dicts.

    Raises:
        BadRequestException: on unknown field/operator, or if inequality
            operators are applied to more than one field.
    """
    formatted_filters = []
    inequality_field = None
    for f in filters:
        filtr = {field.name: getattr(f, field.name) for field in f.all_fields()}
        try:
            # translate display names to the datastore property / operator
            filtr["field"] = FIELDS[filtr["field"]]
            filtr["operator"] = OPERATORS[filtr["operator"]]
        except KeyError:
            raise endpoints.BadRequestException("Filter contains invalid field or operator.")
        # Every operation except "=" is an inequality
        if filtr["operator"] != "=":
            # check if inequality operation has been used in previous filters
            # disallow the filter if inequality was performed on a different field before
            # track the field on which the inequality operation is performed
            if inequality_field and inequality_field != filtr["field"]:
                raise endpoints.BadRequestException("Inequality filter is allowed on only one field.")
            else:
                inequality_field = filtr["field"]
        formatted_filters.append(filtr)
    return (inequality_field, formatted_filters)
@endpoints.method(ConferenceQueryForms, ConferenceForms,
    path='queryConferences',
    http_method='POST',
    name='queryConferences')
def queryConferences(self, request):
    """Query for conferences."""
    conferences = self._getQuery(request)
    # need to fetch organiser displayName from profiles
    # get all keys and use get_multi for speed
    organisers = [(ndb.Key(Profile, conf.organizerUserId)) for conf in conferences]
    profiles = ndb.get_multi(organisers)
    # put display names in a dict for easier fetching
    # NOTE(review): assumes every organizer Profile exists (no None entries
    # from get_multi) — confirm, otherwise this loop raises AttributeError
    names = {}
    for profile in profiles:
        names[profile.key.id()] = profile.displayName
    # return individual ConferenceForm object per Conference
    return ConferenceForms(
        items=[self._copyConferenceToForm(conf, names[conf.organizerUserId]) for conf in \
            conferences]
    )
# - - - Profile objects - - - - - - - - - - - - - - - - - - -
def _copyProfileToForm(self, prof):
    """Copy relevant fields from Profile to ProfileForm."""
    form = ProfileForm()
    for field in form.all_fields():
        if not hasattr(prof, field.name):
            continue
        value = getattr(prof, field.name)
        # t-shirt size is stored as a string but exposed as an enum value
        if field.name == 'teeShirtSize':
            value = getattr(TeeShirtSize, value)
        setattr(form, field.name, value)
    form.check_initialized()
    return form
def _getProfileFromUser(self):
    """Return user Profile from datastore, creating new one if non-existent."""
    # make sure user is authed
    user = endpoints.get_current_user()
    if not user:
        raise endpoints.UnauthorizedException('Authorization required')
    # get Profile from datastore; the Profile key is the user's ID
    user_id = getUserId(user)
    p_key = ndb.Key(Profile, user_id)
    profile = p_key.get()
    # create new Profile if not there, seeded from the auth account
    if not profile:
        profile = Profile(
            key = p_key,
            displayName = user.nickname(),
            mainEmail= user.email(),
            teeShirtSize = str(TeeShirtSize.NOT_SPECIFIED),
        )
        profile.put()
    return profile # return Profile
def _doProfile(self, save_request=None):
    """Get user Profile and return to user, possibly updating it first.

    Args:
        save_request: optional ProfileMiniForm; when given, its
            user-modifiable fields are copied onto the stored Profile.

    Returns:
        ProfileForm reflecting the (possibly updated) Profile.
    """
    # get user Profile
    prof = self._getProfileFromUser()
    # if saveProfile(), process user-modifyable fields
    if save_request:
        for field in ('displayName', 'teeShirtSize'):
            if hasattr(save_request, field):
                val = getattr(save_request, field)
                if val:
                    # both fields are persisted as strings (enum -> str)
                    setattr(prof, field, str(val))
        prof.put()
    # return ProfileForm
    return self._copyProfileToForm(prof)
@endpoints.method(message_types.VoidMessage, ProfileForm,
    path='profile', http_method='GET', name='getProfile')
def getProfile(self, request):
    """Return user profile."""
    # read-only path: no save_request, so _doProfile only fetches
    return self._doProfile()
@endpoints.method(ProfileMiniForm, ProfileForm,
    path='profile', http_method='POST', name='saveProfile')
def saveProfile(self, request):
    """Update & return user profile."""
    # passing the request makes _doProfile persist the editable fields
    return self._doProfile(request)
# - - - Announcements - - - - - - - - - - - - - - - - - - - -
@staticmethod
def _cacheAnnouncement():
    """Create Announcement & assign to memcache; used by
    memcache cron job & putAnnouncement().

    Returns the announcement string ("" when nothing is nearly sold out).
    """
    # find conferences that are nearly (but not completely) sold out;
    # only the name is needed, hence the projection query
    confs = Conference.query(ndb.AND(
        Conference.seatsAvailable <= 5,
        Conference.seatsAvailable > 0)
    ).fetch(projection=[Conference.name])
    if confs:
        # If there are almost sold out conferences,
        # format announcement and set it in memcache
        announcement = ANNOUNCEMENT_TPL % (
            ', '.join(conf.name for conf in confs))
        memcache.set(MEMCACHE_ANNOUNCEMENTS_KEY, announcement)
    else:
        # If there are no sold out conferences,
        # delete the memcache announcements entry
        announcement = ""
        memcache.delete(MEMCACHE_ANNOUNCEMENTS_KEY)
    return announcement
@endpoints.method(message_types.VoidMessage, StringMessage,
    path='conference/announcement/get',
    http_method='GET', name='getAnnouncement')
def getAnnouncement(self, request):
    """Return Announcement from memcache."""
    # empty string when no announcement is cached
    return StringMessage(data=memcache.get(MEMCACHE_ANNOUNCEMENTS_KEY) or "")
# - - - Registration - - - - - - - - - - - - - - - - - - - -
@ndb.transactional(xg=True)
def _conferenceRegistration(self, request, reg=True):
    """Register or unregister user for selected conference.

    Runs as a cross-group transaction because it mutates both the
    Profile and the Conference entity (seat count + attendance list).

    Returns:
        BooleanMessage: True on success; False when unregistering from a
        conference the user was not registered for.
    """
    retval = None
    prof = self._getProfileFromUser() # get user Profile
    # check if conf exists given websafeConfKey
    # get conference; check that it exists
    wsck = request.websafeConferenceKey
    conf = ndb.Key(urlsafe=wsck).get()
    if not conf:
        raise endpoints.NotFoundException(
            'No conference found with key: %s' % wsck)
    # register
    if reg:
        # check if user already registered otherwise add
        if wsck in prof.conferenceKeysToAttend:
            raise ConflictException(
                "You have already registered for this conference")
        # check if seats avail
        if conf.seatsAvailable <= 0:
            raise ConflictException(
                "There are no seats available.")
        # register user, take away one seat
        prof.conferenceKeysToAttend.append(wsck)
        conf.seatsAvailable -= 1
        retval = True
    # unregister
    else:
        # check if user already registered
        if wsck in prof.conferenceKeysToAttend:
            # unregister user, add back one seat
            prof.conferenceKeysToAttend.remove(wsck)
            conf.seatsAvailable += 1
            retval = True
        else:
            retval = False
    # write things back to the datastore & return
    prof.put()
    conf.put()
    return BooleanMessage(data=retval)
@endpoints.method(message_types.VoidMessage, ConferenceForms,
    path='conferences/attending',
    http_method='GET', name='getConferencesToAttend')
def getConferencesToAttend(self, request):
    """Get list of conferences that user has registered for."""
    prof = self._getProfileFromUser() # get user Profile
    # batch-fetch all attended conferences in one round trip
    conf_keys = [ndb.Key(urlsafe=wsck) for wsck in prof.conferenceKeysToAttend]
    conferences = ndb.get_multi(conf_keys)
    # get organizers
    organisers = [ndb.Key(Profile, conf.organizerUserId) for conf in conferences]
    profiles = ndb.get_multi(organisers)
    # put display names in a dict for easier fetching
    names = {}
    for profile in profiles:
        names[profile.key.id()] = profile.displayName
    # return set of ConferenceForm objects per Conference
    return ConferenceForms(items=[self._copyConferenceToForm(conf, names[conf.organizerUserId])\
        for conf in conferences]
    )
@endpoints.method(CONF_GET_REQUEST, BooleanMessage,
    path='conference/{websafeConferenceKey}',
    http_method='POST', name='registerForConference')
def registerForConference(self, request):
    """Register user for selected conference."""
    # reg defaults to True -> register
    return self._conferenceRegistration(request)
@endpoints.method(CONF_GET_REQUEST, BooleanMessage,
    path='conference/{websafeConferenceKey}',
    http_method='DELETE', name='unregisterFromConference')
def unregisterFromConference(self, request):
    """Unregister user for selected conference."""
    # same helper as registration, with reg=False -> remove
    return self._conferenceRegistration(request, reg=False)
@endpoints.method(message_types.VoidMessage, ConferenceForms,
    path='filterPlayground',
    http_method='GET', name='filterPlayground')
def filterPlayground(self, request):
    """Filter Playground — scratch endpoint for experimenting with
    hard-coded datastore filters; not part of the public API surface."""
    q = Conference.query()
    # field = "city"
    # operator = "="
    # value = "London"
    # f = ndb.query.FilterNode(field, operator, value)
    # q = q.filter(f)
    q = q.filter(Conference.city=="London")
    q = q.filter(Conference.topics=="Medical Innovations")
    q = q.filter(Conference.month==6)
    return ConferenceForms(
        items=[self._copyConferenceToForm(conf, "") for conf in q]
    )
api = endpoints.api_server([ConferenceApi]) # register API<|fim▁end|>
|
# copy SessionForm/ProtoRPC Message into dict
|
<|file_name|>int.rs<|end_file_name|><|fim▁begin|>// Copyright 2015 The Ramp Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! This module holds `Int` and related types.
use rand::Rng;
use std;
use std::cmp::{Eq, Ord, Ordering, PartialEq, PartialOrd};
use std::error::Error;
use std::ops::{
Add, AddAssign, BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign, Div, DivAssign,
Mul, MulAssign, Neg, Rem, RemAssign, Shl, ShlAssign, Shr, ShrAssign, Sub, SubAssign,
};
use std::ptr::Unique;
use std::str::FromStr;
use std::{fmt, hash, io};
use hamming;
use num_integer::Integer;
use num_traits::{Num, One, Zero};
use ll;
use ll::limb::{BaseInt, Limb};
use ll::limb_ptr::{Limbs, LimbsMut};
use alloc::raw_vec::RawVec;
use traits::DivRem;
///
/// An arbitrary-precision signed integer.
///
/// This type grows to the size it needs to in order to store the result of any operation.
///
/// ## Creation
///
/// An `Int` can be constructed in a number of ways:
///
/// - `Int::zero` and `Int::one` construct a zero- and one-valued `Int` respectively.
///
/// - `Int::from` will convert from any primitive integer type to an `Int` of the same value
///
/// ```
/// # use ramp::Int;
/// let four = Int::from(4);
/// ```
///
/// - `Int::from_str` (or `str::parse`) will attempt to convert from a string to an `Int`
///
/// ```
/// # use ramp::Int;
/// # use std::str::FromStr;
/// let i = Int::from_str("123456789").unwrap();
/// ```
///
/// ## Output
///
/// `Int` supports all the formatting traits, allowing it to be used just like a regular integer
/// when used in `format!` and similar macros. `Int` also supports conversion to primitive integer
/// types, truncating if the `Int` cannot fit into the target type. Conversion to primtive integers
/// is done with the `From` trait:
///
/// ```
/// # use ramp::Int;
/// let big_i = Int::from(123456789);
/// let i = i32::from(&big_i);
/// assert_eq!(123456789, i);
/// ```
///
/// ## Usage
///
/// `Int` has a number of operator overloads to make working with them as painless as possible.
///
/// The most basic usage is simply `a + b` or similar. Assuming `a` and `b` are of type `Int`, this
/// operation will consume both operands, reusing the storage from one of them. If you do not wish
/// your operands to be moved, one or both of them can be references: `&a + &b` works as well, but
/// requires an entire new `Int` to be allocated for the return value.
///
/// There are also a overloads for a small number of primitive integer types, namely `i32` and
/// `usize`. While automatic type widening isn't done in Rust in general, many operations are much
/// more efficient when working with a single integer. This means you can do `a + 1` knowing that it
/// will be performed as efficiently as possible. Comparison with these integer types is also
/// possible, allowing checks for small constant values to be done easily:
///
/// ```
/// # use ramp::Int;
/// let big_i = Int::from(123456789);
/// assert!(big_i == 123456789);
/// ```
///
/// ### Semantics
///
/// Addition, subtraction and multiplication follow the expected rules for integers. Division of two
/// integers, `N / D` is defined as producing two values: a quotient, `Q`, and a remainder, `R`,
/// such that the following equation holds: `N = Q*D + R`. The division operator itself returns `Q`
/// while the remainder/modulo operator returns `R`. The sign of `R` is the same as the sign of `Q`.
///
/// The "bit-shift" operations are defined as being multiplication and division by a power-of-two for
/// shift-left and shift-right respectively. The sign of the number is unaffected.
///
/// The remaining bitwise operands act as if the numbers are stored in two's complement format and as
/// if the two inputs have the same number of bits.
pub struct Int {
    ptr: Unique<Limb>, // pointer to the limb buffer, least-significant limb first
    size: i32,         // number of limbs in use; the sign of `size` is the sign of the Int
    cap: u32,          // allocated capacity, in limbs
}
impl Int {
/// Creates the `Int` that represents zero.
pub fn zero() -> Int {
    // delegate to the `num_traits::Zero` impl so there is one source of truth
    <Int as Zero>::zero()
}
/// Creates the `Int` that represents one.
pub fn one() -> Int {
    // delegate to the `num_traits::One` impl so there is one source of truth
    <Int as One>::one()
}
/// Creates an `Int` from a single [`Limb`]
///
/// [`Limb`]: ../ll/limb/struct.Limb.html
pub fn from_single_limb(limb: Limb) -> Int {
    let mut i = Int::with_capacity(1);
    unsafe {
        // with_capacity(1) guarantees the buffer holds at least one limb
        *i.ptr.as_mut() = limb;
    }
    i.size = 1;
    i
}
/// Passes a `RawVec` version of this `Int`, which can be manipulated to alter this `Int`'s
/// allocation.
///
/// The closure may grow or shrink the vec; afterwards `ptr`/`cap` are
/// re-synced from it and any newly-allocated limbs are zeroed.
fn with_raw_vec<F: FnOnce(&mut RawVec<Limb>)>(&mut self, f: F) {
    unsafe {
        let old_cap = self.cap as usize;
        let mut vec = RawVec::from_raw_parts(self.ptr.as_mut(), old_cap);
        // if `f` panics, let `vec` do the cleaning up, not self.
        self.cap = 0;
        f(&mut vec);
        // update `self` for any changes that happened
        // vec.ptr() can't be null, so we can safely unwrap
        self.ptr = Unique::new(vec.ptr()).unwrap();
        let new_cap = vec.capacity();
        // cap is stored as u32, so the allocation must fit in u32 limbs
        assert!(new_cap <= std::u32::MAX as usize);
        self.cap = new_cap as u32;
        // ownership has transferred back into `self`, so make
        // sure that allocation isn't freed by `vec`.
        std::mem::forget(vec);
        if old_cap < new_cap {
            // the allocation got larger, new Limbs should be
            // zero.
            let self_ptr = self.limbs_uninit();
            std::ptr::write_bytes(
                &mut *self_ptr.offset(old_cap as isize) as *mut _ as *mut u8,
                0,
                (new_cap - old_cap) * std::mem::size_of::<Limb>(),
            );
        }
    }
}
/// Creates an `Int` with the given capacity (in limbs); the value is zero.
fn with_capacity(cap: u32) -> Int {
    let mut ret = Int::zero();
    if cap != 0 {
        // reserve_exact(0, cap): currently 0 limbs used, want room for `cap`
        ret.with_raw_vec(|v| v.reserve_exact(0, cap as usize))
    }
    ret
}
/// Returns the sign of this `Int` as either -1, 0 or 1 depending on whether it is negative,
/// zero, or positive, respectively.
#[inline(always)]
pub fn sign(&self) -> i32 {
    // the sign of the Int is carried by the sign of its `size` field
    self.size.signum()
}
/// Consumes this `Int` and returns its absolute value.
#[inline]
pub fn abs(mut self) -> Int {
    // sign lives in `size`, so flipping it is all that's needed
    self.size = self.size.abs();
    self
}
/// Returns the least-significant [`Limb`] of this `Int`.
///
/// [`Limb`]: ../ll/limb/struct.Limb.html
#[inline]
pub fn to_single_limb(&self) -> Limb {
    if self.sign() == 0 {
        // zero has no stored limbs; synthesize the value
        return Limb(0);
    } else {
        // limbs are stored least-significant first, so deref the base pointer
        return unsafe { *self.ptr.as_ref() };
    }
}
/// Gets the absolute value of this `Int`'s size (the limb count without the sign).
#[inline(always)]
fn abs_size(&self) -> i32 {
    self.size.abs()
}
/// Compares the absolute value of this `Int` with the absolute value of another.
pub fn abs_cmp(&self, other: &Int) -> Ordering {
    let (lhs, rhs) = (self.abs_size(), other.abs_size());
    if lhs != rhs {
        // more limbs means strictly larger magnitude (limbs are normalized)
        lhs.cmp(&rhs)
    } else {
        // equal limb counts: compare the limb arrays themselves
        unsafe { ll::cmp(self.limbs(), other.limbs(), lhs) }
    }
}
/// Returns whether this `Int` has the same absolute value as another.
pub fn abs_eq(&self, other: &Int) -> bool {
    self.abs_cmp(other) == Ordering::Equal
}
/// Hashes the value without including the sign.
///
/// This is useful for when the sign is handled elsewhere and making a copy just to change the
/// sign is wasteful.
pub fn abs_hash<H>(&self, state: &mut H)
where
    H: hash::Hasher,
{
    use std::hash::Hash;
    let mut size = self.abs_size();
    unsafe {
        // feed each limb, least-significant first, into the hasher
        let mut ptr = self.limbs();
        while size > 0 {
            let l = *ptr;
            l.hash(state);
            ptr = ptr.offset(1);
            size -= 1;
        }
    }
}
/// Shrinks the allocated data for this `Int`, attempting to remove excess capacity.
pub fn shrink_to_fit(&mut self) {
    let mut size = self.abs_size() as usize;
    if (self.cap as usize) == size {
        return;
    } // already as small as possible
    if size == 0 {
        size = 1;
    } // Keep space for at least one limb around
    self.with_raw_vec(|v| {
        v.shrink_to_fit(size);
    })
}
/// Creates a string containing the value of this `Int` in base `base`.
///
/// For bases greater than ten, if `upper` is true, upper-case letters are used; otherwise,
/// lower-case letters are used.
///
/// # Panics
///
/// Panics if `base` is less than two or greater than 36.
pub fn to_str_radix(&self, base: u8, upper: bool) -> String {
    if self.size == 0 {
        return "0".to_string();
    }
    if base < 2 || base > 36 {
        panic!("Invalid base: {}", base);
    }
    let size = self.abs_size();
    // pre-size the output buffer to avoid reallocation while writing digits
    let mut num_digits =
        unsafe { ll::base::num_base_digits(self.limbs(), size - 1, base as u32) };
    if self.sign() == -1 {
        // one extra byte for the leading '-'
        num_digits += 1;
    }
    let mut buf: Vec<u8> = Vec::with_capacity(num_digits);
    self.write_radix(&mut buf, base, upper).unwrap();
    // write_radix only emits ASCII digits/letters, so this is valid UTF-8
    unsafe { String::from_utf8_unchecked(buf) }
}
/// Similar to `to_str_radix`, writing to something that implements `io::Write` instead.
pub fn write_radix<W: io::Write>(&self, w: &mut W, base: u8, upper: bool) -> io::Result<()> {
    debug_assert!(self.well_formed());
    if self.sign() == -1 {
        w.write_all(b"-")?;
    }
    let letter = if upper { b'A' } else { b'a' };
    let size = self.abs_size();
    unsafe {
        // to_base invokes the closure once per output digit, most-significant first
        ll::base::to_base(base as u32, self.limbs(), size, |b| {
            if b < 10 {
                w.write_all(&[b + b'0']).unwrap();
            } else {
                w.write_all(&[(b - 10) + letter]).unwrap();
            }
        });
    }
    Ok(())
}
/// Creates a new Int from the given string in base `base`.
///
/// # Panics
///
/// Panics if `base` is less than two or greater than 36.
pub fn from_str_radix(mut src: &str, base: u8) -> Result<Int, ParseIntError> {
    if base < 2 || base > 36 {
        panic!("Invalid base: {}", base);
    }
    let mut sign = 1;
    if src.starts_with('-') {
        sign = -1;
        src = &src[1..];
    }
    if src.len() == 0 {
        return Err(ParseIntError {
            kind: ErrorKind::Empty,
        });
    }
    // Strip leading zeros
    let zeros = src.chars().take_while(|&digit| digit == '0').count();
    src = &src[zeros..];
    if src.len() == 0 {
        // the input was all zeros ("-000" included)
        return Ok(Int::zero());
    }
    // translate each ASCII digit into its numeric value, validating range
    let mut buf = Vec::with_capacity(src.len());
    for c in src.bytes() {
        let b = match c {
            b'0'..=b'9' => c - b'0',
            b'A'..=b'Z' => (c - b'A') + 10,
            b'a'..=b'z' => (c - b'a') + 10,
            _ => {
                return Err(ParseIntError {
                    kind: ErrorKind::InvalidDigit,
                });
            }
        };
        if b >= base {
            return Err(ParseIntError {
                kind: ErrorKind::InvalidDigit,
            });
        }
        buf.push(b);
    }
    // allocate enough limbs for the digit count, then convert in one pass
    let num_digits = ll::base::base_digits_to_len(src.len(), base as u32);
    let mut i = Int::with_capacity(num_digits as u32);
    unsafe {
        let size = ll::base::from_base(
            i.limbs_uninit(),
            buf.as_ptr(),
            buf.len() as i32,
            base as u32,
        );
        i.size = (size as i32) * sign;
    }
    Ok(i)
}
/// Divides this `Int` by `other`, returning the quotient `q` and the remainder `r` as `(q, r)`.
///
/// This satisfies `self = q * other + r`, ensuring that `q` and `r` have the same sign.
///
/// # Panics
///
/// Panics if `other` is zero.
pub fn divmod(&self, other: &Int) -> (Int, Int) {
    debug_assert!(self.well_formed());
    debug_assert!(other.well_formed());
    if other.sign() == 0 {
        ll::divide_by_zero();
    }
    if self.sign() == 0 {
        // 0 / x == 0 rem 0
        return (self.clone(), Int::zero());
    }
    // quotient needs at most (|self| - |other| + 1) limbs; at least one
    let out_size = if self.abs_size() < other.abs_size() {
        1
    } else {
        (self.abs_size() - other.abs_size()) + 1
    };
    let out_sign = self.sign() * other.sign();
    let mut q = Int::with_capacity(out_size as u32);
    q.size = out_size * out_sign;
    // remainder has at most as many limbs as the divisor
    let mut r = Int::with_capacity(other.abs_size() as u32);
    r.size = other.abs_size() * self.sign();
    unsafe {
        ll::divrem(
            q.limbs_mut(),
            r.limbs_mut(),
            self.limbs(),
            self.abs_size(),
            other.limbs(),
            other.abs_size(),
        );
    }
    // the sizes above are upper bounds; trim leading zero limbs
    q.normalize();
    r.normalize();
    (q, r)
}
/// Raises this `Int` to the power of `exp`. `0^0 = 1`.
pub fn pow(&self, exp: usize) -> Int {
    debug_assert!(self.well_formed());
    match exp {
        0 => Int::one(),
        1 => self.clone(),
        2 => self.square(),
        _ => {
            let mut signum = self.sign();
            if signum == 0 {
                // 0^n == 0 for n >= 3 (0, 1, 2 handled above)
                return Int::zero();
            }
            // an even exponent always yields a non-negative result
            if exp & 1 == 0 {
                signum = 1
            }
            let ret_sz =
                unsafe { ll::pow::num_pow_limbs(self.limbs(), self.abs_size(), exp as u32) };
            let mut ret = Int::with_capacity(ret_sz as u32);
            ret.size = ret_sz * signum;
            unsafe {
                ll::pow::pow(ret.limbs_mut(), self.limbs(), self.abs_size(), exp as u32);
            }
            ret.normalize();
            ret
        }
    }
}
/// Raises this `Int` to the power `exp`, all modulo `modulus`. `0^0 mod m = 1 mod m`
///
/// # Panics
///
/// Panics if `exp` is negative or `modulus` is zero.
pub fn pow_mod(&self, exp: &Int, modulus: &Int) -> Int {
// Take care of invalid modulus
if modulus.sign() == 0 {
panic!("Got a zero modulus");
}
// Early return for non-positive exponents
match exp.sign() {
-1 => panic!("Got a negative exponent: {}", exp),
0 => return Int::one() % modulus, // this ends up being 0 iff modulus == 1<|fim▁hole|> }
let base = self % modulus;
if base.sign() == 0 {
return Int::zero();
}
let mut result = Int::one();
for i in (0..exp.bit_length()).rev() {
result = result.dsquare() % modulus;
// Accumulate current base if current exponent bit is 1
if exp.bit(i) {
result = (result * &base) % modulus;
}
}
result
}
/// Squares this `Int`. The result is always non-negative.
pub fn square(&self) -> Int {
    debug_assert!(self.well_formed());
    let s = self.sign();
    if s == 0 {
        Int::zero()
    } else if self.abs_size() == 1 {
        // single-limb fast path: multiply by the limb directly
        let a = self.clone() * self.to_single_limb();
        if s == -1 {
            // negative * negative: drop the sign
            a.abs()
        } else if s == 1 {
            a
        } else {
            unreachable!()
        }
    } else {
        // a product of two n-limb numbers needs at most 2n limbs
        let sz = self.abs_size() * 2;
        let mut ret = Int::with_capacity(sz as u32);
        ret.size = sz;
        unsafe {
            ll::sqr(ret.limbs_mut(), self.limbs(), self.abs_size());
        }
        ret.normalize();
        ret
    }
}
/// Consumes this `Int` and returns its square.
///
/// TODO: Is there a more idiomatic way of doing this?
pub fn dsquare(mut self) -> Int {
    debug_assert!(self.well_formed());
    let s = self.sign();
    if s == 0 {
        Int::zero()
    } else if self.abs_size() == 1 {
        // single-limb fast path reuses self's own allocation
        let l = self.to_single_limb();
        self = self * l;
        if s == -1 {
            self.abs()
        } else if s == 1 {
            self
        } else {
            unreachable!()
        }
    } else {
        // multi-limb case cannot square in place; fall back to `square`
        self.square()
    }
}
/// Computes the nearest square root `s` of this number and its remainder `r` as
/// `Some((s, r))`, or `None` if this `Int` is negative.
///
/// `s` and `r` are both positive and satisfy `self = s * s + r`.
pub fn sqrt_rem(mut self) -> Option<(Int, Int)> {
    debug_assert!(self.well_formed());
    if self.sign() < 0 {
        return None;
    }
    // the floor of a (correctly rounded) f64 sqrt gives the right
    // answer, until this number (it is 67108865**2 - 1, but
    // f64::sqrt is rounded *up* to 67108865 precisely).
    if self < 4_503_599_761_588_224_u64 {
        let this = u64::from(&self);
        let sqrt = (this as f64).sqrt().floor() as u64;
        let rem = this - sqrt * sqrt;
        // reuse the memory
        self.size = 0;
        self.push(Limb(sqrt as BaseInt));
        self.normalize();
        Some((self, Int::from(rem)))
    } else {
        // Karatsuba-style recursive square root: split the number into
        // four l-bit pieces, recurse on the high half, then correct.
        let n = self.bit_length();
        let l = (n as usize - 1) / 4;
        assert!(l > 0);
        let mask = (Int::from(1) << l) - 1;
        let low = &self & &mask;
        self >>= l;
        let mut middle = &self & mask;
        self >>= l;
        let (high_sqrt, mut high_rem) = self.sqrt_rem().unwrap();
        high_rem <<= l;
        middle |= high_rem;
        let (q, u) = middle.divmod(&(&high_sqrt << 1));
        let mut s = (high_sqrt << l) + &q;
        let mut r = (u << l) + low - q.dsquare();
        // the estimate may overshoot by one; adjust so r stays non-negative
        if r < 0 {
            r += &s << 1;
            r -= 1;
            s -= 1;
        }
        debug_assert!(r >= 0);
        Some((s, r))
    }
}
/// Negates this `Int` in place (the sign is carried by `size`).
pub fn negate(&mut self) {
    self.size *= -1;
}
/// Returns whether this `Int` is even. Zero is considered even.
#[inline]
pub fn is_even(&self) -> bool {
    debug_assert!(self.well_formed());
    // parity is determined entirely by bit 0 of the least-significant limb
    (self.to_single_limb().0 & 1) == 0
}
/// Returns the number of trailing zero bits for this `Int`, or zero if this `Int` is zero.
#[inline]
pub fn trailing_zeros(&self) -> u32 {
    debug_assert!(self.well_formed());
    if self.sign() == 0 {
        0
    } else {
        // scan_1 finds the index of the lowest set bit
        unsafe { ll::scan_1(self.limbs(), self.abs_size()) }
    }
}
/// Returns the number of one bits (i.e. the population count) for this `Int`
///
/// If this number is negative, it has infinitely many ones (in two's complement). Therefore,
/// this method returns `usize::MAX` for negative numbers.
pub fn count_ones(&self) -> usize {
    debug_assert!(self.well_formed());
    if self.sign() < 0 {
        std::usize::MAX
    } else {
        // view the limb buffer as raw bytes and let `hamming` count bits
        let bytes = unsafe {
            std::slice::from_raw_parts(
                self.ptr.as_ref() as *const _ as *const u8,
                self.abs_size() as usize * std::mem::size_of::<Limb>(),
            )
        };
        hamming::weight(bytes) as usize
    }
}
/// Returns the number of bits required to represent the absolute value of this `Int`, i.e.,
/// `floor(log2(abs(self))) + 1`.
///
/// Returns one if this number is zero.
#[inline]
pub fn bit_length(&self) -> u32 {
    if *self == 0 {
        1
    } else {
        // counting base-2 digits is exactly the bit length
        unsafe { ll::base::num_base_digits(self.limbs(), self.abs_size(), 2) as u32 }
    }
}
/// Returns the value of the `bit`th bit in this `Int`, as if it were represented in two's
/// complement.
#[inline]
pub fn bit(&self, bit: u32) -> bool {
    // locate the limb containing the bit, and the bit's position within it
    let word = (bit / Limb::BITS as u32) as isize;
    let subbit = bit % Limb::BITS as u32;
    if word < self.abs_size() as isize {
        let b = unsafe {
            let w: Limb = *self.limbs().offset(word);
            w.0 & (1 << subbit) != 0
        };
        if self.sign() >= 0 {
            b
        } else {
            let first_one = self.trailing_zeros();
            // the number is negative, so, in two's complement,
            // bits up to and including the first one are the same
            // as their sign-magnitude values (... ^ false), while
            // bits beyond that are complemented (... ^ true)
            b ^ (bit > first_one)
        }
    } else {
        // we're beyond the in-memory limbs, so the bits are
        // either all zeros (positive) or all ones (negative)
        self.sign() < 0
    }
}
/// Sets the `bit`th bit of this number to `bit_val`, treating negative numbers as if they're
/// stored in two's complement.
pub fn set_bit(&mut self, bit: u32, bit_val: bool) {
    debug_assert!(self.well_formed());
    let word = bit / Limb::BITS as u32;
    let subbit = bit % Limb::BITS as u32;
    let flag = Limb(1 << subbit);
    let sign = self.sign();
    unsafe {
        if word >= self.abs_size() as u32 {
            // the bit is beyond the end, so more space is needed,
            // and we need to be careful to ensure it's all zero
            // because they'll all be part of the number itself
            // used once the bit is set
            self.ensure_capacity(word + 1);
            let size = self.abs_size();
            ll::zero(
                self.limbs_uninit().offset(size as isize),
                word as i32 - size + 1,
            );
            self.size = word as i32 + 1;
            if sign < 0 {
                self.size = -self.size
            }
        }
        if sign < 0 {
            // convert to two's complement so the bit twiddle below is
            // meaningful for negative values
            // this could probably be replaced by something
            // similar to what `bit` does
            self.negate_twos_complement();
        }
        let mut ptr = self.limbs_mut().offset(word as isize);
        let val = if bit_val { *ptr | flag } else { *ptr & !flag };
        *ptr = val;
        if sign < 0 {
            // put self back to normal
            self.negate_twos_complement();
        }
    }
    // trim any leading zero limbs the operation may have produced
    self.normalize()
}
/// Gets the `Limbs` currently initialised or in use.
fn limbs(&self) -> Limbs {
    unsafe { Limbs::new(self.ptr.as_ref(), 0, self.abs_size()) }
}
    /// Gets the `LimbsMut` currently initialised or in use.
    // Mutable counterpart of `limbs`; same bounds (only the initialised prefix).
    fn limbs_mut(&mut self) -> LimbsMut {
        unsafe { LimbsMut::new(self.ptr.as_mut(), 0, self.abs_size()) }
    }
    /// Gets the `LimbsMut` to all allocated limbs.
    ///
    /// Unsafe: limbs beyond `abs_size()` may be uninitialised; callers must write
    /// before reading them.
    unsafe fn limbs_uninit(&mut self) -> LimbsMut {
        LimbsMut::new(self.ptr.as_mut(), 0, self.cap as i32)
    }
/// Ensures that the `Int` has at least the given capacity.
fn ensure_capacity(&mut self, cap: u32) {
if cap > self.cap {
let old_cap = self.cap as usize;
self.with_raw_vec(|v| v.reserve_exact(old_cap, cap as usize - old_cap))
}
}
    /// Pushes a `Limb` onto this `Int`, growing the magnitude by one limb.
    ///
    /// The sign is preserved (the signed `size` moves one step away from zero);
    /// pushing onto a zero value makes it positive.
    fn push(&mut self, limb: Limb) {
        let new_size = (self.abs_size() + 1) as u32;
        self.ensure_capacity(new_size);
        unsafe {
            // Write the new most-significant limb just past the current ones.
            let pos = self.abs_size();
            *self.limbs_uninit().offset(pos as isize) = limb;
            // If it was previously empty, then just make it positive,
            // otherwise maintain the signedness
            if self.size == 0 {
                self.size = 1;
            } else {
                self.size += self.sign();
            }
        }
    }
    /// Adjusts the size field so that the most-significant `Limb` is non-zero.
    ///
    /// Strips high zero limbs by shrinking the signed `size` toward zero; the
    /// sign is preserved, and an all-zero value collapses to `size == 0`.
    fn normalize(&mut self) {
        if self.size == 0 {
            return;
        }
        let sign = self.sign();
        unsafe {
            // Subtracting `sign` moves `size` one limb toward zero per iteration.
            while self.size != 0 && *self.ptr.as_ptr().offset((self.abs_size() - 1) as isize) == 0 {
                self.size -= sign;
            }
        }
        debug_assert!(self.well_formed());
    }
/// Returns whether the `Int` is well-formed, i.e. that the size doesn't exceed the capacity and
/// that the most significant `Limb` is non-zero.
fn well_formed(&self) -> bool {
if self.size == 0 {
return true;
}
if (self.abs_size() as u32) > self.cap {
return false;
}
let high_limb = unsafe { *self.ptr.as_ptr().offset((self.abs_size() - 1) as isize) };
return high_limb != 0;
}
    /// Negates this `Int` using two's complement, i.e. `!self + 1`.
    ///
    /// Operates in place on the limb data; a non-zero carry out grows the number
    /// by one limb. The signed `size` is flipped afterwards, so this also toggles
    /// the stored sign.
    fn negate_twos_complement(&mut self) {
        unsafe {
            let self_ptr = self.limbs_mut();
            // In-place `!x + 1` over the magnitude.
            let carry = ll::twos_complement(self_ptr, self_ptr.as_const(), self.abs_size());
            if carry != 0 {
                self.push(carry)
            }
        }
        self.size = -self.size;
    }
    /// Computes the greatest common divisor (GCD) of this `Int` and `other`.
    ///
    /// The result is always positive. Both inputs are cloned because `ll::gcd`
    /// clobbers its operands.
    #[inline]
    pub fn gcd(&self, other: &Int) -> Int {
        debug_assert!(self.well_formed());
        debug_assert!(other.well_formed());
        // `a` is the operand with at least as many limbs as `b`, as `ll::gcd` requires.
        let (mut a, mut b) = if self.abs_size() >= other.abs_size() {
            ((*self).clone(), (*other).clone())
        } else {
            ((*other).clone(), (*self).clone())
        };
        // gcd(0, x) == x and gcd(x, 0) == x.
        if a == Int::zero() {
            return b;
        }
        if b == Int::zero() {
            return a;
        }
        let out_size = a.abs_size();
        let mut r = Int::with_capacity(out_size as u32);
        r.size = out_size;
        unsafe {
            ll::gcd(
                r.limbs_mut(),
                a.limbs_mut(),
                a.abs_size(),
                b.limbs_mut(),
                b.abs_size(),
            );
            // The GCD may occupy fewer limbs than `a`; trim high zeros.
            r.normalize();
            r
        }
    }
/// Computes the lowest common multiple (LCM) of this `Int` and `other`.
#[inline]
pub fn lcm(&self, other: &Int) -> Int {
(self * other).abs() / self.gcd(other)
}
    /// Converts this `Int` into an `f64`.
    ///
    /// This is not an exact conversion, because this `Int` may be more precise than an `f64` can
    /// account for. Only up to the top 64 significant bits contribute to the mantissa; the
    /// rest is accounted for via the exponent.
    pub fn to_f64(&self) -> f64 {
        let sz = self.abs_size();
        if sz == 0 {
            return 0.0;
        }
        let mut highest_limb = unsafe { *self.limbs().offset((sz - 1) as isize) };
        let leading_zeros = highest_limb.leading_zeros();
        // Number of extra low bits pulled into `highest_limb` from the next limb.
        let mut shifted = 0;
        // An f64 mantissa holds 52 bits; if the top limb has fewer than ~53 significant
        // bits, borrow bits from the next limb for extra precision.
        if leading_zeros > 11 && sz > 1 {
            highest_limb = highest_limb << leading_zeros;
            let next_limb = unsafe { *self.limbs().offset((sz - 2) as isize) };
            highest_limb = highest_limb | (next_limb >> (Limb::BITS - leading_zeros as usize));
            shifted = leading_zeros;
        }
        // Scale by 2^(bits below the captured window).
        let exp = ((sz - 1) * Limb::BITS as i32) - shifted as i32;
        let f = highest_limb.0 as f64;
        let exp = (2.0f64).powi(exp);
        f * exp
    }
}
impl Clone for Int {
    /// Deep-copies the limb data into a fresh allocation sized exactly to the value.
    fn clone(&self) -> Int {
        debug_assert!(self.well_formed());
        if self.sign() == 0 {
            return Int::zero();
        }
        let mut new = Int::with_capacity(self.abs_size() as u32);
        unsafe {
            // Copy all initialised limbs into the new (uninitialised) buffer.
            ll::copy_incr(self.limbs(), new.limbs_uninit(), self.abs_size());
        }
        new.size = self.size;
        new
    }

    /// Copies `other` into `self`, reusing `self`'s allocation when it is big enough.
    fn clone_from(&mut self, other: &Int) {
        debug_assert!(self.well_formed());
        debug_assert!(other.well_formed());
        if other.sign() == 0 {
            self.size = 0;
            return;
        }
        self.ensure_capacity(other.abs_size() as u32);
        unsafe {
            ll::copy_incr(other.limbs(), self.limbs_uninit(), other.abs_size());
            self.size = other.size;
        }
    }
}
impl std::default::Default for Int {
    /// The default `Int` is zero, matching the numeric primitives.
    #[inline]
    fn default() -> Int {
        Int::zero()
    }
}
impl Drop for Int {
    /// Releases the limb buffer by handing ownership back to a `RawVec`,
    /// which frees it when dropped. Zeroing `cap`/`size` guards against
    /// any accidental reuse after drop.
    fn drop(&mut self) {
        if self.cap > 0 {
            unsafe {
                drop(RawVec::from_raw_parts(self.ptr.as_mut(), self.cap as usize));
            }
            self.cap = 0;
            self.size = 0;
        }
    }
}
impl PartialEq<Int> for Int {
    /// Two `Int`s are equal iff their signed sizes match and their limbs compare equal.
    #[inline]
    fn eq(&self, other: &Int) -> bool {
        debug_assert!(self.well_formed());
        debug_assert!(other.well_formed());
        // Equal values necessarily share the same signed `size`; only then is a
        // limb-by-limb comparison needed.
        self.size == other.size
            && unsafe { ll::cmp(self.limbs(), other.limbs(), self.abs_size()) == Ordering::Equal }
    }
}
impl PartialEq<Limb> for Int {
    /// An `Int` equals an (unsigned) limb when it is zero and the limb is zero,
    /// or when it is a single positive limb with the same value.
    #[inline]
    fn eq(&self, other: &Limb) -> bool {
        (self.size == 0 && *other == 0) || (self.size == 1 && *self.limbs() == *other)
    }
}
impl PartialEq<Int> for Limb {
#[inline]
fn eq(&self, other: &Int) -> bool {
other.eq(self)
}
}
// `PartialEq` above is reflexive and total on well-formed `Int`s, so `Eq` is sound.
impl Eq for Int {}
impl Ord for Int {
    /// Total ordering. The signed `size` field encodes both sign and limb count,
    /// so comparing sizes alone orders values of different sign or magnitude class:
    /// any negative size < 0 < any positive size, and more limbs means larger
    /// magnitude. Only equal signed sizes require a limb comparison.
    #[inline]
    fn cmp(&self, other: &Int) -> Ordering {
        if self.size < other.size {
            Ordering::Less
        } else if self.size > other.size {
            Ordering::Greater
        } else {
            // Same number of digits and same sign
            // Check for zero
            if self.size == 0 {
                return Ordering::Equal;
            }
            unsafe {
                // If both are positive, do `self cmp other`, if both are
                // negative, do `other cmp self` (larger magnitude is smaller
                // for negatives)
                if self.sign() == 1 {
                    ll::cmp(self.limbs(), other.limbs(), self.abs_size())
                } else {
                    ll::cmp(other.limbs(), self.limbs(), self.abs_size())
                }
            }
        }
    }
}
impl PartialOrd<Int> for Int {
    /// `Int` is totally ordered, so this always succeeds via `Ord`.
    #[inline]
    fn partial_cmp(&self, other: &Int) -> Option<Ordering> {
        Some(Ord::cmp(self, other))
    }
}
impl PartialOrd<Limb> for Int {
    /// Compares against an unsigned limb: anything zero-or-negative or shorter
    /// than one limb is `Less`, anything longer is `Greater`, a single positive
    /// limb is compared directly.
    #[inline]
    fn partial_cmp(&self, other: &Limb) -> Option<Ordering> {
        if self.eq(other) {
            return Some(Ordering::Equal);
        }
        match self.size.cmp(&1) {
            Ordering::Less => Some(Ordering::Less),
            Ordering::Greater => Some(Ordering::Greater),
            Ordering::Equal => (*self.limbs()).partial_cmp(other),
        }
    }
}
impl PartialOrd<Int> for Limb {
#[inline]
fn partial_cmp(&self, other: &Int) -> Option<Ordering> {
other.partial_cmp(self).map(|o| o.reverse())
}
}
impl hash::Hash for Int {
    /// Hashes sign then magnitude of a normalized copy, so numerically equal
    /// `Int`s hash identically regardless of their limb representation.
    fn hash<H>(&self, state: &mut H)
    where
        H: hash::Hasher,
    {
        debug_assert!(self.well_formed());
        let mut normalized = self.clone();
        normalized.normalize();
        normalized.sign().hash(state);
        normalized.abs_hash(state);
    }
}
impl AddAssign<Limb> for Int {
    /// Adds an unsigned limb in place, reusing `self`'s storage.
    fn add_assign(&mut self, other: Limb) {
        debug_assert!(self.well_formed());
        if other == 0 {
            return;
        }
        // No capacity means `self` is zero. Just push `other` into it
        if self.cap == 0 {
            self.push(other);
            return;
        }
        // This is zero, but has allocated space, so just store `other`
        if self.size == 0 {
            unsafe {
                *self.limbs_uninit() = other;
                self.size = 1;
                return;
            }
        }
        // `self` is non-zero, reuse the storage for the result.
        unsafe {
            let sign = self.sign();
            let size = self.abs_size();
            let mut ptr = self.limbs_mut();
            // Self is positive, just add `other`
            if sign == 1 {
                let carry = ll::add_1(ptr, ptr.as_const(), size, other);
                if carry != 0 {
                    self.push(carry);
                }
            } else {
                // Self is negative, "subtract" other from self, basically doing:
                // -(-self - other) == self + other
                let borrow = ll::sub_1(ptr, ptr.as_const(), size, other);
                if borrow != 0 {
                    // There was a borrow, this means that abs(other) > abs(self), i.e., we are a
                    // single limb and self - other has overflowed. So flip the result across MAX
                    // and keep it positive. Example: Int(-14) += Limb(15) becomes -(Int(14) - 15).
                    // But the -1 overflows. So make the -1 into +1 and return.
                    // (The wrapped limb is never 0 here, since a borrow implies other != self.)
                    *ptr = BaseInt::max_value() - *ptr + 1;
                    self.size = self.abs_size();
                }
                self.normalize();
            }
        }
    }
}
impl Add<Limb> for Int {
    type Output = Int;

    /// Consumes `self` and returns `self + other`, reusing the allocation.
    #[inline]
    fn add(self, other: Limb) -> Int {
        let mut sum = self;
        sum += other;
        sum
    }
}
impl<'a> AddAssign<&'a Int> for Int {
    /// Adds `other` in place. Same-sign operands are added limb-wise; opposite
    /// signs subtract the smaller magnitude from the larger and take its sign.
    fn add_assign(&mut self, other: &'a Int) {
        debug_assert!(self.well_formed());
        debug_assert!(other.well_formed());
        if self.sign() == 0 {
            // Try to reuse the allocation from `self`
            self.clone_from(other);
            return;
        }
        if other.sign() == 0 {
            return;
        }
        if self.sign() == other.sign() {
            // Signs are the same, add the two numbers together and re-apply
            // the sign after.
            let sign = self.sign();
            unsafe {
                // There's a restriction that x-size >= y-size, we can swap the operands
                // no problem, but we'd like to re-use `self`s memory if possible, so
                // if `self` is the smaller of the two we make sure it has enough space
                // for the result
                let (xp, xs, yp, ys) = if self.abs_size() >= other.abs_size() {
                    (
                        self.limbs(),
                        self.abs_size(),
                        other.limbs(),
                        other.abs_size(),
                    )
                } else {
                    // NB: capacity is grown *before* taking `self.limbs()` below,
                    // so the pointers stay valid.
                    self.ensure_capacity(other.abs_size() as u32);
                    (
                        other.limbs(),
                        other.abs_size(),
                        self.limbs(),
                        self.abs_size(),
                    )
                };
                // Fetch the pointer first to make completely sure the compiler
                // won't make bogus claims about nonaliasing due to the &mut
                let ptr = self.limbs_uninit();
                let carry = ll::add(ptr, xp, xs, yp, ys);
                self.size = xs * sign;
                if carry != 0 {
                    self.push(carry);
                }
                self.normalize();
            }
        } else {
            // Signs are different, use the sign from the bigger (absolute value)
            // of the two numbers and subtract the smaller one.
            unsafe {
                let (xp, xs, yp, ys) = if self.abs_size() > other.abs_size() {
                    (self.limbs(), self.size, other.limbs(), other.size)
                } else if self.abs_size() < other.abs_size() {
                    self.ensure_capacity(other.abs_size() as u32);
                    (other.limbs(), other.size, self.limbs(), self.size)
                } else {
                    // Equal limb counts: magnitude comparison decides which operand wins.
                    match self.abs_cmp(other) {
                        Ordering::Equal => {
                            // They're equal, but opposite signs, so the result
                            // will be zero, clear `self` and return
                            self.size = 0;
                            return;
                        }
                        Ordering::Greater => (self.limbs(), self.size, other.limbs(), other.size),
                        Ordering::Less => (other.limbs(), other.size, self.limbs(), self.size),
                    }
                };
                // Fetch the pointer first to make completely sure the compiler
                // won't make bogus claims about nonaliasing due to the &mut
                let ptr = self.limbs_uninit();
                let _borrow = ll::sub(ptr, xp, xs.abs(), yp, ys.abs());
                // There shouldn't be any borrow (we subtract the smaller magnitude)
                debug_assert!(_borrow == 0);
                self.size = xs;
                self.normalize();
                debug_assert!(self.abs_size() > 0);
            }
        }
    }
}
impl<'a> Add<&'a Int> for Int {
    type Output = Int;

    /// Consumes `self`, adding `other` into its buffer where possible.
    #[inline]
    fn add(self, other: &'a Int) -> Int {
        let mut sum = self;
        sum += other;
        sum
    }
}
impl<'a> Add<Int> for &'a Int {
    type Output = Int;

    /// Addition is commutative, so delegate to `Int + &Int`.
    #[inline]
    fn add(self, other: Int) -> Int {
        other + self
    }
}
impl Add<Int> for Int {
    type Output = Int;

    /// Adds two owned `Int`s, keeping ownership of the operand with more limbs
    /// so its buffer can hold the result.
    #[inline]
    fn add(self, other: Int) -> Int {
        // Adding zero is the identity; hand the other operand straight back.
        if other.sign() == 0 {
            return self;
        }
        if self.sign() == 0 {
            return other;
        }
        if self.abs_size() >= other.abs_size() {
            self + &other
        } else {
            other + &self
        }
    }
}
impl AddAssign<Int> for Int {
    /// Adds an owned `Int` in place, using whichever operand's allocation is larger.
    #[inline]
    fn add_assign(&mut self, mut other: Int) {
        // Use the allocation of the larger of the two inputs.
        // Doing the obvious and simply swapping self and other
        // when other.size > self.size results in poor codegen.
        if other.abs_size() > self.abs_size() {
            // Instead we do the addition in-place on `other`, then overwrite
            // self with other. This results in better codegen and better
            // memory allocation behaviour.
            other += &*self;
            *self = other;
        } else {
            *self += &other;
        }
    }
}
impl<'a, 'b> Add<&'a Int> for &'b Int {
    type Output = Int;

    /// Adds two borrowed `Int`s, cloning the operand with more limbs so the
    /// clone's buffer can hold the result.
    #[inline]
    fn add(self, other: &'a Int) -> Int {
        if self.sign() == 0 {
            return other.clone();
        }
        if other.sign() == 0 {
            return self.clone();
        }
        if self.abs_size() >= other.abs_size() {
            self.clone() + other
        } else {
            other.clone() + self
        }
    }
}
impl SubAssign<Limb> for Int {
    /// Subtracts an unsigned limb in place, reusing `self`'s storage.
    fn sub_assign(&mut self, other: Limb) {
        debug_assert!(self.well_formed());
        if other == 0 {
            return;
        }
        // No capacity means `self` is zero. Just push the limb, then flip the
        // sign: 0 - other == -other.
        if self.cap == 0 {
            self.push(other);
            self.size = -1;
            return;
        }
        // This is zero, but has allocated space, so just store `other` (negated).
        if self.size == 0 {
            unsafe {
                *self.limbs_uninit() = other;
                self.size = -1;
            }
            return;
        }
        // `self` is non-zero, reuse the storage for the result.
        unsafe {
            let sign = self.sign();
            let size = self.abs_size();
            let ptr = self.limbs_mut();
            // Self is negative, just "add" `other`: -self - other == -(self + other)
            if sign == -1 {
                let carry = ll::add_1(ptr, ptr.as_const(), size, other);
                if carry != 0 {
                    self.push(carry);
                }
            } else {
                // Self is positive, subtract other from self
                let carry = ll::sub_1(ptr, ptr.as_const(), size, other);
                self.normalize();
                if carry != 0 {
                    // There was a carry, and the native operations
                    // work with two's complement, so we need to get
                    // everything back into sign-magnitude form
                    self.negate_twos_complement()
                }
            }
        }
        debug_assert!(self.well_formed());
    }
}
impl Sub<Limb> for Int {
    type Output = Int;

    /// Consumes `self` and returns `self - other`, reusing the allocation.
    #[inline]
    fn sub(self, other: Limb) -> Int {
        let mut diff = self;
        diff -= other;
        diff
    }
}
impl<'a> SubAssign<&'a Int> for Int {
    /// Subtracts `other` in place. Same-sign operands subtract magnitudes
    /// (flipping the sign if `other`'s magnitude is larger); opposite signs
    /// reduce to an addition of magnitudes.
    fn sub_assign(&mut self, other: &'a Int) {
        debug_assert!(self.well_formed());
        debug_assert!(other.well_formed());
        // LHS is zero, set self to the negation of the RHS
        if self.sign() == 0 {
            self.clone_from(other);
            self.size *= -1;
            return;
        }
        // RHS is zero, do nothing
        if other.sign() == 0 {
            return;
        }
        if self.sign() == other.sign() {
            unsafe {
                // Signs are the same, subtract the smaller one from
                // the bigger one and adjust the sign as appropriate
                let (xp, xs, yp, ys, flip) = match self.abs_cmp(other) {
                    Ordering::Equal => {
                        // x - x, just return zero
                        self.size = 0;
                        return;
                    }
                    Ordering::Less => {
                        // |other| is larger: compute |other| - |self| and flip the sign.
                        self.ensure_capacity(other.abs_size() as u32);
                        (other.limbs(), other.size, self.limbs(), self.size, true)
                    }
                    Ordering::Greater => {
                        (self.limbs(), self.size, other.limbs(), other.size, false)
                    }
                };
                // Fetch the pointer first to make completely sure the compiler
                // won't make bogus claims about nonaliasing due to the &mut
                let ptr = self.limbs_uninit();
                let _borrow = ll::sub(ptr, xp, xs.abs(), yp, ys.abs());
                debug_assert!(_borrow == 0);
                self.size = if flip { xs * -1 } else { xs };
            }
            self.normalize();
        } else {
            // Different signs
            if self.sign() == -1 {
                // self is negative, use addition and negation:
                // -self - other == -(self + other)
                self.size *= -1;
                *self += other;
                self.size *= -1;
            } else {
                unsafe {
                    // Other is negative, handle as addition of magnitudes
                    let (xp, xs, yp, ys) = if self.abs_size() >= other.abs_size() {
                        (
                            self.limbs(),
                            self.abs_size(),
                            other.limbs(),
                            other.abs_size(),
                        )
                    } else {
                        self.ensure_capacity(other.abs_size() as u32);
                        (
                            other.limbs(),
                            other.abs_size(),
                            self.limbs(),
                            self.abs_size(),
                        )
                    };
                    // Fetch the pointer first to make completely sure the compiler
                    // won't make bogus claims about nonaliasing due to the &mut
                    let ptr = self.limbs_uninit();
                    let carry = ll::add(ptr, xp, xs, yp, ys);
                    self.size = xs;
                    if carry != 0 {
                        self.push(carry);
                    }
                    self.normalize();
                }
            }
        }
    }
}
impl<'a> Sub<&'a Int> for Int {
    type Output = Int;

    /// Consumes `self`, subtracting `other` in its buffer where possible.
    #[inline]
    fn sub(self, other: &'a Int) -> Int {
        let mut diff = self;
        diff -= other;
        diff
    }
}
impl<'a> Sub<Int> for &'a Int {
    type Output = Int;

    /// Computes `self - other`, reusing `other`'s allocation.
    #[inline]
    fn sub(self, other: Int) -> Int {
        if self.sign() == 0 {
            return -other;
        }
        if other.sign() == 0 {
            let mut result = other;
            result.clone_from(self);
            return result;
        }
        // self - other == -(other - self), and `other - self` can reuse `other`.
        -(other - self)
    }
}
impl Sub<Int> for Int {
    type Output = Int;

    /// Subtracts two owned `Int`s, reusing `self`'s allocation.
    #[inline]
    fn sub(self, other: Int) -> Int {
        if self.sign() == 0 {
            return -other;
        }
        if other.sign() == 0 {
            return self;
        }
        self - &other
    }
}
impl SubAssign<Int> for Int {
    /// Subtracts an owned `Int` in place.
    #[inline]
    fn sub_assign(&mut self, other: Int) {
        // x - 0 == x.
        if other.sign() == 0 {
            return;
        }
        // 0 - x == -x; take over `other`'s allocation.
        if self.sign() == 0 {
            *self = -other;
            return;
        }
        *self -= &other;
    }
}
impl<'a, 'b> Sub<&'a Int> for &'b Int {
    type Output = Int;

    /// Subtracts two borrowed `Int`s, cloning `self` to hold the result.
    #[inline]
    fn sub(self, other: &'a Int) -> Int {
        if other.sign() == 0 {
            return self.clone();
        }
        if self.sign() == 0 {
            return -other;
        }
        self.clone() - other
    }
}
impl MulAssign<Limb> for Int {
    /// Multiplies by an unsigned limb in place; the sign of `self` is preserved.
    fn mul_assign(&mut self, other: Limb) {
        debug_assert!(self.well_formed());
        // x * 0 == 0 and 0 * x == 0.
        if other == 0 || self.sign() == 0 {
            self.size = 0;
            return;
        }
        // x * 1 == x.
        if other == 1 {
            return;
        }
        unsafe {
            // NOTE(review): destination and source alias here; `ll::mul_1` is
            // presumably safe for in-place use — confirm against its contract.
            let carry = ll::mul_1(self.limbs_mut(), self.limbs(), self.abs_size(), other);
            if carry != 0 {
                self.push(carry);
            }
        }
    }
}
impl Mul<Limb> for Int {
    type Output = Int;

    /// Consumes `self` and returns `self * other`, reusing the allocation.
    #[inline]
    fn mul(self, other: Limb) -> Int {
        let mut product = self;
        product *= other;
        product
    }
}
impl<'a, 'b> Mul<&'a Int> for &'b Int {
    type Output = Int;

    /// Multiplies two borrowed `Int`s, allocating a fresh buffer for the product.
    fn mul(self, other: &'a Int) -> Int {
        debug_assert!(self.well_formed());
        debug_assert!(other.well_formed());
        // This is the main function, since in the general case
        // we need to allocate space for the return. Special cases
        // where this isn't necessary are handled in the other impls
        // 0 * x = 0
        if self.sign() == 0 || other.sign() == 0 {
            return Int::zero();
        }
        // Sign rule: product sign is the product of the operand signs.
        let out_sign = self.sign() * other.sign();
        if self.abs_size() == 1 {
            // Single-limb multiplicand: use the limb fast path and fix the sign.
            let mut ret = other.clone() * *self.limbs();
            let size = ret.abs_size();
            ret.size = size * out_sign;
            return ret;
        }
        if other.abs_size() == 1 {
            let mut ret = self.clone() * *other.limbs();
            let size = ret.abs_size();
            ret.size = size * out_sign;
            return ret;
        }
        // Full product needs at most xs + ys limbs.
        let out_size = self.abs_size() + other.abs_size();
        let mut out = Int::with_capacity(out_size as u32);
        out.size = out_size * out_sign;
        unsafe {
            // `ll::mul` requires the first operand to have at least as many limbs.
            let (xp, xs, yp, ys) = if self.abs_size() >= other.abs_size() {
                (
                    self.limbs(),
                    self.abs_size(),
                    other.limbs(),
                    other.abs_size(),
                )
            } else {
                (
                    other.limbs(),
                    other.abs_size(),
                    self.limbs(),
                    self.abs_size(),
                )
            };
            ll::mul(out.limbs_mut(), xp, xs, yp, ys);
            // Top limb may be zero
            out.normalize();
            return out;
        }
    }
}
impl<'a> Mul<&'a Int> for Int {
    type Output = Int;

    /// Multiplies, reusing `self`'s allocation when `other` fits in one limb.
    #[inline]
    fn mul(mut self, other: &'a Int) -> Int {
        match other.abs_size() {
            // `other` is zero, so the product is zero.
            0 => {
                self.size = 0;
                self
            }
            // Single-limb multiplier: multiply in place and fix up the sign.
            1 => {
                let mut product = self * *other.limbs();
                product.size *= other.sign();
                product
            }
            // General case needs a fresh allocation; use the by-reference impl.
            _ => (&self) * other,
        }
    }
}
impl<'a> Mul<Int> for &'a Int {
    type Output = Int;

    /// Multiplication is commutative, so delegate to `Int * &Int`.
    #[inline]
    fn mul(self, other: Int) -> Int {
        other.mul(self)
    }
}
impl Mul<Int> for Int {
    type Output = Int;

    /// Multiplies two owned `Int`s, reusing an allocation when either operand
    /// is a single limb.
    fn mul(mut self, other: Int) -> Int {
        if self.sign() == 0 || other.sign() == 0 {
            self.size = 0;
            return self;
        }
        // Single-limb operand: multiply the other operand in place and restore sign.
        if self.abs_size() == 1 {
            let limb = *self.limbs();
            let mut product = other * limb;
            product.size *= self.sign();
            return product;
        }
        if other.abs_size() == 1 {
            let limb = *other.limbs();
            let mut product = self * limb;
            product.size *= other.sign();
            return product;
        }
        // Both multi-limb: a fresh allocation is unavoidable, forward to by-ref.
        (&self) * (&other)
    }
}
impl<'a> MulAssign<&'a Int> for Int {
    /// Multiplies by a borrowed `Int` in place.
    #[inline]
    fn mul_assign(&mut self, other: &'a Int) {
        // 0 * x == 0: nothing to do.
        if self.sign() == 0 {
            return;
        }
        // x * 0 == 0.
        if other.sign() == 0 {
            self.size = 0;
            return;
        }
        *self = &*self * other;
    }
}
impl MulAssign<Int> for Int {
    /// Multiplies by an owned `Int` in place.
    #[inline]
    fn mul_assign(&mut self, other: Int) {
        // 0 * x == 0: nothing to do.
        if self.sign() == 0 {
            return;
        }
        // x * 0 == 0.
        if other.sign() == 0 {
            self.size = 0;
            return;
        }
        *self = &*self * other;
    }
}
impl DivAssign<Limb> for Int {
    /// Divides in place by an unsigned limb, discarding the remainder
    /// (truncating division); the sign of `self` is preserved.
    ///
    /// Panics (via `ll::divide_by_zero`) when `other` is zero.
    fn div_assign(&mut self, other: Limb) {
        debug_assert!(self.well_formed());
        if other == 0 {
            ll::divide_by_zero();
        }
        // x / 1 == x, 0 / x == 0.
        if other == 1 || self.sign() == 0 {
            return;
        }
        unsafe {
            // Ignore the remainder
            ll::divrem_1(self.limbs_mut(), 0, self.limbs(), self.abs_size(), other);
            // Adjust the size if necessary (the quotient may have a zero top limb)
            self.normalize();
        }
    }
}
impl Div<Limb> for Int {
    type Output = Int;

    /// Consumes `self` and returns `self / other`, reusing the allocation.
    #[inline]
    fn div(self, other: Limb) -> Int {
        let mut quotient = self;
        quotient /= other;
        quotient
    }
}
impl<'a, 'b> Div<&'a Int> for &'b Int {
    type Output = Int;

    /// Divides `self` by `other` (truncating), panicking on a zero divisor.
    fn div(self, other: &'a Int) -> Int {
        debug_assert!(self.well_formed());
        debug_assert!(other.well_formed());
        if other.sign() == 0 {
            ll::divide_by_zero();
        }
        if other.abs_size() != 1 {
            // Multi-limb divisor: take the quotient part of divmod.
            return self.divmod(other).0;
        }
        // Single-limb divisor: divide by the limb, then restore the sign.
        let divisor = *other.limbs();
        let result_sign = self.sign() * other.sign();
        let mut quotient = self.clone() / divisor;
        quotient.size = quotient.abs_size() * result_sign;
        quotient
    }
}
impl<'a> Div<&'a Int> for Int {
    type Output = Int;

    /// Delegates to the by-reference division.
    #[inline]
    fn div(self, other: &'a Int) -> Int {
        (&self).div(other)
    }
}
impl<'a> Div<Int> for &'a Int {
    type Output = Int;

    /// Delegates to the by-reference division.
    #[inline]
    fn div(self, other: Int) -> Int {
        self.div(&other)
    }
}
impl Div<Int> for Int {
    type Output = Int;

    /// Delegates to the by-reference division.
    #[inline]
    fn div(self, other: Int) -> Int {
        (&self).div(&other)
    }
}
impl<'a> DivAssign<&'a Int> for Int {
    /// Divides in place by a borrowed `Int`.
    #[inline]
    fn div_assign(&mut self, other: &'a Int) {
        *self = &*self / other;
    }
}
impl DivAssign<Int> for Int {
    /// Divides in place by an owned `Int`.
    #[inline]
    fn div_assign(&mut self, other: Int) {
        *self = &*self / other;
    }
}
impl Rem<Limb> for Int {
    type Output = Int;

    /// Consumes `self` and returns `self % other`, reusing the allocation.
    #[inline]
    fn rem(self, other: Limb) -> Int {
        let mut remainder = self;
        remainder %= other;
        remainder
    }
}
impl RemAssign<Limb> for Int {
    /// Computes `self % other` in place; the result keeps the sign of `self`.
    ///
    /// Panics (via `ll::divide_by_zero`) when `other` is zero.
    fn rem_assign(&mut self, other: Limb) {
        debug_assert!(self.well_formed());
        if other == 0 {
            ll::divide_by_zero();
        }
        // x % 1 == 0, 0 % n == 0
        if other == 1 || self.sign() == 0 {
            self.size = 0;
            return;
        }
        unsafe {
            let rem = ll::divrem_1(self.limbs_mut(), 0, self.limbs(), self.abs_size(), other);
            // Reuse the space from `self`, taking the sign from the numerator
            // Since `rem` has to satisfy `N = QD + R` and D is always positive,
            // `R` will always be the same sign as the numerator.
            *self.limbs_mut() = rem;
            let sign = self.sign();
            self.size = sign;
            // `rem` may be zero, in which case normalize collapses size to 0.
            self.normalize();
            if self.cap > 8 {
                // Shrink self, since it's at least 8 times bigger than necessary
                self.shrink_to_fit();
            }
        }
    }
}
impl DivRem<Limb> for Int {
    type Output = (Int, Limb);

    /// Divides `self` by `other`, returning `(quotient, remainder)`.
    ///
    /// The quotient keeps the sign of `self`; the remainder is a magnitude-only
    /// `Limb`, consistent with `N = Q*D + R` for a positive `D`.
    ///
    /// Panics (via `ll::divide_by_zero`) when `other` is zero.
    fn divrem(mut self, other: Limb) -> Self::Output {
        debug_assert!(self.well_formed());
        if other == 0 {
            ll::divide_by_zero();
        }
        // 0 / n == 0 rem 0
        if self.sign() == 0 {
            return (self, Limb(0));
        }
        // x / 1 == x rem 0. (BUGFIX: the previous code zeroed `self` here,
        // returning a quotient of 0 instead of x — that shortcut is only
        // correct for the remainder, as in `RemAssign`.)
        if other == 1 {
            return (self, Limb(0));
        }
        let rem =
            unsafe { ll::divrem_1(self.limbs_mut(), 0, self.limbs(), self.abs_size(), other) };
        // The quotient may have a zero top limb; trim it.
        self.normalize();
        (self, rem)
    }
}
// TODO: There's probably too much cloning happening here, need to figure out
// the best way of avoiding over-copying.
impl<'a, 'b> Rem<&'a Int> for &'b Int {
    type Output = Int;

    /// Computes `self % other`; the result takes the sign of `self`.
    /// Panics on a zero divisor.
    fn rem(self, other: &'a Int) -> Int {
        debug_assert!(self.well_formed());
        debug_assert!(other.well_formed());
        if other.sign() == 0 {
            ll::divide_by_zero();
        }
        if other.abs_size() == 1 {
            // Single-limb divisor: use the cheaper limb-remainder path.
            return self.clone() % *other.limbs();
        }
        self.divmod(other).1
    }
}
impl<'a> Rem<&'a Int> for Int {
    type Output = Int;

    /// Delegates to the by-reference remainder.
    #[inline]
    fn rem(self, other: &'a Int) -> Int {
        (&self).rem(other)
    }
}
impl<'a> Rem<Int> for &'a Int {
    type Output = Int;

    /// Delegates to the by-reference remainder.
    #[inline]
    fn rem(self, other: Int) -> Int {
        self.rem(&other)
    }
}
impl Rem<Int> for Int {
    type Output = Int;

    /// Delegates to the by-reference remainder.
    #[inline]
    fn rem(self, other: Int) -> Int {
        (&self).rem(&other)
    }
}
impl<'a, 'b> DivRem<&'a Int> for &'b Int {
type Output = (Int, Int);
#[inline]
fn divrem(self, other: &'a Int) -> (Int, Int) {
self.divmod(other)
}
}
impl RemAssign<Int> for Int {
    /// Replaces `self` with `self % other`.
    #[inline]
    fn rem_assign(&mut self, other: Int) {
        *self = &*self % other;
    }
}
impl<'a> RemAssign<&'a Int> for Int {
    /// Replaces `self` with `self % other`.
    #[inline]
    fn rem_assign(&mut self, other: &'a Int) {
        *self = &*self % other;
    }
}
impl Neg for Int {
    type Output = Int;

    /// Negates by flipping the sign carried in the `size` field; the limb
    /// data itself is untouched.
    #[inline]
    fn neg(mut self) -> Int {
        debug_assert!(self.well_formed());
        self.size = -self.size;
        self
    }
}
impl<'a> Neg for &'a Int {
    type Output = Int;

    /// Clones, then negates the clone.
    #[inline]
    fn neg(self) -> Int {
        -self.clone()
    }
}
impl ShlAssign<usize> for Int {
    /// Shifts the magnitude left by `cnt` bits in place; the sign is preserved.
    /// Whole-limb shifts are done by moving limbs, then the sub-limb remainder
    /// by a bitwise shift.
    #[inline]
    fn shl_assign(&mut self, mut cnt: usize) {
        debug_assert!(self.well_formed());
        // 0 << n == 0.
        if self.sign() == 0 {
            return;
        }
        if cnt >= Limb::BITS as usize {
            let extra_limbs = (cnt / Limb::BITS as usize) as u32;
            debug_assert!(extra_limbs >= 1);
            // Leave only the sub-limb part for the bitwise shift below.
            cnt = cnt % Limb::BITS as usize;
            let size = self.abs_size() as u32;
            // Extend for the extra limbs, then another one for any potential extra limbs
            self.ensure_capacity(extra_limbs + size + 1);
            unsafe {
                let ptr = self.limbs_uninit();
                let shift = ptr.offset(extra_limbs as isize);
                // Move existing limbs up (high-to-low copy avoids overlap issues),
                // then zero the vacated low limbs.
                ll::copy_decr(ptr.as_const(), shift, self.abs_size());
                ll::zero(ptr, extra_limbs as i32);
            }
            self.size += (extra_limbs as i32) * self.sign();
        }
        debug_assert!(cnt < Limb::BITS as usize);
        if cnt == 0 {
            return;
        }
        let size = self.abs_size();
        unsafe {
            let ptr = self.limbs_mut();
            // In-place sub-limb shift; the bits shifted out the top become `c`.
            let c = ll::shl(ptr, ptr.as_const(), size, cnt as u32);
            if c > 0 {
                self.push(c);
            }
        }
    }
}
impl<'a> Shl<usize> for &'a Int {
    type Output = Int;

    /// Returns `self << cnt` on a clone.
    #[inline]
    fn shl(self, cnt: usize) -> Int {
        let mut shifted = self.clone();
        shifted <<= cnt;
        shifted
    }
}
impl Shl<usize> for Int {
    type Output = Int;

    /// Shifts left in place and returns `self`.
    #[inline]
    fn shl(self, other: usize) -> Int {
        let mut shifted = self;
        shifted <<= other;
        shifted
    }
}
impl ShrAssign<usize> for Int {
    /// Shifts the magnitude right by `cnt` bits in place; the sign is
    /// preserved (this shifts the magnitude, not a two's-complement pattern).
    #[inline]
    fn shr_assign(&mut self, mut cnt: usize) {
        debug_assert!(self.well_formed());
        // 0 >> n == 0.
        if self.sign() == 0 {
            return;
        }
        if cnt >= Limb::BITS as usize {
            let removed_limbs = (cnt / Limb::BITS as usize) as u32;
            let size = self.abs_size();
            // Shifting out every limb leaves zero.
            if removed_limbs as i32 >= size {
                *self = Int::zero();
                return;
            }
            debug_assert!(removed_limbs > 0);
            // Leave only the sub-limb part for the bitwise shift below.
            cnt = cnt % Limb::BITS as usize;
            unsafe {
                let ptr = self.limbs_mut();
                let shift = ptr.offset(removed_limbs as isize);
                let new_size = size - removed_limbs as i32;
                // Shift down a whole number of limbs
                ll::copy_incr(shift.as_const(), ptr, new_size);
                // Zero out the high limbs
                ll::zero(ptr.offset(new_size as isize), removed_limbs as i32);
                self.size = new_size * self.sign();
            }
        }
        debug_assert!(cnt < Limb::BITS as usize);
        if cnt == 0 {
            return;
        }
        let size = self.abs_size();
        unsafe {
            let ptr = self.limbs_mut();
            // In-place sub-limb shift; the top limb may become zero.
            ll::shr(ptr, ptr.as_const(), size, cnt as u32);
            self.normalize();
        }
    }
}
impl<'a> Shr<usize> for &'a Int {
    type Output = Int;

    /// Returns `self >> other` on a clone.
    #[inline]
    fn shr(self, other: usize) -> Int {
        let mut shifted = self.clone();
        shifted >>= other;
        shifted
    }
}
impl Shr<usize> for Int {
    type Output = Int;

    /// Shifts right in place and returns `self`.
    #[inline]
    fn shr(self, other: usize) -> Int {
        let mut shifted = self;
        shifted >>= other;
        shifted
    }
}
/// Selects which bitwise operation the shared helpers (`bitop_ref`,
/// `bitop_neg`, `bitop_limb`) perform.
#[derive(Copy, Clone)]
enum BitOp {
    And,
    Or,
    Xor,
}
/// Applies `op` to `this` in place using `other`, without cloning.
///
/// Returns `Err(())` when the operands require the two's-complement fallback
/// (`bitop_neg`): either side is negative (beyond the single-limb fast path),
/// or `other` is a negative single limb whose magnitude has the high bit set.
fn bitop_ref(this: &mut Int, other: &Int, op: BitOp) -> Result<(), ()> {
    let this_sign = this.sign();
    let other_sign = other.sign();
    // if other is small, we can fall back to something that'll be
    // more efficient (especially if other is negative)
    if other.abs_size() <= 1 {
        // the magnitude of the limb
        let mut limb = other.to_single_limb();
        if other_sign < 0 {
            if limb.high_bit_set() {
                // the limb is too large to be put into two's
                // complement form (NB. that if other is positive, we
                // don't need to worry about two's complement, since
                // bitop_limb can handle unsigned Limbs)
                return Err(());
            } else {
                limb = -limb;
            }
        }
        // as mentioned above, we only have to say that `limb` is
        // signed when it is actually negative
        bitop_limb(this, limb, other_sign < 0, op);
        return Ok(());
    }
    // From here on both operands must be non-negative for the in-place path.
    if this_sign < 0 || other_sign < 0 {
        return Err(());
    }
    unsafe {
        let other_ptr = other.limbs();
        let min_size = std::cmp::min(this.abs_size(), other.abs_size());
        let max_size = std::cmp::max(this.abs_size(), other.abs_size());
        match op {
            BitOp::And => {
                // AND truncates to the shorter operand: high limbs AND 0 == 0.
                let this_ptr = this.limbs_mut();
                ll::and_n(this_ptr, this_ptr.as_const(), other_ptr, min_size);
                this.size = min_size;
            }
            BitOp::Or => {
                // OR extends to the longer operand; copy `other`'s high limbs if needed.
                this.ensure_capacity(max_size as u32);
                let this_ptr = this.limbs_uninit();
                ll::or_n(this_ptr, this_ptr.as_const(), other_ptr, min_size);
                if this.abs_size() < max_size {
                    ll::copy_rest(other_ptr, this_ptr, max_size, min_size);
                }
                this.size = max_size;
            }
            BitOp::Xor => {
                // XOR extends to the longer operand; high limbs XOR 0 are copied through.
                this.ensure_capacity(max_size as u32);
                let this_ptr = this.limbs_uninit();
                ll::xor_n(this_ptr, this_ptr.as_const(), other_ptr, min_size);
                if this.abs_size() < max_size {
                    ll::copy_rest(other_ptr, this_ptr, max_size, min_size);
                }
                this.size = max_size;
            }
        }
    }
    this.normalize();
    Ok(())
}
// one of the inputs is negative. The answer is as if `Int` was stored
// in two's complement (in infinite precision), which means converting
// to that format, doing the operation, and then converting back out
// of it, if necessary.
/// Applies `op` to `a` and `b`, at least one of which is negative, by
/// converting negatives into (finite) two's-complement form, operating, and
/// converting back. Consumes both operands and reuses the larger one.
fn bitop_neg(mut a: Int, mut b: Int, op: BitOp) -> Int {
    debug_assert!(a.sign() < 0 || b.sign() < 0);
    let a_sign = a.sign();
    let b_sign = b.sign();
    if a_sign < 0 {
        a.negate_twos_complement();
    }
    if b_sign < 0 {
        b.negate_twos_complement();
    }
    // Ensure `a` is the operand with at least as many limbs.
    let (mut a, b, a_sign, b_sign) = if a.abs_size() < b.abs_size() {
        (b, a, b_sign, a_sign)
    } else {
        (a, b, a_sign, b_sign)
    };
    unsafe {
        let a_ptr = a.limbs_mut();
        let b_ptr = b.limbs();
        let min_size = b.abs_size();
        let max_size = a.abs_size();
        // `neg_result`: whether the mathematical result is negative (and so must
        // be converted back out of two's complement).
        // `use_max_size`: whether the result spans all of `a`'s limbs; the high
        // limbs of the shorter negative operand are an implicit sign extension.
        let (neg_result, use_max_size) = match op {
            BitOp::And => {
                ll::and_n(a_ptr, a_ptr.as_const(), b_ptr, min_size);
                (a_sign < 0 && b_sign < 0, b_sign < 0)
            }
            BitOp::Or => {
                ll::or_n(a_ptr, a_ptr.as_const(), b_ptr, min_size);
                // (no need to copy trailing, a is longer than b)
                (a_sign < 0 || b_sign < 0, b_sign >= 0)
            }
            BitOp::Xor => {
                ll::xor_n(a_ptr, a_ptr.as_const(), b_ptr, min_size);
                if b_sign < 0 {
                    // XOR with b's implicit sign-extension ones complements a's high limbs.
                    let ptr = a_ptr.offset(min_size as isize);
                    ll::not(ptr, ptr.as_const(), max_size - min_size);
                }
                ((a_sign < 0) ^ (b_sign < 0), true)
            }
        };
        a.size = if use_max_size { max_size } else { min_size };
        if neg_result {
            a.negate_twos_complement();
        }
    }
    a.normalize();
    return a;
}
// do a bit operation on `a` and `b`.
//
// `signed` indicates whether to interpret `b` as two's
// complement or not (i.e. if it is true, then `1` is still `1`, and
// `-1` is `!0`)
/// Applies `op` to `a` and the single limb `b` in place.
///
/// When `signed` is true, `b` is interpreted as a two's-complement value
/// (so a limb with its high bit set denotes a negative number); otherwise
/// `b` is plain unsigned.
fn bitop_limb(a: &mut Int, b: Limb, signed: bool, op: BitOp) {
    let a_sign = a.sign();
    let b_negative = signed && b.high_bit_set();
    let b_sign = if b_negative {
        -1
    } else if b == 0 {
        0
    } else {
        1
    };
    // Work on `a` in two's-complement form when it is negative.
    if a_sign < 0 {
        a.negate_twos_complement();
    }
    // b is already in two's complement if it is negative
    if a_sign == 0 {
        match op {
            // 0 ^ x == 0 | x == x
            BitOp::Or | BitOp::Xor => {
                if b_sign < 0 {
                    // Store the magnitude, then flip the sign.
                    a.push(-b);
                    a.negate();
                } else {
                    a.push(b)
                }
            }
            // 0 & x == 0
            BitOp::And => {}
        }
    } else {
        unsafe {
            let mut a_ptr = a.limbs_mut();
            let min_size = if b == 0 { 0 } else { 1 };
            let max_size = a.abs_size();
            // we've got to have space to write data to this pointer
            debug_assert!(max_size >= 1);
            // Same bookkeeping as `bitop_neg`: `neg_result` marks a negative
            // mathematical result, `use_max_size` whether all of `a`'s limbs count.
            let (neg_result, use_max_size) = match op {
                BitOp::And => {
                    *a_ptr = *a_ptr & b;
                    (a_sign < 0 && b_sign < 0, b_sign < 0)
                }
                BitOp::Or => {
                    *a_ptr = *a_ptr | b;
                    (a_sign < 0 || b_sign < 0, b_sign >= 0)
                }
                BitOp::Xor => {
                    *a_ptr = *a_ptr ^ b;
                    if b_sign < 0 {
                        // XOR with b's implicit sign extension complements the high limbs.
                        let ptr = a_ptr.offset(min_size as isize);
                        ll::not(ptr, ptr.as_const(), max_size - min_size);
                    }
                    ((a_sign < 0) ^ (b_sign < 0), true)
                }
            };
            a.size = if use_max_size { max_size } else { min_size };
            if neg_result {
                a.negate_twos_complement();
            }
        }
    }
    a.normalize();
}
// Dropped the original's unused `<'a>` lifetime parameter, matching the
// sibling `BitOr<Limb>` impl.
impl BitAnd<Limb> for Int {
    type Output = Int;

    /// Bitwise AND with an unsigned limb, reusing `self`'s allocation.
    fn bitand(mut self, other: Limb) -> Int {
        self &= other;
        self
    }
}
impl BitAndAssign<Limb> for Int {
    /// Bitwise AND with an unsigned limb (`signed == false`), in place.
    fn bitand_assign(&mut self, other: Limb) {
        bitop_limb(self, other, false, BitOp::And)
    }
}
impl<'a> BitAnd<&'a Int> for Int {
    type Output = Int;

    /// Bitwise AND; the in-place path covers non-negative operands, negative
    /// ones fall back to the two's-complement helper.
    fn bitand(mut self, other: &'a Int) -> Int {
        match bitop_ref(&mut self, other, BitOp::And) {
            Ok(()) => self,
            Err(()) => bitop_neg(self, other.clone(), BitOp::And),
        }
    }
}
impl<'a> BitAnd<Int> for &'a Int {
    type Output = Int;

    /// AND is commutative; delegate to `Int & &Int`.
    #[inline]
    fn bitand(self, other: Int) -> Int {
        other & self
    }
}
impl<'a, 'b> BitAnd<&'a Int> for &'b Int {
    type Output = Int;

    /// Clones `self`, then delegates to `Int & &Int`.
    #[inline]
    fn bitand(self, other: &'a Int) -> Int {
        self.clone() & other
    }
}
impl BitAnd<Int> for Int {
    type Output = Int;

    /// Bitwise AND of two owned `Int`s; negatives take the fallback path.
    fn bitand(mut self, other: Int) -> Int {
        match bitop_ref(&mut self, &other, BitOp::And) {
            Ok(()) => self,
            Err(()) => bitop_neg(self, other, BitOp::And),
        }
    }
}
impl BitAndAssign<Int> for Int {
    /// In-place AND; falls back to a full recompute when an operand is negative.
    #[inline]
    fn bitand_assign(&mut self, other: Int) {
        if bitop_ref(self, &other, BitOp::And).is_err() {
            *self = &*self & other;
        }
    }
}
impl<'a> BitAndAssign<&'a Int> for Int {
    /// In-place AND; falls back to a full recompute when an operand is negative.
    #[inline]
    fn bitand_assign(&mut self, other: &'a Int) {
        if bitop_ref(self, other, BitOp::And).is_err() {
            *self = &*self & other;
        }
    }
}
impl BitOr<Limb> for Int {
    type Output = Int;

    /// Bitwise OR with an unsigned limb, reusing `self`'s allocation.
    fn bitor(self, other: Limb) -> Int {
        let mut result = self;
        result |= other;
        result
    }
}
impl BitOrAssign<Limb> for Int {
    /// Bitwise OR with an unsigned limb (`signed == false`), in place.
    fn bitor_assign(&mut self, other: Limb) {
        bitop_limb(self, other, false, BitOp::Or)
    }
}
impl<'a> BitOr<&'a Int> for Int {
    type Output = Int;

    /// Bitwise OR; the in-place path covers non-negative operands, negative
    /// ones fall back to the two's-complement helper.
    fn bitor(mut self, other: &'a Int) -> Int {
        match bitop_ref(&mut self, other, BitOp::Or) {
            Ok(()) => self,
            Err(()) => bitop_neg(self, other.clone(), BitOp::Or),
        }
    }
}
impl<'a> BitOr<Int> for &'a Int {
    type Output = Int;

    /// OR is commutative; delegate to `Int | &Int`.
    #[inline]
    fn bitor(self, other: Int) -> Int {
        other | self
    }
}
impl<'a, 'b> BitOr<&'a Int> for &'b Int {
    type Output = Int;

    /// Clones `self`, then delegates to `Int | &Int`.
    #[inline]
    fn bitor(self, other: &'a Int) -> Int {
        self.clone() | other
    }
}
impl BitOr<Int> for Int {
    type Output = Int;

    /// Bitwise OR of two owned `Int`s; negatives take the fallback path.
    #[inline]
    fn bitor(mut self, other: Int) -> Int {
        match bitop_ref(&mut self, &other, BitOp::Or) {
            Ok(()) => self,
            Err(()) => bitop_neg(self, other, BitOp::Or),
        }
    }
}
impl BitOrAssign<Int> for Int {
    /// In-place OR with an owned `Int`.
    #[inline]
    fn bitor_assign(&mut self, other: Int) {
        match bitop_ref(self, &other, BitOp::Or) {
            Ok(_) => {}
            Err(_) => {
                // Slow path: recompute via the binary operator.
                let recomputed = &*self | other;
                *self = recomputed;
            }
        }
    }
}
impl<'a> BitOrAssign<&'a Int> for Int {
    /// In-place OR with a borrowed `Int`.
    #[inline]
    fn bitor_assign(&mut self, other: &'a Int) {
        // `other` is already `&Int`; pass it through directly. The previous
        // code passed `&other` (a `&&Int`) that only compiled thanks to
        // deref coercion — this now matches the BitAndAssign<&Int> impl.
        if let Err(_) = bitop_ref(self, other, BitOp::Or) {
            let res = &*self | other;
            *self = res;
        }
    }
}
// The original impl declared an unused, unconstrained `<'a>` lifetime
// parameter; the matching BitAnd<Limb>/BitOr<Limb> impls have none, so it
// is dropped here for consistency.
impl BitXor<Limb> for Int {
    type Output = Int;

    /// Consuming XOR with a single limb; delegates to `^=`.
    fn bitxor(mut self, other: Limb) -> Int {
        self ^= other;
        self
    }
}
impl BitXorAssign<Limb> for Int {
    // In-place XOR with a single limb via the shared limb bit-op helper.
    // NOTE(review): `false` presumably means "no sign extension" — confirm
    // against bitop_limb.
    fn bitxor_assign(&mut self, other: Limb) {
        bitop_limb(self, other, false, BitOp::Xor)
    }
}
impl<'a> BitXor<&'a Int> for Int {
    type Output = Int;

    /// XOR with a borrowed `Int`, reusing `self`'s buffer when possible.
    fn bitxor(mut self, other: &'a Int) -> Int {
        match bitop_ref(&mut self, other, BitOp::Xor) {
            Ok(_) => self,
            // Fast path declined: the sign-aware routine needs an owned copy.
            Err(_) => bitop_neg(self, other.clone(), BitOp::Xor),
        }
    }
}
impl<'a> BitXor<Int> for &'a Int {
    type Output = Int;

    /// XOR is commutative, so flip the operands and reuse the owned side.
    #[inline]
    fn bitxor(self, other: Int) -> Int {
        other ^ self
    }
}
impl<'a, 'b> BitXor<&'a Int> for &'b Int {
    type Output = Int;

    /// Both operands borrowed: clone the left-hand side, then XOR in place.
    #[inline]
    fn bitxor(self, other: &'a Int) -> Int {
        self.clone() ^ other
    }
}
impl BitXor<Int> for Int {
    type Output = Int;

    /// XOR of two owned values; keeps `self`'s buffer on the fast path.
    #[inline]
    fn bitxor(mut self, other: Int) -> Int {
        match bitop_ref(&mut self, &other, BitOp::Xor) {
            Ok(_) => self,
            Err(_) => bitop_neg(self, other, BitOp::Xor),
        }
    }
}
impl BitXorAssign<Int> for Int {
    /// In-place XOR with an owned `Int`.
    #[inline]
    fn bitxor_assign(&mut self, other: Int) {
        match bitop_ref(self, &other, BitOp::Xor) {
            Ok(_) => {}
            Err(_) => {
                // Slow path: recompute via the binary operator.
                let recomputed = &*self ^ other;
                *self = recomputed;
            }
        }
    }
}
impl<'a> BitXorAssign<&'a Int> for Int {
    /// In-place XOR with a borrowed `Int`.
    #[inline]
    fn bitxor_assign(&mut self, other: &'a Int) {
        // `other` is already `&Int`; pass it through directly. The previous
        // code passed `&other` (a `&&Int`) that only compiled thanks to
        // deref coercion — this now matches the BitAndAssign<&Int> impl.
        if let Err(_) = bitop_ref(self, other, BitOp::Xor) {
            let res = &*self ^ other;
            *self = res;
        }
    }
}
macro_rules! impl_arith_prim (
(signed $t:ty) => (
// Limbs are unsigned, so make sure we account for the sign
// when $t is signed
impl Add<$t> for Int {
type Output = Int;
#[inline]
fn add(self, other: $t) -> Int {
if other == 0 {
return self;
}
if other < 0 {
return self - Limb(other.abs() as BaseInt);
}
return self + Limb(other as BaseInt);
}
}
impl AddAssign<$t> for Int {
#[inline]
fn add_assign(&mut self, other: $t) {
if other < 0 {
*self -= Limb(other.abs() as BaseInt);
} else if other > 0 {
*self += Limb(other as BaseInt);
}
}
}
impl Sub<$t> for Int {
type Output = Int;
#[inline]
fn sub(self, other: $t) -> Int {
if other == 0 {
return self;
}
if other < 0 {
return self + Limb(other.abs() as BaseInt);
}
return self - Limb(other as BaseInt);
}
}
impl SubAssign<$t> for Int {
#[inline]
fn sub_assign(&mut self, other: $t) {
if other < 0 {
*self += Limb(other.abs() as BaseInt);
} else if other > 0 {
*self -= Limb(other as BaseInt);
}
}
}
impl Mul<$t> for Int {
type Output = Int;
#[inline]
fn mul(mut self, other: $t) -> Int {
self *= other;
self
}
}
impl MulAssign<$t> for Int {
#[inline]
fn mul_assign(&mut self, other: $t) {
if other == 0 {
self.size = 0;
} else if other == -1 {
self.negate();
} else if other < 0 {
self.negate();
*self *= Limb(other.abs() as BaseInt);
} else {
*self *= Limb(other as BaseInt);
}
}
}
impl DivAssign<$t> for Int {
#[inline]
fn div_assign(&mut self, other: $t) {
if other == 0 {
ll::divide_by_zero();
}
if other == 1 || self.sign() == 0 {
return;
}
if other == -1 {
self.negate();
} else if other < 0 {
self.negate();
*self /= Limb(other.abs() as BaseInt);
} else {
*self /= Limb(other as BaseInt);
}
}
}
impl Div<$t> for Int {
type Output = Int;
#[inline]
fn div(mut self, other: $t) -> Int {
self /= other;
self
}
}
impl RemAssign<$t> for Int {
#[inline]
fn rem_assign(&mut self, other: $t) {
let res = &*self % other;
*self = res;
}
}
impl Rem<$t> for Int {
type Output = Int;
#[inline]
fn rem(mut self, other: $t) -> Int {
if other == 0 {
ll::divide_by_zero();
}
if other == 1 ||other == -1 || self.sign() == 0 {
self.size = 0;
return self;
}
return self % Limb(other.abs() as BaseInt);
}
}
impl DivRem<$t> for Int {
type Output = (Int, $t);
#[inline]
fn divrem(mut self, other: $t) -> Self::Output {
if other == 0 {
ll::divide_by_zero();
}
let sign = self.sign();
let (q, r) = {
if other == 1 || sign == 0 {
return (self, 0);
} else if other == -1 {
self.negate();
return (self, 0);
} else if other < 0 {
self.negate();
self.divrem(Limb(other.abs() as BaseInt))
} else {
self.divrem(Limb(other as BaseInt))
}
};
let r = (r.0 as $t).checked_mul(sign).unwrap();
debug_assert!(sign > 0 || r <= 0);
debug_assert!(sign < 0 || r >= 0);
debug_assert!(r.abs() < other.abs());
(q, r)
}
}
impl BitAndAssign<$t> for Int {
#[inline]
fn bitand_assign(&mut self, other: $t) {
bitop_limb(self, Limb(other as BaseInt), true, BitOp::And)
}
}
impl BitOrAssign<$t> for Int {
#[inline]
fn bitor_assign(&mut self, other: $t) {
bitop_limb(self, Limb(other as BaseInt), true, BitOp::Or)
}
}
impl BitXorAssign<$t> for Int {
#[inline]
fn bitxor_assign(&mut self, other: $t) {
bitop_limb(self, Limb(other as BaseInt), true, BitOp::Xor)
}
}
impl_arith_prim!(common $t);
);
(unsigned $t:ty) => (
impl Add<$t> for Int {
type Output = Int;
#[inline]
fn add(self, other: $t) -> Int {
if other == 0 {
return self;
}
return self + Limb(other as BaseInt);
}
}
impl AddAssign<$t> for Int {
#[inline]
fn add_assign(&mut self, other: $t) {
if other != 0 {
*self += Limb(other as BaseInt);
}
}
}
impl Sub<$t> for Int {
type Output = Int;
#[inline]
fn sub(self, other: $t) -> Int {
if other == 0 {
return self;
}
return self - Limb(other as BaseInt);
}
}
impl SubAssign<$t> for Int {
#[inline]
fn sub_assign(&mut self, other: $t) {
if other != 0 {
*self -= Limb(other as BaseInt);
}
}
}
impl Mul<$t> for Int {
type Output = Int;
#[inline]
fn mul(mut self, other: $t) -> Int {
if other == 0 {
self.size = 0;
return self;
}
if other == 1 || self.sign() == 0 {
return self;
}
return self * Limb(other as BaseInt);
}
}
impl MulAssign<$t> for Int {
#[inline]
fn mul_assign(&mut self, other: $t) {
if other == 0 {
self.size = 0;
} else if other > 1 && self.sign() != 0 {
*self *= Limb(other as BaseInt);
}
}
}
impl Div<$t> for Int {
type Output = Int;
#[inline]
fn div(self, other: $t) -> Int {
if other == 0 {
ll::divide_by_zero();
}
if other == 1 || self.sign() == 0 {
return self;
}
return self / Limb(other as BaseInt);
}
}
impl DivAssign<$t> for Int {
#[inline]
fn div_assign(&mut self, other: $t) {
if other == 0 {
ll::divide_by_zero();
} else if other > 1 && self.sign() != 0 {
*self /= Limb(other as BaseInt);
}
}
}
impl Rem<$t> for Int {
type Output = Int;
#[inline]
fn rem(mut self, other: $t) -> Int {
if other == 0 {
ll::divide_by_zero();
}
if other == 1 || self.sign() == 0 {
self.size = 0;
return self;
}
return self % Limb(other as BaseInt);
}
}
impl RemAssign<$t> for Int {
#[inline]
fn rem_assign(&mut self, other: $t) {
*self %= Limb(other as BaseInt);
}
}
impl DivRem<$t> for Int {
type Output = (Int, $t);
#[inline]
fn divrem(self, other: $t) -> Self::Output {
let other = other as BaseInt;
let (q, r) = self.divrem(Limb(other));
debug_assert!(r < other);
(q, r.0 as $t)
}
}
impl BitAndAssign<$t> for Int {
#[inline]
fn bitand_assign(&mut self, other: $t) {
bitop_limb(self, Limb(other as BaseInt), false, BitOp::And)
}
}
impl BitOrAssign<$t> for Int {
#[inline]
fn bitor_assign(&mut self, other: $t) {
bitop_limb(self, Limb(other as BaseInt), false, BitOp::Or)
}
}
impl BitXorAssign<$t> for Int {
#[inline]
fn bitxor_assign(&mut self, other: $t) {
bitop_limb(self, Limb(other as BaseInt), false, BitOp::Xor)
}
}
impl_arith_prim!(common $t);
);
(common $t:ty) => (
// Common impls, these should just forward to the above
// impls
impl<'a> Add<$t> for &'a Int {
type Output = Int;
#[inline]
fn add(self, other: $t) -> Int {
self.clone() + other
}
}
impl Add<Int> for $t {
type Output = Int;
#[inline]
fn add(self, other: Int) -> Int {
return other + self;
}
}
impl<'a> Add<&'a Int> for $t {
type Output = Int;
#[inline]
fn add(self, other: &'a Int) -> Int {
other.clone() + self
}
}
impl<'a> Sub<$t> for &'a Int {
type Output = Int;
#[inline]
fn sub(self, other: $t) -> Int {
self.clone() - other
}
}
impl Sub<Int> for $t {
type Output = Int;
#[inline]
fn sub(self, other: Int) -> Int {
-other + self
}
}
impl<'a> Sub<&'a Int> for $t {
type Output = Int;
#[inline]
fn sub(self, other: &'a Int) -> Int {
-(other - self)
}
}
impl<'a> Mul<$t> for &'a Int {
type Output = Int;
#[inline]
fn mul(self, other: $t) -> Int {
return self.clone() * other;
}
}
impl Mul<Int> for $t {
type Output = Int;
#[inline]
fn mul(self, other: Int) -> Int {
other * self
}
}
impl<'a> Mul<&'a Int> for $t {
type Output = Int;
#[inline]
fn mul(self, other: &'a Int) -> Int {
// Check for zero here to avoid cloning unnecessarily
if self == 0 { return Int::zero() };
other.clone() * self
}
}
impl<'a> Div<$t> for &'a Int {
type Output = Int;
#[inline]
fn div(self, other: $t) -> Int {
if other == 0 {
ll::divide_by_zero();
}
return self.clone() / other;
}
}
impl Div<Int> for $t {
type Output = Int;
#[inline]
fn div(self, mut other: Int) -> Int {
if self == 0 {
other.size = 0;
return other;
}
if other.sign() == 0 {
ll::divide_by_zero();
}
// There's probably a better way of doing this, but
// I don't see n / <bigint> being common in code
Int::from(self) / other
}
}
impl<'a> Div<&'a Int> for $t {
type Output = Int;
#[inline]
fn div(self, other: &'a Int) -> Int {
if self == 0 { return Int::zero() };
if other.sign() == 0 {
ll::divide_by_zero();
}
self / other.clone()
}
}
impl<'a> Rem<$t> for &'a Int {
type Output = Int;
#[inline]
fn rem(self, other: $t) -> Int {
if other == 0 {
ll::divide_by_zero();
}
if self.sign() == 0 || other == 1 {
return Int::zero()
};
return self.clone() % other;
}
}
impl Rem<Int> for $t {
type Output = Int;
#[inline]
fn rem(self, mut other: Int) -> Int {
if self == 0 || other == 1 {
other.size = 0;
return other;
}
if other.sign() == 0 {
ll::divide_by_zero();
}
// There's probably a better way of doing this, but
// I don't see n % <bigint> being common in code
Int::from(self) % other
}
}
impl<'a> Rem<&'a Int> for $t {
type Output = Int;
#[inline]
fn rem(self, other: &'a Int) -> Int {
if self == 0 { return Int::zero() };
if other.sign() == 0 {
ll::divide_by_zero();
}
self % other.clone()
}
}
impl BitAnd<$t> for Int {
type Output = Int;
#[inline]
fn bitand(mut self, other: $t) -> Int {
self &= other;
self
}
}
impl<'a> BitAnd<$t> for &'a Int {
type Output = Int;
#[inline]
fn bitand(self, other: $t) -> Int {
self.clone() & other
}
}
impl BitAnd<Int> for $t {
type Output = Int;
#[inline]
fn bitand(self, other: Int) -> Int {
other & self
}
}
impl<'a> BitAnd<&'a Int> for $t {
type Output = Int;
#[inline]
fn bitand(self, other: &'a Int) -> Int {
other & self
}
}
impl BitOr<$t> for Int {
type Output = Int;
#[inline]
fn bitor(mut self, other: $t) -> Int {
self |= other;
self
}
}
impl<'a> BitOr<$t> for &'a Int {
type Output = Int;
#[inline]
fn bitor(self, other: $t) -> Int {
self.clone() | other
}
}
impl BitOr<Int> for $t {
type Output = Int;
#[inline]
fn bitor(self, other: Int) -> Int {
other | self
}
}
impl<'a> BitOr<&'a Int> for $t {
type Output = Int;
#[inline]
fn bitor(self, other: &'a Int) -> Int {
other | self
}
}
impl BitXor<$t> for Int {
type Output = Int;
#[inline]
fn bitxor(mut self, other: $t) -> Int {
self ^= other;
self
}
}
impl<'a> BitXor<$t> for &'a Int {
type Output = Int;
#[inline]
fn bitxor(self, other: $t) -> Int {
self.clone() ^ other
}
}
impl BitXor<Int> for $t {
type Output = Int;
#[inline]
fn bitxor(self, other: Int) -> Int {
other ^ self
}
}
impl<'a> BitXor<&'a Int> for $t {
type Output = Int;
#[inline]
fn bitxor(self, other: &'a Int) -> Int {
other ^ self
}
}
)
);
// Implement for `i32` (the fallback integer type), `usize` and the base
// limb integer type. No more than this, because the rest of Rust doesn't do
// much implicit coercion between integer types, but allocating an entire
// multiple-precision `Int` just to compute `+ 1` seems silly.
impl_arith_prim!(signed i32);
impl_arith_prim!(unsigned usize);
impl_arith_prim!(unsigned BaseInt);
impl PartialEq<i32> for Int {
    // Equality against an `i32`, comparing sign first, then magnitude.
    #[inline]
    fn eq(&self, &other: &i32) -> bool {
        let sign = self.sign();
        // equals zero: if either side is zero they are equal exactly when
        // both are; `other == sign` encodes that (sign is 0 here, or other is)
        if sign == 0 || other == 0 {
            return other == sign;
        }
        // Differing signs
        if sign < 0 && other > 0 || sign > 0 && other < 0 {
            return false;
        }
        // We can't fall back to the `== Limb` impl when self is negative
        // since it'll fail because of signs
        // NOTE(review): `other.abs()` panics in debug builds when
        // other == i32::MIN — confirm whether that case can reach here.
        if sign < 0 {
            if self.abs_size() > 1 {
                return false;
            }
            return *self.limbs() == (other.abs() as BaseInt);
        }
        self.eq(&Limb(other.abs() as BaseInt))
    }
}
impl PartialEq<Int> for i32 {
    // Equality is symmetric: delegate to `Int: PartialEq<i32>`.
    #[inline]
    fn eq(&self, other: &Int) -> bool {
        other.eq(self)
    }
}
impl PartialOrd<i32> for Int {
    // Ordering against an `i32`: compare signs first, then magnitudes.
    #[inline]
    fn partial_cmp(&self, &other: &i32) -> Option<Ordering> {
        let self_sign = self.sign();
        // Reduce `other` to its sign (-1, 0, 1) for the cheap comparisons.
        let other_sign = if other < 0 {
            -1
        } else if other > 0 {
            1
        } else {
            0
        };
        // Both are equal
        if self_sign == 0 && other_sign == 0 {
            return Some(Ordering::Equal);
        }
        let ord = if self_sign > other_sign {
            Ordering::Greater
        } else if self_sign < other_sign {
            Ordering::Less
        } else {
            // Now both signs are the same, and non-zero
            if self_sign < 0 {
                // Negative side: more than one limb means the magnitude
                // exceeds any i32, so self < other; otherwise compare
                // magnitudes and flip the result for the negative sign.
                if self.abs_size() > 1 {
                    Ordering::Less
                } else {
                    self.to_single_limb()
                        .cmp(&Limb(other.abs() as BaseInt))
                        .reverse()
                }
            } else {
                // Positive side: delegate to the Limb comparison.
                return self.partial_cmp(&Limb(other.abs() as BaseInt));
            }
        };
        Some(ord)
    }
}
impl PartialOrd<Int> for i32 {
#[inline]
fn partial_cmp(&self, other: &Int) -> Option<Ordering> {
other.partial_cmp(self).map(|o| o.reverse())
}
}
impl PartialEq<usize> for Int {
    /// Compare against a `usize` through a single-limb value.
    #[inline]
    fn eq(&self, &other: &usize) -> bool {
        self.eq(&Limb(other as BaseInt))
    }
}
impl PartialEq<Int> for usize {
    // Equality is symmetric: delegate to `Int: PartialEq<usize>`.
    #[inline]
    fn eq(&self, other: &Int) -> bool {
        other.eq(self)
    }
}
impl PartialOrd<usize> for Int {
    // Ordering against a `usize` through a single-limb value.
    #[inline]
    fn partial_cmp(&self, &other: &usize) -> Option<Ordering> {
        self.partial_cmp(&Limb(other as BaseInt))
    }
}
impl PartialOrd<Int> for usize {
    // Delegate to the `Limb: PartialOrd<Int>` comparison.
    #[inline]
    fn partial_cmp(&self, other: &Int) -> Option<Ordering> {
        Limb(*self as BaseInt).partial_cmp(other)
    }
}
// Largest magnitude that fits in a single limb: all ones in `Limb::BITS` bits
// (equals u64::MAX when limbs are 64 bits wide).
const MAX_LIMB: u64 = !0 >> (64 - Limb::BITS);
// do a sign-magnitude comparison
/// Returns true iff `x` equals the 64-bit value given in sign-magnitude
/// form (`mag`, `neg`).
fn eq_64(x: &Int, mag: u64, neg: bool) -> bool {
    let sign = if mag == 0 {
        0
    } else if neg {
        -1
    } else {
        1
    };
    if x.sign() != sign {
        return false;
    } else if mag == 0 {
        // we're guaranteed to have x == 0 since the signs match
        return true;
    }
    let abs_size = x.abs_size();
    debug_assert!(abs_size >= 1);
    let ptr = x.limbs();
    let lo_limb = *ptr;
    // BUGFIX: was `mag < MAX_LIMB`, which sent a magnitude of exactly
    // MAX_LIMB down the two-limb path: with 64-bit limbs, `x == u64::MAX`
    // then tripped the `Limb::BITS == 32` assertion; with 32-bit limbs it
    // wrongly returned false for a value that fits in one limb.
    if mag <= MAX_LIMB {
        abs_size == 1 && lo_limb.0 == mag as BaseInt
    } else {
        // we can only get here when Limbs are small, and the Int
        // is large
        assert_eq!(Limb::BITS, 32);
        if abs_size == 2 {
            let hi_limb = unsafe { *ptr.offset(1) };
            hi_limb.0 == (mag >> 32) as BaseInt && lo_limb.0 == mag as BaseInt
        } else {
            false
        }
    }
}
/// Orders `x` against the 64-bit value given in sign-magnitude form
/// (`mag`, `neg`).
fn cmp_64(x: &Int, mag: u64, neg: bool) -> Ordering {
    if mag == 0 {
        // Comparing against zero reduces to the sign of x.
        return x.sign().cmp(&0);
    }
    let size = x.size;
    if (size < 0) != neg || size == 0 {
        // they have different signs
        return size.cmp(&if neg { -1 } else { 1 });
    }
    let ptr = x.limbs();
    let lo_limb = *ptr;
    // BUGFIX: was `mag < MAX_LIMB`, which sent a magnitude of exactly
    // MAX_LIMB down the two-limb path: with 64-bit limbs comparing against
    // u64::MAX tripped the `Limb::BITS == 32` assertion; with 32-bit limbs
    // a one-limb Int equal to MAX_LIMB compared as Less instead of Equal.
    let mag_ord = if mag <= MAX_LIMB {
        // Lexicographic (limb count, low limb) comparison against a
        // single-limb magnitude.
        (size.abs(), lo_limb.0).cmp(&(1, mag as BaseInt))
    } else {
        assert_eq!(Limb::BITS, 32);
        debug_assert!(size.abs() >= 1);
        let hi_limb = if size.abs() == 1 {
            Limb(0)
        } else {
            unsafe { *ptr.offset(1) }
        };
        (size.abs(), hi_limb.0, lo_limb.0).cmp(&(2, (mag >> 32) as BaseInt, mag as BaseInt))
    };
    if size < 0 && neg {
        // both negative, so the magnitude orderings need to be
        // flipped
        mag_ord.reverse()
    } else {
        mag_ord
    }
}
impl PartialEq<u64> for Int {
    // Equality against a u64 via the shared sign-magnitude helper.
    fn eq(&self, &other: &u64) -> bool {
        eq_64(self, other, false)
    }
}
impl PartialEq<Int> for u64 {
    // Equality is symmetric: same helper, operands swapped.
    fn eq(&self, other: &Int) -> bool {
        eq_64(other, *self, false)
    }
}
impl PartialOrd<u64> for Int {
    // Total ordering against a u64 via the shared sign-magnitude helper.
    fn partial_cmp(&self, &other: &u64) -> Option<Ordering> {
        Some(cmp_64(self, other, false))
    }
}
impl PartialOrd<Int> for u64 {
    // Same helper with operands swapped, so the ordering is reversed.
    fn partial_cmp(&self, other: &Int) -> Option<Ordering> {
        Some(cmp_64(other, *self, false).reverse())
    }
}
impl PartialEq<i64> for Int {
    /// Equality against an `i64` by sign and magnitude.
    fn eq(&self, &other: &i64) -> bool {
        // `wrapping_abs() as u64` yields the true magnitude even for
        // i64::MIN (2^63), where plain `abs()` panics in debug builds.
        eq_64(self, other.wrapping_abs() as u64, other < 0)
    }
}
impl PartialEq<Int> for i64 {
    /// Equality is symmetric: same helper, operands swapped.
    fn eq(&self, other: &Int) -> bool {
        // `wrapping_abs() as u64` yields the true magnitude even for
        // i64::MIN (2^63), where plain `abs()` panics in debug builds.
        eq_64(other, self.wrapping_abs() as u64, *self < 0)
    }
}
impl PartialOrd<i64> for Int {
    /// Total ordering against an `i64` by sign and magnitude.
    fn partial_cmp(&self, &other: &i64) -> Option<Ordering> {
        // `wrapping_abs() as u64` yields the true magnitude even for
        // i64::MIN (2^63), where plain `abs()` panics in debug builds.
        Some(cmp_64(self, other.wrapping_abs() as u64, other < 0))
    }
}
impl PartialOrd<Int> for i64 {
    /// Same helper with operands swapped, so the ordering is reversed.
    fn partial_cmp(&self, other: &Int) -> Option<Ordering> {
        // `wrapping_abs() as u64` yields the true magnitude even for
        // i64::MIN (2^63), where plain `abs()` panics in debug builds.
        Some(cmp_64(other, self.wrapping_abs() as u64, *self < 0).reverse())
    }
}
// Conversions *from* primitive integers into `Int` via `std::convert::From`.
macro_rules! impl_from_prim (
    (signed $($t:ty),*) => {
        $(impl ::std::convert::From<$t> for Int {
            fn from(val: $t) -> Int {
                if val == 0 {
                    return Int::zero();
                }
                // min_value() has no positive counterpart (`abs` would
                // overflow), but it is a power of two: build it as a shifted
                // one, then negate.
                if val == <$t>::min_value() {
                    let shift = val.trailing_zeros() as usize;
                    let mut i = Int::one();
                    i = i << shift;
                    return -i;
                }
                let sizeof_t = std::mem::size_of::<$t>();
                let sizeof_baseint = std::mem::size_of::<BaseInt>();
                // Handle conversion where BaseInt is smaller than $t
                if sizeof_baseint < sizeof_t {
                    let val_abs = val.abs();
                    let mask : BaseInt = !0;
                    let size_factor = sizeof_t / sizeof_baseint;
                    let limb_bits = Limb::BITS as u32;
                    let mut i = Int::zero();
                    // Peel the magnitude off one limb's worth of bits at a
                    // time, least-significant limb first.
                    for j in 0..size_factor {
                        // This won't wrap, since sizeof($t) = size_factor * sizeof(BaseInt)
                        let vlimb = val_abs.wrapping_shr(limb_bits * (j as u32)) & (mask as $t);
                        i.push(Limb(vlimb as BaseInt));
                    }
                    if val < 0 {
                        // Negative size encodes a negative value.
                        i.size *= -1;
                    }
                    i.normalize();
                    return i;
                } else {
                    // The whole magnitude fits in one limb.
                    let limb = Limb(val.abs() as BaseInt);
                    let mut i = Int::from_single_limb(limb);
                    if val < 0 {
                        i.size *= -1;
                    }
                    return i;
                }
            }
        })*
    };
    (unsigned $($t:ty),*) => {
        $(impl ::std::convert::From<$t> for Int {
            fn from(val: $t) -> Int {
                if val == 0 {
                    return Int::zero();
                }
                let sizeof_t = std::mem::size_of::<$t>();
                let sizeof_baseint = std::mem::size_of::<BaseInt>();
                // Handle conversion where BaseInt is smaller than $t
                if sizeof_baseint < sizeof_t {
                    let mask : BaseInt = !0;
                    let size_factor = sizeof_t / sizeof_baseint;
                    let limb_bits = Limb::BITS as u32;
                    let mut i = Int::zero();
                    for j in 0..size_factor {
                        // This won't wrap, since sizeof($t) = size_factor * sizeof(BaseInt)
                        let vlimb = val.wrapping_shr(limb_bits * (j as u32)) & (mask as $t);
                        i.push(Limb(vlimb as BaseInt));
                    }
                    i.normalize();
                    return i;
                } else {
                    let limb = Limb(val as BaseInt);
                    return Int::from_single_limb(limb);
                }
            }
        })*
    }
);
// Instantiate `From<primitive>` for every fixed-width and pointer-sized
// integer type.
impl_from_prim!(signed i8, i16, i32, i64, i128, isize);
impl_from_prim!(unsigned u8, u16, u32, u64, u128, usize);
// Number formatting - There's not much difference between the impls,
// hence the macro
macro_rules! impl_fmt (
    ($t:path, $radix:expr, $upper:expr, $prefix:expr) => {
        impl $t for Int {
            fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                let mut s : &str = &self.to_str_radix($radix, $upper);
                let is_positive = self.sign() >= 0;
                // to_str_radix adds the sign if `self` is negative, but
                // pad_integral adds its own sign, so slice the sign off
                if !is_positive {
                    s = &s[1..];
                }
                f.pad_integral(is_positive, $prefix, s)
            }
        }
    };
    // Radix with no upper/lowercase distinction: default $upper to false.
    ($t:path, $radix:expr, $prefix:expr) => {
        impl_fmt!($t, $radix, false, $prefix);
    }
);
// Instantiate the formatting traits; Debug deliberately prints the same as
// Display (decimal), and only hex distinguishes upper/lower case.
impl_fmt!(fmt::Binary, 2, "0b");
impl_fmt!(fmt::Octal, 8, "0o");
impl_fmt!(fmt::Display, 10, "");
impl_fmt!(fmt::Debug, 10, "");
impl_fmt!(fmt::LowerHex, 16, false, "0x");
impl_fmt!(fmt::UpperHex, 16, true, "0x");
/// Error that arises when parsing an [`Int`].
///
/// [`Int`]: struct.Int.html
#[derive(Debug, Clone, PartialEq)]
pub struct ParseIntError {
    // What went wrong; also selects the Display message.
    kind: ErrorKind,
}
// Internal classification of parse failures.
#[derive(Debug, Clone, PartialEq)]
enum ErrorKind {
    // The input string was empty.
    Empty,
    // The input contained a character invalid for the requested radix.
    InvalidDigit,
}
impl fmt::Display for ParseIntError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Map each error kind to its fixed message, then emit it once.
        let msg = match self.kind {
            ErrorKind::Empty => "cannot parse empty string",
            ErrorKind::InvalidDigit => "invalid digit found in string",
        };
        f.write_str(msg)
    }
}
// Marker impl so ParseIntError participates in the std error trait hierarchy.
impl Error for ParseIntError {}
impl FromStr for Int {
    type Err = ParseIntError;
    // `"123".parse::<Int>()` parses base-10.
    fn from_str(src: &str) -> Result<Int, ParseIntError> {
        Int::from_str_radix(src, 10)
    }
}
// Conversion *to* primitives via the From trait. When the value doesn't
// fit, only the low bits are kept (at most as many limbs as fit in $t),
// and for signed targets the sign is applied with wrapping arithmetic.
macro_rules! impl_from_for_prim (
    (signed $($t:ty),*) => (
        $(impl<'a> std::convert::From<&'a Int> for $t {
            fn from(i: &'a Int) -> $t {
                let sign = i.sign() as $t;
                if sign == 0 {
                    return 0;
                }
                let sizeof_t = std::mem::size_of::<$t>();
                let sizeof_baseint = std::mem::size_of::<BaseInt>();
                // Handle conversion where BaseInt is smaller than $t
                if sizeof_baseint < sizeof_t {
                    // Fallthrough if there's only one limb
                    if i.abs_size() >= 2 {
                        // The number of limbs that fit in $t
                        let size_factor = sizeof_t / sizeof_baseint;
                        // We can't copy more limbs than we have
                        let num_limbs_to_copy = std::cmp::min(size_factor, i.abs_size() as usize);
                        // Accumulate the limbs into $t
                        let limb_size = Limb::BITS as u32;
                        let mut acc: $t = 0;
                        for j in 0..num_limbs_to_copy {
                            let limb = unsafe { (*i.ptr.as_ptr().offset(j as isize)).0 } as $t;
                            acc |= limb.wrapping_shl(limb_size * (j as u32));
                        }
                        // Apply the sign
                        return acc.wrapping_mul(sign);
                    }
                }
                let n = i.to_single_limb().0;
                // Using wrapping_mul to account for when the binary
                // representation of n == $t::MIN
                return (n as $t).wrapping_mul(sign);
            }
        })*
    );
    (unsigned $($t:ty),*) => (
        $(impl<'a> std::convert::From<&'a Int> for $t {
            fn from(i: &'a Int) -> $t {
                if i.sign() == 0 {
                    return 0;
                }
                let sizeof_t = std::mem::size_of::<$t>();
                let sizeof_baseint = std::mem::size_of::<BaseInt>();
                // Handle conversion where BaseInt is smaller than $t
                if sizeof_baseint < sizeof_t {
                    // Fallthrough if there's only one limb
                    if i.abs_size() >= 2 {
                        // The number of limbs that fit in $t
                        let size_factor = sizeof_t / sizeof_baseint;
                        // We can't copy more limbs than we have
                        let num_limbs_to_copy = std::cmp::min(size_factor, i.abs_size() as usize);
                        // Accumulate the limbs into $t
                        let limb_size = Limb::BITS as u32;
                        let mut acc: $t = 0;
                        for j in 0..num_limbs_to_copy {
                            let limb = unsafe { (*i.ptr.as_ptr().offset(j as isize)).0 } as $t;
                            acc |= limb.wrapping_shl(limb_size * (j as u32));
                        }
                        return acc;
                    }
                }
                let n = i.to_single_limb().0;
                return n as $t;
            }
        })*
    )
);
// Instantiate `From<&Int>` for every fixed-width and pointer-sized integer
// type (keeping only the low bits when the value doesn't fit).
impl_from_for_prim!(signed i8, i16, i32, i64, i128, isize);
impl_from_for_prim!(unsigned u8, u16, u32, u64, u128, usize);
impl Zero for Int {
    // Zero is represented without any allocation: a dangling pointer with
    // size and capacity 0.
    fn zero() -> Int {
        Int {
            ptr: Unique::dangling(),
            size: 0,
            cap: 0,
        }
    }

    // Zero-ness is encoded entirely in the sign/size field.
    fn is_zero(&self) -> bool {
        self.sign() == 0
    }
}
impl One for Int {
    // The multiplicative identity, built from the primitive conversion.
    fn one() -> Int {
        Int::from(1)
    }
}
impl Num for Int {
    type FromStrRadixErr = ParseIntError;
    // Bridge to the inherent radix parser (which takes the radix as u8).
    #[inline]
    fn from_str_radix(src: &str, radix: u32) -> Result<Int, ParseIntError> {
        Int::from_str_radix(src, radix as u8)
    }
}
impl Integer for Int {
    // NOTE(review): these forward `/` and `%`, which truncate toward zero —
    // confirm that matches the trait's floor-division contract for negative
    // operands.
    #[inline]
    fn div_floor(&self, other: &Int) -> Int {
        self / other
    }

    #[inline]
    fn mod_floor(&self, other: &Int) -> Int {
        self % other
    }

    // Greatest common divisor; forwards to the inherent method.
    #[inline]
    fn gcd(&self, other: &Int) -> Int {
        self.gcd(other)
    }

    // Least common multiple; forwards to the inherent method.
    #[inline]
    fn lcm(&self, other: &Int) -> Int {
        self.lcm(other)
    }

    // `divides` is the legacy name: self divides other.
    #[inline]
    fn divides(&self, other: &Int) -> bool {
        other.is_multiple_of(self)
    }

    // self is a multiple of other iff the remainder vanishes.
    #[inline]
    fn is_multiple_of(&self, other: &Int) -> bool {
        (self % other).is_zero()
    }

    #[inline]
    fn is_even(&self) -> bool {
        self.is_even()
    }

    #[inline]
    fn is_odd(&self) -> bool {
        !self.is_even()
    }

    // Quotient and remainder in one call.
    #[inline]
    fn div_rem(&self, other: &Int) -> (Int, Int) {
        self.divrem(other)
    }
}
impl std::iter::Step for Int {
    /// Number of steps between two `Int`s, when that count fits in `usize`.
    fn steps_between(start: &Int, end: &Int) -> Option<usize> {
        let gap = (start - end).abs();
        if gap <= !0usize {
            // Fits in a machine word: convert the magnitude down.
            Some(usize::from(&gap))
        } else {
            None
        }
    }

    /// Stepping forward never overflows a big integer.
    fn forward_checked(start: Int, n: usize) -> Option<Self> {
        Some(start + Int::from(n))
    }

    /// Stepping backward never underflows a big integer.
    fn backward_checked(start: Int, n: usize) -> Option<Self> {
        Some(start - Int::from(n))
    }
}
/// Trait for generating random `Int`s.
///
/// # Example
///
/// Generate a random `Int` of size `256` bits:
///
/// ```
/// extern crate rand;
/// extern crate ramp;
///
/// use ramp::RandomInt;
///
/// fn main() {
///     let mut rng = rand::thread_rng();
///     let big_i = rng.gen_int(256);
/// }
/// ```
pub trait RandomInt {
    /// Generate a random unsigned `Int` of given bit size.
    fn gen_uint(&mut self, bits: usize) -> Int;

    /// Generate a random `Int` (either sign) of given bit size.
    fn gen_int(&mut self, bits: usize) -> Int;

    /// Generate a random unsigned `Int` less than the given bound.
    /// Fails when the bound is zero or negative.
    fn gen_uint_below(&mut self, bound: &Int) -> Int;

    /// Generate a random `Int` within the given range.
    /// The lower bound is inclusive; the upper bound is exclusive.
    /// Fails when the upper bound is not greater than the lower bound.
    fn gen_int_range(&mut self, lbound: &Int, ubound: &Int) -> Int;
}
// Blanket impl: any `Rng` can generate random `Int`s.
impl<R: Rng> RandomInt for R {
    fn gen_uint(&mut self, bits: usize) -> Int {
        assert!(bits > 0);
        // Whole limbs needed, plus possibly one partial limb of `rem` bits.
        let limbs = (bits / &Limb::BITS) as u32;
        let rem = bits % &Limb::BITS;
        let mut i = Int::with_capacity(limbs + 1);
        for _ in 0..limbs {
            let limb = Limb(self.gen());
            i.push(limb);
        }
        if rem > 0 {
            // Shift right so only the low `rem` bits of the extra limb
            // survive.
            let final_limb = Limb(self.gen());
            i.push(final_limb >> (&Limb::BITS - rem));
        }
        i.normalize();
        i
    }

    fn gen_int(&mut self, bits: usize) -> Int {
        let i = self.gen_uint(bits);
        // Pick the sign with a fair coin flip...
        let r = if i == Int::zero() {
            // ...except that if the BigUint is zero, we need to try
            // again with probability 0.5. This is because otherwise,
            // the probability of generating a zero BigInt would be
            // double that of any other number.
            if self.gen() {
                return self.gen_uint(bits);
            } else {
                i
            }
        } else if self.gen() {
            -i
        } else {
            i
        };
        r
    }

    fn gen_uint_below(&mut self, bound: &Int) -> Int {
        assert!(*bound > Int::zero());
        // If we haven't got a valid number after 10,000 tries, then something
        // has probably gone wrong, as there is a 1 in 10^3000 chance of this
        // happening, in the worst case.
        const ITER_LIMIT: usize = 10000;
        let bits = bound.bit_length() as usize;
        // Since it uses a number of bits, gen_uint may return a number too large,
        // loop until we generate a valid number.
        // Since the greatest gap between the bound and the largest number produced
        // is when bound = 2^n (bit string [100000....]), we have, at worst, a 50/50
        // chance of producing an invalid number each iteration.
        let mut count = 0;
        while count < ITER_LIMIT {
            let n = self.gen_uint(bits);
            if n < *bound {
                return n;
            }
            count += 1;
        }
        panic!(
            "No valid number generated in {} iterations.\n\
             Please open an issue at https://github.com/Aatch/ramp",
            ITER_LIMIT
        );
    }

    fn gen_int_range(&mut self, lbound: &Int, ubound: &Int) -> Int {
        assert!(*lbound < *ubound);
        // Uniform over [lbound, ubound): shift a uniform value below the gap.
        lbound + self.gen_uint_below(&(ubound - lbound))
    }
}
#[cfg(test)]
mod test {
use super::*;
use ll::limb::Limb;
use rand::{self, Rng};
use std;
use std::hash::{Hash, Hasher};
use std::str::FromStr;
use test::{self, Bencher};
use traits::DivRem;
// Assert that two multi-precision values compare equal, printing both
// expressions and both values on failure.
macro_rules! assert_mp_eq (
    ($l:expr, $r:expr) => (
        {
            let l : &Int = &$l;
            let r : &Int = &$r;
            if l != r {
                println!("assertion failed: {} == {}", stringify!($l), stringify!($r));
                panic!("{:} != {:}", l, r);
            }
        }
    )
);
#[test]
fn from_string_10() {
    // (decimal string, expected value) — covers zero forms, leading zeros
    // and negatives.
    let cases = [
        ("0", 0i32),
        ("123456", 123456),
        ("0123", 123),
        ("000000", 0),
        ("-0", 0),
        ("-1", -1),
        ("-123456", -123456),
        ("-0123", -123),
    ];
    for &(text, expected) in &cases {
        let parsed: Int = text.parse().unwrap();
        assert_eq!(parsed, expected);
    }
}
#[test]
fn from_string_16() {
    // (hex string, expected value) — covers mixed case, leading zeros and
    // negatives.
    let cases = [
        ("0", 0i32),
        ("abcde", 0xabcde),
        ("0ABC", 0xabc),
        ("12AB34cd", 0x12ab34cd),
        ("-ABC", -0xabc),
        ("-0def", -0xdef),
        ("00000000000000000", 0),
    ];
    for &(text, expected) in &cases {
        let parsed: Int = Int::from_str_radix(text, 16).unwrap();
        assert_eq!(parsed, expected, "Assertion failed: {:#x} != {:#x}", parsed, expected);
    }
}
#[test]
fn to_string_10() {
    // (expected decimal string, value) — includes a value larger than any
    // single limb.
    let cases = [
        ("0", Int::zero()),
        ("1", Int::from(1)),
        ("123", Int::from(123)),
        ("-456", Int::from(-456)),
        (
            "987654321012345678910111213",
            Int::from_str("987654321012345678910111213").unwrap(),
        ),
    ];
    for &(s, ref n) in cases.iter() {
        assert_eq!(s, &n.to_string());
    }
}
#[test]
fn to_string_16() {
    // (expected lowercase hex string, value) — includes a multi-limb value.
    let cases = [
        ("0", Int::zero()),
        ("1", Int::from(1)),
        ("-1", Int::from(-1)),
        ("abc", Int::from(0xabc)),
        ("-456", Int::from(-0x456)),
        (
            "987654321012345678910111213",
            Int::from_str_radix("987654321012345678910111213", 16).unwrap(),
        ),
    ];
    for &(s, ref n) in cases.iter() {
        assert_eq!(s, &n.to_str_radix(16, false));
    }
}
#[test]
fn num_base_digits_pow2() {
    use ll::base::num_base_digits;
    // For power-of-two bases the digit count is exact; each case is
    // (decimal value, base, expected digit count).
    let cases = [
        ("10", 2, 4), // 0b 1010
        ("15", 2, 4), // 0b 1111
        ("16", 2, 5), // 0b10000
        ("1023", 2, 10), // 0b 1111111111
        ("1024", 2, 11), // 0b10000000000
        ("341", 4, 5), // 4»11111
        ("5461", 4, 7), // 4»1111111
        ("16383", 4, 7), // 4» 3333333
        ("16384", 4, 8), // 4»10000000
        ("65535", 4, 8), // 4»33333333
        ("299593", 8, 7), // 8»1111111 // 8**0 + 8**1 + 8**2 + 8**3 + 8**4 + 8**5 + 8**6
        ("299594", 8, 7), // 8»1111112
        ("2097151", 8, 7), // 8» 7777777
        ("2097152", 8, 8), // 8»10000000
        ("268435455", 16, 7), // 0x fffffff
        ("268435456", 16, 8), // 0x10000000
        ("13407807929942597099574024998205846127479365820592393377723561443721764030073546976801874298166903427690031858186486050853753882811946569946433649006084095", 2, 512), // 512 bits with value 1 -> 1 Limb
        ("13407807929942597099574024998205846127479365820592393377723561443721764030073546976801874298166903427690031858186486050853753882811946569946433649006084096", 2, 513), // 513 bits -> 2 Limbs
    ];
    for &(s, base, digits) in cases.iter() {
        let i: Int = s.parse().unwrap();
        unsafe {
            assert_eq!(digits, num_base_digits(i.limbs(), i.abs_size(), base));
        }
    }
}
#[test]
fn num_base_digits() {
    use ll::base::num_base_digits;
    // For non-power-of-two bases num_base_digits may overestimate by one,
    // so the assertion accepts `estimate` or `estimate - 1`.
    let cases = [
        ("0", 15, 1),
        ("0", 58, 1),
        ("49172", 10, 5),
        ("49172", 57, 3), // [15, 7, 38], 38 + 7*57 + 15*57**2 = 49172
        ("185192", 57, 3), // [56, 56, 56], 56 + 56*57 + 56*57**2 = 185192
        ("185193", 57, 4), // [1, 0, 0, 0], 1*57**3 = 185193
        ("250046", 63, 3), // [62, 62, 62], 62 + 62*63 + 62*63**2 = 250046
        ("250047", 63, 4), // [1, 0, 0, 0], 1*63**3 = 250047
        ("274624", 65, 3), // [64, 64, 64], 64 + 64*65 + 64*65**2 = 274624
        ("274625", 65, 4), // [1, 0, 0, 0], 1*65**3 = 274625
    ];
    for &(s, base, digits) in cases.iter() {
        let i: Int = s.parse().unwrap();
        unsafe {
            let estimate = num_base_digits(i.limbs(), i.abs_size(), base);
            assert!(digits == estimate || digits == estimate - 1);
        }
    }
}
#[test]
fn pow() {
    // Bases covering zero, one, large multi-limb values, 2**128 and
    // negatives of each flavour.
    let bases = [
        "0",
        "1",
        "190000000000000",
        "192834857324591531",
        "340282366920938463463374607431768211456", // 2**128
        "100000000",
        "-1",
        "-100",
        "-200",
        "-192834857324591531",
        "-431343873217510631841",
        "-340282366920938463463374607431768211456",
    ];
    for b in bases.iter() {
        let b: Int = b.parse().unwrap();
        let mut x = Int::one();
        // Check b.pow(e) against x = b**e maintained incrementally.
        for e in 0..512 {
            let a = &b.pow(e);
            // println!("b={}, e={}, a={}, x={}", &b, &e, &a, &x);
            assert_mp_eq!(a.clone(), x.clone());
            x = &x * &b
        }
    }
}
/// The `bigresults` testcases were generated by the following Python code
///
/// ```python
/// bases = [
/// "0", "1", "190000000000000", "192834857324591531",
/// "340282366920938463463374607431768211456", "100000000", "-1", "-100", "-200",
/// "-192834857324591531", "-431343873217510631841",
/// "-340282366920938463463374607431768211456"
/// ]
///
/// moduli = [
/// "1", "2", "77", "102847", "923847928374928374928098123", "-1", "-2", "-77", "-102847",
/// "-923847928374928374928098123"
/// ]
///
/// # Pick one of the exponents below
/// e = int("983459824098102")
/// #e = int("30297523982304983")
///
/// for b in map(int, bases):
/// for m in map(int, moduli):
/// x = pow(b, e, m)
///
/// # Skip the manipulation below
/// if x == 0:
/// print(x)
/// continue
///
/// # True iff the sign of b^e is negative
/// powneg = (b < 0) and (e % 2 == 1)
///
/// # If b^e is negative XOR the modulus is positive, print the result as-is
/// if powneg ^ (m >= 0):
/// print(x)
/// # Otherwise, flip it
/// else:
/// print(x - m)
/// ```
#[test]
fn pow_mod() {
let bases = [
"0",
"1",
"190000000000000",
"192834857324591531",
"340282366920938463463374607431768211456",
"100000000",
"-1",
"-100",
"-200",
"-192834857324591531",
"-431343873217510631841",
"-340282366920938463463374607431768211456",
];
let moduli = [
"1",
"2",
"77",
"102847",
"923847928374928374928098123",
"-1",
"-2",
"-77",
"-102847",
"-923847928374928374928098123",
];
let big_expt1: Int = "983459824098102".parse().unwrap();
let big_results1 = [
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"1",
"1",
"1",
"1",
"0",
"1",
"1",
"1",
"1",
"0",
"0",
"64",
"81700",
"650397494965841282553028378",
"0",
"0",
"64",
"81700",
"650397494965841282553028378",
"0",
"1",
"14",
"2984",
"293908402213598797808604757",
"0",
"1",
"14",
"2984",
"293908402213598797808604757",
"0",
"0",
"64",
"79782",
"294430149849779620413037114",
"0",
"0",
"64",
"79782",
"294430149849779620413037114",
"0",
"0",
"1",
"45620",
"410140929051586324284535549",
"0",
"0",
"1",
"45620",
"410140929051586324284535549",
"0",
"1",
"1",
"1",
"1",
"0",
"1",
"1",
"1",
"1",
"0",
"0",
"1",
"64031",
"36844801842501039220835119",
"0",
"0",
"1",
"64031",
"36844801842501039220835119",
"0",
"0",
"15",
"2604",
"177460521562747360312778824",
"0",
"0",
"15",
"2604",
"177460521562747360312778824",
"0",
"1",
"14",
"2984",
"293908402213598797808604757",
"0",
"1",
"14",
"2984",
"293908402213598797808604757",
"0",
"1",
"1",
"18906",
"873601645071303646536712038",
"0",
"1",
"1",
"18906",
"873601645071303646536712038",
"0",
"0",
"64",
"79782",
"294430149849779620413037114",
"0",
"0",
"64",
"79782",
"294430149849779620413037114",
];
let big_expt2: Int = "30297523982304983".parse().unwrap();
let big_results2 = [
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"0",
"1",
"1",
"1",
"1",
"0",
"1",
"1",
"1",
"1",
"0",
"0",
"71",
"95361",
"176018964989535053333805796",
"0",
"0",
"71",
"95361",
"176018964989535053333805796",
"0",
"1",
"70",
"97130",
"379622449400387577033857765",
"0",
"1",
"70",
"97130",
"379622449400387577033857765",
"0",
"0",
"16",
"22931",
"923257520141185284248663506",
"0",
"0",
"16",
"22931",
"923257520141185284248663506",
"0",
"0",
"67",
"61927",
"735007863677635702331962114",
"0",
"0",
"67",
"61927",
"735007863677635702331962114",
"0",
"-1",
"-1",
"-1",
"-1",
"0",
"-1",
"-1",
"-1",
"-1",
"0",
"0",
"-67",
"-50340",
"-639061946068491056215087558",
"0",
"0",
"-67",
"-50340",
"-639061946068491056215087558",
"0",
"0",
"-30",
"-48472",
"-783769839004928514409300748",
"0",
"0",
"-30",
"-48472",
"-783769839004928514409300748",
"0",
"-1",
"-70",
"-97130",
"-379622449400387577033857765",
"0",
"-1",
"-70",
"-97130",
"-379622449400387577033857765",
"0",
"-1",
"-1",
"-55833",
"-913941961265646087594551688",
"0",
"-1",
"-1",
"-55833",
"-913941961265646087594551688",
"0",
"0",
"-16",
"-22931",
"-923257520141185284248663506",
"0",
"0",
"-16",
"-22931",
"-923257520141185284248663506",
];
// Do the calculations for small expts first. Just do up to 80
for b in bases.iter() {
for m in moduli.iter() {
for i in 0..80 {
let b: Int = b.parse().unwrap();
let e: Int = Int::from(i);
let m: Int = m.parse().unwrap();
let a = b.pow_mod(&e, &m);
assert_mp_eq!(a, b.pow(i as usize) % m)
}
}
}
// Now do the big expts
for (big_expt, big_results) in [big_expt1, big_expt2]
.iter()
.zip([big_results1, big_results2].iter())
{
for (b_i, b) in bases.iter().enumerate() {
for (m_i, m) in moduli.iter().enumerate() {
let b: Int = b.parse().unwrap();
let m: Int = m.parse().unwrap();
let a = b.pow_mod(&big_expt, &m);
let expected: Int = big_results[b_i * moduli.len() + m_i].parse().unwrap();
assert_mp_eq!(a, expected);
}
}
}
}
#[should_panic]
#[test]
fn pow_mod_zeromod() {
let b = Int::from(10);
let e = Int::from(2);
let m = Int::zero();
b.pow_mod(&e, &m);
}
#[should_panic]
#[test]
fn pow_mod_negexp() {
let b = Int::from(10);
let e = Int::from(-2);
let m = Int::from(50);
b.pow_mod(&e, &m);
}
#[test]
fn add() {
let cases = [
("0", "0", "0"),
("1", "0", "1"),
("1", "1", "2"),
("190000000000000", "1", "190000000000001"),
(
"192834857324591531",
"431343873217510631841",
"431536708074835223372",
),
("0", "-1", "-1"),
("1", "-1", "0"),
("100000000", "-1", "99999999"),
("-100", "-100", "-200"),
(
"-192834857324591531",
"-431343873217510631841",
"-431536708074835223372",
),
// (2**64 - 1) * 2**64 + 2**64 == 2**128
(
"340282366920938463444927863358058659840",
"18446744073709551616",
"340282366920938463463374607431768211456",
),
];
for &(l, r, a) in cases.iter() {
let l: Int = l.parse().unwrap();
let r: Int = r.parse().unwrap();
let a: Int = a.parse().unwrap();
assert_mp_eq!(l + r, a);
}
}
#[test]
fn sub() {
let cases = [
("0", "0", "0"),
("1", "0", "1"),
("1", "1", "0"),
("0", "1", "-1"),
("190000000000000", "1", "189999999999999"),
(
"192834857324591531",
"431343873217510631841",
"-431151038360186040310",
),
("0", "-1", "1"),
("1", "-1", "2"),
("100000000", "-1", "100000001"),
("-100", "-100", "0"),
("-100", "100", "-200"),
("237", "236", "1"),
(
"-192834857324591531",
"-431343873217510631841",
"431151038360186040310",
),
// (2**64 - 1) * 2**64 - -2**64 == 2**128
(
"340282366920938463444927863358058659840",
"-18446744073709551616",
"340282366920938463463374607431768211456",
),
];
for &(l, r, a) in cases.iter() {
let l: Int = l.parse().unwrap();
let r: Int = r.parse().unwrap();
let a: Int = a.parse().unwrap();
assert_mp_eq!(&l - &r, a.clone());
assert_mp_eq!(&l - r.clone(), a.clone());
assert_mp_eq!(l.clone() - &r, a.clone());
assert_mp_eq!(l - r, a);
}
}
#[test]
fn mul() {
let cases = [
("0", "0", "0"),
("1", "0", "0"),
("1", "1", "1"),
("1234", "-1", "-1234"),
("8", "9", "72"),
("-8", "-9", "72"),
("8", "-9", "-72"),
(
"1234567891011",
"9876543210123",
"12193263121400563935904353",
),
(
"-1234567891011",
"9876543210123",
"-12193263121400563935904353",
),
];
for &(l, r, a) in cases.iter() {
let l: Int = l.parse().unwrap();
let r: Int = r.parse().unwrap();
let a: Int = a.parse().unwrap();
assert_mp_eq!(l * r, a);
}
}
#[test]
fn div() {
let cases = [
("1", "1", "1"),
("1234", "-1", "-1234"),
("8", "9", "0"),
("-9", "-3", "3"),
("1234567891011121314151617", "95123654789852856006", "12978"),
("-1234567891011121314151617", "95123654789852856006", "-12978"),
("-1198775410753307067346230628764044530011323809665206377243907561641040294348297309637331525393593945901384203950086960228531308793518800829453656715578105987032036211272103322425770761458186593",
"979504192721382235629958845425279521512826176107035761459344386626944187481828320416870752582555",
"-1223859397092234843008309150569447886995823751180958876260102037121722431272801092547910923059616")
];
for &(l, r, a) in cases.iter() {
let l: Int = l.parse().unwrap();
let r: Int = r.parse().unwrap();
let a: Int = a.parse().unwrap();
let val = &l / &r;
assert_mp_eq!(val, a);
}
}
#[test]
#[should_panic(expected = "divide by zero")]
#[cfg(debug_assertions)] // only a panic in this mode
fn divmod_zero() {
Int::from(1).divmod(&Int::zero());
}
#[test]
fn rem() {
let cases = [
("2", "1", "0"),
("1", "2", "1"),
("100", "2", "0"),
("100", "3", "1"),
(
"234129835798275032157029375235",
"4382109473241242142341234",
"2490861941946976021925083",
),
("-2", "1", "0"),
("-1", "2", "-1"),
("-100", "2", "0"),
("-100", "3", "-1"),
(
"-234129835798275032157029375235",
"4382109473241242142341234",
"-2490861941946976021925083",
),
("2", "-1", "0"),
("1", "-2", "1"),
("100", "-2", "0"),
("100", "-3", "1"),
(
"234129835798275032157029375235",
"-4382109473241242142341234",
"2490861941946976021925083",
),
("-2", "-1", "0"),
("-1", "-2", "-1"),
("-100", "-2", "0"),
("-100", "-3", "-1"),
(
"-234129835798275032157029375235",
"-4382109473241242142341234",
"-2490861941946976021925083",
),
];
for &(l, r, a) in cases.iter() {
let l: Int = l.parse().unwrap();
let r: Int = r.parse().unwrap();
let a: Int = a.parse().unwrap();
let val = &l % &r;
assert_mp_eq!(val, a);
}
}
#[test]
fn divrem() {
let cases = [
("20000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000001",
"100000000000000000000000000000000000000000000001",
"200000000000000000000000000000000000000000000001",
"100000000000000000000000000000000000000000000000"),
];
for t in cases.iter() {
let dividend: Int = t.0.parse().unwrap();
let divisor: Int = t.1.parse().unwrap();
let expected_quotient: Int = t.2.parse().unwrap();
let expected_remainder: Int = t.3.parse().unwrap();
let (actual_quotient, actual_remainder) = (÷nd).divrem(&divisor);
assert_mp_eq!(actual_quotient, expected_quotient);
assert_mp_eq!(actual_remainder, expected_remainder);
}
}
#[test]
fn sqrt_rem() {
let cases = [
("0", "0", "0"),
("1", "1", "0"),
("2", "1", "1"),
("3", "1", "2"),
("4", "2", "0"),
("1000", "31", "39"),
(
"15241578753238836750495351562536198787501905199875019052100",
"123456789012345678901234567890",
"0",
),
(
"15241578753238836750495351562536198787501905199875019052099",
"123456789012345678901234567889",
"246913578024691357802469135778",
),
(
"15241578753238836750495351562536198787501905199875019052101",
"123456789012345678901234567890",
"1",
),
];
for &(x, sqrt, rem) in &cases {
let x: Int = x.parse().unwrap();
let sqrt: Int = sqrt.parse().unwrap();
let rem: Int = rem.parse().unwrap();
if x != 0 {
assert!((-&x).sqrt_rem().is_none());
}
let (s, r) = x.sqrt_rem().unwrap();
assert_mp_eq!(s, sqrt);
assert_mp_eq!(r, rem);
}
}
#[test]
fn bitand() {
let cases = [
("0", "1", "0"),
("17", "65", "1"),
("-17", "65", "65"),
("17", "-65", "17"),
("-17", "-65", "-81"),
("0", "543253451643657932075830214751263521", "0"),
(
"-1",
"543253451643657932075830214751263521",
"543253451643657932075830214751263521",
),
(
"47398217493274092174042109472",
"9843271092740214732017421",
"152974816756326460458496",
),
(
"87641324986400000000000",
"31470973247490321000000000000000",
"2398658832415825854464",
),
(
"-87641324986400000000000",
"31470973247490321000000000000000",
"31470973245091662167584174145536",
),
(
"87641324986400000000000",
"-31470973247490321000000000000000",
"85242666153984174129152",
),
(
"-87641324986400000000000",
"-31470973247490321000000000000000",
"-31470973332732987153984174129152",
),
];
for &(l_, r_, a) in cases.iter() {
let l: Int = l_.parse().unwrap();
let r: Int = r_.parse().unwrap();
let a: Int = a.parse().unwrap();
let val = &l & &r;
assert_mp_eq!(val, a);
if l.bit_length() <= 31 {
let l: i32 = l_.parse().unwrap();
let val = l & &r;
assert_mp_eq!(val, a);
}
if r.bit_length() <= 31 {
let r: i32 = r_.parse().unwrap();
let val = &l & r;
assert_mp_eq!(val, a);
}
}
}
#[test]
fn bitor() {
let cases = [
("0", "1", "1"),
("17", "65", "81"),
("-17", "65", "-17"),
("17", "-65", "-65"),
("-17", "-65", "-1"),
(
"0",
"543253451643657932075830214751263521",
"543253451643657932075830214751263521",
),
("-1", "543253451643657932075830214751263521", "-1"),
(
"47398217493274092174042109472",
"9843271092740214732017421",
"47407907789550076062313668397",
),
(
"87641324986400000000000",
"31470973247490321000000000000000",
"31470973332732987153984174145536",
),
(
"-87641324986400000000000",
"31470973247490321000000000000000",
"-85242666153984174145536",
),
(
"87641324986400000000000",
"-31470973247490321000000000000000",
"-31470973245091662167584174129152",
),
(
"-87641324986400000000000",
"-31470973247490321000000000000000",
"-2398658832415825870848",
),
];
for &(l_, r_, a) in cases.iter() {
let l: Int = l_.parse().unwrap();
let r: Int = r_.parse().unwrap();
let a: Int = a.parse().unwrap();
let val = &l | &r;
assert_mp_eq!(val, a);
if l.bit_length() <= 31 {
let l: i32 = l_.parse().unwrap();
let val = l | &r;
assert_mp_eq!(val, a);
}
if r.bit_length() <= 31 {
let r: i32 = r_.parse().unwrap();
let val = &l | r;
assert_mp_eq!(val, a);
}
}
}
#[test]
fn bitxor() {
let cases = [
("0", "1", "1"),
("17", "65", "80"),
("-17", "65", "-82"),
("17", "-65", "-82"),
("-17", "-65", "80"),
(
"0",
"543253451643657932075830214751263521",
"543253451643657932075830214751263521",
),
(
"-1",
"543253451643657932075830214751263521",
"-543253451643657932075830214751263522",
),
(
"47398217493274092174042109472",
"9843271092740214732017421",
"47407754814733319735853209901",
),
(
"87641324986400000000000",
"31470973247490321000000000000000",
"31470973330334328321568348291072",
),
(
"-87641324986400000000000",
"31470973247490321000000000000000",
"-31470973330334328321568348291072",
),
(
"87641324986400000000000",
"-31470973247490321000000000000000",
"-31470973330334328321568348258304",
),
(
"-87641324986400000000000",
"-31470973247490321000000000000000",
"31470973330334328321568348258304",
),
];
for &(l_, r_, a) in cases.iter() {
let l: Int = l_.parse().unwrap();
let r: Int = r_.parse().unwrap();
let a: Int = a.parse().unwrap();
let val = &l ^ &r;
assert_mp_eq!(val, a);
if l.bit_length() <= 31 {
let l: i32 = l_.parse().unwrap();
let val = l ^ &r;
assert_mp_eq!(val, a);
}
if r.bit_length() <= 31 {
let r: i32 = r_.parse().unwrap();
let val = &l ^ r;
assert_mp_eq!(val, a);
}
}
}
#[test]
fn is_even() {
let cases = [
("0", true),
("1", false),
("47398217493274092174042109472", true),
("47398217493274092174042109471", false),
];
for &(v, even) in cases.iter() {
let val: Int = v.parse().unwrap();
assert_eq!(val.is_even(), even);
let val = -val;
assert_eq!(val.is_even(), even);
}
}
#[test]
fn trailing_zeros() {
let cases = [
("0", 0),
("1", 0),
("16", 4),
(
"3036937844145311324764506857395738547330878864826266812416",
100,
),
];
for &(v, count) in cases.iter() {
let val: Int = v.parse().unwrap();
assert_eq!(val.trailing_zeros(), count);
}
}
#[test]
fn arith_prim() {
// Test that the Int/prim overloads are working as expected
let x: Int = "100".parse().unwrap();
// Int op prim
assert_mp_eq!(&x + 1usize, "101".parse().unwrap());
assert_mp_eq!(&x - 1usize, "99".parse().unwrap());
assert_mp_eq!(&x + 1i32, "101".parse().unwrap());
assert_mp_eq!(&x - 1i32, "99".parse().unwrap());
assert_mp_eq!(&x + (-1i32), "99".parse().unwrap());
assert_mp_eq!(&x - (-1i32), "101".parse().unwrap());
assert_mp_eq!(&x + (-101i32), "-1".parse().unwrap());
assert_mp_eq!(&x - 101i32, "-1".parse().unwrap());
assert_mp_eq!(&x - 100usize, Int::zero());
assert_mp_eq!(-&x + 100usize, Int::zero());
assert_mp_eq!(&x - 100i32, Int::zero());
assert_mp_eq!(&x + (-100i32), Int::zero());
assert_mp_eq!(-&x + 100i32, Int::zero());
assert_mp_eq!(-&x - (-100i32), Int::zero());
assert_mp_eq!(&x * 2usize, "200".parse().unwrap());
assert_mp_eq!(&x * 2i32, "200".parse().unwrap());
assert_mp_eq!(&x * (-2i32), "-200".parse().unwrap());
assert_mp_eq!(&x / 2usize, "50".parse().unwrap());
assert_mp_eq!(&x / 2i32, "50".parse().unwrap());
assert_mp_eq!(&x / (-2i32), "-50".parse().unwrap());
assert_mp_eq!(&x % 2usize, "0".parse().unwrap());
assert_mp_eq!(&x % 2i32, "0".parse().unwrap());
assert_mp_eq!(&x % (-2i32), "0".parse().unwrap());
let x: Int = "5".parse().unwrap();
// prim op Int
assert_mp_eq!(1usize + &x, "6".parse().unwrap());
assert_mp_eq!(1usize - &x, "-4".parse().unwrap());
assert_mp_eq!(1i32 + &x, "6".parse().unwrap());
assert_mp_eq!(1i32 - &x, "-4".parse().unwrap());
assert_mp_eq!((-1i32) + &x, "4".parse().unwrap());
assert_mp_eq!((-1i32) - &x, "-6".parse().unwrap());
assert_mp_eq!(2usize * &x, "10".parse().unwrap());
assert_mp_eq!(2i32 * &x, "10".parse().unwrap());
assert_mp_eq!((-2i32) * &x, "-10".parse().unwrap());
assert_mp_eq!(20usize / &x, "4".parse().unwrap());
assert_mp_eq!(20i32 / &x, "4".parse().unwrap());
assert_mp_eq!((-20i32) / &x, "-4".parse().unwrap());
}
#[test]
fn int_from() {
let i = Int::from(::std::i64::MIN);
assert_eq!(i64::from(&i), ::std::i64::MIN);
let i = Int::from(::std::i32::MIN);
assert_eq!(i32::from(&i), ::std::i32::MIN);
let i = Int::from(::std::i128::MIN);
assert_eq!(i128::from(&i), ::std::i128::MIN);
let i = Int::from(::std::u128::MAX);
assert_eq!(u128::from(&i), ::std::u128::MAX);
let i = Int::from(::std::usize::MAX);
assert_eq!(usize::from(&i), ::std::usize::MAX);
}
#[test]
fn step() {
use std::iter::Step;
let a = Int::from(897235032);
let b = Int::from(98345);
assert_eq!(Int::steps_between(&a, &b), Some(897136687));
assert_eq!(Int::steps_between(&a, &b), Int::steps_between(&b, &a));
assert_eq!(
Int::forward_checked(a.clone(), 232184),
Some(Int::from(897467216))
);
assert_eq!(
Int::backward_checked(a.clone(), 897467216),
Some(Int::from(-232184))
);
}
const RAND_ITER: usize = 1000;
#[test]
fn div_rand() {
let mut rng = rand::thread_rng();
for _ in 0..RAND_ITER {
let x = rng.gen_int(640);
let y = rng.gen_int(320);
let (q, r) = x.divmod(&y);
let val = (q * &y) + r;
assert_mp_eq!(val, x);
}
}
#[test]
fn sqr_rand() {
let mut rng = rand::thread_rng();
for _ in 0..RAND_ITER {
let x = rng.gen_int(640);
let xs = x.square();
let xm = &x * &x;
assert_mp_eq!(xm, xs);
}
}
#[test]
fn shl_rand() {
let mut rng = rand::thread_rng();
for _ in 0..RAND_ITER {
let x = rng.gen_int(640);
let shift_1 = &x << 1;
let mul_2 = &x * 2;
assert_mp_eq!(shift_1, mul_2);
let shift_3 = &x << 3;
let mul_8 = &x * 8;
assert_mp_eq!(shift_3, mul_8);
}
}
#[test]
fn shl_rand_large() {
let mut rng = rand::thread_rng();
for _ in 0..RAND_ITER {
let pow: usize = rng.gen_range(64..8196);
let mul_by = Int::from(2).pow(pow);
let x = rng.gen_int(640);
let shift = &x << pow;
let mul = x * mul_by;
assert_mp_eq!(shift, mul);
}
}
#[test]
fn shr_rand() {
let mut rng = rand::thread_rng();
for _ in 0..RAND_ITER {
let pow: usize = rng.gen_range(64..8196);
let x = rng.gen_int(640);
let shift_up = &x << pow;
let shift_down = shift_up >> pow;
assert_mp_eq!(shift_down, x);
}
}
#[test]
fn bitand_rand() {
let mut rng = rand::thread_rng();
for _ in 0..RAND_ITER {
let x = rng.gen_int(640);
let y = rng.gen_int(640);
let _ = x & y;
}
}
#[test]
fn hash_rand() {
let mut rng = rand::thread_rng();
for _ in 0..RAND_ITER {
let x1 = rng.gen_int(640);
let x2 = x1.clone();
assert_eq!(x1, x2);
let x1_hash = {
let mut hasher = std::collections::hash_map::DefaultHasher::new();
x1.hash(&mut hasher);
hasher.finish()
};
let x2_hash = {
let mut hasher = std::collections::hash_map::DefaultHasher::new();
x2.hash(&mut hasher);
hasher.finish()
};
assert_eq!(x1_hash, x2_hash);
}
}
#[test]
#[should_panic]
fn gen_uint_with_zero_bits() {
let mut rng = rand::thread_rng();
rng.gen_uint(0);
}
#[test]
#[should_panic]
fn gen_int_with_zero_bits() {
let mut rng = rand::thread_rng();
rng.gen_int(0);
}
#[test]
#[should_panic]
fn gen_uint_below_zero_or_negative() {
let mut rng = rand::thread_rng();
let i = Int::from(0);
rng.gen_uint_below(&i);
let j = Int::from(-1);
rng.gen_uint_below(&j);
}
#[test]
#[should_panic]
fn gen_int_range_zero() {
let mut rng = rand::thread_rng();
let b = Int::from(123);
rng.gen_int_range(&b, &b);
}
#[test]
#[should_panic]
fn gen_int_range_negative() {
let mut rng = rand::thread_rng();
let lb = Int::from(123);
let ub = Int::from(321);
rng.gen_int_range(&ub, &lb);
}
#[test]
fn gen_int_range() {
let mut rng = rand::thread_rng();
for _ in 0..10 {
let i = rng.gen_int_range(&Int::from(236), &Int::from(237));
assert_eq!(i, Int::from(236));
}
let l = Int::from(403469000 + 2352);
let u = Int::from(403469000 + 3513);
for _ in 0..1000 {
let n: Int = rng.gen_uint_below(&u);
assert!(n < u);
let n: Int = rng.gen_int_range(&l, &u);
assert!(n >= l);
assert!(n < u);
}
}
#[test]
fn gen_uint_below_all_ones() {
static N: &'static str = "000001FFFFFFFFFFFFFFFFFFFFFFFFFFF\
FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF\
FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF";
let mut rng = rand::thread_rng();
let bound = Int::from_str_radix(N, 16).unwrap();
for _ in 0..10 {
let n: Int = rng.gen_uint_below(&bound);
assert!(n < bound);
}
}
#[test]
fn add_larger_limb() {
let a = Int::from(-14);
let b = Limb(15 as BaseInt);
assert_eq!(a + b, Int::one());
}
#[test]
fn gcd() {
let cases = [
("3", "0","3"), // special
("0", "3", "3"),
("0", "0", "0"),
("13", "13", "13"),
("37", "600", "1"), // prime numbers
("2567", "997", "1"),
("624129", "2061517", "18913"), // normal
("18446744073709551616", "18446744073709551616", "18446744073709551616"),
("184467440737095516201234", "493882992939324", "6"),
("493882992939324", "184467440737095516201234", "6"),
("18446744073709551620", "18446744073709551615", "5"),
("-9223372036854775808", "-9223372036854775808", "9223372036854775808"),
("-9223372036854775811", "-9223372036854775808", "1"),
("-23465475685232342344366756745345", "-23423545489322535345", "5"),
("-23423545489322535345", "-23465475685232342344366756745345", "5"),
("-170141183460469231731687303715884105728", "-170141183460469231731687303715884105729", "1"),
("-170141183460469231731687303715884105731", "-170141183460469231731687303715884105728", "1"),
("170141183460469231731687303715884105731234234363462345234345547232443500000000000000000000000", "17014118346046923173168730371588410572836362453452345000000000000000000", "5000000000000000000")
];
for &(l, r, a) in cases.iter() {
let l: Int = l.parse().unwrap();
let r: Int = r.parse().unwrap();
let a: Int = a.parse().unwrap();
let val = l.gcd(&r);
assert_mp_eq!(val, a);
}
}
#[test]
fn lcm() {
let cases = [
("1", "0", "0"),
("0", "1", "0"),
("1", "1", "1"),
("-1", "0", "0"),
("0", "-1", "0"),
("-1", "-1", "1"),
("8", "9", "72"),
("11", "5", "55"),
("99", "17", "1683"),
(
"18446744073709551616",
"18446744073709551616",
"18446744073709551616",
),
(
"18446744073709551620",
"18446744073709551615",
"68056473384187692703742967930579373260",
),
(
"-9223372036854775808",
"-9223372036854775808",
"9223372036854775808",
),
(
"-9223372036854775811",
"-9223372036854775808",
"85070591730234615893513767968506380288",
),
(
"-92233720368547758112345",
"-235777694355",
"4349330786055998253486590232462495",
),
];
for &(l, r, a) in cases.iter() {
let l: Int = l.parse().unwrap();
let r: Int = r.parse().unwrap();
let a: Int = a.parse().unwrap();
let val = l.lcm(&r);
assert_mp_eq!(val.clone(), a.clone());
}
}
fn bench_add(b: &mut Bencher, xs: usize, ys: usize) {
let mut rng = rand::thread_rng();
let x = rng.gen_int(xs * Limb::BITS);
let y = rng.gen_int(ys * Limb::BITS);
b.iter(|| {
let z = &x + &y;
test::black_box(z);
});
}
#[bench]
fn bench_add_1_1(b: &mut Bencher) {
bench_add(b, 1, 1);
}
#[bench]
fn bench_add_10_10(b: &mut Bencher) {
bench_add(b, 10, 10);
}
#[bench]
fn bench_add_100_100(b: &mut Bencher) {
bench_add(b, 100, 100);
}
#[bench]
fn bench_add_1000_1000(b: &mut Bencher) {
bench_add(b, 1000, 1000);
}
#[bench]
fn bench_add_1000_10(b: &mut Bencher) {
bench_add(b, 1000, 10);
}
fn bench_mul(b: &mut Bencher, xs: usize, ys: usize) {
let mut rng = rand::thread_rng();
let x = rng.gen_int(xs * Limb::BITS);
let y = rng.gen_int(ys * Limb::BITS);
b.iter(|| {
let z = &x * &y;
test::black_box(z);
});
}
fn bench_pow(b: &mut Bencher, xs: usize, ys: usize) {
let mut rng = rand::thread_rng();
let x = rng.gen_int(xs * Limb::BITS);
let y: usize = rng.gen_range(0..ys);
b.iter(|| {
let z = &x.pow(y);
test::black_box(z);
});
}
fn bench_pow_mod(b: &mut Bencher, gs: usize, es: usize, ms: usize) {
let mut rng = rand::thread_rng();
let g = rng.gen_int(gs * Limb::BITS);
let e = rng.gen_uint(es * Limb::BITS);
let m = rng.gen_uint(ms * Limb::BITS);
b.iter(|| {
let z = g.pow_mod(&e, &m);
test::black_box(z);
});
}
#[bench]
fn bench_mul_1_1(b: &mut Bencher) {
bench_mul(b, 1, 1);
}
#[bench]
fn bench_mul_10_10(b: &mut Bencher) {
bench_mul(b, 10, 10);
}
#[bench]
fn bench_mul_2_20(b: &mut Bencher) {
bench_mul(b, 2, 20);
}
#[bench]
fn bench_mul_50_50(b: &mut Bencher) {
bench_mul(b, 50, 50);
}
#[bench]
fn bench_mul_5_50(b: &mut Bencher) {
bench_mul(b, 5, 50);
}
#[bench]
fn bench_mul_250_250(b: &mut Bencher) {
bench_mul(b, 250, 250);
}
#[bench]
fn bench_mul_1000_1000(b: &mut Bencher) {
bench_mul(b, 1000, 1000);
}
#[bench]
fn bench_mul_50_1500(b: &mut Bencher) {
bench_mul(b, 50, 1500);
}
fn bench_sqr(b: &mut Bencher, xs: usize) {
let mut rng = rand::thread_rng();
let x = rng.gen_int(xs * Limb::BITS);
b.iter(|| {
let z = x.square();
test::black_box(z);
});
}
#[bench]
fn bench_sqr_1(b: &mut Bencher) {
bench_sqr(b, 1);
}
#[bench]
fn bench_sqr_10(b: &mut Bencher) {
bench_sqr(b, 10);
}
#[bench]
fn bench_sqr_50(b: &mut Bencher) {
bench_sqr(b, 50);
}
#[bench]
fn bench_sqr_250(b: &mut Bencher) {
bench_sqr(b, 250);
}
#[bench]
fn bench_sqr_1000(b: &mut Bencher) {
bench_sqr(b, 1000);
}
#[bench]
fn bench_pow_1_1(b: &mut Bencher) {
bench_pow(b, 1, 1);
}
#[bench]
fn bench_pow_10_10(b: &mut Bencher) {
bench_pow(b, 10, 10);
}
#[bench]
fn bench_pow_2_20(b: &mut Bencher) {
bench_pow(b, 2, 20);
}
#[bench]
fn bench_pow_50_50(b: &mut Bencher) {
bench_pow(b, 50, 50);
}
#[bench]
fn bench_pow_5_50(b: &mut Bencher) {
bench_pow(b, 5, 50);
}
#[bench]
fn bench_pow_250_250(b: &mut Bencher) {
bench_pow(b, 250, 250);
}
#[bench]
fn bench_pow_50_1500(b: &mut Bencher) {
bench_pow(b, 50, 1500);
}
#[bench]
fn bench_pow_mod_50_50_50(b: &mut Bencher) {
bench_pow_mod(b, 50, 50, 50);
}
#[bench]
fn bench_factorial_100(b: &mut Bencher) {
b.iter(|| {
let mut i = Int::from(1);
for j in 2..100 {
i = i * j;
}
i = i * 100;
test::black_box(i);
});
}
#[bench]
fn bench_factorial_1000(b: &mut Bencher) {
b.iter(|| {
let mut i = Int::from(1);
for j in 2..1000 {
i = i * j;
}
i = i * 1000;
test::black_box(i);
});
}
fn bench_div(b: &mut Bencher, xs: usize, ys: usize) {
let mut rng = rand::thread_rng();
let x = rng.gen_int(xs * Limb::BITS);
let y = rng.gen_int(ys * Limb::BITS);
b.iter(|| {
let z = &x / &y;
test::black_box(z);
});
}
#[bench]
fn bench_div_1_1(b: &mut Bencher) {
bench_div(b, 1, 1);
}
#[bench]
fn bench_div_10_10(b: &mut Bencher) {
bench_div(b, 10, 10);
}
#[bench]
fn bench_div_20_2(b: &mut Bencher) {
bench_div(b, 20, 2);
}
#[bench]
fn bench_div_50_50(b: &mut Bencher) {
bench_div(b, 50, 50);
}
#[bench]
fn bench_div_50_5(b: &mut Bencher) {
bench_div(b, 50, 5);
}
#[bench]
fn bench_div_250_250(b: &mut Bencher) {
bench_div(b, 250, 250);
}
#[bench]
fn bench_div_1000_1000(b: &mut Bencher) {
bench_div(b, 1000, 1000);
}
fn bench_gcd(b: &mut Bencher, xs: usize, ys: usize) {
let mut rng = rand::thread_rng();
let x = rng.gen_int(xs * Limb::BITS);
let y = rng.gen_int(ys * Limb::BITS);
b.iter(|| {
let z = x.gcd(&y);
test::black_box(z);
});
}
#[bench]
fn bench_gcd_1_1(b: &mut Bencher) {
bench_gcd(b, 1, 1);
}
#[bench]
fn bench_gcd_10_10(b: &mut Bencher) {
bench_gcd(b, 10, 10);
}
#[bench]
fn bench_gcd_20_2(b: &mut Bencher) {
bench_gcd(b, 20, 2);
}
#[bench]
fn bench_gcd_50_50(b: &mut Bencher) {
bench_gcd(b, 50, 50);
}
#[bench]
fn bench_gcd_50_5(b: &mut Bencher) {
bench_gcd(b, 50, 5);
}
#[bench]
fn bench_gcd_250_150(b: &mut Bencher) {
bench_gcd(b, 250, 150);
}
#[bench]
fn bench_gcd_100_100(b: &mut Bencher) {
bench_gcd(b, 100, 100);
}
#[bench]
fn bench_gcd_100_10(b: &mut Bencher) {
bench_gcd(b, 100, 10);
}
#[bench]
fn bench_gcd_100_50(b: &mut Bencher) {
bench_gcd(b, 100, 50);
}
#[bench]
fn bench_rng_all_ones(b: &mut Bencher) {
let mut rng = rand::thread_rng();
let num_bits: usize = rng.gen_range(512..1024);
let mut bound = Int::from(1) << num_bits;
bound -= 1;
b.iter(|| {
let n = rng.gen_uint_below(&bound);
test::black_box(n);
});
}
}<|fim▁end|>
|
_ => (),
|
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|>
|
pub mod i32x4;
|
<|file_name|>primePalindromeCodeEval.py<|end_file_name|><|fim▁begin|>'''
primepalCodeEval.py - Solution to Problem Prime Palindrome (Category - Easy)
Copyright (C) 2013, Shubham Verma
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
'''
Description:
Write a program to determine the biggest prime palindrome under 1000.<|fim▁hole|>
Output sample:
Your program should print the largest palindrome on stdout. i.e.
929
'''
from math import sqrt
def isPrime(num):
if num%2 == 0:
return False
else:
for i in xrange(3, int(sqrt(num)), 2):
if num % i == 0:
return False
return True
if __name__ == '__main__':
for num in reversed(xrange(1000)):
if str(num) == str(num)[::-1] and isPrime(num):
print num
break<|fim▁end|>
|
Input sample:
None
|
<|file_name|>desktop-capturer.d.ts<|end_file_name|><|fim▁begin|>// https://github.com/electron/electron/blob/master/docs/api/desktop-capturer.md
declare namespace Electron {
/**
* This module can be used to get available sources that can be used to be captured with getUserMedia.
*/
interface DesktopCapturer {
/**
* Starts a request to get all desktop sources.
*
* Note: There is no guarantee that the size of source.thumbnail is always
* the same as the thumnbailSize in options. It also depends on the scale of the screen or window.
*/
getSources(options: DesktopCapturerOptions, callback: (error: Error, sources: DesktopCapturerSource[]) => any): void;
}
interface DesktopCapturerOptions {
/**
* The types of desktop sources to be captured.
*/
types?: ('screen' | 'window')[];
/**
* The suggested size that thumbnail should be scaled.
* Default: {width: 150, height: 150}
*/
thumbnailSize?: Dimension;
}
interface DesktopCapturerSource {
/**
* The id of the captured window or screen used in navigator.webkitGetUserMedia.<|fim▁hole|> /**
* The described name of the capturing screen or window.
* If the source is a screen, the name will be Entire Screen or Screen <index>;
* if it is a window, the name will be the window’s title.
*/
name: string;
/**
* A thumbnail image.
*/
thumbnail: NativeImage;
}
}<|fim▁end|>
|
* The format looks like window:XX or screen:XX where XX is a random generated number.
*/
id: string;
|
<|file_name|>encoder.rs<|end_file_name|><|fim▁begin|>#![cfg_attr(all(feature = "unstable", test), feature(test))]
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate arthas_derive;
extern crate rand;
extern crate arthas;
#[macro_use]
extern crate maplit;
extern crate env_logger;
extern crate serde_json;
pub mod model;
pub mod common;
use std::collections::HashMap;
use arthas::to_value;
use arthas::encoder::{encode, decode};
use arthas::traits::get_unique_int_str;
use common::revert;
use model::*;
#[test]
fn test_atomic() {
let string_one = "This is string one".to_owned();
let string_two = "This is string two".to_owned();
let decoded = to_value(Atomic {
string_one: string_one.clone(),
string_two: string_two.clone(),
hash_map: HashMap::new(),
});
let encoded = to_value(hashmap!{
get_unique_int_str("string_one") => to_value(string_one.clone()),
get_unique_int_str("string_two") => to_value(string_two.clone()),
get_unique_int_str("hash_map") => to_value(HashMap::<String, usize>::new())
});
assert_eq!(encode(&decoded, &Atomic::get_field_int_map()), encoded);
assert_eq!(decode(&encoded, &revert(Atomic::get_field_int_map())),
decoded);
}
#[test]
fn test_vec() {
let title = "This is title".to_owned();
let content = "This is content".to_owned();
let comment_title = "This is comment title".to_owned();
let comment_content = "This is comment content".to_owned();
let decoded = to_value(Article {
_id: String::new(),
title: title.clone(),
content: content.clone(),
day_to_views: HashMap::new(),
views: 0,
comments: vec![Comment {
title: comment_title.clone(),
content: comment_content.clone(),
}],
});
let encoded = to_value(hashmap!{
get_unique_int_str("_id") => to_value(""),
get_unique_int_str("title") => to_value(title.clone()),
get_unique_int_str("content") => to_value(content.clone()),
get_unique_int_str("day_to_views") => to_value(HashMap::<String, usize>::new()),
get_unique_int_str("views") => to_value(0),
get_unique_int_str("comments.[].title") => to_value(vec![comment_title.clone()]),
get_unique_int_str("comments.[].content") => to_value(vec![comment_content.clone()])
});
assert_eq!(encode(&decoded, &Article::get_field_int_map()), encoded);
assert_eq!(decode(&encoded, &revert(Article::get_field_int_map())),
decoded);
}
#[test]
fn test_hashmap() {
    // Round-trips a struct holding a HashMap of nested structs. Each nested
    // field becomes one entry keyed by "day_to_comments.{}.<field>" whose
    // value is a map from the original keys to that field's values.
    let day_26 = "2016-10-26".to_owned();
    let day_27 = "2016-10-27".to_owned();
    let title_26 = "This is day 26 title".to_owned();
    let title_27 = "This is day 27 title".to_owned();
    let content_26 = "This is day 26 content".to_owned();
    let content_27 = "This is day 27 content".to_owned();
    let decoded = to_value(Comments {
        day_to_comments: hashmap!{
            day_26.clone() => Comment {
                title: title_26.clone(),
                content: content_26.clone()
            },
            day_27.clone() => Comment {
                title: title_27.clone(),
                content: content_27.clone()
            }
        },
    });
    // Expected wire form: one int-keyed entry per leaf field, each holding a
    // day -> value map mirroring the original HashMap's keys.
    let encoded = to_value(hashmap!{
        get_unique_int_str("day_to_comments.{}.title") => to_value(hashmap!{
            day_26.clone() => title_26.clone(),
            day_27.clone() => title_27.clone()
        }),
        get_unique_int_str("day_to_comments.{}.content") => to_value(hashmap!{
            day_26.clone() => content_26.clone(),
            day_27.clone() => content_27.clone()
        })
    });
    // encode and decode must be exact inverses under the field<->int maps.
    assert_eq!(encode(&decoded, &Comments::get_field_int_map()), encoded);
    assert_eq!(decode(&encoded, &revert(Comments::get_field_int_map())),
               decoded);
}
#[test]
fn test_blog() {
    // Exercises the full nesting matrix: struct -> struct -> HashMap ->
    // struct -> Vec -> struct. Every leaf becomes one int-keyed entry whose
    // value mirrors the map ("{}") and vector ("[]") layers above it.
    //
    // NOTE(review): the expected map previously lost its
    // "articles.day_to_articles.{}.title" entries to an inline-fill marker;
    // they are restored here so the expected value is complete.
    let comment_title = "This is comment title".to_owned();
    let comment_content = "This is comment content".to_owned();
    let day_26 = "2016-10-26".to_owned();
    let day_27 = "2016-10-27".to_owned();
    let title_26 = "This is day 26 title".to_owned();
    let title_27 = "This is day 27 title".to_owned();
    let content_26 = "This is day 26 content".to_owned();
    let content_27 = "This is day 27 content".to_owned();
    let comment_title_26 = "This is day 26 comment title".to_owned();
    let comment_title_27 = "This is day 27 comment title".to_owned();
    let comment_content_26 = "This is day 26 comment content".to_owned();
    let comment_content_27 = "This is day 27 comment content".to_owned();
    let decoded = to_value(Blog {
        articles: Articles {
            day_to_articles: hashmap!{
                day_26.clone() => Article {
                    _id: String::new(),
                    title: title_26.clone(),
                    content: content_26.clone(),
                    day_to_views: HashMap::new(),
                    views: 0,
                    comments: vec![Comment {
                        title: comment_title.clone(),
                        content: comment_content.clone()
                    }]
                },
                day_27.clone() => Article {
                    _id: String::new(),
                    title: title_27.clone(),
                    content: content_27.clone(),
                    day_to_views: HashMap::new(),
                    views: 0,
                    comments: vec![Comment {
                        title: comment_title.clone(),
                        content: comment_content.clone()
                    }]
                }
            },
        },
        comments: Comments {
            day_to_comments: hashmap!{
                day_26.clone() => Comment {
                    title: comment_title_26.clone(),
                    content: comment_content_26.clone()
                },
                day_27.clone() => Comment {
                    title: comment_title_27.clone(),
                    content: comment_content_27.clone()
                }
            },
        },
    });
    // Expected wire form: one entry per leaf, each a day-keyed map; leaves
    // under the comments Vec additionally wrap their values in a Vec.
    let encoded = to_value(hashmap!{
        get_unique_int_str("articles.day_to_articles.{}._id") => to_value(hashmap!{
            day_26.clone() => String::new(),
            day_27.clone() => String::new()
        }),
        get_unique_int_str("articles.day_to_articles.{}.title") => to_value(hashmap!{
            day_26.clone() => title_26.clone(),
            day_27.clone() => title_27.clone()
        }),
        get_unique_int_str("articles.day_to_articles.{}.content") => to_value(hashmap!{
            day_26.clone() => content_26.clone(),
            day_27.clone() => content_27.clone()
        }),
        get_unique_int_str("articles.day_to_articles.{}.day_to_views") => to_value(hashmap!{
            day_26.clone() => HashMap::<String, usize>::new(),
            day_27.clone() => HashMap::<String, usize>::new()
        }),
        get_unique_int_str("articles.day_to_articles.{}.views") => to_value(hashmap!{
            day_26.clone() => 0,
            day_27.clone() => 0
        }),
        get_unique_int_str("articles.day_to_articles.{}.comments.[].title") => to_value(hashmap!{
            day_26.clone() => vec![comment_title.clone()],
            day_27.clone() => vec![comment_title.clone()]
        }),
        get_unique_int_str("articles.day_to_articles.{}.comments.[].content") => to_value(hashmap!{
            day_26.clone() => vec![comment_content.clone()],
            day_27.clone() => vec![comment_content.clone()]
        }),
        get_unique_int_str("comments.day_to_comments.{}.title") => to_value(hashmap!{
            day_26.clone() => comment_title_26.clone(),
            day_27.clone() => comment_title_27.clone()
        }),
        get_unique_int_str("comments.day_to_comments.{}.content") => to_value(hashmap!{
            day_26.clone() => comment_content_26.clone(),
            day_27.clone() => comment_content_27.clone()
        })
    });
    // encode and decode must be exact inverses under the field<->int maps.
    assert_eq!(encode(&decoded, &Blog::get_field_int_map()), encoded);
    assert_eq!(decode(&encoded, &revert(Blog::get_field_int_map())),
               decoded);
}
#[cfg(all(feature = "unstable", test))]
mod benches {
    //! Micro-benchmarks for the field<->int codec (nightly-only).
    //! NOTE(review): a stray inline-fill marker fused onto the module's
    //! closing brace has been removed.
    extern crate test;
    use arthas::to_value;
    use arthas::traits::get_unique_int_str;
    use arthas::encoder::{encode, decode};
    use model::*;
    use std::collections::HashMap;
    use common::revert;

    /// Measures encoding a representative `Article` value.
    #[bench]
    fn bench_encode(b: &mut test::Bencher) {
        let title = "This is title".to_owned();
        let content = "This is content".to_owned();
        let comment_title = "This is comment title".to_owned();
        let comment_content = "This is comment content".to_owned();
        // Build the lookup table and input once; only encode() runs in the loop.
        let field_int_map = Article::get_field_int_map();
        let value = to_value(Article {
            _id: String::new(),
            title: title.clone(),
            content: content.clone(),
            day_to_views: HashMap::new(),
            views: 0,
            comments: vec![Comment {
                title: comment_title.clone(),
                content: comment_content.clone(),
            }],
        });
        b.iter(|| encode(&value, &field_int_map))
    }

    /// Measures decoding the int-keyed wire form back into field names.
    #[bench]
    fn bench_decode(b: &mut test::Bencher) {
        let title = "This is title".to_owned();
        let content = "This is content".to_owned();
        let comment_title = "This is comment title".to_owned();
        let comment_content = "This is comment content".to_owned();
        // Reverse map (int -> field) built once outside the measured loop.
        let int_field_map = revert(Article::get_field_int_map());
        let value = to_value(hashmap!{
            get_unique_int_str("_id") => to_value(""),
            get_unique_int_str("title") => to_value(title.clone()),
            get_unique_int_str("content") => to_value(content.clone()),
            get_unique_int_str("day_to_views") => to_value(HashMap::<String, usize>::new()),
            get_unique_int_str("views") => to_value(0),
            get_unique_int_str("comments.[].title") => to_value(vec![comment_title.clone()]),
            get_unique_int_str("comments.[].content") => to_value(vec![comment_content.clone()])
        });
        b.iter(|| decode(&value, &int_field_map))
    }
}
|
day_26.clone() => title_26.clone(),
day_27.clone() => title_27.clone()
}),
|
<|file_name|>sizzle-src.js<|end_file_name|><|fim▁begin|>/**
* Sizzle Engine Support v2.2.0
* http://rightjs.org/plugins/sizzle
*
* Copyright (C) 2009-2011 Nikolay Nemshilov
*/
/**
* sizzle initialization script
*
* Copyright (C) 2010-2011 Nikolay Nemshilov
*/
// Plugin descriptor: records the sizzle adapter version on RightJS.
RightJS.Sizzle = {
	version: '2.2.0'
};
/*!
* Sizzle CSS Selector Engine - v1.0
* Copyright 2009, The Dojo Foundation
* Released under the MIT, BSD, and GPL Licenses.
* More information: http://sizzlejs.com/
*/
(function(){
// Module-wide state: the selector tokenizer regexp, a monotonically
// increasing pass id used to tag cache entries (`done`), and flags used
// for duplicate detection while sorting result sets.
var chunker = /((?:\((?:\([^()]+\)|[^()]+)+\)|\[(?:\[[^\[\]]*\]|['"][^'"]*['"]|[^\[\]'"]+)+\]|\\.|[^ >+~,(\[\\]+)+|[>+~])(\s*,\s*)?((?:.|\r|\n)*)/g,
	done = 0,
	toString = Object.prototype.toString,
	hasDuplicate = false,
	baseHasDuplicate = true;

// Here we check if the JavaScript engine is using some sort of
// optimization where it does not always call our comparision
// function. If that is the case, discard the hasDuplicate value.
// Thus far that includes Google Chrome.
[0, 0].sort(function(){
	baseHasDuplicate = false;
	return 0;
});
// Main selector entry point. Tokenizes `selector` with `chunker`, resolves
// the parts (right-to-left for plain selectors, left-to-right when a
// positional pseudo like :eq/:first is present), then appends matching
// elements to `results`. `seed` optionally restricts matching to a
// pre-collected set of candidate nodes.
var Sizzle = function(selector, context, results, seed) {
	results = results || [];
	context = context || document;

	var origContext = context;

	// Only element and document nodes can be queried against.
	if ( context.nodeType !== 1 && context.nodeType !== 9 ) {
		return [];
	}

	if ( !selector || typeof selector !== "string" ) {
		return results;
	}

	var parts = [], m, set, checkSet, extra, prune = true, contextXML = Sizzle.isXML(context),
		soFar = selector, ret, cur, pop, i;

	// Reset the position of the chunker regexp (start from head)
	do {
		chunker.exec("");
		m = chunker.exec(soFar);

		if ( m ) {
			soFar = m[3];

			parts.push( m[1] );

			if ( m[2] ) {
				// A comma was found: stash the remainder for a second pass.
				extra = m[3];
				break;
			}
		}
	} while ( m );

	if ( parts.length > 1 && origPOS.exec( selector ) ) {
		// Positional selectors force left-to-right evaluation via posProcess.
		if ( parts.length === 2 && Expr.relative[ parts[0] ] ) {
			set = posProcess( parts[0] + parts[1], context );
		} else {
			set = Expr.relative[ parts[0] ] ?
				[ context ] :
				Sizzle( parts.shift(), context );

			while ( parts.length ) {
				selector = parts.shift();

				if ( Expr.relative[ selector ] ) {
					selector += parts.shift();
				}

				set = posProcess( selector, set );
			}
		}
	} else {
		// Take a shortcut and set the context if the root selector is an ID
		// (but not if it'll be faster if the inner selector is an ID)
		if ( !seed && parts.length > 1 && context.nodeType === 9 && !contextXML &&
				Expr.match.ID.test(parts[0]) && !Expr.match.ID.test(parts[parts.length - 1]) ) {
			ret = Sizzle.find( parts.shift(), context, contextXML );
			context = ret.expr ? Sizzle.filter( ret.expr, ret.set )[0] : ret.set[0];
		}

		if ( context ) {
			// Resolve the right-most part first, then walk the remaining
			// parts leftwards via the relative-combinator handlers.
			ret = seed ?
				{ expr: parts.pop(), set: makeArray(seed) } :
				Sizzle.find( parts.pop(), parts.length === 1 && (parts[0] === "~" || parts[0] === "+") && context.parentNode ? context.parentNode : context, contextXML );
			set = ret.expr ? Sizzle.filter( ret.expr, ret.set ) : ret.set;

			if ( parts.length > 0 ) {
				checkSet = makeArray(set);
			} else {
				prune = false;
			}

			while ( parts.length ) {
				cur = parts.pop();
				pop = cur;

				if ( !Expr.relative[ cur ] ) {
					cur = "";
				} else {
					pop = parts.pop();
				}

				if ( pop == null ) {
					pop = context;
				}

				Expr.relative[ cur ]( checkSet, pop, contextXML );
			}
		} else {
			checkSet = parts = [];
		}
	}

	if ( !checkSet ) {
		checkSet = set;
	}

	if ( !checkSet ) {
		Sizzle.error( cur || selector );
	}

	if ( toString.call(checkSet) === "[object Array]" ) {
		// checkSet entries are booleans/elements; keep the element from the
		// original `set` wherever the corresponding check survived.
		if ( !prune ) {
			results.push.apply( results, checkSet );

		} else if ( context && context.nodeType === 1 ) {
			for ( i = 0; checkSet[i] != null; i++ ) {
				if ( checkSet[i] && (checkSet[i] === true || checkSet[i].nodeType === 1 && Sizzle.contains(context, checkSet[i])) ) {
					results.push( set[i] );
				}
			}

		} else {
			for ( i = 0; checkSet[i] != null; i++ ) {
				if ( checkSet[i] && checkSet[i].nodeType === 1 ) {
					results.push( set[i] );
				}
			}
		}

	} else {
		makeArray( checkSet, results );
	}

	if ( extra ) {
		// Process the post-comma selector against the original context and
		// de-duplicate the merged result set.
		Sizzle( extra, origContext, results, seed );
		Sizzle.uniqueSort( results );
	}

	return results;
};
// Sorts `results` into document order and removes duplicates in place.
// Relies on the comparator setting the module-level `hasDuplicate` flag
// whenever it sees two identical entries.
Sizzle.uniqueSort = function(results){
	if ( sortOrder ) {
		hasDuplicate = baseHasDuplicate;
		results.sort(sortOrder);

		if ( hasDuplicate ) {
			for ( var i = 1; i < results.length; i++ ) {
				if ( results[i] === results[i-1] ) {
					results.splice(i--, 1);
				}
			}
		}
	}

	return results;
};
// Filters a pre-collected element set against a selector expression
// (delegates to the main entry point with `set` as the seed).
Sizzle.matches = function(expr, set){
	return Sizzle(expr, null, null, set);
};
// Locates an initial candidate set for the right-most simple selector in
// `expr`, trying the fast lookups listed in Expr.order (ID, NAME, TAG)
// first. Returns { set, expr } with the consumed token stripped from expr.
Sizzle.find = function(expr, context, isXML){
	var set;

	if ( !expr ) {
		return [];
	}

	for ( var i = 0, l = Expr.order.length; i < l; i++ ) {
		var type = Expr.order[i], match;

		if ( (match = Expr.leftMatch[ type ].exec( expr )) ) {
			var left = match[1];
			match.splice(1,1);

			// Ignore tokens whose preceding text ends in an escape character.
			if ( left.substr( left.length - 1 ) !== "\\" ) {
				match[1] = (match[1] || "").replace(/\\/g, "");
				set = Expr.find[ type ]( match, context, isXML );

				if ( set != null ) {
					expr = expr.replace( Expr.match[ type ], "" );
					break;
				}
			}
		}
	}

	if ( !set ) {
		// No fast path matched: fall back to every element in the context.
		set = context.getElementsByTagName("*");
	}

	return {set: set, expr: expr};
};
// Filters `set` down to the elements matching `expr`. When `inplace` is
// set, failing entries are overwritten with `false` instead of removed;
// `not` inverts every filter's verdict (used by :not()).
Sizzle.filter = function(expr, set, inplace, not){
	var old = expr, result = [], curLoop = set, match, anyFound,
		isXMLFilter = set && set[0] && Sizzle.isXML(set[0]);

	while ( expr && set.length ) {
		for ( var type in Expr.filter ) {
			if ( (match = Expr.leftMatch[ type ].exec( expr )) != null && match[2] ) {
				var filter = Expr.filter[ type ], found, item, left = match[1];
				anyFound = false;

				match.splice(1,1);

				// A trailing backslash means the token was escaped; skip it.
				if ( left.substr( left.length - 1 ) === "\\" ) {
					continue;
				}

				if ( curLoop === result ) {
					result = [];
				}

				// preFilter may normalize the match, resolve it entirely
				// (falsy => done), or ask to skip this filter type (true).
				if ( Expr.preFilter[ type ] ) {
					match = Expr.preFilter[ type ]( match, curLoop, inplace, result, not, isXMLFilter );

					if ( !match ) {
						anyFound = found = true;

					} else if ( match === true ) {
						continue;
					}
				}

				if ( match ) {
					for ( var i = 0; (item = curLoop[i]) != null; i++ ) {
						if ( item ) {
							found = filter( item, match, i, curLoop );
							var pass = not ^ !!found;

							if ( inplace && found != null ) {
								if ( pass ) {
									anyFound = true;

								} else {
									curLoop[i] = false;
								}

							} else if ( pass ) {
								result.push( item );
								anyFound = true;
							}
						}
					}
				}

				if ( found !== undefined ) {
					if ( !inplace ) {
						curLoop = result;
					}

					expr = expr.replace( Expr.match[ type ], "" );

					if ( !anyFound ) {
						return [];
					}

					break;
				}
			}
		}

		// Improper expression
		if ( expr === old ) {
			if ( anyFound == null ) {
				Sizzle.error( expr );

			} else {
				break;
			}
		}

		old = expr;
	}

	return curLoop;
};
// Raises a selector syntax error; callers pass the unparsable fragment.
Sizzle.error = function( msg ) {
	throw "Syntax error, unrecognized expression: " + msg;
};
var Expr = Sizzle.selectors = {
order: [ "ID", "NAME", "TAG" ],
match: {
ID: /#((?:[\w\u00c0-\uFFFF\-]|\\.)+)/,
CLASS: /\.((?:[\w\u00c0-\uFFFF\-]|\\.)+)/,
NAME: /\[name=['"]*((?:[\w\u00c0-\uFFFF\-]|\\.)+)['"]*\]/,
ATTR: /\[\s*((?:[\w\u00c0-\uFFFF\-]|\\.)+)\s*(?:(\S?=)\s*(['"]*)(.*?)\3|)\s*\]/,
TAG: /^((?:[\w\u00c0-\uFFFF\*\-]|\\.)+)/,
CHILD: /:(only|nth|last|first)-child(?:\((even|odd|[\dn+\-]*)\))?/,
POS: /:(nth|eq|gt|lt|first|last|even|odd)(?:\((\d*)\))?(?=[^\-]|$)/,
PSEUDO: /:((?:[\w\u00c0-\uFFFF\-]|\\.)+)(?:\((['"]?)((?:\([^\)]+\)|[^\(\)]*)+)\2\))?/
},
leftMatch: {},
attrMap: {
"class": "className",
"for": "htmlFor"
},
attrHandle: {
href: function(elem){
return elem.getAttribute("href");
}
},
relative: {
"+": function(checkSet, part){
var isPartStr = typeof part === "string",
isTag = isPartStr && !/\W/.test(part),
isPartStrNotTag = isPartStr && !isTag;
if ( isTag ) {
part = part.toLowerCase();
}
for ( var i = 0, l = checkSet.length, elem; i < l; i++ ) {
if ( (elem = checkSet[i]) ) {
while ( (elem = elem.previousSibling) && elem.nodeType !== 1 ) {}
checkSet[i] = isPartStrNotTag || elem && elem.nodeName.toLowerCase() === part ?
elem || false :
elem === part;
}
}
if ( isPartStrNotTag ) {
Sizzle.filter( part, checkSet, true );
}
},
">": function(checkSet, part){
var isPartStr = typeof part === "string",
elem, i = 0, l = checkSet.length;
if ( isPartStr && !/\W/.test(part) ) {
part = part.toLowerCase();
for ( ; i < l; i++ ) {
elem = checkSet[i];
if ( elem ) {
var parent = elem.parentNode;
checkSet[i] = parent.nodeName.toLowerCase() === part ? parent : false;
}
}
} else {
for ( ; i < l; i++ ) {
elem = checkSet[i];
if ( elem ) {
checkSet[i] = isPartStr ?
elem.parentNode :
elem.parentNode === part;
}
}
if ( isPartStr ) {
Sizzle.filter( part, checkSet, true );
}
}
},
"": function(checkSet, part, isXML){
var doneName = done++, checkFn = dirCheck, nodeCheck;
if ( typeof part === "string" && !/\W/.test(part) ) {
part = part.toLowerCase();
nodeCheck = part;
checkFn = dirNodeCheck;
}
checkFn("parentNode", part, doneName, checkSet, nodeCheck, isXML);
},
"~": function(checkSet, part, isXML){
var doneName = done++, checkFn = dirCheck, nodeCheck;
if ( typeof part === "string" && !/\W/.test(part) ) {
part = part.toLowerCase();
nodeCheck = part;
checkFn = dirNodeCheck;
}
checkFn("previousSibling", part, doneName, checkSet, nodeCheck, isXML);
}
},
find: {
ID: function(match, context, isXML){
if ( typeof context.getElementById !== "undefined" && !isXML ) {
var m = context.getElementById(match[1]);
// Check parentNode to catch when Blackberry 4.6 returns
// nodes that are no longer in the document #6963
return m && m.parentNode ? [m] : [];
}
},
NAME: function(match, context){
if ( typeof context.getElementsByName !== "undefined" ) {
var ret = [], results = context.getElementsByName(match[1]);
for ( var i = 0, l = results.length; i < l; i++ ) {
if ( results[i].getAttribute("name") === match[1] ) {
ret.push( results[i] );
}
}
return ret.length === 0 ? null : ret;
}
},
TAG: function(match, context){
return context.getElementsByTagName(match[1]);
}
},
preFilter: {
CLASS: function(match, curLoop, inplace, result, not, isXML){
match = " " + match[1].replace(/\\/g, "") + " ";
if ( isXML ) {
return match;
}
for ( var i = 0, elem; (elem = curLoop[i]) != null; i++ ) {
if ( elem ) {
if ( not ^ (elem.className && (" " + elem.className + " ").replace(/[\t\n]/g, " ").indexOf(match) >= 0) ) {
if ( !inplace ) {
result.push( elem );
}
} else if ( inplace ) {
curLoop[i] = false;
}
}
}
return false;
},
ID: function(match){
return match[1].replace(/\\/g, "");
},
TAG: function(match, curLoop){
return match[1].toLowerCase();
},
CHILD: function(match){
if ( match[1] === "nth" ) {
// parse equations like 'even', 'odd', '5', '2n', '3n+2', '4n-1', '-n+6'
var test = /(-?)(\d*)n((?:\+|-)?\d*)/.exec(
match[2] === "even" && "2n" || match[2] === "odd" && "2n+1" ||
!/\D/.test( match[2] ) && "0n+" + match[2] || match[2]);
// calculate the numbers (first)n+(last) including if they are negative
match[2] = (test[1] + (test[2] || 1)) - 0;
match[3] = test[3] - 0;
}
// TODO: Move to normal caching system
match[0] = done++;
return match;
},
ATTR: function(match, curLoop, inplace, result, not, isXML){
var name = match[1].replace(/\\/g, "");
if ( !isXML && Expr.attrMap[name] ) {
match[1] = Expr.attrMap[name];
}
if ( match[2] === "~=" ) {
match[4] = " " + match[4] + " ";
}
return match;
},
PSEUDO: function(match, curLoop, inplace, result, not){
if ( match[1] === "not" ) {
// If we're dealing with a complex expression, or a simple one
if ( ( chunker.exec(match[3]) || "" ).length > 1 || /^\w/.test(match[3]) ) {
match[3] = Sizzle(match[3], null, null, curLoop);
} else {
var ret = Sizzle.filter(match[3], curLoop, inplace, true ^ not);
if ( !inplace ) {
result.push.apply( result, ret );
}
return false;
}
} else if ( Expr.match.POS.test( match[0] ) || Expr.match.CHILD.test( match[0] ) ) {
return true;
}
return match;
},
POS: function(match){
match.unshift( true );
return match;
}
},
filters: {
enabled: function(elem){
return elem.disabled === false && elem.type !== "hidden";
},
disabled: function(elem){
return elem.disabled === true;
},
checked: function(elem){
return elem.checked === true;
},
selected: function(elem){
// Accessing this property makes selected-by-default
// options in Safari work properly
elem.parentNode.selectedIndex;
return elem.selected === true;
},
parent: function(elem){
return !!elem.firstChild;
},
empty: function(elem){
return !elem.firstChild;
},
has: function(elem, i, match){
return !!Sizzle( match[3], elem ).length;
},
header: function(elem){
return (/h\d/i).test( elem.nodeName );
},
text: function(elem){
return "text" === elem.type;
},
radio: function(elem){
return "radio" === elem.type;
},
checkbox: function(elem){
return "checkbox" === elem.type;
},
file: function(elem){
return "file" === elem.type;
},
password: function(elem){
return "password" === elem.type;
},
submit: function(elem){
return "submit" === elem.type;
},
image: function(elem){
return "image" === elem.type;
},
reset: function(elem){
return "reset" === elem.type;
},
button: function(elem){
return "button" === elem.type || elem.nodeName.toLowerCase() === "button";
},
input: function(elem){
return (/input|select|textarea|button/i).test(elem.nodeName);
}
},
setFilters: {
first: function(elem, i){
return i === 0;
},
last: function(elem, i, match, array){
return i === array.length - 1;
},
even: function(elem, i){
return i % 2 === 0;
},
odd: function(elem, i){
return i % 2 === 1;
},
lt: function(elem, i, match){
return i < match[3] - 0;
},
gt: function(elem, i, match){
return i > match[3] - 0;
},
nth: function(elem, i, match){
return match[3] - 0 === i;
},
eq: function(elem, i, match){
return match[3] - 0 === i;
}
},
filter: {
PSEUDO: function(elem, match, i, array){
var name = match[1], filter = Expr.filters[ name ];<|fim▁hole|> return filter( elem, i, match, array );
} else if ( name === "contains" ) {
return (elem.textContent || elem.innerText || Sizzle.getText([ elem ]) || "").indexOf(match[3]) >= 0;
} else if ( name === "not" ) {
var not = match[3];
for ( var j = 0, l = not.length; j < l; j++ ) {
if ( not[j] === elem ) {
return false;
}
}
return true;
} else {
Sizzle.error( "Syntax error, unrecognized expression: " + name );
}
},
// Handles :first-child/:last-child/:only-child/:nth-child. For nth,
// sibling positions are counted once per parent per pass and cached via
// sizcache/nodeIndex.
CHILD: function(elem, match){
	var type = match[1], node = elem;

	switch (type) {
		case 'only':
		case 'first':
			// Any preceding element sibling disqualifies first/only.
			while ( (node = node.previousSibling) ) {
				if ( node.nodeType === 1 ) {
					return false;
				}
			}

			if ( type === "first" ) {
				return true;
			}

			// 'only' falls through: also require no following siblings.
			node = elem;

		case 'last':
			while ( (node = node.nextSibling) ) {
				if ( node.nodeType === 1 ) {
					return false;
				}
			}

			return true;

		case 'nth':
			// match[2]/match[3] are the a/b of an+b (precomputed by preFilter).
			var first = match[2], last = match[3];

			if ( first === 1 && last === 0 ) {
				return true;
			}

			var doneName = match[0],
				parent = elem.parentNode;

			// Number the element children of this parent once per pass.
			if ( parent && (parent.sizcache !== doneName || !elem.nodeIndex) ) {
				var count = 0;

				for ( node = parent.firstChild; node; node = node.nextSibling ) {
					if ( node.nodeType === 1 ) {
						node.nodeIndex = ++count;
					}
				}

				parent.sizcache = doneName;
			}

			var diff = elem.nodeIndex - last;

			if ( first === 0 ) {
				return diff === 0;
			} else {
				return ( diff % first === 0 && diff / first >= 0 );
			}
	}
},
ID: function(elem, match){
return elem.nodeType === 1 && elem.getAttribute("id") === match;
},
TAG: function(elem, match){
return (match === "*" && elem.nodeType === 1) || elem.nodeName.toLowerCase() === match;
},
CLASS: function(elem, match){
return (" " + (elem.className || elem.getAttribute("class")) + " ")
.indexOf( match ) > -1;
},
ATTR: function(elem, match){
var name = match[1],
result = Expr.attrHandle[ name ] ?
Expr.attrHandle[ name ]( elem ) :
elem[ name ] != null ?
elem[ name ] :
elem.getAttribute( name ),
value = result + "",
type = match[2],
check = match[4];
return result == null ?
type === "!=" :
type === "=" ?
value === check :
type === "*=" ?
value.indexOf(check) >= 0 :
type === "~=" ?
(" " + value + " ").indexOf(check) >= 0 :
!check ?
value && result !== false :
type === "!=" ?
value !== check :
type === "^=" ?
value.indexOf(check) === 0 :
type === "$=" ?
value.substr(value.length - check.length) === check :
type === "|=" ?
value === check || value.substr(0, check.length + 1) === check + "-" :
false;
},
POS: function(elem, match, i, array){
var name = match[2], filter = Expr.setFilters[ name ];
if ( filter ) {
return filter( elem, i, match, array );
}
}
}
};
// Keep a reference to the original positional matcher, then extend every
// matcher so it will not match inside [] or () groups, and build the
// leftMatch variants that also capture all text preceding the token.
var origPOS = Expr.match.POS,
	fescape = function(all, num){
		// Shift backreference numbers up by one to account for the extra
		// leading capture group added to leftMatch.
		return "\\" + (num - 0 + 1);
	};

for ( var type in Expr.match ) {
	Expr.match[ type ] = new RegExp( Expr.match[ type ].source + (/(?![^\[]*\])(?![^\(]*\))/.source) );
	Expr.leftMatch[ type ] = new RegExp( /(^(?:.|\r|\n)*?)/.source + Expr.match[ type ].source.replace(/\\(\d+)/g, fescape) );
}
// Copy an array-like into a true Array. When `results` is supplied, the
// copied items are appended onto it and `results` itself is returned;
// otherwise a fresh array is returned.
var makeArray = function(array, results) {
	var copied = Array.prototype.slice.call( array, 0 );

	if ( !results ) {
		return copied;
	}

	results.push.apply( results, copied );
	return results;
};
// Perform a simple check to determine if the browser is capable of
// converting a NodeList to an array using builtin methods.
// Also verifies that the returned array holds DOM nodes
// (which is not the case in the Blackberry browser)
try {
	Array.prototype.slice.call( document.documentElement.childNodes, 0 )[0].nodeType;

// Provide a fallback method if it does not work
} catch(e){
	makeArray = function(array, results) {
		var ret = results || [], i = 0;

		if ( toString.call(array) === "[object Array]" ) {
			Array.prototype.push.apply( ret, array );

		} else {
			// Array-likes expose a numeric length; other collections are
			// walked until the first missing index.
			if ( typeof array.length === "number" ) {
				for ( var l = array.length; i < l; i++ ) {
					ret.push( array[i] );
				}

			} else {
				for ( ; array[i]; i++ ) {
					ret.push( array[i] );
				}
			}
		}

		return ret;
	};
}
var sortOrder, siblingCheck;
if ( document.documentElement.compareDocumentPosition ) {
sortOrder = function( a, b ) {
if ( a === b ) {
hasDuplicate = true;
return 0;
}
if ( !a.compareDocumentPosition || !b.compareDocumentPosition ) {
return a.compareDocumentPosition ? -1 : 1;
}
return a.compareDocumentPosition(b) & 4 ? -1 : 1;
};
} else {
sortOrder = function( a, b ) {
var ap = [], bp = [], aup = a.parentNode, bup = b.parentNode,
cur = aup, al, bl;
// The nodes are identical, we can exit early
if ( a === b ) {
hasDuplicate = true;
return 0;
// If the nodes are siblings (or identical) we can do a quick check
} else if ( aup === bup ) {
return siblingCheck( a, b );
// If no parents were found then the nodes are disconnected
} else if ( !aup ) {
return -1;
} else if ( !bup ) {
return 1;
}
// Otherwise they're somewhere else in the tree so we need
// to build up a full list of the parentNodes for comparison
while ( cur ) {
ap.unshift( cur );
cur = cur.parentNode;
}
cur = bup;
while ( cur ) {
bp.unshift( cur );
cur = cur.parentNode;
}
al = ap.length;
bl = bp.length;
// Start walking down the tree looking for a discrepancy
for ( var i = 0; i < al && i < bl; i++ ) {
if ( ap[i] !== bp[i] ) {
return siblingCheck( ap[i], bp[i] );
}
}
// We ended someplace up the tree so do a sibling check
return i === al ?
siblingCheck( a, bp[i], -1 ) :
siblingCheck( ap[i], b, 1 );
};
siblingCheck = function( a, b, ret ) {
if ( a === b ) {
return ret;
}
var cur = a.nextSibling;
while ( cur ) {
if ( cur === b ) {
return -1;
}
cur = cur.nextSibling;
}
return 1;
};
}
// Utility function for retrieving the text value of an array of DOM nodes.
// Text (3) and CDATA (4) nodes contribute their value directly; comment
// nodes (8) are skipped; any other node type is recursed into.
Sizzle.getText = function( elems ) {
	var text = "", node, i = 0;

	for ( ; elems[i]; i++ ) {
		node = elems[i];

		if ( node.nodeType === 3 || node.nodeType === 4 ) {
			text += node.nodeValue;
		} else if ( node.nodeType !== 8 ) {
			text += Sizzle.getText( node.childNodes );
		}
	}

	return text;
};
// Check to see if the browser returns elements by name when
// querying by getElementById (and provide a workaround)
(function(){
// We're going to inject a fake input element with a specified name
var form = document.createElement("div"),
id = "script" + (new Date()).getTime();
form.innerHTML = "<a name='" + id + "'/>";
// Inject it into the root element, check its status, and remove it quickly
var root = document.documentElement;
root.insertBefore( form, root.firstChild );
// The workaround has to do additional checks after a getElementById
// Which slows things down for other browsers (hence the branching)
if ( document.getElementById( id ) ) {
Expr.find.ID = function(match, context, isXML){
if ( typeof context.getElementById !== "undefined" && !isXML ) {
var m = context.getElementById(match[1]);
return m ? m.id === match[1] || typeof m.getAttributeNode !== "undefined" && m.getAttributeNode("id").nodeValue === match[1] ? [m] : undefined : [];
}
};
Expr.filter.ID = function(elem, match){
var node = typeof elem.getAttributeNode !== "undefined" && elem.getAttributeNode("id");
return elem.nodeType === 1 && node && node.nodeValue === match;
};
}
root.removeChild( form );
root = form = null; // release memory in IE
})();
(function(){
// Check to see if the browser returns only elements
// when doing getElementsByTagName("*")
// Create a fake element
var div = document.createElement("div");
div.appendChild( document.createComment("") );
// Make sure no comments are found
if ( div.getElementsByTagName("*").length > 0 ) {
Expr.find.TAG = function(match, context){
var results = context.getElementsByTagName(match[1]);
// Filter out possible comments
if ( match[1] === "*" ) {
var tmp = [];
for ( var i = 0; results[i]; i++ ) {
if ( results[i].nodeType === 1 ) {
tmp.push( results[i] );
}
}
results = tmp;
}
return results;
};
}
// Check to see if an attribute returns normalized href attributes
div.innerHTML = "<a href='#'></a>";
if ( div.firstChild && typeof div.firstChild.getAttribute !== "undefined" &&
div.firstChild.getAttribute("href") !== "#" ) {
Expr.attrHandle.href = function(elem){
return elem.getAttribute("href", 2);
};
}
div = null; // release memory in IE
})();
if ( document.querySelectorAll ) {
(function(){
var oldSizzle = Sizzle, div = document.createElement("div");
div.innerHTML = "<p class='TEST'></p>";
// Safari can't handle uppercase or unicode characters when
// in quirks mode.
if ( div.querySelectorAll && div.querySelectorAll(".TEST").length === 0 ) {
return;
}
Sizzle = function(query, context, extra, seed){
context = context || document;
// Only use querySelectorAll on non-XML documents
// (ID selectors don't work in non-HTML documents)
if ( !seed && context.nodeType === 9 && !Sizzle.isXML(context) ) {
try {
return makeArray( context.querySelectorAll(query), extra );
} catch(e){}
}
return oldSizzle(query, context, extra, seed);
};
for ( var prop in oldSizzle ) {
Sizzle[ prop ] = oldSizzle[ prop ];
}
div = null; // release memory in IE
})();
}
(function(){
var div = document.createElement("div");
div.innerHTML = "<div class='test e'></div><div class='test'></div>";
// Opera can't find a second classname (in 9.6)
// Also, make sure that getElementsByClassName actually exists
if ( !div.getElementsByClassName || div.getElementsByClassName("e").length === 0 ) {
return;
}
// Safari caches class attributes, doesn't catch changes (in 3.2)
div.lastChild.className = "e";
if ( div.getElementsByClassName("e").length === 1 ) {
return;
}
Expr.order.splice(1, 0, "CLASS");
Expr.find.CLASS = function(match, context, isXML) {
if ( typeof context.getElementsByClassName !== "undefined" && !isXML ) {
return context.getElementsByClassName(match[1]);
}
};
div = null; // release memory in IE
})();
// Walks each element in checkSet along `dir` (e.g. parentNode or
// previousSibling) looking for a node whose nodeName matches `cur`.
// Visited elements are stamped with sizcache/sizset so repeated walks
// within the same pass (`doneName`) can short-circuit via the cache.
function dirNodeCheck( dir, cur, doneName, checkSet, nodeCheck, isXML ) {
	for ( var i = 0, l = checkSet.length; i < l; i++ ) {
		var elem = checkSet[i];

		if ( elem ) {
			elem = elem[dir];
			var match = false;

			while ( elem ) {
				// Cache hit: reuse the result computed for this node earlier.
				if ( elem.sizcache === doneName ) {
					match = checkSet[elem.sizset];
					break;
				}

				if ( elem.nodeType === 1 && !isXML ){
					elem.sizcache = doneName;
					elem.sizset = i;
				}

				if ( elem.nodeName.toLowerCase() === cur ) {
					match = elem;
					break;
				}

				elem = elem[dir];
			}

			checkSet[i] = match;
		}
	}
}
// Like dirNodeCheck, but `cur` may be an element or a full sub-selector
// string, so each candidate along `dir` is verified via Sizzle.filter
// (or an identity comparison) instead of a plain nodeName check.
function dirCheck( dir, cur, doneName, checkSet, nodeCheck, isXML ) {
	for ( var i = 0, l = checkSet.length; i < l; i++ ) {
		var elem = checkSet[i];

		if ( elem ) {
			elem = elem[dir];
			var match = false;

			while ( elem ) {
				// Cache hit: reuse the result computed for this node earlier.
				if ( elem.sizcache === doneName ) {
					match = checkSet[elem.sizset];
					break;
				}

				if ( elem.nodeType === 1 ) {
					if ( !isXML ) {
						elem.sizcache = doneName;
						elem.sizset = i;
					}

					if ( typeof cur !== "string" ) {
						if ( elem === cur ) {
							match = true;
							break;
						}

					} else if ( Sizzle.filter( cur, [elem] ).length > 0 ) {
						match = elem;
						break;
					}
				}

				elem = elem[dir];
			}

			checkSet[i] = match;
		}
	}
}
// True when `a` contains `b`: uses compareDocumentPosition (bit 16 =
// contained-by) where available, otherwise the proprietary contains(),
// assuming containment when neither API exists.
Sizzle.contains = document.compareDocumentPosition ? function(a, b){
	return !!(a.compareDocumentPosition(b) & 16);
} : function(a, b){
	return a !== b && (a.contains ? a.contains(b) : true);
};
// True when the node belongs to an XML document (root element not HTML).
Sizzle.isXML = function(elem){
	// documentElement is verified for cases where it doesn't yet exist
	// (such as loading iframes in IE - #4833)
	var documentElement = (elem ? elem.ownerDocument || elem : 0).documentElement;
	return documentElement ? documentElement.nodeName !== "HTML" : false;
};
// Evaluates a selector that ends in positional/pseudo filters: the pseudo
// tokens are peeled off and applied only after the plain part of the
// selector has been resolved against every root in `context`.
var posProcess = function(selector, context){
	var tmpSet = [], later = "", match,
		root = context.nodeType ? [context] : context;

	// Position selectors must be done after the filter
	// And so must :not(positional) so we move all PSEUDOs to the end
	while ( (match = Expr.match.PSEUDO.exec( selector )) ) {
		later += match[0];
		selector = selector.replace( Expr.match.PSEUDO, "" );
	}

	// A bare combinator needs a universal selector to be runnable.
	selector = Expr.relative[selector] ? selector + "*" : selector;

	for ( var i = 0, l = root.length; i < l; i++ ) {
		Sizzle( selector, root[i], tmpSet );
	}

	return Sizzle.filter( later, tmpSet );
};
// EXPOSE
window.Sizzle = Sizzle;
})();
// Expose Sizzle through the RightJS Document/Element interface:
// `find` returns a wrapped list of matches, `first` only the first match.
RightJS([RightJS.Document, RightJS.Element]).each('include', {
	first: function(rule) {
		return this.find(rule)[0];
	},

	find: function(rule) {
		return RightJS(Sizzle(rule, this._)).map(RightJS.$);
	}
});
|
if ( filter ) {
|
<|file_name|>env_gce.go<|end_file_name|><|fim▁begin|>package fingerprint
import (
"encoding/json"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"regexp"
"strconv"
"strings"
"time"
"github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/nomad/structs"
)
// This is where the GCE metadata server normally resides. We hardcode the
// "instance" path as well since it's the only one we access here.
const DEFAULT_GCE_URL = "http://169.254.169.254/computeMetadata/v1/instance/"
// GCEMetadataNetworkInterface mirrors the JSON shape of one network
// interface entry in the GCE instance metadata tree.
type GCEMetadataNetworkInterface struct {
	// AccessConfigs lists external (NAT) address configurations.
	AccessConfigs []struct {
		ExternalIp string
		Type       string
	}
	ForwardedIps []string
	Ip           string
	Network      string
}
// ReqError wraps a non-success HTTP status returned by the metadata
// service so callers can treat it as an error value.
type ReqError struct {
	StatusCode int
}
// Error renders the wrapped status code as its standard HTTP status text.
func (e ReqError) Error() string {
	return http.StatusText(e.StatusCode)
}
func lastToken(s string) string {
index := strings.LastIndex(s, "/")
return s[index+1:]
}
// EnvGCEFingerprint is used to fingerprint GCE metadata
type EnvGCEFingerprint struct {
	StaticFingerprinter
	client *http.Client
	logger *log.Logger
	// metadataURL is the base URL of the metadata service; kept as a field
	// so tests can point it at a stub server.
	metadataURL string
}
// NewEnvGCEFingerprint is used to create a fingerprint from GCE metadata
func NewEnvGCEFingerprint(logger *log.Logger) Fingerprint {
// Read the internal metadata URL from the environment, allowing test files to
// provide their own<|fim▁hole|> if metadataURL == "" {
metadataURL = DEFAULT_GCE_URL
}
// assume 2 seconds is enough time for inside GCE network
client := &http.Client{
Timeout: 2 * time.Second,
Transport: cleanhttp.DefaultTransport(),
}
return &EnvGCEFingerprint{
client: client,
logger: logger,
metadataURL: metadataURL,
}
}
// Get fetches a single attribute (or, with recursive=true, a JSON subtree)
// from the GCE metadata server and returns the raw response body as a string.
// Non-success statuses (>= 400) are reported as a ReqError carrying the
// status code so callers can distinguish a 404 from other failures.
func (f *EnvGCEFingerprint) Get(attribute string, recursive bool) (string, error) {
	reqUrl := f.metadataURL + attribute
	if recursive {
		reqUrl = reqUrl + "?recursive=true"
	}

	parsedUrl, err := url.Parse(reqUrl)
	if err != nil {
		return "", err
	}

	// The GCE metadata server rejects requests that lack this header; it
	// guards against accidental / SSRF-style access.
	req := &http.Request{
		Method: "GET",
		URL:    parsedUrl,
		Header: http.Header{
			"Metadata-Flavor": []string{"Google"},
		},
	}

	res, err := f.client.Do(req)
	if err != nil {
		return "", err
	}

	resp, err := ioutil.ReadAll(res.Body)
	res.Body.Close()
	if err != nil {
		f.logger.Printf("[ERR]: fingerprint.env_gce: Error reading response body for GCE %s", attribute)
		return "", err
	}

	// NOTE(review): the body is read even for error statuses; the status is
	// only checked afterwards, so body-read errors take precedence.
	if res.StatusCode >= 400 {
		return "", ReqError{res.StatusCode}
	}

	return string(resp), nil
}
func checkError(err error, logger *log.Logger, desc string) error {
// If it's a URL error, assume we're not actually in an GCE environment.
// To the outer layers, this isn't an error so return nil.
if _, ok := err.(*url.Error); ok {
logger.Printf("[ERR] fingerprint.env_gce: Error querying GCE " + desc + ", skipping")
return nil
}
// Otherwise pass the error through.
return err
}
// Fingerprint queries the GCE metadata server and populates node attributes
// under "platform.gce.*" plus the "gce" entry of node.Links. It returns
// (false, nil) when the host is not a GCE instance so other fingerprinters
// can run, and (false, err) on hard query failures.
func (f *EnvGCEFingerprint) Fingerprint(cfg *config.Config, node *structs.Node) (bool, error) {
	if !f.isGCE() {
		return false, nil
	}

	if node.Links == nil {
		node.Links = make(map[string]string)
	}

	// Simple string attributes copied verbatim; slashes in the metadata key
	// become dots in the attribute name.
	keys := []string{
		"hostname",
		"id",
		"cpu-platform",
		"scheduling/automatic-restart",
		"scheduling/on-host-maintenance",
	}
	for _, k := range keys {
		value, err := f.Get(k, false)
		if err != nil {
			return false, checkError(err, f.logger, k)
		}

		// assume we want blank entries
		key := strings.Replace(k, "/", ".", -1)
		node.Attributes["platform.gce."+key] = strings.Trim(string(value), "\n")
	}

	// These keys need everything before the final slash removed to be usable.
	keys = []string{
		"machine-type",
		"zone",
	}
	for _, k := range keys {
		value, err := f.Get(k, false)
		if err != nil {
			return false, checkError(err, f.logger, k)
		}

		node.Attributes["platform.gce."+k] = strings.Trim(lastToken(value), "\n")
	}

	// Get internal and external IPs (if they exist)
	// NOTE(review): the error from this Get is never checked; on failure the
	// Unmarshal below runs on an empty string and only logs a warning.
	value, err := f.Get("network-interfaces/", true)
	var interfaces []GCEMetadataNetworkInterface
	if err := json.Unmarshal([]byte(value), &interfaces); err != nil {
		f.logger.Printf("[WARN] fingerprint.env_gce: Error decoding network interface information: %s", err.Error())
	}

	for _, intf := range interfaces {
		prefix := "platform.gce.network." + lastToken(intf.Network)
		node.Attributes[prefix] = "true"
		node.Attributes[prefix+".ip"] = strings.Trim(intf.Ip, "\n")
		for index, accessConfig := range intf.AccessConfigs {
			node.Attributes[prefix+".external-ip."+strconv.Itoa(index)] = accessConfig.ExternalIp
		}
	}

	// Instance tags become boolean attributes: platform.gce.tag.<tag> = "true".
	var tagList []string
	value, err = f.Get("tags", false)
	if err != nil {
		return false, checkError(err, f.logger, "tags")
	}
	if err := json.Unmarshal([]byte(value), &tagList); err != nil {
		f.logger.Printf("[WARN] fingerprint.env_gce: Error decoding instance tags: %s", err.Error())
	}
	for _, tag := range tagList {
		node.Attributes["platform.gce.tag."+tag] = "true"
	}

	// Custom instance metadata key/values become platform.gce.attr.<key>.
	var attrDict map[string]string
	value, err = f.Get("attributes/", true)
	if err != nil {
		return false, checkError(err, f.logger, "attributes/")
	}
	if err := json.Unmarshal([]byte(value), &attrDict); err != nil {
		f.logger.Printf("[WARN] fingerprint.env_gce: Error decoding instance attributes: %s", err.Error())
	}
	for k, v := range attrDict {
		node.Attributes["platform.gce.attr."+k] = strings.Trim(v, "\n")
	}

	// populate Links
	node.Links["gce"] = node.Attributes["platform.gce.id"]

	return true, nil
}
func (f *EnvGCEFingerprint) isGCE() bool {
// TODO: better way to detect GCE?
// Query the metadata url for the machine type, to verify we're on GCE
machineType, err := f.Get("machine-type", false)
if err != nil {
if re, ok := err.(ReqError); !ok || re.StatusCode != 404 {
// If it wasn't a 404 error, print an error message.
f.logger.Printf("[ERR] fingerprint.env_gce: Error querying GCE Metadata URL, skipping")
}
return false
}
match, err := regexp.MatchString("projects/.+/machineTypes/.+", machineType)
if !match {
return false
}
return true
}<|fim▁end|>
|
metadataURL := os.Getenv("GCE_ENV_URL")
|
<|file_name|>test_mixins.py<|end_file_name|><|fim▁begin|># This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from django.core.exceptions import PermissionDenied
from mock import Mock, patch
from nose.tools import eq_, raises
from oneanddone.base.tests import TestCase
from oneanddone.users.mixins import BaseUserProfileRequiredMixin, MyStaffUserRequiredMixin
from oneanddone.users.tests import UserFactory, UserProfileFactory
class FakeMixin(object):
    """Terminal stand-in for the next class in a view mixin's MRO.

    The mixins under test delegate to the parent ``dispatch`` when access is
    granted; this class ends the chain by returning a sentinel string that
    the tests assert on.
    """

    def dispatch(self, request, *args, **kwargs):
        # Sentinel observed by the tests to confirm the mixin delegated here.
        sentinel = 'fakemixin'
        return sentinel
class FakeView(BaseUserProfileRequiredMixin, FakeMixin):
    """Test view gated by the profile-required mixin; FakeMixin ends dispatch."""
    pass
class FakeViewNeedsStaff(MyStaffUserRequiredMixin, FakeMixin):
    """Test view gated by the staff-required mixin; FakeMixin ends dispatch."""
    pass
class MyStaffUserRequiredMixinTests(TestCase):
    """Tests for MyStaffUserRequiredMixin's staff-only dispatch gating."""

    def setUp(self):
        self.view = FakeViewNeedsStaff()

    def test_is_staff(self):
        """
        If the user is staff, call the parent class's
        dispatch method.
        """
        request = Mock()
        request.user = UserFactory.create(is_staff=True)
        # 'fakemixin' is the sentinel returned by FakeMixin.dispatch, proving
        # the mixin delegated instead of blocking.
        eq_(self.view.dispatch(request), 'fakemixin')

    @raises(PermissionDenied)
    def test_not_staff(self):
        """
        If the user is not staff, raise a PermissionDenied exception.
        """
        request = Mock()
        request.user = UserFactory.create(is_staff=False)
        self.view.dispatch(request)
class UserProfileRequiredMixinTests(TestCase):
def setUp(self):
self.view = FakeView()
def test_has_profile(self):
"""
If the user has created a profile, and has accepted privacy policy
call the parent class's dispatch method.
"""
request = Mock()
request.user = UserProfileFactory.create(privacy_policy_accepted=True).user
eq_(self.view.dispatch(request), 'fakemixin')
def test_no_profile(self):
"""
If the user hasn't created a profile, redirect them to the
profile creation view.
"""
request = Mock()
request.user = UserFactory.create()<|fim▁hole|> eq_(self.view.dispatch(request), redirect.return_value)
redirect.assert_called_with('users.profile.create')<|fim▁end|>
|
with patch('oneanddone.users.mixins.redirect') as redirect:
|
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>from PySide2.QtWidgets import QApplication
<|fim▁hole|><p>This plugin allows to create sets of words that can be matched with the
attributes of the project's variants.</p>
<p>
Once the addition of a word set is started, a manual addition one by one of the
words is possible; for practical reasons it is however advisable to directly
import a text file containing merely 1 word per line.</p>
The set can be reworked at any time via an editor.<br>
<br>
<i>Example of use:</i><br>
<br>
<i>A user wishes to quickly filter all variants of a project related to a set of
relevant genes for him.
He therefore creates a word set and then makes a selection via:</i>
<ul>
<li>the <em>Filters Editor</em> plugin with a filter of the type:
<pre>gene IN ('WORDSET', 'my_word_set')</pre></li>
<li>the <em>VQL Editor</em> plugin with a VQL request of the type:
<pre>SELECT chr,pos,ref,alt,gene FROM variants WHERE gene IN WORDSET['my_word_set']</pre></li>
</ul>
"""
__author__ = "Sacha schutz"
__version__ = "1.0.0"<|fim▁end|>
|
__title__ = "Wordsets editor"
__description__ = "A plugin to manage word sets"
__long_description__ = """
|
<|file_name|>serviceworkerglobalscope.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use devtools;
use devtools_traits::DevtoolScriptControlMsg;
use dom::abstractworker::WorkerScriptMsg;
use dom::bindings::codegen::Bindings::EventHandlerBinding::EventHandlerNonNull;
use dom::bindings::codegen::Bindings::ServiceWorkerGlobalScopeBinding;
use dom::bindings::codegen::Bindings::ServiceWorkerGlobalScopeBinding::ServiceWorkerGlobalScopeMethods;
use dom::bindings::inheritance::Castable;
use dom::bindings::js::{Root, RootCollection};
use dom::bindings::reflector::Reflectable;
use dom::bindings::str::DOMString;
use dom::event::Event;
use dom::eventtarget::EventTarget;
use dom::extendableevent::ExtendableEvent;
use dom::extendablemessageevent::ExtendableMessageEvent;
use dom::globalscope::GlobalScope;
use dom::workerglobalscope::WorkerGlobalScope;
use ipc_channel::ipc::{self, IpcSender, IpcReceiver};
use ipc_channel::router::ROUTER;
use js::jsapi::{JS_SetInterruptCallback, JSAutoCompartment, JSContext};
use js::jsval::UndefinedValue;
use js::rust::Runtime;
use net_traits::{load_whole_resource, IpcSend, CustomResponseMediator};
use net_traits::request::{CredentialsMode, Destination, RequestInit, Type as RequestType};
use rand::random;
use script_runtime::{CommonScriptMsg, StackRootTLS, get_reports, new_rt_and_cx, ScriptChan};
use script_traits::{TimerEvent, WorkerGlobalScopeInit, ScopeThings, ServiceWorkerMsg, WorkerScriptLoadOrigin};
use servo_url::ServoUrl;
use std::sync::mpsc::{Receiver, RecvError, Select, Sender, channel};
use std::thread;
use std::time::Duration;
use style::thread_state::{self, IN_WORKER, SCRIPT};
use util::prefs::PREFS;
use util::thread::spawn_named;
/// Messages used to control service worker event loop
pub enum ServiceWorkerScriptMsg {
/// Message common to all workers
CommonWorker(WorkerScriptMsg),
// Message to request a custom response by the service worker
Response(CustomResponseMediator)
}
pub enum MixedMessage {
FromServiceWorker(ServiceWorkerScriptMsg),
FromDevtools(DevtoolScriptControlMsg),
FromTimeoutThread(())
}
#[derive(JSTraceable, Clone)]
pub struct ServiceWorkerChan {
pub sender: Sender<ServiceWorkerScriptMsg>
}
impl ScriptChan for ServiceWorkerChan {
    /// Wrap a common script message for the service worker's event loop.
    /// A failed send (receiver gone) is flattened to `Err(())`.
    fn send(&self, msg: CommonScriptMsg) -> Result<(), ()> {
        self.sender
            .send(ServiceWorkerScriptMsg::CommonWorker(WorkerScriptMsg::Common(msg)))
            .map_err(|_| ())
    }

    /// Produce an independent handle onto the same underlying channel.
    fn clone(&self) -> Box<ScriptChan + Send> {
        box ServiceWorkerChan {
            sender: self.sender.clone(),
        }
    }
}
#[dom_struct]
pub struct ServiceWorkerGlobalScope {
workerglobalscope: WorkerGlobalScope,
#[ignore_heap_size_of = "Defined in std"]
receiver: Receiver<ServiceWorkerScriptMsg>,
#[ignore_heap_size_of = "Defined in std"]
own_sender: Sender<ServiceWorkerScriptMsg>,
#[ignore_heap_size_of = "Defined in std"]
timer_event_port: Receiver<()>,
#[ignore_heap_size_of = "Defined in std"]
swmanager_sender: IpcSender<ServiceWorkerMsg>,
scope_url: ServoUrl,
}
impl ServiceWorkerGlobalScope {
fn new_inherited(init: WorkerGlobalScopeInit,
worker_url: ServoUrl,
from_devtools_receiver: Receiver<DevtoolScriptControlMsg>,
runtime: Runtime,
own_sender: Sender<ServiceWorkerScriptMsg>,
receiver: Receiver<ServiceWorkerScriptMsg>,
timer_event_chan: IpcSender<TimerEvent>,
timer_event_port: Receiver<()>,
swmanager_sender: IpcSender<ServiceWorkerMsg>,
scope_url: ServoUrl)
-> ServiceWorkerGlobalScope {
ServiceWorkerGlobalScope {
workerglobalscope: WorkerGlobalScope::new_inherited(init,
worker_url,
runtime,
from_devtools_receiver,
timer_event_chan,
None),
receiver: receiver,
timer_event_port: timer_event_port,
own_sender: own_sender,
swmanager_sender: swmanager_sender,
scope_url: scope_url
}
}
#[allow(unsafe_code)]
pub fn new(init: WorkerGlobalScopeInit,
worker_url: ServoUrl,
from_devtools_receiver: Receiver<DevtoolScriptControlMsg>,
runtime: Runtime,
own_sender: Sender<ServiceWorkerScriptMsg>,
receiver: Receiver<ServiceWorkerScriptMsg>,
timer_event_chan: IpcSender<TimerEvent>,
timer_event_port: Receiver<()>,
swmanager_sender: IpcSender<ServiceWorkerMsg>,
scope_url: ServoUrl)
-> Root<ServiceWorkerGlobalScope> {
let cx = runtime.cx();
let scope = box ServiceWorkerGlobalScope::new_inherited(init,
worker_url,
from_devtools_receiver,
runtime,
own_sender,
receiver,
timer_event_chan,
timer_event_port,
swmanager_sender,
scope_url);
unsafe {
ServiceWorkerGlobalScopeBinding::Wrap(cx, scope)
}
}
#[allow(unsafe_code)]
pub fn run_serviceworker_scope(scope_things: ScopeThings,
own_sender: Sender<ServiceWorkerScriptMsg>,
receiver: Receiver<ServiceWorkerScriptMsg>,
devtools_receiver: IpcReceiver<DevtoolScriptControlMsg>,
swmanager_sender: IpcSender<ServiceWorkerMsg>,
scope_url: ServoUrl) {
let ScopeThings { script_url,
init,
worker_load_origin,
.. } = scope_things;
let serialized_worker_url = script_url.to_string();
spawn_named(format!("ServiceWorker for {}", serialized_worker_url), move || {
thread_state::initialize(SCRIPT | IN_WORKER);
let roots = RootCollection::new();
let _stack_roots_tls = StackRootTLS::new(&roots);
let WorkerScriptLoadOrigin { referrer_url, referrer_policy, pipeline_id } = worker_load_origin;
let request = RequestInit {
url: script_url.clone(),
type_: RequestType::Script,
destination: Destination::ServiceWorker,
credentials_mode: CredentialsMode::Include,
use_url_credentials: true,
origin: script_url,
pipeline_id: pipeline_id,
referrer_url: referrer_url,
referrer_policy: referrer_policy,
.. RequestInit::default()
};
let (url, source) = match load_whole_resource(request,
&init.resource_threads.sender()) {
Err(_) => {
println!("error loading script {}", serialized_worker_url);
return;
}
Ok((metadata, bytes)) => {
(metadata.final_url, String::from_utf8(bytes).unwrap())
}
};
let runtime = unsafe { new_rt_and_cx() };
let (devtools_mpsc_chan, devtools_mpsc_port) = channel();
ROUTER.route_ipc_receiver_to_mpsc_sender(devtools_receiver, devtools_mpsc_chan);
// TODO XXXcreativcoder use this timer_ipc_port, when we have a service worker instance here
let (timer_ipc_chan, _timer_ipc_port) = ipc::channel().unwrap();
let (timer_chan, timer_port) = channel();
let global = ServiceWorkerGlobalScope::new(
init, url, devtools_mpsc_port, runtime,
own_sender, receiver,
timer_ipc_chan, timer_port, swmanager_sender, scope_url);
let scope = global.upcast::<WorkerGlobalScope>();
unsafe {
// Handle interrupt requests
JS_SetInterruptCallback(scope.runtime(), Some(interrupt_callback));
}
scope.execute_script(DOMString::from(source));
// Service workers are time limited
spawn_named("SWTimeoutThread".to_owned(), move || {
let sw_lifetime_timeout = PREFS.get("dom.serviceworker.timeout_seconds").as_u64().unwrap();
thread::sleep(Duration::new(sw_lifetime_timeout, 0));
let _ = timer_chan.send(());
});
global.dispatch_activate();
let reporter_name = format!("service-worker-reporter-{}", random::<u64>());
scope.upcast::<GlobalScope>().mem_profiler_chan().run_with_memory_reporting(|| {
while let Ok(event) = global.receive_event() {
if !global.handle_event(event) {
break;
}
}
}, reporter_name, scope.script_chan(), CommonScriptMsg::CollectReports);
});
}
    /// Dispatch one event drawn from the merged event sources. Returns
    /// `true` to keep the worker's event loop running, `false` to stop it.
    fn handle_event(&self, event: MixedMessage) -> bool {
        match event {
            MixedMessage::FromDevtools(msg) => {
                // Devtools control messages; unrecognized variants are
                // logged and ignored.
                match msg {
                    DevtoolScriptControlMsg::EvaluateJS(_pipe_id, string, sender) =>
                        devtools::handle_evaluate_js(self.upcast(), string, sender),
                    DevtoolScriptControlMsg::GetCachedMessages(pipe_id, message_types, sender) =>
                        devtools::handle_get_cached_messages(pipe_id, message_types, sender),
                    DevtoolScriptControlMsg::WantsLiveNotifications(_pipe_id, bool_val) =>
                        devtools::handle_wants_live_notifications(self.upcast(), bool_val),
                    _ => debug!("got an unusable devtools control message inside the worker!"),
                }
                true
            }
            MixedMessage::FromServiceWorker(msg) => {
                self.handle_script_event(msg);
                true
            }
            MixedMessage::FromTimeoutThread(_) => {
                // Service workers are time-limited: report the timeout for
                // this scope to the manager and end the event loop.
                let _ = self.swmanager_sender.send(ServiceWorkerMsg::Timeout(self.scope_url.clone()));
                false
            }
        }
    }
fn handle_script_event(&self, msg: ServiceWorkerScriptMsg) {
use self::ServiceWorkerScriptMsg::*;
match msg {
CommonWorker(WorkerScriptMsg::DOMMessage(data)) => {
let scope = self.upcast::<WorkerGlobalScope>();
let target = self.upcast();
let _ac = JSAutoCompartment::new(scope.get_cx(), scope.reflector().get_jsobject().get());
rooted!(in(scope.get_cx()) let mut message = UndefinedValue());
data.read(scope.upcast(), message.handle_mut());
ExtendableMessageEvent::dispatch_jsval(target, scope.upcast(), message.handle());
},
CommonWorker(WorkerScriptMsg::Common(CommonScriptMsg::RunnableMsg(_, runnable))) => {
runnable.handler()
},
CommonWorker(WorkerScriptMsg::Common(CommonScriptMsg::CollectReports(reports_chan))) => {
let scope = self.upcast::<WorkerGlobalScope>();
let cx = scope.get_cx();
let path_seg = format!("url({})", scope.get_url());
let reports = get_reports(cx, path_seg);
reports_chan.send(reports);
},<|fim▁hole|> self.upcast::<EventTarget>().fire_event(atom!("fetch"));
let _ = mediator.response_chan.send(None);
}
}
}
#[allow(unsafe_code)]
fn receive_event(&self) -> Result<MixedMessage, RecvError> {
let scope = self.upcast::<WorkerGlobalScope>();
let worker_port = &self.receiver;
let devtools_port = scope.from_devtools_receiver();
let timer_event_port = &self.timer_event_port;
let sel = Select::new();
let mut worker_handle = sel.handle(worker_port);
let mut devtools_handle = sel.handle(devtools_port);
let mut timer_port_handle = sel.handle(timer_event_port);
unsafe {
worker_handle.add();
if scope.from_devtools_sender().is_some() {
devtools_handle.add();
}
timer_port_handle.add();
}
let ret = sel.wait();
if ret == worker_handle.id() {
Ok(MixedMessage::FromServiceWorker(try!(worker_port.recv())))
}else if ret == devtools_handle.id() {
Ok(MixedMessage::FromDevtools(try!(devtools_port.recv())))
} else if ret == timer_port_handle.id() {
Ok(MixedMessage::FromTimeoutThread(try!(timer_event_port.recv())))
} else {
panic!("unexpected select result!")
}
}
pub fn process_event(&self, msg: CommonScriptMsg) {
self.handle_script_event(ServiceWorkerScriptMsg::CommonWorker(WorkerScriptMsg::Common(msg)));
}
pub fn script_chan(&self) -> Box<ScriptChan + Send> {
box ServiceWorkerChan {
sender: self.own_sender.clone()
}
}
fn dispatch_activate(&self) {
let event = ExtendableEvent::new(self, atom!("activate"), false, false);
let event = (&*event).upcast::<Event>();
self.upcast::<EventTarget>().dispatch_event(event);
}
}
/// JSAPI interrupt callback installed on the worker's runtime. Returning
/// `false` tells the JS engine to terminate the running script, which is
/// done once the worker scope has been closed.
#[allow(unsafe_code)]
unsafe extern "C" fn interrupt_callback(cx: *mut JSContext) -> bool {
    let worker =
        Root::downcast::<WorkerGlobalScope>(GlobalScope::from_context(cx))
            .expect("global is not a worker scope");
    assert!(worker.is::<ServiceWorkerGlobalScope>());

    // A false response causes the script to terminate
    !worker.is_closing()
}
impl ServiceWorkerGlobalScopeMethods for ServiceWorkerGlobalScope {
// https://w3c.github.io/ServiceWorker/#service-worker-global-scope-onmessage-attribute
event_handler!(message, GetOnmessage, SetOnmessage);
}<|fim▁end|>
|
Response(mediator) => {
// TODO XXXcreativcoder This will eventually use a FetchEvent interface to fire event
// when we have the Request and Response dom api's implemented
// https://slightlyoff.github.io/ServiceWorker/spec/service_worker_1/index.html#fetch-event-section
|
<|file_name|>Renderer.ts<|end_file_name|><|fim▁begin|>import AwaitBlock from './handlers/AwaitBlock';
import Comment from './handlers/Comment';
import DebugTag from './handlers/DebugTag';
import EachBlock from './handlers/EachBlock';
import Element from './handlers/Element';
import Head from './handlers/Head';
import HtmlTag from './handlers/HtmlTag';
import IfBlock from './handlers/IfBlock';
import InlineComponent from './handlers/InlineComponent';
import KeyBlock from './handlers/KeyBlock';
import Slot from './handlers/Slot';
import SlotTemplate from './handlers/SlotTemplate';
import Tag from './handlers/Tag';
import Text from './handlers/Text';
import Title from './handlers/Title';
import { AppendTarget, CompileOptions } from '../../interfaces';
import { INode } from '../nodes/interfaces';
import { Expression, TemplateLiteral, Identifier } from 'estree';
import { escape_template } from '../utils/stringify';
type Handler = (node: any, renderer: Renderer, options: CompileOptions) => void;
function noop() {}
const handlers: Record<string, Handler> = {
AwaitBlock,
Body: noop,
Comment,
DebugTag,
EachBlock,
Element,
Head,
IfBlock,
InlineComponent,
KeyBlock,
MustacheTag: Tag, // TODO MustacheTag is an anachronism
Options: noop,
RawMustacheTag: HtmlTag,
Slot,
SlotTemplate,
Text,
Title,
Window: noop
};
export interface RenderOptions extends CompileOptions{
locate: (c: number) => { line: number; column: number };
head_id?: string;
}
<|fim▁hole|>export default class Renderer {
has_bindings = false;
name: Identifier;
stack: Array<{ current: { value: string }; literal: TemplateLiteral }> = [];
current: { value: string }; // TODO can it just be `current: string`?
literal: TemplateLiteral;
targets: AppendTarget[] = [];
constructor({ name }) {
this.name = name;
this.push();
}
add_string(str: string) {
this.current.value += escape_template(str);
}
add_expression(node: Expression) {
this.literal.quasis.push({
type: 'TemplateElement',
value: { raw: this.current.value, cooked: null },
tail: false
});
this.literal.expressions.push(node);
this.current.value = '';
}
push() {
const current = this.current = { value: '' };
const literal = this.literal = {
type: 'TemplateLiteral',
expressions: [],
quasis: []
};
this.stack.push({ current, literal });
}
pop() {
this.literal.quasis.push({
type: 'TemplateElement',
value: { raw: this.current.value, cooked: null },
tail: true
});
const popped = this.stack.pop();
const last = this.stack[this.stack.length - 1];
if (last) {
this.literal = last.literal;
this.current = last.current;
}
return popped.literal;
}
render(nodes: INode[], options: RenderOptions) {
nodes.forEach(node => {
const handler = handlers[node.type];
if (!handler) {
throw new Error(`No handler for '${node.type}' nodes`);
}
handler(node, this, options);
});
}
}<|fim▁end|>
| |
<|file_name|>strcmp.rs<|end_file_name|><|fim▁begin|>#[macro_export]
macro_rules! strcmp_tests {
( $TestRegion:path ) => {
use libc::{c_char, c_int, c_void, strcmp};
use lucet_runtime::vmctx::lucet_vmctx;
use lucet_runtime::{lucet_hostcalls, Error, Limits, Region, Val, WASM_PAGE_SIZE};
use std::ffi::CString;
use std::sync::Arc;
use $TestRegion as TestRegion;
use $crate::build::test_module_c;
lucet_hostcalls! {
#[no_mangle]
pub unsafe extern "C" fn hostcall_host_fault(
&mut _vmctx,
) -> () {
let oob = (-1isize) as *mut c_char;
*oob = 'x' as c_char;
}
}
fn strcmp_compare(s1: &str, s2: &str) {
let s1 = CString::new(s1)
.expect("s1 is a valid CString")
.into_bytes_with_nul();
let s2 = CString::new(s2)
.expect("s2 is a valid CString")
.into_bytes_with_nul();
assert!(s1.len() + s2.len() < WASM_PAGE_SIZE as usize);
<|fim▁hole|> let mut inst = region
.new_instance(module)
.expect("instance can be created");
let newpage_start = inst.grow_memory(1).expect("grow_memory succeeds");
let heap = inst.heap_mut();
let s1_ptr = (newpage_start * WASM_PAGE_SIZE) as usize;
let s2_ptr = s1_ptr + s1.len();
heap[s1_ptr..s2_ptr].copy_from_slice(&s1);
heap[s2_ptr..s2_ptr + s2.len()].copy_from_slice(&s2);
let res = c_int::from(
inst.run(
"run_strcmp",
&[Val::GuestPtr(s1_ptr as u32), Val::GuestPtr(s2_ptr as u32)],
)
.expect("instance runs")
.unwrap_returned(),
);
let host_strcmp_res =
unsafe { strcmp(s1.as_ptr() as *const c_char, s2.as_ptr() as *const c_char) };
assert_eq!(res, host_strcmp_res);
}
#[test]
fn strcmp_abc_abc() {
strcmp_compare("abc", "abc");
}
#[test]
fn strcmp_def_abc() {
strcmp_compare("def", "abc");
}
#[test]
fn strcmp_abcd_abc() {
strcmp_compare("abcd", "abc");
}
#[test]
fn strcmp_abc_abcd() {
strcmp_compare("abc", "abcd");
}
#[test]
fn strcmp_fault_test() {
let module = test_module_c("strcmp", "guest.c").expect("compile module");
let region = TestRegion::create(10, &Limits::default()).expect("region can be created");
let mut inst = region
.new_instance(module)
.expect("instance can be created");
match inst.run("wasm_fault", &[]) {
Err(Error::RuntimeFault { .. }) => (),
res => panic!("unexpected result: {:?}", res),
}
}
};
}<|fim▁end|>
|
let module = test_module_c("strcmp", "guest.c").expect("compile module");
let region = TestRegion::create(10, &Limits::default()).expect("region can be created");
|
<|file_name|>database.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#
# Copyright 2012 Institut für Experimentelle Kernphysik - Karlsruher Institut für Technologie
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software<|fim▁hole|># distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sqlalchemy import *
import hf
metadata = MetaData()
engine = None
def connect(implicit_execution=False):
    """Create the global SQLAlchemy engine from the ``[database]`` config section.

    All keys of the section are handed straight to ``engine_from_config``
    (no key prefix); the resulting engine is stored on ``hf.database.engine``.

    :param implicit_execution: if True, also bind the module-level
        ``metadata`` to the engine so statements can execute without an
        explicit connection.
    """
    config = dict(hf.config.items("database"))
    hf.database.engine = engine_from_config(config, prefix="")
    if implicit_execution:
        metadata.bind = hf.database.engine
def disconnect():
    """Tear down the database connection.

    Currently a no-op: engine disposal is left to interpreter shutdown.
    """
    pass
| |
<|file_name|>cooki.js<|end_file_name|><|fim▁begin|>/*!<|fim▁hole|> *
* Copyright 2014 Contributors
* Released under the MIT license
* https://github.com/Manoz/Cooki/blob/master/LICENSE
*/
/*!
* Your website scripts below
*/<|fim▁end|>
|
* Cooki v1.0.0
* http://k-legrand.fr
|
<|file_name|>wiki_plugin.py<|end_file_name|><|fim▁begin|>from __future__ import absolute_import
from __future__ import unicode_literals
# -*- coding: utf-8 -*-
from django.utils.translation import ugettext as _
from wiki.core.plugins import registry
from wiki.core.plugins.base import BasePlugin
from wiki.plugins.macros import settings
from wiki.plugins.macros.mdx.macro import MacroExtension
from wiki.plugins.macros.mdx.toc import WikiTocExtension
# from wiki.plugins.macros.mdx.wikilinks import WikiLinkExtension
class MacroPlugin(BasePlugin):
slug = settings.SLUG
sidebar = {'headline': _('Macros'),
'icon_class': 'fa-play',
'template': 'wiki/plugins/macros/sidebar.html',
'form_class': None,
'get_form_kwargs': (lambda a: {})}
markdown_extensions = [MacroExtension(), WikiTocExtension()]
<|fim▁hole|> pass
registry.register(MacroPlugin)<|fim▁end|>
|
def __init__(self):
|
<|file_name|>21613.js<|end_file_name|><|fim▁begin|>/*
This file is part of the HeavenMS MapleStory Server
Copyleft (L) 2016 - 2019 RonanLana
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation version 3 as published by
the Free Software Foundation. You may not use, modify or distribute
this program under any other version of the GNU Affero General Public
License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
var status = -1;
function start(mode, type, selection) {
if (mode == -1) {
qm.dispose();
} else {
if(mode == 0 && type > 0) {
qm.dispose();
return;
}
if (mode == 1)
status++;
else
status--;
if (status == 0) {
qm.sendNext("We're a pack of wolves looking for our lost child. I hear you are taking care of our baby. We appreciate your kindness, but it's time to return our baby to us.", 9);
} else if (status == 1) {
qm.sendNextPrev("Werewolf is my friend, I can't just hand over a friend.", 3);
} else if (status == 2) {
qm.sendAcceptDecline("We understand, but we won't leave without our pup. Tell you what, we'll test you to see if you are worthy of raising a wolf. #rGet ready to be tested by wolves.#k");
} else if (status == 3) {
var em = qm.getEventManager("Aran_3rdmount");
if (em == null) {
qm.sendOk("Sorry, but the 3rd mount quest (Wolves) is closed.");
return;
}
else {
var em = qm.getEventManager("Aran_3rdmount");
if (!em.startInstance(qm.getPlayer())) {
qm.sendOk("There is currently someone in this map, come back later.");<|fim▁hole|> }
} else if (status == 4) {
qm.dispose();
}
}
}<|fim▁end|>
|
} else {
qm.forceStartQuest();
}
|
<|file_name|>scope.go<|end_file_name|><|fim▁begin|>package scope
import (
"fmt"
"net/url"
"github.com/docker/infrakit/pkg/discovery"
"github.com/docker/infrakit/pkg/discovery/local"
"github.com/docker/infrakit/pkg/plugin"
"github.com/docker/infrakit/pkg/run/scope"
"github.com/docker/infrakit/pkg/spi/controller"
"github.com/docker/infrakit/pkg/spi/flavor"
"github.com/docker/infrakit/pkg/spi/group"
"github.com/docker/infrakit/pkg/spi/instance"
"github.com/docker/infrakit/pkg/spi/loadbalancer"
"github.com/docker/infrakit/pkg/spi/stack"
"github.com/docker/infrakit/pkg/template"
)
// FakeLeader returns a fake leadership func
func FakeLeader(v bool) func() stack.Leadership {
	return func() stack.Leadership { return fakeLeaderT(v) }
}

// fakeLeaderT is a constant-answer Leadership implementation: the bool value
// itself is the IsLeader answer.
type fakeLeaderT bool

// IsLeader always answers with the fixed value and never errs.
func (f fakeLeaderT) IsLeader() (bool, error) {
	return bool(f), nil
}

// LeaderLocation is unknown for the fake; always (nil, nil).
func (f fakeLeaderT) LeaderLocation() (*url.URL, error) {
	return nil, nil
}
type fakePlugins map[string]*plugin.Endpoint
// Find implements discovery.Plugins
func (f fakePlugins) Find(name plugin.Name) (*plugin.Endpoint, error) {
if f == nil {
return nil, fmt.Errorf("not found")
}<|fim▁hole|> if v, has := f[lookup]; has {
return v, nil
}
return nil, fmt.Errorf("not found")
}
// List implements discovery.Plugins
func (f fakePlugins) List() (map[string]*plugin.Endpoint, error) {
return (map[string]*plugin.Endpoint)(f), nil
}
// FakeScope returns a fake Scope with given endpoints
func FakeScope(endpoints map[string]*plugin.Endpoint) scope.Scope {
return scope.DefaultScope(func() discovery.Plugins {
return fakePlugins(endpoints)
})
}
// DefaultScope returns a default scope but customizable for different plugin lookups
func DefaultScope() *Scope {
	// This is a test helper: failure to set up local plugin discovery is
	// unrecoverable here, so panic rather than returning an error.
	f, err := local.NewPluginDiscovery()
	if err != nil {
		panic(err)
	}
	return &Scope{
		Scope: scope.DefaultScope(func() discovery.Plugins { return f }),
	}
}
// Scope is the testing scope for looking up components
type Scope struct {
scope.Scope
// ResolvePlugins returns the plugin lookup
ResolvePlugins func() discovery.Plugins
// ResolveStack returns the stack that entails this scope
ResolveStack func(n string) (stack.Interface, error)
// ResolveGroup is for looking up an group plugin
ResolveGroup func(n string) (group.Plugin, error)
// ResolveController returns the controller by name
ResolveController func(n string) (controller.Controller, error)
// ResolveInstance is for looking up an instance plugin
ResolveInstance func(n string) (instance.Plugin, error)
// ResolveFlavor is for lookup up a flavor plugin
ResolveFlavor func(n string) (flavor.Plugin, error)
// ResolveL4 is for lookup up an L4 plugin
ResolveL4 func(n string) (loadbalancer.L4, error)
// ResolveMetadata is for resolving metadata / path related queries
ResolveMetadata func(p string) (*scope.MetadataCall, error)
// ResolveTemplateEngine creates a template engine for use.
ResolveTemplateEngine func(url string, opts template.Options) (*template.Template, error)
}
// Plugins returns the plugin lookup
func (s *Scope) Plugins() discovery.Plugins {
if s.ResolvePlugins != nil {
return s.ResolvePlugins()
}
return s.Scope.Plugins()
}
// Stack returns the stack that entails this scope
func (s *Scope) Stack(name string) (stack.Interface, error) {
if s.ResolveStack != nil {
return s.ResolveStack(name)
}
return s.Scope.Stack(name)
}
// Group is for looking up an group plugin
func (s *Scope) Group(name string) (group.Plugin, error) {
if s.ResolveGroup != nil {
return s.ResolveGroup(name)
}
return s.Scope.Group(name)
}
// Controller returns the controller by name
func (s *Scope) Controller(name string) (controller.Controller, error) {
if s.ResolveController != nil {
return s.ResolveController(name)
}
return s.Scope.Controller(name)
}
// Instance is for looking up an instance plugin
func (s *Scope) Instance(name string) (instance.Plugin, error) {
if s.ResolveInstance != nil {
return s.ResolveInstance(name)
}
return s.Scope.Instance(name)
}
// Flavor looks up a flavor plugin by name, preferring the ResolveFlavor
// override when one is installed.
func (s *Scope) Flavor(name string) (flavor.Plugin, error) {
	if resolve := s.ResolveFlavor; resolve != nil {
		return resolve(name)
	}
	return s.Scope.Flavor(name)
}
// L4 looks up an L4 load balancer by name, preferring the ResolveL4
// override when one is installed.
func (s *Scope) L4(name string) (loadbalancer.L4, error) {
	if resolve := s.ResolveL4; resolve != nil {
		return resolve(name)
	}
	return s.Scope.L4(name)
}
// Metadata resolves metadata / path related queries, preferring the
// ResolveMetadata override when one is installed.
func (s *Scope) Metadata(path string) (*scope.MetadataCall, error) {
	if resolve := s.ResolveMetadata; resolve != nil {
		return resolve(path)
	}
	return s.Scope.Metadata(path)
}
// TemplateEngine creates a template engine for use.
func (s *Scope) TemplateEngine(url string, opts template.Options) (*template.Template, error) {
if s.ResolveTemplateEngine != nil {
return s.ResolveTemplateEngine(url, opts)
}
return s.Scope.TemplateEngine(url, opts)
}<|fim▁end|>
|
lookup, _ := name.GetLookupAndType()
|
<|file_name|>main.rs<|end_file_name|><|fim▁begin|>// Copyright 2013 The GLFW-RS Developers. For a full listing of the authors,
// refer to the AUTHORS file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//<|fim▁hole|>// limitations under the License.
extern mod glfw;
use std::libc;
use std::unstable::finally::Finally;
/// Custom runtime entry point. GLFW event handling must stay on the main
/// platform thread, so `main` is routed through `start_on_main_thread`.
#[start]
fn start(argc: int, argv: **u8, crate_map: *u8) -> int {
    // GLFW must run on the main platform thread
    std::rt::start_on_main_thread(argc, argv, crate_map, main)
}
/// Create a 300x300 window and pump events until it is asked to close.
fn main() {
    // Install the error handler before init so initialization failures
    // are reported through `error_callback`.
    glfw::set_error_callback(error_callback);
    if glfw::init().is_err() {
        fail!(~"Failed to initialize GLFW");
    } else {
        (||{
            let window = glfw::Window::create(300, 300, "Hello this is window", glfw::Windowed).unwrap();
            window.set_key_callback(key_callback);
            window.make_context_current();
            // Event loop: poll both the window and the global GLFW queue.
            while !window.should_close() {
                window.poll_events();
                glfw::poll_events();
            }
        // Use `finally` to ensure that `glfw::terminate` is called even if a failure occurs
        }).finally(glfw::terminate);
    }
}
/// Request window close when the user presses the Escape key.
fn key_callback(window: &glfw::Window, key: libc::c_int, _: libc::c_int, action: libc::c_int, _: glfw::KeyMods) {
    if action == glfw::PRESS && key == glfw::KEY_ESCAPE {
        window.set_should_close(true);
    }
}
/// Report GLFW errors on stdout (error code is ignored).
fn error_callback(_: libc::c_int, description: ~str) {
    println(fmt!("GLFW Error: %s", description));
}
|
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
|
<|file_name|>onp_roiswglm.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
# Plugins
Plugins allow flexible modification and execution of OpenNFT without touching the core codebase. Plugins can access data, process them in a specific way,
and they can be switched on and off according to the user's need.
Each plugin has to be a subclass of *Process class specified in pyniexp.mlplugins. It has to contain a header in a format of dictionary (called META) with prespecified keys:
- plugin_name: It is a freeform text which will be displayed in the plugin dialog and in the logs.
- plugin_time: It is an event timestamp as specified in opennft.eventrecorder.Times, and it determines the execution time of the plugin (so far only t3 is implemented)
- plugin_init: It is the initialization code of the plugin. "{}" can be used to refer to OpenNFT parameters as specified in the P parameter dictionary. It can be a list of
commands, in which case, the first is run to create the object, and the rest are executed afterwards.
- plugin_signal: It is an expression returning a logical value, and it specifies the condition when the plugin can be executed.
*Process classes pyniexp.mlplugins has an abstract/placeholder method called process, which should be overwritten to specify the operation on the data.
- the input to the process method of dataProcess (called data) is a one-dimensional numpy array
- the input to the process method of imageProcess (called image) is a multi-dimensional (usually 3D) numpy array as specified during initialization
# ROI step-wise GLM
This plugin demonstrates how to add you own approach (this one is a step-wise addition of each block) for ROI analysis.
__________________________________________________________________________
Copyright (C) 2016-2021 OpenNFT.org
Written by Tibor Auer
""" # noqa: E501
from pyniexp.mlplugins import dataProcess
from loguru import logger
from multiprocessing import Value, RawArray
from numpy import array, meshgrid, savetxt
import matplotlib.pyplot as plt
from os import path
# Plugin manifest consumed by OpenNFT's plugin loader; key semantics are
# described in the module docstring above. "{...}" placeholders are filled
# from OpenNFT's P parameter dictionary.
META = {
    "plugin_name": "ROI step-wise GLM",
    "plugin_time": "t4",  # according to opennft.eventrecorder.Times
    "plugin_init": [
        # First entry constructs the plugin object; the rest run afterwards.
        "ROIswGLM(int({NrROIs}),len({ProtNF}),r'{nfbDataFolder}')",
        "self.parent.eng.evalin('base','onp_roiswglm')"
    ],
    # Only run once the MATLAB workspace has the AR(1)-filtered time series.
    "plugin_signal": "self.parent.eng.evalin('base','isfield(mainLoopData,\\\'tmp_rawTimeSeriesAR1\\\')')",
    "plugin_exec": "load_data(self.parent.eng.evalin('base','onp_roiswglm'))",
}
class ROIswGLM(dataProcess):
    """Collect step-wise GLM values per ROI, save them per block and plot them.

    Each incoming sample carries nROIs*nBlocks regressor values; they are
    accumulated into a flat shared buffer and reshaped to
    (nBlocks, nROIs, nBlocks) at finalization.
    """

    def __init__(self, nROIs, nBlocks, nfbDataFolder):
        """Start the background data process.

        :param nROIs: number of ROIs per sample
        :param nBlocks: number of NF blocks in the protocol
        :param nfbDataFolder: folder where per-block text files are written
        """
        super().__init__(nROIs*nBlocks, autostart=False)
        self.nfbDataFolder = nfbDataFolder
        self.nROIs = nROIs
        self.nBlocks = nBlocks
        # Shared flat buffer: one double per (block, ROI, regressor) triple.
        self.rtdata = RawArray('d', [0]*self.nROIs*self.nBlocks*self.nBlocks)
        # Write cursor into rtdata (shared across processes).
        self.nData = Value('i', 0)
        self.start_process()

    def process(self, data):
        """Append one sample (nROIs*nBlocks values) to the shared buffer.

        All-zero samples are ignored (no data yet).
        """
        if any(array(data) != 0):
            for r in data:
                self.rtdata[self.nData.value] = r
                self.nData.value += 1
            logger.info(('ROIs: [ ' + '{:.3f} '*len(data) + ']').format(*data))

    def finalize_process(self):
        """Save per-block results as CSV text files and plot one 3-D surface per ROI."""
        dat = array(self.rtdata).reshape(self.nBlocks, self.nROIs, self.nBlocks)
        for b in range(0, self.nBlocks):
            fname = path.join(path.normpath(self.nfbDataFolder), 'ROIswGLM_{:02d}.txt'.format(b+1))
            # Block b only has b+1 step-wise regressors so far.
            savetxt(fname=fname, X=dat[b, :, 0:b+1].transpose(), fmt='%.3f', delimiter=',')
        # BUG FIX: meshgrid was previously called with the scalar nBlocks,
        # producing 1x1 coordinate grids that cannot match the
        # (nBlocks, nBlocks) surface dat[:, r, :]; build full index grids.
        X, Y = meshgrid(range(self.nBlocks), range(self.nBlocks))
        for r in range(0, self.nROIs):
            # Generalized from the hard-coded two-ROI layout (120 + r + 1):
            # one 3-D subplot per ROI in a single row.
            ax = plt.subplot(1, self.nROIs, r + 1, projection='3d')
            ax.plot_surface(X, Y, dat[:, r, :])
        plt.show()
|
- plugin_exec: It is the execution code of the plugin, and it is usually calls the plugin's load_data method to transfer some data to the plugin.
|
<|file_name|>itunes_item_extension_builder.rs<|end_file_name|><|fim▁begin|>// This file is part of feed.
//<|fim▁hole|>// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation; either version 3 of the License, or
// (at your option) any later version.
//! The fields can be set for itunes category by using the methods under
//! `ITunesItemExtensionBuilder`.
use extension::itunes::ITunesItemExtensionBuilder;
use rss::extension::itunes::ITunesItemExtension;
impl ITunesItemExtensionBuilder
{
/// Construct a new `ITunesItemExtensionBuilder` and return default values.
///
/// # Examples
///
/// ```
/// use feed::extension::itunes::ITunesItemExtensionBuilder;
///
/// let item_builder = ITunesItemExtensionBuilder::new();
/// ```
pub fn new() -> ITunesItemExtensionBuilder
{
ITunesItemExtensionBuilder::default()
}
/// Set the optional author that exists under `ITunesItemExtension`.
///
/// # Examples
///
/// ```
/// use feed::extension::itunes::ITunesItemExtensionBuilder;
///
/// let mut item_builder = ITunesItemExtensionBuilder::new();
/// item_builder.author(Some("author".to_owned()));
/// ```
pub fn author(&mut self, author: Option<String>) -> &mut ITunesItemExtensionBuilder
{
self.author = author;
self
}
/// Set the optional block that exists under `ITunesItemExtension`.
///
/// # Examples
///
/// ```
/// use feed::extension::itunes::ITunesItemExtensionBuilder;
///
/// let mut item_builder = ITunesItemExtensionBuilder::new();
/// item_builder.block(Some("block".to_owned()));
/// ```
pub fn block(&mut self, block: Option<String>) -> &mut ITunesItemExtensionBuilder
{
self.block = block;
self
}
/// Set the optional image that exists under `ITunesItemExtension`.
///
/// # Examples
///
/// ```
/// use feed::extension::itunes::ITunesItemExtensionBuilder;
///
/// let mut item_builder = ITunesItemExtensionBuilder::new();
/// item_builder.image(Some("image".to_owned()));
/// ```
pub fn image(&mut self, image: Option<String>) -> &mut ITunesItemExtensionBuilder
{
self.image = image;
self
}
/// Set the optional duration that exists under `ITunesItemExtension`.
///
/// # Examples
///
/// ```
/// use feed::extension::itunes::ITunesItemExtensionBuilder;
///
/// let mut item_builder = ITunesItemExtensionBuilder::new();
/// item_builder.duration(Some("duration".to_owned()));
/// ```
pub fn duration(&mut self, duration: Option<String>) -> &mut ITunesItemExtensionBuilder
{
self.duration = duration;
self
}
/// Set the optional explicit that exists under `ITunesItemExtension`.
///
/// # Examples
///
/// ```
/// use feed::extension::itunes::ITunesItemExtensionBuilder;
///
/// let mut item_builder = ITunesItemExtensionBuilder::new();
/// item_builder.explicit(Some("explicit".to_owned()));
/// ```
pub fn explicit(&mut self, explicit: Option<String>) -> &mut ITunesItemExtensionBuilder
{
self.explicit = explicit;
self
}
/// Set the optional closed_captioned that exists under
/// `ITunesItemExtension`.
///
/// # Examples
///
/// ```
/// use feed::extension::itunes::ITunesItemExtensionBuilder;
///
/// let mut item_builder = ITunesItemExtensionBuilder::new();
/// item_builder.closed_captioned(Some("closed_captioned".to_owned()));
/// ```
pub fn closed_captioned(&mut self, closed_captioned: Option<String>) -> &mut ITunesItemExtensionBuilder
{
self.closed_captioned = closed_captioned;
self
}
/// Set the optional order that exists under `ITunesItemExtension`.
///
/// # Examples
///
/// ```
/// use feed::extension::itunes::ITunesItemExtensionBuilder;
///
/// let mut item_builder = ITunesItemExtensionBuilder::new();
/// item_builder.order(Some("order".to_owned()));
/// ```
pub fn order(&mut self, order: Option<String>) -> &mut ITunesItemExtensionBuilder
{
self.order = order;
self
}
/// Set the optional subtitle that exists under `ITunesItemExtension`.
///
/// # Examples
///
/// ```
/// use feed::extension::itunes::ITunesItemExtensionBuilder;
///
/// let mut item_builder = ITunesItemExtensionBuilder::new();
/// item_builder.subtitle(Some("subtitle".to_owned()));
/// ```
pub fn subtitle(&mut self, subtitle: Option<String>) -> &mut ITunesItemExtensionBuilder
{
self.subtitle = subtitle;
self
}
/// Set the optional summary that exists under `ITunesItemExtension`.
///
/// # Examples
///
/// ```
/// use feed::extension::itunes::ITunesItemExtensionBuilder;
///
/// let mut item_builder = ITunesItemExtensionBuilder::new();
/// item_builder.summary(Some("summary".to_owned()));
/// ```
pub fn summary(&mut self, summary: Option<String>) -> &mut ITunesItemExtensionBuilder
{
self.summary = summary;
self
}
/// Set the optional keywords that exists under `ITunesItemExtension`.
///
/// # Examples
///
/// ```
/// use feed::extension::itunes::ITunesItemExtensionBuilder;
///
/// let mut item_builder = ITunesItemExtensionBuilder::new();
/// item_builder.keywords(Some("keywords".to_owned()));
/// ```
pub fn keywords(&mut self, keywords: Option<String>) -> &mut ITunesItemExtensionBuilder
{
self.keywords = keywords;
self
}
/// Construct the `ITunesItemExtension` from the
/// `ITunesItemExtensionBuilder`.
///
/// # Examples
///
/// ```
/// use feed::extension::itunes::ITunesItemExtensionBuilder;
///
/// let item = ITunesItemExtensionBuilder::new()
/// .author(Some("author".to_owned()))
/// .block(Some("block".to_owned()))
/// .image(Some("image".to_owned()))
/// .duration(Some("duration".to_owned()))
/// .explicit(Some("explicit".to_owned()))
/// .closed_captioned(Some("closed_captioned".to_owned()))
/// .order(Some("order".to_owned()))
/// .subtitle(Some("subtitle".to_owned()))
/// .summary(Some("summary".to_owned()))
/// .keywords(Some("keywords".to_owned()))
/// .finalize()
/// .unwrap();
/// ```
pub fn finalize(&self) -> Result<ITunesItemExtension, String>
{
Ok(ITunesItemExtension {
author: self.author.clone(),
block: self.block.clone(),
image: self.image.clone(),
duration: self.duration.clone(),
explicit: self.explicit.clone(),
closed_captioned: self.closed_captioned.clone(),
order: self.order.clone(),
subtitle: self.subtitle.clone(),
summary: self.summary.clone(),
keywords: self.keywords.clone(),
})
}
}<|fim▁end|>
|
// Copyright © 2015-2017 Chris Palmer <[email protected]>
//
// This program is free software; you can redistribute it and/or modify
|
<|file_name|>0012_auto__chg_field_conclusion_title__chg_field_answer_title__chg_field_qu.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Auto-generated South schema migration.

    Relaxes the ``title`` column on Conclusion, Answer, Question and Test
    to allow NULL values; ``backwards`` restores NOT NULL with defaults.
    """

    def forwards(self, orm):
        # Changing field 'Conclusion.title'
        db.alter_column(u'itest_conclusion', 'title', self.gf('django.db.models.fields.CharField')(max_length=250, null=True))
        # Changing field 'Answer.title'
        db.alter_column(u'itest_answer', 'title', self.gf('django.db.models.fields.CharField')(max_length=250, null=True))
        # Changing field 'Question.title'
        db.alter_column(u'itest_question', 'title', self.gf('django.db.models.fields.CharField')(max_length=250, null=True))
        # Changing field 'Test.title'
        db.alter_column(u'itest_test', 'title', self.gf('django.db.models.fields.CharField')(max_length=250, null=True))

    def backwards(self, orm):
        # Changing field 'Conclusion.title'
        db.alter_column(u'itest_conclusion', 'title', self.gf('django.db.models.fields.CharField')(default='', max_length=250))
        # Changing field 'Answer.title'
        db.alter_column(u'itest_answer', 'title', self.gf('django.db.models.fields.CharField')(default=1, max_length=250))
        # Changing field 'Question.title'
        db.alter_column(u'itest_question', 'title', self.gf('django.db.models.fields.CharField')(default=1, max_length=250))
        # Changing field 'Test.title'
        db.alter_column(u'itest_test', 'title', self.gf('django.db.models.fields.CharField')(default='a', max_length=250))

    # Frozen ORM state used by South for this migration; do not edit by hand.
    models = {
        'itest.answer': {
            'Meta': {'object_name': 'Answer'},
            'conclusion': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['itest.Conclusion']"}),
            'content': ('django.db.models.fields.TextField', [], {'max_length': '5850', 'null': 'True', 'blank': 'True'}),
            'create_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'jump': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['itest.Question']"}),
            'num': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': "orm['itest.Question']"}),
            'summary': ('django.db.models.fields.CharField', [], {'max_length': '450', 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'})
        },
        'itest.conclusion': {
            'Meta': {'ordering': "['num']", 'object_name': 'Conclusion'},
            'content': ('django.db.models.fields.TextField', [], {'max_length': '5850', 'null': 'True', 'blank': 'True'}),
            'create_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'num': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'summary': ('django.db.models.fields.CharField', [], {'max_length': '450', 'null': 'True', 'blank': 'True'}),
            'test': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'conclusions'", 'to': "orm['itest.Test']"}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'})
        },
        'itest.question': {
            'Meta': {'ordering': "['num']", 'object_name': 'Question'},
            'content': ('django.db.models.fields.TextField', [], {'max_length': '5850', 'null': 'True', 'blank': 'True'}),
            'create_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'num': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'summary': ('django.db.models.fields.CharField', [], {'max_length': '450', 'null': 'True', 'blank': 'True'}),
            'test': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questions'", 'to': "orm['itest.Test']"}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'})
        },
        'itest.tag': {
            'Meta': {'object_name': 'Tag'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'word': ('django.db.models.fields.CharField', [], {'max_length': '35'})
        },
        'itest.test': {
            'Meta': {'ordering': "['num']", 'object_name': 'Test'},
            'content': ('django.db.models.fields.TextField', [], {'max_length': '5850', 'null': 'True', 'blank': 'True'}),
            'create_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'num': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'summary': ('django.db.models.fields.CharField', [], {'max_length': '450', 'null': 'True', 'blank': 'True'}),
            'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'tests'", 'symmetrical': 'False', 'to': "orm['itest.Tag']"}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'})
        }
    }

    complete_apps = ['itest']
| |
<|file_name|>apigenrole.py<|end_file_name|><|fim▁begin|>from docutils import nodes, utils
from docutils.parsers.rst.roles import set_classes
# I cant figure out how the hell to import this so I'm just gonna forget it for now
def apigen_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
    """Link to an ApiGen API docs page.

    Returns a 2-tuple: the list of nodes to insert into the document and a
    list of system messages (either may be empty).

    :param name: The role name used in the document.
    :param rawtext: The entire markup snippet, with role.
    :param text: The text marked with the role.
    :param lineno: The line number where rawtext appears in the input.
    :param inliner: The inliner instance that called us.
    :param options: Directive options for customization.
    :param content: The directive content for customization.
    """
    # Normalize PHP-style backslash separators to dots and drop a single
    # leading dot (i.e. a root-relative name).
    class_name = text.replace('\\', '.')
    if text[0:1] == '.':
        class_name = class_name[1:]
    if class_name == "":
        # Nothing left after normalization: report and mark as problematic.
        msg = inliner.reporter.error(
            'Class name must be a valid fully qualified class name; '
            '"%s" is invalid.' % text, line=lineno)
        prb = inliner.problematic(rawtext, rawtext, msg)
        return [prb], [msg]
    app = inliner.document.settings.env.app
    node = make_link_node(rawtext, app, 'class', class_name, options)
    return [node], []
def make_link_node(rawtext, app, type, slug, options):
    """Create a link to an ApiGen API docs page.

    :param rawtext: Text being replaced with link node.
    :param app: Sphinx application context
    :param type: Item type (class, namespace, etc.)
    :param slug: ID of the thing to link to
    :param options: Options dictionary passed to role func.
    :raises ValueError: if the ``apigen_docs_uri`` config value is unset.
    """
    try:
        base = app.config.apigen_docs_uri
        if not base:
            raise AttributeError
    # BUG FIX: "except E, err" is Python-2-only syntax (a SyntaxError on
    # Python 3); "except E as err" works on Python 2.6+ and 3.
    except AttributeError as err:
        raise ValueError('apigen_docs_uri configuration value is not set (%s)' % str(err))
    # Build API docs link (avoid a double slash when base ends with '/').
    slash = '/' if base[-1] != '/' else ''
    ref = base + slash + type + '-' + slug + '.html'
    set_classes(options)
    node = nodes.reference(rawtext, type + ' ' + utils.unescape(slug), refuri=ref,
                           **options)
    return node
def setup(app):
    """Install the plugin.

    Registers the ``apiclass`` role and the ``apigen_docs_uri`` config value.

    :param app: Sphinx application context.
    """
    app.info('Initializing Api Class plugin')
    app.add_role('apiclass', apigen_role)
    # app.add_role('apins', apigen_namespace_role)
    app.add_config_value('apigen_docs_uri', None, 'env')
    return
|
:param app: Sphinx application context.
|
<|file_name|>nurseconnect_tags.py<|end_file_name|><|fim▁begin|>import calendar
from django.template import Library
from molo.core.templatetags.core_tags import load_sections
from molo.profiles.models import UserProfilesSettings
from nurseconnect.utils import get_survey_results_for_user
register = Library()
@register.filter('fieldtype')
def fieldtype(field):
    """Return the class name of the widget rendering this bound form field."""
    widget = field.field.widget
    return widget.__class__.__name__
@register.inclusion_tag("core/tags/footerlink.html", takes_context=True)
def footer_link(context, id):
    """Render a footer link with the site's terms-and-conditions page."""
    request = context["request"]
    terms_page = UserProfilesSettings.for_site(request.site).terms_and_conditions
    return {
        "id": id,
        "terms": terms_page,
        "request": request,
        "locale_code": context.get("locale_code"),
    }
@register.inclusion_tag(
    "core/tags/section_listing_menu.html",
    takes_context=True
)
def section_listing_menu(context):
    """Render the section listing menu for the current request/locale."""
    return {
        "sections": load_sections(context),
        "request": context["request"],
        "locale_code": context.get("locale_code"),
    }
@register.assignment_tag()
def convert_month(value):
    """Return the English month name for a 1-12 month number ('' when falsy)."""
    return calendar.month_name[value] if value else ""
@register.assignment_tag()
def get_next_article(page):
    """Return the specific page of the next sibling, or None for the last page."""
    sibling = page.get_next_sibling()
    return sibling.specific if sibling else None
@register.inclusion_tag("surveys/embedded_survey.html",
                        takes_context=True)
def embedded_survey_tag(context, page):
    '''
    Display the child survey of a page.

    If a user has not submitted, they will see the survey form.
    If a user has already submitted an answer, they see their results
    (and, when available, a link to the next sibling article).

    NOTE: This currently only works for Radio Buttons with True/False
    and uses a hack where data stored in the survey thank you text will
    store true, false string values seperated by commas. I apologise
    if you are responsible for maintaining this in the future.
    '''
    user = context['request'].user
    # The survey is assumed to be the page's first child.
    survey = page.get_children().first().specific
    survey_results = get_survey_results_for_user(survey, user)
    if survey_results:
        if page.get_next_sibling():
            next_article = page.get_next_sibling().specific
        else:
            next_article = None
        return {
            "survey_answered": True,
            "answers": survey_results,
            "next_article": next_article,
        }
    else:
        return {
            "survey_answered": False,
            "survey": survey
        }
|
If a user has already submitted an answer they see their results
|
<|file_name|>317ce6b677c486cd0ff0fa900186c6309aa26ae3.js<|end_file_name|><|fim▁begin|>var Todo = React.createClass({displayName: "Todo",
getInitialState: function() {
this.text = "";
return {text: ""};
},
componentWillUnmount: function() {
this.ref.off();
},
componentWillMount: function() {
this.ref = new Firebase("https://glaring-fire-5349.firebaseio.com/react_todos/" + this.props.todoKey);
// Update the todo's text when it changes.
this.ref.on("value", function(snap) {
if (snap.val() !== null) {
this.text = snap.val().text;
this.setState({
text: this.text
});
} else {
this.ref.update({
text: ""
});
}
}.bind(this));
},
onTextBlur: function(event) {
this.ref.update({
text: $(event.target).text()
});
},
render: function() {
return (
React.createElement("li", {id: this.props.todoKey, className: "list-group-item todo"},
React.createElement("a", {href: "#", className: "pull-left todo-check"},
React.createElement("span", {
className: "todo-check-mark glyphicon glyphicon-ok",
"aria-hidden": "true"}
)
),
React.createElement("span", {
onBlur: this.onTextBlur,
contentEditable: "true",
"data-ph": "Todo",
className: "todo-text"},
this.state.text
)
)
);
}
});
// A list of Todo items mirrored from Firebase.
// NOTE(review): props.todoListKey is accepted but unused here — the ref is
// hard-coded to the shared react_todos path; verify whether per-list data
// was intended.
var TodoList = React.createClass({displayName: "TodoList",
  getInitialState: function() {
    this.todos = [];
    return {todos: []};
  },
  componentWillMount: function() {
    this.ref = new Firebase("https://glaring-fire-5349.firebaseio.com/react_todos/");
    // Add an empty todo if none currently exist.
    this.ref.on("value", function(snap) {
      if (snap.val() === null) {
        this.ref.push({
          text: "",
          checked: false,
        });
      }
    }.bind(this));
    // Add an added child to this.todos.
    this.ref.on("child_added", function(childSnap) {
      this.todos.push({
        k: childSnap.key(),
        val: childSnap.val()
      });
      this.setState({
        todos: this.todos
      });
    }.bind(this));
    // Remove the matching entry when a child is deleted remotely.
    this.ref.on("child_removed", function(childSnap) {
      var key = childSnap.key();
      var i;
      for (i = 0; i < this.todos.length; i++) {
        if (this.todos[i].k == key) {
          break;
        }
      }
      this.todos.splice(i, 1);
      this.setState({
        todos: this.todos
      });
    }.bind(this));
  },
  componentWillUnmount: function() {
    // Detach all Firebase listeners registered on this ref.
    this.ref.off();
  },
  render: function() {
    var todos = this.state.todos.map(function (todo) {
      return (
        React.createElement(Todo, {todoKey: todo.k})
      );
    });
    return (
      React.createElement("div", null,
        React.createElement("h1", {id: "list_title"}, this.props.title),
        React.createElement("ul", {id: "todo-list", className: "list-group"},
          todos
        )
      )
    );
  }
});
// Chrome around a single todo list: a "Back to Lists" link plus the page
// body; children are rendered inside the page header.
var ListPage = React.createClass({displayName: "ListPage",
  render: function() {
    return (
      React.createElement("div", null,
        React.createElement("div", {id: "list_page"},
          React.createElement("a", {
            // Navigate via the App's history-aware handler instead of a
            // full page load.
            onClick: this.props.app.navOnClick({page: "LISTS"}),
            href: "/#/lists",
            id: "lists_link",
            className: "btn btn-primary"},
            "Back to Lists"
          )
        ),
        React.createElement("div", {className: "page-header"},
          this.props.children
        )
      )
    );
  }
});
// Top navigation bar. Receives the App instance via props so the brand link
// can navigate back to the lists page without a full reload.
var Nav = React.createClass({displayName: "Nav",
  render: function() {
    return (
      React.createElement("nav", {className: "navbar navbar-default navbar-static-top"},
        React.createElement("div", {className: "container"},
          React.createElement("div", {className: "navbar-header"},
            React.createElement("a", {onClick: this.props.app.navOnClick({page: "LISTS"}), className: "navbar-brand", href: "?"}, "Firebase Todo")
          ),
          React.createElement("ul", {className: "nav navbar-nav"},
            React.createElement("li", null, React.createElement("a", {href: "?"}, "Lists"))
          )
        )
      )
    );
  }
});
// Top-level router/controller: derives the current page from the URL hash
// and keeps component state in sync with the browser history.
var App = React.createClass({displayName: "App",
  getInitialState: function() {
    var state = this.getState();
    // Normalize the current history entry so back/forward always carries
    // a state object.
    this.setHistory(state, true);
    return this.getState();
  },
  // Push (or replace, when `replace` is true) a history entry whose hash
  // encodes the given state.
  setHistory: function(state, replace) {
    var histFunc = replace ?
      history.replaceState.bind(history) :
      history.pushState.bind(history);
    if (state.page === "LIST") {
      histFunc(state, "", "#/list/" + state.todoListKey);
    } else if (state.page === "LISTS") {
      histFunc(state, "", "#/lists");
    } else {
      console.log("Unknown page: " + state.page);
    }
  },
  // Parse the location hash into a state object; defaults to the lists page.
  getState: function() {
    var url = document.location.toString();
    if (url.match(/#/)) {
      var path = url.split("#")[1];
      var res = path.match(/\/list\/([^\/]*)$/);
      if (res) {
        return {
          page: "LIST",
          todoListKey: res[1],
        };
      }
      res = path.match(/lists$/);
      if (res) {
        return {
          page: "LISTS"
        }
      }
    }
    return {
      page: "LISTS"
    }
  },
  componentWillMount: function() {
    // Register history listeners.
    // BUG FIX: the handler must be bound to the component — previously
    // `this` was `window` when onpopstate fired, so `this.setState` was
    // undefined and back/forward navigation threw.
    window.onpopstate = function(event) {
      this.setState(event.state);
    }.bind(this);
  },
  // Build a click handler that navigates to `state` without a full reload.
  navOnClick: function(state) {
    return function(event) {
      this.setHistory(state, false);
      this.setState(state);
      event.preventDefault();
    }.bind(this);
  },
  // Render the component tree for the current page.
  getPage: function() {
    if (this.state.page === "LIST") {
      return (
        React.createElement(ListPage, {app: this},
          React.createElement(TodoList, {todoListKey: this.state.todoListKey})
        )
      );
    } else if (this.state.page === "LISTS") {
      return (
        React.createElement("a", {onClick: this.navOnClick({page: "LIST", todoListKey: "-JjcFYgp1LyD5oDNNSe2"}), href: "/#/list/-JjcFYgp1LyD5oDNNSe2"}, "hi")
      );
    } else {
      console.log("Unknown page: " + this.state.page);
    }
  },
  render: function() {
    return (
      React.createElement("div", null,
        React.createElement(Nav, {app: this}),
        React.createElement("div", {className: "container", role: "main"},
          this.getPage()
        )
      )
    );
  }
});
// Mount the application into the #content element.
React.render(
  React.createElement(App, null),
  document.getElementById('content')
);
|
React.createElement("li", null, React.createElement("a", {href: "?"}, "Lists"))
)
)
|
<|file_name|>plugin-flattener.ts<|end_file_name|><|fim▁begin|>/*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import { codeModelSchema, CodeModel } from "@autorest/codemodel";
import { AutorestExtensionHost, startSession } from "@autorest/extension-base";
import { serialize } from "@azure-tools/codegen";
import { Example } from "./example";
export async function processRequest(host: AutorestExtensionHost) {
const debug = (await host.getValue("debug")) || false;
try {
const session = await startSession<CodeModel>(host, codeModelSchema);
// process
const plugin = new Example(session);
// go!
const result = plugin.process();
// output the model to the pipeline
host.writeFile({
filename: "code-model-v4.yaml",
content: serialize(result, codeModelSchema),
artifactType: "code-model-v4",
});
host.writeFile({<|fim▁hole|> artifactType: "code-model-v4-no-tags",
});
} catch (error: any) {
if (debug) {
// eslint-disable-next-line no-console
console.error(`${__filename} - FAILURE ${JSON.stringify(error)} ${error.stack}`);
}
throw error;
}
}<|fim▁end|>
|
filename: "code-model-v4-no-tags.yaml",
content: serialize(result),
|
<|file_name|>wsgi.py<|end_file_name|><|fim▁begin|>"""
WSGI config for mysite project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os, sys
from django.core.wsgi import get_wsgi_application
<|fim▁hole|>
# Module-level WSGI callable picked up by application servers
# (e.g. gunicorn/uwsgi); requires DJANGO_SETTINGS_MODULE to be set.
application = get_wsgi_application()
|
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "evewspace.settings")
|
<|file_name|>bigint.js<|end_file_name|><|fim▁begin|>// Copyright (C) 2017 Josh Wolfe. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.
/*---
description: Bitwise AND for BigInt values
esid: sec-bitwise-op
info: |
BitwiseOp(op, x, y)
1. Let result be 0.
2. Let shift be 0.
3. Repeat, until (x = 0 or x = -1) and (y = 0 or y = -1),
a. Let xDigit be x modulo 2.
b. Let yDigit be y modulo 2.
c. Let result be result + 2**shift * op(xDigit, yDigit)
d. Let shift be shift + 1.
e. Let x be (x - xDigit) / 2.
f. Let y be (y - yDigit) / 2.
4. If op(x modulo 2, y modulo 2) ≠ 0,
a. Let result be result - 2**shift. NOTE: This extends the sign.
5. Return result.
features: [BigInt]
---*/
// FIX: two assertions (0b01n & 0b00n, 0b00n & 0b10n) were displaced and a
// third was fused onto the previous line; restore the symmetric sequence.
assert.sameValue(0b00n & 0b00n, 0b00n, "0b00n & 0b00n === 0b00n");
assert.sameValue(0b00n & 0b01n, 0b00n, "0b00n & 0b01n === 0b00n");
assert.sameValue(0b01n & 0b00n, 0b00n, "0b01n & 0b00n === 0b00n");
assert.sameValue(0b00n & 0b10n, 0b00n, "0b00n & 0b10n === 0b00n");
assert.sameValue(0b10n & 0b00n, 0b00n, "0b10n & 0b00n === 0b00n");
assert.sameValue(0b00n & 0b11n, 0b00n, "0b00n & 0b11n === 0b00n");
assert.sameValue(0b11n & 0b00n, 0b00n, "0b11n & 0b00n === 0b00n");
assert.sameValue(0b01n & 0b01n, 0b01n, "0b01n & 0b01n === 0b01n");
assert.sameValue(0b01n & 0b10n, 0b00n, "0b01n & 0b10n === 0b00n");
assert.sameValue(0b10n & 0b01n, 0b00n, "0b10n & 0b01n === 0b00n");
assert.sameValue(0b01n & 0b11n, 0b01n, "0b01n & 0b11n === 0b01n");
assert.sameValue(0b11n & 0b01n, 0b01n, "0b11n & 0b01n === 0b01n");
assert.sameValue(0b10n & 0b10n, 0b10n, "0b10n & 0b10n === 0b10n");
assert.sameValue(0b10n & 0b11n, 0b10n, "0b10n & 0b11n === 0b10n");
assert.sameValue(0b11n & 0b10n, 0b10n, "0b11n & 0b10n === 0b10n");
assert.sameValue(0xffffffffn & 0n, 0n, "0xffffffffn & 0n === 0n");
assert.sameValue(0n & 0xffffffffn, 0n, "0n & 0xffffffffn === 0n");
assert.sameValue(0xffffffffn & 0xffffffffn, 0xffffffffn, "0xffffffffn & 0xffffffffn === 0xffffffffn");
assert.sameValue(0xffffffffffffffffn & 0n, 0n, "0xffffffffffffffffn & 0n === 0n");
assert.sameValue(0n & 0xffffffffffffffffn, 0n, "0n & 0xffffffffffffffffn === 0n");
assert.sameValue(0xffffffffffffffffn & 0xffffffffn, 0xffffffffn, "0xffffffffffffffffn & 0xffffffffn === 0xffffffffn");
assert.sameValue(0xffffffffn & 0xffffffffffffffffn, 0xffffffffn, "0xffffffffn & 0xffffffffffffffffn === 0xffffffffn");
assert.sameValue(
0xffffffffffffffffn & 0xffffffffffffffffn, 0xffffffffffffffffn,
"0xffffffffffffffffn & 0xffffffffffffffffn === 0xffffffffffffffffn");
assert.sameValue(
0xbf2ed51ff75d380fd3be813ec6185780n & 0x4aabef2324cedff5387f1f65n, 0x42092803008e813400181700n,
"0xbf2ed51ff75d380fd3be813ec6185780n & 0x4aabef2324cedff5387f1f65n === 0x42092803008e813400181700n");
assert.sameValue(
0x4aabef2324cedff5387f1f65n & 0xbf2ed51ff75d380fd3be813ec6185780n, 0x42092803008e813400181700n,
"0x4aabef2324cedff5387f1f65n & 0xbf2ed51ff75d380fd3be813ec6185780n === 0x42092803008e813400181700n");
assert.sameValue(0n & -1n, 0n, "0n & -1n === 0n");
assert.sameValue(-1n & 0n, 0n, "-1n & 0n === 0n");
assert.sameValue(0n & -2n, 0n, "0n & -2n === 0n");
assert.sameValue(-2n & 0n, 0n, "-2n & 0n === 0n");
assert.sameValue(1n & -2n, 0n, "1n & -2n === 0n");
assert.sameValue(-2n & 1n, 0n, "-2n & 1n === 0n");
assert.sameValue(2n & -2n, 2n, "2n & -2n === 2n");
assert.sameValue(-2n & 2n, 2n, "-2n & 2n === 2n");
assert.sameValue(2n & -3n, 0n, "2n & -3n === 0n");
assert.sameValue(-3n & 2n, 0n, "-3n & 2n === 0n");
assert.sameValue(-1n & -2n, -2n, "-1n & -2n === -2n");
assert.sameValue(-2n & -1n, -2n, "-2n & -1n === -2n");
assert.sameValue(-2n & -2n, -2n, "-2n & -2n === -2n");
assert.sameValue(-2n & -3n, -4n, "-2n & -3n === -4n");
assert.sameValue(-3n & -2n, -4n, "-3n & -2n === -4n");
assert.sameValue(0xffffffffn & -1n, 0xffffffffn, "0xffffffffn & -1n === 0xffffffffn");
assert.sameValue(-1n & 0xffffffffn, 0xffffffffn, "-1n & 0xffffffffn === 0xffffffffn");
assert.sameValue(0xffffffffffffffffn & -1n, 0xffffffffffffffffn, "0xffffffffffffffffn & -1n === 0xffffffffffffffffn");
assert.sameValue(-1n & 0xffffffffffffffffn, 0xffffffffffffffffn, "-1n & 0xffffffffffffffffn === 0xffffffffffffffffn");
assert.sameValue(
0xbf2ed51ff75d380fd3be813ec6185780n & -0x4aabef2324cedff5387f1f65n, 0xbf2ed51fb554100cd330000ac6004080n,
"0xbf2ed51ff75d380fd3be813ec6185780n & -0x4aabef2324cedff5387f1f65n === 0xbf2ed51fb554100cd330000ac6004080n");
assert.sameValue(
-0x4aabef2324cedff5387f1f65n & 0xbf2ed51ff75d380fd3be813ec6185780n, 0xbf2ed51fb554100cd330000ac6004080n,
"-0x4aabef2324cedff5387f1f65n & 0xbf2ed51ff75d380fd3be813ec6185780n === 0xbf2ed51fb554100cd330000ac6004080n");
assert.sameValue(
-0xbf2ed51ff75d380fd3be813ec6185780n & 0x4aabef2324cedff5387f1f65n, 0x8a2c72024405ec138670800n,
"-0xbf2ed51ff75d380fd3be813ec6185780n & 0x4aabef2324cedff5387f1f65n === 0x8a2c72024405ec138670800n");
assert.sameValue(
0x4aabef2324cedff5387f1f65n & -0xbf2ed51ff75d380fd3be813ec6185780n, 0x8a2c72024405ec138670800n,
"0x4aabef2324cedff5387f1f65n & -0xbf2ed51ff75d380fd3be813ec6185780n === 0x8a2c72024405ec138670800n");
assert.sameValue(
-0xbf2ed51ff75d380fd3be813ec6185780n & -0x4aabef2324cedff5387f1f65n, -0xbf2ed51fffffff2ff7fedffffe7f5f80n,
"-0xbf2ed51ff75d380fd3be813ec6185780n & -0x4aabef2324cedff5387f1f65n === -0xbf2ed51fffffff2ff7fedffffe7f5f80n");
assert.sameValue(
-0x4aabef2324cedff5387f1f65n & -0xbf2ed51ff75d380fd3be813ec6185780n, -0xbf2ed51fffffff2ff7fedffffe7f5f80n,
"-0x4aabef2324cedff5387f1f65n & -0xbf2ed51ff75d380fd3be813ec6185780n === -0xbf2ed51fffffff2ff7fedffffe7f5f80n");
assert.sameValue(-0xffffffffn & 0n, 0n, "-0xffffffffn & 0n === 0n");
assert.sameValue(0n & -0xffffffffn, 0n, "0n & -0xffffffffn === 0n");
assert.sameValue(
-0xffffffffffffffffn & 0x10000000000000000n, 0x10000000000000000n,
"-0xffffffffffffffffn & 0x10000000000000000n === 0x10000000000000000n");
assert.sameValue(
0x10000000000000000n & -0xffffffffffffffffn, 0x10000000000000000n,
"0x10000000000000000n & -0xffffffffffffffffn === 0x10000000000000000n");
assert.sameValue(
-0xffffffffffffffffffffffffn & 0x10000000000000000n, 0n,
"-0xffffffffffffffffffffffffn & 0x10000000000000000n === 0n");
assert.sameValue(
0x10000000000000000n & -0xffffffffffffffffffffffffn, 0n,
"0x10000000000000000n & -0xffffffffffffffffffffffffn === 0n");<|fim▁end|>
|
assert.sameValue(0b01n & 0b00n, 0b00n, "0b01n & 0b00n === 0b00n");
assert.sameValue(0b00n & 0b10n, 0b00n, "0b00n & 0b10n === 0b00n");
|
<|file_name|>Dijkstra.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Created on Tue Nov 17 19:41:46 2015
@author: deep
"""
from graph import weightedGraph
import heapq
def djikstra(a,S):
N = len(a.adjLst)
Visited = [False for i in xrange(N)]
Distance = [float('inf') for i in xrange(N)]
Distance[S] = 0
heap = []
heapq.heappush(heap,(0,S))
for i in xrange(N):
if heap:
while(True):
_,u = heapq.heappop(heap)
if not Visited[u]:
break
Visited[u] = True
for weight_uv,v in a.adjLst[u]:
if not Visited[v]:<|fim▁hole|> Distance[v] = Distance[u] + weight_uv
heapq.heappush(heap, (Distance[v],v))
print Distance
return Distance
g = weightedGraph(4)
g.addEdge(0,1,1)
g.addEdge(1,2,2)
g.addEdge(2,3,3)
g.addEdge(3,0,4)
djikstra(g,0)<|fim▁end|>
|
if Distance[v] > Distance[u] + weight_uv:
|
<|file_name|>tests.py<|end_file_name|><|fim▁begin|>import json
from djpcms import test
from djpcms.plugins.text import Text
class Editing(test.TestCase):
def setUp(self):
super(Editing,self).setUp()
p = self.get()['page']
p.set_template(p.create_template('thre-columns',
'{{ content0 }} {{ content1 }} {{ content2 }}',
'left,center,right'))
for pr in range(0,5):
p.add_plugin(Text,0)
p.add_plugin(Text,1)
p.add_plugin(Text,2)
def postdata(self):
return {self.sites.settings.HTML_CLASSES.post_view_key:'rearrange'}
def geturl(self, block):
return '{0}{1}/{2}/{3}/'.format(self.sites.settings.CONTENT_INLINE_EDITING['pagecontent'],
block.page.id,
block.block,
block.position)
    def _getcontent(self, block, toblock):
        '''Do as jQuery does'''
        # Mimic the client-side drag&drop payload: send either the id of the
        # element that will precede the dragged block ('previous'), or -- when
        # dropping at the top of a column -- the id of the element that will
        # follow it ('next'). Returns the decoded JSON response.
        data = self.postdata()
        if toblock.position:
            if toblock.position <= block.position:
                # Moving upwards: anchor on the element just above the target
                # slot, since the dragged block will land after it.
                toblockp = self.get_block(toblock.block,toblock.position-1)
            else:
                toblockp = toblock
            data['previous'] = toblockp.htmlid()
        else:
            # Target position 0: anchor on the element that will come next.
            data['next'] = toblock.htmlid()
        self.assertTrue(self.login())
        url = self.geturl(block)
        res = self.post(url, data = data, response = True, ajax = True)
        return json.loads(res.content)
    def get_block(self, blocknum, position):
        '''Get a content block from page and perform sanity check'''
        # blocknum: template column index; position: slot within that column.
        p = self.get()['page']
        block = p.get_block(blocknum,position)
        self.assertEqual(block.block,blocknum)
        self.assertEqual(block.position,position)
        return block
    def testLayout(self):
        # The template created in setUp defines exactly three content blocks.
        p = self.get()['page']
        self.assertEqual(p.numblocks(),3)
    def testRearrangeSame(self):
        # Dropping a block onto its own slot is a no-op: 'empty' response.
        block = self.get_block(2,3)
        content = self._getcontent(block,block)
        self.assertEqual(content['header'],'empty')
    def testRearrangeSame0(self):
        # Same no-op check at position 0, which takes the 'next' code path.
        block = self.get_block(1,0)
        content = self._getcontent(block,block)
        self.assertEqual(content['header'],'empty')
def testRearrange3to1SameBlock(self):
block = self.get_block(2,3)
toblock = self.get_block(2,1)
content = self._getcontent(block,toblock)
self.assertEqual(content['header'],'attribute')
data = content['body']
ids = dict(((el['selector'],el['value']) for el in data))
self.assertTrue(ids['#'+block.htmlid()],toblock.htmlid())
self.assertTrue(ids['#'+toblock.htmlid()],block.htmlid())
def testRearrange3to0SameBlock(self):
block = self.get_block(2,3)
toblock = self.get_block(2,0)
content = self._getcontent(block,toblock)
self.assertEqual(content['header'],'attribute')
data = content['body']
ids = dict(((el['selector'],el['value']) for el in data))
self.assertTrue(ids['#'+block.htmlid()],toblock.htmlid())
self.assertTrue(ids['#'+toblock.htmlid()],block.htmlid())
<|fim▁hole|> content = self._getcontent(block,toblock)
self.assertEqual(content['header'],'attribute')
data = content['body']
ids = dict(((el['selector'],el['value']) for el in data))
self.assertTrue(ids['#'+block.htmlid()],toblock.htmlid())
self.assertTrue(ids['#'+toblock.htmlid()],block.htmlid())
def testRearrangeDifferentBlock(self):
block = self.get_block(2,3)
toblock = self.get_block(0,1)
content = self._getcontent(block,toblock)
self.assertEqual(content['header'],'attribute')
data = content['body']<|fim▁end|>
|
def testRearrange1to4SameBlock(self):
block = self.get_block(2,1)
toblock = self.get_block(2,4)
|
<|file_name|>feeds.py<|end_file_name|><|fim▁begin|>from __future__ import unicode_literals
from django.contrib.syndication.views import Feed
from django.contrib.sites.models import get_current_site
from django.shortcuts import get_object_or_404
from django.contrib.auth.models import User
from django_images.models import Thumbnail
from taggit.models import Tag
from .models import Pin
def filter_generator_for(size):
    """Return a callable that fetches (or creates) a thumbnail of ``size``
    for a given image object."""
    def _thumbnail_for(image):
        return Thumbnail.objects.get_or_create_at_size(image.pk, size)
    return _thumbnail_for
class LatestPins(Feed):
    """RSS feed of the 15 most recently published pins, site-wide."""
    title = 'Latest Pins'
    link = '/'
    description = 'The latest pins from around the internet.'
    # Absolute-URL prefix for enclosures; filled in per request by get_object.
    domain_name = None
    item_enclosure_mime_type = 'image/jpeg'

    def get_object(self, request):
        """
        Doing this as a fix for Django's not including the domain name in
        enclosure urls.
        """
        try:
            request_type = 'http'
            if request.is_secure(): request_type = 'https'
            self.domain_name = ''.join([request_type, '://',
                get_current_site(request).domain])
        except Exception:
            # FIX: was a bare ``except:`` which also swallowed SystemExit and
            # KeyboardInterrupt. Kept best-effort: on failure, fall back to
            # whatever domain_name already holds rather than break the feed.
            pass

    def items(self):
        return Pin.objects.order_by('-published')[:15]

    def item_pubdate(self, item):
        return item.published

    def item_link(self, item):
        return item.url

    def item_title(self, item):
        return item.url

    def item_description(self, item):
        tags = ', '.join(tag.name for tag in item.tags.all())
        return ''.join(['Description: ', item.description or 'None',
            ' | Tags: ', tags or 'None'])

    def item_enclosure_url(self, item):
        # NOTE(review): if get_object failed, domain_name may still be None
        # here and this concatenation raises TypeError -- present before too.
        slug = unicode(filter_generator_for('standard')(item.image).image.url)
        return self.domain_name + slug

    def item_enclosure_length(self, item):
        return filter_generator_for('standard')(item.image).image.size
class LatestUserPins(Feed):
    """RSS feed of the 15 most recently published pins for a single user.

    FIX: a stray ``<|fim▁hole|>`` artifact token sat between the
    ``domain_name`` and ``item_enclosure_mime_type`` class attributes,
    breaking the module at parse time; removed.
    """
    description = 'The latest pins from around the internet.'
    # Absolute-URL prefix for enclosures; filled in per request by get_object.
    domain_name = None
    item_enclosure_mime_type = 'image/jpeg'

    def get_object(self, request, user):
        """
        Doing this as a fix for Django's not including the domain name in
        enclosure urls.
        """
        request_type = 'http'
        if request.is_secure(): request_type = 'https'
        self.domain_name = ''.join([request_type, '://',
            get_current_site(request).domain])
        return get_object_or_404(User, username=user)

    def title(self, obj):
        return 'Latest Pins from ' + obj.username

    def link(self, obj):
        return '/pins/user/' + obj.username + '/'

    def items(self, obj):
        return Pin.objects.filter(submitter=obj).order_by('-published')[:15]

    def item_pubdate(self, item):
        return item.published

    def item_link(self, item):
        return item.url

    def item_title(self, item):
        return item.url

    def item_description(self, item):
        tags = ', '.join(tag.name for tag in item.tags.all())
        return ''.join(['Description: ', item.description or 'None',
            ' | Tags: ', tags or 'None'])

    def item_enclosure_url(self, item):
        slug = unicode(filter_generator_for('standard')(item.image).image.url)
        return self.domain_name + slug

    def item_enclosure_length(self, item):
        return filter_generator_for('standard')(item.image).image.size
class LatestTagPins(Feed):
    """RSS feed of the 15 most recently published pins carrying a given tag."""
    link = '/'
    description = 'The latest pins from around the internet.'
    # Absolute-URL prefix for enclosures; filled in per request by get_object.
    domain_name = None
    item_enclosure_mime_type = 'image/jpeg'

    def get_object(self, request, tag):
        """
        Doing this as a fix for Django's not including the domain name in
        enclosure urls.
        """
        scheme = 'https' if request.is_secure() else 'http'
        self.domain_name = ''.join([scheme, '://',
            get_current_site(request).domain])
        return get_object_or_404(Tag, name=tag)

    def title(self, obj):
        return 'Latest Pins in ' + obj.name

    def link(self, obj):
        return '/pins/tag/' + obj.name + '/'

    def items(self, obj):
        return Pin.objects.filter(tags=obj).order_by('-published')[:15]

    def item_pubdate(self, item):
        return item.published

    def item_link(self, item):
        return item.url

    def item_title(self, item):
        return item.url

    def item_description(self, item):
        tag_names = ', '.join(tag.name for tag in item.tags.all())
        body = item.description or 'None'
        return ''.join(['Description: ', body, ' | Tags: ', tag_names or 'None'])

    def item_enclosure_url(self, item):
        thumbnail = filter_generator_for('standard')(item.image)
        return self.domain_name + unicode(thumbnail.image.url)

    def item_enclosure_length(self, item):
        return filter_generator_for('standard')(item.image).image.size
| |
<|file_name|>minimal.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# minimal.py --- Minimal example of using traits.
<|fim▁hole|>from traits.api import HasTraits, Float
class Person(HasTraits):
    """Minimal Traits example: a person with a single ``weight`` trait."""
    weight = Float(150.0)  # defaults to 150.0
| |
<|file_name|>psapi.rs<|end_file_name|><|fim▁begin|>// Copyright © 2015, skdltmxn
// Licensed under the MIT License <LICENSE.md>
//! API Prototypes and Definitions for PSAPI.DLL
// Module-bitness filter flags (mirror the Windows PSAPI LIST_MODULES_* values,
// presumably for EnumProcessModulesEx's dwFilterFlag -- see MSDN).
pub const LIST_MODULES_DEFAULT: ::DWORD = 0x0;
pub const LIST_MODULES_32BIT: ::DWORD = 0x01;
pub const LIST_MODULES_64BIT: ::DWORD = 0x02;
pub const LIST_MODULES_ALL: ::DWORD = LIST_MODULES_32BIT | LIST_MODULES_64BIT;
// Mirrors the Windows PSAPI MODULEINFO structure: base address, mapped image
// size, and entry point of a loaded module.
STRUCT!{struct MODULEINFO {
    lpBaseOfDll: ::LPVOID,
    SizeOfImage: ::DWORD,
    EntryPoint: ::LPVOID,
}}
pub type LPMODULEINFO = *mut MODULEINFO;
STRUCT!{struct PSAPI_WORKING_SET_BLOCK {
Flags: ::ULONG_PTR,
BitFields: ::ULONG_PTR,
}}
#[cfg(target_arch="x86")]
BITFIELD!(PSAPI_WORKING_SET_BLOCK BitFields: ::ULONG_PTR [
Protection set_Protection[0..5],
ShareCount set_ShareCount[5..8],
Shared set_Shared[8..9],
Reserved set_Reserved[9..12],
VirtualPage set_VirtualPage[12..32],
]);
#[cfg(target_arch="x86_64")]
BITFIELD!(PSAPI_WORKING_SET_BLOCK BitFields: ::ULONG_PTR [
Protection set_Protection[0..5],
ShareCount set_ShareCount[5..8],
Shared set_Shared[8..9],
Reserved set_Reserved[9..12],
VirtualPage set_VirtualPage[12..64],
]);
pub type PPSAPI_WORKING_SET_BLOCK = *mut PSAPI_WORKING_SET_BLOCK;
STRUCT!{struct PSAPI_WORKING_SET_INFORMATION {
NumberOfEntries: ::ULONG_PTR,
WorkingSetInfo: [PSAPI_WORKING_SET_BLOCK; 1],
}}
pub type PPSAPI_WORKING_SET_INFORMATION = *mut PSAPI_WORKING_SET_INFORMATION;
STRUCT!{struct PSAPI_WORKING_SET_EX_BLOCK_Invalid {
BitFields: ::ULONG_PTR,
}}
#[cfg(target_arch="x86")]
BITFIELD!(PSAPI_WORKING_SET_EX_BLOCK_Invalid BitFields: ::ULONG_PTR [
Valid set_Valid[0..1],
Reserved0 set_Reserved0[1..15],
Shared set_Shared[15..16],
Reserved1 set_Reserved1[16..31],
Bad set_Bad[31..32],
]);
#[cfg(target_arch="x86_64")]
BITFIELD!(PSAPI_WORKING_SET_EX_BLOCK_Invalid BitFields: ::ULONG_PTR [
Valid set_Valid[0..1],
Reserved0 set_Reserved0[1..15],
Shared set_Shared[15..16],
Reserved1 set_Reserved1[16..31],
Bad set_Bad[31..32],
ReservedUlong set_ReservedUlong[32..64],
]);
STRUCT!{struct PSAPI_WORKING_SET_EX_BLOCK {
Flags: ::ULONG_PTR,
BitFields: ::ULONG_PTR,
}}
#[cfg(target_arch="x86")]
BITFIELD!(PSAPI_WORKING_SET_EX_BLOCK BitFields: ::ULONG_PTR [
Valid set_Valid[0..1],
ShareCount set_ShareCount[1..4],
Win32Protection set_Win32Protection[4..15],
Shared set_Shared[15..16],
Node set_Node[16..22],
Locked set_Locked[22..23],
LargePage set_LargePage[23..24],
Reserved set_Reserved[24..31],
Bad set_Bad[31..32],
]);
#[cfg(target_arch="x86_64")]
BITFIELD!(PSAPI_WORKING_SET_EX_BLOCK BitFields: ::ULONG_PTR [
Valid set_Valid[0..1],
ShareCount set_ShareCount[1..4],
Win32Protection set_Win32Protection[4..15],
Shared set_Shared[15..16],
Node set_Node[16..22],
Locked set_Locked[22..23],
LargePage set_LargePage[23..24],
Reserved set_Reserved[24..31],
Bad set_Bad[31..32],
ReservedUlong set_ReservedUlong[32..64],
]);
UNION!(
PSAPI_WORKING_SET_EX_BLOCK, BitFields, Invalid, Invalid_mut, PSAPI_WORKING_SET_EX_BLOCK_Invalid
);
pub type PPSAPI_WORKING_SET_EX_BLOCK = *mut PSAPI_WORKING_SET_EX_BLOCK;
STRUCT!{struct PSAPI_WORKING_SET_EX_INFORMATION {
VirtualAddress: ::PVOID,
VirtualAttributes: PSAPI_WORKING_SET_EX_BLOCK,
}}
pub type PPSAPI_WORKING_SET_EX_INFORMATION = *mut PSAPI_WORKING_SET_EX_INFORMATION;
STRUCT!{struct PSAPI_WS_WATCH_INFORMATION {
FaultingPc: ::LPVOID,
FaultingVa: ::LPVOID,
}}
pub type PPSAPI_WS_WATCH_INFORMATION = *mut PSAPI_WS_WATCH_INFORMATION;
STRUCT!{struct PSAPI_WS_WATCH_INFORMATION_EX {
BasicInfo: PSAPI_WS_WATCH_INFORMATION,
FaultingThreadId: ::ULONG_PTR,
Flags: ::ULONG_PTR,
}}<|fim▁hole|>STRUCT!{struct PROCESS_MEMORY_COUNTERS {
cb: ::DWORD,
PageFaultCount: ::DWORD,
PeakWorkingSetSize: ::SIZE_T,
WorkingSetSize: ::SIZE_T,
QuotaPeakPagedPoolUsage: ::SIZE_T,
QuotaPagedPoolUsage: ::SIZE_T,
QuotaPeakNonPagedPoolUsage: ::SIZE_T,
QuotaNonPagedPoolUsage: ::SIZE_T,
PagefileUsage: ::SIZE_T,
PeakPagefileUsage: ::SIZE_T,
}}
pub type PPROCESS_MEMORY_COUNTERS = *mut PROCESS_MEMORY_COUNTERS;
STRUCT!{struct PROCESS_MEMORY_COUNTERS_EX {
cb: ::DWORD,
PageFaultCount: ::DWORD,
PeakWorkingSetSize: ::SIZE_T,
WorkingSetSize: ::SIZE_T,
QuotaPeakPagedPoolUsage: ::SIZE_T,
QuotaPagedPoolUsage: ::SIZE_T,
QuotaPeakNonPagedPoolUsage: ::SIZE_T,
QuotaNonPagedPoolUsage: ::SIZE_T,
PagefileUsage: ::SIZE_T,
PeakPagefileUsage: ::SIZE_T,
PrivateUsage: ::SIZE_T,
}}
pub type PPROCESS_MEMORY_COUNTERS_EX = *mut PROCESS_MEMORY_COUNTERS_EX;
STRUCT!{struct PERFORMANCE_INFORMATION {
cb: ::DWORD,
CommitTotal: ::SIZE_T,
CommitLimit: ::SIZE_T,
CommitPeak: ::SIZE_T,
PhysicalTotal: ::SIZE_T,
PhysicalAvailable: ::SIZE_T,
SystemCache: ::SIZE_T,
KernelTotal: ::SIZE_T,
KernelPaged: ::SIZE_T,
KernelNonpaged: ::SIZE_T,
PageSize: ::SIZE_T,
HandleCount: ::DWORD,
ProcessCount: ::DWORD,
ThreadCount: ::DWORD,
}}
pub type PPERFORMANCE_INFORMATION = *mut PERFORMANCE_INFORMATION;
STRUCT!{struct ENUM_PAGE_FILE_INFORMATION {
cb: ::DWORD,
Reserved: ::DWORD,
TotalSize: ::SIZE_T,
TotalInUse: ::SIZE_T,
PeakUsage: ::SIZE_T,
}}
pub type PENUM_PAGE_FILE_INFORMATION = *mut ENUM_PAGE_FILE_INFORMATION;
pub type PENUM_PAGE_FILE_CALLBACKA = Option<unsafe extern "system" fn(
pContext: ::LPVOID, pPageFileInfo: PENUM_PAGE_FILE_INFORMATION, lpFilename: ::LPCSTR,
) -> ::BOOL>;
pub type PENUM_PAGE_FILE_CALLBACKW = Option<unsafe extern "system" fn(
pContext: ::LPVOID, pPageFileInfo: PENUM_PAGE_FILE_INFORMATION, lpFilename: ::LPCWSTR,
) -> ::BOOL>;<|fim▁end|>
|
pub type PPSAPI_WS_WATCH_INFORMATION_EX = *mut PSAPI_WS_WATCH_INFORMATION_EX;
|
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>#[macro_use]
pub mod assert;
<|fim▁hole|><|fim▁end|>
|
pub mod task_domain;
|
<|file_name|>_models.py<|end_file_name|><|fim▁begin|># coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class AadAuthenticationParameters(msrest.serialization.Model):
    """AAD Vpn authentication type related parameters.

    :param aad_tenant: AAD Vpn authentication parameter AAD tenant.
    :type aad_tenant: str
    :param aad_audience: AAD Vpn authentication parameter AAD audience.
    :type aad_audience: str
    :param aad_issuer: AAD Vpn authentication parameter AAD issuer.
    :type aad_issuer: str
    """

    _attribute_map = {
        'aad_tenant': {'key': 'aadTenant', 'type': 'str'},
        'aad_audience': {'key': 'aadAudience', 'type': 'str'},
        'aad_issuer': {'key': 'aadIssuer', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(AadAuthenticationParameters, self).__init__(**kwargs)
        # Every field is optional; absent keys default to None.
        for field_name in ('aad_tenant', 'aad_audience', 'aad_issuer'):
            setattr(self, field_name, kwargs.get(field_name))
class AddressSpace(msrest.serialization.Model):
    """AddressSpace contains an array of IP address ranges that can be used by subnets of the virtual network.

    :param address_prefixes: A list of address blocks reserved for this virtual network in CIDR
     notation.
    :type address_prefixes: list[str]
    """

    _attribute_map = {
        'address_prefixes': {'key': 'addressPrefixes', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        super(AddressSpace, self).__init__(**kwargs)
        # Optional field; defaults to None when not supplied.
        self.address_prefixes = kwargs.get('address_prefixes')
class Resource(msrest.serialization.Model):
    """Common resource representation.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    """

    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
    }

    def __init__(self, **kwargs):
        super(Resource, self).__init__(**kwargs)
        # Client-settable fields come from kwargs; server-populated, read-only
        # fields start as None and are filled during deserialization.
        for field_name in ('id', 'location', 'tags'):
            setattr(self, field_name, kwargs.get(field_name))
        self.name = None
        self.type = None
class ApplicationGateway(Resource):
"""Application gateway resource.
Variables are only populated by the server, and will be ignored when sending a request.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:ivar etag: A unique read-only string that changes whenever the resource is updated.
:vartype etag: str
:param zones: A list of availability zones denoting where the resource needs to come from.
:type zones: list[str]
:param identity: The identity of the application gateway, if configured.
:type identity: ~azure.mgmt.network.v2020_04_01.models.ManagedServiceIdentity
:param sku: SKU of the application gateway resource.
:type sku: ~azure.mgmt.network.v2020_04_01.models.ApplicationGatewaySku
:param ssl_policy: SSL policy of the application gateway resource.
:type ssl_policy: ~azure.mgmt.network.v2020_04_01.models.ApplicationGatewaySslPolicy
:ivar operational_state: Operational state of the application gateway resource. Possible values
include: "Stopped", "Starting", "Running", "Stopping".
:vartype operational_state: str or
~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayOperationalState
:param gateway_ip_configurations: Subnets of the application gateway resource. For default
limits, see `Application Gateway limits
<https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits>`_.
:type gateway_ip_configurations:
list[~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayIPConfiguration]
:param authentication_certificates: Authentication certificates of the application gateway
resource. For default limits, see `Application Gateway limits
<https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits>`_.
:type authentication_certificates:
list[~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayAuthenticationCertificate]
:param trusted_root_certificates: Trusted Root certificates of the application gateway
resource. For default limits, see `Application Gateway limits
<https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits>`_.
:type trusted_root_certificates:
list[~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayTrustedRootCertificate]
:param ssl_certificates: SSL certificates of the application gateway resource. For default
limits, see `Application Gateway limits
<https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits>`_.
:type ssl_certificates:
list[~azure.mgmt.network.v2020_04_01.models.ApplicationGatewaySslCertificate]
:param frontend_ip_configurations: Frontend IP addresses of the application gateway resource.
For default limits, see `Application Gateway limits
<https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits>`_.
:type frontend_ip_configurations:
list[~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayFrontendIPConfiguration]
:param frontend_ports: Frontend ports of the application gateway resource. For default limits,
see `Application Gateway limits
<https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits>`_.
:type frontend_ports:
list[~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayFrontendPort]
:param probes: Probes of the application gateway resource.
:type probes: list[~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayProbe]
:param backend_address_pools: Backend address pool of the application gateway resource. For
default limits, see `Application Gateway limits
<https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits>`_.
:type backend_address_pools:
list[~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayBackendAddressPool]
:param backend_http_settings_collection: Backend http settings of the application gateway
resource. For default limits, see `Application Gateway limits
<https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits>`_.
:type backend_http_settings_collection:
list[~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayBackendHttpSettings]
:param http_listeners: Http listeners of the application gateway resource. For default limits,
see `Application Gateway limits
<https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits>`_.
:type http_listeners:
list[~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayHttpListener]
:param url_path_maps: URL path map of the application gateway resource. For default limits, see
`Application Gateway limits
<https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits>`_.
:type url_path_maps: list[~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayUrlPathMap]
:param request_routing_rules: Request routing rules of the application gateway resource.
:type request_routing_rules:
list[~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayRequestRoutingRule]
:param rewrite_rule_sets: Rewrite rules for the application gateway resource.
:type rewrite_rule_sets:
list[~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayRewriteRuleSet]
:param redirect_configurations: Redirect configurations of the application gateway resource.
For default limits, see `Application Gateway limits
<https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits>`_.
:type redirect_configurations:
list[~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayRedirectConfiguration]
:param web_application_firewall_configuration: Web application firewall configuration.
:type web_application_firewall_configuration:
~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayWebApplicationFirewallConfiguration
:param firewall_policy: Reference to the FirewallPolicy resource.
:type firewall_policy: ~azure.mgmt.network.v2020_04_01.models.SubResource
:param enable_http2: Whether HTTP2 is enabled on the application gateway resource.
:type enable_http2: bool
:param enable_fips: Whether FIPS is enabled on the application gateway resource.
:type enable_fips: bool
:param autoscale_configuration: Autoscale Configuration.
:type autoscale_configuration:
~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayAutoscaleConfiguration
:ivar resource_guid: The resource GUID property of the application gateway resource.
:vartype resource_guid: str
:ivar provisioning_state: The provisioning state of the application gateway resource. Possible
values include: "Succeeded", "Updating", "Deleting", "Failed".
:vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
:param custom_error_configurations: Custom error configurations of the application gateway
resource.
:type custom_error_configurations:
list[~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayCustomError]
:param force_firewall_policy_association: If true, associates a firewall policy with an
application gateway regardless whether the policy differs from the WAF Config.
:type force_firewall_policy_association: bool
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
'etag': {'readonly': True},
'operational_state': {'readonly': True},
'resource_guid': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'etag': {'key': 'etag', 'type': 'str'},
'zones': {'key': 'zones', 'type': '[str]'},
'identity': {'key': 'identity', 'type': 'ManagedServiceIdentity'},
'sku': {'key': 'properties.sku', 'type': 'ApplicationGatewaySku'},
'ssl_policy': {'key': 'properties.sslPolicy', 'type': 'ApplicationGatewaySslPolicy'},
'operational_state': {'key': 'properties.operationalState', 'type': 'str'},
'gateway_ip_configurations': {'key': 'properties.gatewayIPConfigurations', 'type': '[ApplicationGatewayIPConfiguration]'},
'authentication_certificates': {'key': 'properties.authenticationCertificates', 'type': '[ApplicationGatewayAuthenticationCertificate]'},
'trusted_root_certificates': {'key': 'properties.trustedRootCertificates', 'type': '[ApplicationGatewayTrustedRootCertificate]'},
'ssl_certificates': {'key': 'properties.sslCertificates', 'type': '[ApplicationGatewaySslCertificate]'},
'frontend_ip_configurations': {'key': 'properties.frontendIPConfigurations', 'type': '[ApplicationGatewayFrontendIPConfiguration]'},
'frontend_ports': {'key': 'properties.frontendPorts', 'type': '[ApplicationGatewayFrontendPort]'},
'probes': {'key': 'properties.probes', 'type': '[ApplicationGatewayProbe]'},
'backend_address_pools': {'key': 'properties.backendAddressPools', 'type': '[ApplicationGatewayBackendAddressPool]'},
'backend_http_settings_collection': {'key': 'properties.backendHttpSettingsCollection', 'type': '[ApplicationGatewayBackendHttpSettings]'},
'http_listeners': {'key': 'properties.httpListeners', 'type': '[ApplicationGatewayHttpListener]'},
'url_path_maps': {'key': 'properties.urlPathMaps', 'type': '[ApplicationGatewayUrlPathMap]'},
'request_routing_rules': {'key': 'properties.requestRoutingRules', 'type': '[ApplicationGatewayRequestRoutingRule]'},
'rewrite_rule_sets': {'key': 'properties.rewriteRuleSets', 'type': '[ApplicationGatewayRewriteRuleSet]'},
'redirect_configurations': {'key': 'properties.redirectConfigurations', 'type': '[ApplicationGatewayRedirectConfiguration]'},
'web_application_firewall_configuration': {'key': 'properties.webApplicationFirewallConfiguration', 'type': 'ApplicationGatewayWebApplicationFirewallConfiguration'},
'firewall_policy': {'key': 'properties.firewallPolicy', 'type': 'SubResource'},
'enable_http2': {'key': 'properties.enableHttp2', 'type': 'bool'},
'enable_fips': {'key': 'properties.enableFips', 'type': 'bool'},
'autoscale_configuration': {'key': 'properties.autoscaleConfiguration', 'type': 'ApplicationGatewayAutoscaleConfiguration'},
'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'custom_error_configurations': {'key': 'properties.customErrorConfigurations', 'type': '[ApplicationGatewayCustomError]'},
'force_firewall_policy_association': {'key': 'properties.forceFirewallPolicyAssociation', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(ApplicationGateway, self).__init__(**kwargs)
self.etag = None
self.zones = kwargs.get('zones', None)
self.identity = kwargs.get('identity', None)
self.sku = kwargs.get('sku', None)
self.ssl_policy = kwargs.get('ssl_policy', None)
self.operational_state = None
self.gateway_ip_configurations = kwargs.get('gateway_ip_configurations', None)
self.authentication_certificates = kwargs.get('authentication_certificates', None)
self.trusted_root_certificates = kwargs.get('trusted_root_certificates', None)
self.ssl_certificates = kwargs.get('ssl_certificates', None)
self.frontend_ip_configurations = kwargs.get('frontend_ip_configurations', None)
self.frontend_ports = kwargs.get('frontend_ports', None)
self.probes = kwargs.get('probes', None)
self.backend_address_pools = kwargs.get('backend_address_pools', None)
self.backend_http_settings_collection = kwargs.get('backend_http_settings_collection', None)
self.http_listeners = kwargs.get('http_listeners', None)
self.url_path_maps = kwargs.get('url_path_maps', None)
self.request_routing_rules = kwargs.get('request_routing_rules', None)
self.rewrite_rule_sets = kwargs.get('rewrite_rule_sets', None)
self.redirect_configurations = kwargs.get('redirect_configurations', None)
self.web_application_firewall_configuration = kwargs.get('web_application_firewall_configuration', None)
self.firewall_policy = kwargs.get('firewall_policy', None)
self.enable_http2 = kwargs.get('enable_http2', None)
self.enable_fips = kwargs.get('enable_fips', None)
self.autoscale_configuration = kwargs.get('autoscale_configuration', None)
self.resource_guid = None
self.provisioning_state = None
self.custom_error_configurations = kwargs.get('custom_error_configurations', None)
self.force_firewall_policy_association = kwargs.get('force_firewall_policy_association', None)
class SubResource(msrest.serialization.Model):
    """A reference to another Azure subresource, carried as a bare resource ID.

    :param id: Resource ID.
    :type id: str
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(SubResource, self).__init__(**kwargs)
        self.id = kwargs.get('id')
class ApplicationGatewayAuthenticationCertificate(SubResource):
    """An authentication certificate attached to an application gateway.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the authentication certificate that is unique within an Application
     Gateway.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar type: Type of the resource.
    :vartype type: str
    :param data: Certificate public data.
    :type data: str
    :ivar provisioning_state: The provisioning state of the authentication certificate resource.
     Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    _validation = {
        'etag': {'readonly': True},
        'type': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'data': {'key': 'properties.data', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ApplicationGatewayAuthenticationCertificate, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        # Read-only fields start as None; the service populates them on responses.
        self.etag = None
        self.type = None
        self.data = kwargs.get('data')
        self.provisioning_state = None
class ApplicationGatewayAutoscaleConfiguration(msrest.serialization.Model):
    """Autoscale settings for an application gateway.

    All required parameters must be populated in order to send to Azure.

    :param min_capacity: Required. Lower bound on number of Application Gateway capacity.
    :type min_capacity: int
    :param max_capacity: Upper bound on number of Application Gateway capacity.
    :type max_capacity: int
    """

    _validation = {
        'min_capacity': {'required': True, 'minimum': 0},
        'max_capacity': {'minimum': 2},
    }

    _attribute_map = {
        'min_capacity': {'key': 'minCapacity', 'type': 'int'},
        'max_capacity': {'key': 'maxCapacity', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        super(ApplicationGatewayAutoscaleConfiguration, self).__init__(**kwargs)
        # min_capacity is required: indexing (not .get) raises KeyError if missing.
        self.min_capacity = kwargs['min_capacity']
        self.max_capacity = kwargs.get('max_capacity')
class ApplicationGatewayAvailableSslOptions(Resource):
    """Result of the ApplicationGatewayAvailableSslOptions API service call.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param predefined_policies: List of available Ssl predefined policy.
    :type predefined_policies: list[~azure.mgmt.network.v2020_04_01.models.SubResource]
    :param default_policy: Name of the Ssl predefined policy applied by default to application
     gateway. Possible values include: "AppGwSslPolicy20150501", "AppGwSslPolicy20170401",
     "AppGwSslPolicy20170401S".
    :type default_policy: str or
     ~azure.mgmt.network.v2020_04_01.models.ApplicationGatewaySslPolicyName
    :param available_cipher_suites: List of available Ssl cipher suites.
    :type available_cipher_suites: list[str or
     ~azure.mgmt.network.v2020_04_01.models.ApplicationGatewaySslCipherSuite]
    :param available_protocols: List of available Ssl protocols.
    :type available_protocols: list[str or
     ~azure.mgmt.network.v2020_04_01.models.ApplicationGatewaySslProtocol]
    """

    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'predefined_policies': {'key': 'properties.predefinedPolicies', 'type': '[SubResource]'},
        'default_policy': {'key': 'properties.defaultPolicy', 'type': 'str'},
        'available_cipher_suites': {'key': 'properties.availableCipherSuites', 'type': '[str]'},
        'available_protocols': {'key': 'properties.availableProtocols', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        super(ApplicationGatewayAvailableSslOptions, self).__init__(**kwargs)
        self.predefined_policies = kwargs.get('predefined_policies')
        self.default_policy = kwargs.get('default_policy')
        self.available_cipher_suites = kwargs.get('available_cipher_suites')
        self.available_protocols = kwargs.get('available_protocols')
class ApplicationGatewayAvailableSslPredefinedPolicies(msrest.serialization.Model):
    """A page of results from the ApplicationGatewayAvailableSslOptions API service call.

    :param value: List of available Ssl predefined policy.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.ApplicationGatewaySslPredefinedPolicy]
    :param next_link: URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[ApplicationGatewaySslPredefinedPolicy]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ApplicationGatewayAvailableSslPredefinedPolicies, self).__init__(**kwargs)
        self.value = kwargs.get('value')
        self.next_link = kwargs.get('next_link')
class ApplicationGatewayAvailableWafRuleSetsResult(msrest.serialization.Model):
    """Result of the ApplicationGatewayAvailableWafRuleSets API service call.

    :param value: The list of application gateway rule sets.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayFirewallRuleSet]
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[ApplicationGatewayFirewallRuleSet]'},
    }

    def __init__(self, **kwargs):
        super(ApplicationGatewayAvailableWafRuleSetsResult, self).__init__(**kwargs)
        self.value = kwargs.get('value')
class ApplicationGatewayBackendAddress(msrest.serialization.Model):
    """A single backend address of an application gateway, given as FQDN or IP.

    :param fqdn: Fully qualified domain name (FQDN).
    :type fqdn: str
    :param ip_address: IP address.
    :type ip_address: str
    """

    _attribute_map = {
        'fqdn': {'key': 'fqdn', 'type': 'str'},
        'ip_address': {'key': 'ipAddress', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ApplicationGatewayBackendAddress, self).__init__(**kwargs)
        self.fqdn = kwargs.get('fqdn')
        self.ip_address = kwargs.get('ip_address')
class ApplicationGatewayBackendAddressPool(SubResource):
    """A backend address pool of an application gateway.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the backend address pool that is unique within an Application Gateway.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar type: Type of the resource.
    :vartype type: str
    :ivar backend_ip_configurations: Collection of references to IPs defined in network interfaces.
    :vartype backend_ip_configurations:
     list[~azure.mgmt.network.v2020_04_01.models.NetworkInterfaceIPConfiguration]
    :param backend_addresses: Backend addresses.
    :type backend_addresses:
     list[~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayBackendAddress]
    :ivar provisioning_state: The provisioning state of the backend address pool resource. Possible
     values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    _validation = {
        'etag': {'readonly': True},
        'type': {'readonly': True},
        'backend_ip_configurations': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'backend_ip_configurations': {'key': 'properties.backendIPConfigurations', 'type': '[NetworkInterfaceIPConfiguration]'},
        'backend_addresses': {'key': 'properties.backendAddresses', 'type': '[ApplicationGatewayBackendAddress]'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ApplicationGatewayBackendAddressPool, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        # Server-populated fields start as None and are ignored on requests.
        self.etag = None
        self.type = None
        self.backend_ip_configurations = None
        self.backend_addresses = kwargs.get('backend_addresses')
        self.provisioning_state = None
class ApplicationGatewayBackendHealth(msrest.serialization.Model):
    """Result of the ApplicationGatewayBackendHealth API service call.

    :param backend_address_pools: A list of ApplicationGatewayBackendHealthPool resources.
    :type backend_address_pools:
     list[~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayBackendHealthPool]
    """

    _attribute_map = {
        'backend_address_pools': {'key': 'backendAddressPools', 'type': '[ApplicationGatewayBackendHealthPool]'},
    }

    def __init__(self, **kwargs):
        super(ApplicationGatewayBackendHealth, self).__init__(**kwargs)
        self.backend_address_pools = kwargs.get('backend_address_pools')
class ApplicationGatewayBackendHealthHttpSettings(msrest.serialization.Model):
    """Backend health information grouped by HTTP settings.

    :param backend_http_settings: Reference to an ApplicationGatewayBackendHttpSettings resource.
    :type backend_http_settings:
     ~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayBackendHttpSettings
    :param servers: List of ApplicationGatewayBackendHealthServer resources.
    :type servers:
     list[~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayBackendHealthServer]
    """

    _attribute_map = {
        'backend_http_settings': {'key': 'backendHttpSettings', 'type': 'ApplicationGatewayBackendHttpSettings'},
        'servers': {'key': 'servers', 'type': '[ApplicationGatewayBackendHealthServer]'},
    }

    def __init__(self, **kwargs):
        super(ApplicationGatewayBackendHealthHttpSettings, self).__init__(**kwargs)
        self.backend_http_settings = kwargs.get('backend_http_settings')
        self.servers = kwargs.get('servers')
class ApplicationGatewayBackendHealthOnDemand(msrest.serialization.Model):
    """Result of an on-demand backend health test probe.

    :param backend_address_pool: Reference to an ApplicationGatewayBackendAddressPool resource.
    :type backend_address_pool:
     ~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayBackendAddressPool
    :param backend_health_http_settings: Application gateway BackendHealthHttp settings.
    :type backend_health_http_settings:
     ~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayBackendHealthHttpSettings
    """

    _attribute_map = {
        'backend_address_pool': {'key': 'backendAddressPool', 'type': 'ApplicationGatewayBackendAddressPool'},
        'backend_health_http_settings': {'key': 'backendHealthHttpSettings', 'type': 'ApplicationGatewayBackendHealthHttpSettings'},
    }

    def __init__(self, **kwargs):
        super(ApplicationGatewayBackendHealthOnDemand, self).__init__(**kwargs)
        self.backend_address_pool = kwargs.get('backend_address_pool')
        self.backend_health_http_settings = kwargs.get('backend_health_http_settings')
class ApplicationGatewayBackendHealthPool(msrest.serialization.Model):
    """Backend health information for one backend address pool.

    :param backend_address_pool: Reference to an ApplicationGatewayBackendAddressPool resource.
    :type backend_address_pool:
     ~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayBackendAddressPool
    :param backend_http_settings_collection: List of ApplicationGatewayBackendHealthHttpSettings
     resources.
    :type backend_http_settings_collection:
     list[~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayBackendHealthHttpSettings]
    """

    _attribute_map = {
        'backend_address_pool': {'key': 'backendAddressPool', 'type': 'ApplicationGatewayBackendAddressPool'},
        'backend_http_settings_collection': {'key': 'backendHttpSettingsCollection', 'type': '[ApplicationGatewayBackendHealthHttpSettings]'},
    }

    def __init__(self, **kwargs):
        super(ApplicationGatewayBackendHealthPool, self).__init__(**kwargs)
        self.backend_address_pool = kwargs.get('backend_address_pool')
        self.backend_http_settings_collection = kwargs.get('backend_http_settings_collection')
class ApplicationGatewayBackendHealthServer(msrest.serialization.Model):
    """Health status of a single backend server of an application gateway.

    :param address: IP address or FQDN of backend server.
    :type address: str
    :param ip_configuration: Reference to IP configuration of backend server.
    :type ip_configuration: ~azure.mgmt.network.v2020_04_01.models.NetworkInterfaceIPConfiguration
    :param health: Health of backend server. Possible values include: "Unknown", "Up", "Down",
     "Partial", "Draining".
    :type health: str or
     ~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayBackendHealthServerHealth
    :param health_probe_log: Health Probe Log.
    :type health_probe_log: str
    """

    _attribute_map = {
        'address': {'key': 'address', 'type': 'str'},
        'ip_configuration': {'key': 'ipConfiguration', 'type': 'NetworkInterfaceIPConfiguration'},
        'health': {'key': 'health', 'type': 'str'},
        'health_probe_log': {'key': 'healthProbeLog', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ApplicationGatewayBackendHealthServer, self).__init__(**kwargs)
        self.address = kwargs.get('address')
        self.ip_configuration = kwargs.get('ip_configuration')
        self.health = kwargs.get('health')
        self.health_probe_log = kwargs.get('health_probe_log')
class ApplicationGatewayBackendHttpSettings(SubResource):
    """HTTP settings applied to a backend address pool of an application gateway.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the backend http settings that is unique within an Application Gateway.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar type: Type of the resource.
    :vartype type: str
    :param port: The destination port on the backend.
    :type port: int
    :param protocol: The protocol used to communicate with the backend. Possible values include:
     "Http", "Https".
    :type protocol: str or ~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayProtocol
    :param cookie_based_affinity: Cookie based affinity. Possible values include: "Enabled",
     "Disabled".
    :type cookie_based_affinity: str or
     ~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayCookieBasedAffinity
    :param request_timeout: Request timeout in seconds. Application Gateway will fail the request
     if response is not received within RequestTimeout. Acceptable values are from 1 second to 86400
     seconds.
    :type request_timeout: int
    :param probe: Probe resource of an application gateway.
    :type probe: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param authentication_certificates: Array of references to application gateway authentication
     certificates.
    :type authentication_certificates: list[~azure.mgmt.network.v2020_04_01.models.SubResource]
    :param trusted_root_certificates: Array of references to application gateway trusted root
     certificates.
    :type trusted_root_certificates: list[~azure.mgmt.network.v2020_04_01.models.SubResource]
    :param connection_draining: Connection draining of the backend http settings resource.
    :type connection_draining:
     ~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayConnectionDraining
    :param host_name: Host header to be sent to the backend servers.
    :type host_name: str
    :param pick_host_name_from_backend_address: Whether to pick host header should be picked from
     the host name of the backend server. Default value is false.
    :type pick_host_name_from_backend_address: bool
    :param affinity_cookie_name: Cookie name to use for the affinity cookie.
    :type affinity_cookie_name: str
    :param probe_enabled: Whether the probe is enabled. Default value is false.
    :type probe_enabled: bool
    :param path: Path which should be used as a prefix for all HTTP requests. Null means no path
     will be prefixed. Default value is null.
    :type path: str
    :ivar provisioning_state: The provisioning state of the backend HTTP settings resource.
     Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    _validation = {
        'etag': {'readonly': True},
        'type': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'port': {'key': 'properties.port', 'type': 'int'},
        'protocol': {'key': 'properties.protocol', 'type': 'str'},
        'cookie_based_affinity': {'key': 'properties.cookieBasedAffinity', 'type': 'str'},
        'request_timeout': {'key': 'properties.requestTimeout', 'type': 'int'},
        'probe': {'key': 'properties.probe', 'type': 'SubResource'},
        'authentication_certificates': {'key': 'properties.authenticationCertificates', 'type': '[SubResource]'},
        'trusted_root_certificates': {'key': 'properties.trustedRootCertificates', 'type': '[SubResource]'},
        'connection_draining': {'key': 'properties.connectionDraining', 'type': 'ApplicationGatewayConnectionDraining'},
        'host_name': {'key': 'properties.hostName', 'type': 'str'},
        'pick_host_name_from_backend_address': {'key': 'properties.pickHostNameFromBackendAddress', 'type': 'bool'},
        'affinity_cookie_name': {'key': 'properties.affinityCookieName', 'type': 'str'},
        'probe_enabled': {'key': 'properties.probeEnabled', 'type': 'bool'},
        'path': {'key': 'properties.path', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ApplicationGatewayBackendHttpSettings, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        # Server-populated fields start as None and are ignored on requests.
        self.etag = None
        self.type = None
        self.port = kwargs.get('port')
        self.protocol = kwargs.get('protocol')
        self.cookie_based_affinity = kwargs.get('cookie_based_affinity')
        self.request_timeout = kwargs.get('request_timeout')
        self.probe = kwargs.get('probe')
        self.authentication_certificates = kwargs.get('authentication_certificates')
        self.trusted_root_certificates = kwargs.get('trusted_root_certificates')
        self.connection_draining = kwargs.get('connection_draining')
        self.host_name = kwargs.get('host_name')
        self.pick_host_name_from_backend_address = kwargs.get('pick_host_name_from_backend_address')
        self.affinity_cookie_name = kwargs.get('affinity_cookie_name')
        self.probe_enabled = kwargs.get('probe_enabled')
        self.path = kwargs.get('path')
        self.provisioning_state = None
class ApplicationGatewayConnectionDraining(msrest.serialization.Model):
    """Connection draining settings: keep open connections to a removed backend server active for a specified time.

    All required parameters must be populated in order to send to Azure.

    :param enabled: Required. Whether connection draining is enabled or not.
    :type enabled: bool
    :param drain_timeout_in_sec: Required. The number of seconds connection draining is active.
     Acceptable values are from 1 second to 3600 seconds.
    :type drain_timeout_in_sec: int
    """

    _validation = {
        'enabled': {'required': True},
        'drain_timeout_in_sec': {'required': True, 'maximum': 3600, 'minimum': 1},
    }

    _attribute_map = {
        'enabled': {'key': 'enabled', 'type': 'bool'},
        'drain_timeout_in_sec': {'key': 'drainTimeoutInSec', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        super(ApplicationGatewayConnectionDraining, self).__init__(**kwargs)
        # Both parameters are required: indexing raises KeyError if missing.
        self.enabled = kwargs['enabled']
        self.drain_timeout_in_sec = kwargs['drain_timeout_in_sec']
class ApplicationGatewayCustomError(msrest.serialization.Model):
    """Custom error page configuration of an application gateway.

    :param status_code: Status code of the application gateway customer error. Possible values
     include: "HttpStatus403", "HttpStatus502".
    :type status_code: str or
     ~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayCustomErrorStatusCode
    :param custom_error_page_url: Error page URL of the application gateway customer error.
    :type custom_error_page_url: str
    """

    _attribute_map = {
        'status_code': {'key': 'statusCode', 'type': 'str'},
        'custom_error_page_url': {'key': 'customErrorPageUrl', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ApplicationGatewayCustomError, self).__init__(**kwargs)
        self.status_code = kwargs.get('status_code')
        self.custom_error_page_url = kwargs.get('custom_error_page_url')
class ApplicationGatewayFirewallDisabledRuleGroup(msrest.serialization.Model):
    """Disables specific rules within a rule group, or the entire rule group.

    All required parameters must be populated in order to send to Azure.

    :param rule_group_name: Required. The name of the rule group that will be disabled.
    :type rule_group_name: str
    :param rules: The list of rules that will be disabled. If null, all rules of the rule group
     will be disabled.
    :type rules: list[int]
    """

    _validation = {
        'rule_group_name': {'required': True},
    }

    _attribute_map = {
        'rule_group_name': {'key': 'ruleGroupName', 'type': 'str'},
        'rules': {'key': 'rules', 'type': '[int]'},
    }

    def __init__(self, **kwargs):
        super(ApplicationGatewayFirewallDisabledRuleGroup, self).__init__(**kwargs)
        # rule_group_name is required: indexing raises KeyError if missing.
        self.rule_group_name = kwargs['rule_group_name']
        self.rules = kwargs.get('rules')
class ApplicationGatewayFirewallExclusion(msrest.serialization.Model):
    """Excludes variables matching a condition from the WAF check.

    All required parameters must be populated in order to send to Azure.

    :param match_variable: Required. The variable to be excluded.
    :type match_variable: str
    :param selector_match_operator: Required. When matchVariable is a collection, operate on the
     selector to specify which elements in the collection this exclusion applies to.
    :type selector_match_operator: str
    :param selector: Required. When matchVariable is a collection, operator used to specify which
     elements in the collection this exclusion applies to.
    :type selector: str
    """

    _validation = {
        'match_variable': {'required': True},
        'selector_match_operator': {'required': True},
        'selector': {'required': True},
    }

    _attribute_map = {
        'match_variable': {'key': 'matchVariable', 'type': 'str'},
        'selector_match_operator': {'key': 'selectorMatchOperator', 'type': 'str'},
        'selector': {'key': 'selector', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ApplicationGatewayFirewallExclusion, self).__init__(**kwargs)
        # All three parameters are required: indexing raises KeyError if missing.
        self.match_variable = kwargs['match_variable']
        self.selector_match_operator = kwargs['selector_match_operator']
        self.selector = kwargs['selector']
class ApplicationGatewayFirewallRule(msrest.serialization.Model):
    """A single web application firewall rule.

    All required parameters must be populated in order to send to Azure.

    :param rule_id: Required. The identifier of the web application firewall rule.
    :type rule_id: int
    :param description: The description of the web application firewall rule.
    :type description: str
    """

    _validation = {
        'rule_id': {'required': True},
    }

    _attribute_map = {
        'rule_id': {'key': 'ruleId', 'type': 'int'},
        'description': {'key': 'description', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ApplicationGatewayFirewallRule, self).__init__(**kwargs)
        # rule_id is required: indexing raises KeyError if missing.
        self.rule_id = kwargs['rule_id']
        self.description = kwargs.get('description')
class ApplicationGatewayFirewallRuleGroup(msrest.serialization.Model):
    """A group of web application firewall rules.

    All required parameters must be populated in order to send to Azure.

    :param rule_group_name: Required. The name of the web application firewall rule group.
    :type rule_group_name: str
    :param description: The description of the web application firewall rule group.
    :type description: str
    :param rules: Required. The rules of the web application firewall rule group.
    :type rules: list[~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayFirewallRule]
    """

    _validation = {
        'rule_group_name': {'required': True},
        'rules': {'required': True},
    }

    _attribute_map = {
        'rule_group_name': {'key': 'ruleGroupName', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'rules': {'key': 'rules', 'type': '[ApplicationGatewayFirewallRule]'},
    }

    def __init__(self, **kwargs):
        super(ApplicationGatewayFirewallRuleGroup, self).__init__(**kwargs)
        # rule_group_name and rules are required: indexing raises KeyError if missing.
        self.rule_group_name = kwargs['rule_group_name']
        self.description = kwargs.get('description')
        self.rules = kwargs['rules']
class ApplicationGatewayFirewallRuleSet(Resource):
    """A versioned set of web application firewall rule groups.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :ivar provisioning_state: The provisioning state of the web application firewall rule set.
     Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    :param rule_set_type: The type of the web application firewall rule set.
    :type rule_set_type: str
    :param rule_set_version: The version of the web application firewall rule set type.
    :type rule_set_version: str
    :param rule_groups: The rule groups of the web application firewall rule set.
    :type rule_groups:
     list[~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayFirewallRuleGroup]
    """

    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'rule_set_type': {'key': 'properties.ruleSetType', 'type': 'str'},
        'rule_set_version': {'key': 'properties.ruleSetVersion', 'type': 'str'},
        'rule_groups': {'key': 'properties.ruleGroups', 'type': '[ApplicationGatewayFirewallRuleGroup]'},
    }

    def __init__(self, **kwargs):
        super(ApplicationGatewayFirewallRuleSet, self).__init__(**kwargs)
        # provisioning_state is read-only; the service populates it on responses.
        self.provisioning_state = None
        self.rule_set_type = kwargs.get('rule_set_type')
        self.rule_set_version = kwargs.get('rule_set_version')
        self.rule_groups = kwargs.get('rule_groups')
class ApplicationGatewayFrontendIPConfiguration(SubResource):
    """A frontend IP configuration of an application gateway.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the frontend IP configuration that is unique within an Application
     Gateway.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar type: Type of the resource.
    :vartype type: str
    :param private_ip_address: PrivateIPAddress of the network interface IP Configuration.
    :type private_ip_address: str
    :param private_ip_allocation_method: The private IP address allocation method. Possible values
     include: "Static", "Dynamic".
    :type private_ip_allocation_method: str or
     ~azure.mgmt.network.v2020_04_01.models.IPAllocationMethod
    :param subnet: Reference to the subnet resource.
    :type subnet: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param public_ip_address: Reference to the PublicIP resource.
    :type public_ip_address: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :ivar provisioning_state: The provisioning state of the frontend IP configuration resource.
     Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    _validation = {
        'etag': {'readonly': True},
        'type': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'private_ip_address': {'key': 'properties.privateIPAddress', 'type': 'str'},
        'private_ip_allocation_method': {'key': 'properties.privateIPAllocationMethod', 'type': 'str'},
        'subnet': {'key': 'properties.subnet', 'type': 'SubResource'},
        'public_ip_address': {'key': 'properties.publicIPAddress', 'type': 'SubResource'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ApplicationGatewayFrontendIPConfiguration, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        # Server-populated fields start as None and are ignored on requests.
        self.etag = None
        self.type = None
        self.private_ip_address = kwargs.get('private_ip_address')
        self.private_ip_allocation_method = kwargs.get('private_ip_allocation_method')
        self.subnet = kwargs.get('subnet')
        self.public_ip_address = kwargs.get('public_ip_address')
        self.provisioning_state = None
class ApplicationGatewayFrontendPort(SubResource):
    """A frontend port of an application gateway.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the frontend port that is unique within an Application Gateway.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar type: Type of the resource.
    :vartype type: str
    :param port: Frontend port.
    :type port: int
    :ivar provisioning_state: The provisioning state of the frontend port resource. Possible values
     include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    _validation = {
        'etag': {'readonly': True},
        'type': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'port': {'key': 'properties.port', 'type': 'int'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ApplicationGatewayFrontendPort, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        # Server-populated fields start as None and are ignored on requests.
        self.etag = None
        self.type = None
        self.port = kwargs.get('port')
        self.provisioning_state = None
class ApplicationGatewayHeaderConfiguration(msrest.serialization.Model):
    """Header configuration of the Actions set in Application Gateway.

    :param header_name: Header name of the header configuration.
    :type header_name: str
    :param header_value: Header value of the header configuration.
    :type header_value: str
    """

    # Attribute name -> wire-format JSON key and serialized type.
    _attribute_map = {
        'header_name': {'key': 'headerName', 'type': 'str'},
        'header_value': {'key': 'headerValue', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ApplicationGatewayHeaderConfiguration, self).__init__(**kwargs)
        self.header_name = kwargs.get('header_name')
        self.header_value = kwargs.get('header_value')
class ApplicationGatewayHttpListener(SubResource):
    """Http listener of an application gateway.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the HTTP listener that is unique within an Application Gateway.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar type: Type of the resource.
    :vartype type: str
    :param frontend_ip_configuration: Frontend IP configuration resource of an application gateway.
    :type frontend_ip_configuration: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param frontend_port: Frontend port resource of an application gateway.
    :type frontend_port: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param protocol: Protocol of the HTTP listener. Possible values include: "Http", "Https".
    :type protocol: str or ~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayProtocol
    :param host_name: Host name of HTTP listener.
    :type host_name: str
    :param ssl_certificate: SSL certificate resource of an application gateway.
    :type ssl_certificate: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param require_server_name_indication: Applicable only if protocol is https. Enables SNI for
     multi-hosting.
    :type require_server_name_indication: bool
    :ivar provisioning_state: The provisioning state of the HTTP listener resource. Possible values
     include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    :param custom_error_configurations: Custom error configurations of the HTTP listener.
    :type custom_error_configurations:
     list[~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayCustomError]
    :param firewall_policy: Reference to the FirewallPolicy resource.
    :type firewall_policy: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param host_names: List of Host names for HTTP Listener that allows special wildcard characters
     as well.
    :type host_names: list[str]
    """

    # Fields the service populates; client-supplied values are ignored on send.
    _validation = {
        'etag': {'readonly': True},
        'type': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    # Attribute name -> wire-format JSON key and serialized type.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'frontend_ip_configuration': {'key': 'properties.frontendIPConfiguration', 'type': 'SubResource'},
        'frontend_port': {'key': 'properties.frontendPort', 'type': 'SubResource'},
        'protocol': {'key': 'properties.protocol', 'type': 'str'},
        'host_name': {'key': 'properties.hostName', 'type': 'str'},
        'ssl_certificate': {'key': 'properties.sslCertificate', 'type': 'SubResource'},
        'require_server_name_indication': {'key': 'properties.requireServerNameIndication', 'type': 'bool'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'custom_error_configurations': {'key': 'properties.customErrorConfigurations', 'type': '[ApplicationGatewayCustomError]'},
        'firewall_policy': {'key': 'properties.firewallPolicy', 'type': 'SubResource'},
        'host_names': {'key': 'properties.hostNames', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        super(ApplicationGatewayHttpListener, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        # Read-only, server-populated attributes start out empty.
        self.etag = None
        self.type = None
        self.frontend_ip_configuration = kwargs.get('frontend_ip_configuration')
        self.frontend_port = kwargs.get('frontend_port')
        self.protocol = kwargs.get('protocol')
        self.host_name = kwargs.get('host_name')
        self.ssl_certificate = kwargs.get('ssl_certificate')
        self.require_server_name_indication = kwargs.get('require_server_name_indication')
        self.provisioning_state = None
        self.custom_error_configurations = kwargs.get('custom_error_configurations')
        self.firewall_policy = kwargs.get('firewall_policy')
        self.host_names = kwargs.get('host_names')
class ApplicationGatewayIPConfiguration(SubResource):
    """IP configuration of an application gateway. Currently 1 public and 1 private IP configuration is allowed.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the IP configuration that is unique within an Application Gateway.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar type: Type of the resource.
    :vartype type: str
    :param subnet: Reference to the subnet resource. A subnet from where application gateway gets
     its private address.
    :type subnet: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :ivar provisioning_state: The provisioning state of the application gateway IP configuration
     resource. Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    # Fields the service populates; client-supplied values are ignored on send.
    _validation = {
        'etag': {'readonly': True},
        'type': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    # Attribute name -> wire-format JSON key and serialized type.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'subnet': {'key': 'properties.subnet', 'type': 'SubResource'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ApplicationGatewayIPConfiguration, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        # Read-only, server-populated attributes start out empty.
        self.etag = None
        self.type = None
        self.subnet = kwargs.get('subnet')
        self.provisioning_state = None
class ApplicationGatewayListResult(msrest.serialization.Model):
    """Response for ListApplicationGateways API service call.

    :param value: List of an application gateways in a resource group.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.ApplicationGateway]
    :param next_link: URL to get the next set of results.
    :type next_link: str
    """

    # Attribute name -> wire-format JSON key and serialized type.
    _attribute_map = {
        'value': {'key': 'value', 'type': '[ApplicationGateway]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ApplicationGatewayListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value')
        self.next_link = kwargs.get('next_link')
class ApplicationGatewayOnDemandProbe(msrest.serialization.Model):
    """Details of on demand test probe request.

    :param protocol: The protocol used for the probe. Possible values include: "Http", "Https".
    :type protocol: str or ~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayProtocol
    :param host: Host name to send the probe to.
    :type host: str
    :param path: Relative path of probe. Valid path starts from '/'. Probe is sent to
     :code:`<Protocol>`://:code:`<host>`::code:`<port>`:code:`<path>`.
    :type path: str
    :param timeout: The probe timeout in seconds. Probe marked as failed if valid response is not
     received with this timeout period. Acceptable values are from 1 second to 86400 seconds.
    :type timeout: int
    :param pick_host_name_from_backend_http_settings: Whether the host header should be picked from
     the backend http settings. Default value is false.
    :type pick_host_name_from_backend_http_settings: bool
    :param match: Criterion for classifying a healthy probe response.
    :type match: ~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayProbeHealthResponseMatch
    :param backend_address_pool: Reference to backend pool of application gateway to which probe
     request will be sent.
    :type backend_address_pool: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param backend_http_settings: Reference to backend http setting of application gateway to be
     used for test probe.
    :type backend_http_settings: ~azure.mgmt.network.v2020_04_01.models.SubResource
    """

    # Attribute name -> wire-format JSON key and serialized type.
    _attribute_map = {
        'protocol': {'key': 'protocol', 'type': 'str'},
        'host': {'key': 'host', 'type': 'str'},
        'path': {'key': 'path', 'type': 'str'},
        'timeout': {'key': 'timeout', 'type': 'int'},
        'pick_host_name_from_backend_http_settings': {'key': 'pickHostNameFromBackendHttpSettings', 'type': 'bool'},
        'match': {'key': 'match', 'type': 'ApplicationGatewayProbeHealthResponseMatch'},
        'backend_address_pool': {'key': 'backendAddressPool', 'type': 'SubResource'},
        'backend_http_settings': {'key': 'backendHttpSettings', 'type': 'SubResource'},
    }

    def __init__(self, **kwargs):
        super(ApplicationGatewayOnDemandProbe, self).__init__(**kwargs)
        self.protocol = kwargs.get('protocol')
        self.host = kwargs.get('host')
        self.path = kwargs.get('path')
        self.timeout = kwargs.get('timeout')
        self.pick_host_name_from_backend_http_settings = kwargs.get('pick_host_name_from_backend_http_settings')
        self.match = kwargs.get('match')
        self.backend_address_pool = kwargs.get('backend_address_pool')
        self.backend_http_settings = kwargs.get('backend_http_settings')
class ApplicationGatewayPathRule(SubResource):
    """Path rule of URL path map of an application gateway.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the path rule that is unique within an Application Gateway.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar type: Type of the resource.
    :vartype type: str
    :param paths: Path rules of URL path map.
    :type paths: list[str]
    :param backend_address_pool: Backend address pool resource of URL path map path rule.
    :type backend_address_pool: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param backend_http_settings: Backend http settings resource of URL path map path rule.
    :type backend_http_settings: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param redirect_configuration: Redirect configuration resource of URL path map path rule.
    :type redirect_configuration: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param rewrite_rule_set: Rewrite rule set resource of URL path map path rule.
    :type rewrite_rule_set: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :ivar provisioning_state: The provisioning state of the path rule resource. Possible values
     include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    :param firewall_policy: Reference to the FirewallPolicy resource.
    :type firewall_policy: ~azure.mgmt.network.v2020_04_01.models.SubResource
    """

    # Fields the service populates; client-supplied values are ignored on send.
    _validation = {
        'etag': {'readonly': True},
        'type': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    # Attribute name -> wire-format JSON key and serialized type.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'paths': {'key': 'properties.paths', 'type': '[str]'},
        'backend_address_pool': {'key': 'properties.backendAddressPool', 'type': 'SubResource'},
        'backend_http_settings': {'key': 'properties.backendHttpSettings', 'type': 'SubResource'},
        'redirect_configuration': {'key': 'properties.redirectConfiguration', 'type': 'SubResource'},
        'rewrite_rule_set': {'key': 'properties.rewriteRuleSet', 'type': 'SubResource'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'firewall_policy': {'key': 'properties.firewallPolicy', 'type': 'SubResource'},
    }

    def __init__(self, **kwargs):
        super(ApplicationGatewayPathRule, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        # Read-only, server-populated attributes start out empty.
        self.etag = None
        self.type = None
        self.paths = kwargs.get('paths')
        self.backend_address_pool = kwargs.get('backend_address_pool')
        self.backend_http_settings = kwargs.get('backend_http_settings')
        self.redirect_configuration = kwargs.get('redirect_configuration')
        self.rewrite_rule_set = kwargs.get('rewrite_rule_set')
        self.provisioning_state = None
        self.firewall_policy = kwargs.get('firewall_policy')
class ApplicationGatewayProbe(SubResource):
    """Probe of the application gateway.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the probe that is unique within an Application Gateway.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar type: Type of the resource.
    :vartype type: str
    :param protocol: The protocol used for the probe. Possible values include: "Http", "Https".
    :type protocol: str or ~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayProtocol
    :param host: Host name to send the probe to.
    :type host: str
    :param path: Relative path of probe. Valid path starts from '/'. Probe is sent to
     :code:`<Protocol>`://:code:`<host>`::code:`<port>`:code:`<path>`.
    :type path: str
    :param interval: The probing interval in seconds. This is the time interval between two
     consecutive probes. Acceptable values are from 1 second to 86400 seconds.
    :type interval: int
    :param timeout: The probe timeout in seconds. Probe marked as failed if valid response is not
     received with this timeout period. Acceptable values are from 1 second to 86400 seconds.
    :type timeout: int
    :param unhealthy_threshold: The probe retry count. Backend server is marked down after
     consecutive probe failure count reaches UnhealthyThreshold. Acceptable values are from 1 second
     to 20.
    :type unhealthy_threshold: int
    :param pick_host_name_from_backend_http_settings: Whether the host header should be picked from
     the backend http settings. Default value is false.
    :type pick_host_name_from_backend_http_settings: bool
    :param min_servers: Minimum number of servers that are always marked healthy. Default value is
     0.
    :type min_servers: int
    :param match: Criterion for classifying a healthy probe response.
    :type match: ~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayProbeHealthResponseMatch
    :ivar provisioning_state: The provisioning state of the probe resource. Possible values
     include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    :param port: Custom port which will be used for probing the backend servers. The valid value
     ranges from 1 to 65535. In case not set, port from http settings will be used. This property is
     valid for Standard_v2 and WAF_v2 only.
    :type port: int
    """

    # Read-only server-side fields plus the permitted range for the probe port.
    _validation = {
        'etag': {'readonly': True},
        'type': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'port': {'maximum': 65535, 'minimum': 1},
    }

    # Attribute name -> wire-format JSON key and serialized type.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'protocol': {'key': 'properties.protocol', 'type': 'str'},
        'host': {'key': 'properties.host', 'type': 'str'},
        'path': {'key': 'properties.path', 'type': 'str'},
        'interval': {'key': 'properties.interval', 'type': 'int'},
        'timeout': {'key': 'properties.timeout', 'type': 'int'},
        'unhealthy_threshold': {'key': 'properties.unhealthyThreshold', 'type': 'int'},
        'pick_host_name_from_backend_http_settings': {'key': 'properties.pickHostNameFromBackendHttpSettings', 'type': 'bool'},
        'min_servers': {'key': 'properties.minServers', 'type': 'int'},
        'match': {'key': 'properties.match', 'type': 'ApplicationGatewayProbeHealthResponseMatch'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'port': {'key': 'properties.port', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        super(ApplicationGatewayProbe, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        # Read-only, server-populated attributes start out empty.
        self.etag = None
        self.type = None
        self.protocol = kwargs.get('protocol')
        self.host = kwargs.get('host')
        self.path = kwargs.get('path')
        self.interval = kwargs.get('interval')
        self.timeout = kwargs.get('timeout')
        self.unhealthy_threshold = kwargs.get('unhealthy_threshold')
        self.pick_host_name_from_backend_http_settings = kwargs.get('pick_host_name_from_backend_http_settings')
        self.min_servers = kwargs.get('min_servers')
        self.match = kwargs.get('match')
        self.provisioning_state = None
        self.port = kwargs.get('port')
class ApplicationGatewayProbeHealthResponseMatch(msrest.serialization.Model):
    """Application gateway probe health response match.

    :param body: Body that must be contained in the health response. Default value is empty.
    :type body: str
    :param status_codes: Allowed ranges of healthy status codes. Default range of healthy status
     codes is 200-399.
    :type status_codes: list[str]
    """

    # Attribute name -> wire-format JSON key and serialized type.
    _attribute_map = {
        'body': {'key': 'body', 'type': 'str'},
        'status_codes': {'key': 'statusCodes', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        super(ApplicationGatewayProbeHealthResponseMatch, self).__init__(**kwargs)
        self.body = kwargs.get('body')
        self.status_codes = kwargs.get('status_codes')
class ApplicationGatewayRedirectConfiguration(SubResource):
    """Redirect configuration of an application gateway.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the redirect configuration that is unique within an Application Gateway.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar type: Type of the resource.
    :vartype type: str
    :param redirect_type: HTTP redirection type. Possible values include: "Permanent", "Found",
     "SeeOther", "Temporary".
    :type redirect_type: str or
     ~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayRedirectType
    :param target_listener: Reference to a listener to redirect the request to.
    :type target_listener: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param target_url: Url to redirect the request to.
    :type target_url: str
    :param include_path: Include path in the redirected url.
    :type include_path: bool
    :param include_query_string: Include query string in the redirected url.
    :type include_query_string: bool
    :param request_routing_rules: Request routing specifying redirect configuration.
    :type request_routing_rules: list[~azure.mgmt.network.v2020_04_01.models.SubResource]
    :param url_path_maps: Url path maps specifying default redirect configuration.
    :type url_path_maps: list[~azure.mgmt.network.v2020_04_01.models.SubResource]
    :param path_rules: Path rules specifying redirect configuration.
    :type path_rules: list[~azure.mgmt.network.v2020_04_01.models.SubResource]
    """

    # Fields the service populates; client-supplied values are ignored on send.
    _validation = {
        'etag': {'readonly': True},
        'type': {'readonly': True},
    }

    # Attribute name -> wire-format JSON key and serialized type.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'redirect_type': {'key': 'properties.redirectType', 'type': 'str'},
        'target_listener': {'key': 'properties.targetListener', 'type': 'SubResource'},
        'target_url': {'key': 'properties.targetUrl', 'type': 'str'},
        'include_path': {'key': 'properties.includePath', 'type': 'bool'},
        'include_query_string': {'key': 'properties.includeQueryString', 'type': 'bool'},
        'request_routing_rules': {'key': 'properties.requestRoutingRules', 'type': '[SubResource]'},
        'url_path_maps': {'key': 'properties.urlPathMaps', 'type': '[SubResource]'},
        'path_rules': {'key': 'properties.pathRules', 'type': '[SubResource]'},
    }

    def __init__(self, **kwargs):
        super(ApplicationGatewayRedirectConfiguration, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        # Read-only, server-populated attributes start out empty.
        self.etag = None
        self.type = None
        self.redirect_type = kwargs.get('redirect_type')
        self.target_listener = kwargs.get('target_listener')
        self.target_url = kwargs.get('target_url')
        self.include_path = kwargs.get('include_path')
        self.include_query_string = kwargs.get('include_query_string')
        self.request_routing_rules = kwargs.get('request_routing_rules')
        self.url_path_maps = kwargs.get('url_path_maps')
        self.path_rules = kwargs.get('path_rules')
class ApplicationGatewayRequestRoutingRule(SubResource):
    """Request routing rule of an application gateway.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the request routing rule that is unique within an Application Gateway.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar type: Type of the resource.
    :vartype type: str
    :param rule_type: Rule type. Possible values include: "Basic", "PathBasedRouting".
    :type rule_type: str or
     ~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayRequestRoutingRuleType
    :param priority: Priority of the request routing rule.
    :type priority: int
    :param backend_address_pool: Backend address pool resource of the application gateway.
    :type backend_address_pool: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param backend_http_settings: Backend http settings resource of the application gateway.
    :type backend_http_settings: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param http_listener: Http listener resource of the application gateway.
    :type http_listener: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param url_path_map: URL path map resource of the application gateway.
    :type url_path_map: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param rewrite_rule_set: Rewrite Rule Set resource in Basic rule of the application gateway.
    :type rewrite_rule_set: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param redirect_configuration: Redirect configuration resource of the application gateway.
    :type redirect_configuration: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :ivar provisioning_state: The provisioning state of the request routing rule resource. Possible
     values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    # Read-only server-side fields plus the permitted range for the rule priority.
    _validation = {
        'etag': {'readonly': True},
        'type': {'readonly': True},
        'priority': {'maximum': 20000, 'minimum': 1},
        'provisioning_state': {'readonly': True},
    }

    # Attribute name -> wire-format JSON key and serialized type.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'rule_type': {'key': 'properties.ruleType', 'type': 'str'},
        'priority': {'key': 'properties.priority', 'type': 'int'},
        'backend_address_pool': {'key': 'properties.backendAddressPool', 'type': 'SubResource'},
        'backend_http_settings': {'key': 'properties.backendHttpSettings', 'type': 'SubResource'},
        'http_listener': {'key': 'properties.httpListener', 'type': 'SubResource'},
        'url_path_map': {'key': 'properties.urlPathMap', 'type': 'SubResource'},
        'rewrite_rule_set': {'key': 'properties.rewriteRuleSet', 'type': 'SubResource'},
        'redirect_configuration': {'key': 'properties.redirectConfiguration', 'type': 'SubResource'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ApplicationGatewayRequestRoutingRule, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        # Read-only, server-populated attributes start out empty.
        self.etag = None
        self.type = None
        self.rule_type = kwargs.get('rule_type')
        self.priority = kwargs.get('priority')
        self.backend_address_pool = kwargs.get('backend_address_pool')
        self.backend_http_settings = kwargs.get('backend_http_settings')
        self.http_listener = kwargs.get('http_listener')
        self.url_path_map = kwargs.get('url_path_map')
        self.rewrite_rule_set = kwargs.get('rewrite_rule_set')
        self.redirect_configuration = kwargs.get('redirect_configuration')
        self.provisioning_state = None
class ApplicationGatewayRewriteRule(msrest.serialization.Model):
    """Rewrite rule of an application gateway.

    :param name: Name of the rewrite rule that is unique within an Application Gateway.
    :type name: str
    :param rule_sequence: Rule Sequence of the rewrite rule that determines the order of execution
     of a particular rule in a RewriteRuleSet.
    :type rule_sequence: int
    :param conditions: Conditions based on which the action set execution will be evaluated.
    :type conditions:
     list[~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayRewriteRuleCondition]
    :param action_set: Set of actions to be done as part of the rewrite Rule.
    :type action_set: ~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayRewriteRuleActionSet
    """

    # Attribute name -> wire-format JSON key and serialized type.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'rule_sequence': {'key': 'ruleSequence', 'type': 'int'},
        'conditions': {'key': 'conditions', 'type': '[ApplicationGatewayRewriteRuleCondition]'},
        'action_set': {'key': 'actionSet', 'type': 'ApplicationGatewayRewriteRuleActionSet'},
    }

    def __init__(self, **kwargs):
        super(ApplicationGatewayRewriteRule, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        self.rule_sequence = kwargs.get('rule_sequence')
        self.conditions = kwargs.get('conditions')
        self.action_set = kwargs.get('action_set')
class ApplicationGatewayRewriteRuleActionSet(msrest.serialization.Model):
    """Set of actions in the Rewrite Rule in Application Gateway.

    :param request_header_configurations: Request Header Actions in the Action Set.
    :type request_header_configurations:
     list[~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayHeaderConfiguration]
    :param response_header_configurations: Response Header Actions in the Action Set.
    :type response_header_configurations:
     list[~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayHeaderConfiguration]
    :param url_configuration: Url Configuration Action in the Action Set.
    :type url_configuration:
     ~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayUrlConfiguration
    """

    # Attribute name -> wire-format JSON key and serialized type.
    _attribute_map = {
        'request_header_configurations': {'key': 'requestHeaderConfigurations', 'type': '[ApplicationGatewayHeaderConfiguration]'},
        'response_header_configurations': {'key': 'responseHeaderConfigurations', 'type': '[ApplicationGatewayHeaderConfiguration]'},
        'url_configuration': {'key': 'urlConfiguration', 'type': 'ApplicationGatewayUrlConfiguration'},
    }

    def __init__(self, **kwargs):
        super(ApplicationGatewayRewriteRuleActionSet, self).__init__(**kwargs)
        self.request_header_configurations = kwargs.get('request_header_configurations')
        self.response_header_configurations = kwargs.get('response_header_configurations')
        self.url_configuration = kwargs.get('url_configuration')
class ApplicationGatewayRewriteRuleCondition(msrest.serialization.Model):
    """Set of conditions in the Rewrite Rule in Application Gateway.

    :param variable: The condition parameter of the RewriteRuleCondition.
    :type variable: str
    :param pattern: The pattern, either fixed string or regular expression, that evaluates the
     truthfulness of the condition.
    :type pattern: str
    :param ignore_case: Setting this parameter to truth value with force the pattern to do a case
     in-sensitive comparison.
    :type ignore_case: bool
    :param negate: Setting this value as truth will force to check the negation of the condition
     given by the user.
    :type negate: bool
    """

    # Attribute name -> wire-format JSON key and serialized type.
    _attribute_map = {
        'variable': {'key': 'variable', 'type': 'str'},
        'pattern': {'key': 'pattern', 'type': 'str'},
        'ignore_case': {'key': 'ignoreCase', 'type': 'bool'},
        'negate': {'key': 'negate', 'type': 'bool'},
    }

    def __init__(self, **kwargs):
        super(ApplicationGatewayRewriteRuleCondition, self).__init__(**kwargs)
        self.variable = kwargs.get('variable')
        self.pattern = kwargs.get('pattern')
        self.ignore_case = kwargs.get('ignore_case')
        self.negate = kwargs.get('negate')
class ApplicationGatewayRewriteRuleSet(SubResource):
    """Rewrite rule set of an application gateway.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the rewrite rule set that is unique within an Application Gateway.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param rewrite_rules: Rewrite rules in the rewrite rule set.
    :type rewrite_rules: list[~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayRewriteRule]
    :ivar provisioning_state: The provisioning state of the rewrite rule set resource. Possible
     values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    # Fields the service populates; client-supplied values are ignored on send.
    _validation = {
        'etag': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    # Attribute name -> wire-format JSON key and serialized type.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'rewrite_rules': {'key': 'properties.rewriteRules', 'type': '[ApplicationGatewayRewriteRule]'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ApplicationGatewayRewriteRuleSet, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        # Read-only, server-populated attributes start out empty.
        self.etag = None
        self.rewrite_rules = kwargs.get('rewrite_rules')
        self.provisioning_state = None
class ApplicationGatewaySku(msrest.serialization.Model):
    """SKU of an application gateway.

    :param name: Name of an application gateway SKU. Possible values include: "Standard_Small",
     "Standard_Medium", "Standard_Large", "WAF_Medium", "WAF_Large", "Standard_v2", "WAF_v2".
    :type name: str or ~azure.mgmt.network.v2020_04_01.models.ApplicationGatewaySkuName
    :param tier: Tier of an application gateway. Possible values include: "Standard", "WAF",
     "Standard_v2", "WAF_v2".
    :type tier: str or ~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayTier
    :param capacity: Capacity (instance count) of an application gateway.
    :type capacity: int
    """

    # Attribute name -> wire-format JSON key and serialized type.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'tier': {'key': 'tier', 'type': 'str'},
        'capacity': {'key': 'capacity', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        super(ApplicationGatewaySku, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        self.tier = kwargs.get('tier')
        self.capacity = kwargs.get('capacity')
class ApplicationGatewaySslCertificate(SubResource):
    """SSL certificates of an application gateway.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the SSL certificate that is unique within an Application Gateway.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar type: Type of the resource.
    :vartype type: str
    :param data: Base-64 encoded pfx certificate. Only applicable in PUT Request.
    :type data: str
    :param password: Password for the pfx file specified in data. Only applicable in PUT request.
    :type password: str
    :ivar public_cert_data: Base-64 encoded Public cert data corresponding to pfx specified in
     data. Only applicable in GET request.
    :vartype public_cert_data: str
    :param key_vault_secret_id: Secret Id of (base-64 encoded unencrypted pfx) 'Secret' or
     'Certificate' object stored in KeyVault.
    :type key_vault_secret_id: str
    :ivar provisioning_state: The provisioning state of the SSL certificate resource. Possible
     values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    # Fields the service populates; client-supplied values are ignored on send.
    _validation = {
        'etag': {'readonly': True},
        'type': {'readonly': True},
        'public_cert_data': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    # Attribute name -> wire-format JSON key and serialized type.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'data': {'key': 'properties.data', 'type': 'str'},
        'password': {'key': 'properties.password', 'type': 'str'},
        'public_cert_data': {'key': 'properties.publicCertData', 'type': 'str'},
        'key_vault_secret_id': {'key': 'properties.keyVaultSecretId', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ApplicationGatewaySslCertificate, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        # Read-only, server-populated attributes start out empty.
        self.etag = None
        self.type = None
        self.data = kwargs.get('data')
        self.password = kwargs.get('password')
        self.public_cert_data = None
        self.key_vault_secret_id = kwargs.get('key_vault_secret_id')
        self.provisioning_state = None
class ApplicationGatewaySslPolicy(msrest.serialization.Model):
    """Application Gateway Ssl policy.

    :param disabled_ssl_protocols: Ssl protocols to be disabled on application gateway.
    :type disabled_ssl_protocols: list[str or
     ~azure.mgmt.network.v2020_04_01.models.ApplicationGatewaySslProtocol]
    :param policy_type: Type of Ssl Policy. Possible values include: "Predefined", "Custom".
    :type policy_type: str or
     ~azure.mgmt.network.v2020_04_01.models.ApplicationGatewaySslPolicyType
    :param policy_name: Name of Ssl predefined policy. Possible values include:
     "AppGwSslPolicy20150501", "AppGwSslPolicy20170401", "AppGwSslPolicy20170401S".
    :type policy_name: str or
     ~azure.mgmt.network.v2020_04_01.models.ApplicationGatewaySslPolicyName
    :param cipher_suites: Ssl cipher suites to be enabled in the specified order to application
     gateway.
    :type cipher_suites: list[str or
     ~azure.mgmt.network.v2020_04_01.models.ApplicationGatewaySslCipherSuite]
    :param min_protocol_version: Minimum version of Ssl protocol to be supported on application
     gateway. Possible values include: "TLSv1_0", "TLSv1_1", "TLSv1_2".
    :type min_protocol_version: str or
     ~azure.mgmt.network.v2020_04_01.models.ApplicationGatewaySslProtocol
    """

    # Python attribute -> wire-format key/type used by the msrest serializer.
    _attribute_map = {
        'disabled_ssl_protocols': {'type': '[str]', 'key': 'disabledSslProtocols'},
        'policy_type': {'type': 'str', 'key': 'policyType'},
        'policy_name': {'type': 'str', 'key': 'policyName'},
        'cipher_suites': {'type': '[str]', 'key': 'cipherSuites'},
        'min_protocol_version': {'type': 'str', 'key': 'minProtocolVersion'},
    }

    def __init__(self, **kwargs):
        super(ApplicationGatewaySslPolicy, self).__init__(**kwargs)
        # Every property is optional; missing ones default to None.
        for prop in ('disabled_ssl_protocols', 'policy_type', 'policy_name',
                     'cipher_suites', 'min_protocol_version'):
            setattr(self, prop, kwargs.get(prop))
class ApplicationGatewaySslPredefinedPolicy(SubResource):
    """An Ssl predefined policy.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the Ssl predefined policy.
    :type name: str
    :param cipher_suites: Ssl cipher suites to be enabled in the specified order for application
     gateway.
    :type cipher_suites: list[str or
     ~azure.mgmt.network.v2020_04_01.models.ApplicationGatewaySslCipherSuite]
    :param min_protocol_version: Minimum version of Ssl protocol to be supported on application
     gateway. Possible values include: "TLSv1_0", "TLSv1_1", "TLSv1_2".
    :type min_protocol_version: str or
     ~azure.mgmt.network.v2020_04_01.models.ApplicationGatewaySslProtocol
    """

    # Python attribute -> wire-format key/type used by the msrest serializer.
    _attribute_map = {
        'id': {'type': 'str', 'key': 'id'},
        'name': {'type': 'str', 'key': 'name'},
        'cipher_suites': {'type': '[str]', 'key': 'properties.cipherSuites'},
        'min_protocol_version': {'type': 'str', 'key': 'properties.minProtocolVersion'},
    }

    def __init__(self, **kwargs):
        super(ApplicationGatewaySslPredefinedPolicy, self).__init__(**kwargs)
        # All properties are optional; absent ones fall back to None.
        for prop in ('name', 'cipher_suites', 'min_protocol_version'):
            setattr(self, prop, kwargs.get(prop))
class ApplicationGatewayTrustedRootCertificate(SubResource):
    """Trusted Root certificates of an application gateway.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the trusted root certificate that is unique within an Application Gateway.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar type: Type of the resource.
    :vartype type: str
    :param data: Certificate public data.
    :type data: str
    :param key_vault_secret_id: Secret Id of (base-64 encoded unencrypted pfx) 'Secret' or
     'Certificate' object stored in KeyVault.
    :type key_vault_secret_id: str
    :ivar provisioning_state: The provisioning state of the trusted root certificate resource.
     Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    # Server-populated fields are rejected on outbound requests.
    _validation = {
        'etag': {'readonly': True},
        'type': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    # Python attribute -> wire-format key/type used by the msrest serializer.
    _attribute_map = {
        'id': {'type': 'str', 'key': 'id'},
        'name': {'type': 'str', 'key': 'name'},
        'etag': {'type': 'str', 'key': 'etag'},
        'type': {'type': 'str', 'key': 'type'},
        'data': {'type': 'str', 'key': 'properties.data'},
        'key_vault_secret_id': {'type': 'str', 'key': 'properties.keyVaultSecretId'},
        'provisioning_state': {'type': 'str', 'key': 'properties.provisioningState'},
    }

    def __init__(self, **kwargs):
        super(ApplicationGatewayTrustedRootCertificate, self).__init__(**kwargs)
        # Caller-settable properties first, then the read-only ones.
        for prop in ('name', 'data', 'key_vault_secret_id'):
            setattr(self, prop, kwargs.get(prop))
        self.etag = None
        self.type = None
        self.provisioning_state = None
class ApplicationGatewayUrlConfiguration(msrest.serialization.Model):
    """Url configuration of the Actions set in Application Gateway.

    :param modified_path: Url path which user has provided for url rewrite. Null means no path will
     be updated. Default value is null.
    :type modified_path: str
    :param modified_query_string: Query string which user has provided for url rewrite. Null means
     no query string will be updated. Default value is null.
    :type modified_query_string: str
    :param reroute: If set as true, it will re-evaluate the url path map provided in path based
     request routing rules using modified path. Default value is false.
    :type reroute: bool
    """

    # Python attribute -> wire-format key/type used by the msrest serializer.
    _attribute_map = {
        'modified_path': {'type': 'str', 'key': 'modifiedPath'},
        'modified_query_string': {'type': 'str', 'key': 'modifiedQueryString'},
        'reroute': {'type': 'bool', 'key': 'reroute'},
    }

    def __init__(self, **kwargs):
        super(ApplicationGatewayUrlConfiguration, self).__init__(**kwargs)
        self.modified_path = kwargs.get('modified_path')
        self.modified_query_string = kwargs.get('modified_query_string')
        self.reroute = kwargs.get('reroute')
class ApplicationGatewayUrlPathMap(SubResource):
    """UrlPathMaps give a url path to the backend mapping information for PathBasedRouting.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the URL path map that is unique within an Application Gateway.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar type: Type of the resource.
    :vartype type: str
    :param default_backend_address_pool: Default backend address pool resource of URL path map.
    :type default_backend_address_pool: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param default_backend_http_settings: Default backend http settings resource of URL path map.
    :type default_backend_http_settings: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param default_rewrite_rule_set: Default Rewrite rule set resource of URL path map.
    :type default_rewrite_rule_set: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param default_redirect_configuration: Default redirect configuration resource of URL path map.
    :type default_redirect_configuration: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param path_rules: Path rule of URL path map resource.
    :type path_rules: list[~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayPathRule]
    :ivar provisioning_state: The provisioning state of the URL path map resource. Possible values
     include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    # Server-populated fields are rejected on outbound requests.
    _validation = {
        'etag': {'readonly': True},
        'type': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    # Python attribute -> wire-format key/type used by the msrest serializer.
    _attribute_map = {
        'id': {'type': 'str', 'key': 'id'},
        'name': {'type': 'str', 'key': 'name'},
        'etag': {'type': 'str', 'key': 'etag'},
        'type': {'type': 'str', 'key': 'type'},
        'default_backend_address_pool': {'type': 'SubResource', 'key': 'properties.defaultBackendAddressPool'},
        'default_backend_http_settings': {'type': 'SubResource', 'key': 'properties.defaultBackendHttpSettings'},
        'default_rewrite_rule_set': {'type': 'SubResource', 'key': 'properties.defaultRewriteRuleSet'},
        'default_redirect_configuration': {'type': 'SubResource', 'key': 'properties.defaultRedirectConfiguration'},
        'path_rules': {'type': '[ApplicationGatewayPathRule]', 'key': 'properties.pathRules'},
        'provisioning_state': {'type': 'str', 'key': 'properties.provisioningState'},
    }

    def __init__(self, **kwargs):
        super(ApplicationGatewayUrlPathMap, self).__init__(**kwargs)
        # Caller-settable properties first, then the read-only ones.
        for prop in ('name', 'default_backend_address_pool', 'default_backend_http_settings',
                     'default_rewrite_rule_set', 'default_redirect_configuration', 'path_rules'):
            setattr(self, prop, kwargs.get(prop))
        self.etag = None
        self.type = None
        self.provisioning_state = None
class ApplicationGatewayWebApplicationFirewallConfiguration(msrest.serialization.Model):
    """Application gateway web application firewall configuration.

    All required parameters must be populated in order to send to Azure.

    :param enabled: Required. Whether the web application firewall is enabled or not.
    :type enabled: bool
    :param firewall_mode: Required. Web application firewall mode. Possible values include:
     "Detection", "Prevention".
    :type firewall_mode: str or
     ~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayFirewallMode
    :param rule_set_type: Required. The type of the web application firewall rule set. Possible
     values are: 'OWASP'.
    :type rule_set_type: str
    :param rule_set_version: Required. The version of the rule set type.
    :type rule_set_version: str
    :param disabled_rule_groups: The disabled rule groups.
    :type disabled_rule_groups:
     list[~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayFirewallDisabledRuleGroup]
    :param request_body_check: Whether allow WAF to check request Body.
    :type request_body_check: bool
    :param max_request_body_size: Maximum request body size for WAF.
    :type max_request_body_size: int
    :param max_request_body_size_in_kb: Maximum request body size in Kb for WAF.
    :type max_request_body_size_in_kb: int
    :param file_upload_limit_in_mb: Maximum file upload size in Mb for WAF.
    :type file_upload_limit_in_mb: int
    :param exclusions: The exclusion list.
    :type exclusions:
     list[~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayFirewallExclusion]
    """

    # Required fields plus the numeric bounds the service enforces.
    _validation = {
        'enabled': {'required': True},
        'firewall_mode': {'required': True},
        'rule_set_type': {'required': True},
        'rule_set_version': {'required': True},
        'max_request_body_size': {'maximum': 128, 'minimum': 8},
        'max_request_body_size_in_kb': {'maximum': 128, 'minimum': 8},
        'file_upload_limit_in_mb': {'minimum': 0},
    }

    # Python attribute -> wire-format key/type used by the msrest serializer.
    _attribute_map = {
        'enabled': {'type': 'bool', 'key': 'enabled'},
        'firewall_mode': {'type': 'str', 'key': 'firewallMode'},
        'rule_set_type': {'type': 'str', 'key': 'ruleSetType'},
        'rule_set_version': {'type': 'str', 'key': 'ruleSetVersion'},
        'disabled_rule_groups': {'type': '[ApplicationGatewayFirewallDisabledRuleGroup]', 'key': 'disabledRuleGroups'},
        'request_body_check': {'type': 'bool', 'key': 'requestBodyCheck'},
        'max_request_body_size': {'type': 'int', 'key': 'maxRequestBodySize'},
        'max_request_body_size_in_kb': {'type': 'int', 'key': 'maxRequestBodySizeInKb'},
        'file_upload_limit_in_mb': {'type': 'int', 'key': 'fileUploadLimitInMb'},
        'exclusions': {'type': '[ApplicationGatewayFirewallExclusion]', 'key': 'exclusions'},
    }

    def __init__(self, **kwargs):
        super(ApplicationGatewayWebApplicationFirewallConfiguration, self).__init__(**kwargs)
        # Required properties: indexing raises KeyError when one is missing.
        for prop in ('enabled', 'firewall_mode', 'rule_set_type', 'rule_set_version'):
            setattr(self, prop, kwargs[prop])
        # Optional properties default to None.
        for prop in ('disabled_rule_groups', 'request_body_check', 'max_request_body_size',
                     'max_request_body_size_in_kb', 'file_upload_limit_in_mb', 'exclusions'):
            setattr(self, prop, kwargs.get(prop))
class FirewallPolicyRuleCondition(msrest.serialization.Model):
    """Properties of a rule.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: ApplicationRuleCondition, NatRuleCondition, NetworkRuleCondition.

    All required parameters must be populated in order to send to Azure.

    :param name: Name of the rule condition.
    :type name: str
    :param description: Description of the rule condition.
    :type description: str
    :param rule_condition_type: Required. Rule Condition Type.Constant filled by server. Possible
     values include: "ApplicationRuleCondition", "NetworkRuleCondition", "NatRuleCondition".
    :type rule_condition_type: str or
     ~azure.mgmt.network.v2020_04_01.models.FirewallPolicyRuleConditionType
    """

    _validation = {
        'rule_condition_type': {'required': True},
    }

    # Python attribute -> wire-format key/type used by the msrest serializer.
    _attribute_map = {
        'name': {'type': 'str', 'key': 'name'},
        'description': {'type': 'str', 'key': 'description'},
        'rule_condition_type': {'type': 'str', 'key': 'ruleConditionType'},
    }

    # Maps discriminator values to the concrete model class names so the
    # deserializer can pick the right sub-class.
    _subtype_map = {
        'rule_condition_type': {'ApplicationRuleCondition': 'ApplicationRuleCondition', 'NatRuleCondition': 'NatRuleCondition', 'NetworkRuleCondition': 'NetworkRuleCondition'}
    }

    def __init__(self, **kwargs):
        super(FirewallPolicyRuleCondition, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        self.description = kwargs.get('description')
        # Discriminator: each concrete sub-class fills in its own constant.
        self.rule_condition_type = None  # type: Optional[str]
class ApplicationRuleCondition(FirewallPolicyRuleCondition):
    """Rule condition of type application.

    All required parameters must be populated in order to send to Azure.

    :param name: Name of the rule condition.
    :type name: str
    :param description: Description of the rule condition.
    :type description: str
    :param rule_condition_type: Required. Rule Condition Type.Constant filled by server. Possible
     values include: "ApplicationRuleCondition", "NetworkRuleCondition", "NatRuleCondition".
    :type rule_condition_type: str or
     ~azure.mgmt.network.v2020_04_01.models.FirewallPolicyRuleConditionType
    :param source_addresses: List of source IP addresses for this rule.
    :type source_addresses: list[str]
    :param destination_addresses: List of destination IP addresses or Service Tags.
    :type destination_addresses: list[str]
    :param protocols: Array of Application Protocols.
    :type protocols:
     list[~azure.mgmt.network.v2020_04_01.models.FirewallPolicyRuleConditionApplicationProtocol]
    :param target_fqdns: List of FQDNs for this rule condition.
    :type target_fqdns: list[str]
    :param fqdn_tags: List of FQDN Tags for this rule condition.
    :type fqdn_tags: list[str]
    :param source_ip_groups: List of source IpGroups for this rule.
    :type source_ip_groups: list[str]
    """

    _validation = {
        'rule_condition_type': {'required': True},
    }

    # Python attribute -> wire-format key/type used by the msrest serializer.
    _attribute_map = {
        'name': {'type': 'str', 'key': 'name'},
        'description': {'type': 'str', 'key': 'description'},
        'rule_condition_type': {'type': 'str', 'key': 'ruleConditionType'},
        'source_addresses': {'type': '[str]', 'key': 'sourceAddresses'},
        'destination_addresses': {'type': '[str]', 'key': 'destinationAddresses'},
        'protocols': {'type': '[FirewallPolicyRuleConditionApplicationProtocol]', 'key': 'protocols'},
        'target_fqdns': {'type': '[str]', 'key': 'targetFqdns'},
        'fqdn_tags': {'type': '[str]', 'key': 'fqdnTags'},
        'source_ip_groups': {'type': '[str]', 'key': 'sourceIpGroups'},
    }

    def __init__(self, **kwargs):
        super(ApplicationRuleCondition, self).__init__(**kwargs)
        # Fixed discriminator value identifying this polymorphic sub-type.
        self.rule_condition_type = 'ApplicationRuleCondition'  # type: str
        for prop in ('source_addresses', 'destination_addresses', 'protocols',
                     'target_fqdns', 'fqdn_tags', 'source_ip_groups'):
            setattr(self, prop, kwargs.get(prop))
class ApplicationSecurityGroup(Resource):
    """An application security group in a resource group.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar resource_guid: The resource GUID property of the application security group resource. It
     uniquely identifies a resource, even if the user changes its name or migrate the resource
     across subscriptions or resource groups.
    :vartype resource_guid: str
    :ivar provisioning_state: The provisioning state of the application security group resource.
     Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    # Server-populated fields are rejected on outbound requests.
    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'etag': {'readonly': True},
        'resource_guid': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    # Python attribute -> wire-format key/type used by the msrest serializer.
    _attribute_map = {
        'id': {'type': 'str', 'key': 'id'},
        'name': {'type': 'str', 'key': 'name'},
        'type': {'type': 'str', 'key': 'type'},
        'location': {'type': 'str', 'key': 'location'},
        'tags': {'type': '{str}', 'key': 'tags'},
        'etag': {'type': 'str', 'key': 'etag'},
        'resource_guid': {'type': 'str', 'key': 'properties.resourceGuid'},
        'provisioning_state': {'type': 'str', 'key': 'properties.provisioningState'},
    }

    def __init__(self, **kwargs):
        super(ApplicationSecurityGroup, self).__init__(**kwargs)
        # Writable properties (id, location, tags) are handled by Resource;
        # the remaining fields are read-only and start out unset.
        self.etag = None
        self.resource_guid = None
        self.provisioning_state = None
class ApplicationSecurityGroupListResult(msrest.serialization.Model):
    """A list of application security groups.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param value: A list of application security groups.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.ApplicationSecurityGroup]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _validation = {
        'next_link': {'readonly': True},
    }

    # Python attribute -> wire-format key/type used by the msrest serializer.
    _attribute_map = {
        'value': {'type': '[ApplicationSecurityGroup]', 'key': 'value'},
        'next_link': {'type': 'str', 'key': 'nextLink'},
    }

    def __init__(self, **kwargs):
        super(ApplicationSecurityGroupListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value')
        # Paging link is server-populated only.
        self.next_link = None
class AuthorizationListResult(msrest.serialization.Model):
    """Response for ListAuthorizations API service call retrieves all authorizations that belongs to an ExpressRouteCircuit.

    :param value: The authorizations in an ExpressRoute Circuit.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.ExpressRouteCircuitAuthorization]
    :param next_link: The URL to get the next set of results.
    :type next_link: str
    """

    # Python attribute -> wire-format key/type used by the msrest serializer.
    _attribute_map = {
        'value': {'type': '[ExpressRouteCircuitAuthorization]', 'key': 'value'},
        'next_link': {'type': 'str', 'key': 'nextLink'},
    }

    def __init__(self, **kwargs):
        super(AuthorizationListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value')
        self.next_link = kwargs.get('next_link')
class AutoApprovedPrivateLinkService(msrest.serialization.Model):
    """The information of an AutoApprovedPrivateLinkService.

    :param private_link_service: The id of the private link service resource.
    :type private_link_service: str
    """

    # Python attribute -> wire-format key/type used by the msrest serializer.
    _attribute_map = {
        'private_link_service': {'type': 'str', 'key': 'privateLinkService'},
    }

    def __init__(self, **kwargs):
        super(AutoApprovedPrivateLinkService, self).__init__(**kwargs)
        self.private_link_service = kwargs.get('private_link_service')
class AutoApprovedPrivateLinkServicesResult(msrest.serialization.Model):
    """An array of private link service id that can be linked to a private end point with auto approved.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param value: An array of auto approved private link service.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.AutoApprovedPrivateLinkService]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _validation = {
        'next_link': {'readonly': True},
    }

    # Python attribute -> wire-format key/type used by the msrest serializer.
    _attribute_map = {
        'value': {'type': '[AutoApprovedPrivateLinkService]', 'key': 'value'},
        'next_link': {'type': 'str', 'key': 'nextLink'},
    }

    def __init__(self, **kwargs):
        super(AutoApprovedPrivateLinkServicesResult, self).__init__(**kwargs)
        self.value = kwargs.get('value')
        # Paging link is server-populated only.
        self.next_link = None
class Availability(msrest.serialization.Model):
    """Availability of the metric.

    :param time_grain: The time grain of the availability.
    :type time_grain: str
    :param retention: The retention of the availability.
    :type retention: str
    :param blob_duration: Duration of the availability blob.
    :type blob_duration: str
    """

    # Python attribute -> wire-format key/type used by the msrest serializer.
    _attribute_map = {
        'time_grain': {'type': 'str', 'key': 'timeGrain'},
        'retention': {'type': 'str', 'key': 'retention'},
        'blob_duration': {'type': 'str', 'key': 'blobDuration'},
    }

    def __init__(self, **kwargs):
        super(Availability, self).__init__(**kwargs)
        for prop in ('time_grain', 'retention', 'blob_duration'):
            setattr(self, prop, kwargs.get(prop))
class AvailableDelegation(msrest.serialization.Model):
    """The serviceName of an AvailableDelegation indicates a possible delegation for a subnet.

    :param name: The name of the AvailableDelegation resource.
    :type name: str
    :param id: A unique identifier of the AvailableDelegation resource.
    :type id: str
    :param type: Resource type.
    :type type: str
    :param service_name: The name of the service and resource.
    :type service_name: str
    :param actions: The actions permitted to the service upon delegation.
    :type actions: list[str]
    """

    # Python attribute -> wire-format key/type used by the msrest serializer.
    _attribute_map = {
        'name': {'type': 'str', 'key': 'name'},
        'id': {'type': 'str', 'key': 'id'},
        'type': {'type': 'str', 'key': 'type'},
        'service_name': {'type': 'str', 'key': 'serviceName'},
        'actions': {'type': '[str]', 'key': 'actions'},
    }

    def __init__(self, **kwargs):
        super(AvailableDelegation, self).__init__(**kwargs)
        for prop in ('name', 'id', 'type', 'service_name', 'actions'):
            setattr(self, prop, kwargs.get(prop))
class AvailableDelegationsResult(msrest.serialization.Model):
    """An array of available delegations.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param value: An array of available delegations.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.AvailableDelegation]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _validation = {
        'next_link': {'readonly': True},
    }

    # Python attribute -> wire-format key/type used by the msrest serializer.
    _attribute_map = {
        'value': {'type': '[AvailableDelegation]', 'key': 'value'},
        'next_link': {'type': 'str', 'key': 'nextLink'},
    }

    def __init__(self, **kwargs):
        super(AvailableDelegationsResult, self).__init__(**kwargs)
        self.value = kwargs.get('value')
        # Paging link is server-populated only.
        self.next_link = None
class AvailablePrivateEndpointType(msrest.serialization.Model):
    """The information of an AvailablePrivateEndpointType.

    :param name: The name of the service and resource.
    :type name: str
    :param id: A unique identifier of the AvailablePrivateEndpoint Type resource.
    :type id: str
    :param type: Resource type.
    :type type: str
    :param resource_name: The name of the service and resource.
    :type resource_name: str
    """

    # Python attribute -> wire-format key/type used by the msrest serializer.
    _attribute_map = {
        'name': {'type': 'str', 'key': 'name'},
        'id': {'type': 'str', 'key': 'id'},
        'type': {'type': 'str', 'key': 'type'},
        'resource_name': {'type': 'str', 'key': 'resourceName'},
    }

    def __init__(self, **kwargs):
        super(AvailablePrivateEndpointType, self).__init__(**kwargs)
        for prop in ('name', 'id', 'type', 'resource_name'):
            setattr(self, prop, kwargs.get(prop))
class AvailablePrivateEndpointTypesResult(msrest.serialization.Model):
    """An array of available PrivateEndpoint types.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param value: An array of available privateEndpoint type.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.AvailablePrivateEndpointType]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _validation = {
        'next_link': {'readonly': True},
    }

    # Python attribute -> wire-format key/type used by the msrest serializer.
    _attribute_map = {
        'value': {'type': '[AvailablePrivateEndpointType]', 'key': 'value'},
        'next_link': {'type': 'str', 'key': 'nextLink'},
    }

    def __init__(self, **kwargs):
        super(AvailablePrivateEndpointTypesResult, self).__init__(**kwargs)
        self.value = kwargs.get('value')
        # Paging link is server-populated only.
        self.next_link = None
class AvailableProvidersList(msrest.serialization.Model):
    """List of available countries with details.

    All required parameters must be populated in order to send to Azure.

    :param countries: Required. List of available countries.
    :type countries: list[~azure.mgmt.network.v2020_04_01.models.AvailableProvidersListCountry]
    """

    _validation = {
        'countries': {'required': True},
    }

    # Python attribute -> wire-format key/type used by the msrest serializer.
    _attribute_map = {
        'countries': {'type': '[AvailableProvidersListCountry]', 'key': 'countries'},
    }

    def __init__(self, **kwargs):
        super(AvailableProvidersList, self).__init__(**kwargs)
        # Required: indexing raises KeyError when 'countries' is missing.
        self.countries = kwargs['countries']
class AvailableProvidersListCity(msrest.serialization.Model):
    """City or town details.

    :param city_name: The city or town name.
    :type city_name: str
    :param providers: A list of Internet service providers.
    :type providers: list[str]
    """

    # Python attribute -> wire-format key/type used by the msrest serializer.
    _attribute_map = {
        'city_name': {'type': 'str', 'key': 'cityName'},
        'providers': {'type': '[str]', 'key': 'providers'},
    }

    def __init__(self, **kwargs):
        super(AvailableProvidersListCity, self).__init__(**kwargs)
        self.city_name = kwargs.get('city_name')
        self.providers = kwargs.get('providers')
class AvailableProvidersListCountry(msrest.serialization.Model):
    """Country details.

    :param country_name: The country name.
    :type country_name: str
    :param providers: A list of Internet service providers.
    :type providers: list[str]
    :param states: List of available states in the country.
    :type states: list[~azure.mgmt.network.v2020_04_01.models.AvailableProvidersListState]
    """

    # Python attribute -> wire-format key/type used by the msrest serializer.
    _attribute_map = {
        'country_name': {'type': 'str', 'key': 'countryName'},
        'providers': {'type': '[str]', 'key': 'providers'},
        'states': {'type': '[AvailableProvidersListState]', 'key': 'states'},
    }

    def __init__(self, **kwargs):
        super(AvailableProvidersListCountry, self).__init__(**kwargs)
        for prop in ('country_name', 'providers', 'states'):
            setattr(self, prop, kwargs.get(prop))
class AvailableProvidersListParameters(msrest.serialization.Model):
    """Constraints that determine the list of available Internet service providers.

    :param azure_locations: A list of Azure regions.
    :type azure_locations: list[str]
    :param country: The country for available providers list.
    :type country: str
    :param state: The state for available providers list.
    :type state: str
    :param city: The city or town for available providers list.
    :type city: str
    """

    # Python attribute -> wire-format key/type used by the msrest serializer.
    _attribute_map = {
        'azure_locations': {'type': '[str]', 'key': 'azureLocations'},
        'country': {'type': 'str', 'key': 'country'},
        'state': {'type': 'str', 'key': 'state'},
        'city': {'type': 'str', 'key': 'city'},
    }

    def __init__(self, **kwargs):
        super(AvailableProvidersListParameters, self).__init__(**kwargs)
        for prop in ('azure_locations', 'country', 'state', 'city'):
            setattr(self, prop, kwargs.get(prop))
class AvailableProvidersListState(msrest.serialization.Model):
    """State details.

    :param state_name: The state name.
    :type state_name: str
    :param providers: A list of Internet service providers.
    :type providers: list[str]
    :param cities: List of available cities or towns in the state.
    :type cities: list[~azure.mgmt.network.v2020_04_01.models.AvailableProvidersListCity]
    """

    # Python attribute -> wire-format key/type used by the msrest serializer.
    _attribute_map = {
        'state_name': {'type': 'str', 'key': 'stateName'},
        'providers': {'type': '[str]', 'key': 'providers'},
        'cities': {'type': '[AvailableProvidersListCity]', 'key': 'cities'},
    }

    def __init__(self, **kwargs):
        super(AvailableProvidersListState, self).__init__(**kwargs)
        for prop in ('state_name', 'providers', 'cities'):
            setattr(self, prop, kwargs.get(prop))
class AvailableServiceAlias(msrest.serialization.Model):
    """The available service alias.

    :param name: The name of the service alias.
    :type name: str
    :param id: The ID of the service alias.
    :type id: str
    :param type: The type of the resource.
    :type type: str
    :param resource_name: The resource name of the service alias.
    :type resource_name: str
    """

    # Python attribute -> wire-format key/type used by the msrest serializer.
    _attribute_map = {
        'name': {'type': 'str', 'key': 'name'},
        'id': {'type': 'str', 'key': 'id'},
        'type': {'type': 'str', 'key': 'type'},
        'resource_name': {'type': 'str', 'key': 'resourceName'},
    }

    def __init__(self, **kwargs):
        super(AvailableServiceAlias, self).__init__(**kwargs)
        for prop in ('name', 'id', 'type', 'resource_name'):
            setattr(self, prop, kwargs.get(prop))
class AvailableServiceAliasesResult(msrest.serialization.Model):
    """An array of available service aliases.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param value: An array of available service aliases.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.AvailableServiceAlias]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _validation = {
        'next_link': {'readonly': True},
    }

    # Python attribute -> wire-format key/type used by the msrest serializer.
    _attribute_map = {
        'value': {'type': '[AvailableServiceAlias]', 'key': 'value'},
        'next_link': {'type': 'str', 'key': 'nextLink'},
    }

    def __init__(self, **kwargs):
        super(AvailableServiceAliasesResult, self).__init__(**kwargs)
        self.value = kwargs.get('value')
        # Paging link is server-populated only.
        self.next_link = None
class AzureAsyncOperationResult(msrest.serialization.Model):
    """The response body contains the status of the specified asynchronous operation, indicating whether it has succeeded, is in progress, or has failed. Note that this status is distinct from the HTTP status code returned for the Get Operation Status operation itself. If the asynchronous operation succeeded, the response body includes the HTTP status code for the successful request. If the asynchronous operation failed, the response body includes the HTTP status code for the failed request and error information regarding the failure.

    :param status: Status of the Azure async operation. Possible values include: "InProgress",
     "Succeeded", "Failed".
    :type status: str or ~azure.mgmt.network.v2020_04_01.models.NetworkOperationStatus
    :param error: Details of the error occurred during specified asynchronous operation.
    :type error: ~azure.mgmt.network.v2020_04_01.models.Error
    """

    # Python attribute -> wire-format key/type used by the msrest serializer.
    _attribute_map = {
        'status': {'type': 'str', 'key': 'status'},
        'error': {'type': 'Error', 'key': 'error'},
    }

    def __init__(self, **kwargs):
        super(AzureAsyncOperationResult, self).__init__(**kwargs)
        self.status = kwargs.get('status')
        self.error = kwargs.get('error')
class AzureFirewall(Resource):
    """Azure Firewall resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param zones: A list of availability zones denoting where the resource needs to come from.
    :type zones: list[str]
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param application_rule_collections: Collection of application rule collections used by Azure
     Firewall.
    :type application_rule_collections:
     list[~azure.mgmt.network.v2020_04_01.models.AzureFirewallApplicationRuleCollection]
    :param nat_rule_collections: Collection of NAT rule collections used by Azure Firewall.
    :type nat_rule_collections:
     list[~azure.mgmt.network.v2020_04_01.models.AzureFirewallNatRuleCollection]
    :param network_rule_collections: Collection of network rule collections used by Azure Firewall.
    :type network_rule_collections:
     list[~azure.mgmt.network.v2020_04_01.models.AzureFirewallNetworkRuleCollection]
    :param ip_configurations: IP configuration of the Azure Firewall resource.
    :type ip_configurations:
     list[~azure.mgmt.network.v2020_04_01.models.AzureFirewallIPConfiguration]
    :param management_ip_configuration: IP configuration of the Azure Firewall used for management
     traffic.
    :type management_ip_configuration:
     ~azure.mgmt.network.v2020_04_01.models.AzureFirewallIPConfiguration
    :ivar provisioning_state: The provisioning state of the Azure firewall resource. Possible
     values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    :param threat_intel_mode: The operation mode for Threat Intelligence. Possible values include:
     "Alert", "Deny", "Off".
    :type threat_intel_mode: str or
     ~azure.mgmt.network.v2020_04_01.models.AzureFirewallThreatIntelMode
    :param virtual_hub: The virtualHub to which the firewall belongs.
    :type virtual_hub: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param firewall_policy: The firewallPolicy associated with this azure firewall.
    :type firewall_policy: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :ivar hub_ip_addresses: IP addresses associated with AzureFirewall.
    :vartype hub_ip_addresses: ~azure.mgmt.network.v2020_04_01.models.HubIPAddresses
    :ivar ip_groups: IpGroups associated with AzureFirewall.
    :vartype ip_groups: list[~azure.mgmt.network.v2020_04_01.models.AzureFirewallIpGroups]
    :param sku: The Azure Firewall Resource SKU.
    :type sku: ~azure.mgmt.network.v2020_04_01.models.AzureFirewallSku
    :param additional_properties: The additional properties used to further config this azure
     firewall.
    :type additional_properties: dict[str, str]
    """

    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'etag': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'hub_ip_addresses': {'readonly': True},
        'ip_groups': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'zones': {'key': 'zones', 'type': '[str]'},
        'etag': {'key': 'etag', 'type': 'str'},
        'application_rule_collections': {'key': 'properties.applicationRuleCollections', 'type': '[AzureFirewallApplicationRuleCollection]'},
        'nat_rule_collections': {'key': 'properties.natRuleCollections', 'type': '[AzureFirewallNatRuleCollection]'},
        'network_rule_collections': {'key': 'properties.networkRuleCollections', 'type': '[AzureFirewallNetworkRuleCollection]'},
        'ip_configurations': {'key': 'properties.ipConfigurations', 'type': '[AzureFirewallIPConfiguration]'},
        'management_ip_configuration': {'key': 'properties.managementIpConfiguration', 'type': 'AzureFirewallIPConfiguration'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'threat_intel_mode': {'key': 'properties.threatIntelMode', 'type': 'str'},
        'virtual_hub': {'key': 'properties.virtualHub', 'type': 'SubResource'},
        'firewall_policy': {'key': 'properties.firewallPolicy', 'type': 'SubResource'},
        'hub_ip_addresses': {'key': 'properties.hubIpAddresses', 'type': 'HubIPAddresses'},
        'ip_groups': {'key': 'properties.ipGroups', 'type': '[AzureFirewallIpGroups]'},
        'sku': {'key': 'properties.sku', 'type': 'AzureFirewallSku'},
        'additional_properties': {'key': 'properties.additionalProperties', 'type': '{str}'},
    }

    def __init__(self, **kwargs):
        super(AzureFirewall, self).__init__(**kwargs)
        # Writable properties are taken from kwargs; server-populated (read-only)
        # properties always start as None and are filled in by deserialization.
        self.zones = kwargs.get('zones')
        self.etag = None
        self.application_rule_collections = kwargs.get('application_rule_collections')
        self.nat_rule_collections = kwargs.get('nat_rule_collections')
        self.network_rule_collections = kwargs.get('network_rule_collections')
        self.ip_configurations = kwargs.get('ip_configurations')
        self.management_ip_configuration = kwargs.get('management_ip_configuration')
        self.provisioning_state = None
        self.threat_intel_mode = kwargs.get('threat_intel_mode')
        self.virtual_hub = kwargs.get('virtual_hub')
        self.firewall_policy = kwargs.get('firewall_policy')
        self.hub_ip_addresses = None
        self.ip_groups = None
        self.sku = kwargs.get('sku')
        self.additional_properties = kwargs.get('additional_properties')
class AzureFirewallApplicationRule(msrest.serialization.Model):
    """Properties of an application rule.

    :param name: Name of the application rule.
    :type name: str
    :param description: Description of the rule.
    :type description: str
    :param source_addresses: List of source IP addresses for this rule.
    :type source_addresses: list[str]
    :param protocols: Array of ApplicationRuleProtocols.
    :type protocols:
     list[~azure.mgmt.network.v2020_04_01.models.AzureFirewallApplicationRuleProtocol]
    :param target_fqdns: List of FQDNs for this rule.
    :type target_fqdns: list[str]
    :param fqdn_tags: List of FQDN Tags for this rule.
    :type fqdn_tags: list[str]
    :param source_ip_groups: List of source IpGroups for this rule.
    :type source_ip_groups: list[str]
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'source_addresses': {'key': 'sourceAddresses', 'type': '[str]'},
        'protocols': {'key': 'protocols', 'type': '[AzureFirewallApplicationRuleProtocol]'},
        'target_fqdns': {'key': 'targetFqdns', 'type': '[str]'},
        'fqdn_tags': {'key': 'fqdnTags', 'type': '[str]'},
        'source_ip_groups': {'key': 'sourceIpGroups', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        super(AzureFirewallApplicationRule, self).__init__(**kwargs)
        # All properties are optional and caller-supplied.
        self.name = kwargs.get('name')
        self.description = kwargs.get('description')
        self.source_addresses = kwargs.get('source_addresses')
        self.protocols = kwargs.get('protocols')
        self.target_fqdns = kwargs.get('target_fqdns')
        self.fqdn_tags = kwargs.get('fqdn_tags')
        self.source_ip_groups = kwargs.get('source_ip_groups')
class AzureFirewallApplicationRuleCollection(SubResource):
    """Application rule collection resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: The name of the resource that is unique within the Azure firewall. This name can
     be used to access the resource.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param priority: Priority of the application rule collection resource.
    :type priority: int
    :param action: The action type of a rule collection.
    :type action: ~azure.mgmt.network.v2020_04_01.models.AzureFirewallRCAction
    :param rules: Collection of rules used by a application rule collection.
    :type rules: list[~azure.mgmt.network.v2020_04_01.models.AzureFirewallApplicationRule]
    :ivar provisioning_state: The provisioning state of the application rule collection resource.
     Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    _validation = {
        'etag': {'readonly': True},
        'priority': {'maximum': 65000, 'minimum': 100},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'priority': {'key': 'properties.priority', 'type': 'int'},
        'action': {'key': 'properties.action', 'type': 'AzureFirewallRCAction'},
        'rules': {'key': 'properties.rules', 'type': '[AzureFirewallApplicationRule]'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(AzureFirewallApplicationRuleCollection, self).__init__(**kwargs)
        # etag and provisioning_state are read-only and populated by the server.
        self.name = kwargs.get('name')
        self.etag = None
        self.priority = kwargs.get('priority')
        self.action = kwargs.get('action')
        self.rules = kwargs.get('rules')
        self.provisioning_state = None
class AzureFirewallApplicationRuleProtocol(msrest.serialization.Model):
    """Properties of the application rule protocol.

    :param protocol_type: Protocol type. Possible values include: "Http", "Https", "Mssql".
    :type protocol_type: str or
     ~azure.mgmt.network.v2020_04_01.models.AzureFirewallApplicationRuleProtocolType
    :param port: Port number for the protocol, cannot be greater than 64000. This field is
     optional.
    :type port: int
    """

    _validation = {
        'port': {'maximum': 64000, 'minimum': 0},
    }

    _attribute_map = {
        'protocol_type': {'key': 'protocolType', 'type': 'str'},
        'port': {'key': 'port', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        super(AzureFirewallApplicationRuleProtocol, self).__init__(**kwargs)
        self.protocol_type = kwargs.get('protocol_type')
        self.port = kwargs.get('port')
class AzureFirewallFqdnTag(Resource):
    """Azure Firewall FQDN Tag Resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar provisioning_state: The provisioning state of the Azure firewall FQDN tag resource.
     Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    :ivar fqdn_tag_name: The name of this FQDN Tag.
    :vartype fqdn_tag_name: str
    """

    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'etag': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'fqdn_tag_name': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'etag': {'key': 'etag', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'fqdn_tag_name': {'key': 'properties.fqdnTagName', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(AzureFirewallFqdnTag, self).__init__(**kwargs)
        # Every property declared here is server-populated (read-only);
        # the writable ones (id, location, tags) are handled by Resource.
        self.etag = None
        self.provisioning_state = None
        self.fqdn_tag_name = None
class AzureFirewallFqdnTagListResult(msrest.serialization.Model):
    """Response for ListAzureFirewallFqdnTags API service call.

    :param value: List of Azure Firewall FQDN Tags in a resource group.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.AzureFirewallFqdnTag]
    :param next_link: URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[AzureFirewallFqdnTag]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(AzureFirewallFqdnTagListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value')
        self.next_link = kwargs.get('next_link')
class AzureFirewallIPConfiguration(SubResource):
    """IP configuration of an Azure Firewall.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the resource that is unique within a resource group. This name can be used
     to access the resource.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar type: Type of the resource.
    :vartype type: str
    :ivar private_ip_address: The Firewall Internal Load Balancer IP to be used as the next hop in
     User Defined Routes.
    :vartype private_ip_address: str
    :param subnet: Reference to the subnet resource. This resource must be named
     'AzureFirewallSubnet' or 'AzureFirewallManagementSubnet'.
    :type subnet: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param public_ip_address: Reference to the PublicIP resource. This field is a mandatory input
     if subnet is not null.
    :type public_ip_address: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :ivar provisioning_state: The provisioning state of the Azure firewall IP configuration
     resource. Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    _validation = {
        'etag': {'readonly': True},
        'type': {'readonly': True},
        'private_ip_address': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'private_ip_address': {'key': 'properties.privateIPAddress', 'type': 'str'},
        'subnet': {'key': 'properties.subnet', 'type': 'SubResource'},
        'public_ip_address': {'key': 'properties.publicIPAddress', 'type': 'SubResource'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(AzureFirewallIPConfiguration, self).__init__(**kwargs)
        # Read-only fields (etag, type, private_ip_address, provisioning_state)
        # start as None and are filled in by the server.
        self.name = kwargs.get('name')
        self.etag = None
        self.type = None
        self.private_ip_address = None
        self.subnet = kwargs.get('subnet')
        self.public_ip_address = kwargs.get('public_ip_address')
        self.provisioning_state = None
class AzureFirewallIpGroups(msrest.serialization.Model):
    """IpGroups associated with azure firewall.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource ID.
    :vartype id: str
    :ivar change_number: The iteration number.
    :vartype change_number: str
    """

    _validation = {
        'id': {'readonly': True},
        'change_number': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'change_number': {'key': 'changeNumber', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(AzureFirewallIpGroups, self).__init__(**kwargs)
        # Both properties are read-only and populated by the server.
        self.id = None
        self.change_number = None
class AzureFirewallListResult(msrest.serialization.Model):
    """Response for ListAzureFirewalls API service call.

    :param value: List of Azure Firewalls in a resource group.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.AzureFirewall]
    :param next_link: URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[AzureFirewall]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(AzureFirewallListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value')
        self.next_link = kwargs.get('next_link')
class AzureFirewallNatRCAction(msrest.serialization.Model):
    """AzureFirewall NAT Rule Collection Action.

    :param type: The type of action. Possible values include: "Snat", "Dnat".
    :type type: str or ~azure.mgmt.network.v2020_04_01.models.AzureFirewallNatRCActionType
    """

    _attribute_map = {
        'type': {'key': 'type', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(AzureFirewallNatRCAction, self).__init__(**kwargs)
        self.type = kwargs.get('type')
class AzureFirewallNatRule(msrest.serialization.Model):
    """Properties of a NAT rule.

    :param name: Name of the NAT rule.
    :type name: str
    :param description: Description of the rule.
    :type description: str
    :param source_addresses: List of source IP addresses for this rule.
    :type source_addresses: list[str]
    :param destination_addresses: List of destination IP addresses for this rule. Supports IP
     ranges, prefixes, and service tags.
    :type destination_addresses: list[str]
    :param destination_ports: List of destination ports.
    :type destination_ports: list[str]
    :param protocols: Array of AzureFirewallNetworkRuleProtocols applicable to this NAT rule.
    :type protocols: list[str or
     ~azure.mgmt.network.v2020_04_01.models.AzureFirewallNetworkRuleProtocol]
    :param translated_address: The translated address for this NAT rule.
    :type translated_address: str
    :param translated_port: The translated port for this NAT rule.
    :type translated_port: str
    :param translated_fqdn: The translated FQDN for this NAT rule.
    :type translated_fqdn: str
    :param source_ip_groups: List of source IpGroups for this rule.
    :type source_ip_groups: list[str]
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'source_addresses': {'key': 'sourceAddresses', 'type': '[str]'},
        'destination_addresses': {'key': 'destinationAddresses', 'type': '[str]'},
        'destination_ports': {'key': 'destinationPorts', 'type': '[str]'},
        'protocols': {'key': 'protocols', 'type': '[str]'},
        'translated_address': {'key': 'translatedAddress', 'type': 'str'},
        'translated_port': {'key': 'translatedPort', 'type': 'str'},
        'translated_fqdn': {'key': 'translatedFqdn', 'type': 'str'},
        'source_ip_groups': {'key': 'sourceIpGroups', 'type': '[str]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(AzureFirewallNatRule, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.description = kwargs.get('description', None)
        self.source_addresses = kwargs.get('source_addresses', None)
        self.destination_addresses = kwargs.get('destination_addresses', None)
        # FIX: destination_ports and protocols were never assigned, even though both
        # are declared in the docstring and _attribute_map; without these attributes
        # serialization of the model would fail and the kwargs were silently dropped.
        self.destination_ports = kwargs.get('destination_ports', None)
        self.protocols = kwargs.get('protocols', None)
        self.translated_address = kwargs.get('translated_address', None)
        self.translated_port = kwargs.get('translated_port', None)
        self.translated_fqdn = kwargs.get('translated_fqdn', None)
        self.source_ip_groups = kwargs.get('source_ip_groups', None)
class AzureFirewallNatRuleCollection(SubResource):
    """NAT rule collection resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: The name of the resource that is unique within the Azure firewall. This name can
     be used to access the resource.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param priority: Priority of the NAT rule collection resource.
    :type priority: int
    :param action: The action type of a NAT rule collection.
    :type action: ~azure.mgmt.network.v2020_04_01.models.AzureFirewallNatRCAction
    :param rules: Collection of rules used by a NAT rule collection.
    :type rules: list[~azure.mgmt.network.v2020_04_01.models.AzureFirewallNatRule]
    :ivar provisioning_state: The provisioning state of the NAT rule collection resource. Possible
     values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    _validation = {
        'etag': {'readonly': True},
        'priority': {'maximum': 65000, 'minimum': 100},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'priority': {'key': 'properties.priority', 'type': 'int'},
        'action': {'key': 'properties.action', 'type': 'AzureFirewallNatRCAction'},
        'rules': {'key': 'properties.rules', 'type': '[AzureFirewallNatRule]'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(AzureFirewallNatRuleCollection, self).__init__(**kwargs)
        # etag and provisioning_state are read-only and populated by the server.
        self.name = kwargs.get('name')
        self.etag = None
        self.priority = kwargs.get('priority')
        self.action = kwargs.get('action')
        self.rules = kwargs.get('rules')
        self.provisioning_state = None
class AzureFirewallNetworkRule(msrest.serialization.Model):
    """Properties of the network rule.

    :param name: Name of the network rule.
    :type name: str
    :param description: Description of the rule.
    :type description: str
    :param protocols: Array of AzureFirewallNetworkRuleProtocols.
    :type protocols: list[str or
     ~azure.mgmt.network.v2020_04_01.models.AzureFirewallNetworkRuleProtocol]
    :param source_addresses: List of source IP addresses for this rule.
    :type source_addresses: list[str]
    :param destination_addresses: List of destination IP addresses.
    :type destination_addresses: list[str]
    :param destination_ports: List of destination ports.
    :type destination_ports: list[str]
    :param destination_fqdns: List of destination FQDNs.
    :type destination_fqdns: list[str]
    :param source_ip_groups: List of source IpGroups for this rule.
    :type source_ip_groups: list[str]
    :param destination_ip_groups: List of destination IpGroups for this rule.
    :type destination_ip_groups: list[str]
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'protocols': {'key': 'protocols', 'type': '[str]'},
        'source_addresses': {'key': 'sourceAddresses', 'type': '[str]'},
        'destination_addresses': {'key': 'destinationAddresses', 'type': '[str]'},
        'destination_ports': {'key': 'destinationPorts', 'type': '[str]'},
        'destination_fqdns': {'key': 'destinationFqdns', 'type': '[str]'},
        'source_ip_groups': {'key': 'sourceIpGroups', 'type': '[str]'},
        'destination_ip_groups': {'key': 'destinationIpGroups', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        super(AzureFirewallNetworkRule, self).__init__(**kwargs)
        # All properties are optional and caller-supplied.
        self.name = kwargs.get('name')
        self.description = kwargs.get('description')
        self.protocols = kwargs.get('protocols')
        self.source_addresses = kwargs.get('source_addresses')
        self.destination_addresses = kwargs.get('destination_addresses')
        self.destination_ports = kwargs.get('destination_ports')
        self.destination_fqdns = kwargs.get('destination_fqdns')
        self.source_ip_groups = kwargs.get('source_ip_groups')
        self.destination_ip_groups = kwargs.get('destination_ip_groups')
class AzureFirewallNetworkRuleCollection(SubResource):
    """Network rule collection resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: The name of the resource that is unique within the Azure firewall. This name can
     be used to access the resource.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param priority: Priority of the network rule collection resource.
    :type priority: int
    :param action: The action type of a rule collection.
    :type action: ~azure.mgmt.network.v2020_04_01.models.AzureFirewallRCAction
    :param rules: Collection of rules used by a network rule collection.
    :type rules: list[~azure.mgmt.network.v2020_04_01.models.AzureFirewallNetworkRule]
    :ivar provisioning_state: The provisioning state of the network rule collection resource.
     Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    _validation = {
        'etag': {'readonly': True},
        'priority': {'maximum': 65000, 'minimum': 100},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'priority': {'key': 'properties.priority', 'type': 'int'},
        'action': {'key': 'properties.action', 'type': 'AzureFirewallRCAction'},
        'rules': {'key': 'properties.rules', 'type': '[AzureFirewallNetworkRule]'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(AzureFirewallNetworkRuleCollection, self).__init__(**kwargs)
        # etag and provisioning_state are read-only and populated by the server.
        self.name = kwargs.get('name')
        self.etag = None
        self.priority = kwargs.get('priority')
        self.action = kwargs.get('action')
        self.rules = kwargs.get('rules')
        self.provisioning_state = None
class AzureFirewallPublicIPAddress(msrest.serialization.Model):
    """Public IP Address associated with azure firewall.

    :param address: Public IP Address value.
    :type address: str
    """

    _attribute_map = {
        'address': {'key': 'address', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(AzureFirewallPublicIPAddress, self).__init__(**kwargs)
        self.address = kwargs.get('address')
class AzureFirewallRCAction(msrest.serialization.Model):
    """Properties of the AzureFirewallRCAction.

    :param type: The type of action. Possible values include: "Allow", "Deny".
    :type type: str or ~azure.mgmt.network.v2020_04_01.models.AzureFirewallRCActionType
    """

    _attribute_map = {
        'type': {'key': 'type', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(AzureFirewallRCAction, self).__init__(**kwargs)
        self.type = kwargs.get('type')
class AzureFirewallSku(msrest.serialization.Model):
    """SKU of an Azure Firewall.

    :param name: Name of an Azure Firewall SKU. Possible values include: "AZFW_VNet", "AZFW_Hub".
    :type name: str or ~azure.mgmt.network.v2020_04_01.models.AzureFirewallSkuName
    :param tier: Tier of an Azure Firewall. Possible values include: "Standard", "Premium".
    :type tier: str or ~azure.mgmt.network.v2020_04_01.models.AzureFirewallSkuTier
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'tier': {'key': 'tier', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(AzureFirewallSku, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        self.tier = kwargs.get('tier')
class AzureReachabilityReport(msrest.serialization.Model):
    """Azure reachability report details.

    All required parameters must be populated in order to send to Azure.

    :param aggregation_level: Required. The aggregation level of Azure reachability report. Can be
     Country, State or City.
    :type aggregation_level: str
    :param provider_location: Required. Parameters that define a geographic location.
    :type provider_location: ~azure.mgmt.network.v2020_04_01.models.AzureReachabilityReportLocation
    :param reachability_report: Required. List of Azure reachability report items.
    :type reachability_report:
     list[~azure.mgmt.network.v2020_04_01.models.AzureReachabilityReportItem]
    """

    _validation = {
        'aggregation_level': {'required': True},
        'provider_location': {'required': True},
        'reachability_report': {'required': True},
    }

    _attribute_map = {
        'aggregation_level': {'key': 'aggregationLevel', 'type': 'str'},
        'provider_location': {'key': 'providerLocation', 'type': 'AzureReachabilityReportLocation'},
        'reachability_report': {'key': 'reachabilityReport', 'type': '[AzureReachabilityReportItem]'},
    }

    def __init__(self, **kwargs):
        super(AzureReachabilityReport, self).__init__(**kwargs)
        # All three properties are required: indexing kwargs raises KeyError if
        # a caller omits one, matching the 'required' entries in _validation.
        self.aggregation_level = kwargs['aggregation_level']
        self.provider_location = kwargs['provider_location']
        self.reachability_report = kwargs['reachability_report']
class AzureReachabilityReportItem(msrest.serialization.Model):
    """Azure reachability report details for a given provider location.

    :param provider: The Internet service provider.
    :type provider: str
    :param azure_location: The Azure region.
    :type azure_location: str
    :param latencies: List of latency details for each of the time series.
    :type latencies:
     list[~azure.mgmt.network.v2020_04_01.models.AzureReachabilityReportLatencyInfo]
    """

    _attribute_map = {
        'provider': {'key': 'provider', 'type': 'str'},
        'azure_location': {'key': 'azureLocation', 'type': 'str'},
        'latencies': {'key': 'latencies', 'type': '[AzureReachabilityReportLatencyInfo]'},
    }

    def __init__(self, **kwargs):
        super(AzureReachabilityReportItem, self).__init__(**kwargs)
        self.provider = kwargs.get('provider')
        self.azure_location = kwargs.get('azure_location')
        self.latencies = kwargs.get('latencies')
class AzureReachabilityReportLatencyInfo(msrest.serialization.Model):
    """Details on latency for a time series.

    :param time_stamp: The time stamp.
    :type time_stamp: ~datetime.datetime
    :param score: The relative latency score between 1 and 100, higher values indicating a faster
     connection.
    :type score: int
    """

    _validation = {
        'score': {'maximum': 100, 'minimum': 1},
    }

    _attribute_map = {
        'time_stamp': {'key': 'timeStamp', 'type': 'iso-8601'},
        'score': {'key': 'score', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        super(AzureReachabilityReportLatencyInfo, self).__init__(**kwargs)
        self.time_stamp = kwargs.get('time_stamp')
        self.score = kwargs.get('score')
class AzureReachabilityReportLocation(msrest.serialization.Model):
    """Parameters that define a geographic location.

    All required parameters must be populated in order to send to Azure.

    :param country: Required. The name of the country.
    :type country: str
    :param state: The name of the state.
    :type state: str
    :param city: The name of the city or town.
    :type city: str
    """

    _validation = {
        'country': {'required': True},
    }

    _attribute_map = {
        'country': {'key': 'country', 'type': 'str'},
        'state': {'key': 'state', 'type': 'str'},
        'city': {'key': 'city', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(AzureReachabilityReportLocation, self).__init__(**kwargs)
        # 'country' is required (KeyError if omitted); state and city are optional.
        self.country = kwargs['country']
        self.state = kwargs.get('state')
        self.city = kwargs.get('city')
class AzureReachabilityReportParameters(msrest.serialization.Model):
    """Geographic and time constraints for Azure reachability report.

    All required parameters must be populated in order to send to Azure.

    :param provider_location: Required. Parameters that define a geographic location.
    :type provider_location: ~azure.mgmt.network.v2020_04_01.models.AzureReachabilityReportLocation
    :param providers: List of Internet service providers.
    :type providers: list[str]
    :param azure_locations: Optional Azure regions to scope the query to.
    :type azure_locations: list[str]
    :param start_time: Required. The start time for the Azure reachability report.
    :type start_time: ~datetime.datetime
    :param end_time: Required. The end time for the Azure reachability report.
    :type end_time: ~datetime.datetime
    """

    _validation = {
        'provider_location': {'required': True},
        'start_time': {'required': True},
        'end_time': {'required': True},
    }

    _attribute_map = {
        'provider_location': {'key': 'providerLocation', 'type': 'AzureReachabilityReportLocation'},
        'providers': {'key': 'providers', 'type': '[str]'},
        'azure_locations': {'key': 'azureLocations', 'type': '[str]'},
        'start_time': {'key': 'startTime', 'type': 'iso-8601'},
        'end_time': {'key': 'endTime', 'type': 'iso-8601'},
    }

    def __init__(self, **kwargs):
        super(AzureReachabilityReportParameters, self).__init__(**kwargs)
        # Required properties are indexed (KeyError if omitted); the rest are optional.
        self.provider_location = kwargs['provider_location']
        self.providers = kwargs.get('providers')
        self.azure_locations = kwargs.get('azure_locations')
        self.start_time = kwargs['start_time']
        self.end_time = kwargs['end_time']
class BackendAddressPool(SubResource):
    """A load balancer's pool of backend IP addresses.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: The name of the resource that is unique within the set of backend address pools
     used by the load balancer. This name can be used to access the resource.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar type: Type of the resource.
    :vartype type: str
    :param load_balancer_backend_addresses: An array of backend addresses.
    :type load_balancer_backend_addresses:
     list[~azure.mgmt.network.v2020_04_01.models.LoadBalancerBackendAddress]
    :ivar backend_ip_configurations: An array of references to IP addresses defined in network
     interfaces.
    :vartype backend_ip_configurations:
     list[~azure.mgmt.network.v2020_04_01.models.NetworkInterfaceIPConfiguration]
    :ivar load_balancing_rules: An array of references to load balancing rules that use this
     backend address pool.
    :vartype load_balancing_rules: list[~azure.mgmt.network.v2020_04_01.models.SubResource]
    :ivar outbound_rule: A reference to an outbound rule that uses this backend address pool.
    :vartype outbound_rule: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :ivar outbound_rules: An array of references to outbound rules that use this backend address
     pool.
    :vartype outbound_rules: list[~azure.mgmt.network.v2020_04_01.models.SubResource]
    :ivar provisioning_state: The provisioning state of the backend address pool resource. Possible
     values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    # Server-populated fields; the serializer drops them from outgoing payloads.
    _validation = {
        "etag": {"readonly": True},
        "type": {"readonly": True},
        "backend_ip_configurations": {"readonly": True},
        "load_balancing_rules": {"readonly": True},
        "outbound_rule": {"readonly": True},
        "outbound_rules": {"readonly": True},
        "provisioning_state": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "etag": {"key": "etag", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "load_balancer_backend_addresses": {"key": "properties.loadBalancerBackendAddresses", "type": "[LoadBalancerBackendAddress]"},
        "backend_ip_configurations": {"key": "properties.backendIPConfigurations", "type": "[NetworkInterfaceIPConfiguration]"},
        "load_balancing_rules": {"key": "properties.loadBalancingRules", "type": "[SubResource]"},
        "outbound_rule": {"key": "properties.outboundRule", "type": "SubResource"},
        "outbound_rules": {"key": "properties.outboundRules", "type": "[SubResource]"},
        "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(BackendAddressPool, self).__init__(**kwargs)
        self.name = kwargs.get("name")
        # Read-only attributes start as None and are filled in by deserialization.
        self.etag = None
        self.type = None
        self.load_balancer_backend_addresses = kwargs.get("load_balancer_backend_addresses")
        self.backend_ip_configurations = None
        self.load_balancing_rules = None
        self.outbound_rule = None
        self.outbound_rules = None
        self.provisioning_state = None
class BastionActiveSession(msrest.serialization.Model):
    """Detail of one active Bastion session against a target.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar session_id: A unique id for the session.
    :vartype session_id: str
    :ivar start_time: The time when the session started.
    :vartype start_time: any
    :ivar target_subscription_id: The subscription id for the target virtual machine.
    :vartype target_subscription_id: str
    :ivar resource_type: The type of the resource.
    :vartype resource_type: str
    :ivar target_host_name: The host name of the target.
    :vartype target_host_name: str
    :ivar target_resource_group: The resource group of the target.
    :vartype target_resource_group: str
    :ivar user_name: The user name who is active on this session.
    :vartype user_name: str
    :ivar target_ip_address: The IP Address of the target.
    :vartype target_ip_address: str
    :ivar protocol: The protocol used to connect to the target. Possible values include: "SSH",
     "RDP".
    :vartype protocol: str or ~azure.mgmt.network.v2020_04_01.models.BastionConnectProtocol
    :ivar target_resource_id: The resource id of the target.
    :vartype target_resource_id: str
    :ivar session_duration_in_mins: Duration in mins the session has been active.
    :vartype session_duration_in_mins: float
    """

    # Every field is server-populated; nothing is accepted from the caller.
    _validation = {
        "session_id": {"readonly": True},
        "start_time": {"readonly": True},
        "target_subscription_id": {"readonly": True},
        "resource_type": {"readonly": True},
        "target_host_name": {"readonly": True},
        "target_resource_group": {"readonly": True},
        "user_name": {"readonly": True},
        "target_ip_address": {"readonly": True},
        "protocol": {"readonly": True},
        "target_resource_id": {"readonly": True},
        "session_duration_in_mins": {"readonly": True},
    }

    _attribute_map = {
        "session_id": {"key": "sessionId", "type": "str"},
        "start_time": {"key": "startTime", "type": "object"},
        "target_subscription_id": {"key": "targetSubscriptionId", "type": "str"},
        "resource_type": {"key": "resourceType", "type": "str"},
        "target_host_name": {"key": "targetHostName", "type": "str"},
        "target_resource_group": {"key": "targetResourceGroup", "type": "str"},
        "user_name": {"key": "userName", "type": "str"},
        "target_ip_address": {"key": "targetIpAddress", "type": "str"},
        "protocol": {"key": "protocol", "type": "str"},
        "target_resource_id": {"key": "targetResourceId", "type": "str"},
        "session_duration_in_mins": {"key": "sessionDurationInMins", "type": "float"},
    }

    def __init__(self, **kwargs):
        super(BastionActiveSession, self).__init__(**kwargs)
        # All attributes are read-only: initialize to None and let deserialization fill them.
        self.session_id = None
        self.start_time = None
        self.target_subscription_id = None
        self.resource_type = None
        self.target_host_name = None
        self.target_resource_group = None
        self.user_name = None
        self.target_ip_address = None
        self.protocol = None
        self.target_resource_id = None
        self.session_duration_in_mins = None
class BastionActiveSessionListResult(msrest.serialization.Model):
    """Paged response returned by GetActiveSessions.

    :param value: List of active sessions on the bastion.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.BastionActiveSession]
    :param next_link: The URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        "value": {"key": "value", "type": "[BastionActiveSession]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(BastionActiveSessionListResult, self).__init__(**kwargs)
        self.value = kwargs.get("value")
        self.next_link = kwargs.get("next_link")
class BastionHost(Resource):
    """An Azure Bastion Host resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param ip_configurations: IP configuration of the Bastion Host resource.
    :type ip_configurations:
     list[~azure.mgmt.network.v2020_04_01.models.BastionHostIPConfiguration]
    :param dns_name: FQDN for the endpoint on which bastion host is accessible.
    :type dns_name: str
    :ivar provisioning_state: The provisioning state of the bastion host resource. Possible values
     include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    _validation = {
        "name": {"readonly": True},
        "type": {"readonly": True},
        "etag": {"readonly": True},
        "provisioning_state": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "etag": {"key": "etag", "type": "str"},
        "ip_configurations": {"key": "properties.ipConfigurations", "type": "[BastionHostIPConfiguration]"},
        "dns_name": {"key": "properties.dnsName", "type": "str"},
        "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(BastionHost, self).__init__(**kwargs)
        # Read-only fields start as None; user-settable ones come from kwargs.
        self.etag = None
        self.ip_configurations = kwargs.get("ip_configurations")
        self.dns_name = kwargs.get("dns_name")
        self.provisioning_state = None
class BastionHostIPConfiguration(SubResource):
    """The IP configuration of a Bastion Host.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the resource that is unique within a resource group. This name can be used
     to access the resource.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar type: Ip configuration type.
    :vartype type: str
    :param subnet: Reference of the subnet resource.
    :type subnet: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param public_ip_address: Reference of the PublicIP resource.
    :type public_ip_address: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :ivar provisioning_state: The provisioning state of the bastion host IP configuration resource.
     Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    :param private_ip_allocation_method: Private IP allocation method. Possible values include:
     "Static", "Dynamic".
    :type private_ip_allocation_method: str or
     ~azure.mgmt.network.v2020_04_01.models.IPAllocationMethod
    """

    _validation = {
        "etag": {"readonly": True},
        "type": {"readonly": True},
        "provisioning_state": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "etag": {"key": "etag", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "subnet": {"key": "properties.subnet", "type": "SubResource"},
        "public_ip_address": {"key": "properties.publicIPAddress", "type": "SubResource"},
        "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
        "private_ip_allocation_method": {"key": "properties.privateIPAllocationMethod", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(BastionHostIPConfiguration, self).__init__(**kwargs)
        self.name = kwargs.get("name")
        self.etag = None
        self.type = None
        self.subnet = kwargs.get("subnet")
        self.public_ip_address = kwargs.get("public_ip_address")
        self.provisioning_state = None
        self.private_ip_allocation_method = kwargs.get("private_ip_allocation_method")
class BastionHostListResult(msrest.serialization.Model):
    """Paged response for the ListBastionHosts API service call.

    :param value: List of Bastion Hosts in a resource group.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.BastionHost]
    :param next_link: URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        "value": {"key": "value", "type": "[BastionHost]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(BastionHostListResult, self).__init__(**kwargs)
        self.value = kwargs.get("value")
        self.next_link = kwargs.get("next_link")
class BastionSessionDeleteResult(msrest.serialization.Model):
    """Paged response returned by DisconnectActiveSessions.

    :param value: List of sessions with their corresponding state.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.BastionSessionState]
    :param next_link: The URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        "value": {"key": "value", "type": "[BastionSessionState]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(BastionSessionDeleteResult, self).__init__(**kwargs)
        self.value = kwargs.get("value")
        self.next_link = kwargs.get("next_link")
class BastionSessionState(msrest.serialization.Model):
    """State detail of one Bastion session on a target.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar session_id: A unique id for the session.
    :vartype session_id: str
    :ivar message: Used for extra information.
    :vartype message: str
    :ivar state: The state of the session. Disconnected/Failed/NotFound.
    :vartype state: str
    """

    # All fields are server-populated.
    _validation = {
        "session_id": {"readonly": True},
        "message": {"readonly": True},
        "state": {"readonly": True},
    }

    _attribute_map = {
        "session_id": {"key": "sessionId", "type": "str"},
        "message": {"key": "message", "type": "str"},
        "state": {"key": "state", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(BastionSessionState, self).__init__(**kwargs)
        self.session_id = None
        self.message = None
        self.state = None
class BastionShareableLink(msrest.serialization.Model):
    """A Bastion shareable link for one virtual machine.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :param vm: Required. Reference of the virtual machine resource.
    :type vm: ~azure.mgmt.network.v2020_04_01.models.VM
    :ivar bsl: The unique Bastion Shareable Link to the virtual machine.
    :vartype bsl: str
    :ivar created_at: The time when the link was created.
    :vartype created_at: str
    :ivar message: Optional field indicating the warning or error message related to the vm in case
     of partial failure.
    :vartype message: str
    """

    _validation = {
        "vm": {"required": True},
        "bsl": {"readonly": True},
        "created_at": {"readonly": True},
        "message": {"readonly": True},
    }

    _attribute_map = {
        "vm": {"key": "vm", "type": "VM"},
        "bsl": {"key": "bsl", "type": "str"},
        "created_at": {"key": "createdAt", "type": "str"},
        "message": {"key": "message", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(BastionShareableLink, self).__init__(**kwargs)
        # 'vm' is required: item access raises KeyError when it is missing.
        self.vm = kwargs["vm"]
        self.bsl = None
        self.created_at = None
        self.message = None
class BastionShareableLinkListRequest(msrest.serialization.Model):
    """Request body shared by all the Bastion Shareable Link endpoints.

    :param vms: List of VM references.
    :type vms: list[~azure.mgmt.network.v2020_04_01.models.BastionShareableLink]
    """

    _attribute_map = {
        "vms": {"key": "vms", "type": "[BastionShareableLink]"},
    }

    def __init__(self, **kwargs):
        super(BastionShareableLinkListRequest, self).__init__(**kwargs)
        self.vms = kwargs.get("vms")
class BastionShareableLinkListResult(msrest.serialization.Model):
    """Paged response shared by all the Bastion Shareable Link endpoints.

    :param value: List of Bastion Shareable Links for the request.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.BastionShareableLink]
    :param next_link: The URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        "value": {"key": "value", "type": "[BastionShareableLink]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(BastionShareableLinkListResult, self).__init__(**kwargs)
        self.value = kwargs.get("value")
        self.next_link = kwargs.get("next_link")
class BGPCommunity(msrest.serialization.Model):
    """BGP community information offered in Service Community resources.

    :param service_supported_region: The region which the service support. e.g. For O365, region is
     Global.
    :type service_supported_region: str
    :param community_name: The name of the bgp community. e.g. Skype.
    :type community_name: str
    :param community_value: The value of the bgp community. For more information:
     https://docs.microsoft.com/en-us/azure/expressroute/expressroute-routing.
    :type community_value: str
    :param community_prefixes: The prefixes that the bgp community contains.
    :type community_prefixes: list[str]
    :param is_authorized_to_use: Customer is authorized to use bgp community or not.
    :type is_authorized_to_use: bool
    :param service_group: The service group of the bgp community contains.
    :type service_group: str
    """

    _attribute_map = {
        "service_supported_region": {"key": "serviceSupportedRegion", "type": "str"},
        "community_name": {"key": "communityName", "type": "str"},
        "community_value": {"key": "communityValue", "type": "str"},
        "community_prefixes": {"key": "communityPrefixes", "type": "[str]"},
        "is_authorized_to_use": {"key": "isAuthorizedToUse", "type": "bool"},
        "service_group": {"key": "serviceGroup", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(BGPCommunity, self).__init__(**kwargs)
        self.service_supported_region = kwargs.get("service_supported_region")
        self.community_name = kwargs.get("community_name")
        self.community_value = kwargs.get("community_value")
        self.community_prefixes = kwargs.get("community_prefixes")
        self.is_authorized_to_use = kwargs.get("is_authorized_to_use")
        self.service_group = kwargs.get("service_group")
class BgpPeerStatus(msrest.serialization.Model):
    """Status details for a single BGP peer.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar local_address: The virtual network gateway's local address.
    :vartype local_address: str
    :ivar neighbor: The remote BGP peer.
    :vartype neighbor: str
    :ivar asn: The autonomous system number of the remote BGP peer.
    :vartype asn: long
    :ivar state: The BGP peer state. Possible values include: "Unknown", "Stopped", "Idle",
     "Connecting", "Connected".
    :vartype state: str or ~azure.mgmt.network.v2020_04_01.models.BgpPeerState
    :ivar connected_duration: For how long the peering has been up.
    :vartype connected_duration: str
    :ivar routes_received: The number of routes learned from this peer.
    :vartype routes_received: long
    :ivar messages_sent: The number of BGP messages sent.
    :vartype messages_sent: long
    :ivar messages_received: The number of BGP messages received.
    :vartype messages_received: long
    """

    # 'asn' is constrained to the unsigned 32-bit range used for AS numbers.
    _validation = {
        "local_address": {"readonly": True},
        "neighbor": {"readonly": True},
        "asn": {"readonly": True, "maximum": 4294967295, "minimum": 0},
        "state": {"readonly": True},
        "connected_duration": {"readonly": True},
        "routes_received": {"readonly": True},
        "messages_sent": {"readonly": True},
        "messages_received": {"readonly": True},
    }

    _attribute_map = {
        "local_address": {"key": "localAddress", "type": "str"},
        "neighbor": {"key": "neighbor", "type": "str"},
        "asn": {"key": "asn", "type": "long"},
        "state": {"key": "state", "type": "str"},
        "connected_duration": {"key": "connectedDuration", "type": "str"},
        "routes_received": {"key": "routesReceived", "type": "long"},
        "messages_sent": {"key": "messagesSent", "type": "long"},
        "messages_received": {"key": "messagesReceived", "type": "long"},
    }

    def __init__(self, **kwargs):
        super(BgpPeerStatus, self).__init__(**kwargs)
        # Everything here is server-populated.
        self.local_address = None
        self.neighbor = None
        self.asn = None
        self.state = None
        self.connected_duration = None
        self.routes_received = None
        self.messages_sent = None
        self.messages_received = None
class BgpPeerStatusListResult(msrest.serialization.Model):
    """Response body for the list BGP peer status API service call.

    :param value: List of BGP peers.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.BgpPeerStatus]
    """

    _attribute_map = {
        "value": {"key": "value", "type": "[BgpPeerStatus]"},
    }

    def __init__(self, **kwargs):
        super(BgpPeerStatusListResult, self).__init__(**kwargs)
        self.value = kwargs.get("value")
class BgpServiceCommunity(Resource):
    """Properties of a BGP Service Community resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param service_name: The name of the bgp community. e.g. Skype.
    :type service_name: str
    :param bgp_communities: A list of bgp communities.
    :type bgp_communities: list[~azure.mgmt.network.v2020_04_01.models.BGPCommunity]
    """

    _validation = {
        "name": {"readonly": True},
        "type": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "service_name": {"key": "properties.serviceName", "type": "str"},
        "bgp_communities": {"key": "properties.bgpCommunities", "type": "[BGPCommunity]"},
    }

    def __init__(self, **kwargs):
        super(BgpServiceCommunity, self).__init__(**kwargs)
        self.service_name = kwargs.get("service_name")
        self.bgp_communities = kwargs.get("bgp_communities")
class BgpServiceCommunityListResult(msrest.serialization.Model):
    """Paged response for the ListServiceCommunity API service call.

    :param value: A list of service community resources.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.BgpServiceCommunity]
    :param next_link: The URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        "value": {"key": "value", "type": "[BgpServiceCommunity]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(BgpServiceCommunityListResult, self).__init__(**kwargs)
        self.value = kwargs.get("value")
        self.next_link = kwargs.get("next_link")
class BgpSettings(msrest.serialization.Model):
    """Configuration details for a BGP speaker.

    :param asn: The BGP speaker's ASN.
    :type asn: long
    :param bgp_peering_address: The BGP peering address and BGP identifier of this BGP speaker.
    :type bgp_peering_address: str
    :param peer_weight: The weight added to routes learned from this BGP speaker.
    :type peer_weight: int
    :param bgp_peering_addresses: BGP peering address with IP configuration ID for virtual network
     gateway.
    :type bgp_peering_addresses:
     list[~azure.mgmt.network.v2020_04_01.models.IPConfigurationBgpPeeringAddress]
    """

    # ASN must fall within the unsigned 32-bit range.
    _validation = {
        "asn": {"maximum": 4294967295, "minimum": 0},
    }

    _attribute_map = {
        "asn": {"key": "asn", "type": "long"},
        "bgp_peering_address": {"key": "bgpPeeringAddress", "type": "str"},
        "peer_weight": {"key": "peerWeight", "type": "int"},
        "bgp_peering_addresses": {"key": "bgpPeeringAddresses", "type": "[IPConfigurationBgpPeeringAddress]"},
    }

    def __init__(self, **kwargs):
        super(BgpSettings, self).__init__(**kwargs)
        self.asn = kwargs.get("asn")
        self.bgp_peering_address = kwargs.get("bgp_peering_address")
        self.peer_weight = kwargs.get("peer_weight")
        self.bgp_peering_addresses = kwargs.get("bgp_peering_addresses")
class CheckPrivateLinkServiceVisibilityRequest(msrest.serialization.Model):
    """Request body for the CheckPrivateLinkServiceVisibility API service call.

    :param private_link_service_alias: The alias of the private link service.
    :type private_link_service_alias: str
    """

    _attribute_map = {
        "private_link_service_alias": {"key": "privateLinkServiceAlias", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(CheckPrivateLinkServiceVisibilityRequest, self).__init__(**kwargs)
        self.private_link_service_alias = kwargs.get("private_link_service_alias")
class CloudErrorBody(msrest.serialization.Model):
    """The body of an error response from the service.

    :param code: An identifier for the error. Codes are invariant and are intended to be consumed
     programmatically.
    :type code: str
    :param message: A message describing the error, intended to be suitable for display in a user
     interface.
    :type message: str
    :param target: The target of the particular error. For example, the name of the property in
     error.
    :type target: str
    :param details: A list of additional details about the error.
    :type details: list[~azure.mgmt.network.v2020_04_01.models.CloudErrorBody]
    """

    _attribute_map = {
        "code": {"key": "code", "type": "str"},
        "message": {"key": "message", "type": "str"},
        "target": {"key": "target", "type": "str"},
        "details": {"key": "details", "type": "[CloudErrorBody]"},
    }

    def __init__(self, **kwargs):
        super(CloudErrorBody, self).__init__(**kwargs)
        self.code = kwargs.get("code")
        self.message = kwargs.get("message")
        self.target = kwargs.get("target")
        # 'details' nests further CloudErrorBody instances recursively.
        self.details = kwargs.get("details")
class Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties(msrest.serialization.Model):
    """User-assigned identity properties (auto-generated schema component name).

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar principal_id: The principal id of user assigned identity.
    :vartype principal_id: str
    :ivar client_id: The client id of user assigned identity.
    :vartype client_id: str
    """

    _validation = {
        "principal_id": {"readonly": True},
        "client_id": {"readonly": True},
    }

    _attribute_map = {
        "principal_id": {"key": "principalId", "type": "str"},
        "client_id": {"key": "clientId", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties, self).__init__(**kwargs)
        self.principal_id = None
        self.client_id = None
class ConnectionMonitor(msrest.serialization.Model):
    """Parameters defining the operation that creates a connection monitor.

    :param location: Connection monitor location.
    :type location: str
    :param tags: A set of tags. Connection monitor tags.
    :type tags: dict[str, str]
    :param source: Describes the source of connection monitor.
    :type source: ~azure.mgmt.network.v2020_04_01.models.ConnectionMonitorSource
    :param destination: Describes the destination of connection monitor.
    :type destination: ~azure.mgmt.network.v2020_04_01.models.ConnectionMonitorDestination
    :param auto_start: Determines if the connection monitor will start automatically once created.
    :type auto_start: bool
    :param monitoring_interval_in_seconds: Monitoring interval in seconds.
    :type monitoring_interval_in_seconds: int
    :param endpoints: List of connection monitor endpoints.
    :type endpoints: list[~azure.mgmt.network.v2020_04_01.models.ConnectionMonitorEndpoint]
    :param test_configurations: List of connection monitor test configurations.
    :type test_configurations:
     list[~azure.mgmt.network.v2020_04_01.models.ConnectionMonitorTestConfiguration]
    :param test_groups: List of connection monitor test groups.
    :type test_groups: list[~azure.mgmt.network.v2020_04_01.models.ConnectionMonitorTestGroup]
    :param outputs: List of connection monitor outputs.
    :type outputs: list[~azure.mgmt.network.v2020_04_01.models.ConnectionMonitorOutput]
    :param notes: Optional notes to be associated with the connection monitor.
    :type notes: str
    """

    _attribute_map = {
        "location": {"key": "location", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "source": {"key": "properties.source", "type": "ConnectionMonitorSource"},
        "destination": {"key": "properties.destination", "type": "ConnectionMonitorDestination"},
        "auto_start": {"key": "properties.autoStart", "type": "bool"},
        "monitoring_interval_in_seconds": {"key": "properties.monitoringIntervalInSeconds", "type": "int"},
        "endpoints": {"key": "properties.endpoints", "type": "[ConnectionMonitorEndpoint]"},
        "test_configurations": {"key": "properties.testConfigurations", "type": "[ConnectionMonitorTestConfiguration]"},
        "test_groups": {"key": "properties.testGroups", "type": "[ConnectionMonitorTestGroup]"},
        "outputs": {"key": "properties.outputs", "type": "[ConnectionMonitorOutput]"},
        "notes": {"key": "properties.notes", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(ConnectionMonitor, self).__init__(**kwargs)
        self.location = kwargs.get("location")
        self.tags = kwargs.get("tags")
        self.source = kwargs.get("source")
        self.destination = kwargs.get("destination")
        # Service defaults: monitors auto-start and poll every 60 seconds unless overridden.
        self.auto_start = kwargs.get("auto_start", True)
        self.monitoring_interval_in_seconds = kwargs.get("monitoring_interval_in_seconds", 60)
        self.endpoints = kwargs.get("endpoints")
        self.test_configurations = kwargs.get("test_configurations")
        self.test_groups = kwargs.get("test_groups")
        self.outputs = kwargs.get("outputs")
        self.notes = kwargs.get("notes")
class ConnectionMonitorDestination(msrest.serialization.Model):
    """The destination of a connection monitor.

    :param resource_id: The ID of the resource used as the destination by connection monitor.
    :type resource_id: str
    :param address: Address of the connection monitor destination (IP or domain name).
    :type address: str
    :param port: The destination port used by connection monitor.
    :type port: int
    """

    _attribute_map = {
        "resource_id": {"key": "resourceId", "type": "str"},
        "address": {"key": "address", "type": "str"},
        "port": {"key": "port", "type": "int"},
    }

    def __init__(self, **kwargs):
        super(ConnectionMonitorDestination, self).__init__(**kwargs)
        self.resource_id = kwargs.get("resource_id")
        self.address = kwargs.get("address")
        self.port = kwargs.get("port")
class ConnectionMonitorEndpoint(msrest.serialization.Model):
    """A single connection monitor endpoint.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. The name of the connection monitor endpoint.
    :type name: str
    :param resource_id: Resource ID of the connection monitor endpoint.
    :type resource_id: str
    :param address: Address of the connection monitor endpoint (IP or domain name).
    :type address: str
    :param filter: Filter for sub-items within the endpoint.
    :type filter: ~azure.mgmt.network.v2020_04_01.models.ConnectionMonitorEndpointFilter
    """

    _validation = {
        "name": {"required": True},
    }

    _attribute_map = {
        "name": {"key": "name", "type": "str"},
        "resource_id": {"key": "resourceId", "type": "str"},
        "address": {"key": "address", "type": "str"},
        "filter": {"key": "filter", "type": "ConnectionMonitorEndpointFilter"},
    }

    def __init__(self, **kwargs):
        super(ConnectionMonitorEndpoint, self).__init__(**kwargs)
        # 'name' is required: item access raises KeyError when it is missing.
        self.name = kwargs["name"]
        self.resource_id = kwargs.get("resource_id")
        self.address = kwargs.get("address")
        self.filter = kwargs.get("filter")
class ConnectionMonitorEndpointFilter(msrest.serialization.Model):
    """A filter applied to a connection monitor endpoint.

    :param type: The behavior of the endpoint filter. Currently only 'Include' is supported.
     Possible values include: "Include".
    :type type: str or ~azure.mgmt.network.v2020_04_01.models.ConnectionMonitorEndpointFilterType
    :param items: List of items in the filter.
    :type items: list[~azure.mgmt.network.v2020_04_01.models.ConnectionMonitorEndpointFilterItem]
    """

    _attribute_map = {
        "type": {"key": "type", "type": "str"},
        "items": {"key": "items", "type": "[ConnectionMonitorEndpointFilterItem]"},
    }

    def __init__(self, **kwargs):
        super(ConnectionMonitorEndpointFilter, self).__init__(**kwargs)
        self.type = kwargs.get("type")
        self.items = kwargs.get("items")
class ConnectionMonitorEndpointFilterItem(msrest.serialization.Model):
    """One item within a connection monitor endpoint filter.

    :param type: The type of item included in the filter. Currently only 'AgentAddress' is
     supported. Possible values include: "AgentAddress".
    :type type: str or
     ~azure.mgmt.network.v2020_04_01.models.ConnectionMonitorEndpointFilterItemType
    :param address: The address of the filter item.
    :type address: str
    """

    _attribute_map = {
        "type": {"key": "type", "type": "str"},
        "address": {"key": "address", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(ConnectionMonitorEndpointFilterItem, self).__init__(**kwargs)
        self.type = kwargs.get("type")
        self.address = kwargs.get("address")
class ConnectionMonitorHttpConfiguration(msrest.serialization.Model):
    """HTTP configuration for a connection monitor test.

    :param port: The port to connect to.
    :type port: int
    :param method: The HTTP method to use. Possible values include: "Get", "Post".
    :type method: str or ~azure.mgmt.network.v2020_04_01.models.HTTPConfigurationMethod
    :param path: The path component of the URI. For instance, "/dir1/dir2".
    :type path: str
    :param request_headers: The HTTP headers to transmit with the request.
    :type request_headers: list[~azure.mgmt.network.v2020_04_01.models.HTTPHeader]
    :param valid_status_code_ranges: HTTP status codes to consider successful. For instance,
     "2xx,301-304,418".
    :type valid_status_code_ranges: list[str]
    :param prefer_https: Value indicating whether HTTPS is preferred over HTTP in cases where the
     choice is not explicit.
    :type prefer_https: bool
    """

    _attribute_map = {
        'port': {'key': 'port', 'type': 'int'},
        'method': {'key': 'method', 'type': 'str'},
        'path': {'key': 'path', 'type': 'str'},
        'request_headers': {'key': 'requestHeaders', 'type': '[HTTPHeader]'},
        'valid_status_code_ranges': {'key': 'validStatusCodeRanges', 'type': '[str]'},
        'prefer_https': {'key': 'preferHTTPS', 'type': 'bool'},
    }

    def __init__(self, **kwargs):
        super(ConnectionMonitorHttpConfiguration, self).__init__(**kwargs)
        # Every field is optional; missing keywords default to None.
        self.port = kwargs.get('port')
        self.method = kwargs.get('method')
        self.path = kwargs.get('path')
        self.request_headers = kwargs.get('request_headers')
        self.valid_status_code_ranges = kwargs.get('valid_status_code_ranges')
        self.prefer_https = kwargs.get('prefer_https')
class ConnectionMonitorIcmpConfiguration(msrest.serialization.Model):
    """ICMP configuration for a connection monitor test.

    :param disable_trace_route: Value indicating whether path evaluation with trace route should be
     disabled.
    :type disable_trace_route: bool
    """

    _attribute_map = {
        'disable_trace_route': {'key': 'disableTraceRoute', 'type': 'bool'},
    }

    def __init__(self, **kwargs):
        super(ConnectionMonitorIcmpConfiguration, self).__init__(**kwargs)
        # Optional flag; None when not supplied.
        self.disable_trace_route = kwargs.get('disable_trace_route')
class ConnectionMonitorListResult(msrest.serialization.Model):
    """Result page containing a list of connection monitors.

    :param value: Information about connection monitors.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.ConnectionMonitorResult]
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[ConnectionMonitorResult]'},
    }

    def __init__(self, **kwargs):
        super(ConnectionMonitorListResult, self).__init__(**kwargs)
        # Optional; None when the service returns no items.
        self.value = kwargs.get('value')
class ConnectionMonitorOutput(msrest.serialization.Model):
    """An output destination for connection monitor results.

    :param type: Connection monitor output destination type. Currently, only "Workspace" is
     supported. Possible values include: "Workspace".
    :type type: str or ~azure.mgmt.network.v2020_04_01.models.OutputType
    :param workspace_settings: Describes the settings for producing output into a log analytics
     workspace.
    :type workspace_settings:
     ~azure.mgmt.network.v2020_04_01.models.ConnectionMonitorWorkspaceSettings
    """

    _attribute_map = {
        'type': {'key': 'type', 'type': 'str'},
        'workspace_settings': {'key': 'workspaceSettings', 'type': 'ConnectionMonitorWorkspaceSettings'},
    }

    def __init__(self, **kwargs):
        super(ConnectionMonitorOutput, self).__init__(**kwargs)
        # Both fields are optional; absent keywords default to None.
        self.type = kwargs.get('type')
        self.workspace_settings = kwargs.get('workspace_settings')
class ConnectionMonitorParameters(msrest.serialization.Model):
    """Parameters used to create a connection monitor.

    :param source: Describes the source of connection monitor.
    :type source: ~azure.mgmt.network.v2020_04_01.models.ConnectionMonitorSource
    :param destination: Describes the destination of connection monitor.
    :type destination: ~azure.mgmt.network.v2020_04_01.models.ConnectionMonitorDestination
    :param auto_start: Determines if the connection monitor will start automatically once created.
    :type auto_start: bool
    :param monitoring_interval_in_seconds: Monitoring interval in seconds.
    :type monitoring_interval_in_seconds: int
    :param endpoints: List of connection monitor endpoints.
    :type endpoints: list[~azure.mgmt.network.v2020_04_01.models.ConnectionMonitorEndpoint]
    :param test_configurations: List of connection monitor test configurations.
    :type test_configurations:
     list[~azure.mgmt.network.v2020_04_01.models.ConnectionMonitorTestConfiguration]
    :param test_groups: List of connection monitor test groups.
    :type test_groups: list[~azure.mgmt.network.v2020_04_01.models.ConnectionMonitorTestGroup]
    :param outputs: List of connection monitor outputs.
    :type outputs: list[~azure.mgmt.network.v2020_04_01.models.ConnectionMonitorOutput]
    :param notes: Optional notes to be associated with the connection monitor.
    :type notes: str
    """

    _attribute_map = {
        'source': {'key': 'source', 'type': 'ConnectionMonitorSource'},
        'destination': {'key': 'destination', 'type': 'ConnectionMonitorDestination'},
        'auto_start': {'key': 'autoStart', 'type': 'bool'},
        'monitoring_interval_in_seconds': {'key': 'monitoringIntervalInSeconds', 'type': 'int'},
        'endpoints': {'key': 'endpoints', 'type': '[ConnectionMonitorEndpoint]'},
        'test_configurations': {'key': 'testConfigurations', 'type': '[ConnectionMonitorTestConfiguration]'},
        'test_groups': {'key': 'testGroups', 'type': '[ConnectionMonitorTestGroup]'},
        'outputs': {'key': 'outputs', 'type': '[ConnectionMonitorOutput]'},
        'notes': {'key': 'notes', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ConnectionMonitorParameters, self).__init__(**kwargs)
        self.source = kwargs.get('source')
        self.destination = kwargs.get('destination')
        # Service defaults: auto-start enabled, 60-second monitoring interval.
        self.auto_start = kwargs.get('auto_start', True)
        self.monitoring_interval_in_seconds = kwargs.get('monitoring_interval_in_seconds', 60)
        self.endpoints = kwargs.get('endpoints')
        self.test_configurations = kwargs.get('test_configurations')
        self.test_groups = kwargs.get('test_groups')
        self.outputs = kwargs.get('outputs')
        self.notes = kwargs.get('notes')
class ConnectionMonitorQueryResult(msrest.serialization.Model):
    """Query result holding a list of connection state snapshots.

    :param source_status: Status of connection monitor source. Possible values include: "Unknown",
     "Active", "Inactive".
    :type source_status: str or
     ~azure.mgmt.network.v2020_04_01.models.ConnectionMonitorSourceStatus
    :param states: Information about connection states.
    :type states: list[~azure.mgmt.network.v2020_04_01.models.ConnectionStateSnapshot]
    """

    _attribute_map = {
        'source_status': {'key': 'sourceStatus', 'type': 'str'},
        'states': {'key': 'states', 'type': '[ConnectionStateSnapshot]'},
    }

    def __init__(self, **kwargs):
        super(ConnectionMonitorQueryResult, self).__init__(**kwargs)
        # Both fields are optional; absent keywords default to None.
        self.source_status = kwargs.get('source_status')
        self.states = kwargs.get('states')
class ConnectionMonitorResult(msrest.serialization.Model):
    """Information about the connection monitor.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar name: Name of the connection monitor.
    :vartype name: str
    :ivar id: ID of the connection monitor.
    :vartype id: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar type: Connection monitor type.
    :vartype type: str
    :param location: Connection monitor location.
    :type location: str
    :param tags: A set of tags. Connection monitor tags.
    :type tags: dict[str, str]
    :param source: Describes the source of connection monitor.
    :type source: ~azure.mgmt.network.v2020_04_01.models.ConnectionMonitorSource
    :param destination: Describes the destination of connection monitor.
    :type destination: ~azure.mgmt.network.v2020_04_01.models.ConnectionMonitorDestination
    :param auto_start: Determines if the connection monitor will start automatically once created.
    :type auto_start: bool
    :param monitoring_interval_in_seconds: Monitoring interval in seconds.
    :type monitoring_interval_in_seconds: int
    :param endpoints: List of connection monitor endpoints.
    :type endpoints: list[~azure.mgmt.network.v2020_04_01.models.ConnectionMonitorEndpoint]
    :param test_configurations: List of connection monitor test configurations.
    :type test_configurations:
     list[~azure.mgmt.network.v2020_04_01.models.ConnectionMonitorTestConfiguration]
    :param test_groups: List of connection monitor test groups.
    :type test_groups: list[~azure.mgmt.network.v2020_04_01.models.ConnectionMonitorTestGroup]
    :param outputs: List of connection monitor outputs.
    :type outputs: list[~azure.mgmt.network.v2020_04_01.models.ConnectionMonitorOutput]
    :param notes: Optional notes to be associated with the connection monitor.
    :type notes: str
    :ivar provisioning_state: The provisioning state of the connection monitor. Possible values
     include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    :ivar start_time: The date and time when the connection monitor was started.
    :vartype start_time: ~datetime.datetime
    :ivar monitoring_status: The monitoring status of the connection monitor.
    :vartype monitoring_status: str
    :ivar connection_monitor_type: Type of connection monitor. Possible values include:
     "MultiEndpoint", "SingleSourceDestination".
    :vartype connection_monitor_type: str or
     ~azure.mgmt.network.v2020_04_01.models.ConnectionMonitorType
    """

    # Fields marked read-only are populated by the service and ignored on requests.
    _validation = {
        'name': {'readonly': True},
        'id': {'readonly': True},
        'etag': {'readonly': True},
        'type': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'start_time': {'readonly': True},
        'monitoring_status': {'readonly': True},
        'connection_monitor_type': {'readonly': True},
    }

    # Properties nested under the REST "properties" envelope use 'properties.*' keys.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'source': {'key': 'properties.source', 'type': 'ConnectionMonitorSource'},
        'destination': {'key': 'properties.destination', 'type': 'ConnectionMonitorDestination'},
        'auto_start': {'key': 'properties.autoStart', 'type': 'bool'},
        'monitoring_interval_in_seconds': {'key': 'properties.monitoringIntervalInSeconds', 'type': 'int'},
        'endpoints': {'key': 'properties.endpoints', 'type': '[ConnectionMonitorEndpoint]'},
        'test_configurations': {'key': 'properties.testConfigurations', 'type': '[ConnectionMonitorTestConfiguration]'},
        'test_groups': {'key': 'properties.testGroups', 'type': '[ConnectionMonitorTestGroup]'},
        'outputs': {'key': 'properties.outputs', 'type': '[ConnectionMonitorOutput]'},
        'notes': {'key': 'properties.notes', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'start_time': {'key': 'properties.startTime', 'type': 'iso-8601'},
        'monitoring_status': {'key': 'properties.monitoringStatus', 'type': 'str'},
        'connection_monitor_type': {'key': 'properties.connectionMonitorType', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ConnectionMonitorResult, self).__init__(**kwargs)
        # Server-populated fields start as None and are filled in by deserialization.
        self.name = None
        self.id = None
        self.etag = None
        self.type = None
        self.location = kwargs.get('location', None)
        self.tags = kwargs.get('tags', None)
        self.source = kwargs.get('source', None)
        self.destination = kwargs.get('destination', None)
        # Defaults mirror the service behavior: auto-start on, 60-second interval.
        self.auto_start = kwargs.get('auto_start', True)
        self.monitoring_interval_in_seconds = kwargs.get('monitoring_interval_in_seconds', 60)
        self.endpoints = kwargs.get('endpoints', None)
        self.test_configurations = kwargs.get('test_configurations', None)
        self.test_groups = kwargs.get('test_groups', None)
        self.outputs = kwargs.get('outputs', None)
        self.notes = kwargs.get('notes', None)
        self.provisioning_state = None
        self.start_time = None
        self.monitoring_status = None
        self.connection_monitor_type = None
class ConnectionMonitorResultProperties(ConnectionMonitorParameters):
    """Properties of a connection monitor result.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param source: Describes the source of connection monitor.
    :type source: ~azure.mgmt.network.v2020_04_01.models.ConnectionMonitorSource
    :param destination: Describes the destination of connection monitor.
    :type destination: ~azure.mgmt.network.v2020_04_01.models.ConnectionMonitorDestination
    :param auto_start: Determines if the connection monitor will start automatically once created.
    :type auto_start: bool
    :param monitoring_interval_in_seconds: Monitoring interval in seconds.
    :type monitoring_interval_in_seconds: int
    :param endpoints: List of connection monitor endpoints.
    :type endpoints: list[~azure.mgmt.network.v2020_04_01.models.ConnectionMonitorEndpoint]
    :param test_configurations: List of connection monitor test configurations.
    :type test_configurations:
     list[~azure.mgmt.network.v2020_04_01.models.ConnectionMonitorTestConfiguration]
    :param test_groups: List of connection monitor test groups.
    :type test_groups: list[~azure.mgmt.network.v2020_04_01.models.ConnectionMonitorTestGroup]
    :param outputs: List of connection monitor outputs.
    :type outputs: list[~azure.mgmt.network.v2020_04_01.models.ConnectionMonitorOutput]
    :param notes: Optional notes to be associated with the connection monitor.
    :type notes: str
    :ivar provisioning_state: The provisioning state of the connection monitor. Possible values
     include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    :ivar start_time: The date and time when the connection monitor was started.
    :vartype start_time: ~datetime.datetime
    :ivar monitoring_status: The monitoring status of the connection monitor.
    :vartype monitoring_status: str
    :ivar connection_monitor_type: Type of connection monitor. Possible values include:
     "MultiEndpoint", "SingleSourceDestination".
    :vartype connection_monitor_type: str or
     ~azure.mgmt.network.v2020_04_01.models.ConnectionMonitorType
    """

    _validation = {
        'provisioning_state': {'readonly': True},
        'start_time': {'readonly': True},
        'monitoring_status': {'readonly': True},
        'connection_monitor_type': {'readonly': True},
    }

    _attribute_map = {
        'source': {'key': 'source', 'type': 'ConnectionMonitorSource'},
        'destination': {'key': 'destination', 'type': 'ConnectionMonitorDestination'},
        'auto_start': {'key': 'autoStart', 'type': 'bool'},
        'monitoring_interval_in_seconds': {'key': 'monitoringIntervalInSeconds', 'type': 'int'},
        'endpoints': {'key': 'endpoints', 'type': '[ConnectionMonitorEndpoint]'},
        'test_configurations': {'key': 'testConfigurations', 'type': '[ConnectionMonitorTestConfiguration]'},
        'test_groups': {'key': 'testGroups', 'type': '[ConnectionMonitorTestGroup]'},
        'outputs': {'key': 'outputs', 'type': '[ConnectionMonitorOutput]'},
        'notes': {'key': 'notes', 'type': 'str'},
        'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
        'start_time': {'key': 'startTime', 'type': 'iso-8601'},
        'monitoring_status': {'key': 'monitoringStatus', 'type': 'str'},
        'connection_monitor_type': {'key': 'connectionMonitorType', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        # Writable fields are handled by the ConnectionMonitorParameters base class.
        super(ConnectionMonitorResultProperties, self).__init__(**kwargs)
        # Read-only fields are populated by the server during deserialization.
        self.provisioning_state = None
        self.start_time = None
        self.monitoring_status = None
        self.connection_monitor_type = None
class ConnectionMonitorSource(msrest.serialization.Model):
    """The source side of a connection monitor.

    All required parameters must be populated in order to send to Azure.

    :param resource_id: Required. The ID of the resource used as the source by connection monitor.
    :type resource_id: str
    :param port: The source port used by connection monitor.
    :type port: int
    """

    _validation = {
        'resource_id': {'required': True},
    }

    _attribute_map = {
        'resource_id': {'key': 'resourceId', 'type': 'str'},
        'port': {'key': 'port', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        super(ConnectionMonitorSource, self).__init__(**kwargs)
        # 'resource_id' is required; indexing raises KeyError when missing.
        self.resource_id = kwargs['resource_id']
        self.port = kwargs.get('port')
class ConnectionMonitorSuccessThreshold(msrest.serialization.Model):
    """Threshold for declaring a connection monitor test successful.

    :param checks_failed_percent: The maximum percentage of failed checks permitted for a test to
     evaluate as successful.
    :type checks_failed_percent: int
    :param round_trip_time_ms: The maximum round-trip time in milliseconds permitted for a test to
     evaluate as successful.
    :type round_trip_time_ms: float
    """

    _attribute_map = {
        'checks_failed_percent': {'key': 'checksFailedPercent', 'type': 'int'},
        'round_trip_time_ms': {'key': 'roundTripTimeMs', 'type': 'float'},
    }

    def __init__(self, **kwargs):
        super(ConnectionMonitorSuccessThreshold, self).__init__(**kwargs)
        # Both thresholds are optional; absent keywords default to None.
        self.checks_failed_percent = kwargs.get('checks_failed_percent')
        self.round_trip_time_ms = kwargs.get('round_trip_time_ms')
class ConnectionMonitorTcpConfiguration(msrest.serialization.Model):
    """TCP configuration for a connection monitor test.

    :param port: The port to connect to.
    :type port: int
    :param disable_trace_route: Value indicating whether path evaluation with trace route should be
     disabled.
    :type disable_trace_route: bool
    """

    _attribute_map = {
        'port': {'key': 'port', 'type': 'int'},
        'disable_trace_route': {'key': 'disableTraceRoute', 'type': 'bool'},
    }

    def __init__(self, **kwargs):
        super(ConnectionMonitorTcpConfiguration, self).__init__(**kwargs)
        # Both fields are optional; absent keywords default to None.
        self.port = kwargs.get('port')
        self.disable_trace_route = kwargs.get('disable_trace_route')
class ConnectionMonitorTestConfiguration(msrest.serialization.Model):
    """A connection monitor test configuration.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. The name of the connection monitor test configuration.
    :type name: str
    :param test_frequency_sec: The frequency of test evaluation, in seconds.
    :type test_frequency_sec: int
    :param protocol: Required. The protocol to use in test evaluation. Possible values include:
     "Tcp", "Http", "Icmp".
    :type protocol: str or
     ~azure.mgmt.network.v2020_04_01.models.ConnectionMonitorTestConfigurationProtocol
    :param preferred_ip_version: The preferred IP version to use in test evaluation. The connection
     monitor may choose to use a different version depending on other parameters. Possible values
     include: "IPv4", "IPv6".
    :type preferred_ip_version: str or ~azure.mgmt.network.v2020_04_01.models.PreferredIPVersion
    :param http_configuration: The parameters used to perform test evaluation over HTTP.
    :type http_configuration:
     ~azure.mgmt.network.v2020_04_01.models.ConnectionMonitorHttpConfiguration
    :param tcp_configuration: The parameters used to perform test evaluation over TCP.
    :type tcp_configuration:
     ~azure.mgmt.network.v2020_04_01.models.ConnectionMonitorTcpConfiguration
    :param icmp_configuration: The parameters used to perform test evaluation over ICMP.
    :type icmp_configuration:
     ~azure.mgmt.network.v2020_04_01.models.ConnectionMonitorIcmpConfiguration
    :param success_threshold: The threshold for declaring a test successful.
    :type success_threshold:
     ~azure.mgmt.network.v2020_04_01.models.ConnectionMonitorSuccessThreshold
    """

    _validation = {
        'name': {'required': True},
        'protocol': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'test_frequency_sec': {'key': 'testFrequencySec', 'type': 'int'},
        'protocol': {'key': 'protocol', 'type': 'str'},
        'preferred_ip_version': {'key': 'preferredIPVersion', 'type': 'str'},
        'http_configuration': {'key': 'httpConfiguration', 'type': 'ConnectionMonitorHttpConfiguration'},
        'tcp_configuration': {'key': 'tcpConfiguration', 'type': 'ConnectionMonitorTcpConfiguration'},
        'icmp_configuration': {'key': 'icmpConfiguration', 'type': 'ConnectionMonitorIcmpConfiguration'},
        'success_threshold': {'key': 'successThreshold', 'type': 'ConnectionMonitorSuccessThreshold'},
    }

    def __init__(self, **kwargs):
        super(ConnectionMonitorTestConfiguration, self).__init__(**kwargs)
        # 'name' and 'protocol' are required; indexing raises KeyError when missing.
        self.name = kwargs['name']
        self.test_frequency_sec = kwargs.get('test_frequency_sec')
        self.protocol = kwargs['protocol']
        self.preferred_ip_version = kwargs.get('preferred_ip_version')
        self.http_configuration = kwargs.get('http_configuration')
        self.tcp_configuration = kwargs.get('tcp_configuration')
        self.icmp_configuration = kwargs.get('icmp_configuration')
        self.success_threshold = kwargs.get('success_threshold')
class ConnectionMonitorTestGroup(msrest.serialization.Model):
    """A connection monitor test group.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. The name of the connection monitor test group.
    :type name: str
    :param disable: Value indicating whether test group is disabled.
    :type disable: bool
    :param test_configurations: Required. List of test configuration names.
    :type test_configurations: list[str]
    :param sources: Required. List of source endpoint names.
    :type sources: list[str]
    :param destinations: Required. List of destination endpoint names.
    :type destinations: list[str]
    """

    _validation = {
        'name': {'required': True},
        'test_configurations': {'required': True},
        'sources': {'required': True},
        'destinations': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'disable': {'key': 'disable', 'type': 'bool'},
        'test_configurations': {'key': 'testConfigurations', 'type': '[str]'},
        'sources': {'key': 'sources', 'type': '[str]'},
        'destinations': {'key': 'destinations', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        super(ConnectionMonitorTestGroup, self).__init__(**kwargs)
        # Required keys raise KeyError when missing; only 'disable' is optional.
        self.name = kwargs['name']
        self.disable = kwargs.get('disable')
        self.test_configurations = kwargs['test_configurations']
        self.sources = kwargs['sources']
        self.destinations = kwargs['destinations']
class ConnectionMonitorWorkspaceSettings(msrest.serialization.Model):
    """Settings for producing connection monitor output into a log analytics workspace.

    :param workspace_resource_id: Log analytics workspace resource ID.
    :type workspace_resource_id: str
    """

    _attribute_map = {
        'workspace_resource_id': {'key': 'workspaceResourceId', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ConnectionMonitorWorkspaceSettings, self).__init__(**kwargs)
        # Optional; None when not supplied.
        self.workspace_resource_id = kwargs.get('workspace_resource_id')
class ConnectionResetSharedKey(msrest.serialization.Model):
    """Request model for resetting a virtual network connection shared key.

    All required parameters must be populated in order to send to Azure.

    :param key_length: Required. The virtual network connection reset shared key length, should
     between 1 and 128.
    :type key_length: int
    """

    _validation = {
        'key_length': {'required': True, 'maximum': 128, 'minimum': 1},
    }

    _attribute_map = {
        'key_length': {'key': 'keyLength', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        super(ConnectionResetSharedKey, self).__init__(**kwargs)
        # Required; indexing raises KeyError when missing. Range is enforced by
        # msrest validation (1-128), not here.
        self.key_length = kwargs['key_length']
class ConnectionSharedKey(SubResource):
    """Response for GetConnectionSharedKey API service call.

    All required parameters must be populated in order to send to Azure.

    :param id: Resource ID.
    :type id: str
    :param value: Required. The virtual network connection shared key value.
    :type value: str
    """

    _validation = {
        'value': {'required': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'value': {'key': 'value', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        # 'id' is consumed by the SubResource base class.
        super(ConnectionSharedKey, self).__init__(**kwargs)
        # 'value' is required; indexing raises KeyError when missing.
        self.value = kwargs['value']
class ConnectionStateSnapshot(msrest.serialization.Model):
    """A snapshot of connection state at a point in time.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param connection_state: The connection state. Possible values include: "Reachable",
     "Unreachable", "Unknown".
    :type connection_state: str or ~azure.mgmt.network.v2020_04_01.models.ConnectionState
    :param start_time: The start time of the connection snapshot.
    :type start_time: ~datetime.datetime
    :param end_time: The end time of the connection snapshot.
    :type end_time: ~datetime.datetime
    :param evaluation_state: Connectivity analysis evaluation state. Possible values include:
     "NotStarted", "InProgress", "Completed".
    :type evaluation_state: str or ~azure.mgmt.network.v2020_04_01.models.EvaluationState
    :param avg_latency_in_ms: Average latency in ms.
    :type avg_latency_in_ms: int
    :param min_latency_in_ms: Minimum latency in ms.
    :type min_latency_in_ms: int
    :param max_latency_in_ms: Maximum latency in ms.
    :type max_latency_in_ms: int
    :param probes_sent: The number of sent probes.
    :type probes_sent: int
    :param probes_failed: The number of failed probes.
    :type probes_failed: int
    :ivar hops: List of hops between the source and the destination.
    :vartype hops: list[~azure.mgmt.network.v2020_04_01.models.ConnectivityHop]
    """

    _validation = {
        'hops': {'readonly': True},
    }

    _attribute_map = {
        'connection_state': {'key': 'connectionState', 'type': 'str'},
        'start_time': {'key': 'startTime', 'type': 'iso-8601'},
        'end_time': {'key': 'endTime', 'type': 'iso-8601'},
        'evaluation_state': {'key': 'evaluationState', 'type': 'str'},
        'avg_latency_in_ms': {'key': 'avgLatencyInMs', 'type': 'int'},
        'min_latency_in_ms': {'key': 'minLatencyInMs', 'type': 'int'},
        'max_latency_in_ms': {'key': 'maxLatencyInMs', 'type': 'int'},
        'probes_sent': {'key': 'probesSent', 'type': 'int'},
        'probes_failed': {'key': 'probesFailed', 'type': 'int'},
        'hops': {'key': 'hops', 'type': '[ConnectivityHop]'},
    }

    def __init__(self, **kwargs):
        super(ConnectionStateSnapshot, self).__init__(**kwargs)
        # All writable fields are optional; absent keywords default to None.
        self.connection_state = kwargs.get('connection_state')
        self.start_time = kwargs.get('start_time')
        self.end_time = kwargs.get('end_time')
        self.evaluation_state = kwargs.get('evaluation_state')
        self.avg_latency_in_ms = kwargs.get('avg_latency_in_ms')
        self.min_latency_in_ms = kwargs.get('min_latency_in_ms')
        self.max_latency_in_ms = kwargs.get('max_latency_in_ms')
        self.probes_sent = kwargs.get('probes_sent')
        self.probes_failed = kwargs.get('probes_failed')
        # Read-only; populated by the server during deserialization.
        self.hops = None
class ConnectivityDestination(msrest.serialization.Model):
    """The destination side of a connectivity check.

    :param resource_id: The ID of the resource to which a connection attempt will be made.
    :type resource_id: str
    :param address: The IP address or URI the resource to which a connection attempt will be made.
    :type address: str
    :param port: Port on which check connectivity will be performed.
    :type port: int
    """

    _attribute_map = {
        'resource_id': {'key': 'resourceId', 'type': 'str'},
        'address': {'key': 'address', 'type': 'str'},
        'port': {'key': 'port', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        super(ConnectivityDestination, self).__init__(**kwargs)
        # All fields are optional; absent keywords default to None.
        self.resource_id = kwargs.get('resource_id')
        self.address = kwargs.get('address')
        self.port = kwargs.get('port')
class ConnectivityHop(msrest.serialization.Model):
    """A single hop on the path between the source and the destination.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar type: The type of the hop.
    :vartype type: str
    :ivar id: The ID of the hop.
    :vartype id: str
    :ivar address: The IP address of the hop.
    :vartype address: str
    :ivar resource_id: The ID of the resource corresponding to this hop.
    :vartype resource_id: str
    :ivar next_hop_ids: List of next hop identifiers.
    :vartype next_hop_ids: list[str]
    :ivar issues: List of issues.
    :vartype issues: list[~azure.mgmt.network.v2020_04_01.models.ConnectivityIssue]
    """

    # Every field is server-populated and read-only.
    _validation = {
        'type': {'readonly': True},
        'id': {'readonly': True},
        'address': {'readonly': True},
        'resource_id': {'readonly': True},
        'next_hop_ids': {'readonly': True},
        'issues': {'readonly': True},
    }

    _attribute_map = {
        'type': {'key': 'type', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'address': {'key': 'address', 'type': 'str'},
        'resource_id': {'key': 'resourceId', 'type': 'str'},
        'next_hop_ids': {'key': 'nextHopIds', 'type': '[str]'},
        'issues': {'key': 'issues', 'type': '[ConnectivityIssue]'},
    }

    def __init__(self, **kwargs):
        super(ConnectivityHop, self).__init__(**kwargs)
        # Read-only fields are initialized to None and filled by deserialization.
        self.type = None
        self.id = None
        self.address = None
        self.resource_id = None
        self.next_hop_ids = None
        self.issues = None
class ConnectivityInformation(msrest.serialization.Model):
    """Result of a connectivity check.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar hops: List of hops between the source and the destination.
    :vartype hops: list[~azure.mgmt.network.v2020_04_01.models.ConnectivityHop]
    :ivar connection_status: The connection status. Possible values include: "Unknown",
     "Connected", "Disconnected", "Degraded".
    :vartype connection_status: str or ~azure.mgmt.network.v2020_04_01.models.ConnectionStatus
    :ivar avg_latency_in_ms: Average latency in milliseconds.
    :vartype avg_latency_in_ms: int
    :ivar min_latency_in_ms: Minimum latency in milliseconds.
    :vartype min_latency_in_ms: int
    :ivar max_latency_in_ms: Maximum latency in milliseconds.
    :vartype max_latency_in_ms: int
    :ivar probes_sent: Total number of probes sent.
    :vartype probes_sent: int
    :ivar probes_failed: Number of failed probes.
    :vartype probes_failed: int
    """

    # Every field is server-populated and read-only.
    _validation = {
        'hops': {'readonly': True},
        'connection_status': {'readonly': True},
        'avg_latency_in_ms': {'readonly': True},
        'min_latency_in_ms': {'readonly': True},
        'max_latency_in_ms': {'readonly': True},
        'probes_sent': {'readonly': True},
        'probes_failed': {'readonly': True},
    }

    _attribute_map = {
        'hops': {'key': 'hops', 'type': '[ConnectivityHop]'},
        'connection_status': {'key': 'connectionStatus', 'type': 'str'},
        'avg_latency_in_ms': {'key': 'avgLatencyInMs', 'type': 'int'},
        'min_latency_in_ms': {'key': 'minLatencyInMs', 'type': 'int'},
        'max_latency_in_ms': {'key': 'maxLatencyInMs', 'type': 'int'},
        'probes_sent': {'key': 'probesSent', 'type': 'int'},
        'probes_failed': {'key': 'probesFailed', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        super(ConnectivityInformation, self).__init__(**kwargs)
        # Read-only fields are initialized to None and filled by deserialization.
        self.hops = None
        self.connection_status = None
        self.avg_latency_in_ms = None
        self.min_latency_in_ms = None
        self.max_latency_in_ms = None
        self.probes_sent = None
        self.probes_failed = None
class ConnectivityIssue(msrest.serialization.Model):
    """An issue encountered while checking connectivity.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar origin: The origin of the issue. Possible values include: "Local", "Inbound", "Outbound".
    :vartype origin: str or ~azure.mgmt.network.v2020_04_01.models.Origin
    :ivar severity: The severity of the issue. Possible values include: "Error", "Warning".
    :vartype severity: str or ~azure.mgmt.network.v2020_04_01.models.Severity
    :ivar type: The type of issue. Possible values include: "Unknown", "AgentStopped",
     "GuestFirewall", "DnsResolution", "SocketBind", "NetworkSecurityRule", "UserDefinedRoute",
     "PortThrottled", "Platform".
    :vartype type: str or ~azure.mgmt.network.v2020_04_01.models.IssueType
    :ivar context: Provides additional context on the issue.
    :vartype context: list[dict[str, str]]
    """

    # Every field is server-populated and read-only.
    _validation = {
        'origin': {'readonly': True},
        'severity': {'readonly': True},
        'type': {'readonly': True},
        'context': {'readonly': True},
    }

    _attribute_map = {
        'origin': {'key': 'origin', 'type': 'str'},
        'severity': {'key': 'severity', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'context': {'key': 'context', 'type': '[{str}]'},
    }

    def __init__(self, **kwargs):
        super(ConnectivityIssue, self).__init__(**kwargs)
        # Read-only fields are initialized to None and filled by deserialization.
        self.origin = None
        self.severity = None
        self.type = None
        self.context = None
class ConnectivityParameters(msrest.serialization.Model):
    """Parameters that determine how the connectivity check will be performed.
    All required parameters must be populated in order to send to Azure.
    :param source: Required. The source of the connection.
    :type source: ~azure.mgmt.network.v2020_04_01.models.ConnectivitySource
    :param destination: Required. The destination of connection.
    :type destination: ~azure.mgmt.network.v2020_04_01.models.ConnectivityDestination
    :param protocol: Network protocol. Possible values include: "Tcp", "Http", "Https", "Icmp".
    :type protocol: str or ~azure.mgmt.network.v2020_04_01.models.Protocol
    :param protocol_configuration: Configuration of the protocol.
    :type protocol_configuration: ~azure.mgmt.network.v2020_04_01.models.ProtocolConfiguration
    :param preferred_ip_version: Preferred IP version of the connection. Possible values include:
     "IPv4", "IPv6".
    :type preferred_ip_version: str or ~azure.mgmt.network.v2020_04_01.models.IPVersion
    """
    # msrest validation metadata: 'required' keys must be supplied by the caller.
    _validation = {
        'source': {'required': True},
        'destination': {'required': True},
    }
    # Maps Python attribute names to wire-format JSON keys and msrest type strings.
    _attribute_map = {
        'source': {'key': 'source', 'type': 'ConnectivitySource'},
        'destination': {'key': 'destination', 'type': 'ConnectivityDestination'},
        'protocol': {'key': 'protocol', 'type': 'str'},
        'protocol_configuration': {'key': 'protocolConfiguration', 'type': 'ProtocolConfiguration'},
        'preferred_ip_version': {'key': 'preferredIPVersion', 'type': 'str'},
    }
    def __init__(
        self,
        **kwargs
    ):
        """Populate fields from ``kwargs``; raises ``KeyError`` if ``source`` or ``destination`` is missing."""
        super(ConnectivityParameters, self).__init__(**kwargs)
        self.source = kwargs['source']
        self.destination = kwargs['destination']
        self.protocol = kwargs.get('protocol', None)
        self.protocol_configuration = kwargs.get('protocol_configuration', None)
        self.preferred_ip_version = kwargs.get('preferred_ip_version', None)
class ConnectivitySource(msrest.serialization.Model):
    """Parameters that define the source of the connection.
    All required parameters must be populated in order to send to Azure.
    :param resource_id: Required. The ID of the resource from which a connectivity check will be
     initiated.
    :type resource_id: str
    :param port: The source port from which a connectivity check will be performed.
    :type port: int
    """
    # msrest validation metadata: 'required' keys must be supplied by the caller.
    _validation = {
        'resource_id': {'required': True},
    }
    # Maps Python attribute names to wire-format JSON keys and msrest type strings.
    _attribute_map = {
        'resource_id': {'key': 'resourceId', 'type': 'str'},
        'port': {'key': 'port', 'type': 'int'},
    }
    def __init__(
        self,
        **kwargs
    ):
        """Populate fields from ``kwargs``; raises ``KeyError`` if ``resource_id`` is missing."""
        super(ConnectivitySource, self).__init__(**kwargs)
        self.resource_id = kwargs['resource_id']
        self.port = kwargs.get('port', None)
class Container(SubResource):
    """A reference to a container resource living in a remote resource provider.

    :param id: Resource ID.
    :type id: str
    """

    # Wire-format mapping used by msrest (de)serialization.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        """Forward everything (including the optional ``id``) to :class:`SubResource`."""
        super(Container, self).__init__(**kwargs)
class ContainerNetworkInterface(SubResource):
    """Container network interface child resource.
    Variables are only populated by the server, and will be ignored when sending a request.
    :param id: Resource ID.
    :type id: str
    :param name: The name of the resource. This name can be used to access the resource.
    :type name: str
    :ivar type: Sub Resource type.
    :vartype type: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar container_network_interface_configuration: Container network interface configuration from
     which this container network interface is created.
    :vartype container_network_interface_configuration:
     ~azure.mgmt.network.v2020_04_01.models.ContainerNetworkInterfaceConfiguration
    :param container: Reference to the container to which this container network interface is
     attached.
    :type container: ~azure.mgmt.network.v2020_04_01.models.Container
    :ivar ip_configurations: Reference to the ip configuration on this container nic.
    :vartype ip_configurations:
     list[~azure.mgmt.network.v2020_04_01.models.ContainerNetworkInterfaceIpConfiguration]
    :ivar provisioning_state: The provisioning state of the container network interface resource.
     Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """
    # msrest validation metadata: 'readonly' attributes are skipped when serializing requests.
    _validation = {
        'type': {'readonly': True},
        'etag': {'readonly': True},
        'container_network_interface_configuration': {'readonly': True},
        'ip_configurations': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }
    # Maps Python attribute names to wire-format JSON keys ('properties.*' keys are nested) and msrest types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'container_network_interface_configuration': {'key': 'properties.containerNetworkInterfaceConfiguration', 'type': 'ContainerNetworkInterfaceConfiguration'},
        'container': {'key': 'properties.container', 'type': 'Container'},
        'ip_configurations': {'key': 'properties.ipConfigurations', 'type': '[ContainerNetworkInterfaceIpConfiguration]'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }
    def __init__(
        self,
        **kwargs
    ):
        """Populate writable fields from ``kwargs``; read-only fields stay ``None``."""
        super(ContainerNetworkInterface, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.type = None
        self.etag = None
        self.container_network_interface_configuration = None
        self.container = kwargs.get('container', None)
        self.ip_configurations = None
        self.provisioning_state = None
class ContainerNetworkInterfaceConfiguration(SubResource):
    """Container network interface configuration child resource.
    Variables are only populated by the server, and will be ignored when sending a request.
    :param id: Resource ID.
    :type id: str
    :param name: The name of the resource. This name can be used to access the resource.
    :type name: str
    :ivar type: Sub Resource type.
    :vartype type: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param ip_configurations: A list of ip configurations of the container network interface
     configuration.
    :type ip_configurations: list[~azure.mgmt.network.v2020_04_01.models.IPConfigurationProfile]
    :param container_network_interfaces: A list of container network interfaces created from this
     container network interface configuration.
    :type container_network_interfaces: list[~azure.mgmt.network.v2020_04_01.models.SubResource]
    :ivar provisioning_state: The provisioning state of the container network interface
     configuration resource. Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """
    # msrest validation metadata: 'readonly' attributes are skipped when serializing requests.
    _validation = {
        'type': {'readonly': True},
        'etag': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }
    # Maps Python attribute names to wire-format JSON keys ('properties.*' keys are nested) and msrest types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'ip_configurations': {'key': 'properties.ipConfigurations', 'type': '[IPConfigurationProfile]'},
        'container_network_interfaces': {'key': 'properties.containerNetworkInterfaces', 'type': '[SubResource]'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }
    def __init__(
        self,
        **kwargs
    ):
        """Populate writable fields from ``kwargs``; read-only fields stay ``None``."""
        super(ContainerNetworkInterfaceConfiguration, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.type = None
        self.etag = None
        self.ip_configurations = kwargs.get('ip_configurations', None)
        self.container_network_interfaces = kwargs.get('container_network_interfaces', None)
        self.provisioning_state = None
class ContainerNetworkInterfaceIpConfiguration(msrest.serialization.Model):
    """The ip configuration for a container network interface.
    Variables are only populated by the server, and will be ignored when sending a request.
    :param name: The name of the resource. This name can be used to access the resource.
    :type name: str
    :ivar type: Sub Resource type.
    :vartype type: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar provisioning_state: The provisioning state of the container network interface IP
     configuration resource. Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """
    # msrest validation metadata: 'readonly' attributes are skipped when serializing requests.
    _validation = {
        'type': {'readonly': True},
        'etag': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }
    # Maps Python attribute names to wire-format JSON keys and msrest type strings.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }
    def __init__(
        self,
        **kwargs
    ):
        """Populate writable fields from ``kwargs``; read-only fields stay ``None``."""
        super(ContainerNetworkInterfaceIpConfiguration, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.type = None
        self.etag = None
        self.provisioning_state = None
class CustomDnsConfigPropertiesFormat(msrest.serialization.Model):
    """Customer-supplied custom DNS resolution configuration.

    :param fqdn: Fqdn that resolves to private endpoint ip address.
    :type fqdn: str
    :param ip_addresses: A list of private ip addresses of the private endpoint.
    :type ip_addresses: list[str]
    """

    # Attribute-name -> wire JSON key / msrest type mapping.
    _attribute_map = {
        'fqdn': {'key': 'fqdn', 'type': 'str'},
        'ip_addresses': {'key': 'ipAddresses', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        """Populate both optional settings from ``kwargs`` (absent keys default to ``None``)."""
        super(CustomDnsConfigPropertiesFormat, self).__init__(**kwargs)
        self.fqdn = kwargs.get('fqdn')
        self.ip_addresses = kwargs.get('ip_addresses')
class DdosCustomPolicy(Resource):
    """A DDoS custom policy in a resource group.
    Variables are only populated by the server, and will be ignored when sending a request.
    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar resource_guid: The resource GUID property of the DDoS custom policy resource. It uniquely
     identifies the resource, even if the user changes its name or migrate the resource across
     subscriptions or resource groups.
    :vartype resource_guid: str
    :ivar provisioning_state: The provisioning state of the DDoS custom policy resource. Possible
     values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    :ivar public_ip_addresses: The list of public IPs associated with the DDoS custom policy
     resource. This list is read-only.
    :vartype public_ip_addresses: list[~azure.mgmt.network.v2020_04_01.models.SubResource]
    :param protocol_custom_settings: The protocol-specific DDoS policy customization parameters.
    :type protocol_custom_settings:
     list[~azure.mgmt.network.v2020_04_01.models.ProtocolCustomSettingsFormat]
    """
    # msrest validation metadata: 'readonly' attributes are skipped when serializing requests.
    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'etag': {'readonly': True},
        'resource_guid': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'public_ip_addresses': {'readonly': True},
    }
    # Maps Python attribute names to wire-format JSON keys ('properties.*' keys are nested) and msrest types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'etag': {'key': 'etag', 'type': 'str'},
        'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'public_ip_addresses': {'key': 'properties.publicIPAddresses', 'type': '[SubResource]'},
        'protocol_custom_settings': {'key': 'properties.protocolCustomSettings', 'type': '[ProtocolCustomSettingsFormat]'},
    }
    def __init__(
        self,
        **kwargs
    ):
        """Populate writable fields from ``kwargs`` (base Resource handles id/location/tags); read-only fields stay ``None``."""
        super(DdosCustomPolicy, self).__init__(**kwargs)
        self.etag = None
        self.resource_guid = None
        self.provisioning_state = None
        self.public_ip_addresses = None
        self.protocol_custom_settings = kwargs.get('protocol_custom_settings', None)
class DdosProtectionPlan(msrest.serialization.Model):
    """A DDoS protection plan in a resource group.
    Variables are only populated by the server, and will be ignored when sending a request.
    :ivar id: Resource ID.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar resource_guid: The resource GUID property of the DDoS protection plan resource. It
     uniquely identifies the resource, even if the user changes its name or migrate the resource
     across subscriptions or resource groups.
    :vartype resource_guid: str
    :ivar provisioning_state: The provisioning state of the DDoS protection plan resource. Possible
     values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    :ivar virtual_networks: The list of virtual networks associated with the DDoS protection plan
     resource. This list is read-only.
    :vartype virtual_networks: list[~azure.mgmt.network.v2020_04_01.models.SubResource]
    """
    # msrest validation metadata: 'readonly' attributes are skipped when serializing requests.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'etag': {'readonly': True},
        'resource_guid': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'virtual_networks': {'readonly': True},
    }
    # Maps Python attribute names to wire-format JSON keys ('properties.*' keys are nested) and msrest types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'etag': {'key': 'etag', 'type': 'str'},
        'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'virtual_networks': {'key': 'properties.virtualNetworks', 'type': '[SubResource]'},
    }
    def __init__(
        self,
        **kwargs
    ):
        """Populate ``location``/``tags`` from ``kwargs``; all read-only fields stay ``None``."""
        super(DdosProtectionPlan, self).__init__(**kwargs)
        self.id = None
        self.name = None
        self.type = None
        self.location = kwargs.get('location', None)
        self.tags = kwargs.get('tags', None)
        self.etag = None
        self.resource_guid = None
        self.provisioning_state = None
        self.virtual_networks = None
class DdosProtectionPlanListResult(msrest.serialization.Model):
    """A list of DDoS protection plans.
    Variables are only populated by the server, and will be ignored when sending a request.
    :param value: A list of DDoS protection plans.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.DdosProtectionPlan]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """
    # msrest validation metadata: 'readonly' attributes are skipped when serializing requests.
    _validation = {
        'next_link': {'readonly': True},
    }
    # Maps Python attribute names to wire-format JSON keys and msrest type strings.
    _attribute_map = {
        'value': {'key': 'value', 'type': '[DdosProtectionPlan]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }
    def __init__(
        self,
        **kwargs
    ):
        """Populate ``value`` from ``kwargs``; server-paging ``next_link`` stays ``None``."""
        super(DdosProtectionPlanListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value', None)
        self.next_link = None
class DdosSettings(msrest.serialization.Model):
    """Contains the DDoS protection settings of the public IP.
    :param ddos_custom_policy: The DDoS custom policy associated with the public IP.
    :type ddos_custom_policy: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param protection_coverage: The DDoS protection policy customizability of the public IP. Only
     standard coverage will have the ability to be customized. Possible values include: "Basic",
     "Standard".
    :type protection_coverage: str or
     ~azure.mgmt.network.v2020_04_01.models.DdosSettingsProtectionCoverage
    :param protected_ip: Enables DDoS protection on the public IP.
    :type protected_ip: bool
    """
    # Maps Python attribute names to wire-format JSON keys and msrest type strings.
    _attribute_map = {
        'ddos_custom_policy': {'key': 'ddosCustomPolicy', 'type': 'SubResource'},
        'protection_coverage': {'key': 'protectionCoverage', 'type': 'str'},
        'protected_ip': {'key': 'protectedIP', 'type': 'bool'},
    }
    def __init__(
        self,
        **kwargs
    ):
        """Populate all optional settings from ``kwargs``; absent keys default to ``None``."""
        super(DdosSettings, self).__init__(**kwargs)
        self.ddos_custom_policy = kwargs.get('ddos_custom_policy', None)
        self.protection_coverage = kwargs.get('protection_coverage', None)
        self.protected_ip = kwargs.get('protected_ip', None)
class Delegation(SubResource):
    """Details the service to which the subnet is delegated.
    Variables are only populated by the server, and will be ignored when sending a request.
    :param id: Resource ID.
    :type id: str
    :param name: The name of the resource that is unique within a subnet. This name can be used to
     access the resource.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param service_name: The name of the service to whom the subnet should be delegated (e.g.
     Microsoft.Sql/servers).
    :type service_name: str
    :ivar actions: The actions permitted to the service upon delegation.
    :vartype actions: list[str]
    :ivar provisioning_state: The provisioning state of the service delegation resource. Possible
     values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """
    # msrest validation metadata: 'readonly' attributes are skipped when serializing requests.
    _validation = {
        'etag': {'readonly': True},
        'actions': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }
    # Maps Python attribute names to wire-format JSON keys ('properties.*' keys are nested) and msrest types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'service_name': {'key': 'properties.serviceName', 'type': 'str'},
        'actions': {'key': 'properties.actions', 'type': '[str]'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }
    def __init__(
        self,
        **kwargs
    ):
        """Populate writable fields from ``kwargs``; read-only fields stay ``None``."""
        super(Delegation, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.etag = None
        self.service_name = kwargs.get('service_name', None)
        self.actions = None
        self.provisioning_state = None
class DeviceProperties(msrest.serialization.Model):
    """List of properties of the device.
    :param device_vendor: Name of the device Vendor.
    :type device_vendor: str
    :param device_model: Model of the device.
    :type device_model: str
    :param link_speed_in_mbps: Link speed.
    :type link_speed_in_mbps: int
    """
    # Maps Python attribute names to wire-format JSON keys and msrest type strings.
    _attribute_map = {
        'device_vendor': {'key': 'deviceVendor', 'type': 'str'},
        'device_model': {'key': 'deviceModel', 'type': 'str'},
        'link_speed_in_mbps': {'key': 'linkSpeedInMbps', 'type': 'int'},
    }
    def __init__(
        self,
        **kwargs
    ):
        """Populate all optional settings from ``kwargs``; absent keys default to ``None``."""
        super(DeviceProperties, self).__init__(**kwargs)
        self.device_vendor = kwargs.get('device_vendor', None)
        self.device_model = kwargs.get('device_model', None)
        self.link_speed_in_mbps = kwargs.get('link_speed_in_mbps', None)
class DhcpOptions(msrest.serialization.Model):
    """DHCP options for a virtual network.

    Holds the array of DNS servers available to VMs deployed in the virtual
    network; a standard DHCP option set on a subnet overrides the VNET one.

    :param dns_servers: The list of DNS servers IP addresses.
    :type dns_servers: list[str]
    """

    # Attribute-name -> wire JSON key / msrest type mapping.
    _attribute_map = {
        'dns_servers': {'key': 'dnsServers', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        """Populate the optional ``dns_servers`` list from ``kwargs``."""
        super(DhcpOptions, self).__init__(**kwargs)
        self.dns_servers = kwargs.get('dns_servers')
class Dimension(msrest.serialization.Model):
    """Dimension of the metric.
    :param name: The name of the dimension.
    :type name: str
    :param display_name: The display name of the dimension.
    :type display_name: str
    :param internal_name: The internal name of the dimension.
    :type internal_name: str
    """
    # Maps Python attribute names to wire-format JSON keys and msrest type strings.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'internal_name': {'key': 'internalName', 'type': 'str'},
    }
    def __init__(
        self,
        **kwargs
    ):
        """Populate all optional settings from ``kwargs``; absent keys default to ``None``."""
        super(Dimension, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.display_name = kwargs.get('display_name', None)
        self.internal_name = kwargs.get('internal_name', None)
class DnsNameAvailabilityResult(msrest.serialization.Model):
    """Response for the CheckDnsNameAvailability API service call.

    :param available: Domain availability (True/False).
    :type available: bool
    """

    # Attribute-name -> wire JSON key / msrest type mapping.
    _attribute_map = {
        'available': {'key': 'available', 'type': 'bool'},
    }

    def __init__(self, **kwargs):
        """Populate the optional ``available`` flag from ``kwargs``."""
        super(DnsNameAvailabilityResult, self).__init__(**kwargs)
        self.available = kwargs.get('available')
class EffectiveNetworkSecurityGroup(msrest.serialization.Model):
    """Effective network security group.
    :param network_security_group: The ID of network security group that is applied.
    :type network_security_group: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param association: Associated resources.
    :type association:
     ~azure.mgmt.network.v2020_04_01.models.EffectiveNetworkSecurityGroupAssociation
    :param effective_security_rules: A collection of effective security rules.
    :type effective_security_rules:
     list[~azure.mgmt.network.v2020_04_01.models.EffectiveNetworkSecurityRule]
    :param tag_map: Mapping of tags to list of IP Addresses included within the tag.
    :type tag_map: str
    """
    # Maps Python attribute names to wire-format JSON keys and msrest type strings.
    _attribute_map = {
        'network_security_group': {'key': 'networkSecurityGroup', 'type': 'SubResource'},
        'association': {'key': 'association', 'type': 'EffectiveNetworkSecurityGroupAssociation'},
        'effective_security_rules': {'key': 'effectiveSecurityRules', 'type': '[EffectiveNetworkSecurityRule]'},
        'tag_map': {'key': 'tagMap', 'type': 'str'},
    }
    def __init__(
        self,
        **kwargs
    ):
        """Populate all optional settings from ``kwargs``; absent keys default to ``None``."""
        super(EffectiveNetworkSecurityGroup, self).__init__(**kwargs)
        self.network_security_group = kwargs.get('network_security_group', None)
        self.association = kwargs.get('association', None)
        self.effective_security_rules = kwargs.get('effective_security_rules', None)
        self.tag_map = kwargs.get('tag_map', None)
class EffectiveNetworkSecurityGroupAssociation(msrest.serialization.Model):
    """Resources an effective network security group is associated with.

    :param subnet: The ID of the subnet if assigned.
    :type subnet: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param network_interface: The ID of the network interface if assigned.
    :type network_interface: ~azure.mgmt.network.v2020_04_01.models.SubResource
    """

    # Attribute-name -> wire JSON key / msrest type mapping.
    _attribute_map = {
        'subnet': {'key': 'subnet', 'type': 'SubResource'},
        'network_interface': {'key': 'networkInterface', 'type': 'SubResource'},
    }

    def __init__(self, **kwargs):
        """Populate both optional references from ``kwargs`` (absent keys default to ``None``)."""
        super(EffectiveNetworkSecurityGroupAssociation, self).__init__(**kwargs)
        self.subnet = kwargs.get('subnet')
        self.network_interface = kwargs.get('network_interface')
class EffectiveNetworkSecurityGroupListResult(msrest.serialization.Model):
    """Response for list effective network security groups API service call.
    Variables are only populated by the server, and will be ignored when sending a request.
    :param value: A list of effective network security groups.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.EffectiveNetworkSecurityGroup]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """
    # msrest validation metadata: 'readonly' attributes are skipped when serializing requests.
    _validation = {
        'next_link': {'readonly': True},
    }
    # Maps Python attribute names to wire-format JSON keys and msrest type strings.
    _attribute_map = {
        'value': {'key': 'value', 'type': '[EffectiveNetworkSecurityGroup]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }
    def __init__(
        self,
        **kwargs
    ):
        """Populate ``value`` from ``kwargs``; server-paging ``next_link`` stays ``None``."""
        super(EffectiveNetworkSecurityGroupListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value', None)
        self.next_link = None
class EffectiveNetworkSecurityRule(msrest.serialization.Model):
    """Effective network security rules.
    :param name: The name of the security rule specified by the user (if created by the user).
    :type name: str
    :param protocol: The network protocol this rule applies to. Possible values include: "Tcp",
     "Udp", "All".
    :type protocol: str or ~azure.mgmt.network.v2020_04_01.models.EffectiveSecurityRuleProtocol
    :param source_port_range: The source port or range.
    :type source_port_range: str
    :param destination_port_range: The destination port or range.
    :type destination_port_range: str
    :param source_port_ranges: The source port ranges. Expected values include a single integer
     between 0 and 65535, a range using '-' as separator (e.g. 100-400), or an asterisk (*).
    :type source_port_ranges: list[str]
    :param destination_port_ranges: The destination port ranges. Expected values include a single
     integer between 0 and 65535, a range using '-' as separator (e.g. 100-400), or an asterisk (*).
    :type destination_port_ranges: list[str]
    :param source_address_prefix: The source address prefix.
    :type source_address_prefix: str
    :param destination_address_prefix: The destination address prefix.
    :type destination_address_prefix: str
    :param source_address_prefixes: The source address prefixes. Expected values include CIDR IP
     ranges, Default Tags (VirtualNetwork, AzureLoadBalancer, Internet), System Tags, and the
     asterisk (*).
    :type source_address_prefixes: list[str]
    :param destination_address_prefixes: The destination address prefixes. Expected values include
     CIDR IP ranges, Default Tags (VirtualNetwork, AzureLoadBalancer, Internet), System Tags, and
     the asterisk (*).
    :type destination_address_prefixes: list[str]
    :param expanded_source_address_prefix: The expanded source address prefix.
    :type expanded_source_address_prefix: list[str]
    :param expanded_destination_address_prefix: Expanded destination address prefix.
    :type expanded_destination_address_prefix: list[str]
    :param access: Whether network traffic is allowed or denied. Possible values include: "Allow",
     "Deny".
    :type access: str or ~azure.mgmt.network.v2020_04_01.models.SecurityRuleAccess
    :param priority: The priority of the rule.
    :type priority: int
    :param direction: The direction of the rule. Possible values include: "Inbound", "Outbound".
    :type direction: str or ~azure.mgmt.network.v2020_04_01.models.SecurityRuleDirection
    """
    # Maps Python attribute names to wire-format JSON keys and msrest type strings.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'protocol': {'key': 'protocol', 'type': 'str'},
        'source_port_range': {'key': 'sourcePortRange', 'type': 'str'},
        'destination_port_range': {'key': 'destinationPortRange', 'type': 'str'},
        'source_port_ranges': {'key': 'sourcePortRanges', 'type': '[str]'},
        'destination_port_ranges': {'key': 'destinationPortRanges', 'type': '[str]'},
        'source_address_prefix': {'key': 'sourceAddressPrefix', 'type': 'str'},
        'destination_address_prefix': {'key': 'destinationAddressPrefix', 'type': 'str'},
        'source_address_prefixes': {'key': 'sourceAddressPrefixes', 'type': '[str]'},
        'destination_address_prefixes': {'key': 'destinationAddressPrefixes', 'type': '[str]'},
        'expanded_source_address_prefix': {'key': 'expandedSourceAddressPrefix', 'type': '[str]'},
        'expanded_destination_address_prefix': {'key': 'expandedDestinationAddressPrefix', 'type': '[str]'},
        'access': {'key': 'access', 'type': 'str'},
        'priority': {'key': 'priority', 'type': 'int'},
        'direction': {'key': 'direction', 'type': 'str'},
    }
    def __init__(
        self,
        **kwargs
    ):
        """Populate all optional settings from ``kwargs``; absent keys default to ``None``."""
        super(EffectiveNetworkSecurityRule, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.protocol = kwargs.get('protocol', None)
        self.source_port_range = kwargs.get('source_port_range', None)
        self.destination_port_range = kwargs.get('destination_port_range', None)
        self.source_port_ranges = kwargs.get('source_port_ranges', None)
        self.destination_port_ranges = kwargs.get('destination_port_ranges', None)
        self.source_address_prefix = kwargs.get('source_address_prefix', None)
        self.destination_address_prefix = kwargs.get('destination_address_prefix', None)
        self.source_address_prefixes = kwargs.get('source_address_prefixes', None)
        self.destination_address_prefixes = kwargs.get('destination_address_prefixes', None)
        self.expanded_source_address_prefix = kwargs.get('expanded_source_address_prefix', None)
        self.expanded_destination_address_prefix = kwargs.get('expanded_destination_address_prefix', None)
        self.access = kwargs.get('access', None)
        self.priority = kwargs.get('priority', None)
        self.direction = kwargs.get('direction', None)
class EffectiveRoute(msrest.serialization.Model):
    """Effective Route.
    :param name: The name of the user defined route. This is optional.
    :type name: str
    :param disable_bgp_route_propagation: If true, on-premises routes are not propagated to the
     network interfaces in the subnet.
    :type disable_bgp_route_propagation: bool
    :param source: Who created the route. Possible values include: "Unknown", "User",
     "VirtualNetworkGateway", "Default".
    :type source: str or ~azure.mgmt.network.v2020_04_01.models.EffectiveRouteSource
    :param state: The value of effective route. Possible values include: "Active", "Invalid".
    :type state: str or ~azure.mgmt.network.v2020_04_01.models.EffectiveRouteState
    :param address_prefix: The address prefixes of the effective routes in CIDR notation.
    :type address_prefix: list[str]
    :param next_hop_ip_address: The IP address of the next hop of the effective route.
    :type next_hop_ip_address: list[str]
    :param next_hop_type: The type of Azure hop the packet should be sent to. Possible values
     include: "VirtualNetworkGateway", "VnetLocal", "Internet", "VirtualAppliance", "None".
    :type next_hop_type: str or ~azure.mgmt.network.v2020_04_01.models.RouteNextHopType
    """
    # Maps Python attribute names to wire-format JSON keys and msrest type strings.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'disable_bgp_route_propagation': {'key': 'disableBgpRoutePropagation', 'type': 'bool'},
        'source': {'key': 'source', 'type': 'str'},
        'state': {'key': 'state', 'type': 'str'},
        'address_prefix': {'key': 'addressPrefix', 'type': '[str]'},
        'next_hop_ip_address': {'key': 'nextHopIpAddress', 'type': '[str]'},
        'next_hop_type': {'key': 'nextHopType', 'type': 'str'},
    }
    def __init__(
        self,
        **kwargs
    ):
        """Populate all optional settings from ``kwargs``; absent keys default to ``None``."""
        super(EffectiveRoute, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.disable_bgp_route_propagation = kwargs.get('disable_bgp_route_propagation', None)
        self.source = kwargs.get('source', None)
        self.state = kwargs.get('state', None)
        self.address_prefix = kwargs.get('address_prefix', None)
        self.next_hop_ip_address = kwargs.get('next_hop_ip_address', None)
        self.next_hop_type = kwargs.get('next_hop_type', None)
class EffectiveRouteListResult(msrest.serialization.Model):
    """Response for list effective route API service call.
    Variables are only populated by the server, and will be ignored when sending a request.
    :param value: A list of effective routes.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.EffectiveRoute]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """
    # msrest validation metadata: 'readonly' attributes are skipped when serializing requests.
    _validation = {
        'next_link': {'readonly': True},
    }
    # Maps Python attribute names to wire-format JSON keys and msrest type strings.
    _attribute_map = {
        'value': {'key': 'value', 'type': '[EffectiveRoute]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }
    def __init__(
        self,
        **kwargs
    ):
        """Populate ``value`` from ``kwargs``; server-paging ``next_link`` stays ``None``."""
        super(EffectiveRouteListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value', None)
        self.next_link = None
class EndpointServiceResult(SubResource):
    """Endpoint service.
    Variables are only populated by the server, and will be ignored when sending a request.
    :param id: Resource ID.
    :type id: str
    :ivar name: Name of the endpoint service.
    :vartype name: str
    :ivar type: Type of the endpoint service.
    :vartype type: str
    """
    # msrest validation metadata: 'readonly' attributes are skipped when serializing requests.
    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
    }
    # Maps Python attribute names to wire-format JSON keys and msrest type strings.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }
    def __init__(
        self,
        **kwargs
    ):
        """Forward ``kwargs`` (incl. optional ``id``) to SubResource; read-only fields stay ``None``."""
        super(EndpointServiceResult, self).__init__(**kwargs)
        self.name = None
        self.type = None
class EndpointServicesListResult(msrest.serialization.Model):
    """Response for the ListAvailableEndpointServices API service call.
    :param value: List of available endpoint services in a region.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.EndpointServiceResult]
    :param next_link: The URL to get the next set of results.
    :type next_link: str
    """
    # Maps Python attribute names to wire-format JSON keys and msrest type strings.
    _attribute_map = {
        'value': {'key': 'value', 'type': '[EndpointServiceResult]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }
    def __init__(
        self,
        **kwargs
    ):
        """Populate both optional settings from ``kwargs``; absent keys default to ``None``."""
        super(EndpointServicesListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value', None)
        self.next_link = kwargs.get('next_link', None)
class Error(msrest.serialization.Model):
    """Common error representation.
    :param code: Error code.
    :type code: str
    :param message: Error message.
    :type message: str
    :param target: Error target.
    :type target: str
    :param details: Error details.
    :type details: list[~azure.mgmt.network.v2020_04_01.models.ErrorDetails]
    :param inner_error: Inner error message.
    :type inner_error: str
    """
    # Maps Python attribute names to wire-format JSON keys and msrest type strings.
    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'target': {'key': 'target', 'type': 'str'},
        'details': {'key': 'details', 'type': '[ErrorDetails]'},
        'inner_error': {'key': 'innerError', 'type': 'str'},
    }
    def __init__(
        self,
        **kwargs
    ):
        """Populate all optional settings from ``kwargs``; absent keys default to ``None``."""
        super(Error, self).__init__(**kwargs)
        self.code = kwargs.get('code', None)
        self.message = kwargs.get('message', None)
        self.target = kwargs.get('target', None)
        self.details = kwargs.get('details', None)
        self.inner_error = kwargs.get('inner_error', None)
class ErrorDetails(msrest.serialization.Model):
    """Common error details representation.

    :param code: Error code.
    :type code: str
    :param target: Error target.
    :type target: str
    :param message: Error message.
    :type message: str
    """

    _attribute_map = {
        "code": {"key": "code", "type": "str"},
        "target": {"key": "target", "type": "str"},
        "message": {"key": "message", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(ErrorDetails, self).__init__(**kwargs)
        self.code = kwargs.get("code")
        self.target = kwargs.get("target")
        self.message = kwargs.get("message")
class ErrorResponse(msrest.serialization.Model):
    """The error object.

    :param error: The error details object.
    :type error: ~azure.mgmt.network.v2020_04_01.models.ErrorDetails
    """

    _attribute_map = {
        "error": {"key": "error", "type": "ErrorDetails"},
    }

    def __init__(self, **kwargs):
        super(ErrorResponse, self).__init__(**kwargs)
        self.error = kwargs.get("error")
class EvaluatedNetworkSecurityGroup(msrest.serialization.Model):
    """Results of network security group evaluation.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param network_security_group_id: Network security group ID.
    :type network_security_group_id: str
    :param applied_to: Resource ID of nic or subnet to which network security group is applied.
    :type applied_to: str
    :param matched_rule: Matched network security rule.
    :type matched_rule: ~azure.mgmt.network.v2020_04_01.models.MatchedRule
    :ivar rules_evaluation_result: List of network security rules evaluation results.
    :vartype rules_evaluation_result:
     list[~azure.mgmt.network.v2020_04_01.models.NetworkSecurityRulesEvaluationResult]
    """

    _validation = {
        "rules_evaluation_result": {"readonly": True},
    }

    _attribute_map = {
        "network_security_group_id": {"key": "networkSecurityGroupId", "type": "str"},
        "applied_to": {"key": "appliedTo", "type": "str"},
        "matched_rule": {"key": "matchedRule", "type": "MatchedRule"},
        "rules_evaluation_result": {"key": "rulesEvaluationResult", "type": "[NetworkSecurityRulesEvaluationResult]"},
    }

    def __init__(self, **kwargs):
        super(EvaluatedNetworkSecurityGroup, self).__init__(**kwargs)
        self.network_security_group_id = kwargs.get("network_security_group_id")
        self.applied_to = kwargs.get("applied_to")
        self.matched_rule = kwargs.get("matched_rule")
        # Read-only field populated by the service.
        self.rules_evaluation_result = None
class ExpressRouteCircuit(Resource):
    """ExpressRouteCircuit resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param sku: The SKU.
    :type sku: ~azure.mgmt.network.v2020_04_01.models.ExpressRouteCircuitSku
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param allow_classic_operations: Allow classic operations.
    :type allow_classic_operations: bool
    :param circuit_provisioning_state: The CircuitProvisioningState state of the resource.
    :type circuit_provisioning_state: str
    :param service_provider_provisioning_state: The ServiceProviderProvisioningState state of the
     resource. Possible values include: "NotProvisioned", "Provisioning", "Provisioned",
     "Deprovisioning".
    :type service_provider_provisioning_state: str or
     ~azure.mgmt.network.v2020_04_01.models.ServiceProviderProvisioningState
    :param authorizations: The list of authorizations.
    :type authorizations:
     list[~azure.mgmt.network.v2020_04_01.models.ExpressRouteCircuitAuthorization]
    :param peerings: The list of peerings.
    :type peerings: list[~azure.mgmt.network.v2020_04_01.models.ExpressRouteCircuitPeering]
    :param service_key: The ServiceKey.
    :type service_key: str
    :param service_provider_notes: The ServiceProviderNotes.
    :type service_provider_notes: str
    :param service_provider_properties: The ServiceProviderProperties.
    :type service_provider_properties:
     ~azure.mgmt.network.v2020_04_01.models.ExpressRouteCircuitServiceProviderProperties
    :param express_route_port: The reference to the ExpressRoutePort resource when the circuit is
     provisioned on an ExpressRoutePort resource.
    :type express_route_port: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param bandwidth_in_gbps: The bandwidth of the circuit when the circuit is provisioned on an
     ExpressRoutePort resource.
    :type bandwidth_in_gbps: float
    :ivar stag: The identifier of the circuit traffic. Outer tag for QinQ encapsulation.
    :vartype stag: int
    :ivar provisioning_state: The provisioning state of the express route circuit resource.
     Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    :param gateway_manager_etag: The GatewayManager Etag.
    :type gateway_manager_etag: str
    :param global_reach_enabled: Flag denoting global reach status.
    :type global_reach_enabled: bool
    """

    _validation = {
        "name": {"readonly": True},
        "type": {"readonly": True},
        "etag": {"readonly": True},
        "stag": {"readonly": True},
        "provisioning_state": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "sku": {"key": "sku", "type": "ExpressRouteCircuitSku"},
        "etag": {"key": "etag", "type": "str"},
        "allow_classic_operations": {"key": "properties.allowClassicOperations", "type": "bool"},
        "circuit_provisioning_state": {"key": "properties.circuitProvisioningState", "type": "str"},
        "service_provider_provisioning_state": {"key": "properties.serviceProviderProvisioningState", "type": "str"},
        "authorizations": {"key": "properties.authorizations", "type": "[ExpressRouteCircuitAuthorization]"},
        "peerings": {"key": "properties.peerings", "type": "[ExpressRouteCircuitPeering]"},
        "service_key": {"key": "properties.serviceKey", "type": "str"},
        "service_provider_notes": {"key": "properties.serviceProviderNotes", "type": "str"},
        "service_provider_properties": {"key": "properties.serviceProviderProperties", "type": "ExpressRouteCircuitServiceProviderProperties"},
        "express_route_port": {"key": "properties.expressRoutePort", "type": "SubResource"},
        "bandwidth_in_gbps": {"key": "properties.bandwidthInGbps", "type": "float"},
        "stag": {"key": "properties.stag", "type": "int"},
        "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
        "gateway_manager_etag": {"key": "properties.gatewayManagerEtag", "type": "str"},
        "global_reach_enabled": {"key": "properties.globalReachEnabled", "type": "bool"},
    }

    def __init__(self, **kwargs):
        super(ExpressRouteCircuit, self).__init__(**kwargs)
        self.sku = kwargs.get("sku")
        # Read-only field populated by the service.
        self.etag = None
        self.allow_classic_operations = kwargs.get("allow_classic_operations")
        self.circuit_provisioning_state = kwargs.get("circuit_provisioning_state")
        self.service_provider_provisioning_state = kwargs.get("service_provider_provisioning_state")
        self.authorizations = kwargs.get("authorizations")
        self.peerings = kwargs.get("peerings")
        self.service_key = kwargs.get("service_key")
        self.service_provider_notes = kwargs.get("service_provider_notes")
        self.service_provider_properties = kwargs.get("service_provider_properties")
        self.express_route_port = kwargs.get("express_route_port")
        self.bandwidth_in_gbps = kwargs.get("bandwidth_in_gbps")
        # Read-only fields populated by the service.
        self.stag = None
        self.provisioning_state = None
        self.gateway_manager_etag = kwargs.get("gateway_manager_etag")
        self.global_reach_enabled = kwargs.get("global_reach_enabled")
class ExpressRouteCircuitArpTable(msrest.serialization.Model):
    """The ARP table associated with the ExpressRouteCircuit.

    :param age: Entry age in minutes.
    :type age: int
    :param interface: Interface address.
    :type interface: str
    :param ip_address: The IP address.
    :type ip_address: str
    :param mac_address: The MAC address.
    :type mac_address: str
    """

    _attribute_map = {
        "age": {"key": "age", "type": "int"},
        "interface": {"key": "interface", "type": "str"},
        "ip_address": {"key": "ipAddress", "type": "str"},
        "mac_address": {"key": "macAddress", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(ExpressRouteCircuitArpTable, self).__init__(**kwargs)
        self.age = kwargs.get("age")
        self.interface = kwargs.get("interface")
        self.ip_address = kwargs.get("ip_address")
        self.mac_address = kwargs.get("mac_address")
class ExpressRouteCircuitAuthorization(SubResource):
    """Authorization in an ExpressRouteCircuit resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: The name of the resource that is unique within a resource group. This name can be
     used to access the resource.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar type: Type of the resource.
    :vartype type: str
    :param authorization_key: The authorization key.
    :type authorization_key: str
    :param authorization_use_status: The authorization use status. Possible values include:
     "Available", "InUse".
    :type authorization_use_status: str or
     ~azure.mgmt.network.v2020_04_01.models.AuthorizationUseStatus
    :ivar provisioning_state: The provisioning state of the authorization resource. Possible values
     include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    _validation = {
        "etag": {"readonly": True},
        "type": {"readonly": True},
        "provisioning_state": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "etag": {"key": "etag", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "authorization_key": {"key": "properties.authorizationKey", "type": "str"},
        "authorization_use_status": {"key": "properties.authorizationUseStatus", "type": "str"},
        "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(ExpressRouteCircuitAuthorization, self).__init__(**kwargs)
        self.name = kwargs.get("name")
        # Read-only fields populated by the service.
        self.etag = None
        self.type = None
        self.authorization_key = kwargs.get("authorization_key")
        self.authorization_use_status = kwargs.get("authorization_use_status")
        self.provisioning_state = None
class ExpressRouteCircuitConnection(SubResource):
    """Express Route Circuit Connection in an ExpressRouteCircuitPeering resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: The name of the resource that is unique within a resource group. This name can be
     used to access the resource.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar type: Type of the resource.
    :vartype type: str
    :param express_route_circuit_peering: Reference to Express Route Circuit Private Peering
     Resource of the circuit initiating connection.
    :type express_route_circuit_peering: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param peer_express_route_circuit_peering: Reference to Express Route Circuit Private Peering
     Resource of the peered circuit.
    :type peer_express_route_circuit_peering: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param address_prefix: /29 IP address space to carve out Customer addresses for tunnels.
    :type address_prefix: str
    :param authorization_key: The authorization key.
    :type authorization_key: str
    :param ipv6_circuit_connection_config: IPv6 Address PrefixProperties of the express route
     circuit connection.
    :type ipv6_circuit_connection_config:
     ~azure.mgmt.network.v2020_04_01.models.Ipv6CircuitConnectionConfig
    :ivar circuit_connection_status: Express Route Circuit connection state. Possible values
     include: "Connected", "Connecting", "Disconnected".
    :vartype circuit_connection_status: str or
     ~azure.mgmt.network.v2020_04_01.models.CircuitConnectionStatus
    :ivar provisioning_state: The provisioning state of the express route circuit connection
     resource. Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    _validation = {
        "etag": {"readonly": True},
        "type": {"readonly": True},
        "circuit_connection_status": {"readonly": True},
        "provisioning_state": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "etag": {"key": "etag", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "express_route_circuit_peering": {"key": "properties.expressRouteCircuitPeering", "type": "SubResource"},
        "peer_express_route_circuit_peering": {"key": "properties.peerExpressRouteCircuitPeering", "type": "SubResource"},
        "address_prefix": {"key": "properties.addressPrefix", "type": "str"},
        "authorization_key": {"key": "properties.authorizationKey", "type": "str"},
        "ipv6_circuit_connection_config": {"key": "properties.ipv6CircuitConnectionConfig", "type": "Ipv6CircuitConnectionConfig"},
        "circuit_connection_status": {"key": "properties.circuitConnectionStatus", "type": "str"},
        "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(ExpressRouteCircuitConnection, self).__init__(**kwargs)
        self.name = kwargs.get("name")
        # Read-only fields populated by the service.
        self.etag = None
        self.type = None
        self.express_route_circuit_peering = kwargs.get("express_route_circuit_peering")
        self.peer_express_route_circuit_peering = kwargs.get("peer_express_route_circuit_peering")
        self.address_prefix = kwargs.get("address_prefix")
        self.authorization_key = kwargs.get("authorization_key")
        self.ipv6_circuit_connection_config = kwargs.get("ipv6_circuit_connection_config")
        # Read-only fields populated by the service.
        self.circuit_connection_status = None
        self.provisioning_state = None
class ExpressRouteCircuitConnectionListResult(msrest.serialization.Model):
    """Response for ListConnections API service call retrieves all global reach connections that belongs to a Private Peering for an ExpressRouteCircuit.

    :param value: The global reach connection associated with Private Peering in an ExpressRoute
     Circuit.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.ExpressRouteCircuitConnection]
    :param next_link: The URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        "value": {"key": "value", "type": "[ExpressRouteCircuitConnection]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(ExpressRouteCircuitConnectionListResult, self).__init__(**kwargs)
        self.value = kwargs.get("value")
        self.next_link = kwargs.get("next_link")
class ExpressRouteCircuitListResult(msrest.serialization.Model):
    """Response for ListExpressRouteCircuit API service call.

    :param value: A list of ExpressRouteCircuits in a resource group.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.ExpressRouteCircuit]
    :param next_link: The URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        "value": {"key": "value", "type": "[ExpressRouteCircuit]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(ExpressRouteCircuitListResult, self).__init__(**kwargs)
        self.value = kwargs.get("value")
        self.next_link = kwargs.get("next_link")
class ExpressRouteCircuitPeering(SubResource):
    """Peering in an ExpressRouteCircuit resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: The name of the resource that is unique within a resource group. This name can be
     used to access the resource.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar type: Type of the resource.
    :vartype type: str
    :param peering_type: The peering type. Possible values include: "AzurePublicPeering",
     "AzurePrivatePeering", "MicrosoftPeering".
    :type peering_type: str or ~azure.mgmt.network.v2020_04_01.models.ExpressRoutePeeringType
    :param state: The peering state. Possible values include: "Disabled", "Enabled".
    :type state: str or ~azure.mgmt.network.v2020_04_01.models.ExpressRoutePeeringState
    :param azure_asn: The Azure ASN.
    :type azure_asn: int
    :param peer_asn: The peer ASN.
    :type peer_asn: long
    :param primary_peer_address_prefix: The primary address prefix.
    :type primary_peer_address_prefix: str
    :param secondary_peer_address_prefix: The secondary address prefix.
    :type secondary_peer_address_prefix: str
    :param primary_azure_port: The primary port.
    :type primary_azure_port: str
    :param secondary_azure_port: The secondary port.
    :type secondary_azure_port: str
    :param shared_key: The shared key.
    :type shared_key: str
    :param vlan_id: The VLAN ID.
    :type vlan_id: int
    :param microsoft_peering_config: The Microsoft peering configuration.
    :type microsoft_peering_config:
     ~azure.mgmt.network.v2020_04_01.models.ExpressRouteCircuitPeeringConfig
    :param stats: The peering stats of express route circuit.
    :type stats: ~azure.mgmt.network.v2020_04_01.models.ExpressRouteCircuitStats
    :ivar provisioning_state: The provisioning state of the express route circuit peering resource.
     Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    :param gateway_manager_etag: The GatewayManager Etag.
    :type gateway_manager_etag: str
    :ivar last_modified_by: Who was the last to modify the peering.
    :vartype last_modified_by: str
    :param route_filter: The reference to the RouteFilter resource.
    :type route_filter: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param ipv6_peering_config: The IPv6 peering configuration.
    :type ipv6_peering_config:
     ~azure.mgmt.network.v2020_04_01.models.Ipv6ExpressRouteCircuitPeeringConfig
    :param express_route_connection: The ExpressRoute connection.
    :type express_route_connection: ~azure.mgmt.network.v2020_04_01.models.ExpressRouteConnectionId
    :param connections: The list of circuit connections associated with Azure Private Peering for
     this circuit.
    :type connections: list[~azure.mgmt.network.v2020_04_01.models.ExpressRouteCircuitConnection]
    :ivar peered_connections: The list of peered circuit connections associated with Azure Private
     Peering for this circuit.
    :vartype peered_connections:
     list[~azure.mgmt.network.v2020_04_01.models.PeerExpressRouteCircuitConnection]
    """

    _validation = {
        "etag": {"readonly": True},
        "type": {"readonly": True},
        "peer_asn": {"maximum": 4294967295, "minimum": 1},
        "provisioning_state": {"readonly": True},
        "last_modified_by": {"readonly": True},
        "peered_connections": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "etag": {"key": "etag", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "peering_type": {"key": "properties.peeringType", "type": "str"},
        "state": {"key": "properties.state", "type": "str"},
        "azure_asn": {"key": "properties.azureASN", "type": "int"},
        "peer_asn": {"key": "properties.peerASN", "type": "long"},
        "primary_peer_address_prefix": {"key": "properties.primaryPeerAddressPrefix", "type": "str"},
        "secondary_peer_address_prefix": {"key": "properties.secondaryPeerAddressPrefix", "type": "str"},
        "primary_azure_port": {"key": "properties.primaryAzurePort", "type": "str"},
        "secondary_azure_port": {"key": "properties.secondaryAzurePort", "type": "str"},
        "shared_key": {"key": "properties.sharedKey", "type": "str"},
        "vlan_id": {"key": "properties.vlanId", "type": "int"},
        "microsoft_peering_config": {"key": "properties.microsoftPeeringConfig", "type": "ExpressRouteCircuitPeeringConfig"},
        "stats": {"key": "properties.stats", "type": "ExpressRouteCircuitStats"},
        "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
        "gateway_manager_etag": {"key": "properties.gatewayManagerEtag", "type": "str"},
        "last_modified_by": {"key": "properties.lastModifiedBy", "type": "str"},
        "route_filter": {"key": "properties.routeFilter", "type": "SubResource"},
        "ipv6_peering_config": {"key": "properties.ipv6PeeringConfig", "type": "Ipv6ExpressRouteCircuitPeeringConfig"},
        "express_route_connection": {"key": "properties.expressRouteConnection", "type": "ExpressRouteConnectionId"},
        "connections": {"key": "properties.connections", "type": "[ExpressRouteCircuitConnection]"},
        "peered_connections": {"key": "properties.peeredConnections", "type": "[PeerExpressRouteCircuitConnection]"},
    }

    def __init__(self, **kwargs):
        super(ExpressRouteCircuitPeering, self).__init__(**kwargs)
        self.name = kwargs.get("name")
        # Read-only fields populated by the service.
        self.etag = None
        self.type = None
        self.peering_type = kwargs.get("peering_type")
        self.state = kwargs.get("state")
        self.azure_asn = kwargs.get("azure_asn")
        self.peer_asn = kwargs.get("peer_asn")
        self.primary_peer_address_prefix = kwargs.get("primary_peer_address_prefix")
        self.secondary_peer_address_prefix = kwargs.get("secondary_peer_address_prefix")
        self.primary_azure_port = kwargs.get("primary_azure_port")
        self.secondary_azure_port = kwargs.get("secondary_azure_port")
        self.shared_key = kwargs.get("shared_key")
        self.vlan_id = kwargs.get("vlan_id")
        self.microsoft_peering_config = kwargs.get("microsoft_peering_config")
        self.stats = kwargs.get("stats")
        self.provisioning_state = None
        self.gateway_manager_etag = kwargs.get("gateway_manager_etag")
        self.last_modified_by = None
        self.route_filter = kwargs.get("route_filter")
        self.ipv6_peering_config = kwargs.get("ipv6_peering_config")
        self.express_route_connection = kwargs.get("express_route_connection")
        self.connections = kwargs.get("connections")
        self.peered_connections = None
class ExpressRouteCircuitPeeringConfig(msrest.serialization.Model):
    """Specifies the peering configuration.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param advertised_public_prefixes: The reference to AdvertisedPublicPrefixes.
    :type advertised_public_prefixes: list[str]
    :param advertised_communities: The communities of bgp peering. Specified for microsoft peering.
    :type advertised_communities: list[str]
    :ivar advertised_public_prefixes_state: The advertised public prefix state of the Peering
     resource. Possible values include: "NotConfigured", "Configuring", "Configured",
     "ValidationNeeded".
    :vartype advertised_public_prefixes_state: str or
     ~azure.mgmt.network.v2020_04_01.models.ExpressRouteCircuitPeeringAdvertisedPublicPrefixState
    :param legacy_mode: The legacy mode of the peering.
    :type legacy_mode: int
    :param customer_asn: The CustomerASN of the peering.
    :type customer_asn: int
    :param routing_registry_name: The RoutingRegistryName of the configuration.
    :type routing_registry_name: str
    """

    _validation = {
        "advertised_public_prefixes_state": {"readonly": True},
    }

    _attribute_map = {
        "advertised_public_prefixes": {"key": "advertisedPublicPrefixes", "type": "[str]"},
        "advertised_communities": {"key": "advertisedCommunities", "type": "[str]"},
        "advertised_public_prefixes_state": {"key": "advertisedPublicPrefixesState", "type": "str"},
        "legacy_mode": {"key": "legacyMode", "type": "int"},
        "customer_asn": {"key": "customerASN", "type": "int"},
        "routing_registry_name": {"key": "routingRegistryName", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(ExpressRouteCircuitPeeringConfig, self).__init__(**kwargs)
        self.advertised_public_prefixes = kwargs.get("advertised_public_prefixes")
        self.advertised_communities = kwargs.get("advertised_communities")
        # Read-only field populated by the service.
        self.advertised_public_prefixes_state = None
        self.legacy_mode = kwargs.get("legacy_mode")
        self.customer_asn = kwargs.get("customer_asn")
        self.routing_registry_name = kwargs.get("routing_registry_name")
class ExpressRouteCircuitPeeringId(msrest.serialization.Model):
    """ExpressRoute circuit peering identifier.

    :param id: The ID of the ExpressRoute circuit peering.
    :type id: str
    """

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(ExpressRouteCircuitPeeringId, self).__init__(**kwargs)
        self.id = kwargs.get("id")
class ExpressRouteCircuitPeeringListResult(msrest.serialization.Model):
    """Response for ListPeering API service call retrieves all peerings that belong to an ExpressRouteCircuit.

    :param value: The peerings in an express route circuit.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.ExpressRouteCircuitPeering]
    :param next_link: The URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        "value": {"key": "value", "type": "[ExpressRouteCircuitPeering]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(ExpressRouteCircuitPeeringListResult, self).__init__(**kwargs)
        self.value = kwargs.get("value")
        self.next_link = kwargs.get("next_link")
class ExpressRouteCircuitReference(msrest.serialization.Model):
    """Reference to an express route circuit.

    :param id: Corresponding Express Route Circuit Id.
    :type id: str
    """

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(ExpressRouteCircuitReference, self).__init__(**kwargs)
        self.id = kwargs.get("id")
class ExpressRouteCircuitRoutesTable(msrest.serialization.Model):
    """The routes table associated with the ExpressRouteCircuit.

    :param network: IP address of a network entity.
    :type network: str
    :param next_hop: NextHop address.
    :type next_hop: str
    :param loc_prf: Local preference value as set with the set local-preference route-map
     configuration command.
    :type loc_prf: str
    :param weight: Route Weight.
    :type weight: int
    :param path: Autonomous system paths to the destination network.
    :type path: str
    """

    _attribute_map = {
        "network": {"key": "network", "type": "str"},
        "next_hop": {"key": "nextHop", "type": "str"},
        "loc_prf": {"key": "locPrf", "type": "str"},
        "weight": {"key": "weight", "type": "int"},
        "path": {"key": "path", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(ExpressRouteCircuitRoutesTable, self).__init__(**kwargs)
        self.network = kwargs.get("network")
        self.next_hop = kwargs.get("next_hop")
        self.loc_prf = kwargs.get("loc_prf")
        self.weight = kwargs.get("weight")
        self.path = kwargs.get("path")
class ExpressRouteCircuitRoutesTableSummary(msrest.serialization.Model):
    """The routes table associated with the ExpressRouteCircuit.

    :param neighbor: IP address of the neighbor.
    :type neighbor: str
    :param v: BGP version number spoken to the neighbor.
    :type v: int
    :param as_property: Autonomous system number.
    :type as_property: int
    :param up_down: The length of time that the BGP session has been in the Established state, or
     the current status if not in the Established state.
    :type up_down: str
    :param state_pfx_rcd: Current state of the BGP session, and the number of prefixes that have
     been received from a neighbor or peer group.
    :type state_pfx_rcd: str
    """

    _attribute_map = {
        "neighbor": {"key": "neighbor", "type": "str"},
        "v": {"key": "v", "type": "int"},
        # 'as' is a Python keyword, hence the 'as_property' attribute name.
        "as_property": {"key": "as", "type": "int"},
        "up_down": {"key": "upDown", "type": "str"},
        "state_pfx_rcd": {"key": "statePfxRcd", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(ExpressRouteCircuitRoutesTableSummary, self).__init__(**kwargs)
        self.neighbor = kwargs.get("neighbor")
        self.v = kwargs.get("v")
        self.as_property = kwargs.get("as_property")
        self.up_down = kwargs.get("up_down")
        self.state_pfx_rcd = kwargs.get("state_pfx_rcd")
class ExpressRouteCircuitsArpTableListResult(msrest.serialization.Model):
    """Response for ListArpTable associated with the Express Route Circuits API.

    :param value: A list of the ARP tables.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.ExpressRouteCircuitArpTable]
    :param next_link: The URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        "value": {"key": "value", "type": "[ExpressRouteCircuitArpTable]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(ExpressRouteCircuitsArpTableListResult, self).__init__(**kwargs)
        self.value = kwargs.get("value")
        self.next_link = kwargs.get("next_link")
class ExpressRouteCircuitServiceProviderProperties(msrest.serialization.Model):
    """Contains ServiceProviderProperties in an ExpressRouteCircuit.

    :param service_provider_name: The serviceProviderName.
    :type service_provider_name: str
    :param peering_location: The peering location.
    :type peering_location: str
    :param bandwidth_in_mbps: The BandwidthInMbps.
    :type bandwidth_in_mbps: int
    """

    _attribute_map = {
        "service_provider_name": {"key": "serviceProviderName", "type": "str"},
        "peering_location": {"key": "peeringLocation", "type": "str"},
        "bandwidth_in_mbps": {"key": "bandwidthInMbps", "type": "int"},
    }

    def __init__(self, **kwargs):
        super(ExpressRouteCircuitServiceProviderProperties, self).__init__(**kwargs)
        self.service_provider_name = kwargs.get("service_provider_name")
        self.peering_location = kwargs.get("peering_location")
        self.bandwidth_in_mbps = kwargs.get("bandwidth_in_mbps")
class ExpressRouteCircuitSku(msrest.serialization.Model):
    """Contains SKU in an ExpressRouteCircuit.

    :param name: The name of the SKU.
    :type name: str
    :param tier: The tier of the SKU. Possible values include: "Standard", "Premium", "Basic",
     "Local".
    :type tier: str or ~azure.mgmt.network.v2020_04_01.models.ExpressRouteCircuitSkuTier
    :param family: The family of the SKU. Possible values include: "UnlimitedData", "MeteredData".
    :type family: str or ~azure.mgmt.network.v2020_04_01.models.ExpressRouteCircuitSkuFamily
    """

    _attribute_map = {
        "name": {"key": "name", "type": "str"},
        "tier": {"key": "tier", "type": "str"},
        "family": {"key": "family", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(ExpressRouteCircuitSku, self).__init__(**kwargs)
        self.name = kwargs.get("name")
        self.tier = kwargs.get("tier")
        self.family = kwargs.get("family")
class ExpressRouteCircuitsRoutesTableListResult(msrest.serialization.Model):
    """Response for ListRoutesTable associated with the Express Route Circuits API.

    :param value: The list of routes table.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.ExpressRouteCircuitRoutesTable]
    :param next_link: The URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        "value": {"key": "value", "type": "[ExpressRouteCircuitRoutesTable]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(ExpressRouteCircuitsRoutesTableListResult, self).__init__(**kwargs)
        self.value = kwargs.get("value")
        self.next_link = kwargs.get("next_link")
class ExpressRouteCircuitsRoutesTableSummaryListResult(msrest.serialization.Model):
    """Response for ListRoutesTable associated with the Express Route Circuits API.

    :param value: A list of the routes table.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.ExpressRouteCircuitRoutesTableSummary]
    :param next_link: The URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        "value": {"key": "value", "type": "[ExpressRouteCircuitRoutesTableSummary]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(ExpressRouteCircuitsRoutesTableSummaryListResult, self).__init__(**kwargs)
        self.value = kwargs.get("value")
        self.next_link = kwargs.get("next_link")
class ExpressRouteCircuitStats(msrest.serialization.Model):
    """Traffic statistics associated with a peering.

    :param primarybytes_in: Bytes received on the primary link of the peering.
    :type primarybytes_in: long
    :param primarybytes_out: Bytes sent on the primary link of the peering.
    :type primarybytes_out: long
    :param secondarybytes_in: Bytes received on the secondary link of the peering.
    :type secondarybytes_in: long
    :param secondarybytes_out: Bytes sent on the secondary link of the peering.
    :type secondarybytes_out: long
    """

    _attribute_map = {
        'primarybytes_in': {'key': 'primarybytesIn', 'type': 'long'},
        'primarybytes_out': {'key': 'primarybytesOut', 'type': 'long'},
        'secondarybytes_in': {'key': 'secondarybytesIn', 'type': 'long'},
        'secondarybytes_out': {'key': 'secondarybytesOut', 'type': 'long'},
    }

    def __init__(self, **kwargs):
        super(ExpressRouteCircuitStats, self).__init__(**kwargs)
        # Each counter is optional and defaults to None when not supplied.
        self.primarybytes_in = kwargs.get('primarybytes_in')
        self.primarybytes_out = kwargs.get('primarybytes_out')
        self.secondarybytes_in = kwargs.get('secondarybytes_in')
        self.secondarybytes_out = kwargs.get('secondarybytes_out')
class ExpressRouteConnection(SubResource):
    """ExpressRouteConnection resource.

    Read-only variables are populated by the server and ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :param id: Resource ID.
    :type id: str
    :param name: Required. The name of the resource.
    :type name: str
    :ivar provisioning_state: Provisioning state of the express route connection resource.
     Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    :param express_route_circuit_peering: The ExpressRoute circuit peering.
    :type express_route_circuit_peering:
     ~azure.mgmt.network.v2020_04_01.models.ExpressRouteCircuitPeeringId
    :param authorization_key: Authorization key used to establish the connection.
    :type authorization_key: str
    :param routing_weight: Routing weight associated with the connection.
    :type routing_weight: int
    :param enable_internet_security: Enable internet security.
    :type enable_internet_security: bool
    :param routing_configuration: Routing Configuration indicating the associated and
     propagated route tables on this connection.
    :type routing_configuration: ~azure.mgmt.network.v2020_04_01.models.RoutingConfiguration
    """

    _validation = {
        'name': {'required': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'express_route_circuit_peering': {'key': 'properties.expressRouteCircuitPeering', 'type': 'ExpressRouteCircuitPeeringId'},
        'authorization_key': {'key': 'properties.authorizationKey', 'type': 'str'},
        'routing_weight': {'key': 'properties.routingWeight', 'type': 'int'},
        'enable_internet_security': {'key': 'properties.enableInternetSecurity', 'type': 'bool'},
        'routing_configuration': {'key': 'properties.routingConfiguration', 'type': 'RoutingConfiguration'},
    }

    def __init__(self, **kwargs):
        super(ExpressRouteConnection, self).__init__(**kwargs)
        # 'name' is required: a missing keyword raises KeyError here.
        self.name = kwargs['name']
        # Read-only; populated by the server.
        self.provisioning_state = None
        self.express_route_circuit_peering = kwargs.get('express_route_circuit_peering')
        self.authorization_key = kwargs.get('authorization_key')
        self.routing_weight = kwargs.get('routing_weight')
        self.enable_internet_security = kwargs.get('enable_internet_security')
        self.routing_configuration = kwargs.get('routing_configuration')
class ExpressRouteConnectionId(msrest.serialization.Model):
    """The ID of the ExpressRouteConnection.

    Read-only variables are populated by the server and ignored when sending a request.

    :ivar id: The ID of the ExpressRouteConnection.
    :vartype id: str
    """

    _validation = {
        'id': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ExpressRouteConnectionId, self).__init__(**kwargs)
        # Read-only; populated by the server.
        self.id = None
class ExpressRouteConnectionList(msrest.serialization.Model):
    """ExpressRouteConnection list.

    :param value: The list of ExpressRoute connections.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.ExpressRouteConnection]
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[ExpressRouteConnection]'},
    }

    def __init__(self, **kwargs):
        super(ExpressRouteConnectionList, self).__init__(**kwargs)
        # Optional; defaults to None when not supplied.
        self.value = kwargs.get('value')
class ExpressRouteCrossConnection(Resource):
    """ExpressRouteCrossConnection resource.

    Read-only variables are populated by the server and ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :ivar etag: Unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar primary_azure_port: Name of the primary port.
    :vartype primary_azure_port: str
    :ivar secondary_azure_port: Name of the secondary port.
    :vartype secondary_azure_port: str
    :ivar s_tag: Identifier of the circuit traffic.
    :vartype s_tag: int
    :param peering_location: Peering location of the ExpressRoute circuit.
    :type peering_location: str
    :param bandwidth_in_mbps: Circuit bandwidth in Mbps.
    :type bandwidth_in_mbps: int
    :param express_route_circuit: The ExpressRouteCircuit.
    :type express_route_circuit:
     ~azure.mgmt.network.v2020_04_01.models.ExpressRouteCircuitReference
    :param service_provider_provisioning_state: Provisioning state of the circuit in the
     connectivity provider system. Possible values include: "NotProvisioned", "Provisioning",
     "Provisioned", "Deprovisioning".
    :type service_provider_provisioning_state: str or
     ~azure.mgmt.network.v2020_04_01.models.ServiceProviderProvisioningState
    :param service_provider_notes: Additional read only notes set by the connectivity provider.
    :type service_provider_notes: str
    :ivar provisioning_state: Provisioning state of the express route cross connection
     resource. Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    :param peerings: The list of peerings.
    :type peerings: list[~azure.mgmt.network.v2020_04_01.models.ExpressRouteCrossConnectionPeering]
    """

    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'etag': {'readonly': True},
        'primary_azure_port': {'readonly': True},
        'secondary_azure_port': {'readonly': True},
        's_tag': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'etag': {'key': 'etag', 'type': 'str'},
        'primary_azure_port': {'key': 'properties.primaryAzurePort', 'type': 'str'},
        'secondary_azure_port': {'key': 'properties.secondaryAzurePort', 'type': 'str'},
        's_tag': {'key': 'properties.sTag', 'type': 'int'},
        'peering_location': {'key': 'properties.peeringLocation', 'type': 'str'},
        'bandwidth_in_mbps': {'key': 'properties.bandwidthInMbps', 'type': 'int'},
        'express_route_circuit': {'key': 'properties.expressRouteCircuit', 'type': 'ExpressRouteCircuitReference'},
        'service_provider_provisioning_state': {'key': 'properties.serviceProviderProvisioningState', 'type': 'str'},
        'service_provider_notes': {'key': 'properties.serviceProviderNotes', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'peerings': {'key': 'properties.peerings', 'type': '[ExpressRouteCrossConnectionPeering]'},
    }

    def __init__(self, **kwargs):
        super(ExpressRouteCrossConnection, self).__init__(**kwargs)
        # Read-only attributes start as None and are populated by the server.
        self.etag = None
        self.primary_azure_port = None
        self.secondary_azure_port = None
        self.s_tag = None
        self.peering_location = kwargs.get('peering_location')
        self.bandwidth_in_mbps = kwargs.get('bandwidth_in_mbps')
        self.express_route_circuit = kwargs.get('express_route_circuit')
        self.service_provider_provisioning_state = kwargs.get('service_provider_provisioning_state')
        self.service_provider_notes = kwargs.get('service_provider_notes')
        self.provisioning_state = None
        self.peerings = kwargs.get('peerings')
class ExpressRouteCrossConnectionListResult(msrest.serialization.Model):
    """Result payload for the ListExpressRouteCrossConnection API service call.

    Read-only variables are populated by the server and ignored when sending a request.

    :param value: A list of ExpressRouteCrossConnection resources.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.ExpressRouteCrossConnection]
    :ivar next_link: URL for fetching the next page of results.
    :vartype next_link: str
    """

    _validation = {
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[ExpressRouteCrossConnection]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ExpressRouteCrossConnectionListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value')
        # Read-only; populated by the server.
        self.next_link = None
class ExpressRouteCrossConnectionPeering(SubResource):
    """Peering in an ExpressRoute Cross Connection resource.

    Read-only variables are populated by the server and ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the resource, unique within a resource group. This name can be
     used to access the resource.
    :type name: str
    :ivar etag: Unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param peering_type: The peering type. Possible values include: "AzurePublicPeering",
     "AzurePrivatePeering", "MicrosoftPeering".
    :type peering_type: str or ~azure.mgmt.network.v2020_04_01.models.ExpressRoutePeeringType
    :param state: The peering state. Possible values include: "Disabled", "Enabled".
    :type state: str or ~azure.mgmt.network.v2020_04_01.models.ExpressRoutePeeringState
    :ivar azure_asn: The Azure ASN.
    :vartype azure_asn: int
    :param peer_asn: The peer ASN.
    :type peer_asn: long
    :param primary_peer_address_prefix: The primary address prefix.
    :type primary_peer_address_prefix: str
    :param secondary_peer_address_prefix: The secondary address prefix.
    :type secondary_peer_address_prefix: str
    :ivar primary_azure_port: The primary port.
    :vartype primary_azure_port: str
    :ivar secondary_azure_port: The secondary port.
    :vartype secondary_azure_port: str
    :param shared_key: The shared key.
    :type shared_key: str
    :param vlan_id: The VLAN ID.
    :type vlan_id: int
    :param microsoft_peering_config: The Microsoft peering configuration.
    :type microsoft_peering_config:
     ~azure.mgmt.network.v2020_04_01.models.ExpressRouteCircuitPeeringConfig
    :ivar provisioning_state: Provisioning state of the express route cross connection peering
     resource. Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    :param gateway_manager_etag: The GatewayManager Etag.
    :type gateway_manager_etag: str
    :ivar last_modified_by: Who was the last to modify the peering.
    :vartype last_modified_by: str
    :param ipv6_peering_config: The IPv6 peering configuration.
    :type ipv6_peering_config:
     ~azure.mgmt.network.v2020_04_01.models.Ipv6ExpressRouteCircuitPeeringConfig
    """

    _validation = {
        'etag': {'readonly': True},
        'azure_asn': {'readonly': True},
        'peer_asn': {'maximum': 4294967295, 'minimum': 1},
        'primary_azure_port': {'readonly': True},
        'secondary_azure_port': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'last_modified_by': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'peering_type': {'key': 'properties.peeringType', 'type': 'str'},
        'state': {'key': 'properties.state', 'type': 'str'},
        'azure_asn': {'key': 'properties.azureASN', 'type': 'int'},
        'peer_asn': {'key': 'properties.peerASN', 'type': 'long'},
        'primary_peer_address_prefix': {'key': 'properties.primaryPeerAddressPrefix', 'type': 'str'},
        'secondary_peer_address_prefix': {'key': 'properties.secondaryPeerAddressPrefix', 'type': 'str'},
        'primary_azure_port': {'key': 'properties.primaryAzurePort', 'type': 'str'},
        'secondary_azure_port': {'key': 'properties.secondaryAzurePort', 'type': 'str'},
        'shared_key': {'key': 'properties.sharedKey', 'type': 'str'},
        'vlan_id': {'key': 'properties.vlanId', 'type': 'int'},
        'microsoft_peering_config': {'key': 'properties.microsoftPeeringConfig', 'type': 'ExpressRouteCircuitPeeringConfig'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'gateway_manager_etag': {'key': 'properties.gatewayManagerEtag', 'type': 'str'},
        'last_modified_by': {'key': 'properties.lastModifiedBy', 'type': 'str'},
        'ipv6_peering_config': {'key': 'properties.ipv6PeeringConfig', 'type': 'Ipv6ExpressRouteCircuitPeeringConfig'},
    }

    def __init__(self, **kwargs):
        super(ExpressRouteCrossConnectionPeering, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        # Read-only attributes start as None and are populated by the server.
        self.etag = None
        self.peering_type = kwargs.get('peering_type')
        self.state = kwargs.get('state')
        self.azure_asn = None
        self.peer_asn = kwargs.get('peer_asn')
        self.primary_peer_address_prefix = kwargs.get('primary_peer_address_prefix')
        self.secondary_peer_address_prefix = kwargs.get('secondary_peer_address_prefix')
        self.primary_azure_port = None
        self.secondary_azure_port = None
        self.shared_key = kwargs.get('shared_key')
        self.vlan_id = kwargs.get('vlan_id')
        self.microsoft_peering_config = kwargs.get('microsoft_peering_config')
        self.provisioning_state = None
        self.gateway_manager_etag = kwargs.get('gateway_manager_etag')
        self.last_modified_by = None
        self.ipv6_peering_config = kwargs.get('ipv6_peering_config')
class ExpressRouteCrossConnectionPeeringList(msrest.serialization.Model):
    """Result payload for the ListPeering API service call; retrieves all peerings that belong to an ExpressRouteCrossConnection.

    Read-only variables are populated by the server and ignored when sending a request.

    :param value: The peerings in an express route cross connection.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.ExpressRouteCrossConnectionPeering]
    :ivar next_link: URL for fetching the next page of results.
    :vartype next_link: str
    """

    _validation = {
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[ExpressRouteCrossConnectionPeering]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ExpressRouteCrossConnectionPeeringList, self).__init__(**kwargs)
        self.value = kwargs.get('value')
        # Read-only; populated by the server.
        self.next_link = None
class ExpressRouteCrossConnectionRoutesTableSummary(msrest.serialization.Model):
    """The routes table associated with the ExpressRouteCircuit.

    :param neighbor: IP address of the neighbor router.
    :type neighbor: str
    :param asn: Autonomous system number.
    :type asn: int
    :param up_down: How long the BGP session has been in the Established state, or
     the current status if not in the Established state.
    :type up_down: str
    :param state_or_prefixes_received: Current state of the BGP session, and the number of
     prefixes received from a neighbor or peer group.
    :type state_or_prefixes_received: str
    """

    _attribute_map = {
        'neighbor': {'key': 'neighbor', 'type': 'str'},
        'asn': {'key': 'asn', 'type': 'int'},
        'up_down': {'key': 'upDown', 'type': 'str'},
        'state_or_prefixes_received': {'key': 'stateOrPrefixesReceived', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ExpressRouteCrossConnectionRoutesTableSummary, self).__init__(**kwargs)
        # All fields are optional and default to None when not supplied.
        self.neighbor = kwargs.get('neighbor')
        self.asn = kwargs.get('asn')
        self.up_down = kwargs.get('up_down')
        self.state_or_prefixes_received = kwargs.get('state_or_prefixes_received')
class ExpressRouteCrossConnectionsRoutesTableSummaryListResult(msrest.serialization.Model):
    """Result payload for the ListRoutesTable operation on Express Route Cross Connections.

    Read-only variables are populated by the server and ignored when sending a request.

    :param value: A list of the routes table.
    :type value:
     list[~azure.mgmt.network.v2020_04_01.models.ExpressRouteCrossConnectionRoutesTableSummary]
    :ivar next_link: URL for fetching the next page of results.
    :vartype next_link: str
    """

    _validation = {
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[ExpressRouteCrossConnectionRoutesTableSummary]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ExpressRouteCrossConnectionsRoutesTableSummaryListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value')
        # Read-only; populated by the server.
        self.next_link = None
class ExpressRouteGateway(Resource):
    """ExpressRoute gateway resource.

    Read-only variables are populated by the server and ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :ivar etag: Unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param auto_scale_configuration: Configuration for auto scaling.
    :type auto_scale_configuration:
     ~azure.mgmt.network.v2020_04_01.models.ExpressRouteGatewayPropertiesAutoScaleConfiguration
    :ivar express_route_connections: List of ExpressRoute connections to the ExpressRoute gateway.
    :vartype express_route_connections:
     list[~azure.mgmt.network.v2020_04_01.models.ExpressRouteConnection]
    :ivar provisioning_state: Provisioning state of the express route gateway resource.
     Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    :param virtual_hub: The Virtual Hub where the ExpressRoute gateway is or will be deployed.
    :type virtual_hub: ~azure.mgmt.network.v2020_04_01.models.VirtualHubId
    """

    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'etag': {'readonly': True},
        'express_route_connections': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'etag': {'key': 'etag', 'type': 'str'},
        'auto_scale_configuration': {'key': 'properties.autoScaleConfiguration', 'type': 'ExpressRouteGatewayPropertiesAutoScaleConfiguration'},
        'express_route_connections': {'key': 'properties.expressRouteConnections', 'type': '[ExpressRouteConnection]'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'virtual_hub': {'key': 'properties.virtualHub', 'type': 'VirtualHubId'},
    }

    def __init__(self, **kwargs):
        super(ExpressRouteGateway, self).__init__(**kwargs)
        # Read-only attributes start as None and are populated by the server.
        self.etag = None
        self.auto_scale_configuration = kwargs.get('auto_scale_configuration')
        self.express_route_connections = None
        self.provisioning_state = None
        self.virtual_hub = kwargs.get('virtual_hub')
class ExpressRouteGatewayList(msrest.serialization.Model):
    """List of ExpressRoute gateways.

    :param value: List of ExpressRoute gateways.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.ExpressRouteGateway]
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[ExpressRouteGateway]'},
    }

    def __init__(self, **kwargs):
        super(ExpressRouteGatewayList, self).__init__(**kwargs)
        # Optional; defaults to None when not supplied.
        self.value = kwargs.get('value')
class ExpressRouteGatewayPropertiesAutoScaleConfiguration(msrest.serialization.Model):
    """Configuration for auto scaling.

    :param bounds: Minimum and maximum number of scale units to deploy.
    :type bounds:
     ~azure.mgmt.network.v2020_04_01.models.ExpressRouteGatewayPropertiesAutoScaleConfigurationBounds
    """

    _attribute_map = {
        'bounds': {'key': 'bounds', 'type': 'ExpressRouteGatewayPropertiesAutoScaleConfigurationBounds'},
    }

    def __init__(self, **kwargs):
        super(ExpressRouteGatewayPropertiesAutoScaleConfiguration, self).__init__(**kwargs)
        # Optional; defaults to None when not supplied.
        self.bounds = kwargs.get('bounds')
class ExpressRouteGatewayPropertiesAutoScaleConfigurationBounds(msrest.serialization.Model):
    """Minimum and maximum number of scale units to deploy.

    :param min: Minimum number of scale units deployed for the ExpressRoute gateway.
    :type min: int
    :param max: Maximum number of scale units deployed for the ExpressRoute gateway.
    :type max: int
    """

    _attribute_map = {
        'min': {'key': 'min', 'type': 'int'},
        'max': {'key': 'max', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        super(ExpressRouteGatewayPropertiesAutoScaleConfigurationBounds, self).__init__(**kwargs)
        # Both bounds are optional and default to None when not supplied.
        self.min = kwargs.get('min')
        self.max = kwargs.get('max')
class ExpressRouteLink(SubResource):
    """ExpressRouteLink child resource definition.

    Read-only variables are populated by the server and ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the child port resource, unique among child port resources of the
     parent.
    :type name: str
    :ivar etag: Unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar router_name: Name of the Azure router associated with the physical port.
    :vartype router_name: str
    :ivar interface_name: Name of the Azure router interface.
    :vartype interface_name: str
    :ivar patch_panel_id: Mapping between the physical port and the patch panel port.
    :vartype patch_panel_id: str
    :ivar rack_id: Mapping of the physical patch panel to the rack.
    :vartype rack_id: str
    :ivar connector_type: Physical fiber port type. Possible values include: "LC", "SC".
    :vartype connector_type: str or
     ~azure.mgmt.network.v2020_04_01.models.ExpressRouteLinkConnectorType
    :param admin_state: Administrative state of the physical port. Possible values include:
     "Enabled", "Disabled".
    :type admin_state: str or ~azure.mgmt.network.v2020_04_01.models.ExpressRouteLinkAdminState
    :ivar provisioning_state: Provisioning state of the express route link resource. Possible
     values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    :param mac_sec_config: MacSec configuration.
    :type mac_sec_config: ~azure.mgmt.network.v2020_04_01.models.ExpressRouteLinkMacSecConfig
    """

    _validation = {
        'etag': {'readonly': True},
        'router_name': {'readonly': True},
        'interface_name': {'readonly': True},
        'patch_panel_id': {'readonly': True},
        'rack_id': {'readonly': True},
        'connector_type': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'router_name': {'key': 'properties.routerName', 'type': 'str'},
        'interface_name': {'key': 'properties.interfaceName', 'type': 'str'},
        'patch_panel_id': {'key': 'properties.patchPanelId', 'type': 'str'},
        'rack_id': {'key': 'properties.rackId', 'type': 'str'},
        'connector_type': {'key': 'properties.connectorType', 'type': 'str'},
        'admin_state': {'key': 'properties.adminState', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'mac_sec_config': {'key': 'properties.macSecConfig', 'type': 'ExpressRouteLinkMacSecConfig'},
    }

    def __init__(self, **kwargs):
        super(ExpressRouteLink, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        # Read-only attributes start as None and are populated by the server.
        self.etag = None
        self.router_name = None
        self.interface_name = None
        self.patch_panel_id = None
        self.rack_id = None
        self.connector_type = None
        self.admin_state = kwargs.get('admin_state')
        self.provisioning_state = None
        self.mac_sec_config = kwargs.get('mac_sec_config')
class ExpressRouteLinkListResult(msrest.serialization.Model):
    """Result payload for the ListExpressRouteLinks API service call.

    :param value: The list of ExpressRouteLink sub-resources.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.ExpressRouteLink]
    :param next_link: URL for fetching the next page of results.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[ExpressRouteLink]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ExpressRouteLinkListResult, self).__init__(**kwargs)
        # Optional keyword arguments default to None when not supplied.
        self.value = kwargs.get('value')
        self.next_link = kwargs.get('next_link')
class ExpressRouteLinkMacSecConfig(msrest.serialization.Model):
    """ExpressRouteLink Mac Security Configuration.

    :param ckn_secret_identifier: Keyvault Secret Identifier URL containing the Mac security CKN
     key.
    :type ckn_secret_identifier: str
    :param cak_secret_identifier: Keyvault Secret Identifier URL containing the Mac security CAK
     key.
    :type cak_secret_identifier: str
    :param cipher: Mac security cipher. Possible values include: "gcm-aes-128", "gcm-aes-256".
    :type cipher: str or ~azure.mgmt.network.v2020_04_01.models.ExpressRouteLinkMacSecCipher
    """

    _attribute_map = {
        'ckn_secret_identifier': {'key': 'cknSecretIdentifier', 'type': 'str'},
        'cak_secret_identifier': {'key': 'cakSecretIdentifier', 'type': 'str'},
        'cipher': {'key': 'cipher', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ExpressRouteLinkMacSecConfig, self).__init__(**kwargs)
        # All fields are optional and default to None when not supplied.
        self.ckn_secret_identifier = kwargs.get('ckn_secret_identifier')
        self.cak_secret_identifier = kwargs.get('cak_secret_identifier')
        self.cipher = kwargs.get('cipher')
class ExpressRoutePort(Resource):
    """ExpressRoutePort resource definition.

    Read-only variables are populated by the server and ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :ivar etag: Unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param identity: The identity of ExpressRoutePort, if configured.
    :type identity: ~azure.mgmt.network.v2020_04_01.models.ManagedServiceIdentity
    :param peering_location: Name of the peering location that the ExpressRoutePort is mapped
     to physically.
    :type peering_location: str
    :param bandwidth_in_gbps: Bandwidth of procured ports in Gbps.
    :type bandwidth_in_gbps: int
    :ivar provisioned_bandwidth_in_gbps: Aggregate Gbps of associated circuit bandwidths.
    :vartype provisioned_bandwidth_in_gbps: float
    :ivar mtu: Maximum transmission unit of the physical port pair(s).
    :vartype mtu: str
    :param encapsulation: Encapsulation method on physical ports. Possible values include:
     "Dot1Q", "QinQ".
    :type encapsulation: str or
     ~azure.mgmt.network.v2020_04_01.models.ExpressRoutePortsEncapsulation
    :ivar ether_type: Ether type of the physical port.
    :vartype ether_type: str
    :ivar allocation_date: Date of the physical port allocation to be used in the Letter of
     Authorization.
    :vartype allocation_date: str
    :param links: The set of physical links of the ExpressRoutePort resource.
    :type links: list[~azure.mgmt.network.v2020_04_01.models.ExpressRouteLink]
    :ivar circuits: Reference to the ExpressRoute circuit(s) provisioned on this
     ExpressRoutePort resource.
    :vartype circuits: list[~azure.mgmt.network.v2020_04_01.models.SubResource]
    :ivar provisioning_state: Provisioning state of the express route port resource. Possible
     values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    :ivar resource_guid: The resource GUID property of the express route port resource.
    :vartype resource_guid: str
    """

    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'etag': {'readonly': True},
        'provisioned_bandwidth_in_gbps': {'readonly': True},
        'mtu': {'readonly': True},
        'ether_type': {'readonly': True},
        'allocation_date': {'readonly': True},
        'circuits': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'resource_guid': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'etag': {'key': 'etag', 'type': 'str'},
        'identity': {'key': 'identity', 'type': 'ManagedServiceIdentity'},
        'peering_location': {'key': 'properties.peeringLocation', 'type': 'str'},
        'bandwidth_in_gbps': {'key': 'properties.bandwidthInGbps', 'type': 'int'},
        'provisioned_bandwidth_in_gbps': {'key': 'properties.provisionedBandwidthInGbps', 'type': 'float'},
        'mtu': {'key': 'properties.mtu', 'type': 'str'},
        'encapsulation': {'key': 'properties.encapsulation', 'type': 'str'},
        'ether_type': {'key': 'properties.etherType', 'type': 'str'},
        'allocation_date': {'key': 'properties.allocationDate', 'type': 'str'},
        'links': {'key': 'properties.links', 'type': '[ExpressRouteLink]'},
        'circuits': {'key': 'properties.circuits', 'type': '[SubResource]'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ExpressRoutePort, self).__init__(**kwargs)
        # Read-only attributes start as None and are populated by the server.
        self.etag = None
        self.identity = kwargs.get('identity')
        self.peering_location = kwargs.get('peering_location')
        self.bandwidth_in_gbps = kwargs.get('bandwidth_in_gbps')
        self.provisioned_bandwidth_in_gbps = None
        self.mtu = None
        self.encapsulation = kwargs.get('encapsulation')
        self.ether_type = None
        self.allocation_date = None
        self.links = kwargs.get('links')
        self.circuits = None
        self.provisioning_state = None
        self.resource_guid = None
class ExpressRoutePortListResult(msrest.serialization.Model):
    """Result payload for the ListExpressRoutePorts API service call.

    :param value: A list of ExpressRoutePort resources.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.ExpressRoutePort]
    :param next_link: URL for fetching the next page of results.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[ExpressRoutePort]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ExpressRoutePortListResult, self).__init__(**kwargs)
        # Optional keyword arguments default to None when not supplied.
        self.value = kwargs.get('value')
        self.next_link = kwargs.get('next_link')
class ExpressRoutePortsLocation(Resource):
    """Definition of the ExpressRoutePorts peering location resource.

    Read-only variables are populated by the server and ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :ivar address: Address of the peering location.
    :vartype address: str
    :ivar contact: Contact details of peering locations.
    :vartype contact: str
    :param available_bandwidths: The inventory of available ExpressRoutePort bandwidths.
    :type available_bandwidths:
     list[~azure.mgmt.network.v2020_04_01.models.ExpressRoutePortsLocationBandwidths]
    :ivar provisioning_state: Provisioning state of the express route port location resource.
     Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'address': {'readonly': True},
        'contact': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'address': {'key': 'properties.address', 'type': 'str'},
        'contact': {'key': 'properties.contact', 'type': 'str'},
        'available_bandwidths': {'key': 'properties.availableBandwidths', 'type': '[ExpressRoutePortsLocationBandwidths]'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ExpressRoutePortsLocation, self).__init__(**kwargs)
        # Read-only attributes start as None and are populated by the server.
        self.address = None
        self.contact = None
        self.available_bandwidths = kwargs.get('available_bandwidths')
        self.provisioning_state = None
class ExpressRoutePortsLocationBandwidths(msrest.serialization.Model):
    """Real-time inventory entry for an available ExpressRoute port bandwidth.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar offer_name: Bandwidth descriptive name.
    :vartype offer_name: str
    :ivar value_in_gbps: Bandwidth value in Gbps.
    :vartype value_in_gbps: int
    """

    # Both fields are read-only: the service fills them in on responses.
    _validation = {
        'offer_name': {'readonly': True},
        'value_in_gbps': {'readonly': True},
    }

    _attribute_map = {
        'offer_name': {'key': 'offerName', 'type': 'str'},
        'value_in_gbps': {'key': 'valueInGbps', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        super(ExpressRoutePortsLocationBandwidths, self).__init__(**kwargs)
        self.offer_name = None
        self.value_in_gbps = None
class ExpressRoutePortsLocationListResult(msrest.serialization.Model):
    """Paged response for the ListExpressRoutePortsLocations API service call.

    :param value: The list of all ExpressRoutePort peering locations.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.ExpressRoutePortsLocation]
    :param next_link: The URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[ExpressRoutePortsLocation]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ExpressRoutePortsLocationListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value')
        self.next_link = kwargs.get('next_link')
class ExpressRouteServiceProvider(Resource):
    """An ExpressRouteResourceProvider object.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param peering_locations: A list of peering locations.
    :type peering_locations: list[str]
    :param bandwidths_offered: A list of bandwidths offered.
    :type bandwidths_offered:
     list[~azure.mgmt.network.v2020_04_01.models.ExpressRouteServiceProviderBandwidthsOffered]
    :ivar provisioning_state: The provisioning state of the express route service provider
     resource. Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'peering_locations': {'key': 'properties.peeringLocations', 'type': '[str]'},
        'bandwidths_offered': {'key': 'properties.bandwidthsOffered', 'type': '[ExpressRouteServiceProviderBandwidthsOffered]'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ExpressRouteServiceProvider, self).__init__(**kwargs)
        self.peering_locations = kwargs.get('peering_locations')
        self.bandwidths_offered = kwargs.get('bandwidths_offered')
        # Read-only; populated by the service.
        self.provisioning_state = None
class ExpressRouteServiceProviderBandwidthsOffered(msrest.serialization.Model):
    """A bandwidth offering within an ExpressRouteServiceProvider resource.

    :param offer_name: The OfferName.
    :type offer_name: str
    :param value_in_mbps: The ValueInMbps.
    :type value_in_mbps: int
    """

    _attribute_map = {
        'offer_name': {'key': 'offerName', 'type': 'str'},
        'value_in_mbps': {'key': 'valueInMbps', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        super(ExpressRouteServiceProviderBandwidthsOffered, self).__init__(**kwargs)
        self.offer_name = kwargs.get('offer_name')
        self.value_in_mbps = kwargs.get('value_in_mbps')
class ExpressRouteServiceProviderListResult(msrest.serialization.Model):
    """Paged response for the ListExpressRouteServiceProvider API service call.

    :param value: A list of ExpressRouteResourceProvider resources.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.ExpressRouteServiceProvider]
    :param next_link: The URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[ExpressRouteServiceProvider]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ExpressRouteServiceProviderListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value')
        self.next_link = kwargs.get('next_link')
class FirewallPolicy(Resource):
    """FirewallPolicy Resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar rule_groups: List of references to FirewallPolicyRuleGroups.
    :vartype rule_groups: list[~azure.mgmt.network.v2020_04_01.models.SubResource]
    :ivar provisioning_state: The provisioning state of the firewall policy resource. Possible
     values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    :param base_policy: The parent firewall policy from which rules are inherited.
    :type base_policy: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :ivar firewalls: List of references to Azure Firewalls that this Firewall Policy is associated
     with.
    :vartype firewalls: list[~azure.mgmt.network.v2020_04_01.models.SubResource]
    :ivar child_policies: List of references to Child Firewall Policies.
    :vartype child_policies: list[~azure.mgmt.network.v2020_04_01.models.SubResource]
    :param threat_intel_mode: The operation mode for Threat Intelligence. Possible values include:
     "Alert", "Deny", "Off".
    :type threat_intel_mode: str or
     ~azure.mgmt.network.v2020_04_01.models.AzureFirewallThreatIntelMode
    :param threat_intel_whitelist: ThreatIntel Whitelist for Firewall Policy.
    :type threat_intel_whitelist:
     ~azure.mgmt.network.v2020_04_01.models.FirewallPolicyThreatIntelWhitelist
    """

    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'etag': {'readonly': True},
        'rule_groups': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'firewalls': {'readonly': True},
        'child_policies': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'etag': {'key': 'etag', 'type': 'str'},
        'rule_groups': {'key': 'properties.ruleGroups', 'type': '[SubResource]'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'base_policy': {'key': 'properties.basePolicy', 'type': 'SubResource'},
        'firewalls': {'key': 'properties.firewalls', 'type': '[SubResource]'},
        'child_policies': {'key': 'properties.childPolicies', 'type': '[SubResource]'},
        'threat_intel_mode': {'key': 'properties.threatIntelMode', 'type': 'str'},
        'threat_intel_whitelist': {'key': 'properties.threatIntelWhitelist', 'type': 'FirewallPolicyThreatIntelWhitelist'},
    }

    def __init__(self, **kwargs):
        super(FirewallPolicy, self).__init__(**kwargs)
        # Read-only, server-populated attributes.
        self.etag = None
        self.rule_groups = None
        self.provisioning_state = None
        self.firewalls = None
        self.child_policies = None
        # Caller-settable attributes.
        self.base_policy = kwargs.get('base_policy')
        self.threat_intel_mode = kwargs.get('threat_intel_mode')
        self.threat_intel_whitelist = kwargs.get('threat_intel_whitelist')
class FirewallPolicyRule(msrest.serialization.Model):
    """Base class describing the properties of a firewall policy rule.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: FirewallPolicyFilterRule, FirewallPolicyNatRule.

    All required parameters must be populated in order to send to Azure.

    :param rule_type: Required. The type of the rule.Constant filled by server. Possible values
     include: "FirewallPolicyNatRule", "FirewallPolicyFilterRule".
    :type rule_type: str or ~azure.mgmt.network.v2020_04_01.models.FirewallPolicyRuleType
    :param name: The name of the rule.
    :type name: str
    :param priority: Priority of the Firewall Policy Rule resource.
    :type priority: int
    """

    _validation = {
        'rule_type': {'required': True},
        'priority': {'maximum': 65000, 'minimum': 100},
    }

    _attribute_map = {
        'rule_type': {'key': 'ruleType', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'priority': {'key': 'priority', 'type': 'int'},
    }

    # msrest uses this map to pick the concrete subclass during deserialization.
    _subtype_map = {
        'rule_type': {'FirewallPolicyFilterRule': 'FirewallPolicyFilterRule', 'FirewallPolicyNatRule': 'FirewallPolicyNatRule'}
    }

    def __init__(self, **kwargs):
        super(FirewallPolicyRule, self).__init__(**kwargs)
        # Discriminator; concrete subclasses overwrite this with their constant.
        self.rule_type = None  # type: Optional[str]
        self.name = kwargs.get('name')
        self.priority = kwargs.get('priority')
class FirewallPolicyFilterRule(FirewallPolicyRule):
    """Firewall Policy Filter Rule.

    All required parameters must be populated in order to send to Azure.

    :param rule_type: Required. The type of the rule.Constant filled by server. Possible values
     include: "FirewallPolicyNatRule", "FirewallPolicyFilterRule".
    :type rule_type: str or ~azure.mgmt.network.v2020_04_01.models.FirewallPolicyRuleType
    :param name: The name of the rule.
    :type name: str
    :param priority: Priority of the Firewall Policy Rule resource.
    :type priority: int
    :param action: The action type of a Filter rule.
    :type action: ~azure.mgmt.network.v2020_04_01.models.FirewallPolicyFilterRuleAction
    :param rule_conditions: Collection of rule conditions used by a rule.
    :type rule_conditions: list[~azure.mgmt.network.v2020_04_01.models.FirewallPolicyRuleCondition]
    """

    _validation = {
        'rule_type': {'required': True},
        'priority': {'maximum': 65000, 'minimum': 100},
    }

    _attribute_map = {
        'rule_type': {'key': 'ruleType', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'priority': {'key': 'priority', 'type': 'int'},
        'action': {'key': 'action', 'type': 'FirewallPolicyFilterRuleAction'},
        'rule_conditions': {'key': 'ruleConditions', 'type': '[FirewallPolicyRuleCondition]'},
    }

    def __init__(self, **kwargs):
        super(FirewallPolicyFilterRule, self).__init__(**kwargs)
        # Fixed discriminator value for this subclass.
        self.rule_type = 'FirewallPolicyFilterRule'  # type: str
        self.action = kwargs.get('action')
        self.rule_conditions = kwargs.get('rule_conditions')
class FirewallPolicyFilterRuleAction(msrest.serialization.Model):
    """The action applied by a FirewallPolicy filter rule.

    :param type: The type of action. Possible values include: "Allow", "Deny".
    :type type: str or ~azure.mgmt.network.v2020_04_01.models.FirewallPolicyFilterRuleActionType
    """

    _attribute_map = {
        'type': {'key': 'type', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(FirewallPolicyFilterRuleAction, self).__init__(**kwargs)
        self.type = kwargs.get('type')
class FirewallPolicyListResult(msrest.serialization.Model):
    """Paged response for the ListFirewallPolicies API service call.

    :param value: List of Firewall Policies in a resource group.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.FirewallPolicy]
    :param next_link: URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[FirewallPolicy]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(FirewallPolicyListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value')
        self.next_link = kwargs.get('next_link')
class FirewallPolicyNatRule(FirewallPolicyRule):
    """Firewall Policy NAT Rule.

    All required parameters must be populated in order to send to Azure.

    :param rule_type: Required. The type of the rule.Constant filled by server. Possible values
     include: "FirewallPolicyNatRule", "FirewallPolicyFilterRule".
    :type rule_type: str or ~azure.mgmt.network.v2020_04_01.models.FirewallPolicyRuleType
    :param name: The name of the rule.
    :type name: str
    :param priority: Priority of the Firewall Policy Rule resource.
    :type priority: int
    :param action: The action type of a Nat rule.
    :type action: ~azure.mgmt.network.v2020_04_01.models.FirewallPolicyNatRuleAction
    :param translated_address: The translated address for this NAT rule.
    :type translated_address: str
    :param translated_port: The translated port for this NAT rule.
    :type translated_port: str
    :param rule_condition: The match conditions for incoming traffic.
    :type rule_condition: ~azure.mgmt.network.v2020_04_01.models.FirewallPolicyRuleCondition
    """

    _validation = {
        'rule_type': {'required': True},
        'priority': {'maximum': 65000, 'minimum': 100},
    }

    _attribute_map = {
        'rule_type': {'key': 'ruleType', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'priority': {'key': 'priority', 'type': 'int'},
        'action': {'key': 'action', 'type': 'FirewallPolicyNatRuleAction'},
        'translated_address': {'key': 'translatedAddress', 'type': 'str'},
        'translated_port': {'key': 'translatedPort', 'type': 'str'},
        'rule_condition': {'key': 'ruleCondition', 'type': 'FirewallPolicyRuleCondition'},
    }

    def __init__(self, **kwargs):
        super(FirewallPolicyNatRule, self).__init__(**kwargs)
        # Fixed discriminator value for this subclass.
        self.rule_type = 'FirewallPolicyNatRule'  # type: str
        self.action = kwargs.get('action')
        self.translated_address = kwargs.get('translated_address')
        self.translated_port = kwargs.get('translated_port')
        self.rule_condition = kwargs.get('rule_condition')
class FirewallPolicyNatRuleAction(msrest.serialization.Model):
    """The action applied by a FirewallPolicy NAT rule.

    :param type: The type of action. Possible values include: "DNAT".
    :type type: str or ~azure.mgmt.network.v2020_04_01.models.FirewallPolicyNatRuleActionType
    """

    _attribute_map = {
        'type': {'key': 'type', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(FirewallPolicyNatRuleAction, self).__init__(**kwargs)
        self.type = kwargs.get('type')
class FirewallPolicyRuleConditionApplicationProtocol(msrest.serialization.Model):
    """An application rule protocol for a firewall policy rule condition.

    :param protocol_type: Protocol type. Possible values include: "Http", "Https".
    :type protocol_type: str or
     ~azure.mgmt.network.v2020_04_01.models.FirewallPolicyRuleConditionApplicationProtocolType
    :param port: Port number for the protocol, cannot be greater than 64000.
    :type port: int
    """

    _validation = {
        'port': {'maximum': 64000, 'minimum': 0},
    }

    _attribute_map = {
        'protocol_type': {'key': 'protocolType', 'type': 'str'},
        'port': {'key': 'port', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        super(FirewallPolicyRuleConditionApplicationProtocol, self).__init__(**kwargs)
        self.protocol_type = kwargs.get('protocol_type')
        self.port = kwargs.get('port')
class FirewallPolicyRuleGroup(SubResource):
    """Rule Group resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: The name of the resource that is unique within a resource group. This name can be
     used to access the resource.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar type: Rule Group type.
    :vartype type: str
    :param priority: Priority of the Firewall Policy Rule Group resource.
    :type priority: int
    :param rules: Group of Firewall Policy rules.
    :type rules: list[~azure.mgmt.network.v2020_04_01.models.FirewallPolicyRule]
    :ivar provisioning_state: The provisioning state of the firewall policy rule group resource.
     Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    _validation = {
        'etag': {'readonly': True},
        'type': {'readonly': True},
        'priority': {'maximum': 65000, 'minimum': 100},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'priority': {'key': 'properties.priority', 'type': 'int'},
        'rules': {'key': 'properties.rules', 'type': '[FirewallPolicyRule]'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(FirewallPolicyRuleGroup, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        # Read-only, server-populated attributes.
        self.etag = None
        self.type = None
        self.provisioning_state = None
        self.priority = kwargs.get('priority')
        self.rules = kwargs.get('rules')
class FirewallPolicyRuleGroupListResult(msrest.serialization.Model):
    """Paged response for the ListFirewallPolicyRuleGroups API service call.

    :param value: List of FirewallPolicyRuleGroups in a FirewallPolicy.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.FirewallPolicyRuleGroup]
    :param next_link: URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[FirewallPolicyRuleGroup]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(FirewallPolicyRuleGroupListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value')
        self.next_link = kwargs.get('next_link')
class FirewallPolicyThreatIntelWhitelist(msrest.serialization.Model):
    """ThreatIntel Whitelist for Firewall Policy.

    :param ip_addresses: List of IP addresses for the ThreatIntel Whitelist.
    :type ip_addresses: list[str]
    :param fqdns: List of FQDNs for the ThreatIntel Whitelist.
    :type fqdns: list[str]
    """

    _attribute_map = {
        'ip_addresses': {'key': 'ipAddresses', 'type': '[str]'},
        'fqdns': {'key': 'fqdns', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        super(FirewallPolicyThreatIntelWhitelist, self).__init__(**kwargs)
        self.ip_addresses = kwargs.get('ip_addresses')
        self.fqdns = kwargs.get('fqdns')
class FlowLog(Resource):
    """A flow log resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param target_resource_id: ID of network security group to which flow log will be applied.
    :type target_resource_id: str
    :ivar target_resource_guid: Guid of network security group to which flow log will be applied.
    :vartype target_resource_guid: str
    :param storage_id: ID of the storage account which is used to store the flow log.
    :type storage_id: str
    :param enabled: Flag to enable/disable flow logging.
    :type enabled: bool
    :param retention_policy: Parameters that define the retention policy for flow log.
    :type retention_policy: ~azure.mgmt.network.v2020_04_01.models.RetentionPolicyParameters
    :param format: Parameters that define the flow log format.
    :type format: ~azure.mgmt.network.v2020_04_01.models.FlowLogFormatParameters
    :param flow_analytics_configuration: Parameters that define the configuration of traffic
     analytics.
    :type flow_analytics_configuration:
     ~azure.mgmt.network.v2020_04_01.models.TrafficAnalyticsProperties
    :ivar provisioning_state: The provisioning state of the flow log. Possible values include:
     "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'etag': {'readonly': True},
        'target_resource_guid': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'etag': {'key': 'etag', 'type': 'str'},
        'target_resource_id': {'key': 'properties.targetResourceId', 'type': 'str'},
        'target_resource_guid': {'key': 'properties.targetResourceGuid', 'type': 'str'},
        'storage_id': {'key': 'properties.storageId', 'type': 'str'},
        'enabled': {'key': 'properties.enabled', 'type': 'bool'},
        'retention_policy': {'key': 'properties.retentionPolicy', 'type': 'RetentionPolicyParameters'},
        'format': {'key': 'properties.format', 'type': 'FlowLogFormatParameters'},
        'flow_analytics_configuration': {'key': 'properties.flowAnalyticsConfiguration', 'type': 'TrafficAnalyticsProperties'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(FlowLog, self).__init__(**kwargs)
        # Read-only, server-populated attributes.
        self.etag = None
        self.target_resource_guid = None
        self.provisioning_state = None
        # Caller-settable attributes.
        self.target_resource_id = kwargs.get('target_resource_id')
        self.storage_id = kwargs.get('storage_id')
        self.enabled = kwargs.get('enabled')
        self.retention_policy = kwargs.get('retention_policy')
        self.format = kwargs.get('format')
        self.flow_analytics_configuration = kwargs.get('flow_analytics_configuration')
class FlowLogFormatParameters(msrest.serialization.Model):
    """Parameters that define the flow log format.

    :param type: The file type of flow log. Possible values include: "JSON".
    :type type: str or ~azure.mgmt.network.v2020_04_01.models.FlowLogFormatType
    :param version: The version (revision) of the flow log.
    :type version: int
    """

    _attribute_map = {
        'type': {'key': 'type', 'type': 'str'},
        'version': {'key': 'version', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        super(FlowLogFormatParameters, self).__init__(**kwargs)
        self.type = kwargs.get('type')
        # NOTE: version defaults to 0 (not None) per the service specification.
        self.version = kwargs.get('version', 0)
class FlowLogInformation(msrest.serialization.Model):
    """Information on the configuration of flow log and traffic analytics (optional) .

    All required parameters must be populated in order to send to Azure.

    :param target_resource_id: Required. The ID of the resource to configure for flow log and
     traffic analytics (optional) .
    :type target_resource_id: str
    :param flow_analytics_configuration: Parameters that define the configuration of traffic
     analytics.
    :type flow_analytics_configuration:
     ~azure.mgmt.network.v2020_04_01.models.TrafficAnalyticsProperties
    :param storage_id: Required. ID of the storage account which is used to store the flow log.
    :type storage_id: str
    :param enabled: Required. Flag to enable/disable flow logging.
    :type enabled: bool
    :param retention_policy: Parameters that define the retention policy for flow log.
    :type retention_policy: ~azure.mgmt.network.v2020_04_01.models.RetentionPolicyParameters
    :param format: Parameters that define the flow log format.
    :type format: ~azure.mgmt.network.v2020_04_01.models.FlowLogFormatParameters
    """

    _validation = {
        'target_resource_id': {'required': True},
        'storage_id': {'required': True},
        'enabled': {'required': True},
    }

    _attribute_map = {
        'target_resource_id': {'key': 'targetResourceId', 'type': 'str'},
        'flow_analytics_configuration': {'key': 'flowAnalyticsConfiguration', 'type': 'TrafficAnalyticsProperties'},
        'storage_id': {'key': 'properties.storageId', 'type': 'str'},
        'enabled': {'key': 'properties.enabled', 'type': 'bool'},
        'retention_policy': {'key': 'properties.retentionPolicy', 'type': 'RetentionPolicyParameters'},
        'format': {'key': 'properties.format', 'type': 'FlowLogFormatParameters'},
    }

    def __init__(self, **kwargs):
        super(FlowLogInformation, self).__init__(**kwargs)
        # Required parameters: indexed access raises KeyError when missing.
        self.target_resource_id = kwargs['target_resource_id']
        self.storage_id = kwargs['storage_id']
        self.enabled = kwargs['enabled']
        # Optional parameters.
        self.flow_analytics_configuration = kwargs.get('flow_analytics_configuration')
        self.retention_policy = kwargs.get('retention_policy')
        self.format = kwargs.get('format')
class FlowLogListResult(msrest.serialization.Model):
    """Paged list of flow logs.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param value: Information about flow log resource.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.FlowLog]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _validation = {
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[FlowLog]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(FlowLogListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value')
        # Read-only; populated by the service.
        self.next_link = None
class FlowLogStatusParameters(msrest.serialization.Model):
    """Parameters that define a resource to query flow log and traffic analytics (optional) status.

    All required parameters must be populated in order to send to Azure.

    :param target_resource_id: Required. The target resource where getting the flow log and traffic
     analytics (optional) status.
    :type target_resource_id: str
    """

    _validation = {
        'target_resource_id': {'required': True},
    }

    _attribute_map = {
        'target_resource_id': {'key': 'targetResourceId', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(FlowLogStatusParameters, self).__init__(**kwargs)
        # Required: indexed access raises KeyError when missing.
        self.target_resource_id = kwargs['target_resource_id']
class FrontendIPConfiguration(SubResource):
    """Frontend IP address of the load balancer.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: The name of the resource that is unique within the set of frontend IP
     configurations used by the load balancer. This name can be used to access the resource.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar type: Type of the resource.
    :vartype type: str
    :param zones: A list of availability zones denoting the IP allocated for the resource needs to
     come from.
    :type zones: list[str]
    :ivar inbound_nat_rules: An array of references to inbound rules that use this frontend IP.
    :vartype inbound_nat_rules: list[~azure.mgmt.network.v2020_04_01.models.SubResource]
    :ivar inbound_nat_pools: An array of references to inbound pools that use this frontend IP.
    :vartype inbound_nat_pools: list[~azure.mgmt.network.v2020_04_01.models.SubResource]
    :ivar outbound_rules: An array of references to outbound rules that use this frontend IP.
    :vartype outbound_rules: list[~azure.mgmt.network.v2020_04_01.models.SubResource]
    :ivar load_balancing_rules: An array of references to load balancing rules that use this
     frontend IP.
    :vartype load_balancing_rules: list[~azure.mgmt.network.v2020_04_01.models.SubResource]
    :param private_ip_address: The private IP address of the IP configuration.
    :type private_ip_address: str
    :param private_ip_allocation_method: The Private IP allocation method. Possible values include:
     "Static", "Dynamic".
    :type private_ip_allocation_method: str or
     ~azure.mgmt.network.v2020_04_01.models.IPAllocationMethod
    :param private_ip_address_version: Whether the specific ipconfiguration is IPv4 or IPv6.
     Default is taken as IPv4. Possible values include: "IPv4", "IPv6".
    :type private_ip_address_version: str or ~azure.mgmt.network.v2020_04_01.models.IPVersion
    :param subnet: The reference to the subnet resource.
    :type subnet: ~azure.mgmt.network.v2020_04_01.models.Subnet
    :param public_ip_address: The reference to the Public IP resource.
    :type public_ip_address: ~azure.mgmt.network.v2020_04_01.models.PublicIPAddress
    :param public_ip_prefix: The reference to the Public IP Prefix resource.
    :type public_ip_prefix: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :ivar provisioning_state: The provisioning state of the frontend IP configuration resource.
     Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    _validation = {
        'etag': {'readonly': True},
        'type': {'readonly': True},
        'inbound_nat_rules': {'readonly': True},
        'inbound_nat_pools': {'readonly': True},
        'outbound_rules': {'readonly': True},
        'load_balancing_rules': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'zones': {'key': 'zones', 'type': '[str]'},
        'inbound_nat_rules': {'key': 'properties.inboundNatRules', 'type': '[SubResource]'},
        'inbound_nat_pools': {'key': 'properties.inboundNatPools', 'type': '[SubResource]'},
        'outbound_rules': {'key': 'properties.outboundRules', 'type': '[SubResource]'},
        'load_balancing_rules': {'key': 'properties.loadBalancingRules', 'type': '[SubResource]'},
        'private_ip_address': {'key': 'properties.privateIPAddress', 'type': 'str'},
        'private_ip_allocation_method': {'key': 'properties.privateIPAllocationMethod', 'type': 'str'},
        'private_ip_address_version': {'key': 'properties.privateIPAddressVersion', 'type': 'str'},
        'subnet': {'key': 'properties.subnet', 'type': 'Subnet'},
        'public_ip_address': {'key': 'properties.publicIPAddress', 'type': 'PublicIPAddress'},
        'public_ip_prefix': {'key': 'properties.publicIPPrefix', 'type': 'SubResource'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(FrontendIPConfiguration, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        # Read-only, server-populated attributes.
        self.etag = None
        self.type = None
        self.inbound_nat_rules = None
        self.inbound_nat_pools = None
        self.outbound_rules = None
        self.load_balancing_rules = None
        self.provisioning_state = None
        # Caller-settable attributes.
        self.zones = kwargs.get('zones')
        self.private_ip_address = kwargs.get('private_ip_address')
        self.private_ip_allocation_method = kwargs.get('private_ip_allocation_method')
        self.private_ip_address_version = kwargs.get('private_ip_address_version')
        self.subnet = kwargs.get('subnet')
        self.public_ip_address = kwargs.get('public_ip_address')
        self.public_ip_prefix = kwargs.get('public_ip_prefix')
class GatewayRoute(msrest.serialization.Model):
    """Gateway routing details.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar local_address: The gateway's local address.
    :vartype local_address: str
    :ivar network: The route's network prefix.
    :vartype network: str
    :ivar next_hop: The route's next hop.
    :vartype next_hop: str
    :ivar source_peer: The peer this route was learned from.
    :vartype source_peer: str
    :ivar origin: The source this route was learned from.
    :vartype origin: str
    :ivar as_path: The route's AS path sequence.
    :vartype as_path: str
    :ivar weight: The route's weight.
    :vartype weight: int
    """

    # Every field is read-only: this model is only ever returned by the service.
    _validation = {
        'local_address': {'readonly': True},
        'network': {'readonly': True},
        'next_hop': {'readonly': True},
        'source_peer': {'readonly': True},
        'origin': {'readonly': True},
        'as_path': {'readonly': True},
        'weight': {'readonly': True},
    }

    _attribute_map = {
        'local_address': {'key': 'localAddress', 'type': 'str'},
        'network': {'key': 'network', 'type': 'str'},
        'next_hop': {'key': 'nextHop', 'type': 'str'},
        'source_peer': {'key': 'sourcePeer', 'type': 'str'},
        'origin': {'key': 'origin', 'type': 'str'},
        'as_path': {'key': 'asPath', 'type': 'str'},
        'weight': {'key': 'weight', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        super(GatewayRoute, self).__init__(**kwargs)
        self.local_address = None
        self.network = None
        self.next_hop = None
        self.source_peer = None
        self.origin = None
        self.as_path = None
        self.weight = None
class GatewayRouteListResult(msrest.serialization.Model):
    """A list of virtual network gateway routes.

    :param value: List of gateway routes.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.GatewayRoute]
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[GatewayRoute]'},
    }

    def __init__(self, **kwargs):
        super(GatewayRouteListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value')
class GetVpnSitesConfigurationRequest(msrest.serialization.Model):
    """Request body listing the vpn-sites whose configuration should be downloaded.

    All required parameters must be populated in order to send to Azure.

    :param vpn_sites: Resource IDs of the vpn-sites whose config is to be downloaded.
    :type vpn_sites: list[str]
    :param output_blob_sas_url: Required. The SAS URL to which the vpn-site
     configurations are written.
    :type output_blob_sas_url: str
    """

    _validation = {
        'output_blob_sas_url': {'required': True},
    }

    _attribute_map = {
        'vpn_sites': {'key': 'vpnSites', 'type': '[str]'},
        'output_blob_sas_url': {'key': 'outputBlobSasUrl', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(GetVpnSitesConfigurationRequest, self).__init__(**kwargs)
        self.vpn_sites = kwargs.get('vpn_sites')
        # Required: a missing value raises KeyError at construction time.
        self.output_blob_sas_url = kwargs['output_blob_sas_url']
class HTTPConfiguration(msrest.serialization.Model):
    """HTTP settings used by a connectivity check.

    :param method: HTTP method. Possible values include: "Get".
    :type method: str or ~azure.mgmt.network.v2020_04_01.models.HTTPMethod
    :param headers: List of HTTP headers.
    :type headers: list[~azure.mgmt.network.v2020_04_01.models.HTTPHeader]
    :param valid_status_codes: Status codes considered successful.
    :type valid_status_codes: list[int]
    """

    _attribute_map = {
        'method': {'key': 'method', 'type': 'str'},
        'headers': {'key': 'headers', 'type': '[HTTPHeader]'},
        'valid_status_codes': {'key': 'validStatusCodes', 'type': '[int]'},
    }

    def __init__(self, **kwargs):
        super(HTTPConfiguration, self).__init__(**kwargs)
        self.method = kwargs.get('method')
        self.headers = kwargs.get('headers')
        self.valid_status_codes = kwargs.get('valid_status_codes')
class HTTPHeader(msrest.serialization.Model):
    """A single HTTP header name/value pair.

    :param name: The header name.
    :type name: str
    :param value: The header value.
    :type value: str
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'value': {'key': 'value', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(HTTPHeader, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        self.value = kwargs.get('value')
class HubIPAddresses(msrest.serialization.Model):
    """IP addresses associated with an Azure Firewall.

    :param public_ip_addresses: Public IP addresses associated with the firewall.
    :type public_ip_addresses:
     list[~azure.mgmt.network.v2020_04_01.models.AzureFirewallPublicIPAddress]
    :param private_ip_address: Private IP address associated with the firewall.
    :type private_ip_address: str
    """

    _attribute_map = {
        'public_ip_addresses': {'key': 'publicIPAddresses', 'type': '[AzureFirewallPublicIPAddress]'},
        'private_ip_address': {'key': 'privateIPAddress', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(HubIPAddresses, self).__init__(**kwargs)
        self.public_ip_addresses = kwargs.get('public_ip_addresses')
        self.private_ip_address = kwargs.get('private_ip_address')
class HubRoute(msrest.serialization.Model):
    """A single route in a virtual hub RouteTable.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. Name of the route, unique within its RouteTable; used
     to address the route.
    :type name: str
    :param destination_type: Required. The type of destinations (eg: CIDR,
     ResourceId, Service).
    :type destination_type: str
    :param destinations: Required. List of all destinations.
    :type destinations: list[str]
    :param next_hop_type: Required. The type of next hop (eg: ResourceId).
    :type next_hop_type: str
    :param next_hop: Required. NextHop resource ID.
    :type next_hop: str
    """

    _validation = {
        'name': {'required': True},
        'destination_type': {'required': True},
        'destinations': {'required': True},
        'next_hop_type': {'required': True},
        'next_hop': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'destination_type': {'key': 'destinationType', 'type': 'str'},
        'destinations': {'key': 'destinations', 'type': '[str]'},
        'next_hop_type': {'key': 'nextHopType', 'type': 'str'},
        'next_hop': {'key': 'nextHop', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(HubRoute, self).__init__(**kwargs)
        # Every field is required; a missing kwarg raises KeyError immediately.
        self.name = kwargs['name']
        self.destination_type = kwargs['destination_type']
        self.destinations = kwargs['destinations']
        self.next_hop_type = kwargs['next_hop_type']
        self.next_hop = kwargs['next_hop']
class HubRouteTable(SubResource):
    """A RouteTable resource inside a virtual hub.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the resource, unique within its resource group; used
     to access the resource.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is
     updated.
    :vartype etag: str
    :ivar type: Resource type.
    :vartype type: str
    :param routes: List of all routes.
    :type routes: list[~azure.mgmt.network.v2020_04_01.models.HubRoute]
    :param labels: Labels associated with this route table.
    :type labels: list[str]
    :ivar associated_connections: All connections associated with this route
     table.
    :vartype associated_connections: list[str]
    :ivar propagating_connections: All connections that advertise to this route
     table.
    :vartype propagating_connections: list[str]
    :ivar provisioning_state: Provisioning state of the RouteTable resource.
     Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    _validation = {
        'etag': {'readonly': True},
        'type': {'readonly': True},
        'associated_connections': {'readonly': True},
        'propagating_connections': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'routes': {'key': 'properties.routes', 'type': '[HubRoute]'},
        'labels': {'key': 'properties.labels', 'type': '[str]'},
        'associated_connections': {'key': 'properties.associatedConnections', 'type': '[str]'},
        'propagating_connections': {'key': 'properties.propagatingConnections', 'type': '[str]'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(HubRouteTable, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        # Server-populated fields start as None.
        self.etag = None
        self.type = None
        self.routes = kwargs.get('routes')
        self.labels = kwargs.get('labels')
        self.associated_connections = None
        self.propagating_connections = None
        self.provisioning_state = None
class HubVirtualNetworkConnection(SubResource):
    """A connection between a virtual hub and a remote virtual network.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the resource, unique within its resource group; used
     to access the resource.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is
     updated.
    :vartype etag: str
    :param remote_virtual_network: Reference to the remote virtual network.
    :type remote_virtual_network: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param allow_hub_to_remote_vnet_transit: Whether VirtualHub-to-RemoteVnet
     transit is enabled.
    :type allow_hub_to_remote_vnet_transit: bool
    :param allow_remote_vnet_to_use_hub_vnet_gateways: Allow RemoteVnet to use
     the Virtual Hub's gateways.
    :type allow_remote_vnet_to_use_hub_vnet_gateways: bool
    :param enable_internet_security: Enable internet security.
    :type enable_internet_security: bool
    :param routing_configuration: The Routing Configuration indicating the
     associated and propagated route tables on this connection.
    :type routing_configuration: ~azure.mgmt.network.v2020_04_01.models.RoutingConfiguration
    :ivar provisioning_state: Provisioning state of the connection resource.
     Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    _validation = {
        'etag': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'remote_virtual_network': {'key': 'properties.remoteVirtualNetwork', 'type': 'SubResource'},
        'allow_hub_to_remote_vnet_transit': {'key': 'properties.allowHubToRemoteVnetTransit', 'type': 'bool'},
        'allow_remote_vnet_to_use_hub_vnet_gateways': {'key': 'properties.allowRemoteVnetToUseHubVnetGateways', 'type': 'bool'},
        'enable_internet_security': {'key': 'properties.enableInternetSecurity', 'type': 'bool'},
        'routing_configuration': {'key': 'properties.routingConfiguration', 'type': 'RoutingConfiguration'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(HubVirtualNetworkConnection, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        self.etag = None
        self.remote_virtual_network = kwargs.get('remote_virtual_network')
        self.allow_hub_to_remote_vnet_transit = kwargs.get('allow_hub_to_remote_vnet_transit')
        self.allow_remote_vnet_to_use_hub_vnet_gateways = kwargs.get('allow_remote_vnet_to_use_hub_vnet_gateways')
        self.enable_internet_security = kwargs.get('enable_internet_security')
        self.routing_configuration = kwargs.get('routing_configuration')
        self.provisioning_state = None
class InboundNatPool(SubResource):
    """An inbound NAT pool of a load balancer.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the resource, unique within the set of inbound NAT
     pools used by the load balancer; used to access the resource.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is
     updated.
    :vartype etag: str
    :ivar type: Type of the resource.
    :vartype type: str
    :param frontend_ip_configuration: A reference to frontend IP addresses.
    :type frontend_ip_configuration: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param protocol: The transport protocol used by the inbound NAT pool.
     Possible values include: "Udp", "Tcp", "All".
    :type protocol: str or ~azure.mgmt.network.v2020_04_01.models.TransportProtocol
    :param frontend_port_range_start: First port in the range of external ports
     used to provide Inbound NAT to NICs associated with the load balancer.
     Acceptable values range between 1 and 65534.
    :type frontend_port_range_start: int
    :param frontend_port_range_end: Last port in the range of external ports
     used to provide Inbound NAT to NICs associated with the load balancer.
     Acceptable values range between 1 and 65535.
    :type frontend_port_range_end: int
    :param backend_port: Port used for internal connections on the endpoint.
     Acceptable values are between 1 and 65535.
    :type backend_port: int
    :param idle_timeout_in_minutes: TCP idle connection timeout, settable
     between 4 and 30 minutes (default 4). Only used when protocol is TCP.
    :type idle_timeout_in_minutes: int
    :param enable_floating_ip: Configures a virtual machine's endpoint for the
     floating IP capability required by SQL AlwaysOn Availability Groups.
     Cannot be changed after the endpoint is created.
    :type enable_floating_ip: bool
    :param enable_tcp_reset: Receive bidirectional TCP Reset on TCP flow idle
     timeout or unexpected connection termination. Only used when protocol is
     TCP.
    :type enable_tcp_reset: bool
    :ivar provisioning_state: Provisioning state of the inbound NAT pool.
     Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    _validation = {
        'etag': {'readonly': True},
        'type': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'frontend_ip_configuration': {'key': 'properties.frontendIPConfiguration', 'type': 'SubResource'},
        'protocol': {'key': 'properties.protocol', 'type': 'str'},
        'frontend_port_range_start': {'key': 'properties.frontendPortRangeStart', 'type': 'int'},
        'frontend_port_range_end': {'key': 'properties.frontendPortRangeEnd', 'type': 'int'},
        'backend_port': {'key': 'properties.backendPort', 'type': 'int'},
        'idle_timeout_in_minutes': {'key': 'properties.idleTimeoutInMinutes', 'type': 'int'},
        'enable_floating_ip': {'key': 'properties.enableFloatingIP', 'type': 'bool'},
        'enable_tcp_reset': {'key': 'properties.enableTcpReset', 'type': 'bool'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(InboundNatPool, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        self.etag = None
        self.type = None
        self.frontend_ip_configuration = kwargs.get('frontend_ip_configuration')
        self.protocol = kwargs.get('protocol')
        self.frontend_port_range_start = kwargs.get('frontend_port_range_start')
        self.frontend_port_range_end = kwargs.get('frontend_port_range_end')
        self.backend_port = kwargs.get('backend_port')
        self.idle_timeout_in_minutes = kwargs.get('idle_timeout_in_minutes')
        self.enable_floating_ip = kwargs.get('enable_floating_ip')
        self.enable_tcp_reset = kwargs.get('enable_tcp_reset')
        self.provisioning_state = None
class InboundNatRule(SubResource):
    """An inbound NAT rule of a load balancer.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the resource, unique within the set of inbound NAT
     rules used by the load balancer; used to access the resource.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is
     updated.
    :vartype etag: str
    :ivar type: Type of the resource.
    :vartype type: str
    :param frontend_ip_configuration: A reference to frontend IP addresses.
    :type frontend_ip_configuration: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :ivar backend_ip_configuration: A reference to a private IP address defined
     on a network interface of a VM. Traffic sent to the frontend port of each
     of the frontend IP configurations is forwarded to the backend IP.
    :vartype backend_ip_configuration:
     ~azure.mgmt.network.v2020_04_01.models.NetworkInterfaceIPConfiguration
    :param protocol: The transport protocol used by the load balancing rule.
     Possible values include: "Udp", "Tcp", "All".
    :type protocol: str or ~azure.mgmt.network.v2020_04_01.models.TransportProtocol
    :param frontend_port: Port for the external endpoint; must be unique within
     the Load Balancer. Acceptable values range from 1 to 65534.
    :type frontend_port: int
    :param backend_port: Port used for the internal endpoint. Acceptable values
     range from 1 to 65535.
    :type backend_port: int
    :param idle_timeout_in_minutes: TCP idle connection timeout, settable
     between 4 and 30 minutes (default 4). Only used when protocol is TCP.
    :type idle_timeout_in_minutes: int
    :param enable_floating_ip: Configures a virtual machine's endpoint for the
     floating IP capability required by SQL AlwaysOn Availability Groups.
     Cannot be changed after the endpoint is created.
    :type enable_floating_ip: bool
    :param enable_tcp_reset: Receive bidirectional TCP Reset on TCP flow idle
     timeout or unexpected connection termination. Only used when protocol is
     TCP.
    :type enable_tcp_reset: bool
    :ivar provisioning_state: Provisioning state of the inbound NAT rule.
     Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    _validation = {
        'etag': {'readonly': True},
        'type': {'readonly': True},
        'backend_ip_configuration': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'frontend_ip_configuration': {'key': 'properties.frontendIPConfiguration', 'type': 'SubResource'},
        'backend_ip_configuration': {'key': 'properties.backendIPConfiguration', 'type': 'NetworkInterfaceIPConfiguration'},
        'protocol': {'key': 'properties.protocol', 'type': 'str'},
        'frontend_port': {'key': 'properties.frontendPort', 'type': 'int'},
        'backend_port': {'key': 'properties.backendPort', 'type': 'int'},
        'idle_timeout_in_minutes': {'key': 'properties.idleTimeoutInMinutes', 'type': 'int'},
        'enable_floating_ip': {'key': 'properties.enableFloatingIP', 'type': 'bool'},
        'enable_tcp_reset': {'key': 'properties.enableTcpReset', 'type': 'bool'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(InboundNatRule, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        self.etag = None
        self.type = None
        self.frontend_ip_configuration = kwargs.get('frontend_ip_configuration')
        self.backend_ip_configuration = None
        self.protocol = kwargs.get('protocol')
        self.frontend_port = kwargs.get('frontend_port')
        self.backend_port = kwargs.get('backend_port')
        self.idle_timeout_in_minutes = kwargs.get('idle_timeout_in_minutes')
        self.enable_floating_ip = kwargs.get('enable_floating_ip')
        self.enable_tcp_reset = kwargs.get('enable_tcp_reset')
        self.provisioning_state = None
class InboundNatRuleListResult(msrest.serialization.Model):
    """Response for the ListInboundNatRule API service call.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :param value: Inbound NAT rules in a load balancer.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.InboundNatRule]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _validation = {
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[InboundNatRule]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(InboundNatRuleListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value')
        self.next_link = None
class IPAddressAvailabilityResult(msrest.serialization.Model):
    """Response for the CheckIPAddressAvailability API service call.

    :param available: Private IP address availability.
    :type available: bool
    :param available_ip_addresses: Other available private IP addresses when
     the requested address is taken.
    :type available_ip_addresses: list[str]
    """

    _attribute_map = {
        'available': {'key': 'available', 'type': 'bool'},
        'available_ip_addresses': {'key': 'availableIPAddresses', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        super(IPAddressAvailabilityResult, self).__init__(**kwargs)
        self.available = kwargs.get('available')
        self.available_ip_addresses = kwargs.get('available_ip_addresses')
class IpAllocation(Resource):
    """An IpAllocation resource.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :ivar etag: A unique read-only string that changes whenever the resource is
     updated.
    :vartype etag: str
    :ivar subnet: The Subnet using the prefix of this IpAllocation resource.
    :vartype subnet: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :ivar virtual_network: The VirtualNetwork using the prefix of this
     IpAllocation resource.
    :vartype virtual_network: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param type_properties_type: The type for the IpAllocation. Possible values
     include: "Undefined", "Hypernet".
    :type type_properties_type: str or ~azure.mgmt.network.v2020_04_01.models.IpAllocationType
    :param prefix: The address prefix for the IpAllocation.
    :type prefix: str
    :param prefix_length: The address prefix length for the IpAllocation.
    :type prefix_length: int
    :param prefix_type: The address prefix Type for the IpAllocation. Possible
     values include: "IPv4", "IPv6".
    :type prefix_type: str or ~azure.mgmt.network.v2020_04_01.models.IPVersion
    :param ipam_allocation_id: The IPAM allocation ID.
    :type ipam_allocation_id: str
    :param allocation_tags: IpAllocation tags.
    :type allocation_tags: dict[str, str]
    """

    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'etag': {'readonly': True},
        'subnet': {'readonly': True},
        'virtual_network': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'etag': {'key': 'etag', 'type': 'str'},
        'subnet': {'key': 'properties.subnet', 'type': 'SubResource'},
        'virtual_network': {'key': 'properties.virtualNetwork', 'type': 'SubResource'},
        'type_properties_type': {'key': 'properties.type', 'type': 'str'},
        'prefix': {'key': 'properties.prefix', 'type': 'str'},
        'prefix_length': {'key': 'properties.prefixLength', 'type': 'int'},
        'prefix_type': {'key': 'properties.prefixType', 'type': 'str'},
        'ipam_allocation_id': {'key': 'properties.ipamAllocationId', 'type': 'str'},
        'allocation_tags': {'key': 'properties.allocationTags', 'type': '{str}'},
    }

    def __init__(self, **kwargs):
        super(IpAllocation, self).__init__(**kwargs)
        self.etag = None
        self.subnet = None
        self.virtual_network = None
        self.type_properties_type = kwargs.get('type_properties_type')
        self.prefix = kwargs.get('prefix')
        # NOTE(review): prefix_length defaults to 0 rather than None, unlike
        # every sibling field — presumably the service-side default from the
        # API spec; confirm before normalizing.
        self.prefix_length = kwargs.get('prefix_length', 0)
        self.prefix_type = kwargs.get('prefix_type')
        self.ipam_allocation_id = kwargs.get('ipam_allocation_id')
        self.allocation_tags = kwargs.get('allocation_tags')
class IpAllocationListResult(msrest.serialization.Model):
    """Response for the ListIpAllocations API service call.

    :param value: A list of IpAllocation resources.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.IpAllocation]
    :param next_link: The URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[IpAllocation]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(IpAllocationListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value')
        self.next_link = kwargs.get('next_link')
class IPConfiguration(SubResource):
    """An IP configuration.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the resource, unique within its resource group; used
     to access the resource.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is
     updated.
    :vartype etag: str
    :param private_ip_address: The private IP address of the IP configuration.
    :type private_ip_address: str
    :param private_ip_allocation_method: The private IP address allocation
     method. Possible values include: "Static", "Dynamic".
    :type private_ip_allocation_method: str or
     ~azure.mgmt.network.v2020_04_01.models.IPAllocationMethod
    :param subnet: The reference to the subnet resource.
    :type subnet: ~azure.mgmt.network.v2020_04_01.models.Subnet
    :param public_ip_address: The reference to the public IP resource.
    :type public_ip_address: ~azure.mgmt.network.v2020_04_01.models.PublicIPAddress
    :ivar provisioning_state: Provisioning state of the IP configuration.
     Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    _validation = {
        'etag': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'private_ip_address': {'key': 'properties.privateIPAddress', 'type': 'str'},
        'private_ip_allocation_method': {'key': 'properties.privateIPAllocationMethod', 'type': 'str'},
        'subnet': {'key': 'properties.subnet', 'type': 'Subnet'},
        'public_ip_address': {'key': 'properties.publicIPAddress', 'type': 'PublicIPAddress'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(IPConfiguration, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        self.etag = None
        self.private_ip_address = kwargs.get('private_ip_address')
        self.private_ip_allocation_method = kwargs.get('private_ip_allocation_method')
        self.subnet = kwargs.get('subnet')
        self.public_ip_address = kwargs.get('public_ip_address')
        self.provisioning_state = None
class IPConfigurationBgpPeeringAddress(msrest.serialization.Model):
    """BGP peering addresses that belong to one gateway IP configuration.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :param ipconfiguration_id: The ID of the IP configuration which belongs to
     the gateway.
    :type ipconfiguration_id: str
    :ivar default_bgp_ip_addresses: Default BGP peering addresses belonging to
     the IP configuration.
    :vartype default_bgp_ip_addresses: list[str]
    :param custom_bgp_ip_addresses: Custom BGP peering addresses belonging to
     the IP configuration.
    :type custom_bgp_ip_addresses: list[str]
    :ivar tunnel_ip_addresses: Tunnel public IP addresses belonging to the IP
     configuration.
    :vartype tunnel_ip_addresses: list[str]
    """

    _validation = {
        'default_bgp_ip_addresses': {'readonly': True},
        'tunnel_ip_addresses': {'readonly': True},
    }

    _attribute_map = {
        'ipconfiguration_id': {'key': 'ipconfigurationId', 'type': 'str'},
        'default_bgp_ip_addresses': {'key': 'defaultBgpIpAddresses', 'type': '[str]'},
        'custom_bgp_ip_addresses': {'key': 'customBgpIpAddresses', 'type': '[str]'},
        'tunnel_ip_addresses': {'key': 'tunnelIpAddresses', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        super(IPConfigurationBgpPeeringAddress, self).__init__(**kwargs)
        self.ipconfiguration_id = kwargs.get('ipconfiguration_id')
        self.default_bgp_ip_addresses = None
        self.custom_bgp_ip_addresses = kwargs.get('custom_bgp_ip_addresses')
        self.tunnel_ip_addresses = None
class IPConfigurationProfile(SubResource):
    """An IP configuration profile child resource.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: The name of the resource; used to access the resource.
    :type name: str
    :ivar type: Sub Resource type.
    :vartype type: str
    :ivar etag: A unique read-only string that changes whenever the resource is
     updated.
    :vartype etag: str
    :param subnet: The reference to the subnet resource used to create a
     container network interface IP configuration.
    :type subnet: ~azure.mgmt.network.v2020_04_01.models.Subnet
    :ivar provisioning_state: Provisioning state of the IP configuration
     profile. Possible values include: "Succeeded", "Updating", "Deleting",
     "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    _validation = {
        'type': {'readonly': True},
        'etag': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'subnet': {'key': 'properties.subnet', 'type': 'Subnet'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(IPConfigurationProfile, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        self.type = None
        self.etag = None
        self.subnet = kwargs.get('subnet')
        self.provisioning_state = None
class IpGroup(Resource):
    """Information about an IpGroups resource.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :ivar etag: A unique read-only string that changes whenever the resource is
     updated.
    :vartype etag: str
    :ivar provisioning_state: Provisioning state of the IpGroups resource.
     Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    :param ip_addresses: IpAddresses/IpAddressPrefixes in the IpGroups
     resource.
    :type ip_addresses: list[str]
    :ivar firewalls: References to Azure resources that this IpGroups is
     associated with.
    :vartype firewalls: list[~azure.mgmt.network.v2020_04_01.models.SubResource]
    """

    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'etag': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'firewalls': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'etag': {'key': 'etag', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'ip_addresses': {'key': 'properties.ipAddresses', 'type': '[str]'},
        'firewalls': {'key': 'properties.firewalls', 'type': '[SubResource]'},
    }

    def __init__(self, **kwargs):
        super(IpGroup, self).__init__(**kwargs)
        self.etag = None
        self.provisioning_state = None
        self.ip_addresses = kwargs.get('ip_addresses')
        self.firewalls = None
class IpGroupListResult(msrest.serialization.Model):
    """Response for the ListIpGroups API service call.

    :param value: The list of IpGroups information resources.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.IpGroup]
    :param next_link: URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[IpGroup]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(IpGroupListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value')
        self.next_link = kwargs.get('next_link')
class IpsecPolicy(msrest.serialization.Model):
    """An IPSec Policy configuration for a virtual network gateway connection.

    All required parameters must be populated in order to send to Azure.

    :param sa_life_time_seconds: Required. The IPSec Security Association (also called Quick Mode
     or Phase 2 SA) lifetime in seconds for a site to site VPN tunnel.
    :type sa_life_time_seconds: int
    :param sa_data_size_kilobytes: Required. The IPSec Security Association (also called Quick
     Mode or Phase 2 SA) payload size in KB for a site to site VPN tunnel.
    :type sa_data_size_kilobytes: int
    :param ipsec_encryption: Required. The IPSec encryption algorithm (IKE phase 1). Possible
     values include: "None", "DES", "DES3", "AES128", "AES192", "AES256", "GCMAES128",
     "GCMAES192", "GCMAES256".
    :type ipsec_encryption: str or ~azure.mgmt.network.v2020_04_01.models.IpsecEncryption
    :param ipsec_integrity: Required. The IPSec integrity algorithm (IKE phase 1). Possible
     values include: "MD5", "SHA1", "SHA256", "GCMAES128", "GCMAES192", "GCMAES256".
    :type ipsec_integrity: str or ~azure.mgmt.network.v2020_04_01.models.IpsecIntegrity
    :param ike_encryption: Required. The IKE encryption algorithm (IKE phase 2). Possible values
     include: "DES", "DES3", "AES128", "AES192", "AES256", "GCMAES256", "GCMAES128".
    :type ike_encryption: str or ~azure.mgmt.network.v2020_04_01.models.IkeEncryption
    :param ike_integrity: Required. The IKE integrity algorithm (IKE phase 2). Possible values
     include: "MD5", "SHA1", "SHA256", "SHA384", "GCMAES256", "GCMAES128".
    :type ike_integrity: str or ~azure.mgmt.network.v2020_04_01.models.IkeIntegrity
    :param dh_group: Required. The DH Group used in IKE Phase 1 for initial SA. Possible values
     include: "None", "DHGroup1", "DHGroup2", "DHGroup14", "DHGroup2048", "ECP256", "ECP384",
     "DHGroup24".
    :type dh_group: str or ~azure.mgmt.network.v2020_04_01.models.DhGroup
    :param pfs_group: Required. The Pfs Group used in IKE Phase 2 for new child SA. Possible
     values include: "None", "PFS1", "PFS2", "PFS2048", "ECP256", "ECP384", "PFS24", "PFS14",
     "PFSMM".
    :type pfs_group: str or ~azure.mgmt.network.v2020_04_01.models.PfsGroup
    """

    _validation = {
        "sa_life_time_seconds": {"required": True},
        "sa_data_size_kilobytes": {"required": True},
        "ipsec_encryption": {"required": True},
        "ipsec_integrity": {"required": True},
        "ike_encryption": {"required": True},
        "ike_integrity": {"required": True},
        "dh_group": {"required": True},
        "pfs_group": {"required": True},
    }

    _attribute_map = {
        "sa_life_time_seconds": {"key": "saLifeTimeSeconds", "type": "int"},
        "sa_data_size_kilobytes": {"key": "saDataSizeKilobytes", "type": "int"},
        "ipsec_encryption": {"key": "ipsecEncryption", "type": "str"},
        "ipsec_integrity": {"key": "ipsecIntegrity", "type": "str"},
        "ike_encryption": {"key": "ikeEncryption", "type": "str"},
        "ike_integrity": {"key": "ikeIntegrity", "type": "str"},
        "dh_group": {"key": "dhGroup", "type": "str"},
        "pfs_group": {"key": "pfsGroup", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(IpsecPolicy, self).__init__(**kwargs)
        # Every field is required: subscript access (not .get) so a missing
        # keyword raises KeyError immediately, matching the original contract.
        self.sa_life_time_seconds = kwargs["sa_life_time_seconds"]
        self.sa_data_size_kilobytes = kwargs["sa_data_size_kilobytes"]
        self.ipsec_encryption = kwargs["ipsec_encryption"]
        self.ipsec_integrity = kwargs["ipsec_integrity"]
        self.ike_encryption = kwargs["ike_encryption"]
        self.ike_integrity = kwargs["ike_integrity"]
        self.dh_group = kwargs["dh_group"]
        self.pfs_group = kwargs["pfs_group"]
class IpTag(msrest.serialization.Model):
    """Contains the IpTag associated with the object.

    :param ip_tag_type: The IP tag type. Example: FirstPartyUsage.
    :type ip_tag_type: str
    :param tag: The value of the IP tag associated with the public IP. Example: SQL.
    :type tag: str
    """

    _attribute_map = {
        "ip_tag_type": {"key": "ipTagType", "type": "str"},
        "tag": {"key": "tag", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(IpTag, self).__init__(**kwargs)
        # Both fields are optional; a missing keyword defaults to None.
        self.ip_tag_type = kwargs.get("ip_tag_type")
        self.tag = kwargs.get("tag")
class Ipv6CircuitConnectionConfig(msrest.serialization.Model):
    """IPv6 Circuit Connection properties for global reach.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param address_prefix: /125 IP address space to carve out customer addresses for global
     reach.
    :type address_prefix: str
    :ivar circuit_connection_status: Express Route Circuit connection state. Possible values
     include: "Connected", "Connecting", "Disconnected".
    :vartype circuit_connection_status: str or
     ~azure.mgmt.network.v2020_04_01.models.CircuitConnectionStatus
    """

    _validation = {
        "circuit_connection_status": {"readonly": True},
    }

    _attribute_map = {
        "address_prefix": {"key": "addressPrefix", "type": "str"},
        "circuit_connection_status": {"key": "circuitConnectionStatus", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(Ipv6CircuitConnectionConfig, self).__init__(**kwargs)
        self.address_prefix = kwargs.get("address_prefix")
        # Read-only: populated by the server on deserialization, never by callers.
        self.circuit_connection_status = None
class Ipv6ExpressRouteCircuitPeeringConfig(msrest.serialization.Model):
    """Contains IPv6 peering config.

    :param primary_peer_address_prefix: The primary address prefix.
    :type primary_peer_address_prefix: str
    :param secondary_peer_address_prefix: The secondary address prefix.
    :type secondary_peer_address_prefix: str
    :param microsoft_peering_config: The Microsoft peering configuration.
    :type microsoft_peering_config:
     ~azure.mgmt.network.v2020_04_01.models.ExpressRouteCircuitPeeringConfig
    :param route_filter: The reference to the RouteFilter resource.
    :type route_filter: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param state: The state of peering. Possible values include: "Disabled", "Enabled".
    :type state: str or ~azure.mgmt.network.v2020_04_01.models.ExpressRouteCircuitPeeringState
    """

    _attribute_map = {
        "primary_peer_address_prefix": {"key": "primaryPeerAddressPrefix", "type": "str"},
        "secondary_peer_address_prefix": {"key": "secondaryPeerAddressPrefix", "type": "str"},
        "microsoft_peering_config": {"key": "microsoftPeeringConfig", "type": "ExpressRouteCircuitPeeringConfig"},
        "route_filter": {"key": "routeFilter", "type": "SubResource"},
        "state": {"key": "state", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(Ipv6ExpressRouteCircuitPeeringConfig, self).__init__(**kwargs)
        # All fields are optional; a missing keyword defaults to None.
        self.primary_peer_address_prefix = kwargs.get("primary_peer_address_prefix")
        self.secondary_peer_address_prefix = kwargs.get("secondary_peer_address_prefix")
        self.microsoft_peering_config = kwargs.get("microsoft_peering_config")
        self.route_filter = kwargs.get("route_filter")
        self.state = kwargs.get("state")
class ListHubRouteTablesResult(msrest.serialization.Model):
    """List of RouteTables and a URL nextLink to get the next set of results.

    :param value: List of RouteTables.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.HubRouteTable]
    :param next_link: URL to get the next set of operation list results if there are any.
    :type next_link: str
    """

    _attribute_map = {
        "value": {"key": "value", "type": "[HubRouteTable]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(ListHubRouteTablesResult, self).__init__(**kwargs)
        # Both fields are optional; a missing keyword defaults to None.
        self.value = kwargs.get("value")
        self.next_link = kwargs.get("next_link")
class ListHubVirtualNetworkConnectionsResult(msrest.serialization.Model):
    """List of HubVirtualNetworkConnections and a URL nextLink to get the next set of results.

    :param value: List of HubVirtualNetworkConnections.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.HubVirtualNetworkConnection]
    :param next_link: URL to get the next set of operation list results if there are any.
    :type next_link: str
    """

    _attribute_map = {
        "value": {"key": "value", "type": "[HubVirtualNetworkConnection]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(ListHubVirtualNetworkConnectionsResult, self).__init__(**kwargs)
        # Both fields are optional; a missing keyword defaults to None.
        self.value = kwargs.get("value")
        self.next_link = kwargs.get("next_link")
class ListP2SVpnGatewaysResult(msrest.serialization.Model):
    """Result of the request to list P2SVpnGateways. It contains a list of P2SVpnGateways and a URL nextLink to get the next set of results.

    :param value: List of P2SVpnGateways.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.P2SVpnGateway]
    :param next_link: URL to get the next set of operation list results if there are any.
    :type next_link: str
    """

    _attribute_map = {
        "value": {"key": "value", "type": "[P2SVpnGateway]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(ListP2SVpnGatewaysResult, self).__init__(**kwargs)
        # Both fields are optional; a missing keyword defaults to None.
        self.value = kwargs.get("value")
        self.next_link = kwargs.get("next_link")
class ListVirtualHubRouteTableV2SResult(msrest.serialization.Model):
    """List of VirtualHubRouteTableV2s and a URL nextLink to get the next set of results.

    :param value: List of VirtualHubRouteTableV2s.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.VirtualHubRouteTableV2]
    :param next_link: URL to get the next set of operation list results if there are any.
    :type next_link: str
    """

    _attribute_map = {
        "value": {"key": "value", "type": "[VirtualHubRouteTableV2]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(ListVirtualHubRouteTableV2SResult, self).__init__(**kwargs)
        # Both fields are optional; a missing keyword defaults to None.
        self.value = kwargs.get("value")
        self.next_link = kwargs.get("next_link")
class ListVirtualHubsResult(msrest.serialization.Model):
    """Result of the request to list VirtualHubs. It contains a list of VirtualHubs and a URL nextLink to get the next set of results.

    :param value: List of VirtualHubs.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.VirtualHub]
    :param next_link: URL to get the next set of operation list results if there are any.
    :type next_link: str
    """

    _attribute_map = {
        "value": {"key": "value", "type": "[VirtualHub]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(ListVirtualHubsResult, self).__init__(**kwargs)
        # Both fields are optional; a missing keyword defaults to None.
        self.value = kwargs.get("value")
        self.next_link = kwargs.get("next_link")
class ListVirtualWANsResult(msrest.serialization.Model):
    """Result of the request to list VirtualWANs. It contains a list of VirtualWANs and a URL nextLink to get the next set of results.

    :param value: List of VirtualWANs.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.VirtualWAN]
    :param next_link: URL to get the next set of operation list results if there are any.
    :type next_link: str
    """

    _attribute_map = {
        "value": {"key": "value", "type": "[VirtualWAN]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(ListVirtualWANsResult, self).__init__(**kwargs)
        # Both fields are optional; a missing keyword defaults to None.
        self.value = kwargs.get("value")
        self.next_link = kwargs.get("next_link")
class ListVpnConnectionsResult(msrest.serialization.Model):
    """Result of the request to list all vpn connections to a virtual wan vpn gateway. It contains a list of Vpn Connections and a URL nextLink to get the next set of results.

    :param value: List of Vpn Connections.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.VpnConnection]
    :param next_link: URL to get the next set of operation list results if there are any.
    :type next_link: str
    """

    _attribute_map = {
        "value": {"key": "value", "type": "[VpnConnection]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(ListVpnConnectionsResult, self).__init__(**kwargs)
        # Both fields are optional; a missing keyword defaults to None.
        self.value = kwargs.get("value")
        self.next_link = kwargs.get("next_link")
class ListVpnGatewaysResult(msrest.serialization.Model):
    """Result of the request to list VpnGateways. It contains a list of VpnGateways and a URL nextLink to get the next set of results.

    :param value: List of VpnGateways.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.VpnGateway]
    :param next_link: URL to get the next set of operation list results if there are any.
    :type next_link: str
    """

    _attribute_map = {
        "value": {"key": "value", "type": "[VpnGateway]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(ListVpnGatewaysResult, self).__init__(**kwargs)
        # Both fields are optional; a missing keyword defaults to None.
        self.value = kwargs.get("value")
        self.next_link = kwargs.get("next_link")
class ListVpnServerConfigurationsResult(msrest.serialization.Model):
    """Result of the request to list all VpnServerConfigurations. It contains a list of VpnServerConfigurations and a URL nextLink to get the next set of results.

    :param value: List of VpnServerConfigurations.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.VpnServerConfiguration]
    :param next_link: URL to get the next set of operation list results if there are any.
    :type next_link: str
    """

    _attribute_map = {
        "value": {"key": "value", "type": "[VpnServerConfiguration]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(ListVpnServerConfigurationsResult, self).__init__(**kwargs)
        # Both fields are optional; a missing keyword defaults to None.
        self.value = kwargs.get("value")
        self.next_link = kwargs.get("next_link")
class ListVpnSiteLinkConnectionsResult(msrest.serialization.Model):
    """Result of the request to list all vpn connections to a virtual wan vpn gateway. It contains a list of Vpn Connections and a URL nextLink to get the next set of results.

    :param value: List of VpnSiteLinkConnections.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.VpnSiteLinkConnection]
    :param next_link: URL to get the next set of operation list results if there are any.
    :type next_link: str
    """

    _attribute_map = {
        "value": {"key": "value", "type": "[VpnSiteLinkConnection]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(ListVpnSiteLinkConnectionsResult, self).__init__(**kwargs)
        # Both fields are optional; a missing keyword defaults to None.
        self.value = kwargs.get("value")
        self.next_link = kwargs.get("next_link")
class ListVpnSiteLinksResult(msrest.serialization.Model):
    """Result of the request to list VpnSiteLinks. It contains a list of VpnSiteLinks and a URL nextLink to get the next set of results.

    :param value: List of VpnSitesLinks.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.VpnSiteLink]
    :param next_link: URL to get the next set of operation list results if there are any.
    :type next_link: str
    """

    _attribute_map = {
        "value": {"key": "value", "type": "[VpnSiteLink]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(ListVpnSiteLinksResult, self).__init__(**kwargs)
        # Both fields are optional; a missing keyword defaults to None.
        self.value = kwargs.get("value")
        self.next_link = kwargs.get("next_link")
class ListVpnSitesResult(msrest.serialization.Model):
    """Result of the request to list VpnSites. It contains a list of VpnSites and a URL nextLink to get the next set of results.

    :param value: List of VpnSites.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.VpnSite]
    :param next_link: URL to get the next set of operation list results if there are any.
    :type next_link: str
    """

    _attribute_map = {
        "value": {"key": "value", "type": "[VpnSite]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(ListVpnSitesResult, self).__init__(**kwargs)
        # Both fields are optional; a missing keyword defaults to None.
        self.value = kwargs.get("value")
        self.next_link = kwargs.get("next_link")
class LoadBalancer(Resource):
    """LoadBalancer resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param sku: The load balancer SKU.
    :type sku: ~azure.mgmt.network.v2020_04_01.models.LoadBalancerSku
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param frontend_ip_configurations: Object representing the frontend IPs to be used for the
     load balancer.
    :type frontend_ip_configurations:
     list[~azure.mgmt.network.v2020_04_01.models.FrontendIPConfiguration]
    :param backend_address_pools: Collection of backend address pools used by a load balancer.
    :type backend_address_pools: list[~azure.mgmt.network.v2020_04_01.models.BackendAddressPool]
    :param load_balancing_rules: Object collection representing the load balancing rules Gets the
     provisioning.
    :type load_balancing_rules: list[~azure.mgmt.network.v2020_04_01.models.LoadBalancingRule]
    :param probes: Collection of probe objects used in the load balancer.
    :type probes: list[~azure.mgmt.network.v2020_04_01.models.Probe]
    :param inbound_nat_rules: Collection of inbound NAT Rules used by a load balancer. Defining
     inbound NAT rules on your load balancer is mutually exclusive with defining an inbound NAT
     pool. Inbound NAT pools are referenced from virtual machine scale sets. NICs that are
     associated with individual virtual machines cannot reference an Inbound NAT pool. They have
     to reference individual inbound NAT rules.
    :type inbound_nat_rules: list[~azure.mgmt.network.v2020_04_01.models.InboundNatRule]
    :param inbound_nat_pools: Defines an external port range for inbound NAT to a single backend
     port on NICs associated with a load balancer. Inbound NAT rules are created automatically
     for each NIC associated with the Load Balancer using an external port from this range.
     Defining an Inbound NAT pool on your Load Balancer is mutually exclusive with defining
     inbound Nat rules. Inbound NAT pools are referenced from virtual machine scale sets. NICs
     that are associated with individual virtual machines cannot reference an inbound NAT pool.
     They have to reference individual inbound NAT rules.
    :type inbound_nat_pools: list[~azure.mgmt.network.v2020_04_01.models.InboundNatPool]
    :param outbound_rules: The outbound rules.
    :type outbound_rules: list[~azure.mgmt.network.v2020_04_01.models.OutboundRule]
    :ivar resource_guid: The resource GUID property of the load balancer resource.
    :vartype resource_guid: str
    :ivar provisioning_state: The provisioning state of the load balancer resource. Possible
     values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    _validation = {
        "name": {"readonly": True},
        "type": {"readonly": True},
        "etag": {"readonly": True},
        "resource_guid": {"readonly": True},
        "provisioning_state": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "sku": {"key": "sku", "type": "LoadBalancerSku"},
        "etag": {"key": "etag", "type": "str"},
        "frontend_ip_configurations": {"key": "properties.frontendIPConfigurations", "type": "[FrontendIPConfiguration]"},
        "backend_address_pools": {"key": "properties.backendAddressPools", "type": "[BackendAddressPool]"},
        "load_balancing_rules": {"key": "properties.loadBalancingRules", "type": "[LoadBalancingRule]"},
        "probes": {"key": "properties.probes", "type": "[Probe]"},
        "inbound_nat_rules": {"key": "properties.inboundNatRules", "type": "[InboundNatRule]"},
        "inbound_nat_pools": {"key": "properties.inboundNatPools", "type": "[InboundNatPool]"},
        "outbound_rules": {"key": "properties.outboundRules", "type": "[OutboundRule]"},
        "resource_guid": {"key": "properties.resourceGuid", "type": "str"},
        "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(LoadBalancer, self).__init__(**kwargs)
        # Caller-settable fields; a missing keyword defaults to None.
        self.sku = kwargs.get("sku")
        self.frontend_ip_configurations = kwargs.get("frontend_ip_configurations")
        self.backend_address_pools = kwargs.get("backend_address_pools")
        self.load_balancing_rules = kwargs.get("load_balancing_rules")
        self.probes = kwargs.get("probes")
        self.inbound_nat_rules = kwargs.get("inbound_nat_rules")
        self.inbound_nat_pools = kwargs.get("inbound_nat_pools")
        self.outbound_rules = kwargs.get("outbound_rules")
        # Read-only fields: populated by the server on deserialization.
        self.etag = None
        self.resource_guid = None
        self.provisioning_state = None
class LoadBalancerBackendAddress(msrest.serialization.Model):
    """Load balancer backend addresses.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param name: Name of the backend address.
    :type name: str
    :param virtual_network: Reference to an existing virtual network.
    :type virtual_network: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param ip_address: IP Address belonging to the referenced virtual network.
    :type ip_address: str
    :ivar network_interface_ip_configuration: Reference to IP address defined in network
     interfaces.
    :vartype network_interface_ip_configuration:
     ~azure.mgmt.network.v2020_04_01.models.SubResource
    """

    _validation = {
        "network_interface_ip_configuration": {"readonly": True},
    }

    _attribute_map = {
        "name": {"key": "name", "type": "str"},
        "virtual_network": {"key": "properties.virtualNetwork", "type": "SubResource"},
        "ip_address": {"key": "properties.ipAddress", "type": "str"},
        "network_interface_ip_configuration": {"key": "properties.networkInterfaceIPConfiguration", "type": "SubResource"},
    }

    def __init__(self, **kwargs):
        super(LoadBalancerBackendAddress, self).__init__(**kwargs)
        # Caller-settable fields; a missing keyword defaults to None.
        self.name = kwargs.get("name")
        self.virtual_network = kwargs.get("virtual_network")
        self.ip_address = kwargs.get("ip_address")
        # Read-only: populated by the server on deserialization.
        self.network_interface_ip_configuration = None
class LoadBalancerBackendAddressPoolListResult(msrest.serialization.Model):
    """Response for ListBackendAddressPool API service call.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param value: A list of backend address pools in a load balancer.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.BackendAddressPool]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _validation = {
        "next_link": {"readonly": True},
    }

    _attribute_map = {
        "value": {"key": "value", "type": "[BackendAddressPool]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(LoadBalancerBackendAddressPoolListResult, self).__init__(**kwargs)
        self.value = kwargs.get("value")
        # Read-only: populated by the server on deserialization.
        self.next_link = None
class LoadBalancerFrontendIPConfigurationListResult(msrest.serialization.Model):
    """Response for ListFrontendIPConfiguration API service call.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param value: A list of frontend IP configurations in a load balancer.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.FrontendIPConfiguration]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _validation = {
        "next_link": {"readonly": True},
    }

    _attribute_map = {
        "value": {"key": "value", "type": "[FrontendIPConfiguration]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(LoadBalancerFrontendIPConfigurationListResult, self).__init__(**kwargs)
        self.value = kwargs.get("value")
        # Read-only: populated by the server on deserialization.
        self.next_link = None
class LoadBalancerListResult(msrest.serialization.Model):
    """Response for ListLoadBalancers API service call.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param value: A list of load balancers in a resource group.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.LoadBalancer]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _validation = {
        "next_link": {"readonly": True},
    }

    _attribute_map = {
        "value": {"key": "value", "type": "[LoadBalancer]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(LoadBalancerListResult, self).__init__(**kwargs)
        self.value = kwargs.get("value")
        # Read-only: populated by the server on deserialization.
        self.next_link = None
class LoadBalancerLoadBalancingRuleListResult(msrest.serialization.Model):
    """Response for ListLoadBalancingRule API service call.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param value: A list of load balancing rules in a load balancer.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.LoadBalancingRule]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _validation = {
        "next_link": {"readonly": True},
    }

    _attribute_map = {
        "value": {"key": "value", "type": "[LoadBalancingRule]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(LoadBalancerLoadBalancingRuleListResult, self).__init__(**kwargs)
        self.value = kwargs.get("value")
        # Read-only: populated by the server on deserialization.
        self.next_link = None
class LoadBalancerOutboundRuleListResult(msrest.serialization.Model):
    """Response for ListOutboundRule API service call.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param value: A list of outbound rules in a load balancer.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.OutboundRule]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _validation = {
        "next_link": {"readonly": True},
    }

    _attribute_map = {
        "value": {"key": "value", "type": "[OutboundRule]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(LoadBalancerOutboundRuleListResult, self).__init__(**kwargs)
        self.value = kwargs.get("value")
        # Read-only: populated by the server on deserialization.
        self.next_link = None
class LoadBalancerProbeListResult(msrest.serialization.Model):
    """Response for ListProbe API service call.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param value: A list of probes in a load balancer.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.Probe]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _validation = {
        "next_link": {"readonly": True},
    }

    _attribute_map = {
        "value": {"key": "value", "type": "[Probe]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(LoadBalancerProbeListResult, self).__init__(**kwargs)
        self.value = kwargs.get("value")
        # Read-only: populated by the server on deserialization.
        self.next_link = None
class LoadBalancerSku(msrest.serialization.Model):
    """SKU of a load balancer.

    :param name: Name of a load balancer SKU. Possible values include: "Basic", "Standard".
    :type name: str or ~azure.mgmt.network.v2020_04_01.models.LoadBalancerSkuName
    """

    _attribute_map = {
        "name": {"key": "name", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(LoadBalancerSku, self).__init__(**kwargs)
        # Optional; a missing keyword defaults to None.
        self.name = kwargs.get("name")
class LoadBalancingRule(SubResource):
    """A load balancing rule for a load balancer.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: The name of the resource that is unique within the set of load balancing rules
     used by the load balancer. This name can be used to access the resource.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar type: Type of the resource.
    :vartype type: str
    :param frontend_ip_configuration: A reference to frontend IP addresses.
    :type frontend_ip_configuration: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param backend_address_pool: A reference to a pool of DIPs. Inbound traffic is randomly load
     balanced across IPs in the backend IPs.
    :type backend_address_pool: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param probe: The reference to the load balancer probe used by the load balancing rule.
    :type probe: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param protocol: The reference to the transport protocol used by the load balancing rule.
     Possible values include: "Udp", "Tcp", "All".
    :type protocol: str or ~azure.mgmt.network.v2020_04_01.models.TransportProtocol
    :param load_distribution: The load distribution policy for this rule. Possible values
     include: "Default", "SourceIP", "SourceIPProtocol".
    :type load_distribution: str or ~azure.mgmt.network.v2020_04_01.models.LoadDistribution
    :param frontend_port: The port for the external endpoint. Port numbers for each rule must be
     unique within the Load Balancer. Acceptable values are between 0 and 65534. Note that value
     0 enables "Any Port".
    :type frontend_port: int
    :param backend_port: The port used for internal connections on the endpoint. Acceptable
     values are between 0 and 65535. Note that value 0 enables "Any Port".
    :type backend_port: int
    :param idle_timeout_in_minutes: The timeout for the TCP idle connection. The value can be
     set between 4 and 30 minutes. The default value is 4 minutes. This element is only used when
     the protocol is set to TCP.
    :type idle_timeout_in_minutes: int
    :param enable_floating_ip: Configures a virtual machine's endpoint for the floating IP
     capability required to configure a SQL AlwaysOn Availability Group. This setting is required
     when using the SQL AlwaysOn Availability Groups in SQL server. This setting can't be changed
     after you create the endpoint.
    :type enable_floating_ip: bool
    :param enable_tcp_reset: Receive bidirectional TCP Reset on TCP flow idle timeout or
     unexpected connection termination. This element is only used when the protocol is set to
     TCP.
    :type enable_tcp_reset: bool
    :param disable_outbound_snat: Configures SNAT for the VMs in the backend pool to use the
     publicIP address specified in the frontend of the load balancing rule.
    :type disable_outbound_snat: bool
    :ivar provisioning_state: The provisioning state of the load balancing rule resource.
     Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    _validation = {
        "etag": {"readonly": True},
        "type": {"readonly": True},
        "provisioning_state": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "etag": {"key": "etag", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "frontend_ip_configuration": {"key": "properties.frontendIPConfiguration", "type": "SubResource"},
        "backend_address_pool": {"key": "properties.backendAddressPool", "type": "SubResource"},
        "probe": {"key": "properties.probe", "type": "SubResource"},
        "protocol": {"key": "properties.protocol", "type": "str"},
        "load_distribution": {"key": "properties.loadDistribution", "type": "str"},
        "frontend_port": {"key": "properties.frontendPort", "type": "int"},
        "backend_port": {"key": "properties.backendPort", "type": "int"},
        "idle_timeout_in_minutes": {"key": "properties.idleTimeoutInMinutes", "type": "int"},
        "enable_floating_ip": {"key": "properties.enableFloatingIP", "type": "bool"},
        "enable_tcp_reset": {"key": "properties.enableTcpReset", "type": "bool"},
        "disable_outbound_snat": {"key": "properties.disableOutboundSnat", "type": "bool"},
        "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(LoadBalancingRule, self).__init__(**kwargs)
        # Caller-settable fields; a missing keyword defaults to None.
        self.name = kwargs.get("name")
        self.frontend_ip_configuration = kwargs.get("frontend_ip_configuration")
        self.backend_address_pool = kwargs.get("backend_address_pool")
        self.probe = kwargs.get("probe")
        self.protocol = kwargs.get("protocol")
        self.load_distribution = kwargs.get("load_distribution")
        self.frontend_port = kwargs.get("frontend_port")
        self.backend_port = kwargs.get("backend_port")
        self.idle_timeout_in_minutes = kwargs.get("idle_timeout_in_minutes")
        self.enable_floating_ip = kwargs.get("enable_floating_ip")
        self.enable_tcp_reset = kwargs.get("enable_tcp_reset")
        self.disable_outbound_snat = kwargs.get("disable_outbound_snat")
        # Read-only fields: populated by the server on deserialization.
        self.etag = None
        self.type = None
        self.provisioning_state = None
class LocalNetworkGateway(Resource):
    """A common class for general resource information.

    Read-only fields (name, type, etag, resource_guid, provisioning_state)
    are populated by the server and ignored when sending a request.

    :param id: Resource ID.
    :param location: Resource location.
    :param tags: A set of tags. Resource tags (dict[str, str]).
    :param local_network_address_space: Local network site address space
     (~azure.mgmt.network.v2020_04_01.models.AddressSpace).
    :param gateway_ip_address: IP address of local network gateway.
    :param fqdn: FQDN of local network gateway.
    :param bgp_settings: Local network gateway's BGP speaker settings
     (~azure.mgmt.network.v2020_04_01.models.BgpSettings).
    """

    _validation = {
        'name': dict(readonly=True),
        'type': dict(readonly=True),
        'etag': dict(readonly=True),
        'resource_guid': dict(readonly=True),
        'provisioning_state': dict(readonly=True),
    }

    _attribute_map = {
        'id': dict(key='id', type='str'),
        'name': dict(key='name', type='str'),
        'type': dict(key='type', type='str'),
        'location': dict(key='location', type='str'),
        'tags': dict(key='tags', type='{str}'),
        'etag': dict(key='etag', type='str'),
        'local_network_address_space': dict(key='properties.localNetworkAddressSpace', type='AddressSpace'),
        'gateway_ip_address': dict(key='properties.gatewayIpAddress', type='str'),
        'fqdn': dict(key='properties.fqdn', type='str'),
        'bgp_settings': dict(key='properties.bgpSettings', type='BgpSettings'),
        'resource_guid': dict(key='properties.resourceGuid', type='str'),
        'provisioning_state': dict(key='properties.provisioningState', type='str'),
    }

    def __init__(self, **kwargs):
        super(LocalNetworkGateway, self).__init__(**kwargs)
        self.etag = None  # server-populated
        self.local_network_address_space = kwargs.get('local_network_address_space')
        self.gateway_ip_address = kwargs.get('gateway_ip_address')
        self.fqdn = kwargs.get('fqdn')
        self.bgp_settings = kwargs.get('bgp_settings')
        self.resource_guid = None  # server-populated
        self.provisioning_state = None  # server-populated
class LocalNetworkGatewayListResult(msrest.serialization.Model):
    """Response for ListLocalNetworkGateways API service call.

    ``next_link`` is read-only and populated by the server.

    :param value: A list of local network gateways in a resource group
     (list of ~azure.mgmt.network.v2020_04_01.models.LocalNetworkGateway).
    """

    _validation = {
        'next_link': dict(readonly=True),
    }

    _attribute_map = {
        'value': dict(key='value', type='[LocalNetworkGateway]'),
        'next_link': dict(key='nextLink', type='str'),
    }

    def __init__(self, **kwargs):
        super(LocalNetworkGatewayListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value')
        self.next_link = None  # server-populated
class LogSpecification(msrest.serialization.Model):
    """Description of logging specification.

    :param name: The name of the specification.
    :param display_name: The display name of the specification.
    :param blob_duration: Duration of the blob.
    """

    _attribute_map = {
        'name': dict(key='name', type='str'),
        'display_name': dict(key='displayName', type='str'),
        'blob_duration': dict(key='blobDuration', type='str'),
    }

    def __init__(self, **kwargs):
        super(LogSpecification, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        self.display_name = kwargs.get('display_name')
        self.blob_duration = kwargs.get('blob_duration')
class ManagedRuleGroupOverride(msrest.serialization.Model):
    """Defines a managed rule group override setting.

    :param rule_group_name: Required. The managed rule group to override.
    :param rules: Rules to disable; when omitted, every rule in the group is
     disabled (list of ~azure.mgmt.network.v2020_04_01.models.ManagedRuleOverride).
    """

    _validation = {
        'rule_group_name': dict(required=True),
    }

    _attribute_map = {
        'rule_group_name': dict(key='ruleGroupName', type='str'),
        'rules': dict(key='rules', type='[ManagedRuleOverride]'),
    }

    def __init__(self, **kwargs):
        super(ManagedRuleGroupOverride, self).__init__(**kwargs)
        # Required argument: raises KeyError when absent, matching the
        # generated-model convention for required parameters.
        self.rule_group_name = kwargs['rule_group_name']
        self.rules = kwargs.get('rules')
class ManagedRuleOverride(msrest.serialization.Model):
    """Defines a managed rule group override setting.

    :param rule_id: Required. Identifier for the managed rule.
    :param state: State of the managed rule; defaults to Disabled if not
     specified. Possible values include: "Disabled".
    """

    _validation = {
        'rule_id': dict(required=True),
    }

    _attribute_map = {
        'rule_id': dict(key='ruleId', type='str'),
        'state': dict(key='state', type='str'),
    }

    def __init__(self, **kwargs):
        super(ManagedRuleOverride, self).__init__(**kwargs)
        self.rule_id = kwargs['rule_id']  # required
        self.state = kwargs.get('state')
class ManagedRulesDefinition(msrest.serialization.Model):
    """Allow to exclude some variable satisfy the condition for the WAF check.

    :param exclusions: The Exclusions that are applied on the policy
     (list of ~azure.mgmt.network.v2020_04_01.models.OwaspCrsExclusionEntry).
    :param managed_rule_sets: Required. The managed rule sets associated with
     the policy (list of ~azure.mgmt.network.v2020_04_01.models.ManagedRuleSet).
    """

    _validation = {
        'managed_rule_sets': dict(required=True),
    }

    _attribute_map = {
        'exclusions': dict(key='exclusions', type='[OwaspCrsExclusionEntry]'),
        'managed_rule_sets': dict(key='managedRuleSets', type='[ManagedRuleSet]'),
    }

    def __init__(self, **kwargs):
        super(ManagedRulesDefinition, self).__init__(**kwargs)
        self.exclusions = kwargs.get('exclusions')
        self.managed_rule_sets = kwargs['managed_rule_sets']  # required
class ManagedRuleSet(msrest.serialization.Model):
    """Defines a managed rule set.

    :param rule_set_type: Required. Defines the rule set type to use.
    :param rule_set_version: Required. Defines the version of the rule set.
    :param rule_group_overrides: Rule group overrides to apply to the rule set
     (list of ~azure.mgmt.network.v2020_04_01.models.ManagedRuleGroupOverride).
    """

    _validation = {
        'rule_set_type': dict(required=True),
        'rule_set_version': dict(required=True),
    }

    _attribute_map = {
        'rule_set_type': dict(key='ruleSetType', type='str'),
        'rule_set_version': dict(key='ruleSetVersion', type='str'),
        'rule_group_overrides': dict(key='ruleGroupOverrides', type='[ManagedRuleGroupOverride]'),
    }

    def __init__(self, **kwargs):
        super(ManagedRuleSet, self).__init__(**kwargs)
        self.rule_set_type = kwargs['rule_set_type']  # required
        self.rule_set_version = kwargs['rule_set_version']  # required
        self.rule_group_overrides = kwargs.get('rule_group_overrides')
class ManagedServiceIdentity(msrest.serialization.Model):
    """Identity for the resource.

    ``principal_id`` and ``tenant_id`` are read-only, server-populated, and
    only provided for a system assigned identity.

    :param type: The identity type. 'SystemAssigned, UserAssigned' includes
     both an implicitly created identity and user assigned identities; 'None'
     removes all identities. Possible values: "SystemAssigned",
     "UserAssigned", "SystemAssigned, UserAssigned", "None".
    :param user_assigned_identities: User identities associated with the
     resource, keyed by ARM resource id
     ('/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}').
    """

    _validation = {
        'principal_id': dict(readonly=True),
        'tenant_id': dict(readonly=True),
    }

    _attribute_map = {
        'principal_id': dict(key='principalId', type='str'),
        'tenant_id': dict(key='tenantId', type='str'),
        'type': dict(key='type', type='str'),
        'user_assigned_identities': dict(
            key='userAssignedIdentities',
            type='{Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties}',
        ),
    }

    def __init__(self, **kwargs):
        super(ManagedServiceIdentity, self).__init__(**kwargs)
        self.principal_id = None  # server-populated
        self.tenant_id = None  # server-populated
        self.type = kwargs.get('type')
        self.user_assigned_identities = kwargs.get('user_assigned_identities')
class MatchCondition(msrest.serialization.Model):
    """Define match conditions.

    :param match_variables: Required. List of match variables
     (list of ~azure.mgmt.network.v2020_04_01.models.MatchVariable).
    :param operator: Required. The operator to be matched. Possible values:
     "IPMatch", "Equal", "Contains", "LessThan", "GreaterThan",
     "LessThanOrEqual", "GreaterThanOrEqual", "BeginsWith", "EndsWith",
     "Regex", "GeoMatch".
    :param negation_conditon: Whether this is a negate condition or not.
     (The misspelled name mirrors the service's wire field
     ``negationConditon`` and must not be renamed.)
    :param match_values: Required. Match value (list of str).
    :param transforms: List of transforms
     (~azure.mgmt.network.v2020_04_01.models.WebApplicationFirewallTransform).
    """

    _validation = {
        'match_variables': dict(required=True),
        'operator': dict(required=True),
        'match_values': dict(required=True),
    }

    _attribute_map = {
        'match_variables': dict(key='matchVariables', type='[MatchVariable]'),
        'operator': dict(key='operator', type='str'),
        'negation_conditon': dict(key='negationConditon', type='bool'),
        'match_values': dict(key='matchValues', type='[str]'),
        'transforms': dict(key='transforms', type='[str]'),
    }

    def __init__(self, **kwargs):
        super(MatchCondition, self).__init__(**kwargs)
        self.match_variables = kwargs['match_variables']  # required
        self.operator = kwargs['operator']  # required
        self.negation_conditon = kwargs.get('negation_conditon')
        self.match_values = kwargs['match_values']  # required
        self.transforms = kwargs.get('transforms')
class MatchedRule(msrest.serialization.Model):
    """Matched rule.

    :param rule_name: Name of the matched network security rule.
    :param action: Whether the network traffic is allowed or denied.
     Possible values are 'Allow' and 'Deny'.
    """

    _attribute_map = {
        'rule_name': dict(key='ruleName', type='str'),
        'action': dict(key='action', type='str'),
    }

    def __init__(self, **kwargs):
        super(MatchedRule, self).__init__(**kwargs)
        self.rule_name = kwargs.get('rule_name')
        self.action = kwargs.get('action')
class MatchVariable(msrest.serialization.Model):
    """Define match variables.

    :param variable_name: Required. Match Variable. Possible values:
     "RemoteAddr", "RequestMethod", "QueryString", "PostArgs", "RequestUri",
     "RequestHeaders", "RequestBody", "RequestCookies".
    :param selector: The selector of match variable.
    """

    _validation = {
        'variable_name': dict(required=True),
    }

    _attribute_map = {
        'variable_name': dict(key='variableName', type='str'),
        'selector': dict(key='selector', type='str'),
    }

    def __init__(self, **kwargs):
        super(MatchVariable, self).__init__(**kwargs)
        self.variable_name = kwargs['variable_name']  # required
        self.selector = kwargs.get('selector')
class MetricSpecification(msrest.serialization.Model):
    """Description of metrics specification.

    All parameters are optional.

    :param name: The name of the metric.
    :param display_name: The display name of the metric.
    :param display_description: The description of the metric.
    :param unit: Units the metric is displayed in.
    :param aggregation_type: The aggregation type.
    :param availabilities: List of availability
     (~azure.mgmt.network.v2020_04_01.models.Availability).
    :param enable_regional_mdm_account: Whether regional MDM account enabled.
    :param fill_gap_with_zero: Whether gaps would be filled with zeros.
    :param metric_filter_pattern: Pattern for the filter of the metric.
    :param dimensions: List of dimensions
     (~azure.mgmt.network.v2020_04_01.models.Dimension).
    :param is_internal: Whether the metric is internal.
    :param source_mdm_account: The source MDM account.
    :param source_mdm_namespace: The source MDM namespace.
    :param resource_id_dimension_name_override: The resource Id dimension
     name override.
    """

    _attribute_map = {
        'name': dict(key='name', type='str'),
        'display_name': dict(key='displayName', type='str'),
        'display_description': dict(key='displayDescription', type='str'),
        'unit': dict(key='unit', type='str'),
        'aggregation_type': dict(key='aggregationType', type='str'),
        'availabilities': dict(key='availabilities', type='[Availability]'),
        'enable_regional_mdm_account': dict(key='enableRegionalMdmAccount', type='bool'),
        'fill_gap_with_zero': dict(key='fillGapWithZero', type='bool'),
        'metric_filter_pattern': dict(key='metricFilterPattern', type='str'),
        'dimensions': dict(key='dimensions', type='[Dimension]'),
        'is_internal': dict(key='isInternal', type='bool'),
        'source_mdm_account': dict(key='sourceMdmAccount', type='str'),
        'source_mdm_namespace': dict(key='sourceMdmNamespace', type='str'),
        'resource_id_dimension_name_override': dict(key='resourceIdDimensionNameOverride', type='str'),
    }

    def __init__(self, **kwargs):
        super(MetricSpecification, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        self.display_name = kwargs.get('display_name')
        self.display_description = kwargs.get('display_description')
        self.unit = kwargs.get('unit')
        self.aggregation_type = kwargs.get('aggregation_type')
        self.availabilities = kwargs.get('availabilities')
        self.enable_regional_mdm_account = kwargs.get('enable_regional_mdm_account')
        self.fill_gap_with_zero = kwargs.get('fill_gap_with_zero')
        self.metric_filter_pattern = kwargs.get('metric_filter_pattern')
        self.dimensions = kwargs.get('dimensions')
        self.is_internal = kwargs.get('is_internal')
        self.source_mdm_account = kwargs.get('source_mdm_account')
        self.source_mdm_namespace = kwargs.get('source_mdm_namespace')
        self.resource_id_dimension_name_override = kwargs.get('resource_id_dimension_name_override')
class NatGateway(Resource):
    """Nat Gateway resource.

    Read-only fields (name, type, etag, subnets, resource_guid,
    provisioning_state) are populated by the server and ignored on requests.

    :param id: Resource ID.
    :param location: Resource location.
    :param tags: A set of tags. Resource tags (dict[str, str]).
    :param sku: The nat gateway SKU
     (~azure.mgmt.network.v2020_04_01.models.NatGatewaySku).
    :param zones: Availability zones denoting the zone in which Nat Gateway
     should be deployed (list of str).
    :param idle_timeout_in_minutes: The idle timeout of the nat gateway.
    :param public_ip_addresses: Public ip addresses associated with the nat
     gateway resource (list of ~azure.mgmt.network.v2020_04_01.models.SubResource).
    :param public_ip_prefixes: Public ip prefixes associated with the nat
     gateway resource (list of ~azure.mgmt.network.v2020_04_01.models.SubResource).
    """

    _validation = {
        'name': dict(readonly=True),
        'type': dict(readonly=True),
        'etag': dict(readonly=True),
        'subnets': dict(readonly=True),
        'resource_guid': dict(readonly=True),
        'provisioning_state': dict(readonly=True),
    }

    _attribute_map = {
        'id': dict(key='id', type='str'),
        'name': dict(key='name', type='str'),
        'type': dict(key='type', type='str'),
        'location': dict(key='location', type='str'),
        'tags': dict(key='tags', type='{str}'),
        'sku': dict(key='sku', type='NatGatewaySku'),
        'zones': dict(key='zones', type='[str]'),
        'etag': dict(key='etag', type='str'),
        'idle_timeout_in_minutes': dict(key='properties.idleTimeoutInMinutes', type='int'),
        'public_ip_addresses': dict(key='properties.publicIpAddresses', type='[SubResource]'),
        'public_ip_prefixes': dict(key='properties.publicIpPrefixes', type='[SubResource]'),
        'subnets': dict(key='properties.subnets', type='[SubResource]'),
        'resource_guid': dict(key='properties.resourceGuid', type='str'),
        'provisioning_state': dict(key='properties.provisioningState', type='str'),
    }

    def __init__(self, **kwargs):
        super(NatGateway, self).__init__(**kwargs)
        self.sku = kwargs.get('sku')
        self.zones = kwargs.get('zones')
        self.etag = None  # server-populated
        self.idle_timeout_in_minutes = kwargs.get('idle_timeout_in_minutes')
        self.public_ip_addresses = kwargs.get('public_ip_addresses')
        self.public_ip_prefixes = kwargs.get('public_ip_prefixes')
        self.subnets = None  # server-populated
        self.resource_guid = None  # server-populated
        self.provisioning_state = None  # server-populated
class NatGatewayListResult(msrest.serialization.Model):
    """Response for ListNatGateways API service call.

    :param value: A list of Nat Gateways in a resource group
     (list of ~azure.mgmt.network.v2020_04_01.models.NatGateway).
    :param next_link: The URL to get the next set of results.
    """

    _attribute_map = {
        'value': dict(key='value', type='[NatGateway]'),
        'next_link': dict(key='nextLink', type='str'),
    }

    def __init__(self, **kwargs):
        super(NatGatewayListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value')
        self.next_link = kwargs.get('next_link')
class NatGatewaySku(msrest.serialization.Model):
    """SKU of nat gateway.

    :param name: Name of Nat Gateway SKU. Possible values include: "Standard".
    """

    _attribute_map = {
        'name': dict(key='name', type='str'),
    }

    def __init__(self, **kwargs):
        super(NatGatewaySku, self).__init__(**kwargs)
        self.name = kwargs.get('name')
class NatRuleCondition(FirewallPolicyRuleCondition):
    """Rule condition of type nat.

    The discriminator ``rule_condition_type`` is fixed to "NatRuleCondition"
    (constant filled by server).

    :param name: Name of the rule condition.
    :param description: Description of the rule condition.
    :param ip_protocols: Array of FirewallPolicyRuleConditionNetworkProtocols.
    :param source_addresses: List of source IP addresses for this rule.
    :param destination_addresses: List of destination IP addresses or
     Service Tags.
    :param destination_ports: List of destination ports.
    :param source_ip_groups: List of source IpGroups for this rule.
    """

    _validation = {
        'rule_condition_type': dict(required=True),
    }

    _attribute_map = {
        'name': dict(key='name', type='str'),
        'description': dict(key='description', type='str'),
        'rule_condition_type': dict(key='ruleConditionType', type='str'),
        'ip_protocols': dict(key='ipProtocols', type='[str]'),
        'source_addresses': dict(key='sourceAddresses', type='[str]'),
        'destination_addresses': dict(key='destinationAddresses', type='[str]'),
        'destination_ports': dict(key='destinationPorts', type='[str]'),
        'source_ip_groups': dict(key='sourceIpGroups', type='[str]'),
    }

    def __init__(self, **kwargs):
        super(NatRuleCondition, self).__init__(**kwargs)
        self.rule_condition_type = 'NatRuleCondition'  # type: str
        self.ip_protocols = kwargs.get('ip_protocols')
        self.source_addresses = kwargs.get('source_addresses')
        self.destination_addresses = kwargs.get('destination_addresses')
        self.destination_ports = kwargs.get('destination_ports')
        self.source_ip_groups = kwargs.get('source_ip_groups')
class NetworkConfigurationDiagnosticParameters(msrest.serialization.Model):
    """Parameters to get network configuration diagnostic.

    :param target_resource_id: Required. The ID of the target resource to
     perform network configuration diagnostic. Valid options are VM,
     NetworkInterface, VMSS/NetworkInterface and Application Gateway.
    :param verbosity_level: Verbosity level. Possible values include:
     "Normal", "Minimum", "Full".
    :param profiles: Required. List of network configuration diagnostic
     profiles (list of
     ~azure.mgmt.network.v2020_04_01.models.NetworkConfigurationDiagnosticProfile).
    """

    _validation = {
        'target_resource_id': dict(required=True),
        'profiles': dict(required=True),
    }

    _attribute_map = {
        'target_resource_id': dict(key='targetResourceId', type='str'),
        'verbosity_level': dict(key='verbosityLevel', type='str'),
        'profiles': dict(key='profiles', type='[NetworkConfigurationDiagnosticProfile]'),
    }

    def __init__(self, **kwargs):
        super(NetworkConfigurationDiagnosticParameters, self).__init__(**kwargs)
        self.target_resource_id = kwargs['target_resource_id']  # required
        self.verbosity_level = kwargs.get('verbosity_level')
        self.profiles = kwargs['profiles']  # required
class NetworkConfigurationDiagnosticProfile(msrest.serialization.Model):
    """Parameters to compare with network configuration.

    All parameters are required.

    :param direction: The direction of the traffic. Possible values include:
     "Inbound", "Outbound".
    :param protocol: Protocol to be verified on. Accepted values are '*',
     TCP, UDP.
    :param source: Traffic source. Accepted values are '*', IP Address/CIDR,
     Service Tag.
    :param destination: Traffic destination. Accepted values are: '*', IP
     Address/CIDR, Service Tag.
    :param destination_port: Traffic destination port. Accepted values are
     '*' and a single port in the range (0 - 65535).
    """

    _validation = {
        'direction': dict(required=True),
        'protocol': dict(required=True),
        'source': dict(required=True),
        'destination': dict(required=True),
        'destination_port': dict(required=True),
    }

    _attribute_map = {
        'direction': dict(key='direction', type='str'),
        'protocol': dict(key='protocol', type='str'),
        'source': dict(key='source', type='str'),
        'destination': dict(key='destination', type='str'),
        'destination_port': dict(key='destinationPort', type='str'),
    }

    def __init__(self, **kwargs):
        super(NetworkConfigurationDiagnosticProfile, self).__init__(**kwargs)
        # All fields are required; missing kwargs raise KeyError.
        self.direction = kwargs['direction']
        self.protocol = kwargs['protocol']
        self.source = kwargs['source']
        self.destination = kwargs['destination']
        self.destination_port = kwargs['destination_port']
class NetworkConfigurationDiagnosticResponse(msrest.serialization.Model):
    """Results of network configuration diagnostic on the target resource.

    ``results`` is read-only and populated by the server (list of
    ~azure.mgmt.network.v2020_04_01.models.NetworkConfigurationDiagnosticResult).
    """

    _validation = {
        'results': dict(readonly=True),
    }

    _attribute_map = {
        'results': dict(key='results', type='[NetworkConfigurationDiagnosticResult]'),
    }

    def __init__(self, **kwargs):
        super(NetworkConfigurationDiagnosticResponse, self).__init__(**kwargs)
        self.results = None  # server-populated
class NetworkConfigurationDiagnosticResult(msrest.serialization.Model):
    """Network configuration diagnostic result corresponded to provided traffic query.

    :param profile: Network configuration diagnostic profile
     (~azure.mgmt.network.v2020_04_01.models.NetworkConfigurationDiagnosticProfile).
    :param network_security_group_result: Network security group result
     (~azure.mgmt.network.v2020_04_01.models.NetworkSecurityGroupResult).
    """

    _attribute_map = {
        'profile': dict(key='profile', type='NetworkConfigurationDiagnosticProfile'),
        'network_security_group_result': dict(key='networkSecurityGroupResult', type='NetworkSecurityGroupResult'),
    }

    def __init__(self, **kwargs):
        super(NetworkConfigurationDiagnosticResult, self).__init__(**kwargs)
        self.profile = kwargs.get('profile')
        self.network_security_group_result = kwargs.get('network_security_group_result')
class NetworkIntentPolicy(Resource):
    """Network Intent Policy resource.

    Read-only fields (name, type, etag) are populated by the server and
    ignored when sending a request.

    :param id: Resource ID.
    :param location: Resource location.
    :param tags: A set of tags. Resource tags (dict[str, str]).
    """

    _validation = {
        'name': dict(readonly=True),
        'type': dict(readonly=True),
        'etag': dict(readonly=True),
    }

    _attribute_map = {
        'id': dict(key='id', type='str'),
        'name': dict(key='name', type='str'),
        'type': dict(key='type', type='str'),
        'location': dict(key='location', type='str'),
        'tags': dict(key='tags', type='{str}'),
        'etag': dict(key='etag', type='str'),
    }

    def __init__(self, **kwargs):
        super(NetworkIntentPolicy, self).__init__(**kwargs)
        self.etag = None  # server-populated
class NetworkIntentPolicyConfiguration(msrest.serialization.Model):
    """Details of NetworkIntentPolicyConfiguration for PrepareNetworkPoliciesRequest.

    :param network_intent_policy_name: The name of the Network Intent Policy
     for storing in target subscription.
    :param source_network_intent_policy: Source network intent policy
     (~azure.mgmt.network.v2020_04_01.models.NetworkIntentPolicy).
    """

    _attribute_map = {
        'network_intent_policy_name': dict(key='networkIntentPolicyName', type='str'),
        'source_network_intent_policy': dict(key='sourceNetworkIntentPolicy', type='NetworkIntentPolicy'),
    }

    def __init__(self, **kwargs):
        super(NetworkIntentPolicyConfiguration, self).__init__(**kwargs)
        self.network_intent_policy_name = kwargs.get('network_intent_policy_name')
        self.source_network_intent_policy = kwargs.get('source_network_intent_policy')
class NetworkInterface(Resource):
"""A network interface in a resource group.
Variables are only populated by the server, and will be ignored when sending a request.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:ivar etag: A unique read-only string that changes whenever the resource is updated.
:vartype etag: str
:ivar virtual_machine: The reference to a virtual machine.
:vartype virtual_machine: ~azure.mgmt.network.v2020_04_01.models.SubResource
:param network_security_group: The reference to the NetworkSecurityGroup resource.
:type network_security_group: ~azure.mgmt.network.v2020_04_01.models.NetworkSecurityGroup
:ivar private_endpoint: A reference to the private endpoint to which the network interface is
linked.
:vartype private_endpoint: ~azure.mgmt.network.v2020_04_01.models.PrivateEndpoint
:param ip_configurations: A list of IPConfigurations of the network interface.
:type ip_configurations:
list[~azure.mgmt.network.v2020_04_01.models.NetworkInterfaceIPConfiguration]
:ivar tap_configurations: A list of TapConfigurations of the network interface.
:vartype tap_configurations:
list[~azure.mgmt.network.v2020_04_01.models.NetworkInterfaceTapConfiguration]
:param dns_settings: The DNS settings in network interface.
:type dns_settings: ~azure.mgmt.network.v2020_04_01.models.NetworkInterfaceDnsSettings
:ivar mac_address: The MAC address of the network interface.
:vartype mac_address: str
:ivar primary: Whether this is a primary network interface on a virtual machine.
:vartype primary: bool
:param enable_accelerated_networking: If the network interface is accelerated networking
enabled.
:type enable_accelerated_networking: bool
:param enable_ip_forwarding: Indicates whether IP forwarding is enabled on this network
interface.
:type enable_ip_forwarding: bool
:ivar hosted_workloads: A list of references to linked BareMetal resources.
:vartype hosted_workloads: list[str]
:ivar resource_guid: The resource GUID property of the network interface resource.
:vartype resource_guid: str
:ivar provisioning_state: The provisioning state of the network interface resource. Possible
values include: "Succeeded", "Updating", "Deleting", "Failed".
:vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
'etag': {'readonly': True},
'virtual_machine': {'readonly': True},
'private_endpoint': {'readonly': True},
'tap_configurations': {'readonly': True},
'mac_address': {'readonly': True},
'primary': {'readonly': True},
'hosted_workloads': {'readonly': True},
'resource_guid': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'etag': {'key': 'etag', 'type': 'str'},
'virtual_machine': {'key': 'properties.virtualMachine', 'type': 'SubResource'},
'network_security_group': {'key': 'properties.networkSecurityGroup', 'type': 'NetworkSecurityGroup'},
'private_endpoint': {'key': 'properties.privateEndpoint', 'type': 'PrivateEndpoint'},
'ip_configurations': {'key': 'properties.ipConfigurations', 'type': '[NetworkInterfaceIPConfiguration]'},
'tap_configurations': {'key': 'properties.tapConfigurations', 'type': '[NetworkInterfaceTapConfiguration]'},
'dns_settings': {'key': 'properties.dnsSettings', 'type': 'NetworkInterfaceDnsSettings'},
'mac_address': {'key': 'properties.macAddress', 'type': 'str'},
'primary': {'key': 'properties.primary', 'type': 'bool'},
'enable_accelerated_networking': {'key': 'properties.enableAcceleratedNetworking', 'type': 'bool'},
'enable_ip_forwarding': {'key': 'properties.enableIPForwarding', 'type': 'bool'},
'hosted_workloads': {'key': 'properties.hostedWorkloads', 'type': '[str]'},
'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
}
def __init__(self, **kwargs):
    """Initialize a NetworkInterface.

    Writable properties are taken from ``kwargs``; read-only properties
    are populated by the service and therefore start out as ``None``.
    """
    super(NetworkInterface, self).__init__(**kwargs)
    # Caller-settable configuration.
    self.network_security_group = kwargs.get('network_security_group')
    self.ip_configurations = kwargs.get('ip_configurations')
    self.dns_settings = kwargs.get('dns_settings')
    self.enable_accelerated_networking = kwargs.get('enable_accelerated_networking')
    self.enable_ip_forwarding = kwargs.get('enable_ip_forwarding')
    # Server-populated (read-only) values.
    self.etag = None
    self.virtual_machine = None
    self.private_endpoint = None
    self.tap_configurations = None
    self.mac_address = None
    self.primary = None
    self.hosted_workloads = None
    self.resource_guid = None
    self.provisioning_state = None
class NetworkInterfaceAssociation(msrest.serialization.Model):
    """A network interface together with its custom security rules.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Network interface ID.
    :vartype id: str
    :param security_rules: Collection of custom security rules.
    :type security_rules: list[~azure.mgmt.network.v2020_04_01.models.SecurityRule]
    """

    _validation = {
        'id': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'security_rules': {'key': 'securityRules', 'type': '[SecurityRule]'},
    }

    def __init__(self, **kwargs):
        super(NetworkInterfaceAssociation, self).__init__(**kwargs)
        self.security_rules = kwargs.get('security_rules')
        # Read-only, assigned by the service.
        self.id = None
class NetworkInterfaceDnsSettings(msrest.serialization.Model):
    """DNS configuration attached to a network interface.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :param dns_servers: List of DNS servers IP addresses. Use 'AzureProvidedDNS' to
     switch to azure provided DNS resolution. 'AzureProvidedDNS' value cannot be
     combined with other IPs, it must be the only value in dnsServers collection.
    :type dns_servers: list[str]
    :ivar applied_dns_servers: If the VM that uses this NIC is part of an
     Availability Set, this list holds the union of all DNS servers from every NIC
     in that set; it is what is configured on each of those VMs.
    :vartype applied_dns_servers: list[str]
    :param internal_dns_name_label: Relative DNS name for this NIC, used for
     internal communications between VMs in the same virtual network.
    :type internal_dns_name_label: str
    :ivar internal_fqdn: Fully qualified DNS name supporting internal
     communications between VMs in the same virtual network.
    :vartype internal_fqdn: str
    :ivar internal_domain_name_suffix: Even if internalDnsNameLabel is not
     specified, a DNS entry is created for the VM's primary NIC; its name is the
     VM name concatenated with this suffix.
    :vartype internal_domain_name_suffix: str
    """

    _validation = {
        'applied_dns_servers': {'readonly': True},
        'internal_fqdn': {'readonly': True},
        'internal_domain_name_suffix': {'readonly': True},
    }

    _attribute_map = {
        'dns_servers': {'key': 'dnsServers', 'type': '[str]'},
        'applied_dns_servers': {'key': 'appliedDnsServers', 'type': '[str]'},
        'internal_dns_name_label': {'key': 'internalDnsNameLabel', 'type': 'str'},
        'internal_fqdn': {'key': 'internalFqdn', 'type': 'str'},
        'internal_domain_name_suffix': {'key': 'internalDomainNameSuffix', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(NetworkInterfaceDnsSettings, self).__init__(**kwargs)
        self.dns_servers = kwargs.get('dns_servers')
        self.internal_dns_name_label = kwargs.get('internal_dns_name_label')
        # Read-only, populated by the service.
        self.applied_dns_servers = None
        self.internal_fqdn = None
        self.internal_domain_name_suffix = None
class NetworkInterfaceIPConfiguration(SubResource):
    """An IP configuration belonging to a network interface.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: The name of the resource that is unique within a resource group.
     This name can be used to access the resource.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param virtual_network_taps: The reference to Virtual Network Taps.
    :type virtual_network_taps: list[~azure.mgmt.network.v2020_04_01.models.VirtualNetworkTap]
    :param application_gateway_backend_address_pools: The reference to
     ApplicationGatewayBackendAddressPool resource.
    :type application_gateway_backend_address_pools:
     list[~azure.mgmt.network.v2020_04_01.models.ApplicationGatewayBackendAddressPool]
    :param load_balancer_backend_address_pools: The reference to
     LoadBalancerBackendAddressPool resource.
    :type load_balancer_backend_address_pools:
     list[~azure.mgmt.network.v2020_04_01.models.BackendAddressPool]
    :param load_balancer_inbound_nat_rules: A list of references of
     LoadBalancerInboundNatRules.
    :type load_balancer_inbound_nat_rules:
     list[~azure.mgmt.network.v2020_04_01.models.InboundNatRule]
    :param private_ip_address: Private IP address of the IP configuration.
    :type private_ip_address: str
    :param private_ip_allocation_method: The private IP address allocation method.
     Possible values include: "Static", "Dynamic".
    :type private_ip_allocation_method: str or
     ~azure.mgmt.network.v2020_04_01.models.IPAllocationMethod
    :param private_ip_address_version: Whether the specific IP configuration is
     IPv4 or IPv6. Default is IPv4. Possible values include: "IPv4", "IPv6".
    :type private_ip_address_version: str or ~azure.mgmt.network.v2020_04_01.models.IPVersion
    :param subnet: Subnet bound to the IP configuration.
    :type subnet: ~azure.mgmt.network.v2020_04_01.models.Subnet
    :param primary: Whether this is a primary customer address on the network interface.
    :type primary: bool
    :param public_ip_address: Public IP address bound to the IP configuration.
    :type public_ip_address: ~azure.mgmt.network.v2020_04_01.models.PublicIPAddress
    :param application_security_groups: Application security groups in which the
     IP configuration is included.
    :type application_security_groups:
     list[~azure.mgmt.network.v2020_04_01.models.ApplicationSecurityGroup]
    :ivar provisioning_state: The provisioning state of the network interface IP
     configuration. Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    :ivar private_link_connection_properties: PrivateLinkConnection properties for
     the network interface.
    :vartype private_link_connection_properties:
     ~azure.mgmt.network.v2020_04_01.models.NetworkInterfaceIPConfigurationPrivateLinkConnectionProperties
    """

    _validation = {
        'etag': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'private_link_connection_properties': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'virtual_network_taps': {'key': 'properties.virtualNetworkTaps', 'type': '[VirtualNetworkTap]'},
        'application_gateway_backend_address_pools': {'key': 'properties.applicationGatewayBackendAddressPools', 'type': '[ApplicationGatewayBackendAddressPool]'},
        'load_balancer_backend_address_pools': {'key': 'properties.loadBalancerBackendAddressPools', 'type': '[BackendAddressPool]'},
        'load_balancer_inbound_nat_rules': {'key': 'properties.loadBalancerInboundNatRules', 'type': '[InboundNatRule]'},
        'private_ip_address': {'key': 'properties.privateIPAddress', 'type': 'str'},
        'private_ip_allocation_method': {'key': 'properties.privateIPAllocationMethod', 'type': 'str'},
        'private_ip_address_version': {'key': 'properties.privateIPAddressVersion', 'type': 'str'},
        'subnet': {'key': 'properties.subnet', 'type': 'Subnet'},
        'primary': {'key': 'properties.primary', 'type': 'bool'},
        'public_ip_address': {'key': 'properties.publicIPAddress', 'type': 'PublicIPAddress'},
        'application_security_groups': {'key': 'properties.applicationSecurityGroups', 'type': '[ApplicationSecurityGroup]'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'private_link_connection_properties': {'key': 'properties.privateLinkConnectionProperties', 'type': 'NetworkInterfaceIPConfigurationPrivateLinkConnectionProperties'},
    }

    def __init__(self, **kwargs):
        super(NetworkInterfaceIPConfiguration, self).__init__(**kwargs)
        # Caller-settable configuration.
        self.name = kwargs.get('name')
        self.virtual_network_taps = kwargs.get('virtual_network_taps')
        self.application_gateway_backend_address_pools = kwargs.get('application_gateway_backend_address_pools')
        self.load_balancer_backend_address_pools = kwargs.get('load_balancer_backend_address_pools')
        self.load_balancer_inbound_nat_rules = kwargs.get('load_balancer_inbound_nat_rules')
        self.private_ip_address = kwargs.get('private_ip_address')
        self.private_ip_allocation_method = kwargs.get('private_ip_allocation_method')
        self.private_ip_address_version = kwargs.get('private_ip_address_version')
        self.subnet = kwargs.get('subnet')
        self.primary = kwargs.get('primary')
        self.public_ip_address = kwargs.get('public_ip_address')
        self.application_security_groups = kwargs.get('application_security_groups')
        # Server-populated (read-only) values.
        self.etag = None
        self.provisioning_state = None
        self.private_link_connection_properties = None
class NetworkInterfaceIPConfigurationListResult(msrest.serialization.Model):
    """Paged response of the list IP configurations API service call.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :param value: A list of ip configurations.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.NetworkInterfaceIPConfiguration]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _validation = {
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[NetworkInterfaceIPConfiguration]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(NetworkInterfaceIPConfigurationListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value')
        # Read-only paging link, assigned by the service.
        self.next_link = None
class NetworkInterfaceIPConfigurationPrivateLinkConnectionProperties(msrest.serialization.Model):
    """PrivateLinkConnection details for a network interface IP configuration.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar group_id: The group ID for current private link connection.
    :vartype group_id: str
    :ivar required_member_name: The required member name for current private link connection.
    :vartype required_member_name: str
    :ivar fqdns: List of FQDNs for current private link connection.
    :vartype fqdns: list[str]
    """

    _validation = {
        'group_id': {'readonly': True},
        'required_member_name': {'readonly': True},
        'fqdns': {'readonly': True},
    }

    _attribute_map = {
        'group_id': {'key': 'groupId', 'type': 'str'},
        'required_member_name': {'key': 'requiredMemberName', 'type': 'str'},
        'fqdns': {'key': 'fqdns', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        super(NetworkInterfaceIPConfigurationPrivateLinkConnectionProperties, self).__init__(**kwargs)
        # All attributes are read-only and populated by the service.
        self.group_id = None
        self.required_member_name = None
        self.fqdns = None
class NetworkInterfaceListResult(msrest.serialization.Model):
    """Paged response of the ListNetworkInterface API service call.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :param value: A list of network interfaces in a resource group.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.NetworkInterface]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _validation = {
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[NetworkInterface]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(NetworkInterfaceListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value')
        # Read-only paging link, assigned by the service.
        self.next_link = None
class NetworkInterfaceLoadBalancerListResult(msrest.serialization.Model):
    """Paged response listing the load balancers associated with a network interface.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :param value: A list of load balancers.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.LoadBalancer]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _validation = {
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[LoadBalancer]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(NetworkInterfaceLoadBalancerListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value')
        # Read-only paging link, assigned by the service.
        self.next_link = None
class NetworkInterfaceTapConfiguration(SubResource):
    """A tap configuration attached to a network interface.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: The name of the resource that is unique within a resource group.
     This name can be used to access the resource.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar type: Sub Resource type.
    :vartype type: str
    :param virtual_network_tap: The reference to the Virtual Network Tap resource.
    :type virtual_network_tap: ~azure.mgmt.network.v2020_04_01.models.VirtualNetworkTap
    :ivar provisioning_state: The provisioning state of the network interface tap
     configuration resource. Possible values include: "Succeeded", "Updating",
     "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    _validation = {
        'etag': {'readonly': True},
        'type': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'virtual_network_tap': {'key': 'properties.virtualNetworkTap', 'type': 'VirtualNetworkTap'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(NetworkInterfaceTapConfiguration, self).__init__(**kwargs)
        # Caller-settable configuration.
        self.name = kwargs.get('name')
        self.virtual_network_tap = kwargs.get('virtual_network_tap')
        # Server-populated (read-only) values.
        self.etag = None
        self.type = None
        self.provisioning_state = None
class NetworkInterfaceTapConfigurationListResult(msrest.serialization.Model):
    """Paged response of the list tap configurations API service call.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :param value: A list of tap configurations.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.NetworkInterfaceTapConfiguration]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _validation = {
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[NetworkInterfaceTapConfiguration]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(NetworkInterfaceTapConfigurationListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value')
        # Read-only paging link, assigned by the service.
        self.next_link = None
class NetworkProfile(Resource):
    """A network profile resource.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar container_network_interfaces: List of child container network interfaces.
    :vartype container_network_interfaces:
     list[~azure.mgmt.network.v2020_04_01.models.ContainerNetworkInterface]
    :param container_network_interface_configurations: List of child container
     network interface configurations.
    :type container_network_interface_configurations:
     list[~azure.mgmt.network.v2020_04_01.models.ContainerNetworkInterfaceConfiguration]
    :ivar resource_guid: The resource GUID property of the network profile resource.
    :vartype resource_guid: str
    :ivar provisioning_state: The provisioning state of the network profile
     resource. Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'etag': {'readonly': True},
        'container_network_interfaces': {'readonly': True},
        'resource_guid': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'etag': {'key': 'etag', 'type': 'str'},
        'container_network_interfaces': {'key': 'properties.containerNetworkInterfaces', 'type': '[ContainerNetworkInterface]'},
        'container_network_interface_configurations': {'key': 'properties.containerNetworkInterfaceConfigurations', 'type': '[ContainerNetworkInterfaceConfiguration]'},
        'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(NetworkProfile, self).__init__(**kwargs)
        # Caller-settable configuration.
        self.container_network_interface_configurations = kwargs.get('container_network_interface_configurations')
        # Server-populated (read-only) values.
        self.etag = None
        self.container_network_interfaces = None
        self.resource_guid = None
        self.provisioning_state = None
class NetworkProfileListResult(msrest.serialization.Model):
    """Paged response of the ListNetworkProfiles API service call.

    :param value: A list of network profiles that exist in a resource group.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.NetworkProfile]
    :param next_link: The URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[NetworkProfile]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(NetworkProfileListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value')
        self.next_link = kwargs.get('next_link')
class NetworkRuleCondition(FirewallPolicyRuleCondition):
    """A firewall policy rule condition of type network.

    All required parameters must be populated in order to send to Azure.

    :param name: Name of the rule condition.
    :type name: str
    :param description: Description of the rule condition.
    :type description: str
    :param rule_condition_type: Required. Rule Condition Type.Constant filled by
     server. Possible values include: "ApplicationRuleCondition",
     "NetworkRuleCondition", "NatRuleCondition".
    :type rule_condition_type: str or
     ~azure.mgmt.network.v2020_04_01.models.FirewallPolicyRuleConditionType
    :param ip_protocols: Array of FirewallPolicyRuleConditionNetworkProtocols.
    :type ip_protocols: list[str or
     ~azure.mgmt.network.v2020_04_01.models.FirewallPolicyRuleConditionNetworkProtocol]
    :param source_addresses: List of source IP addresses for this rule.
    :type source_addresses: list[str]
    :param destination_addresses: List of destination IP addresses or Service Tags.
    :type destination_addresses: list[str]
    :param destination_ports: List of destination ports.
    :type destination_ports: list[str]
    :param source_ip_groups: List of source IpGroups for this rule.
    :type source_ip_groups: list[str]
    :param destination_ip_groups: List of destination IpGroups for this rule.
    :type destination_ip_groups: list[str]
    """

    _validation = {
        'rule_condition_type': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'rule_condition_type': {'key': 'ruleConditionType', 'type': 'str'},
        'ip_protocols': {'key': 'ipProtocols', 'type': '[str]'},
        'source_addresses': {'key': 'sourceAddresses', 'type': '[str]'},
        'destination_addresses': {'key': 'destinationAddresses', 'type': '[str]'},
        'destination_ports': {'key': 'destinationPorts', 'type': '[str]'},
        'source_ip_groups': {'key': 'sourceIpGroups', 'type': '[str]'},
        'destination_ip_groups': {'key': 'destinationIpGroups', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        super(NetworkRuleCondition, self).__init__(**kwargs)
        # Polymorphic discriminator: identifies this subtype during (de)serialization.
        self.rule_condition_type = 'NetworkRuleCondition'  # type: str
        self.ip_protocols = kwargs.get('ip_protocols')
        self.source_addresses = kwargs.get('source_addresses')
        self.destination_addresses = kwargs.get('destination_addresses')
        self.destination_ports = kwargs.get('destination_ports')
        self.source_ip_groups = kwargs.get('source_ip_groups')
        self.destination_ip_groups = kwargs.get('destination_ip_groups')
class NetworkSecurityGroup(Resource):
    """A network security group resource.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param security_rules: A collection of security rules of the network security group.
    :type security_rules: list[~azure.mgmt.network.v2020_04_01.models.SecurityRule]
    :ivar default_security_rules: The default security rules of network security group.
    :vartype default_security_rules: list[~azure.mgmt.network.v2020_04_01.models.SecurityRule]
    :ivar network_interfaces: A collection of references to network interfaces.
    :vartype network_interfaces: list[~azure.mgmt.network.v2020_04_01.models.NetworkInterface]
    :ivar subnets: A collection of references to subnets.
    :vartype subnets: list[~azure.mgmt.network.v2020_04_01.models.Subnet]
    :ivar flow_logs: A collection of references to flow log resources.
    :vartype flow_logs: list[~azure.mgmt.network.v2020_04_01.models.FlowLog]
    :ivar resource_guid: The resource GUID property of the network security group resource.
    :vartype resource_guid: str
    :ivar provisioning_state: The provisioning state of the network security group
     resource. Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'etag': {'readonly': True},
        'default_security_rules': {'readonly': True},
        'network_interfaces': {'readonly': True},
        'subnets': {'readonly': True},
        'flow_logs': {'readonly': True},
        'resource_guid': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'etag': {'key': 'etag', 'type': 'str'},
        'security_rules': {'key': 'properties.securityRules', 'type': '[SecurityRule]'},
        'default_security_rules': {'key': 'properties.defaultSecurityRules', 'type': '[SecurityRule]'},
        'network_interfaces': {'key': 'properties.networkInterfaces', 'type': '[NetworkInterface]'},
        'subnets': {'key': 'properties.subnets', 'type': '[Subnet]'},
        'flow_logs': {'key': 'properties.flowLogs', 'type': '[FlowLog]'},
        'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(NetworkSecurityGroup, self).__init__(**kwargs)
        # Caller-settable configuration.
        self.security_rules = kwargs.get('security_rules')
        # Server-populated (read-only) values.
        self.etag = None
        self.default_security_rules = None
        self.network_interfaces = None
        self.subnets = None
        self.flow_logs = None
        self.resource_guid = None
        self.provisioning_state = None
class NetworkSecurityGroupListResult(msrest.serialization.Model):
    """Paged response of the ListNetworkSecurityGroups API service call.

    :param value: A list of NetworkSecurityGroup resources.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.NetworkSecurityGroup]
    :param next_link: The URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[NetworkSecurityGroup]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(NetworkSecurityGroupListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value')
        self.next_link = kwargs.get('next_link')
class NetworkSecurityGroupResult(msrest.serialization.Model):
    """Network configuration diagnostic result corresponding to the provided traffic query.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :param security_rule_access_result: Whether the network traffic is allowed or
     denied. Possible values include: "Allow", "Deny".
    :type security_rule_access_result: str or
     ~azure.mgmt.network.v2020_04_01.models.SecurityRuleAccess
    :ivar evaluated_network_security_groups: List of results network security groups diagnostic.
    :vartype evaluated_network_security_groups:
     list[~azure.mgmt.network.v2020_04_01.models.EvaluatedNetworkSecurityGroup]
    """

    _validation = {
        'evaluated_network_security_groups': {'readonly': True},
    }

    _attribute_map = {
        'security_rule_access_result': {'key': 'securityRuleAccessResult', 'type': 'str'},
        'evaluated_network_security_groups': {'key': 'evaluatedNetworkSecurityGroups', 'type': '[EvaluatedNetworkSecurityGroup]'},
    }

    def __init__(self, **kwargs):
        super(NetworkSecurityGroupResult, self).__init__(**kwargs)
        self.security_rule_access_result = kwargs.get('security_rule_access_result')
        # Read-only, populated by the service.
        self.evaluated_network_security_groups = None
class NetworkSecurityRulesEvaluationResult(msrest.serialization.Model):
    """Result of evaluating a single network security rule against queried traffic.

    :param name: Name of the network security rule.
    :type name: str
    :param protocol_matched: Value indicating whether protocol is matched.
    :type protocol_matched: bool
    :param source_matched: Value indicating whether source is matched.
    :type source_matched: bool
    :param source_port_matched: Value indicating whether source port is matched.
    :type source_port_matched: bool
    :param destination_matched: Value indicating whether destination is matched.
    :type destination_matched: bool
    :param destination_port_matched: Value indicating whether destination port is matched.
    :type destination_port_matched: bool
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'protocol_matched': {'key': 'protocolMatched', 'type': 'bool'},
        'source_matched': {'key': 'sourceMatched', 'type': 'bool'},
        'source_port_matched': {'key': 'sourcePortMatched', 'type': 'bool'},
        'destination_matched': {'key': 'destinationMatched', 'type': 'bool'},
        'destination_port_matched': {'key': 'destinationPortMatched', 'type': 'bool'},
    }

    def __init__(self, **kwargs):
        super(NetworkSecurityRulesEvaluationResult, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        self.protocol_matched = kwargs.get('protocol_matched')
        self.source_matched = kwargs.get('source_matched')
        self.source_port_matched = kwargs.get('source_port_matched')
        self.destination_matched = kwargs.get('destination_matched')
        self.destination_port_matched = kwargs.get('destination_port_matched')
class NetworkVirtualAppliance(Resource):
    """A NetworkVirtualAppliance resource.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param identity: The service principal that has read access to cloud-init and config blob.
    :type identity: ~azure.mgmt.network.v2020_04_01.models.ManagedServiceIdentity
    :param sku: Network Virtual Appliance SKU.
    :type sku: ~azure.mgmt.network.v2020_04_01.models.VirtualApplianceSkuProperties
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param boot_strap_configuration_blob: BootStrapConfigurationBlob storage URLs.
    :type boot_strap_configuration_blob: list[str]
    :param virtual_hub: The Virtual Hub where Network Virtual Appliance is being deployed.
    :type virtual_hub: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param cloud_init_configuration_blob: CloudInitConfigurationBlob storage URLs.
    :type cloud_init_configuration_blob: list[str]
    :param virtual_appliance_asn: VirtualAppliance ASN (0 to 4294967295).
    :type virtual_appliance_asn: long
    :ivar virtual_appliance_nics: List of Virtual Appliance Network Interfaces.
    :vartype virtual_appliance_nics:
     list[~azure.mgmt.network.v2020_04_01.models.VirtualApplianceNicProperties]
    :ivar provisioning_state: The provisioning state of the resource. Possible
     values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'etag': {'readonly': True},
        # ASN must fit in an unsigned 32-bit integer.
        'virtual_appliance_asn': {'maximum': 4294967295, 'minimum': 0},
        'virtual_appliance_nics': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'identity': {'key': 'identity', 'type': 'ManagedServiceIdentity'},
        'sku': {'key': 'sku', 'type': 'VirtualApplianceSkuProperties'},
        'etag': {'key': 'etag', 'type': 'str'},
        'boot_strap_configuration_blob': {'key': 'properties.bootStrapConfigurationBlob', 'type': '[str]'},
        'virtual_hub': {'key': 'properties.virtualHub', 'type': 'SubResource'},
        'cloud_init_configuration_blob': {'key': 'properties.cloudInitConfigurationBlob', 'type': '[str]'},
        'virtual_appliance_asn': {'key': 'properties.virtualApplianceAsn', 'type': 'long'},
        'virtual_appliance_nics': {'key': 'properties.virtualApplianceNics', 'type': '[VirtualApplianceNicProperties]'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(NetworkVirtualAppliance, self).__init__(**kwargs)
        # Caller-settable configuration.
        self.identity = kwargs.get('identity')
        self.sku = kwargs.get('sku')
        self.boot_strap_configuration_blob = kwargs.get('boot_strap_configuration_blob')
        self.virtual_hub = kwargs.get('virtual_hub')
        self.cloud_init_configuration_blob = kwargs.get('cloud_init_configuration_blob')
        self.virtual_appliance_asn = kwargs.get('virtual_appliance_asn')
        # Server-populated (read-only) values.
        self.etag = None
        self.virtual_appliance_nics = None
        self.provisioning_state = None
class NetworkVirtualApplianceListResult(msrest.serialization.Model):
    """Paged response of the ListNetworkVirtualAppliances API service call.

    :param value: List of Network Virtual Appliances.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.NetworkVirtualAppliance]
    :param next_link: URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[NetworkVirtualAppliance]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(NetworkVirtualApplianceListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value')
        self.next_link = kwargs.get('next_link')
class NetworkWatcher(Resource):
    """Network watcher in a resource group.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar provisioning_state: The provisioning state of the network watcher resource. Possible
     values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    # Fields marked readonly are server-populated and ignored on requests.
    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'etag': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    # Python attribute name -> wire JSON key and msrest type string;
    # 'properties.*' keys nest under the resource's "properties" object.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'etag': {'key': 'etag', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(NetworkWatcher, self).__init__(**kwargs)
        # Read-only fields are always reset locally; the server supplies them.
        self.etag = None
        self.provisioning_state = None
class NetworkWatcherListResult(msrest.serialization.Model):
    """The response body of the ListNetworkWatchers API service call.

    :param value: List of network watcher resources.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.NetworkWatcher]
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[NetworkWatcher]'},
    }

    def __init__(self, **kwargs):
        super(NetworkWatcherListResult, self).__init__(**kwargs)
        # ``value`` is optional; absent keys default to None.
        self.value = kwargs.get('value')
class NextHopParameters(msrest.serialization.Model):
    """Parameters that define the source and destination endpoint.

    All required parameters must be populated in order to send to Azure.

    :param target_resource_id: Required. The resource identifier of the target resource against
     which the action is to be performed.
    :type target_resource_id: str
    :param source_ip_address: Required. The source IP address.
    :type source_ip_address: str
    :param destination_ip_address: Required. The destination IP address.
    :type destination_ip_address: str
    :param target_nic_resource_id: The NIC ID. (If VM has multiple NICs and IP forwarding is
     enabled on any of the nics, then this parameter must be specified. Otherwise optional).
    :type target_nic_resource_id: str
    """

    # Fields that must be populated before sending (see docstring).
    _validation = {
        'target_resource_id': {'required': True},
        'source_ip_address': {'required': True},
        'destination_ip_address': {'required': True},
    }

    # Python attribute name -> wire JSON key and msrest type string.
    _attribute_map = {
        'target_resource_id': {'key': 'targetResourceId', 'type': 'str'},
        'source_ip_address': {'key': 'sourceIPAddress', 'type': 'str'},
        'destination_ip_address': {'key': 'destinationIPAddress', 'type': 'str'},
        'target_nic_resource_id': {'key': 'targetNicResourceId', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(NextHopParameters, self).__init__(**kwargs)
        # Required arguments are indexed directly and raise KeyError when absent.
        self.target_resource_id = kwargs['target_resource_id']
        self.source_ip_address = kwargs['source_ip_address']
        self.destination_ip_address = kwargs['destination_ip_address']
        self.target_nic_resource_id = kwargs.get('target_nic_resource_id', None)
class NextHopResult(msrest.serialization.Model):
    """The information about next hop from the specified VM.

    :param next_hop_type: Next hop type. Possible values include: "Internet", "VirtualAppliance",
     "VirtualNetworkGateway", "VnetLocal", "HyperNetGateway", "None".
    :type next_hop_type: str or ~azure.mgmt.network.v2020_04_01.models.NextHopType
    :param next_hop_ip_address: Next hop IP Address.
    :type next_hop_ip_address: str
    :param route_table_id: The resource identifier for the route table associated with the route
     being returned. If the route being returned does not correspond to any user created routes then
     this field will be the string 'System Route'.
    :type route_table_id: str
    """

    _attribute_map = {
        'next_hop_type': {'key': 'nextHopType', 'type': 'str'},
        'next_hop_ip_address': {'key': 'nextHopIpAddress', 'type': 'str'},
        'route_table_id': {'key': 'routeTableId', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(NextHopResult, self).__init__(**kwargs)
        # Every field is optional and defaults to None when not supplied.
        self.route_table_id = kwargs.get('route_table_id')
        self.next_hop_ip_address = kwargs.get('next_hop_ip_address')
        self.next_hop_type = kwargs.get('next_hop_type')
class Operation(msrest.serialization.Model):
    """A single Network REST API operation definition.

    :param name: Operation name: {provider}/{resource}/{operation}.
    :type name: str
    :param display: Display metadata associated with the operation.
    :type display: ~azure.mgmt.network.v2020_04_01.models.OperationDisplay
    :param origin: Origin of the operation.
    :type origin: str
    :param service_specification: Specification of the service.
    :type service_specification:
     ~azure.mgmt.network.v2020_04_01.models.OperationPropertiesFormatServiceSpecification
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'display': {'key': 'display', 'type': 'OperationDisplay'},
        'origin': {'key': 'origin', 'type': 'str'},
        'service_specification': {'key': 'properties.serviceSpecification', 'type': 'OperationPropertiesFormatServiceSpecification'},
    }

    def __init__(self, **kwargs):
        super(Operation, self).__init__(**kwargs)
        # All fields are optional; missing keys resolve to None.
        self.service_specification = kwargs.get('service_specification')
        self.origin = kwargs.get('origin')
        self.display = kwargs.get('display')
        self.name = kwargs.get('name')
class OperationDisplay(msrest.serialization.Model):
    """Human-readable display metadata associated with an operation.

    :param provider: Service provider: Microsoft Network.
    :type provider: str
    :param resource: Resource on which the operation is performed.
    :type resource: str
    :param operation: Type of the operation: get, read, delete, etc.
    :type operation: str
    :param description: Description of the operation.
    :type description: str
    """

    _attribute_map = {
        'provider': {'key': 'provider', 'type': 'str'},
        'resource': {'key': 'resource', 'type': 'str'},
        'operation': {'key': 'operation', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(OperationDisplay, self).__init__(**kwargs)
        # Four optional string fields; each defaults to None.
        self.description = kwargs.get('description')
        self.operation = kwargs.get('operation')
        self.resource = kwargs.get('resource')
        self.provider = kwargs.get('provider')
class OperationListResult(msrest.serialization.Model):
    """Result of the request to list Network operations. It contains a list of operations and a URL link to get the next set of results.

    :param value: List of Network operations supported by the Network resource provider.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.Operation]
    :param next_link: URL to get the next set of operation list results if there are any.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[Operation]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(OperationListResult, self).__init__(**kwargs)
        # Paged result: both the page contents and the continuation link are optional.
        self.next_link = kwargs.get('next_link')
        self.value = kwargs.get('value')
class OperationPropertiesFormatServiceSpecification(msrest.serialization.Model):
    """Specification of the service.

    :param metric_specifications: Operation service specification.
    :type metric_specifications: list[~azure.mgmt.network.v2020_04_01.models.MetricSpecification]
    :param log_specifications: Operation log specification.
    :type log_specifications: list[~azure.mgmt.network.v2020_04_01.models.LogSpecification]
    """

    _attribute_map = {
        'metric_specifications': {'key': 'metricSpecifications', 'type': '[MetricSpecification]'},
        'log_specifications': {'key': 'logSpecifications', 'type': '[LogSpecification]'},
    }

    def __init__(self, **kwargs):
        super(OperationPropertiesFormatServiceSpecification, self).__init__(**kwargs)
        # Both specification lists are optional and default to None.
        self.log_specifications = kwargs.get('log_specifications')
        self.metric_specifications = kwargs.get('metric_specifications')
class OutboundRule(SubResource):
    """Outbound rule of the load balancer.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: The name of the resource that is unique within the set of outbound rules used by
     the load balancer. This name can be used to access the resource.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar type: Type of the resource.
    :vartype type: str
    :param allocated_outbound_ports: The number of outbound ports to be used for NAT.
    :type allocated_outbound_ports: int
    :param frontend_ip_configurations: The Frontend IP addresses of the load balancer.
    :type frontend_ip_configurations: list[~azure.mgmt.network.v2020_04_01.models.SubResource]
    :param backend_address_pool: A reference to a pool of DIPs. Outbound traffic is randomly load
     balanced across IPs in the backend IPs.
    :type backend_address_pool: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :ivar provisioning_state: The provisioning state of the outbound rule resource. Possible values
     include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    :param protocol: The protocol for the outbound rule in load balancer. Possible values include:
     "Tcp", "Udp", "All".
    :type protocol: str or ~azure.mgmt.network.v2020_04_01.models.LoadBalancerOutboundRuleProtocol
    :param enable_tcp_reset: Receive bidirectional TCP Reset on TCP flow idle timeout or unexpected
     connection termination. This element is only used when the protocol is set to TCP.
    :type enable_tcp_reset: bool
    :param idle_timeout_in_minutes: The timeout for the TCP idle connection.
    :type idle_timeout_in_minutes: int
    """

    # Fields marked readonly are server-populated and ignored on requests.
    _validation = {
        'etag': {'readonly': True},
        'type': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    # Python attribute name -> wire JSON key and msrest type string;
    # 'properties.*' keys nest under the resource's "properties" object.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'allocated_outbound_ports': {'key': 'properties.allocatedOutboundPorts', 'type': 'int'},
        'frontend_ip_configurations': {'key': 'properties.frontendIPConfigurations', 'type': '[SubResource]'},
        'backend_address_pool': {'key': 'properties.backendAddressPool', 'type': 'SubResource'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'protocol': {'key': 'properties.protocol', 'type': 'str'},
        'enable_tcp_reset': {'key': 'properties.enableTcpReset', 'type': 'bool'},
        'idle_timeout_in_minutes': {'key': 'properties.idleTimeoutInMinutes', 'type': 'int'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(OutboundRule, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        # Read-only fields are always reset locally; the server supplies them.
        self.etag = None
        self.type = None
        self.allocated_outbound_ports = kwargs.get('allocated_outbound_ports', None)
        self.frontend_ip_configurations = kwargs.get('frontend_ip_configurations', None)
        self.backend_address_pool = kwargs.get('backend_address_pool', None)
        self.provisioning_state = None
        self.protocol = kwargs.get('protocol', None)
        self.enable_tcp_reset = kwargs.get('enable_tcp_reset', None)
        self.idle_timeout_in_minutes = kwargs.get('idle_timeout_in_minutes', None)
class OwaspCrsExclusionEntry(msrest.serialization.Model):
    """Allow to exclude some variable satisfy the condition for the WAF check.

    All required parameters must be populated in order to send to Azure.

    :param match_variable: Required. The variable to be excluded. Possible values include:
     "RequestHeaderNames", "RequestCookieNames", "RequestArgNames".
    :type match_variable: str or
     ~azure.mgmt.network.v2020_04_01.models.OwaspCrsExclusionEntryMatchVariable
    :param selector_match_operator: Required. When matchVariable is a collection, operate on the
     selector to specify which elements in the collection this exclusion applies to. Possible values
     include: "Equals", "Contains", "StartsWith", "EndsWith", "EqualsAny".
    :type selector_match_operator: str or
     ~azure.mgmt.network.v2020_04_01.models.OwaspCrsExclusionEntrySelectorMatchOperator
    :param selector: Required. When matchVariable is a collection, operator used to specify which
     elements in the collection this exclusion applies to.
    :type selector: str
    """

    # All three fields must be populated before sending (see docstring).
    _validation = {
        'match_variable': {'required': True},
        'selector_match_operator': {'required': True},
        'selector': {'required': True},
    }

    # Python attribute name -> wire JSON key and msrest type string.
    _attribute_map = {
        'match_variable': {'key': 'matchVariable', 'type': 'str'},
        'selector_match_operator': {'key': 'selectorMatchOperator', 'type': 'str'},
        'selector': {'key': 'selector', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(OwaspCrsExclusionEntry, self).__init__(**kwargs)
        # Required arguments are indexed directly and raise KeyError when absent.
        self.match_variable = kwargs['match_variable']
        self.selector_match_operator = kwargs['selector_match_operator']
        self.selector = kwargs['selector']
class P2SConnectionConfiguration(SubResource):
    """P2SConnectionConfiguration Resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: The name of the resource that is unique within a resource group. This name can be
     used to access the resource.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param vpn_client_address_pool: The reference to the address space resource which represents
     Address space for P2S VpnClient.
    :type vpn_client_address_pool: ~azure.mgmt.network.v2020_04_01.models.AddressSpace
    :param routing_configuration: The Routing Configuration indicating the associated and
     propagated route tables on this connection.
    :type routing_configuration: ~azure.mgmt.network.v2020_04_01.models.RoutingConfiguration
    :ivar provisioning_state: The provisioning state of the P2SConnectionConfiguration resource.
     Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    # Fields marked readonly are server-populated and ignored on requests.
    _validation = {
        'etag': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    # Python attribute name -> wire JSON key and msrest type string;
    # 'properties.*' keys nest under the resource's "properties" object.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'vpn_client_address_pool': {'key': 'properties.vpnClientAddressPool', 'type': 'AddressSpace'},
        'routing_configuration': {'key': 'properties.routingConfiguration', 'type': 'RoutingConfiguration'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(P2SConnectionConfiguration, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        # Read-only fields are always reset locally; the server supplies them.
        self.etag = None
        self.vpn_client_address_pool = kwargs.get('vpn_client_address_pool', None)
        self.routing_configuration = kwargs.get('routing_configuration', None)
        self.provisioning_state = None
class P2SVpnConnectionHealth(msrest.serialization.Model):
    """Detailed P2S VPN connection health, written to a SAS URL.

    :param sas_url: Returned sas url of the blob to which the p2s vpn connection detailed health
     will be written.
    :type sas_url: str
    """

    _attribute_map = {
        'sas_url': {'key': 'sasUrl', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(P2SVpnConnectionHealth, self).__init__(**kwargs)
        # Optional; defaults to None when not supplied.
        self.sas_url = kwargs.get('sas_url')
class P2SVpnConnectionHealthRequest(msrest.serialization.Model):
    """Request body for listing P2S VPN connection health.

    :param vpn_user_names_filter: The list of p2s vpn user names whose p2s vpn connection detailed
     health to retrieve for.
    :type vpn_user_names_filter: list[str]
    :param output_blob_sas_url: The sas-url to download the P2S Vpn connection health detail.
    :type output_blob_sas_url: str
    """

    _attribute_map = {
        'vpn_user_names_filter': {'key': 'vpnUserNamesFilter', 'type': '[str]'},
        'output_blob_sas_url': {'key': 'outputBlobSasUrl', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(P2SVpnConnectionHealthRequest, self).__init__(**kwargs)
        # Both fields are optional and default to None.
        self.output_blob_sas_url = kwargs.get('output_blob_sas_url')
        self.vpn_user_names_filter = kwargs.get('vpn_user_names_filter')
class P2SVpnConnectionRequest(msrest.serialization.Model):
    """Request body naming the P2S VPN connections to disconnect.

    :param vpn_connection_ids: List of p2s vpn connection Ids.
    :type vpn_connection_ids: list[str]
    """

    _attribute_map = {
        'vpn_connection_ids': {'key': 'vpnConnectionIds', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        super(P2SVpnConnectionRequest, self).__init__(**kwargs)
        # Optional list of connection IDs; defaults to None.
        self.vpn_connection_ids = kwargs.get('vpn_connection_ids')
class P2SVpnGateway(Resource):
    """P2SVpnGateway Resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param virtual_hub: The VirtualHub to which the gateway belongs.
    :type virtual_hub: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param p2_s_connection_configurations: List of all p2s connection configurations of the
     gateway.
    :type p2_s_connection_configurations:
     list[~azure.mgmt.network.v2020_04_01.models.P2SConnectionConfiguration]
    :ivar provisioning_state: The provisioning state of the P2S VPN gateway resource. Possible
     values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    :param vpn_gateway_scale_unit: The scale unit for this p2s vpn gateway.
    :type vpn_gateway_scale_unit: int
    :param vpn_server_configuration: The VpnServerConfiguration to which the p2sVpnGateway is
     attached to.
    :type vpn_server_configuration: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :ivar vpn_client_connection_health: All P2S VPN clients' connection health status.
    :vartype vpn_client_connection_health:
     ~azure.mgmt.network.v2020_04_01.models.VpnClientConnectionHealth
    """

    # Fields marked readonly are server-populated and ignored on requests.
    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'etag': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'vpn_client_connection_health': {'readonly': True},
    }

    # Python attribute name -> wire JSON key and msrest type string;
    # 'properties.*' keys nest under the resource's "properties" object.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'etag': {'key': 'etag', 'type': 'str'},
        'virtual_hub': {'key': 'properties.virtualHub', 'type': 'SubResource'},
        'p2_s_connection_configurations': {'key': 'properties.p2SConnectionConfigurations', 'type': '[P2SConnectionConfiguration]'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'vpn_gateway_scale_unit': {'key': 'properties.vpnGatewayScaleUnit', 'type': 'int'},
        'vpn_server_configuration': {'key': 'properties.vpnServerConfiguration', 'type': 'SubResource'},
        'vpn_client_connection_health': {'key': 'properties.vpnClientConnectionHealth', 'type': 'VpnClientConnectionHealth'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(P2SVpnGateway, self).__init__(**kwargs)
        # Read-only fields are always reset locally; the server supplies them.
        self.etag = None
        self.virtual_hub = kwargs.get('virtual_hub', None)
        self.p2_s_connection_configurations = kwargs.get('p2_s_connection_configurations', None)
        self.provisioning_state = None
        self.vpn_gateway_scale_unit = kwargs.get('vpn_gateway_scale_unit', None)
        self.vpn_server_configuration = kwargs.get('vpn_server_configuration', None)
        self.vpn_client_connection_health = None
class P2SVpnProfileParameters(msrest.serialization.Model):
    """VPN client parameters for profile/package generation.

    :param authentication_method: VPN client authentication method. Possible values include:
     "EAPTLS", "EAPMSCHAPv2".
    :type authentication_method: str or ~azure.mgmt.network.v2020_04_01.models.AuthenticationMethod
    """

    _attribute_map = {
        'authentication_method': {'key': 'authenticationMethod', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(P2SVpnProfileParameters, self).__init__(**kwargs)
        # Optional; defaults to None when not supplied.
        self.authentication_method = kwargs.get('authentication_method')
class PacketCapture(msrest.serialization.Model):
    """Parameters that define the create packet capture operation.

    All required parameters must be populated in order to send to Azure.

    :param target: Required. The ID of the targeted resource, only VM is currently supported.
    :type target: str
    :param bytes_to_capture_per_packet: Number of bytes captured per packet, the remaining bytes
     are truncated.
    :type bytes_to_capture_per_packet: int
    :param total_bytes_per_session: Maximum size of the capture output.
    :type total_bytes_per_session: int
    :param time_limit_in_seconds: Maximum duration of the capture session in seconds.
    :type time_limit_in_seconds: int
    :param storage_location: Required. The storage location for a packet capture session.
    :type storage_location: ~azure.mgmt.network.v2020_04_01.models.PacketCaptureStorageLocation
    :param filters: A list of packet capture filters.
    :type filters: list[~azure.mgmt.network.v2020_04_01.models.PacketCaptureFilter]
    """

    # 'target' and 'storage_location' must be populated before sending.
    _validation = {
        'target': {'required': True},
        'storage_location': {'required': True},
    }

    # Python attribute name -> wire JSON key and msrest type string;
    # 'properties.*' keys nest under the request's "properties" object.
    _attribute_map = {
        'target': {'key': 'properties.target', 'type': 'str'},
        'bytes_to_capture_per_packet': {'key': 'properties.bytesToCapturePerPacket', 'type': 'int'},
        'total_bytes_per_session': {'key': 'properties.totalBytesPerSession', 'type': 'int'},
        'time_limit_in_seconds': {'key': 'properties.timeLimitInSeconds', 'type': 'int'},
        'storage_location': {'key': 'properties.storageLocation', 'type': 'PacketCaptureStorageLocation'},
        'filters': {'key': 'properties.filters', 'type': '[PacketCaptureFilter]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(PacketCapture, self).__init__(**kwargs)
        # Required arguments are indexed directly and raise KeyError when absent.
        self.target = kwargs['target']
        self.bytes_to_capture_per_packet = kwargs.get('bytes_to_capture_per_packet', 0)
        self.total_bytes_per_session = kwargs.get('total_bytes_per_session', 1073741824)  # default: 1 GiB
        self.time_limit_in_seconds = kwargs.get('time_limit_in_seconds', 18000)  # default: 5 hours
        self.storage_location = kwargs['storage_location']
        self.filters = kwargs.get('filters', None)
class PacketCaptureFilter(msrest.serialization.Model):
    """Filter that is applied to packet capture request. Multiple filters can be applied.

    :param protocol: Protocol to be filtered on. Possible values include: "TCP", "UDP", "Any".
     Default value: "Any".
    :type protocol: str or ~azure.mgmt.network.v2020_04_01.models.PcProtocol
    :param local_ip_address: Local IP Address to be filtered on. Notation: "127.0.0.1" for single
     address entry. "127.0.0.1-127.0.0.255" for range. "127.0.0.1;127.0.0.5"? for multiple entries.
     Multiple ranges not currently supported. Mixing ranges with multiple entries not currently
     supported. Default = null.
    :type local_ip_address: str
    :param remote_ip_address: Local IP Address to be filtered on. Notation: "127.0.0.1" for single
     address entry. "127.0.0.1-127.0.0.255" for range. "127.0.0.1;127.0.0.5;" for multiple entries.
     Multiple ranges not currently supported. Mixing ranges with multiple entries not currently
     supported. Default = null.
    :type remote_ip_address: str
    :param local_port: Local port to be filtered on. Notation: "80" for single port entry."80-85"
     for range. "80;443;" for multiple entries. Multiple ranges not currently supported. Mixing
     ranges with multiple entries not currently supported. Default = null.
    :type local_port: str
    :param remote_port: Remote port to be filtered on. Notation: "80" for single port entry."80-85"
     for range. "80;443;" for multiple entries. Multiple ranges not currently supported. Mixing
     ranges with multiple entries not currently supported. Default = null.
    :type remote_port: str
    """

    # Python attribute name -> wire JSON key and msrest type string.
    _attribute_map = {
        'protocol': {'key': 'protocol', 'type': 'str'},
        'local_ip_address': {'key': 'localIPAddress', 'type': 'str'},
        'remote_ip_address': {'key': 'remoteIPAddress', 'type': 'str'},
        'local_port': {'key': 'localPort', 'type': 'str'},
        'remote_port': {'key': 'remotePort', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(PacketCaptureFilter, self).__init__(**kwargs)
        # "Any" matches the documented default protocol in the docstring above.
        self.protocol = kwargs.get('protocol', "Any")
        self.local_ip_address = kwargs.get('local_ip_address', None)
        self.remote_ip_address = kwargs.get('remote_ip_address', None)
        self.local_port = kwargs.get('local_port', None)
        self.remote_port = kwargs.get('remote_port', None)
class PacketCaptureListResult(msrest.serialization.Model):
    """A list of packet capture sessions.

    :param value: Information about packet capture sessions.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.PacketCaptureResult]
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[PacketCaptureResult]'},
    }

    def __init__(self, **kwargs):
        super(PacketCaptureListResult, self).__init__(**kwargs)
        # Optional; defaults to None when not supplied.
        self.value = kwargs.get('value')
class PacketCaptureParameters(msrest.serialization.Model):
    """Parameters that define the create packet capture operation.

    All required parameters must be populated in order to send to Azure.

    :param target: Required. The ID of the targeted resource, only VM is currently supported.
    :type target: str
    :param bytes_to_capture_per_packet: Number of bytes captured per packet, the remaining bytes
     are truncated.
    :type bytes_to_capture_per_packet: int
    :param total_bytes_per_session: Maximum size of the capture output.
    :type total_bytes_per_session: int
    :param time_limit_in_seconds: Maximum duration of the capture session in seconds.
    :type time_limit_in_seconds: int
    :param storage_location: Required. The storage location for a packet capture session.
    :type storage_location: ~azure.mgmt.network.v2020_04_01.models.PacketCaptureStorageLocation
    :param filters: A list of packet capture filters.
    :type filters: list[~azure.mgmt.network.v2020_04_01.models.PacketCaptureFilter]
    """

    # 'target' and 'storage_location' must be populated before sending.
    _validation = {
        'target': {'required': True},
        'storage_location': {'required': True},
    }

    # Same fields as PacketCapture, but serialized flat (no 'properties.' prefix).
    _attribute_map = {
        'target': {'key': 'target', 'type': 'str'},
        'bytes_to_capture_per_packet': {'key': 'bytesToCapturePerPacket', 'type': 'int'},
        'total_bytes_per_session': {'key': 'totalBytesPerSession', 'type': 'int'},
        'time_limit_in_seconds': {'key': 'timeLimitInSeconds', 'type': 'int'},
        'storage_location': {'key': 'storageLocation', 'type': 'PacketCaptureStorageLocation'},
        'filters': {'key': 'filters', 'type': '[PacketCaptureFilter]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(PacketCaptureParameters, self).__init__(**kwargs)
        # Required arguments are indexed directly and raise KeyError when absent.
        self.target = kwargs['target']
        self.bytes_to_capture_per_packet = kwargs.get('bytes_to_capture_per_packet', 0)
        self.total_bytes_per_session = kwargs.get('total_bytes_per_session', 1073741824)  # default: 1 GiB
        self.time_limit_in_seconds = kwargs.get('time_limit_in_seconds', 18000)  # default: 5 hours
        self.storage_location = kwargs['storage_location']
        self.filters = kwargs.get('filters', None)
class PacketCaptureQueryStatusResult(msrest.serialization.Model):
    """Status of packet capture session.

    :param name: The name of the packet capture resource.
    :type name: str
    :param id: The ID of the packet capture resource.
    :type id: str
    :param capture_start_time: The start time of the packet capture session.
    :type capture_start_time: ~datetime.datetime
    :param packet_capture_status: The status of the packet capture session. Possible values
     include: "NotStarted", "Running", "Stopped", "Error", "Unknown".
    :type packet_capture_status: str or ~azure.mgmt.network.v2020_04_01.models.PcStatus
    :param stop_reason: The reason the current packet capture session was stopped.
    :type stop_reason: str
    :param packet_capture_error: List of errors of packet capture session.
    :type packet_capture_error: list[str or ~azure.mgmt.network.v2020_04_01.models.PcError]
    """

    # Python attribute name -> wire JSON key and msrest type string;
    # 'iso-8601' means the datetime is (de)serialized as an ISO-8601 string.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'capture_start_time': {'key': 'captureStartTime', 'type': 'iso-8601'},
        'packet_capture_status': {'key': 'packetCaptureStatus', 'type': 'str'},
        'stop_reason': {'key': 'stopReason', 'type': 'str'},
        'packet_capture_error': {'key': 'packetCaptureError', 'type': '[str]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(PacketCaptureQueryStatusResult, self).__init__(**kwargs)
        # All fields are optional; each defaults to None when not supplied.
        self.name = kwargs.get('name', None)
        self.id = kwargs.get('id', None)
        self.capture_start_time = kwargs.get('capture_start_time', None)
        self.packet_capture_status = kwargs.get('packet_capture_status', None)
        self.stop_reason = kwargs.get('stop_reason', None)
        self.packet_capture_error = kwargs.get('packet_capture_error', None)
class PacketCaptureResult(msrest.serialization.Model):
    """Information about packet capture session.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar name: Name of the packet capture session.
    :vartype name: str
    :ivar id: ID of the packet capture operation.
    :vartype id: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param target: The ID of the targeted resource, only VM is currently supported.
    :type target: str
    :param bytes_to_capture_per_packet: Number of bytes captured per packet, the remaining bytes
     are truncated.
    :type bytes_to_capture_per_packet: int
    :param total_bytes_per_session: Maximum size of the capture output.
    :type total_bytes_per_session: int
    :param time_limit_in_seconds: Maximum duration of the capture session in seconds.
    :type time_limit_in_seconds: int
    :param storage_location: The storage location for a packet capture session.
    :type storage_location: ~azure.mgmt.network.v2020_04_01.models.PacketCaptureStorageLocation
    :param filters: A list of packet capture filters.
    :type filters: list[~azure.mgmt.network.v2020_04_01.models.PacketCaptureFilter]
    :ivar provisioning_state: The provisioning state of the packet capture session. Possible values
     include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    # Fields marked readonly are server-populated and ignored on requests.
    _validation = {
        'name': {'readonly': True},
        'id': {'readonly': True},
        'etag': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    # Python attribute name -> wire JSON key and msrest type string;
    # 'properties.*' keys nest under the resource's "properties" object.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'target': {'key': 'properties.target', 'type': 'str'},
        'bytes_to_capture_per_packet': {'key': 'properties.bytesToCapturePerPacket', 'type': 'int'},
        'total_bytes_per_session': {'key': 'properties.totalBytesPerSession', 'type': 'int'},
        'time_limit_in_seconds': {'key': 'properties.timeLimitInSeconds', 'type': 'int'},
        'storage_location': {'key': 'properties.storageLocation', 'type': 'PacketCaptureStorageLocation'},
        'filters': {'key': 'properties.filters', 'type': '[PacketCaptureFilter]'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(PacketCaptureResult, self).__init__(**kwargs)
        # Read-only fields are always reset locally; the server supplies them.
        self.name = None
        self.id = None
        self.etag = None
        self.target = kwargs.get('target', None)
        self.bytes_to_capture_per_packet = kwargs.get('bytes_to_capture_per_packet', 0)
        self.total_bytes_per_session = kwargs.get('total_bytes_per_session', 1073741824)  # default: 1 GiB
        self.time_limit_in_seconds = kwargs.get('time_limit_in_seconds', 18000)  # default: 5 hours
        self.storage_location = kwargs.get('storage_location', None)
        self.filters = kwargs.get('filters', None)
        self.provisioning_state = None
class PacketCaptureResultProperties(PacketCaptureParameters):
    """Properties of a packet capture session.

    Read-only variables are populated by the server and ignored on requests.
    Required parameters must be populated before sending to Azure.

    :param target: Required. ID of the targeted resource; only VM is currently supported.
    :type target: str
    :param bytes_to_capture_per_packet: Bytes captured per packet; the rest are truncated.
    :type bytes_to_capture_per_packet: int
    :param total_bytes_per_session: Maximum size of the capture output.
    :type total_bytes_per_session: int
    :param time_limit_in_seconds: Maximum duration of the capture session in seconds.
    :type time_limit_in_seconds: int
    :param storage_location: Required. Storage location for the packet capture session.
    :type storage_location: ~azure.mgmt.network.v2020_04_01.models.PacketCaptureStorageLocation
    :param filters: A list of packet capture filters.
    :type filters: list[~azure.mgmt.network.v2020_04_01.models.PacketCaptureFilter]
    :ivar provisioning_state: Provisioning state of the session. Possible values include:
     "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    _validation = {
        'target': {'required': True},
        'storage_location': {'required': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'target': {'key': 'target', 'type': 'str'},
        'bytes_to_capture_per_packet': {'key': 'bytesToCapturePerPacket', 'type': 'int'},
        'total_bytes_per_session': {'key': 'totalBytesPerSession', 'type': 'int'},
        'time_limit_in_seconds': {'key': 'timeLimitInSeconds', 'type': 'int'},
        'storage_location': {'key': 'storageLocation', 'type': 'PacketCaptureStorageLocation'},
        'filters': {'key': 'filters', 'type': '[PacketCaptureFilter]'},
        'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        # Capture parameters themselves are handled by the parent class.
        super(PacketCaptureResultProperties, self).__init__(**kwargs)
        # Server-populated, read-only.
        self.provisioning_state = None
class PacketCaptureStorageLocation(msrest.serialization.Model):
    """Describes where a packet capture session stores its output.

    :param storage_id: ID of the storage account for saving the capture; required when
     no local file path is given.
    :type storage_id: str
    :param storage_path: Well-formed URI of the storage path where the capture is saved.
    :type storage_path: str
    :param file_path: Valid local path on the target VM, including the capture file name
     (*.cap); on Linux it must start with /var/captures. Required when no storage ID is
     given, otherwise optional.
    :type file_path: str
    """

    _attribute_map = {
        'storage_id': {'key': 'storageId', 'type': 'str'},
        'storage_path': {'key': 'storagePath', 'type': 'str'},
        'file_path': {'key': 'filePath', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(PacketCaptureStorageLocation, self).__init__(**kwargs)
        # All three locations are optional individually; validity constraints
        # (storage_id vs file_path) are enforced server-side.
        self.storage_id = kwargs.get('storage_id')
        self.storage_path = kwargs.get('storage_path')
        self.file_path = kwargs.get('file_path')
class PatchRouteFilter(SubResource):
    """Route Filter resource used for PATCH operations.

    Read-only variables are populated by the server and ignored on requests.

    :param id: Resource ID.
    :type id: str
    :ivar name: Name of the resource, unique within a resource group.
    :vartype name: str
    :ivar etag: Unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar type: Resource type.
    :vartype type: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param rules: Collection of RouteFilterRules contained within the route filter.
    :type rules: list[~azure.mgmt.network.v2020_04_01.models.RouteFilterRule]
    :ivar peerings: References to express route circuit peerings.
    :vartype peerings: list[~azure.mgmt.network.v2020_04_01.models.ExpressRouteCircuitPeering]
    :ivar ipv6_peerings: References to express route circuit ipv6 peerings.
    :vartype ipv6_peerings: list[~azure.mgmt.network.v2020_04_01.models.ExpressRouteCircuitPeering]
    :ivar provisioning_state: Provisioning state of the route filter resource. Possible
     values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    _validation = {
        'name': {'readonly': True},
        'etag': {'readonly': True},
        'type': {'readonly': True},
        'peerings': {'readonly': True},
        'ipv6_peerings': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'rules': {'key': 'properties.rules', 'type': '[RouteFilterRule]'},
        'peerings': {'key': 'properties.peerings', 'type': '[ExpressRouteCircuitPeering]'},
        'ipv6_peerings': {'key': 'properties.ipv6Peerings', 'type': '[ExpressRouteCircuitPeering]'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        # 'id' is consumed by the SubResource base class.
        super(PatchRouteFilter, self).__init__(**kwargs)
        # Server-populated, read-only.
        self.name = None
        self.etag = None
        self.type = None
        self.peerings = None
        self.ipv6_peerings = None
        self.provisioning_state = None
        # Caller-settable.
        self.tags = kwargs.get('tags')
        self.rules = kwargs.get('rules')
class PatchRouteFilterRule(SubResource):
    """Route Filter Rule resource used for PATCH operations.

    Read-only variables are populated by the server and ignored on requests.

    :param id: Resource ID.
    :type id: str
    :ivar name: Name of the resource, unique within a resource group.
    :vartype name: str
    :ivar etag: Unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param access: The access type of the rule. Possible values include: "Allow", "Deny".
    :type access: str or ~azure.mgmt.network.v2020_04_01.models.Access
    :param route_filter_rule_type: The rule type. Possible values include: "Community".
    :type route_filter_rule_type: str or ~azure.mgmt.network.v2020_04_01.models.RouteFilterRuleType
    :param communities: BGP community values to filter on, e.g.
     ['12076:5010','12076:5020'].
    :type communities: list[str]
    :ivar provisioning_state: Provisioning state of the route filter rule resource.
     Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    _validation = {
        'name': {'readonly': True},
        'etag': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'access': {'key': 'properties.access', 'type': 'str'},
        'route_filter_rule_type': {'key': 'properties.routeFilterRuleType', 'type': 'str'},
        'communities': {'key': 'properties.communities', 'type': '[str]'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        # 'id' is consumed by the SubResource base class.
        super(PatchRouteFilterRule, self).__init__(**kwargs)
        # Server-populated, read-only.
        self.name = None
        self.etag = None
        self.provisioning_state = None
        # Caller-settable.
        self.access = kwargs.get('access')
        self.route_filter_rule_type = kwargs.get('route_filter_rule_type')
        self.communities = kwargs.get('communities')
class PeerExpressRouteCircuitConnection(SubResource):
    """Peer Express Route Circuit Connection within an ExpressRouteCircuitPeering resource.

    Read-only variables are populated by the server and ignored on requests.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the resource, unique within a resource group.
    :type name: str
    :ivar etag: Unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar type: Type of the resource.
    :vartype type: str
    :param express_route_circuit_peering: Reference to the circuit's Express Route
     Circuit Private Peering resource.
    :type express_route_circuit_peering: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param peer_express_route_circuit_peering: Reference to the peered circuit's Express
     Route Circuit Private Peering resource.
    :type peer_express_route_circuit_peering: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param address_prefix: /29 IP address space to carve out customer addresses for tunnels.
    :type address_prefix: str
    :ivar circuit_connection_status: Express Route Circuit connection state. Possible
     values include: "Connected", "Connecting", "Disconnected".
    :vartype circuit_connection_status: str or
     ~azure.mgmt.network.v2020_04_01.models.CircuitConnectionStatus
    :param connection_name: Name of the express route circuit connection resource.
    :type connection_name: str
    :param auth_resource_guid: Resource guid of the authorization used for the express
     route circuit connection.
    :type auth_resource_guid: str
    :ivar provisioning_state: Provisioning state of the peer express route circuit
     connection resource. Possible values include: "Succeeded", "Updating", "Deleting",
     "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    _validation = {
        'etag': {'readonly': True},
        'type': {'readonly': True},
        'circuit_connection_status': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'express_route_circuit_peering': {'key': 'properties.expressRouteCircuitPeering', 'type': 'SubResource'},
        'peer_express_route_circuit_peering': {'key': 'properties.peerExpressRouteCircuitPeering', 'type': 'SubResource'},
        'address_prefix': {'key': 'properties.addressPrefix', 'type': 'str'},
        'circuit_connection_status': {'key': 'properties.circuitConnectionStatus', 'type': 'str'},
        'connection_name': {'key': 'properties.connectionName', 'type': 'str'},
        'auth_resource_guid': {'key': 'properties.authResourceGuid', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        # 'id' is consumed by the SubResource base class.
        super(PeerExpressRouteCircuitConnection, self).__init__(**kwargs)
        # Server-populated, read-only.
        self.etag = None
        self.type = None
        self.circuit_connection_status = None
        self.provisioning_state = None
        # Caller-settable.
        self.name = kwargs.get('name')
        self.express_route_circuit_peering = kwargs.get('express_route_circuit_peering')
        self.peer_express_route_circuit_peering = kwargs.get('peer_express_route_circuit_peering')
        self.address_prefix = kwargs.get('address_prefix')
        self.connection_name = kwargs.get('connection_name')
        self.auth_resource_guid = kwargs.get('auth_resource_guid')
class PeerExpressRouteCircuitConnectionListResult(msrest.serialization.Model):
    """Response for the ListPeeredConnections API service call: all global reach peer
    circuit connections belonging to a Private Peering of an ExpressRouteCircuit.

    :param value: Global reach peer circuit connections associated with Private Peering
     in an ExpressRoute Circuit.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.PeerExpressRouteCircuitConnection]
    :param next_link: The URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[PeerExpressRouteCircuitConnection]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(PeerExpressRouteCircuitConnectionListResult, self).__init__(**kwargs)
        # Standard paged-list shape: a page of items plus a continuation link.
        self.value = kwargs.get('value')
        self.next_link = kwargs.get('next_link')
class PolicySettings(msrest.serialization.Model):
    """Contents of a web application firewall global configuration.

    :param state: The state of the policy. Possible values include: "Disabled", "Enabled".
    :type state: str or ~azure.mgmt.network.v2020_04_01.models.WebApplicationFirewallEnabledState
    :param mode: The mode of the policy. Possible values include: "Prevention", "Detection".
    :type mode: str or ~azure.mgmt.network.v2020_04_01.models.WebApplicationFirewallMode
    :param request_body_check: Whether WAF may check the request body.
    :type request_body_check: bool
    :param max_request_body_size_in_kb: Maximum request body size in Kb for WAF
     (allowed range 8-128, see ``_validation``).
    :type max_request_body_size_in_kb: int
    :param file_upload_limit_in_mb: Maximum file upload size in Mb for WAF (non-negative).
    :type file_upload_limit_in_mb: int
    """

    _validation = {
        'max_request_body_size_in_kb': {'maximum': 128, 'minimum': 8},
        'file_upload_limit_in_mb': {'minimum': 0},
    }

    _attribute_map = {
        'state': {'key': 'state', 'type': 'str'},
        'mode': {'key': 'mode', 'type': 'str'},
        'request_body_check': {'key': 'requestBodyCheck', 'type': 'bool'},
        'max_request_body_size_in_kb': {'key': 'maxRequestBodySizeInKb', 'type': 'int'},
        'file_upload_limit_in_mb': {'key': 'fileUploadLimitInMb', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        super(PolicySettings, self).__init__(**kwargs)
        # All settings are optional; unset values are omitted from serialization.
        self.state = kwargs.get('state')
        self.mode = kwargs.get('mode')
        self.request_body_check = kwargs.get('request_body_check')
        self.max_request_body_size_in_kb = kwargs.get('max_request_body_size_in_kb')
        self.file_upload_limit_in_mb = kwargs.get('file_upload_limit_in_mb')
class PrepareNetworkPoliciesRequest(msrest.serialization.Model):
    """Details of PrepareNetworkPolicies for a Subnet.

    :param service_name: Name of the service the subnet is being prepared for.
    :type service_name: str
    :param network_intent_policy_configurations: A list of NetworkIntentPolicyConfiguration.
    :type network_intent_policy_configurations:
     list[~azure.mgmt.network.v2020_04_01.models.NetworkIntentPolicyConfiguration]
    """

    _attribute_map = {
        'service_name': {'key': 'serviceName', 'type': 'str'},
        'network_intent_policy_configurations': {'key': 'networkIntentPolicyConfigurations', 'type': '[NetworkIntentPolicyConfiguration]'},
    }

    def __init__(self, **kwargs):
        super(PrepareNetworkPoliciesRequest, self).__init__(**kwargs)
        self.service_name = kwargs.get('service_name')
        self.network_intent_policy_configurations = kwargs.get('network_intent_policy_configurations')
class PrivateDnsZoneConfig(msrest.serialization.Model):
    """PrivateDnsZoneConfig resource.

    Read-only variables are populated by the server and ignored on requests.

    :param name: Name of the resource, unique within a resource group.
    :type name: str
    :param private_dns_zone_id: Resource id of the private dns zone.
    :type private_dns_zone_id: str
    :ivar record_sets: Information regarding recordSets that identify private resources.
    :vartype record_sets: list[~azure.mgmt.network.v2020_04_01.models.RecordSet]
    """

    _validation = {
        'record_sets': {'readonly': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'private_dns_zone_id': {'key': 'properties.privateDnsZoneId', 'type': 'str'},
        'record_sets': {'key': 'properties.recordSets', 'type': '[RecordSet]'},
    }

    def __init__(self, **kwargs):
        super(PrivateDnsZoneConfig, self).__init__(**kwargs)
        # Caller-settable.
        self.name = kwargs.get('name')
        self.private_dns_zone_id = kwargs.get('private_dns_zone_id')
        # Server-populated, read-only.
        self.record_sets = None
class PrivateDnsZoneGroup(SubResource):
    """Private dns zone group resource.

    Read-only variables are populated by the server and ignored on requests.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the resource, unique within a resource group.
    :type name: str
    :ivar etag: Unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar provisioning_state: Provisioning state of the private dns zone group resource.
     Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    :param private_dns_zone_configs: Private dns zone configurations of the group.
    :type private_dns_zone_configs:
     list[~azure.mgmt.network.v2020_04_01.models.PrivateDnsZoneConfig]
    """

    _validation = {
        'etag': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'private_dns_zone_configs': {'key': 'properties.privateDnsZoneConfigs', 'type': '[PrivateDnsZoneConfig]'},
    }

    def __init__(self, **kwargs):
        # 'id' is consumed by the SubResource base class.
        super(PrivateDnsZoneGroup, self).__init__(**kwargs)
        # Caller-settable.
        self.name = kwargs.get('name')
        self.private_dns_zone_configs = kwargs.get('private_dns_zone_configs')
        # Server-populated, read-only.
        self.etag = None
        self.provisioning_state = None
class PrivateDnsZoneGroupListResult(msrest.serialization.Model):
    """Response for the ListPrivateDnsZoneGroups API service call.

    Read-only variables are populated by the server and ignored on requests.

    :param value: Private dns zone group resources in a private endpoint.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.PrivateDnsZoneGroup]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _validation = {
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[PrivateDnsZoneGroup]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(PrivateDnsZoneGroupListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value')
        # Continuation link is server-populated, read-only.
        self.next_link = None
class PrivateEndpoint(Resource):
    """Private endpoint resource.

    Read-only variables are populated by the server and ignored on requests.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :ivar etag: Unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param subnet: ID of the subnet from which the private IP will be allocated.
    :type subnet: ~azure.mgmt.network.v2020_04_01.models.Subnet
    :ivar network_interfaces: References to the network interfaces created for this
     private endpoint.
    :vartype network_interfaces: list[~azure.mgmt.network.v2020_04_01.models.NetworkInterface]
    :ivar provisioning_state: Provisioning state of the private endpoint resource.
     Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    :param private_link_service_connections: Information about the connection to the
     remote resource.
    :type private_link_service_connections:
     list[~azure.mgmt.network.v2020_04_01.models.PrivateLinkServiceConnection]
    :param manual_private_link_service_connections: Information about the connection to
     the remote resource, used when the network admin does not have access to approve
     connections to the remote resource.
    :type manual_private_link_service_connections:
     list[~azure.mgmt.network.v2020_04_01.models.PrivateLinkServiceConnection]
    :param custom_dns_configs: An array of custom dns configurations.
    :type custom_dns_configs:
     list[~azure.mgmt.network.v2020_04_01.models.CustomDnsConfigPropertiesFormat]
    """

    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'etag': {'readonly': True},
        'network_interfaces': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'etag': {'key': 'etag', 'type': 'str'},
        'subnet': {'key': 'properties.subnet', 'type': 'Subnet'},
        'network_interfaces': {'key': 'properties.networkInterfaces', 'type': '[NetworkInterface]'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'private_link_service_connections': {'key': 'properties.privateLinkServiceConnections', 'type': '[PrivateLinkServiceConnection]'},
        'manual_private_link_service_connections': {'key': 'properties.manualPrivateLinkServiceConnections', 'type': '[PrivateLinkServiceConnection]'},
        'custom_dns_configs': {'key': 'properties.customDnsConfigs', 'type': '[CustomDnsConfigPropertiesFormat]'},
    }

    def __init__(self, **kwargs):
        # 'id', 'location' and 'tags' are consumed by the Resource base class.
        super(PrivateEndpoint, self).__init__(**kwargs)
        # Server-populated, read-only.
        self.etag = None
        self.network_interfaces = None
        self.provisioning_state = None
        # Caller-settable.
        self.subnet = kwargs.get('subnet')
        self.private_link_service_connections = kwargs.get('private_link_service_connections')
        self.manual_private_link_service_connections = kwargs.get('manual_private_link_service_connections')
        self.custom_dns_configs = kwargs.get('custom_dns_configs')
class PrivateEndpointConnection(SubResource):
    """PrivateEndpointConnection resource.

    Read-only variables are populated by the server and ignored on requests.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the resource, unique within a resource group.
    :type name: str
    :ivar type: The resource type.
    :vartype type: str
    :ivar etag: Unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar private_endpoint: The resource of private end point.
    :vartype private_endpoint: ~azure.mgmt.network.v2020_04_01.models.PrivateEndpoint
    :param private_link_service_connection_state: Information about the state of the
     connection between service consumer and provider.
    :type private_link_service_connection_state:
     ~azure.mgmt.network.v2020_04_01.models.PrivateLinkServiceConnectionState
    :ivar provisioning_state: Provisioning state of the private endpoint connection
     resource. Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    :ivar link_identifier: The consumer link id.
    :vartype link_identifier: str
    """

    _validation = {
        'type': {'readonly': True},
        'etag': {'readonly': True},
        'private_endpoint': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'link_identifier': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'private_endpoint': {'key': 'properties.privateEndpoint', 'type': 'PrivateEndpoint'},
        'private_link_service_connection_state': {'key': 'properties.privateLinkServiceConnectionState', 'type': 'PrivateLinkServiceConnectionState'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'link_identifier': {'key': 'properties.linkIdentifier', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        # 'id' is consumed by the SubResource base class.
        super(PrivateEndpointConnection, self).__init__(**kwargs)
        # Caller-settable.
        self.name = kwargs.get('name')
        self.private_link_service_connection_state = kwargs.get('private_link_service_connection_state')
        # Server-populated, read-only.
        self.type = None
        self.etag = None
        self.private_endpoint = None
        self.provisioning_state = None
        self.link_identifier = None
class PrivateEndpointConnectionListResult(msrest.serialization.Model):
    """Response for the ListPrivateEndpointConnection API service call.

    Read-only variables are populated by the server and ignored on requests.

    :param value: PrivateEndpointConnection resources for a specific private link service.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.PrivateEndpointConnection]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _validation = {
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[PrivateEndpointConnection]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(PrivateEndpointConnectionListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value')
        # Continuation link is server-populated, read-only.
        self.next_link = None
class PrivateEndpointListResult(msrest.serialization.Model):
    """Response for the ListPrivateEndpoints API service call.

    Read-only variables are populated by the server and ignored on requests.

    :param value: Private endpoint resources in a resource group.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.PrivateEndpoint]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _validation = {
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[PrivateEndpoint]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(PrivateEndpointListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value')
        # Continuation link is server-populated, read-only.
        self.next_link = None
class PrivateLinkService(Resource):
    """Private link service resource.

    Read-only variables are populated by the server and ignored on requests.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :ivar etag: Unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param load_balancer_frontend_ip_configurations: References to the load balancer IP
     configurations.
    :type load_balancer_frontend_ip_configurations:
     list[~azure.mgmt.network.v2020_04_01.models.FrontendIPConfiguration]
    :param ip_configurations: An array of private link service IP configurations.
    :type ip_configurations:
     list[~azure.mgmt.network.v2020_04_01.models.PrivateLinkServiceIpConfiguration]
    :ivar network_interfaces: References to the network interfaces created for this
     private link service.
    :vartype network_interfaces: list[~azure.mgmt.network.v2020_04_01.models.NetworkInterface]
    :ivar provisioning_state: Provisioning state of the private link service resource.
     Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    :ivar private_endpoint_connections: Connections to the private endpoint.
    :vartype private_endpoint_connections:
     list[~azure.mgmt.network.v2020_04_01.models.PrivateEndpointConnection]
    :param visibility: The visibility list of the private link service.
    :type visibility: ~azure.mgmt.network.v2020_04_01.models.PrivateLinkServicePropertiesVisibility
    :param auto_approval: The auto-approval list of the private link service.
    :type auto_approval:
     ~azure.mgmt.network.v2020_04_01.models.PrivateLinkServicePropertiesAutoApproval
    :param fqdns: The list of Fqdn.
    :type fqdns: list[str]
    :ivar alias: The alias of the private link service.
    :vartype alias: str
    :param enable_proxy_protocol: Whether the private link service is enabled for proxy
     protocol or not.
    :type enable_proxy_protocol: bool
    """

    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'etag': {'readonly': True},
        'network_interfaces': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'private_endpoint_connections': {'readonly': True},
        'alias': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'etag': {'key': 'etag', 'type': 'str'},
        'load_balancer_frontend_ip_configurations': {'key': 'properties.loadBalancerFrontendIpConfigurations', 'type': '[FrontendIPConfiguration]'},
        'ip_configurations': {'key': 'properties.ipConfigurations', 'type': '[PrivateLinkServiceIpConfiguration]'},
        'network_interfaces': {'key': 'properties.networkInterfaces', 'type': '[NetworkInterface]'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'private_endpoint_connections': {'key': 'properties.privateEndpointConnections', 'type': '[PrivateEndpointConnection]'},
        'visibility': {'key': 'properties.visibility', 'type': 'PrivateLinkServicePropertiesVisibility'},
        'auto_approval': {'key': 'properties.autoApproval', 'type': 'PrivateLinkServicePropertiesAutoApproval'},
        'fqdns': {'key': 'properties.fqdns', 'type': '[str]'},
        'alias': {'key': 'properties.alias', 'type': 'str'},
        'enable_proxy_protocol': {'key': 'properties.enableProxyProtocol', 'type': 'bool'},
    }

    def __init__(self, **kwargs):
        # 'id', 'location' and 'tags' are consumed by the Resource base class.
        super(PrivateLinkService, self).__init__(**kwargs)
        # Server-populated, read-only.
        self.etag = None
        self.network_interfaces = None
        self.provisioning_state = None
        self.private_endpoint_connections = None
        self.alias = None
        # Caller-settable.
        self.load_balancer_frontend_ip_configurations = kwargs.get('load_balancer_frontend_ip_configurations')
        self.ip_configurations = kwargs.get('ip_configurations')
        self.visibility = kwargs.get('visibility')
        self.auto_approval = kwargs.get('auto_approval')
        self.fqdns = kwargs.get('fqdns')
        self.enable_proxy_protocol = kwargs.get('enable_proxy_protocol')
class PrivateLinkServiceConnection(SubResource):
    """PrivateLinkServiceConnection resource.

    Read-only variables are populated by the server and ignored on requests.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the resource, unique within a resource group.
    :type name: str
    :ivar type: The resource type.
    :vartype type: str
    :ivar etag: Unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar provisioning_state: Provisioning state of the private link service connection
     resource. Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    :param private_link_service_id: The resource id of private link service.
    :type private_link_service_id: str
    :param group_ids: ID(s) of the group(s) obtained from the remote resource that this
     private endpoint should connect to.
    :type group_ids: list[str]
    :param request_message: Message passed to the owner of the remote resource with this
     connection request. Restricted to 140 chars.
    :type request_message: str
    :param private_link_service_connection_state: Read-only information about the state
     of the connection to the remote resource.
    :type private_link_service_connection_state:
     ~azure.mgmt.network.v2020_04_01.models.PrivateLinkServiceConnectionState
    """

    _validation = {
        'type': {'readonly': True},
        'etag': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'private_link_service_id': {'key': 'properties.privateLinkServiceId', 'type': 'str'},
        'group_ids': {'key': 'properties.groupIds', 'type': '[str]'},
        'request_message': {'key': 'properties.requestMessage', 'type': 'str'},
        'private_link_service_connection_state': {'key': 'properties.privateLinkServiceConnectionState', 'type': 'PrivateLinkServiceConnectionState'},
    }

    def __init__(self, **kwargs):
        # 'id' is consumed by the SubResource base class.
        super(PrivateLinkServiceConnection, self).__init__(**kwargs)
        # Server-populated, read-only.
        self.type = None
        self.etag = None
        self.provisioning_state = None
        # Caller-settable.
        self.name = kwargs.get('name')
        self.private_link_service_id = kwargs.get('private_link_service_id')
        self.group_ids = kwargs.get('group_ids')
        self.request_message = kwargs.get('request_message')
        self.private_link_service_connection_state = kwargs.get('private_link_service_connection_state')
class PrivateLinkServiceConnectionState(msrest.serialization.Model):
    """A collection of information about the state of the connection between service consumer and provider.

    :param status: Indicates whether the connection has been Approved/Rejected/Removed by the owner
     of the service.
    :type status: str
    :param description: The reason for approval/rejection of the connection.
    :type description: str
    :param actions_required: A message indicating if changes on the service provider require any
     updates on the consumer.
    :type actions_required: str
    """

    _attribute_map = {
        'status': {'key': 'status', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'actions_required': {'key': 'actionsRequired', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(PrivateLinkServiceConnectionState, self).__init__(**kwargs)
        # All fields are optional; an absent keyword defaults to None.
        for _field in ('status', 'description', 'actions_required'):
            setattr(self, _field, kwargs.get(_field))
class PrivateLinkServiceIpConfiguration(SubResource):
    """The private link service ip configuration.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: The name of private link service ip configuration.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar type: The resource type.
    :vartype type: str
    :param private_ip_address: The private IP address of the IP configuration.
    :type private_ip_address: str
    :param private_ip_allocation_method: The private IP address allocation method. Possible values
     include: "Static", "Dynamic".
    :type private_ip_allocation_method: str or
     ~azure.mgmt.network.v2020_04_01.models.IPAllocationMethod
    :param subnet: The reference to the subnet resource.
    :type subnet: ~azure.mgmt.network.v2020_04_01.models.Subnet
    :param primary: Whether the ip configuration is primary or not.
    :type primary: bool
    :ivar provisioning_state: The provisioning state of the private link service IP configuration
     resource. Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    :param private_ip_address_version: Whether the specific IP configuration is IPv4 or IPv6.
     Default is IPv4. Possible values include: "IPv4", "IPv6".
    :type private_ip_address_version: str or ~azure.mgmt.network.v2020_04_01.models.IPVersion
    """

    _validation = {
        'etag': {'readonly': True},
        'type': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'private_ip_address': {'key': 'properties.privateIPAddress', 'type': 'str'},
        'private_ip_allocation_method': {'key': 'properties.privateIPAllocationMethod', 'type': 'str'},
        'subnet': {'key': 'properties.subnet', 'type': 'Subnet'},
        'primary': {'key': 'properties.primary', 'type': 'bool'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'private_ip_address_version': {'key': 'properties.privateIPAddressVersion', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(PrivateLinkServiceIpConfiguration, self).__init__(**kwargs)
        # Server-populated, read-only attributes are initialized empty.
        self.etag = None
        self.type = None
        self.provisioning_state = None
        # Caller-supplied, optional attributes.
        for _field in ('name', 'private_ip_address', 'private_ip_allocation_method',
                       'subnet', 'primary', 'private_ip_address_version'):
            setattr(self, _field, kwargs.get(_field))
class PrivateLinkServiceListResult(msrest.serialization.Model):
    """Response for the ListPrivateLinkService API service call.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param value: A list of PrivateLinkService resources in a resource group.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.PrivateLinkService]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _validation = {
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[PrivateLinkService]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(PrivateLinkServiceListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value')
        # Read-only paging link, filled in by the service.
        self.next_link = None
class ResourceSet(msrest.serialization.Model):
    """The base resource set for visibility and auto-approval.

    :param subscriptions: The list of subscriptions.
    :type subscriptions: list[str]
    """

    _attribute_map = {
        'subscriptions': {'key': 'subscriptions', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        super(ResourceSet, self).__init__(**kwargs)
        self.subscriptions = kwargs.get('subscriptions')
class PrivateLinkServicePropertiesAutoApproval(ResourceSet):
    """The auto-approval list of the private link service.

    :param subscriptions: The list of subscriptions.
    :type subscriptions: list[str]
    """

    _attribute_map = {
        'subscriptions': {'key': 'subscriptions', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        # Pure specialization of ResourceSet; all state handling lives in the base.
        super(PrivateLinkServicePropertiesAutoApproval, self).__init__(**kwargs)
class PrivateLinkServicePropertiesVisibility(ResourceSet):
    """The visibility list of the private link service.

    :param subscriptions: The list of subscriptions.
    :type subscriptions: list[str]
    """

    _attribute_map = {
        'subscriptions': {'key': 'subscriptions', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        # Pure specialization of ResourceSet; all state handling lives in the base.
        super(PrivateLinkServicePropertiesVisibility, self).__init__(**kwargs)
class PrivateLinkServiceVisibility(msrest.serialization.Model):
    """Response for the CheckPrivateLinkServiceVisibility API service call.

    :param visible: Private Link Service Visibility (True/False).
    :type visible: bool
    """

    _attribute_map = {
        'visible': {'key': 'visible', 'type': 'bool'},
    }

    def __init__(self, **kwargs):
        super(PrivateLinkServiceVisibility, self).__init__(**kwargs)
        self.visible = kwargs.get('visible')
class Probe(SubResource):
    """A load balancer probe.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: The name of the resource that is unique within the set of probes used by the load
     balancer. This name can be used to access the resource.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar type: Type of the resource.
    :vartype type: str
    :ivar load_balancing_rules: The load balancer rules that use this probe.
    :vartype load_balancing_rules: list[~azure.mgmt.network.v2020_04_01.models.SubResource]
    :param protocol: The protocol of the end point. If 'Tcp' is specified, a received ACK is
     required for the probe to be successful. If 'Http' or 'Https' is specified, a 200 OK response
     from the specifies URI is required for the probe to be successful. Possible values include:
     "Http", "Tcp", "Https".
    :type protocol: str or ~azure.mgmt.network.v2020_04_01.models.ProbeProtocol
    :param port: The port for communicating the probe. Possible values range from 1 to 65535,
     inclusive.
    :type port: int
    :param interval_in_seconds: The interval, in seconds, for how frequently to probe the endpoint
     for health status. Typically, the interval is slightly less than half the allocated timeout
     period (in seconds) which allows two full probes before taking the instance out of rotation.
     The default value is 15, the minimum value is 5.
    :type interval_in_seconds: int
    :param number_of_probes: The number of probes where if no response, will result in stopping
     further traffic from being delivered to the endpoint. This values allows endpoints to be taken
     out of rotation faster or slower than the typical times used in Azure.
    :type number_of_probes: int
    :param request_path: The URI used for requesting health status from the VM. Path is required if
     a protocol is set to http. Otherwise, it is not allowed. There is no default value.
    :type request_path: str
    :ivar provisioning_state: The provisioning state of the probe resource. Possible values
     include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    _validation = {
        'etag': {'readonly': True},
        'type': {'readonly': True},
        'load_balancing_rules': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'load_balancing_rules': {'key': 'properties.loadBalancingRules', 'type': '[SubResource]'},
        'protocol': {'key': 'properties.protocol', 'type': 'str'},
        'port': {'key': 'properties.port', 'type': 'int'},
        'interval_in_seconds': {'key': 'properties.intervalInSeconds', 'type': 'int'},
        'number_of_probes': {'key': 'properties.numberOfProbes', 'type': 'int'},
        'request_path': {'key': 'properties.requestPath', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(Probe, self).__init__(**kwargs)
        # Read-only, server-populated attributes.
        self.etag = None
        self.type = None
        self.load_balancing_rules = None
        self.provisioning_state = None
        # Optional caller-supplied attributes.
        for _field in ('name', 'protocol', 'port', 'interval_in_seconds',
                       'number_of_probes', 'request_path'):
            setattr(self, _field, kwargs.get(_field))
class PropagatedRouteTable(msrest.serialization.Model):
    """The list of RouteTables to advertise the routes to.

    :param labels: The list of labels.
    :type labels: list[str]
    :param ids: The list of resource ids of all the RouteTables.
    :type ids: list[~azure.mgmt.network.v2020_04_01.models.SubResource]
    """

    _attribute_map = {
        'labels': {'key': 'labels', 'type': '[str]'},
        'ids': {'key': 'ids', 'type': '[SubResource]'},
    }

    def __init__(self, **kwargs):
        super(PropagatedRouteTable, self).__init__(**kwargs)
        for _field in ('labels', 'ids'):
            setattr(self, _field, kwargs.get(_field))
class ProtocolConfiguration(msrest.serialization.Model):
    """Configuration of the protocol.

    :param http_configuration: HTTP configuration of the connectivity check.
    :type http_configuration: ~azure.mgmt.network.v2020_04_01.models.HTTPConfiguration
    """

    _attribute_map = {
        'http_configuration': {'key': 'HTTPConfiguration', 'type': 'HTTPConfiguration'},
    }

    def __init__(self, **kwargs):
        super(ProtocolConfiguration, self).__init__(**kwargs)
        self.http_configuration = kwargs.get('http_configuration')
class ProtocolCustomSettingsFormat(msrest.serialization.Model):
    """DDoS custom policy properties.

    :param protocol: The protocol for which the DDoS protection policy is being customized.
     Possible values include: "Tcp", "Udp", "Syn".
    :type protocol: str or ~azure.mgmt.network.v2020_04_01.models.DdosCustomPolicyProtocol
    :param trigger_rate_override: The customized DDoS protection trigger rate.
    :type trigger_rate_override: str
    :param source_rate_override: The customized DDoS protection source rate.
    :type source_rate_override: str
    :param trigger_sensitivity_override: The customized DDoS protection trigger rate sensitivity
     degrees. High: Trigger rate set with most sensitivity w.r.t. normal traffic. Default: Trigger
     rate set with moderate sensitivity w.r.t. normal traffic. Low: Trigger rate set with less
     sensitivity w.r.t. normal traffic. Relaxed: Trigger rate set with least sensitivity w.r.t.
     normal traffic. Possible values include: "Relaxed", "Low", "Default", "High".
    :type trigger_sensitivity_override: str or
     ~azure.mgmt.network.v2020_04_01.models.DdosCustomPolicyTriggerSensitivityOverride
    """

    _attribute_map = {
        'protocol': {'key': 'protocol', 'type': 'str'},
        'trigger_rate_override': {'key': 'triggerRateOverride', 'type': 'str'},
        'source_rate_override': {'key': 'sourceRateOverride', 'type': 'str'},
        'trigger_sensitivity_override': {'key': 'triggerSensitivityOverride', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ProtocolCustomSettingsFormat, self).__init__(**kwargs)
        # All fields are optional customization knobs; absent keys become None.
        for _field in ('protocol', 'trigger_rate_override', 'source_rate_override',
                       'trigger_sensitivity_override'):
            setattr(self, _field, kwargs.get(_field))
class PublicIPAddress(Resource):
    """Public IP address resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param sku: The public IP address SKU.
    :type sku: ~azure.mgmt.network.v2020_04_01.models.PublicIPAddressSku
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param zones: A list of availability zones denoting the IP allocated for the resource needs to
     come from.
    :type zones: list[str]
    :param public_ip_allocation_method: The public IP address allocation method. Possible values
     include: "Static", "Dynamic".
    :type public_ip_allocation_method: str or
     ~azure.mgmt.network.v2020_04_01.models.IPAllocationMethod
    :param public_ip_address_version: The public IP address version. Possible values include:
     "IPv4", "IPv6".
    :type public_ip_address_version: str or ~azure.mgmt.network.v2020_04_01.models.IPVersion
    :ivar ip_configuration: The IP configuration associated with the public IP address.
    :vartype ip_configuration: ~azure.mgmt.network.v2020_04_01.models.IPConfiguration
    :param dns_settings: The FQDN of the DNS record associated with the public IP address.
    :type dns_settings: ~azure.mgmt.network.v2020_04_01.models.PublicIPAddressDnsSettings
    :param ddos_settings: The DDoS protection custom policy associated with the public IP address.
    :type ddos_settings: ~azure.mgmt.network.v2020_04_01.models.DdosSettings
    :param ip_tags: The list of tags associated with the public IP address.
    :type ip_tags: list[~azure.mgmt.network.v2020_04_01.models.IpTag]
    :param ip_address: The IP address associated with the public IP address resource.
    :type ip_address: str
    :param public_ip_prefix: The Public IP Prefix this Public IP Address should be allocated from.
    :type public_ip_prefix: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param idle_timeout_in_minutes: The idle timeout of the public IP address.
    :type idle_timeout_in_minutes: int
    :ivar resource_guid: The resource GUID property of the public IP address resource.
    :vartype resource_guid: str
    :ivar provisioning_state: The provisioning state of the public IP address resource. Possible
     values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'etag': {'readonly': True},
        'ip_configuration': {'readonly': True},
        'resource_guid': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'sku': {'key': 'sku', 'type': 'PublicIPAddressSku'},
        'etag': {'key': 'etag', 'type': 'str'},
        'zones': {'key': 'zones', 'type': '[str]'},
        'public_ip_allocation_method': {'key': 'properties.publicIPAllocationMethod', 'type': 'str'},
        'public_ip_address_version': {'key': 'properties.publicIPAddressVersion', 'type': 'str'},
        'ip_configuration': {'key': 'properties.ipConfiguration', 'type': 'IPConfiguration'},
        'dns_settings': {'key': 'properties.dnsSettings', 'type': 'PublicIPAddressDnsSettings'},
        'ddos_settings': {'key': 'properties.ddosSettings', 'type': 'DdosSettings'},
        'ip_tags': {'key': 'properties.ipTags', 'type': '[IpTag]'},
        'ip_address': {'key': 'properties.ipAddress', 'type': 'str'},
        'public_ip_prefix': {'key': 'properties.publicIPPrefix', 'type': 'SubResource'},
        'idle_timeout_in_minutes': {'key': 'properties.idleTimeoutInMinutes', 'type': 'int'},
        'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(PublicIPAddress, self).__init__(**kwargs)
        # Read-only, server-populated attributes.
        self.etag = None
        self.ip_configuration = None
        self.resource_guid = None
        self.provisioning_state = None
        # Optional caller-supplied attributes.
        for _field in ('sku', 'zones', 'public_ip_allocation_method',
                       'public_ip_address_version', 'dns_settings', 'ddos_settings',
                       'ip_tags', 'ip_address', 'public_ip_prefix',
                       'idle_timeout_in_minutes'):
            setattr(self, _field, kwargs.get(_field))
class PublicIPAddressDnsSettings(msrest.serialization.Model):
    """Contains FQDN of the DNS record associated with the public IP address.

    :param domain_name_label: The domain name label. The concatenation of the domain name label and
     the regionalized DNS zone make up the fully qualified domain name associated with the public IP
     address. If a domain name label is specified, an A DNS record is created for the public IP in
     the Microsoft Azure DNS system.
    :type domain_name_label: str
    :param fqdn: The Fully Qualified Domain Name of the A DNS record associated with the public IP.
     This is the concatenation of the domainNameLabel and the regionalized DNS zone.
    :type fqdn: str
    :param reverse_fqdn: The reverse FQDN. A user-visible, fully qualified domain name that
     resolves to this public IP address. If the reverseFqdn is specified, then a PTR DNS record is
     created pointing from the IP address in the in-addr.arpa domain to the reverse FQDN.
    :type reverse_fqdn: str
    """

    _attribute_map = {
        'domain_name_label': {'key': 'domainNameLabel', 'type': 'str'},
        'fqdn': {'key': 'fqdn', 'type': 'str'},
        'reverse_fqdn': {'key': 'reverseFqdn', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(PublicIPAddressDnsSettings, self).__init__(**kwargs)
        for _field in ('domain_name_label', 'fqdn', 'reverse_fqdn'):
            setattr(self, _field, kwargs.get(_field))
class PublicIPAddressListResult(msrest.serialization.Model):
    """Response for ListPublicIpAddresses API service call.

    :param value: A list of public IP addresses that exists in a resource group.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.PublicIPAddress]
    :param next_link: The URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[PublicIPAddress]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(PublicIPAddressListResult, self).__init__(**kwargs)
        # Unlike most list results here, next_link is writable on this model.
        for _field in ('value', 'next_link'):
            setattr(self, _field, kwargs.get(_field))
class PublicIPAddressSku(msrest.serialization.Model):
    """SKU of a public IP address.

    :param name: Name of a public IP address SKU. Possible values include: "Basic", "Standard".
    :type name: str or ~azure.mgmt.network.v2020_04_01.models.PublicIPAddressSkuName
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(PublicIPAddressSku, self).__init__(**kwargs)
        self.name = kwargs.get('name')
class PublicIPPrefix(Resource):
    """Public IP prefix resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param sku: The public IP prefix SKU.
    :type sku: ~azure.mgmt.network.v2020_04_01.models.PublicIPPrefixSku
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param zones: A list of availability zones denoting the IP allocated for the resource needs to
     come from.
    :type zones: list[str]
    :param public_ip_address_version: The public IP address version. Possible values include:
     "IPv4", "IPv6".
    :type public_ip_address_version: str or ~azure.mgmt.network.v2020_04_01.models.IPVersion
    :param ip_tags: The list of tags associated with the public IP prefix.
    :type ip_tags: list[~azure.mgmt.network.v2020_04_01.models.IpTag]
    :param prefix_length: The Length of the Public IP Prefix.
    :type prefix_length: int
    :ivar ip_prefix: The allocated Prefix.
    :vartype ip_prefix: str
    :ivar public_ip_addresses: The list of all referenced PublicIPAddresses.
    :vartype public_ip_addresses:
     list[~azure.mgmt.network.v2020_04_01.models.ReferencedPublicIpAddress]
    :ivar load_balancer_frontend_ip_configuration: The reference to load balancer frontend IP
     configuration associated with the public IP prefix.
    :vartype load_balancer_frontend_ip_configuration:
     ~azure.mgmt.network.v2020_04_01.models.SubResource
    :ivar resource_guid: The resource GUID property of the public IP prefix resource.
    :vartype resource_guid: str
    :ivar provisioning_state: The provisioning state of the public IP prefix resource. Possible
     values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'etag': {'readonly': True},
        'ip_prefix': {'readonly': True},
        'public_ip_addresses': {'readonly': True},
        'load_balancer_frontend_ip_configuration': {'readonly': True},
        'resource_guid': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'sku': {'key': 'sku', 'type': 'PublicIPPrefixSku'},
        'etag': {'key': 'etag', 'type': 'str'},
        'zones': {'key': 'zones', 'type': '[str]'},
        'public_ip_address_version': {'key': 'properties.publicIPAddressVersion', 'type': 'str'},
        'ip_tags': {'key': 'properties.ipTags', 'type': '[IpTag]'},
        'prefix_length': {'key': 'properties.prefixLength', 'type': 'int'},
        'ip_prefix': {'key': 'properties.ipPrefix', 'type': 'str'},
        'public_ip_addresses': {'key': 'properties.publicIPAddresses', 'type': '[ReferencedPublicIpAddress]'},
        'load_balancer_frontend_ip_configuration': {'key': 'properties.loadBalancerFrontendIpConfiguration', 'type': 'SubResource'},
        'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(PublicIPPrefix, self).__init__(**kwargs)
        # Read-only, server-populated attributes.
        self.etag = None
        self.ip_prefix = None
        self.public_ip_addresses = None
        self.load_balancer_frontend_ip_configuration = None
        self.resource_guid = None
        self.provisioning_state = None
        # Optional caller-supplied attributes.
        for _field in ('sku', 'zones', 'public_ip_address_version', 'ip_tags',
                       'prefix_length'):
            setattr(self, _field, kwargs.get(_field))
class PublicIPPrefixListResult(msrest.serialization.Model):
    """Response for ListPublicIpPrefixes API service call.

    :param value: A list of public IP prefixes that exists in a resource group.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.PublicIPPrefix]
    :param next_link: The URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[PublicIPPrefix]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(PublicIPPrefixListResult, self).__init__(**kwargs)
        # Both fields are writable on this list-result model.
        for _field in ('value', 'next_link'):
            setattr(self, _field, kwargs.get(_field))
class PublicIPPrefixSku(msrest.serialization.Model):
    """SKU of a public IP prefix.

    :param name: Name of a public IP prefix SKU. Possible values include: "Standard".
    :type name: str or ~azure.mgmt.network.v2020_04_01.models.PublicIPPrefixSkuName
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(PublicIPPrefixSku, self).__init__(**kwargs)
        self.name = kwargs.get('name')
class QueryTroubleshootingParameters(msrest.serialization.Model):
    """Parameters that define the resource to query the troubleshooting result.

    All required parameters must be populated in order to send to Azure.

    :param target_resource_id: Required. The target resource ID to query the troubleshooting
     result.
    :type target_resource_id: str
    """

    _validation = {
        'target_resource_id': {'required': True},
    }

    _attribute_map = {
        'target_resource_id': {'key': 'targetResourceId', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(QueryTroubleshootingParameters, self).__init__(**kwargs)
        # Required parameter: subscripting (not .get) raises KeyError if omitted.
        self.target_resource_id = kwargs['target_resource_id']
class RadiusServer(msrest.serialization.Model):
    """Radius Server Settings.

    All required parameters must be populated in order to send to Azure.

    :param radius_server_address: Required. The address of this radius server.
    :type radius_server_address: str
    :param radius_server_score: The initial score assigned to this radius server.
    :type radius_server_score: long
    :param radius_server_secret: The secret used for this radius server.
    :type radius_server_secret: str
    """

    _validation = {
        'radius_server_address': {'required': True},
    }

    _attribute_map = {
        'radius_server_address': {'key': 'radiusServerAddress', 'type': 'str'},
        'radius_server_score': {'key': 'radiusServerScore', 'type': 'long'},
        'radius_server_secret': {'key': 'radiusServerSecret', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(RadiusServer, self).__init__(**kwargs)
        # Required parameter: subscripting raises KeyError if omitted.
        self.radius_server_address = kwargs['radius_server_address']
        # Optional parameters default to None.
        self.radius_server_score = kwargs.get('radius_server_score')
        self.radius_server_secret = kwargs.get('radius_server_secret')
class RecordSet(msrest.serialization.Model):
    """A collective group of information about the record set information.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param record_type: Resource record type.
    :type record_type: str
    :param record_set_name: Recordset name.
    :type record_set_name: str
    :param fqdn: Fqdn that resolves to private endpoint ip address.
    :type fqdn: str
    :ivar provisioning_state: The provisioning state of the recordset. Possible values include:
     "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    :param ttl: Recordset time to live.
    :type ttl: int
    :param ip_addresses: The private ip address of the private endpoint.
    :type ip_addresses: list[str]
    """

    _validation = {
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'record_type': {'key': 'recordType', 'type': 'str'},
        'record_set_name': {'key': 'recordSetName', 'type': 'str'},
        'fqdn': {'key': 'fqdn', 'type': 'str'},
        'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
        'ttl': {'key': 'ttl', 'type': 'int'},
        'ip_addresses': {'key': 'ipAddresses', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        super(RecordSet, self).__init__(**kwargs)
        # Server-populated, read-only attribute.
        self.provisioning_state = None
        # Optional caller-supplied attributes.
        for _field in ('record_type', 'record_set_name', 'fqdn', 'ttl',
                       'ip_addresses'):
            setattr(self, _field, kwargs.get(_field))
class ReferencedPublicIpAddress(msrest.serialization.Model):
    """Reference to a public IP address.

    :param id: The PublicIPAddress Reference.
    :type id: str
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ReferencedPublicIpAddress, self).__init__(**kwargs)
        self.id = kwargs.get('id')
class ResourceNavigationLink(SubResource):
    """ResourceNavigationLink resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the resource that is unique within a resource group. This name can be used
     to access the resource.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar type: Resource type.
    :vartype type: str
    :param linked_resource_type: Resource type of the linked resource.
    :type linked_resource_type: str
    :param link: Link to the external resource.
    :type link: str
    :ivar provisioning_state: The provisioning state of the resource navigation link resource.
     Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    _validation = {
        'etag': {'readonly': True},
        'type': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'linked_resource_type': {'key': 'properties.linkedResourceType', 'type': 'str'},
        'link': {'key': 'properties.link', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ResourceNavigationLink, self).__init__(**kwargs)
        # Read-only, server-populated attributes.
        self.etag = None
        self.type = None
        self.provisioning_state = None
        # Optional caller-supplied attributes.
        for _field in ('name', 'linked_resource_type', 'link'):
            setattr(self, _field, kwargs.get(_field))
class ResourceNavigationLinksListResult(msrest.serialization.Model):
    """Response for ResourceNavigationLinks_List operation.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param value: The resource navigation links in a subnet.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.ResourceNavigationLink]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _validation = {
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[ResourceNavigationLink]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ResourceNavigationLinksListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value')
        # Read-only paging link, filled in by the service.
        self.next_link = None
class RetentionPolicyParameters(msrest.serialization.Model):
    """Parameters that define the retention policy for flow log.

    :param days: Number of days to retain flow log records.
    :type days: int
    :param enabled: Flag to enable/disable retention.
    :type enabled: bool
    """

    _attribute_map = {
        'days': {'key': 'days', 'type': 'int'},
        'enabled': {'key': 'enabled', 'type': 'bool'},
    }

    def __init__(self, **kwargs):
        super(RetentionPolicyParameters, self).__init__(**kwargs)
        # Unlike most models here, these fall back to concrete schema defaults
        # (0 days / disabled), not None.
        self.days = kwargs.get('days', 0)
        self.enabled = kwargs.get('enabled', False)
class Route(SubResource):
    """Route resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: The name of the resource that is unique within a resource group. This name can be
     used to access the resource.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param address_prefix: The destination CIDR to which the route applies.
    :type address_prefix: str
    :param next_hop_type: The type of Azure hop the packet should be sent to. Possible values
     include: "VirtualNetworkGateway", "VnetLocal", "Internet", "VirtualAppliance", "None".
    :type next_hop_type: str or ~azure.mgmt.network.v2020_04_01.models.RouteNextHopType
    :param next_hop_ip_address: The IP address packets should be forwarded to. Next hop values are
     only allowed in routes where the next hop type is VirtualAppliance.
    :type next_hop_ip_address: str
    :ivar provisioning_state: The provisioning state of the route resource. Possible values
     include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    _validation = {
        "etag": {"readonly": True},
        "provisioning_state": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "etag": {"key": "etag", "type": "str"},
        "address_prefix": {"key": "properties.addressPrefix", "type": "str"},
        "next_hop_type": {"key": "properties.nextHopType", "type": "str"},
        "next_hop_ip_address": {"key": "properties.nextHopIpAddress", "type": "str"},
        "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(Route, self).__init__(**kwargs)
        self.name = kwargs.get("name")
        self.etag = None  # read-only; populated by the server
        self.address_prefix = kwargs.get("address_prefix")
        self.next_hop_type = kwargs.get("next_hop_type")
        self.next_hop_ip_address = kwargs.get("next_hop_ip_address")
        self.provisioning_state = None  # read-only; populated by the server
class RouteFilter(Resource):
    """Route Filter Resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param rules: Collection of RouteFilterRules contained within a route filter.
    :type rules: list[~azure.mgmt.network.v2020_04_01.models.RouteFilterRule]
    :ivar peerings: A collection of references to express route circuit peerings.
    :vartype peerings: list[~azure.mgmt.network.v2020_04_01.models.ExpressRouteCircuitPeering]
    :ivar ipv6_peerings: A collection of references to express route circuit ipv6 peerings.
    :vartype ipv6_peerings: list[~azure.mgmt.network.v2020_04_01.models.ExpressRouteCircuitPeering]
    :ivar provisioning_state: The provisioning state of the route filter resource. Possible values
     include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    _validation = {
        "name": {"readonly": True},
        "type": {"readonly": True},
        "etag": {"readonly": True},
        "peerings": {"readonly": True},
        "ipv6_peerings": {"readonly": True},
        "provisioning_state": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "etag": {"key": "etag", "type": "str"},
        "rules": {"key": "properties.rules", "type": "[RouteFilterRule]"},
        "peerings": {"key": "properties.peerings", "type": "[ExpressRouteCircuitPeering]"},
        "ipv6_peerings": {"key": "properties.ipv6Peerings", "type": "[ExpressRouteCircuitPeering]"},
        "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
    }

    def __init__(self, **kwargs):
        # id/location/tags are handled by the Resource base class.
        super(RouteFilter, self).__init__(**kwargs)
        self.etag = None  # read-only; populated by the server
        self.rules = kwargs.get("rules")
        self.peerings = None  # read-only; populated by the server
        self.ipv6_peerings = None  # read-only; populated by the server
        self.provisioning_state = None  # read-only; populated by the server
class RouteFilterListResult(msrest.serialization.Model):
    """Response for the ListRouteFilters API service call.

    :param value: A list of route filters in a resource group.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.RouteFilter]
    :param next_link: The URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        "value": {"key": "value", "type": "[RouteFilter]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(RouteFilterListResult, self).__init__(**kwargs)
        self.value = kwargs.get("value")
        self.next_link = kwargs.get("next_link")
class RouteFilterRule(SubResource):
    """Route Filter Rule Resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: The name of the resource that is unique within a resource group. This name can be
     used to access the resource.
    :type name: str
    :param location: Resource location.
    :type location: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param access: The access type of the rule. Possible values include: "Allow", "Deny".
    :type access: str or ~azure.mgmt.network.v2020_04_01.models.Access
    :param route_filter_rule_type: The rule type of the rule. Possible values include: "Community".
    :type route_filter_rule_type: str or ~azure.mgmt.network.v2020_04_01.models.RouteFilterRuleType
    :param communities: The collection for bgp community values to filter on. e.g.
     ['12076:5010','12076:5020'].
    :type communities: list[str]
    :ivar provisioning_state: The provisioning state of the route filter rule resource. Possible
     values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    _validation = {
        "etag": {"readonly": True},
        "provisioning_state": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "etag": {"key": "etag", "type": "str"},
        "access": {"key": "properties.access", "type": "str"},
        "route_filter_rule_type": {"key": "properties.routeFilterRuleType", "type": "str"},
        "communities": {"key": "properties.communities", "type": "[str]"},
        "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(RouteFilterRule, self).__init__(**kwargs)
        self.name = kwargs.get("name")
        self.location = kwargs.get("location")
        self.etag = None  # read-only; populated by the server
        self.access = kwargs.get("access")
        self.route_filter_rule_type = kwargs.get("route_filter_rule_type")
        self.communities = kwargs.get("communities")
        self.provisioning_state = None  # read-only; populated by the server
class RouteFilterRuleListResult(msrest.serialization.Model):
    """Response for the ListRouteFilterRules API service call.

    :param value: A list of RouteFilterRules in a resource group.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.RouteFilterRule]
    :param next_link: The URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        "value": {"key": "value", "type": "[RouteFilterRule]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(RouteFilterRuleListResult, self).__init__(**kwargs)
        self.value = kwargs.get("value")
        self.next_link = kwargs.get("next_link")
class RouteListResult(msrest.serialization.Model):
    """Response for the ListRoute API service call.

    :param value: A list of routes in a resource group.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.Route]
    :param next_link: The URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        "value": {"key": "value", "type": "[Route]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(RouteListResult, self).__init__(**kwargs)
        self.value = kwargs.get("value")
        self.next_link = kwargs.get("next_link")
class RouteTable(Resource):
    """Route table resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param routes: Collection of routes contained within a route table.
    :type routes: list[~azure.mgmt.network.v2020_04_01.models.Route]
    :ivar subnets: A collection of references to subnets.
    :vartype subnets: list[~azure.mgmt.network.v2020_04_01.models.Subnet]
    :param disable_bgp_route_propagation: Whether to disable the routes learned by BGP on that
     route table. True means disable.
    :type disable_bgp_route_propagation: bool
    :ivar provisioning_state: The provisioning state of the route table resource. Possible values
     include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    _validation = {
        "name": {"readonly": True},
        "type": {"readonly": True},
        "etag": {"readonly": True},
        "subnets": {"readonly": True},
        "provisioning_state": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "etag": {"key": "etag", "type": "str"},
        "routes": {"key": "properties.routes", "type": "[Route]"},
        "subnets": {"key": "properties.subnets", "type": "[Subnet]"},
        "disable_bgp_route_propagation": {"key": "properties.disableBgpRoutePropagation", "type": "bool"},
        "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
    }

    def __init__(self, **kwargs):
        # id/location/tags are handled by the Resource base class.
        super(RouteTable, self).__init__(**kwargs)
        self.etag = None  # read-only; populated by the server
        self.routes = kwargs.get("routes")
        self.subnets = None  # read-only; populated by the server
        self.disable_bgp_route_propagation = kwargs.get("disable_bgp_route_propagation")
        self.provisioning_state = None  # read-only; populated by the server
class RouteTableListResult(msrest.serialization.Model):
    """Response for the ListRouteTable API service call.

    :param value: A list of route tables in a resource group.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.RouteTable]
    :param next_link: The URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        "value": {"key": "value", "type": "[RouteTable]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(RouteTableListResult, self).__init__(**kwargs)
        self.value = kwargs.get("value")
        self.next_link = kwargs.get("next_link")
class RoutingConfiguration(msrest.serialization.Model):
    """Routing Configuration indicating the associated and propagated route tables for this connection.

    :param associated_route_table: The resource id RouteTable associated with this
     RoutingConfiguration.
    :type associated_route_table: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param propagated_route_tables: The list of RouteTables to advertise the routes to.
    :type propagated_route_tables: ~azure.mgmt.network.v2020_04_01.models.PropagatedRouteTable
    :param vnet_routes: List of routes that control routing from VirtualHub into a virtual network
     connection.
    :type vnet_routes: ~azure.mgmt.network.v2020_04_01.models.VnetRoute
    """

    _attribute_map = {
        "associated_route_table": {"key": "associatedRouteTable", "type": "SubResource"},
        "propagated_route_tables": {"key": "propagatedRouteTables", "type": "PropagatedRouteTable"},
        "vnet_routes": {"key": "vnetRoutes", "type": "VnetRoute"},
    }

    def __init__(self, **kwargs):
        super(RoutingConfiguration, self).__init__(**kwargs)
        self.associated_route_table = kwargs.get("associated_route_table")
        self.propagated_route_tables = kwargs.get("propagated_route_tables")
        self.vnet_routes = kwargs.get("vnet_routes")
class SecurityGroupNetworkInterface(msrest.serialization.Model):
    """Network interface and all its associated security rules.

    :param id: ID of the network interface.
    :type id: str
    :param security_rule_associations: All security rules associated with the network interface.
    :type security_rule_associations:
     ~azure.mgmt.network.v2020_04_01.models.SecurityRuleAssociations
    """

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "security_rule_associations": {"key": "securityRuleAssociations", "type": "SecurityRuleAssociations"},
    }

    def __init__(self, **kwargs):
        super(SecurityGroupNetworkInterface, self).__init__(**kwargs)
        self.id = kwargs.get("id")
        self.security_rule_associations = kwargs.get("security_rule_associations")
class SecurityGroupViewParameters(msrest.serialization.Model):
    """Parameters that define the VM to check security groups for.

    All required parameters must be populated in order to send to Azure.

    :param target_resource_id: Required. ID of the target VM.
    :type target_resource_id: str
    """

    _validation = {
        "target_resource_id": {"required": True},
    }

    _attribute_map = {
        "target_resource_id": {"key": "targetResourceId", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(SecurityGroupViewParameters, self).__init__(**kwargs)
        # Required parameter: a missing key deliberately raises KeyError.
        self.target_resource_id = kwargs["target_resource_id"]
class SecurityGroupViewResult(msrest.serialization.Model):
    """The information about security rules applied to the specified VM.

    :param network_interfaces: List of network interfaces on the specified VM.
    :type network_interfaces:
     list[~azure.mgmt.network.v2020_04_01.models.SecurityGroupNetworkInterface]
    """

    _attribute_map = {
        "network_interfaces": {"key": "networkInterfaces", "type": "[SecurityGroupNetworkInterface]"},
    }

    def __init__(self, **kwargs):
        super(SecurityGroupViewResult, self).__init__(**kwargs)
        self.network_interfaces = kwargs.get("network_interfaces")
class SecurityPartnerProvider(Resource):
    """Security Partner Provider resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar provisioning_state: The provisioning state of the Security Partner Provider resource.
     Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    :param security_provider_name: The security provider name. Possible values include: "ZScaler",
     "IBoss", "Checkpoint".
    :type security_provider_name: str or
     ~azure.mgmt.network.v2020_04_01.models.SecurityProviderName
    :ivar connection_status: The connection status with the Security Partner Provider. Possible
     values include: "Unknown", "PartiallyConnected", "Connected", "NotConnected".
    :vartype connection_status: str or
     ~azure.mgmt.network.v2020_04_01.models.SecurityPartnerProviderConnectionStatus
    :param virtual_hub: The virtualHub to which the Security Partner Provider belongs.
    :type virtual_hub: ~azure.mgmt.network.v2020_04_01.models.SubResource
    """

    _validation = {
        "name": {"readonly": True},
        "type": {"readonly": True},
        "etag": {"readonly": True},
        "provisioning_state": {"readonly": True},
        "connection_status": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "etag": {"key": "etag", "type": "str"},
        "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
        "security_provider_name": {"key": "properties.securityProviderName", "type": "str"},
        "connection_status": {"key": "properties.connectionStatus", "type": "str"},
        "virtual_hub": {"key": "properties.virtualHub", "type": "SubResource"},
    }

    def __init__(self, **kwargs):
        # id/location/tags are handled by the Resource base class.
        super(SecurityPartnerProvider, self).__init__(**kwargs)
        self.etag = None  # read-only; populated by the server
        self.provisioning_state = None  # read-only; populated by the server
        self.security_provider_name = kwargs.get("security_provider_name")
        self.connection_status = None  # read-only; populated by the server
        self.virtual_hub = kwargs.get("virtual_hub")
class SecurityPartnerProviderListResult(msrest.serialization.Model):
    """Response for ListSecurityPartnerProviders API service call.

    :param value: List of Security Partner Providers in a resource group.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.SecurityPartnerProvider]
    :param next_link: URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        "value": {"key": "value", "type": "[SecurityPartnerProvider]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(SecurityPartnerProviderListResult, self).__init__(**kwargs)
        self.value = kwargs.get("value")
        self.next_link = kwargs.get("next_link")
class SecurityRule(SubResource):
    """Network security rule.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: The name of the resource that is unique within a resource group. This name can be
     used to access the resource.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param description: A description for this rule. Restricted to 140 chars.
    :type description: str
    :param protocol: Network protocol this rule applies to. Possible values include: "Tcp", "Udp",
     "Icmp", "Esp", "*", "Ah".
    :type protocol: str or ~azure.mgmt.network.v2020_04_01.models.SecurityRuleProtocol
    :param source_port_range: The source port or range. Integer or range between 0 and 65535.
     Asterisk '*' can also be used to match all ports.
    :type source_port_range: str
    :param destination_port_range: The destination port or range. Integer or range between 0 and
     65535. Asterisk '*' can also be used to match all ports.
    :type destination_port_range: str
    :param source_address_prefix: The CIDR or source IP range. Asterisk '*' can also be used to
     match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet'
     can also be used. If this is an ingress rule, specifies where network traffic originates from.
    :type source_address_prefix: str
    :param source_address_prefixes: The CIDR or source IP ranges.
    :type source_address_prefixes: list[str]
    :param source_application_security_groups: The application security group specified as source.
    :type source_application_security_groups:
     list[~azure.mgmt.network.v2020_04_01.models.ApplicationSecurityGroup]
    :param destination_address_prefix: The destination address prefix. CIDR or destination IP
     range. Asterisk '*' can also be used to match all source IPs. Default tags such as
     'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.
    :type destination_address_prefix: str
    :param destination_address_prefixes: The destination address prefixes. CIDR or destination IP
     ranges.
    :type destination_address_prefixes: list[str]
    :param destination_application_security_groups: The application security group specified as
     destination.
    :type destination_application_security_groups:
     list[~azure.mgmt.network.v2020_04_01.models.ApplicationSecurityGroup]
    :param source_port_ranges: The source port ranges.
    :type source_port_ranges: list[str]
    :param destination_port_ranges: The destination port ranges.
    :type destination_port_ranges: list[str]
    :param access: The network traffic is allowed or denied. Possible values include: "Allow",
     "Deny".
    :type access: str or ~azure.mgmt.network.v2020_04_01.models.SecurityRuleAccess
    :param priority: The priority of the rule. The value can be between 100 and 4096. The priority
     number must be unique for each rule in the collection. The lower the priority number, the
     higher the priority of the rule.
    :type priority: int
    :param direction: The direction of the rule. The direction specifies if rule will be evaluated
     on incoming or outgoing traffic. Possible values include: "Inbound", "Outbound".
    :type direction: str or ~azure.mgmt.network.v2020_04_01.models.SecurityRuleDirection
    :ivar provisioning_state: The provisioning state of the security rule resource. Possible values
     include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    _validation = {
        "etag": {"readonly": True},
        "provisioning_state": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "etag": {"key": "etag", "type": "str"},
        "description": {"key": "properties.description", "type": "str"},
        "protocol": {"key": "properties.protocol", "type": "str"},
        "source_port_range": {"key": "properties.sourcePortRange", "type": "str"},
        "destination_port_range": {"key": "properties.destinationPortRange", "type": "str"},
        "source_address_prefix": {"key": "properties.sourceAddressPrefix", "type": "str"},
        "source_address_prefixes": {"key": "properties.sourceAddressPrefixes", "type": "[str]"},
        "source_application_security_groups": {"key": "properties.sourceApplicationSecurityGroups", "type": "[ApplicationSecurityGroup]"},
        "destination_address_prefix": {"key": "properties.destinationAddressPrefix", "type": "str"},
        "destination_address_prefixes": {"key": "properties.destinationAddressPrefixes", "type": "[str]"},
        "destination_application_security_groups": {"key": "properties.destinationApplicationSecurityGroups", "type": "[ApplicationSecurityGroup]"},
        "source_port_ranges": {"key": "properties.sourcePortRanges", "type": "[str]"},
        "destination_port_ranges": {"key": "properties.destinationPortRanges", "type": "[str]"},
        "access": {"key": "properties.access", "type": "str"},
        "priority": {"key": "properties.priority", "type": "int"},
        "direction": {"key": "properties.direction", "type": "str"},
        "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(SecurityRule, self).__init__(**kwargs)
        self.name = kwargs.get("name")
        self.etag = None  # read-only; populated by the server
        self.description = kwargs.get("description")
        self.protocol = kwargs.get("protocol")
        self.source_port_range = kwargs.get("source_port_range")
        self.destination_port_range = kwargs.get("destination_port_range")
        self.source_address_prefix = kwargs.get("source_address_prefix")
        self.source_address_prefixes = kwargs.get("source_address_prefixes")
        self.source_application_security_groups = kwargs.get("source_application_security_groups")
        self.destination_address_prefix = kwargs.get("destination_address_prefix")
        self.destination_address_prefixes = kwargs.get("destination_address_prefixes")
        self.destination_application_security_groups = kwargs.get("destination_application_security_groups")
        self.source_port_ranges = kwargs.get("source_port_ranges")
        self.destination_port_ranges = kwargs.get("destination_port_ranges")
        self.access = kwargs.get("access")
        self.priority = kwargs.get("priority")
        self.direction = kwargs.get("direction")
        self.provisioning_state = None  # read-only; populated by the server
class SecurityRuleAssociations(msrest.serialization.Model):
    """All security rules associated with the network interface.

    :param network_interface_association: Network interface and it's custom security rules.
    :type network_interface_association:
     ~azure.mgmt.network.v2020_04_01.models.NetworkInterfaceAssociation
    :param subnet_association: Subnet and it's custom security rules.
    :type subnet_association: ~azure.mgmt.network.v2020_04_01.models.SubnetAssociation
    :param default_security_rules: Collection of default security rules of the network security
     group.
    :type default_security_rules: list[~azure.mgmt.network.v2020_04_01.models.SecurityRule]
    :param effective_security_rules: Collection of effective security rules.
    :type effective_security_rules:
     list[~azure.mgmt.network.v2020_04_01.models.EffectiveNetworkSecurityRule]
    """

    _attribute_map = {
        "network_interface_association": {"key": "networkInterfaceAssociation", "type": "NetworkInterfaceAssociation"},
        "subnet_association": {"key": "subnetAssociation", "type": "SubnetAssociation"},
        "default_security_rules": {"key": "defaultSecurityRules", "type": "[SecurityRule]"},
        "effective_security_rules": {"key": "effectiveSecurityRules", "type": "[EffectiveNetworkSecurityRule]"},
    }

    def __init__(self, **kwargs):
        super(SecurityRuleAssociations, self).__init__(**kwargs)
        self.network_interface_association = kwargs.get("network_interface_association")
        self.subnet_association = kwargs.get("subnet_association")
        self.default_security_rules = kwargs.get("default_security_rules")
        self.effective_security_rules = kwargs.get("effective_security_rules")
class SecurityRuleListResult(msrest.serialization.Model):
    """Response for ListSecurityRule API service call. Retrieves all security rules that belongs to a network security group.

    :param value: The security rules in a network security group.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.SecurityRule]
    :param next_link: The URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        "value": {"key": "value", "type": "[SecurityRule]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(SecurityRuleListResult, self).__init__(**kwargs)
        self.value = kwargs.get("value")
        self.next_link = kwargs.get("next_link")
class ServiceAssociationLink(SubResource):
    """ServiceAssociationLink resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the resource that is unique within a resource group. This name can be used
     to access the resource.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar type: Resource type.
    :vartype type: str
    :param linked_resource_type: Resource type of the linked resource.
    :type linked_resource_type: str
    :param link: Link to the external resource.
    :type link: str
    :ivar provisioning_state: The provisioning state of the service association link resource.
     Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    :param allow_delete: If true, the resource can be deleted.
    :type allow_delete: bool
    :param locations: A list of locations.
    :type locations: list[str]
    """

    _validation = {
        "etag": {"readonly": True},
        "type": {"readonly": True},
        "provisioning_state": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "etag": {"key": "etag", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "linked_resource_type": {"key": "properties.linkedResourceType", "type": "str"},
        "link": {"key": "properties.link", "type": "str"},
        "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
        "allow_delete": {"key": "properties.allowDelete", "type": "bool"},
        "locations": {"key": "properties.locations", "type": "[str]"},
    }

    def __init__(self, **kwargs):
        super(ServiceAssociationLink, self).__init__(**kwargs)
        self.name = kwargs.get("name")
        self.etag = None  # read-only; populated by the server
        self.type = None  # read-only; populated by the server
        self.linked_resource_type = kwargs.get("linked_resource_type")
        self.link = kwargs.get("link")
        self.provisioning_state = None  # read-only; populated by the server
        self.allow_delete = kwargs.get("allow_delete")
        self.locations = kwargs.get("locations")
class ServiceAssociationLinksListResult(msrest.serialization.Model):
    """Response for ServiceAssociationLinks_List operation.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param value: The service association links in a subnet.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.ServiceAssociationLink]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _validation = {
        "next_link": {"readonly": True},
    }

    _attribute_map = {
        "value": {"key": "value", "type": "[ServiceAssociationLink]"},
        "next_link": {"key": "nextLink", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(ServiceAssociationLinksListResult, self).__init__(**kwargs)
        self.value = kwargs.get("value")
        self.next_link = None  # read-only; populated by the server
class ServiceEndpointPolicy(Resource):
    """Service End point policy resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param service_endpoint_policy_definitions: A collection of service endpoint policy definitions
     of the service endpoint policy.
    :type service_endpoint_policy_definitions:
     list[~azure.mgmt.network.v2020_04_01.models.ServiceEndpointPolicyDefinition]
    :ivar subnets: A collection of references to subnets.
    :vartype subnets: list[~azure.mgmt.network.v2020_04_01.models.Subnet]
    :ivar resource_guid: The resource GUID property of the service endpoint policy resource.
    :vartype resource_guid: str
    :ivar provisioning_state: The provisioning state of the service endpoint policy resource.
     Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    _validation = {
        "name": {"readonly": True},
        "type": {"readonly": True},
        "etag": {"readonly": True},
        "subnets": {"readonly": True},
        "resource_guid": {"readonly": True},
        "provisioning_state": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
        "etag": {"key": "etag", "type": "str"},
        "service_endpoint_policy_definitions": {"key": "properties.serviceEndpointPolicyDefinitions", "type": "[ServiceEndpointPolicyDefinition]"},
        "subnets": {"key": "properties.subnets", "type": "[Subnet]"},
        "resource_guid": {"key": "properties.resourceGuid", "type": "str"},
        "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
    }

    def __init__(self, **kwargs):
        # id/location/tags are handled by the Resource base class.
        super(ServiceEndpointPolicy, self).__init__(**kwargs)
        self.etag = None  # read-only; populated by the server
        self.service_endpoint_policy_definitions = kwargs.get("service_endpoint_policy_definitions")
        self.subnets = None  # read-only; populated by the server
        self.resource_guid = None  # read-only; populated by the server
        self.provisioning_state = None  # read-only; populated by the server
class ServiceEndpointPolicyDefinition(SubResource):
    """Service Endpoint policy definitions.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: The name of the resource that is unique within a resource group. This name can be
     used to access the resource.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param description: A description for this rule. Restricted to 140 chars.
    :type description: str
    :param service: Service endpoint name.
    :type service: str
    :param service_resources: A list of service resources.
    :type service_resources: list[str]
    :ivar provisioning_state: The provisioning state of the service endpoint policy definition
     resource. Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    _validation = {
        "etag": {"readonly": True},
        "provisioning_state": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "etag": {"key": "etag", "type": "str"},
        "description": {"key": "properties.description", "type": "str"},
        "service": {"key": "properties.service", "type": "str"},
        "service_resources": {"key": "properties.serviceResources", "type": "[str]"},
        "provisioning_state": {"key": "properties.provisioningState", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(ServiceEndpointPolicyDefinition, self).__init__(**kwargs)
        self.name = kwargs.get("name")
        self.etag = None  # read-only; populated by the server
        self.description = kwargs.get("description")
        self.service = kwargs.get("service")
        self.service_resources = kwargs.get("service_resources")
        self.provisioning_state = None  # read-only; populated by the server
class ServiceEndpointPolicyDefinitionListResult(msrest.serialization.Model):
    """Response for ListServiceEndpointPolicyDefinition API service call. Retrieves all service endpoint policy definition that belongs to a service endpoint policy.

    :param value: The service endpoint policy definition in a service endpoint policy.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.ServiceEndpointPolicyDefinition]
    :param next_link: The URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[ServiceEndpointPolicyDefinition]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ServiceEndpointPolicyDefinitionListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value')
        self.next_link = kwargs.get('next_link')
class ServiceEndpointPolicyListResult(msrest.serialization.Model):
    """Response for ListServiceEndpointPolicies API service call.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param value: A list of ServiceEndpointPolicy resources.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.ServiceEndpointPolicy]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """

    _validation = {
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[ServiceEndpointPolicy]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ServiceEndpointPolicyListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value')
        # Read-only; set by the service in responses.
        self.next_link = None
class ServiceEndpointPropertiesFormat(msrest.serialization.Model):
    """The service endpoint properties.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param service: The type of the endpoint service.
    :type service: str
    :param locations: A list of locations.
    :type locations: list[str]
    :ivar provisioning_state: The provisioning state of the service endpoint resource. Possible
     values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    _validation = {
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'service': {'key': 'service', 'type': 'str'},
        'locations': {'key': 'locations', 'type': '[str]'},
        'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ServiceEndpointPropertiesFormat, self).__init__(**kwargs)
        self.service = kwargs.get('service')
        self.locations = kwargs.get('locations')
        # Read-only; populated by the service.
        self.provisioning_state = None
class ServiceTagInformation(msrest.serialization.Model):
    """The service tag information.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar properties: Properties of the service tag information.
    :vartype properties:
     ~azure.mgmt.network.v2020_04_01.models.ServiceTagInformationPropertiesFormat
    :ivar name: The name of service tag.
    :vartype name: str
    :ivar id: The ID of service tag.
    :vartype id: str
    """

    _validation = {
        'properties': {'readonly': True},
        'name': {'readonly': True},
        'id': {'readonly': True},
    }

    _attribute_map = {
        'properties': {'key': 'properties', 'type': 'ServiceTagInformationPropertiesFormat'},
        'name': {'key': 'name', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ServiceTagInformation, self).__init__(**kwargs)
        # Every field on this model is server-populated and read-only.
        self.properties = None
        self.name = None
        self.id = None
class ServiceTagInformationPropertiesFormat(msrest.serialization.Model):
    """Properties of the service tag information.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar change_number: The iteration number of service tag.
    :vartype change_number: str
    :ivar region: The region of service tag.
    :vartype region: str
    :ivar system_service: The name of system service.
    :vartype system_service: str
    :ivar address_prefixes: The list of IP address prefixes.
    :vartype address_prefixes: list[str]
    """

    _validation = {
        'change_number': {'readonly': True},
        'region': {'readonly': True},
        'system_service': {'readonly': True},
        'address_prefixes': {'readonly': True},
    }

    _attribute_map = {
        'change_number': {'key': 'changeNumber', 'type': 'str'},
        'region': {'key': 'region', 'type': 'str'},
        'system_service': {'key': 'systemService', 'type': 'str'},
        'address_prefixes': {'key': 'addressPrefixes', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        super(ServiceTagInformationPropertiesFormat, self).__init__(**kwargs)
        # Every field on this model is server-populated and read-only.
        self.change_number = None
        self.region = None
        self.system_service = None
        self.address_prefixes = None
class ServiceTagsListResult(msrest.serialization.Model):
    """Response for the ListServiceTags API service call.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar name: The name of the cloud.
    :vartype name: str
    :ivar id: The ID of the cloud.
    :vartype id: str
    :ivar type: The azure resource type.
    :vartype type: str
    :ivar change_number: The iteration number.
    :vartype change_number: str
    :ivar cloud: The name of the cloud.
    :vartype cloud: str
    :ivar values: The list of service tag information resources.
    :vartype values: list[~azure.mgmt.network.v2020_04_01.models.ServiceTagInformation]
    """

    _validation = {
        'name': {'readonly': True},
        'id': {'readonly': True},
        'type': {'readonly': True},
        'change_number': {'readonly': True},
        'cloud': {'readonly': True},
        'values': {'readonly': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'change_number': {'key': 'changeNumber', 'type': 'str'},
        'cloud': {'key': 'cloud', 'type': 'str'},
        'values': {'key': 'values', 'type': '[ServiceTagInformation]'},
    }

    def __init__(self, **kwargs):
        super(ServiceTagsListResult, self).__init__(**kwargs)
        # Every field on this model is server-populated and read-only.
        self.name = None
        self.id = None
        self.type = None
        self.change_number = None
        self.cloud = None
        self.values = None
class SessionIds(msrest.serialization.Model):
    """List of session IDs.

    :param session_ids: List of session IDs.
    :type session_ids: list[str]
    """

    _attribute_map = {
        'session_ids': {'key': 'sessionIds', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        super(SessionIds, self).__init__(**kwargs)
        self.session_ids = kwargs.get('session_ids')
class StaticRoute(msrest.serialization.Model):
    """List of all Static Routes.

    :param name: The name of the StaticRoute that is unique within a VnetRoute.
    :type name: str
    :param address_prefixes: List of all address prefixes.
    :type address_prefixes: list[str]
    :param next_hop_ip_address: The ip address of the next hop.
    :type next_hop_ip_address: str
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'address_prefixes': {'key': 'addressPrefixes', 'type': '[str]'},
        'next_hop_ip_address': {'key': 'nextHopIpAddress', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(StaticRoute, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        self.address_prefixes = kwargs.get('address_prefixes')
        self.next_hop_ip_address = kwargs.get('next_hop_ip_address')
class Subnet(SubResource):
    """Subnet in a virtual network resource.
    Variables are only populated by the server, and will be ignored when sending a request.
    :param id: Resource ID.
    :type id: str
    :param name: The name of the resource that is unique within a resource group. This name can be
    used to access the resource.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param address_prefix: The address prefix for the subnet.
    :type address_prefix: str
    :param address_prefixes: List of address prefixes for the subnet.
    :type address_prefixes: list[str]
    :param network_security_group: The reference to the NetworkSecurityGroup resource.
    :type network_security_group: ~azure.mgmt.network.v2020_04_01.models.NetworkSecurityGroup
    :param route_table: The reference to the RouteTable resource.
    :type route_table: ~azure.mgmt.network.v2020_04_01.models.RouteTable
    :param nat_gateway: Nat gateway associated with this subnet.
    :type nat_gateway: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param service_endpoints: An array of service endpoints.
    :type service_endpoints:
    list[~azure.mgmt.network.v2020_04_01.models.ServiceEndpointPropertiesFormat]
    :param service_endpoint_policies: An array of service endpoint policies.
    :type service_endpoint_policies:
    list[~azure.mgmt.network.v2020_04_01.models.ServiceEndpointPolicy]
    :ivar private_endpoints: An array of references to private endpoints.
    :vartype private_endpoints: list[~azure.mgmt.network.v2020_04_01.models.PrivateEndpoint]
    :ivar ip_configurations: An array of references to the network interface IP configurations
    using subnet.
    :vartype ip_configurations: list[~azure.mgmt.network.v2020_04_01.models.IPConfiguration]
    :ivar ip_configuration_profiles: Array of IP configuration profiles which reference this
    subnet.
    :vartype ip_configuration_profiles:
    list[~azure.mgmt.network.v2020_04_01.models.IPConfigurationProfile]
    :param ip_allocations: Array of IpAllocation which reference this subnet.
    :type ip_allocations: list[~azure.mgmt.network.v2020_04_01.models.SubResource]
    :ivar resource_navigation_links: An array of references to the external resources using subnet.
    :vartype resource_navigation_links:
    list[~azure.mgmt.network.v2020_04_01.models.ResourceNavigationLink]
    :ivar service_association_links: An array of references to services injecting into this subnet.
    :vartype service_association_links:
    list[~azure.mgmt.network.v2020_04_01.models.ServiceAssociationLink]
    :param delegations: An array of references to the delegations on the subnet.
    :type delegations: list[~azure.mgmt.network.v2020_04_01.models.Delegation]
    :ivar purpose: A read-only string identifying the intention of use for this subnet based on
    delegations and other user-defined properties.
    :vartype purpose: str
    :ivar provisioning_state: The provisioning state of the subnet resource. Possible values
    include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    :param private_endpoint_network_policies: Enable or Disable apply network policies on private
    end point in the subnet.
    :type private_endpoint_network_policies: str
    :param private_link_service_network_policies: Enable or Disable apply network policies on
    private link service in the subnet.
    :type private_link_service_network_policies: str
    """
    # Fields marked readonly here are server-populated and are skipped when
    # this model is serialized into a request body.
    _validation = {
        'etag': {'readonly': True},
        'private_endpoints': {'readonly': True},
        'ip_configurations': {'readonly': True},
        'ip_configuration_profiles': {'readonly': True},
        'resource_navigation_links': {'readonly': True},
        'service_association_links': {'readonly': True},
        'purpose': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }
    # Maps each Python attribute to its wire key (dotted keys nest under the
    # JSON 'properties' object) and its msrest serialization type.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'address_prefix': {'key': 'properties.addressPrefix', 'type': 'str'},
        'address_prefixes': {'key': 'properties.addressPrefixes', 'type': '[str]'},
        'network_security_group': {'key': 'properties.networkSecurityGroup', 'type': 'NetworkSecurityGroup'},
        'route_table': {'key': 'properties.routeTable', 'type': 'RouteTable'},
        'nat_gateway': {'key': 'properties.natGateway', 'type': 'SubResource'},
        'service_endpoints': {'key': 'properties.serviceEndpoints', 'type': '[ServiceEndpointPropertiesFormat]'},
        'service_endpoint_policies': {'key': 'properties.serviceEndpointPolicies', 'type': '[ServiceEndpointPolicy]'},
        'private_endpoints': {'key': 'properties.privateEndpoints', 'type': '[PrivateEndpoint]'},
        'ip_configurations': {'key': 'properties.ipConfigurations', 'type': '[IPConfiguration]'},
        'ip_configuration_profiles': {'key': 'properties.ipConfigurationProfiles', 'type': '[IPConfigurationProfile]'},
        'ip_allocations': {'key': 'properties.ipAllocations', 'type': '[SubResource]'},
        'resource_navigation_links': {'key': 'properties.resourceNavigationLinks', 'type': '[ResourceNavigationLink]'},
        'service_association_links': {'key': 'properties.serviceAssociationLinks', 'type': '[ServiceAssociationLink]'},
        'delegations': {'key': 'properties.delegations', 'type': '[Delegation]'},
        'purpose': {'key': 'properties.purpose', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'private_endpoint_network_policies': {'key': 'properties.privateEndpointNetworkPolicies', 'type': 'str'},
        'private_link_service_network_policies': {'key': 'properties.privateLinkServiceNetworkPolicies', 'type': 'str'},
    }
    def __init__(
        self,
        **kwargs
    ):
        # 'id' is consumed by the SubResource base initializer.
        super(Subnet, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.etag = None  # read-only; populated by the server
        self.address_prefix = kwargs.get('address_prefix', None)
        self.address_prefixes = kwargs.get('address_prefixes', None)
        self.network_security_group = kwargs.get('network_security_group', None)
        self.route_table = kwargs.get('route_table', None)
        self.nat_gateway = kwargs.get('nat_gateway', None)
        self.service_endpoints = kwargs.get('service_endpoints', None)
        self.service_endpoint_policies = kwargs.get('service_endpoint_policies', None)
        self.private_endpoints = None  # read-only; populated by the server
        self.ip_configurations = None  # read-only; populated by the server
        self.ip_configuration_profiles = None  # read-only; populated by the server
        self.ip_allocations = kwargs.get('ip_allocations', None)
        self.resource_navigation_links = None  # read-only; populated by the server
        self.service_association_links = None  # read-only; populated by the server
        self.delegations = kwargs.get('delegations', None)
        self.purpose = None  # read-only; populated by the server
        self.provisioning_state = None  # read-only; populated by the server
        self.private_endpoint_network_policies = kwargs.get('private_endpoint_network_policies', None)
        self.private_link_service_network_policies = kwargs.get('private_link_service_network_policies', None)
class SubnetAssociation(msrest.serialization.Model):
    """Subnet and it's custom security rules.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Subnet ID.
    :vartype id: str
    :param security_rules: Collection of custom security rules.
    :type security_rules: list[~azure.mgmt.network.v2020_04_01.models.SecurityRule]
    """

    _validation = {
        'id': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'security_rules': {'key': 'securityRules', 'type': '[SecurityRule]'},
    }

    def __init__(self, **kwargs):
        super(SubnetAssociation, self).__init__(**kwargs)
        self.security_rules = kwargs.get('security_rules')
        # Read-only; populated by the service.
        self.id = None
class SubnetListResult(msrest.serialization.Model):
    """Response for ListSubnets API service callRetrieves all subnet that belongs to a virtual network.

    :param value: The subnets in a virtual network.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.Subnet]
    :param next_link: The URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[Subnet]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(SubnetListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value')
        self.next_link = kwargs.get('next_link')
class TagsObject(msrest.serialization.Model):
    """Tags object for patch operations.

    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    """

    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
    }

    def __init__(self, **kwargs):
        super(TagsObject, self).__init__(**kwargs)
        self.tags = kwargs.get('tags')
class Topology(msrest.serialization.Model):
    """Topology of the specified resource group.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: GUID representing the operation id.
    :vartype id: str
    :ivar created_date_time: The datetime when the topology was initially created for the resource
     group.
    :vartype created_date_time: ~datetime.datetime
    :ivar last_modified: The datetime when the topology was last modified.
    :vartype last_modified: ~datetime.datetime
    :param resources: A list of topology resources.
    :type resources: list[~azure.mgmt.network.v2020_04_01.models.TopologyResource]
    """

    _validation = {
        'id': {'readonly': True},
        'created_date_time': {'readonly': True},
        'last_modified': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'created_date_time': {'key': 'createdDateTime', 'type': 'iso-8601'},
        'last_modified': {'key': 'lastModified', 'type': 'iso-8601'},
        'resources': {'key': 'resources', 'type': '[TopologyResource]'},
    }

    def __init__(self, **kwargs):
        super(Topology, self).__init__(**kwargs)
        self.resources = kwargs.get('resources')
        # Server-populated, read-only properties.
        self.id = None
        self.created_date_time = None
        self.last_modified = None
class TopologyAssociation(msrest.serialization.Model):
    """Resources that have an association with the parent resource.

    :param name: The name of the resource that is associated with the parent resource.
    :type name: str
    :param resource_id: The ID of the resource that is associated with the parent resource.
    :type resource_id: str
    :param association_type: The association type of the child resource to the parent resource.
     Possible values include: "Associated", "Contains".
    :type association_type: str or ~azure.mgmt.network.v2020_04_01.models.AssociationType
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'resource_id': {'key': 'resourceId', 'type': 'str'},
        'association_type': {'key': 'associationType', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(TopologyAssociation, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        self.resource_id = kwargs.get('resource_id')
        self.association_type = kwargs.get('association_type')
class TopologyParameters(msrest.serialization.Model):
    """Parameters that define the representation of topology.

    :param target_resource_group_name: The name of the target resource group to perform topology
     on.
    :type target_resource_group_name: str
    :param target_virtual_network: The reference to the Virtual Network resource.
    :type target_virtual_network: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param target_subnet: The reference to the Subnet resource.
    :type target_subnet: ~azure.mgmt.network.v2020_04_01.models.SubResource
    """

    _attribute_map = {
        'target_resource_group_name': {'key': 'targetResourceGroupName', 'type': 'str'},
        'target_virtual_network': {'key': 'targetVirtualNetwork', 'type': 'SubResource'},
        'target_subnet': {'key': 'targetSubnet', 'type': 'SubResource'},
    }

    def __init__(self, **kwargs):
        super(TopologyParameters, self).__init__(**kwargs)
        self.target_resource_group_name = kwargs.get('target_resource_group_name')
        self.target_virtual_network = kwargs.get('target_virtual_network')
        self.target_subnet = kwargs.get('target_subnet')
class TopologyResource(msrest.serialization.Model):
    """The network resource topology information for the given resource group.

    :param name: Name of the resource.
    :type name: str
    :param id: ID of the resource.
    :type id: str
    :param location: Resource location.
    :type location: str
    :param associations: Holds the associations the resource has with other resources in the
     resource group.
    :type associations: list[~azure.mgmt.network.v2020_04_01.models.TopologyAssociation]
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'associations': {'key': 'associations', 'type': '[TopologyAssociation]'},
    }

    def __init__(self, **kwargs):
        super(TopologyResource, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        self.id = kwargs.get('id')
        self.location = kwargs.get('location')
        self.associations = kwargs.get('associations')
class TrafficAnalyticsConfigurationProperties(msrest.serialization.Model):
    """Parameters that define the configuration of traffic analytics.

    :param enabled: Flag to enable/disable traffic analytics.
    :type enabled: bool
    :param workspace_id: The resource guid of the attached workspace.
    :type workspace_id: str
    :param workspace_region: The location of the attached workspace.
    :type workspace_region: str
    :param workspace_resource_id: Resource Id of the attached workspace.
    :type workspace_resource_id: str
    :param traffic_analytics_interval: The interval in minutes which would decide how frequently TA
     service should do flow analytics.
    :type traffic_analytics_interval: int
    """

    _attribute_map = {
        'enabled': {'key': 'enabled', 'type': 'bool'},
        'workspace_id': {'key': 'workspaceId', 'type': 'str'},
        'workspace_region': {'key': 'workspaceRegion', 'type': 'str'},
        'workspace_resource_id': {'key': 'workspaceResourceId', 'type': 'str'},
        'traffic_analytics_interval': {'key': 'trafficAnalyticsInterval', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        super(TrafficAnalyticsConfigurationProperties, self).__init__(**kwargs)
        self.enabled = kwargs.get('enabled')
        self.workspace_id = kwargs.get('workspace_id')
        self.workspace_region = kwargs.get('workspace_region')
        self.workspace_resource_id = kwargs.get('workspace_resource_id')
        self.traffic_analytics_interval = kwargs.get('traffic_analytics_interval')
class TrafficAnalyticsProperties(msrest.serialization.Model):
    """Parameters that define the configuration of traffic analytics.

    :param network_watcher_flow_analytics_configuration: Parameters that define the configuration
     of traffic analytics.
    :type network_watcher_flow_analytics_configuration:
     ~azure.mgmt.network.v2020_04_01.models.TrafficAnalyticsConfigurationProperties
    """

    _attribute_map = {
        'network_watcher_flow_analytics_configuration': {'key': 'networkWatcherFlowAnalyticsConfiguration', 'type': 'TrafficAnalyticsConfigurationProperties'},
    }

    def __init__(self, **kwargs):
        super(TrafficAnalyticsProperties, self).__init__(**kwargs)
        self.network_watcher_flow_analytics_configuration = kwargs.get('network_watcher_flow_analytics_configuration')
class TrafficSelectorPolicy(msrest.serialization.Model):
    """An traffic selector policy for a virtual network gateway connection.

    All required parameters must be populated in order to send to Azure.

    :param local_address_ranges: Required. A collection of local address spaces in CIDR format.
    :type local_address_ranges: list[str]
    :param remote_address_ranges: Required. A collection of remote address spaces in CIDR format.
    :type remote_address_ranges: list[str]
    """

    _validation = {
        'local_address_ranges': {'required': True},
        'remote_address_ranges': {'required': True},
    }

    _attribute_map = {
        'local_address_ranges': {'key': 'localAddressRanges', 'type': '[str]'},
        'remote_address_ranges': {'key': 'remoteAddressRanges', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        super(TrafficSelectorPolicy, self).__init__(**kwargs)
        # Both properties are required; indexing raises KeyError if missing.
        self.local_address_ranges = kwargs['local_address_ranges']
        self.remote_address_ranges = kwargs['remote_address_ranges']
class TroubleshootingDetails(msrest.serialization.Model):
    """Information gained from troubleshooting of specified resource.

    :param id: The id of the get troubleshoot operation.
    :type id: str
    :param reason_type: Reason type of failure.
    :type reason_type: str
    :param summary: A summary of troubleshooting.
    :type summary: str
    :param detail: Details on troubleshooting results.
    :type detail: str
    :param recommended_actions: List of recommended actions.
    :type recommended_actions:
     list[~azure.mgmt.network.v2020_04_01.models.TroubleshootingRecommendedActions]
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'reason_type': {'key': 'reasonType', 'type': 'str'},
        'summary': {'key': 'summary', 'type': 'str'},
        'detail': {'key': 'detail', 'type': 'str'},
        'recommended_actions': {'key': 'recommendedActions', 'type': '[TroubleshootingRecommendedActions]'},
    }

    def __init__(self, **kwargs):
        super(TroubleshootingDetails, self).__init__(**kwargs)
        self.id = kwargs.get('id')
        self.reason_type = kwargs.get('reason_type')
        self.summary = kwargs.get('summary')
        self.detail = kwargs.get('detail')
        self.recommended_actions = kwargs.get('recommended_actions')
class TroubleshootingParameters(msrest.serialization.Model):
    """Parameters that define the resource to troubleshoot.

    All required parameters must be populated in order to send to Azure.

    :param target_resource_id: Required. The target resource to troubleshoot.
    :type target_resource_id: str
    :param storage_id: Required. The ID for the storage account to save the troubleshoot result.
    :type storage_id: str
    :param storage_path: Required. The path to the blob to save the troubleshoot result in.
    :type storage_path: str
    """

    _validation = {
        'target_resource_id': {'required': True},
        'storage_id': {'required': True},
        'storage_path': {'required': True},
    }

    _attribute_map = {
        'target_resource_id': {'key': 'targetResourceId', 'type': 'str'},
        'storage_id': {'key': 'properties.storageId', 'type': 'str'},
        'storage_path': {'key': 'properties.storagePath', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(TroubleshootingParameters, self).__init__(**kwargs)
        # All three properties are required; indexing raises KeyError if missing.
        self.target_resource_id = kwargs['target_resource_id']
        self.storage_id = kwargs['storage_id']
        self.storage_path = kwargs['storage_path']
class TroubleshootingRecommendedActions(msrest.serialization.Model):
    """Recommended actions based on discovered issues.

    :param action_id: ID of the recommended action.
    :type action_id: str
    :param action_text: Description of recommended actions.
    :type action_text: str
    :param action_uri: The uri linking to a documentation for the recommended troubleshooting
     actions.
    :type action_uri: str
    :param action_uri_text: The information from the URI for the recommended troubleshooting
     actions.
    :type action_uri_text: str
    """

    _attribute_map = {
        'action_id': {'key': 'actionId', 'type': 'str'},
        'action_text': {'key': 'actionText', 'type': 'str'},
        'action_uri': {'key': 'actionUri', 'type': 'str'},
        'action_uri_text': {'key': 'actionUriText', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(TroubleshootingRecommendedActions, self).__init__(**kwargs)
        self.action_id = kwargs.get('action_id')
        self.action_text = kwargs.get('action_text')
        self.action_uri = kwargs.get('action_uri')
        self.action_uri_text = kwargs.get('action_uri_text')
class TroubleshootingResult(msrest.serialization.Model):
    """Troubleshooting information gained from specified resource.

    :param start_time: The start time of the troubleshooting.
    :type start_time: ~datetime.datetime
    :param end_time: The end time of the troubleshooting.
    :type end_time: ~datetime.datetime
    :param code: The result code of the troubleshooting.
    :type code: str
    :param results: Information from troubleshooting.
    :type results: list[~azure.mgmt.network.v2020_04_01.models.TroubleshootingDetails]
    """

    _attribute_map = {
        'start_time': {'key': 'startTime', 'type': 'iso-8601'},
        'end_time': {'key': 'endTime', 'type': 'iso-8601'},
        'code': {'key': 'code', 'type': 'str'},
        'results': {'key': 'results', 'type': '[TroubleshootingDetails]'},
    }

    def __init__(self, **kwargs):
        super(TroubleshootingResult, self).__init__(**kwargs)
        self.start_time = kwargs.get('start_time')
        self.end_time = kwargs.get('end_time')
        self.code = kwargs.get('code')
        self.results = kwargs.get('results')
class TunnelConnectionHealth(msrest.serialization.Model):
    """VirtualNetworkGatewayConnection properties.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar tunnel: Tunnel name.
    :vartype tunnel: str
    :ivar connection_status: Virtual Network Gateway connection status. Possible values include:
     "Unknown", "Connecting", "Connected", "NotConnected".
    :vartype connection_status: str or
     ~azure.mgmt.network.v2020_04_01.models.VirtualNetworkGatewayConnectionStatus
    :ivar ingress_bytes_transferred: The Ingress Bytes Transferred in this connection.
    :vartype ingress_bytes_transferred: long
    :ivar egress_bytes_transferred: The Egress Bytes Transferred in this connection.
    :vartype egress_bytes_transferred: long
    :ivar last_connection_established_utc_time: The time at which connection was established in Utc
     format.
    :vartype last_connection_established_utc_time: str
    """

    _validation = {
        'tunnel': {'readonly': True},
        'connection_status': {'readonly': True},
        'ingress_bytes_transferred': {'readonly': True},
        'egress_bytes_transferred': {'readonly': True},
        'last_connection_established_utc_time': {'readonly': True},
    }

    _attribute_map = {
        'tunnel': {'key': 'tunnel', 'type': 'str'},
        'connection_status': {'key': 'connectionStatus', 'type': 'str'},
        'ingress_bytes_transferred': {'key': 'ingressBytesTransferred', 'type': 'long'},
        'egress_bytes_transferred': {'key': 'egressBytesTransferred', 'type': 'long'},
        'last_connection_established_utc_time': {'key': 'lastConnectionEstablishedUtcTime', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(TunnelConnectionHealth, self).__init__(**kwargs)
        # Every field on this model is server-populated and read-only.
        self.tunnel = None
        self.connection_status = None
        self.ingress_bytes_transferred = None
        self.egress_bytes_transferred = None
        self.last_connection_established_utc_time = None
class UnprepareNetworkPoliciesRequest(msrest.serialization.Model):
    """Details of UnprepareNetworkPolicies for Subnet.

    :param service_name: The name of the service for which subnet is being unprepared for.
    :type service_name: str
    """

    _attribute_map = {
        'service_name': {'key': 'serviceName', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(UnprepareNetworkPoliciesRequest, self).__init__(**kwargs)
        self.service_name = kwargs.get('service_name')
class Usage(msrest.serialization.Model):
    """The network resource usage.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource identifier.
    :vartype id: str
    :param unit: Required. An enum describing the unit of measurement. Possible values include:
     "Count".
    :type unit: str or ~azure.mgmt.network.v2020_04_01.models.UsageUnit
    :param current_value: Required. The current value of the usage.
    :type current_value: long
    :param limit: Required. The limit of usage.
    :type limit: long
    :param name: Required. The name of the type of usage.
    :type name: ~azure.mgmt.network.v2020_04_01.models.UsageName
    """

    _validation = {
        'id': {'readonly': True},
        'unit': {'required': True},
        'current_value': {'required': True},
        'limit': {'required': True},
        'name': {'required': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'unit': {'key': 'unit', 'type': 'str'},
        'current_value': {'key': 'currentValue', 'type': 'long'},
        'limit': {'key': 'limit', 'type': 'long'},
        'name': {'key': 'name', 'type': 'UsageName'},
    }

    def __init__(self, **kwargs):
        super(Usage, self).__init__(**kwargs)
        # Required properties; indexing raises KeyError if missing.
        self.unit = kwargs['unit']
        self.current_value = kwargs['current_value']
        self.limit = kwargs['limit']
        self.name = kwargs['name']
        # Read-only; populated by the service.
        self.id = None
class UsageName(msrest.serialization.Model):
    """The usage names.

    :param value: A string describing the resource name.
    :type value: str
    :param localized_value: A localized string describing the resource name.
    :type localized_value: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': 'str'},
        'localized_value': {'key': 'localizedValue', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(UsageName, self).__init__(**kwargs)
        self.value = kwargs.get('value')
        self.localized_value = kwargs.get('localized_value')
class UsagesListResult(msrest.serialization.Model):
    """The list usages operation response.

    :param value: The list network resource usages.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.Usage]
    :param next_link: URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[Usage]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(UsagesListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value')
        self.next_link = kwargs.get('next_link')
class VerificationIPFlowParameters(msrest.serialization.Model):
    """Parameters describing the IP flow to be verified.

    All required parameters must be populated in order to send to Azure.

    :param target_resource_id: Required. The ID of the target resource to perform next-hop on.
    :type target_resource_id: str
    :param direction: Required. The direction of the packet represented as a 5-tuple. Possible
     values include: "Inbound", "Outbound".
    :type direction: str or ~azure.mgmt.network.v2020_04_01.models.Direction
    :param protocol: Required. Protocol to be verified on. Possible values include: "TCP", "UDP".
    :type protocol: str or ~azure.mgmt.network.v2020_04_01.models.IpFlowProtocol
    :param local_port: Required. The local port. Acceptable values are a single integer in the
     range (0-65535). Support for * for the source port, which depends on the direction.
    :type local_port: str
    :param remote_port: Required. The remote port. Acceptable values are a single integer in the
     range (0-65535). Support for * for the source port, which depends on the direction.
    :type remote_port: str
    :param local_ip_address: Required. The local IP address. Acceptable values are valid IPv4
     addresses.
    :type local_ip_address: str
    :param remote_ip_address: Required. The remote IP address. Acceptable values are valid IPv4
     addresses.
    :type remote_ip_address: str
    :param target_nic_resource_id: The NIC ID. (If VM has multiple NICs and IP forwarding is
     enabled on any of them, then this parameter must be specified. Otherwise optional).
    :type target_nic_resource_id: str
    """

    _validation = {
        'target_resource_id': {'required': True},
        'direction': {'required': True},
        'protocol': {'required': True},
        'local_port': {'required': True},
        'remote_port': {'required': True},
        'local_ip_address': {'required': True},
        'remote_ip_address': {'required': True},
    }

    _attribute_map = {
        'target_resource_id': {'key': 'targetResourceId', 'type': 'str'},
        'direction': {'key': 'direction', 'type': 'str'},
        'protocol': {'key': 'protocol', 'type': 'str'},
        'local_port': {'key': 'localPort', 'type': 'str'},
        'remote_port': {'key': 'remotePort', 'type': 'str'},
        'local_ip_address': {'key': 'localIPAddress', 'type': 'str'},
        'remote_ip_address': {'key': 'remoteIPAddress', 'type': 'str'},
        'target_nic_resource_id': {'key': 'targetNicResourceId', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VerificationIPFlowParameters, self).__init__(**kwargs)
        # All flow-tuple fields are mandatory; a missing one raises KeyError.
        for _required in ('target_resource_id', 'direction', 'protocol',
                          'local_port', 'remote_port', 'local_ip_address',
                          'remote_ip_address'):
            setattr(self, _required, kwargs[_required])
        # Only needed when the VM has multiple NICs with IP forwarding enabled.
        self.target_nic_resource_id = kwargs.get('target_nic_resource_id')
class VerificationIPFlowResult(msrest.serialization.Model):
    """Outcome of IP flow verification against the target resource.

    :param access: Indicates whether the traffic is allowed or denied. Possible values include:
     "Allow", "Deny".
    :type access: str or ~azure.mgmt.network.v2020_04_01.models.Access
    :param rule_name: Name of the rule. If input is not matched against any security rule, it is
     not displayed.
    :type rule_name: str
    """

    _attribute_map = {
        'access': {'key': 'access', 'type': 'str'},
        'rule_name': {'key': 'ruleName', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VerificationIPFlowResult, self).__init__(**kwargs)
        # Both fields are optional and default to None.
        self.access = kwargs.get('access')
        self.rule_name = kwargs.get('rule_name')
class VirtualApplianceNicProperties(msrest.serialization.Model):
    """NIC properties of a Network Virtual Appliance.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar name: NIC name.
    :vartype name: str
    :ivar public_ip_address: Public IP address.
    :vartype public_ip_address: str
    :ivar private_ip_address: Private IP address.
    :vartype private_ip_address: str
    """

    _validation = {
        'name': {'readonly': True},
        'public_ip_address': {'readonly': True},
        'private_ip_address': {'readonly': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'public_ip_address': {'key': 'publicIpAddress', 'type': 'str'},
        'private_ip_address': {'key': 'privateIpAddress', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VirtualApplianceNicProperties, self).__init__(**kwargs)
        # Every property is read-only and server-populated, so all start as None.
        for _readonly in ('name', 'public_ip_address', 'private_ip_address'):
            setattr(self, _readonly, None)
class VirtualApplianceSkuProperties(msrest.serialization.Model):
    """Sku properties of a Network Virtual Appliance.

    :param vendor: Virtual Appliance Vendor.
    :type vendor: str
    :param bundled_scale_unit: Virtual Appliance Scale Unit.
    :type bundled_scale_unit: str
    :param market_place_version: Virtual Appliance Version.
    :type market_place_version: str
    """

    _attribute_map = {
        'vendor': {'key': 'vendor', 'type': 'str'},
        'bundled_scale_unit': {'key': 'bundledScaleUnit', 'type': 'str'},
        'market_place_version': {'key': 'marketPlaceVersion', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VirtualApplianceSkuProperties, self).__init__(**kwargs)
        # All three SKU attributes are optional; absent ones default to None.
        for _name in ('vendor', 'bundled_scale_unit', 'market_place_version'):
            setattr(self, _name, kwargs.get(_name))
class VirtualHub(Resource):
    """VirtualHub Resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param virtual_wan: The VirtualWAN to which the VirtualHub belongs.
    :type virtual_wan: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param vpn_gateway: The VpnGateway associated with this VirtualHub.
    :type vpn_gateway: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param p2_s_vpn_gateway: The P2SVpnGateway associated with this VirtualHub.
    :type p2_s_vpn_gateway: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param express_route_gateway: The expressRouteGateway associated with this VirtualHub.
    :type express_route_gateway: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param azure_firewall: The azureFirewall associated with this VirtualHub.
    :type azure_firewall: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param security_partner_provider: The securityPartnerProvider associated with this VirtualHub.
    :type security_partner_provider: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param virtual_network_connections: List of all vnet connections with this VirtualHub.
    :type virtual_network_connections:
     list[~azure.mgmt.network.v2020_04_01.models.HubVirtualNetworkConnection]
    :param address_prefix: Address-prefix for this VirtualHub.
    :type address_prefix: str
    :param route_table: The routeTable associated with this virtual hub.
    :type route_table: ~azure.mgmt.network.v2020_04_01.models.VirtualHubRouteTable
    :ivar provisioning_state: The provisioning state of the virtual hub resource. Possible values
     include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    :param security_provider_name: The Security Provider name.
    :type security_provider_name: str
    :param virtual_hub_route_table_v2_s: List of all virtual hub route table v2s associated with
     this VirtualHub.
    :type virtual_hub_route_table_v2_s:
     list[~azure.mgmt.network.v2020_04_01.models.VirtualHubRouteTableV2]
    :param sku: The sku of this VirtualHub.
    :type sku: str
    """

    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'etag': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'etag': {'key': 'etag', 'type': 'str'},
        'virtual_wan': {'key': 'properties.virtualWan', 'type': 'SubResource'},
        'vpn_gateway': {'key': 'properties.vpnGateway', 'type': 'SubResource'},
        'p2_s_vpn_gateway': {'key': 'properties.p2SVpnGateway', 'type': 'SubResource'},
        'express_route_gateway': {'key': 'properties.expressRouteGateway', 'type': 'SubResource'},
        'azure_firewall': {'key': 'properties.azureFirewall', 'type': 'SubResource'},
        'security_partner_provider': {'key': 'properties.securityPartnerProvider', 'type': 'SubResource'},
        'virtual_network_connections': {'key': 'properties.virtualNetworkConnections', 'type': '[HubVirtualNetworkConnection]'},
        'address_prefix': {'key': 'properties.addressPrefix', 'type': 'str'},
        'route_table': {'key': 'properties.routeTable', 'type': 'VirtualHubRouteTable'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'security_provider_name': {'key': 'properties.securityProviderName', 'type': 'str'},
        'virtual_hub_route_table_v2_s': {'key': 'properties.virtualHubRouteTableV2s', 'type': '[VirtualHubRouteTableV2]'},
        'sku': {'key': 'properties.sku', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VirtualHub, self).__init__(**kwargs)
        self.etag = None  # read-only; populated by the service
        for _name in ('virtual_wan', 'vpn_gateway', 'p2_s_vpn_gateway',
                      'express_route_gateway', 'azure_firewall',
                      'security_partner_provider', 'virtual_network_connections',
                      'address_prefix', 'route_table'):
            setattr(self, _name, kwargs.get(_name))
        self.provisioning_state = None  # read-only; populated by the service
        for _name in ('security_provider_name', 'virtual_hub_route_table_v2_s', 'sku'):
            setattr(self, _name, kwargs.get(_name))
class VirtualHubId(msrest.serialization.Model):
    """Identifier of a Virtual Hub.

    :param id: The resource URI for the Virtual Hub where the ExpressRoute gateway is or will be
     deployed. The Virtual Hub resource and the ExpressRoute gateway resource reside in the same
     subscription.
    :type id: str
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VirtualHubId, self).__init__(**kwargs)
        # Optional; defaults to None when not supplied.
        self.id = kwargs.get('id')
class VirtualHubRoute(msrest.serialization.Model):
    """A single route of a VirtualHub.

    :param address_prefixes: List of all addressPrefixes.
    :type address_prefixes: list[str]
    :param next_hop_ip_address: NextHop ip address.
    :type next_hop_ip_address: str
    """

    _attribute_map = {
        'address_prefixes': {'key': 'addressPrefixes', 'type': '[str]'},
        'next_hop_ip_address': {'key': 'nextHopIpAddress', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VirtualHubRoute, self).__init__(**kwargs)
        # Both fields are optional and default to None.
        self.address_prefixes = kwargs.get('address_prefixes')
        self.next_hop_ip_address = kwargs.get('next_hop_ip_address')
class VirtualHubRouteTable(msrest.serialization.Model):
    """Route table of a VirtualHub.

    :param routes: List of all routes.
    :type routes: list[~azure.mgmt.network.v2020_04_01.models.VirtualHubRoute]
    """

    _attribute_map = {
        'routes': {'key': 'routes', 'type': '[VirtualHubRoute]'},
    }

    def __init__(self, **kwargs):
        super(VirtualHubRouteTable, self).__init__(**kwargs)
        # Optional; defaults to None when not supplied.
        self.routes = kwargs.get('routes')
class VirtualHubRouteTableV2(SubResource):
    """VirtualHubRouteTableV2 Resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: The name of the resource that is unique within a resource group. This name can be
     used to access the resource.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param routes: List of all routes.
    :type routes: list[~azure.mgmt.network.v2020_04_01.models.VirtualHubRouteV2]
    :param attached_connections: List of all connections attached to this route table v2.
    :type attached_connections: list[str]
    :ivar provisioning_state: The provisioning state of the virtual hub route table v2 resource.
     Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    _validation = {
        'etag': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'routes': {'key': 'properties.routes', 'type': '[VirtualHubRouteV2]'},
        'attached_connections': {'key': 'properties.attachedConnections', 'type': '[str]'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VirtualHubRouteTableV2, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        self.etag = None  # read-only; populated by the service
        self.routes = kwargs.get('routes')
        self.attached_connections = kwargs.get('attached_connections')
        self.provisioning_state = None  # read-only; populated by the service
class VirtualHubRouteV2(msrest.serialization.Model):
    """A route entry of a VirtualHubRouteTableV2.

    :param destination_type: The type of destinations.
    :type destination_type: str
    :param destinations: List of all destinations.
    :type destinations: list[str]
    :param next_hop_type: The type of next hops.
    :type next_hop_type: str
    :param next_hops: NextHops ip address.
    :type next_hops: list[str]
    """

    _attribute_map = {
        'destination_type': {'key': 'destinationType', 'type': 'str'},
        'destinations': {'key': 'destinations', 'type': '[str]'},
        'next_hop_type': {'key': 'nextHopType', 'type': 'str'},
        'next_hops': {'key': 'nextHops', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        super(VirtualHubRouteV2, self).__init__(**kwargs)
        # All fields are optional; absent ones default to None.
        for _name in ('destination_type', 'destinations',
                      'next_hop_type', 'next_hops'):
            setattr(self, _name, kwargs.get(_name))
class VirtualNetwork(Resource):
    """Virtual Network resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param address_space: The AddressSpace that contains an array of IP address ranges that can be
     used by subnets.
    :type address_space: ~azure.mgmt.network.v2020_04_01.models.AddressSpace
    :param dhcp_options: The dhcpOptions that contains an array of DNS servers available to VMs
     deployed in the virtual network.
    :type dhcp_options: ~azure.mgmt.network.v2020_04_01.models.DhcpOptions
    :param subnets: A list of subnets in a Virtual Network.
    :type subnets: list[~azure.mgmt.network.v2020_04_01.models.Subnet]
    :param virtual_network_peerings: A list of peerings in a Virtual Network.
    :type virtual_network_peerings:
     list[~azure.mgmt.network.v2020_04_01.models.VirtualNetworkPeering]
    :ivar resource_guid: The resourceGuid property of the Virtual Network resource.
    :vartype resource_guid: str
    :ivar provisioning_state: The provisioning state of the virtual network resource. Possible
     values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    :param enable_ddos_protection: Indicates if DDoS protection is enabled for all the protected
     resources in the virtual network. It requires a DDoS protection plan associated with the
     resource.
    :type enable_ddos_protection: bool
    :param enable_vm_protection: Indicates if VM protection is enabled for all the subnets in the
     virtual network.
    :type enable_vm_protection: bool
    :param ddos_protection_plan: The DDoS protection plan associated with the virtual network.
    :type ddos_protection_plan: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param bgp_communities: Bgp Communities sent over ExpressRoute with each route corresponding to
     a prefix in this VNET.
    :type bgp_communities: ~azure.mgmt.network.v2020_04_01.models.VirtualNetworkBgpCommunities
    :param ip_allocations: Array of IpAllocation which reference this VNET.
    :type ip_allocations: list[~azure.mgmt.network.v2020_04_01.models.SubResource]
    """

    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'etag': {'readonly': True},
        'resource_guid': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'etag': {'key': 'etag', 'type': 'str'},
        'address_space': {'key': 'properties.addressSpace', 'type': 'AddressSpace'},
        'dhcp_options': {'key': 'properties.dhcpOptions', 'type': 'DhcpOptions'},
        'subnets': {'key': 'properties.subnets', 'type': '[Subnet]'},
        'virtual_network_peerings': {'key': 'properties.virtualNetworkPeerings', 'type': '[VirtualNetworkPeering]'},
        'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'enable_ddos_protection': {'key': 'properties.enableDdosProtection', 'type': 'bool'},
        'enable_vm_protection': {'key': 'properties.enableVmProtection', 'type': 'bool'},
        'ddos_protection_plan': {'key': 'properties.ddosProtectionPlan', 'type': 'SubResource'},
        'bgp_communities': {'key': 'properties.bgpCommunities', 'type': 'VirtualNetworkBgpCommunities'},
        'ip_allocations': {'key': 'properties.ipAllocations', 'type': '[SubResource]'},
    }

    def __init__(self, **kwargs):
        super(VirtualNetwork, self).__init__(**kwargs)
        self.etag = None  # read-only; populated by the service
        for _name in ('address_space', 'dhcp_options', 'subnets',
                      'virtual_network_peerings'):
            setattr(self, _name, kwargs.get(_name))
        self.resource_guid = None  # read-only; populated by the service
        self.provisioning_state = None  # read-only; populated by the service
        # These two flags default to False (not None) when omitted.
        self.enable_ddos_protection = kwargs.get('enable_ddos_protection', False)
        self.enable_vm_protection = kwargs.get('enable_vm_protection', False)
        self.ddos_protection_plan = kwargs.get('ddos_protection_plan')
        self.bgp_communities = kwargs.get('bgp_communities')
        self.ip_allocations = kwargs.get('ip_allocations')
class VirtualNetworkBgpCommunities(msrest.serialization.Model):
    """Bgp Communities sent over ExpressRoute with each route corresponding to a prefix in this VNET.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :param virtual_network_community: Required. The BGP community associated with the virtual
     network.
    :type virtual_network_community: str
    :ivar regional_community: The BGP community associated with the region of the virtual network.
    :vartype regional_community: str
    """

    _validation = {
        'virtual_network_community': {'required': True},
        'regional_community': {'readonly': True},
    }

    _attribute_map = {
        'virtual_network_community': {'key': 'virtualNetworkCommunity', 'type': 'str'},
        'regional_community': {'key': 'regionalCommunity', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VirtualNetworkBgpCommunities, self).__init__(**kwargs)
        # Mandatory kwarg; a missing value raises KeyError.
        self.virtual_network_community = kwargs['virtual_network_community']
        self.regional_community = None  # read-only; populated by the service
class VirtualNetworkConnectionGatewayReference(msrest.serialization.Model):
    """Reference to a VirtualNetworkGateway or LocalNetworkGateway resource.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. The ID of VirtualNetworkGateway or LocalNetworkGateway resource.
    :type id: str
    """

    _validation = {
        'id': {'required': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VirtualNetworkConnectionGatewayReference, self).__init__(**kwargs)
        # Mandatory kwarg; a missing value raises KeyError.
        self.id = kwargs['id']
class VirtualNetworkGateway(Resource):
    """A common class for general resource information.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param ip_configurations: IP configurations for virtual network gateway.
    :type ip_configurations:
     list[~azure.mgmt.network.v2020_04_01.models.VirtualNetworkGatewayIPConfiguration]
    :param gateway_type: The type of this virtual network gateway. Possible values include: "Vpn",
     "ExpressRoute".
    :type gateway_type: str or ~azure.mgmt.network.v2020_04_01.models.VirtualNetworkGatewayType
    :param vpn_type: The type of this virtual network gateway. Possible values include:
     "PolicyBased", "RouteBased".
    :type vpn_type: str or ~azure.mgmt.network.v2020_04_01.models.VpnType
    :param vpn_gateway_generation: The generation for this VirtualNetworkGateway. Must be None if
     gatewayType is not VPN. Possible values include: "None", "Generation1", "Generation2".
    :type vpn_gateway_generation: str or
     ~azure.mgmt.network.v2020_04_01.models.VpnGatewayGeneration
    :param enable_bgp: Whether BGP is enabled for this virtual network gateway or not.
    :type enable_bgp: bool
    :param enable_private_ip_address: Whether private IP needs to be enabled on this gateway for
     connections or not.
    :type enable_private_ip_address: bool
    :param active: ActiveActive flag.
    :type active: bool
    :param gateway_default_site: The reference to the LocalNetworkGateway resource which represents
     local network site having default routes. Assign Null value in case of removing existing
     default site setting.
    :type gateway_default_site: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param sku: The reference to the VirtualNetworkGatewaySku resource which represents the SKU
     selected for Virtual network gateway.
    :type sku: ~azure.mgmt.network.v2020_04_01.models.VirtualNetworkGatewaySku
    :param vpn_client_configuration: The reference to the VpnClientConfiguration resource which
     represents the P2S VpnClient configurations.
    :type vpn_client_configuration: ~azure.mgmt.network.v2020_04_01.models.VpnClientConfiguration
    :param bgp_settings: Virtual network gateway's BGP speaker settings.
    :type bgp_settings: ~azure.mgmt.network.v2020_04_01.models.BgpSettings
    :param custom_routes: The reference to the address space resource which represents the custom
     routes address space specified by the customer for virtual network gateway and VpnClient.
    :type custom_routes: ~azure.mgmt.network.v2020_04_01.models.AddressSpace
    :ivar resource_guid: The resource GUID property of the virtual network gateway resource.
    :vartype resource_guid: str
    :ivar provisioning_state: The provisioning state of the virtual network gateway resource.
     Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    :param enable_dns_forwarding: Whether dns forwarding is enabled or not.
    :type enable_dns_forwarding: bool
    :ivar inbound_dns_forwarding_endpoint: The IP address allocated by the gateway to which dns
     requests can be sent.
    :vartype inbound_dns_forwarding_endpoint: str
    """

    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'etag': {'readonly': True},
        'resource_guid': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'inbound_dns_forwarding_endpoint': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'etag': {'key': 'etag', 'type': 'str'},
        'ip_configurations': {'key': 'properties.ipConfigurations', 'type': '[VirtualNetworkGatewayIPConfiguration]'},
        'gateway_type': {'key': 'properties.gatewayType', 'type': 'str'},
        'vpn_type': {'key': 'properties.vpnType', 'type': 'str'},
        'vpn_gateway_generation': {'key': 'properties.vpnGatewayGeneration', 'type': 'str'},
        'enable_bgp': {'key': 'properties.enableBgp', 'type': 'bool'},
        'enable_private_ip_address': {'key': 'properties.enablePrivateIpAddress', 'type': 'bool'},
        'active': {'key': 'properties.activeActive', 'type': 'bool'},
        'gateway_default_site': {'key': 'properties.gatewayDefaultSite', 'type': 'SubResource'},
        'sku': {'key': 'properties.sku', 'type': 'VirtualNetworkGatewaySku'},
        'vpn_client_configuration': {'key': 'properties.vpnClientConfiguration', 'type': 'VpnClientConfiguration'},
        'bgp_settings': {'key': 'properties.bgpSettings', 'type': 'BgpSettings'},
        'custom_routes': {'key': 'properties.customRoutes', 'type': 'AddressSpace'},
        'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'enable_dns_forwarding': {'key': 'properties.enableDnsForwarding', 'type': 'bool'},
        'inbound_dns_forwarding_endpoint': {'key': 'properties.inboundDnsForwardingEndpoint', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VirtualNetworkGateway, self).__init__(**kwargs)
        self.etag = None  # read-only; populated by the service
        for _name in ('ip_configurations', 'gateway_type', 'vpn_type',
                      'vpn_gateway_generation', 'enable_bgp',
                      'enable_private_ip_address', 'active',
                      'gateway_default_site', 'sku', 'vpn_client_configuration',
                      'bgp_settings', 'custom_routes'):
            setattr(self, _name, kwargs.get(_name))
        self.resource_guid = None  # read-only; populated by the service
        self.provisioning_state = None  # read-only; populated by the service
        self.enable_dns_forwarding = kwargs.get('enable_dns_forwarding')
        self.inbound_dns_forwarding_endpoint = None  # read-only; populated by the service
class VirtualNetworkGatewayConnection(Resource):
    """A common class for general resource information.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param authorization_key: The authorizationKey.
    :type authorization_key: str
    :param virtual_network_gateway1: Required. The reference to virtual network gateway resource.
    :type virtual_network_gateway1: ~azure.mgmt.network.v2020_04_01.models.VirtualNetworkGateway
    :param virtual_network_gateway2: The reference to virtual network gateway resource.
    :type virtual_network_gateway2: ~azure.mgmt.network.v2020_04_01.models.VirtualNetworkGateway
    :param local_network_gateway2: The reference to local network gateway resource.
    :type local_network_gateway2: ~azure.mgmt.network.v2020_04_01.models.LocalNetworkGateway
    :param connection_type: Required. Gateway connection type. Possible values include: "IPsec",
     "Vnet2Vnet", "ExpressRoute", "VPNClient".
    :type connection_type: str or
     ~azure.mgmt.network.v2020_04_01.models.VirtualNetworkGatewayConnectionType
    :param connection_protocol: Connection protocol used for this connection. Possible values
     include: "IKEv2", "IKEv1".
    :type connection_protocol: str or
     ~azure.mgmt.network.v2020_04_01.models.VirtualNetworkGatewayConnectionProtocol
    :param routing_weight: The routing weight.
    :type routing_weight: int
    :param dpd_timeout_seconds: The dead peer detection timeout of this connection in seconds.
    :type dpd_timeout_seconds: int
    :param shared_key: The IPSec shared key.
    :type shared_key: str
    :ivar connection_status: Virtual Network Gateway connection status. Possible values include:
     "Unknown", "Connecting", "Connected", "NotConnected".
    :vartype connection_status: str or
     ~azure.mgmt.network.v2020_04_01.models.VirtualNetworkGatewayConnectionStatus
    :ivar tunnel_connection_status: Collection of all tunnels' connection health status.
    :vartype tunnel_connection_status:
     list[~azure.mgmt.network.v2020_04_01.models.TunnelConnectionHealth]
    :ivar egress_bytes_transferred: The egress bytes transferred in this connection.
    :vartype egress_bytes_transferred: long
    :ivar ingress_bytes_transferred: The ingress bytes transferred in this connection.
    :vartype ingress_bytes_transferred: long
    :param peer: The reference to peerings resource.
    :type peer: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param enable_bgp: EnableBgp flag.
    :type enable_bgp: bool
    :param use_local_azure_ip_address: Use private local Azure IP for the connection.
    :type use_local_azure_ip_address: bool
    :param use_policy_based_traffic_selectors: Enable policy-based traffic selectors.
    :type use_policy_based_traffic_selectors: bool
    :param ipsec_policies: The IPSec Policies to be considered by this connection.
    :type ipsec_policies: list[~azure.mgmt.network.v2020_04_01.models.IpsecPolicy]
    :param traffic_selector_policies: The Traffic Selector Policies to be considered by this
     connection.
    :type traffic_selector_policies:
     list[~azure.mgmt.network.v2020_04_01.models.TrafficSelectorPolicy]
    :ivar resource_guid: The resource GUID property of the virtual network gateway connection
     resource.
    :vartype resource_guid: str
    :ivar provisioning_state: The provisioning state of the virtual network gateway connection
     resource. Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    :param express_route_gateway_bypass: Bypass ExpressRoute Gateway for data forwarding.
    :type express_route_gateway_bypass: bool
    """

    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'etag': {'readonly': True},
        'virtual_network_gateway1': {'required': True},
        'connection_type': {'required': True},
        'connection_status': {'readonly': True},
        'tunnel_connection_status': {'readonly': True},
        'egress_bytes_transferred': {'readonly': True},
        'ingress_bytes_transferred': {'readonly': True},
        'resource_guid': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'etag': {'key': 'etag', 'type': 'str'},
        'authorization_key': {'key': 'properties.authorizationKey', 'type': 'str'},
        'virtual_network_gateway1': {'key': 'properties.virtualNetworkGateway1', 'type': 'VirtualNetworkGateway'},
        'virtual_network_gateway2': {'key': 'properties.virtualNetworkGateway2', 'type': 'VirtualNetworkGateway'},
        'local_network_gateway2': {'key': 'properties.localNetworkGateway2', 'type': 'LocalNetworkGateway'},
        'connection_type': {'key': 'properties.connectionType', 'type': 'str'},
        'connection_protocol': {'key': 'properties.connectionProtocol', 'type': 'str'},
        'routing_weight': {'key': 'properties.routingWeight', 'type': 'int'},
        'dpd_timeout_seconds': {'key': 'properties.dpdTimeoutSeconds', 'type': 'int'},
        'shared_key': {'key': 'properties.sharedKey', 'type': 'str'},
        'connection_status': {'key': 'properties.connectionStatus', 'type': 'str'},
        'tunnel_connection_status': {'key': 'properties.tunnelConnectionStatus', 'type': '[TunnelConnectionHealth]'},
        'egress_bytes_transferred': {'key': 'properties.egressBytesTransferred', 'type': 'long'},
        'ingress_bytes_transferred': {'key': 'properties.ingressBytesTransferred', 'type': 'long'},
        'peer': {'key': 'properties.peer', 'type': 'SubResource'},
        'enable_bgp': {'key': 'properties.enableBgp', 'type': 'bool'},
        'use_local_azure_ip_address': {'key': 'properties.useLocalAzureIpAddress', 'type': 'bool'},
        'use_policy_based_traffic_selectors': {'key': 'properties.usePolicyBasedTrafficSelectors', 'type': 'bool'},
        'ipsec_policies': {'key': 'properties.ipsecPolicies', 'type': '[IpsecPolicy]'},
        'traffic_selector_policies': {'key': 'properties.trafficSelectorPolicies', 'type': '[TrafficSelectorPolicy]'},
        'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'express_route_gateway_bypass': {'key': 'properties.expressRouteGatewayBypass', 'type': 'bool'},
    }

    def __init__(self, **kwargs):
        super(VirtualNetworkGatewayConnection, self).__init__(**kwargs)
        self.etag = None  # read-only; populated by the service
        self.authorization_key = kwargs.get('authorization_key')
        self.virtual_network_gateway1 = kwargs['virtual_network_gateway1']  # required
        self.virtual_network_gateway2 = kwargs.get('virtual_network_gateway2')
        self.local_network_gateway2 = kwargs.get('local_network_gateway2')
        self.connection_type = kwargs['connection_type']  # required
        for _name in ('connection_protocol', 'routing_weight',
                      'dpd_timeout_seconds', 'shared_key'):
            setattr(self, _name, kwargs.get(_name))
        # Status/metric fields are read-only and server-populated.
        self.connection_status = None
        self.tunnel_connection_status = None
        self.egress_bytes_transferred = None
        self.ingress_bytes_transferred = None
        for _name in ('peer', 'enable_bgp', 'use_local_azure_ip_address',
                      'use_policy_based_traffic_selectors', 'ipsec_policies',
                      'traffic_selector_policies'):
            setattr(self, _name, kwargs.get(_name))
        self.resource_guid = None  # read-only; populated by the service
        self.provisioning_state = None  # read-only; populated by the service
        self.express_route_gateway_bypass = kwargs.get('express_route_gateway_bypass')
class VirtualNetworkGatewayConnectionListEntity(Resource):
    """A common class for general resource information.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :ivar etag: Read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param authorization_key: The authorizationKey.
    :type authorization_key: str
    :param virtual_network_gateway1: Required. Reference to the first virtual network gateway.
    :type virtual_network_gateway1:
     ~azure.mgmt.network.v2020_04_01.models.VirtualNetworkConnectionGatewayReference
    :param virtual_network_gateway2: Reference to the second virtual network gateway.
    :type virtual_network_gateway2:
     ~azure.mgmt.network.v2020_04_01.models.VirtualNetworkConnectionGatewayReference
    :param local_network_gateway2: Reference to the local network gateway.
    :type local_network_gateway2:
     ~azure.mgmt.network.v2020_04_01.models.VirtualNetworkConnectionGatewayReference
    :param connection_type: Required. Gateway connection type. Possible values include: "IPsec",
     "Vnet2Vnet", "ExpressRoute", "VPNClient".
    :type connection_type: str or
     ~azure.mgmt.network.v2020_04_01.models.VirtualNetworkGatewayConnectionType
    :param connection_protocol: Connection protocol used for this connection. Possible values
     include: "IKEv2", "IKEv1".
    :type connection_protocol: str or
     ~azure.mgmt.network.v2020_04_01.models.VirtualNetworkGatewayConnectionProtocol
    :param routing_weight: The routing weight.
    :type routing_weight: int
    :param shared_key: The IPSec shared key.
    :type shared_key: str
    :ivar connection_status: Gateway connection status. Possible values include: "Unknown",
     "Connecting", "Connected", "NotConnected".
    :vartype connection_status: str or
     ~azure.mgmt.network.v2020_04_01.models.VirtualNetworkGatewayConnectionStatus
    :ivar tunnel_connection_status: Connection health status of every tunnel.
    :vartype tunnel_connection_status:
     list[~azure.mgmt.network.v2020_04_01.models.TunnelConnectionHealth]
    :ivar egress_bytes_transferred: Egress bytes transferred in this connection.
    :vartype egress_bytes_transferred: long
    :ivar ingress_bytes_transferred: Ingress bytes transferred in this connection.
    :vartype ingress_bytes_transferred: long
    :param peer: The reference to peerings resource.
    :type peer: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param enable_bgp: EnableBgp flag.
    :type enable_bgp: bool
    :param use_policy_based_traffic_selectors: Enable policy-based traffic selectors.
    :type use_policy_based_traffic_selectors: bool
    :param ipsec_policies: IPSec policies to be considered by this connection.
    :type ipsec_policies: list[~azure.mgmt.network.v2020_04_01.models.IpsecPolicy]
    :param traffic_selector_policies: Traffic selector policies to be considered by this
     connection.
    :type traffic_selector_policies:
     list[~azure.mgmt.network.v2020_04_01.models.TrafficSelectorPolicy]
    :ivar resource_guid: Resource GUID of the virtual network gateway connection.
    :vartype resource_guid: str
    :ivar provisioning_state: Provisioning state of the connection resource. Possible values
     include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    :param express_route_gateway_bypass: Bypass ExpressRoute Gateway for data forwarding.
    :type express_route_gateway_bypass: bool
    """

    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'etag': {'readonly': True},
        'virtual_network_gateway1': {'required': True},
        'connection_type': {'required': True},
        'connection_status': {'readonly': True},
        'tunnel_connection_status': {'readonly': True},
        'egress_bytes_transferred': {'readonly': True},
        'ingress_bytes_transferred': {'readonly': True},
        'resource_guid': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'etag': {'key': 'etag', 'type': 'str'},
        'authorization_key': {'key': 'properties.authorizationKey', 'type': 'str'},
        'virtual_network_gateway1': {'key': 'properties.virtualNetworkGateway1', 'type': 'VirtualNetworkConnectionGatewayReference'},
        'virtual_network_gateway2': {'key': 'properties.virtualNetworkGateway2', 'type': 'VirtualNetworkConnectionGatewayReference'},
        'local_network_gateway2': {'key': 'properties.localNetworkGateway2', 'type': 'VirtualNetworkConnectionGatewayReference'},
        'connection_type': {'key': 'properties.connectionType', 'type': 'str'},
        'connection_protocol': {'key': 'properties.connectionProtocol', 'type': 'str'},
        'routing_weight': {'key': 'properties.routingWeight', 'type': 'int'},
        'shared_key': {'key': 'properties.sharedKey', 'type': 'str'},
        'connection_status': {'key': 'properties.connectionStatus', 'type': 'str'},
        'tunnel_connection_status': {'key': 'properties.tunnelConnectionStatus', 'type': '[TunnelConnectionHealth]'},
        'egress_bytes_transferred': {'key': 'properties.egressBytesTransferred', 'type': 'long'},
        'ingress_bytes_transferred': {'key': 'properties.ingressBytesTransferred', 'type': 'long'},
        'peer': {'key': 'properties.peer', 'type': 'SubResource'},
        'enable_bgp': {'key': 'properties.enableBgp', 'type': 'bool'},
        'use_policy_based_traffic_selectors': {'key': 'properties.usePolicyBasedTrafficSelectors', 'type': 'bool'},
        'ipsec_policies': {'key': 'properties.ipsecPolicies', 'type': '[IpsecPolicy]'},
        'traffic_selector_policies': {'key': 'properties.trafficSelectorPolicies', 'type': '[TrafficSelectorPolicy]'},
        'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'express_route_gateway_bypass': {'key': 'properties.expressRouteGatewayBypass', 'type': 'bool'},
    }

    def __init__(self, **kwargs):
        super(VirtualNetworkGatewayConnectionListEntity, self).__init__(**kwargs)
        # Server-populated (read-only) attributes are always reset to None here;
        # the serializer skips them on outgoing requests.
        self.etag = None
        self.authorization_key = kwargs.get('authorization_key')
        # The two required keys are looked up directly so a missing value
        # raises KeyError at construction time rather than failing later.
        self.virtual_network_gateway1 = kwargs['virtual_network_gateway1']
        self.virtual_network_gateway2 = kwargs.get('virtual_network_gateway2')
        self.local_network_gateway2 = kwargs.get('local_network_gateway2')
        self.connection_type = kwargs['connection_type']
        self.connection_protocol = kwargs.get('connection_protocol')
        self.routing_weight = kwargs.get('routing_weight')
        self.shared_key = kwargs.get('shared_key')
        self.connection_status = None
        self.tunnel_connection_status = None
        self.egress_bytes_transferred = None
        self.ingress_bytes_transferred = None
        self.peer = kwargs.get('peer')
        self.enable_bgp = kwargs.get('enable_bgp')
        self.use_policy_based_traffic_selectors = kwargs.get('use_policy_based_traffic_selectors')
        self.ipsec_policies = kwargs.get('ipsec_policies')
        self.traffic_selector_policies = kwargs.get('traffic_selector_policies')
        self.resource_guid = None
        self.provisioning_state = None
        self.express_route_gateway_bypass = kwargs.get('express_route_gateway_bypass')
class VirtualNetworkGatewayConnectionListResult(msrest.serialization.Model):
    """Response for the ListVirtualNetworkGatewayConnections API service call.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param value: The VirtualNetworkGatewayConnection resources that exist in a resource group.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.VirtualNetworkGatewayConnection]
    :ivar next_link: URL that retrieves the next page of results.
    :vartype next_link: str
    """

    _validation = {
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[VirtualNetworkGatewayConnection]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VirtualNetworkGatewayConnectionListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value')
        # next_link is read-only and filled in by the service.
        self.next_link = None
class VirtualNetworkGatewayIPConfiguration(SubResource):
    """IP configuration for virtual network gateway.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the resource, unique within its resource group; can be used to access
     the resource.
    :type name: str
    :ivar etag: Read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param private_ip_allocation_method: Private IP address allocation method. Possible values
     include: "Static", "Dynamic".
    :type private_ip_allocation_method: str or
     ~azure.mgmt.network.v2020_04_01.models.IPAllocationMethod
    :param subnet: The reference to the subnet resource.
    :type subnet: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param public_ip_address: The reference to the public IP resource.
    :type public_ip_address: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :ivar private_ip_address: Private IP address for this gateway.
    :vartype private_ip_address: str
    :ivar provisioning_state: Provisioning state of the IP configuration resource. Possible
     values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    _validation = {
        'etag': {'readonly': True},
        'private_ip_address': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'private_ip_allocation_method': {'key': 'properties.privateIPAllocationMethod', 'type': 'str'},
        'subnet': {'key': 'properties.subnet', 'type': 'SubResource'},
        'public_ip_address': {'key': 'properties.publicIPAddress', 'type': 'SubResource'},
        'private_ip_address': {'key': 'properties.privateIPAddress', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VirtualNetworkGatewayIPConfiguration, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        # Read-only fields are reset to None; the service populates them.
        self.etag = None
        self.private_ip_allocation_method = kwargs.get('private_ip_allocation_method')
        self.subnet = kwargs.get('subnet')
        self.public_ip_address = kwargs.get('public_ip_address')
        self.private_ip_address = None
        self.provisioning_state = None
class VirtualNetworkGatewayListConnectionsResult(msrest.serialization.Model):
    """Response for the VirtualNetworkGatewayListConnections API service call.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param value: The VirtualNetworkGatewayConnection resources that exist in a resource group.
    :type value:
     list[~azure.mgmt.network.v2020_04_01.models.VirtualNetworkGatewayConnectionListEntity]
    :ivar next_link: URL that retrieves the next page of results.
    :vartype next_link: str
    """

    _validation = {
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[VirtualNetworkGatewayConnectionListEntity]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VirtualNetworkGatewayListConnectionsResult, self).__init__(**kwargs)
        self.value = kwargs.get('value')
        # next_link is read-only and filled in by the service.
        self.next_link = None
class VirtualNetworkGatewayListResult(msrest.serialization.Model):
    """Response for the ListVirtualNetworkGateways API service call.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param value: The VirtualNetworkGateway resources that exist in a resource group.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.VirtualNetworkGateway]
    :ivar next_link: URL that retrieves the next page of results.
    :vartype next_link: str
    """

    _validation = {
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[VirtualNetworkGateway]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VirtualNetworkGatewayListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value')
        # next_link is read-only and filled in by the service.
        self.next_link = None
class VirtualNetworkGatewaySku(msrest.serialization.Model):
    """VirtualNetworkGatewaySku details.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param name: Gateway SKU name. Possible values include: "Basic", "HighPerformance",
     "Standard", "UltraPerformance", "VpnGw1", "VpnGw2", "VpnGw3", "VpnGw4", "VpnGw5", "VpnGw1AZ",
     "VpnGw2AZ", "VpnGw3AZ", "VpnGw4AZ", "VpnGw5AZ", "ErGw1AZ", "ErGw2AZ", "ErGw3AZ".
    :type name: str or ~azure.mgmt.network.v2020_04_01.models.VirtualNetworkGatewaySkuName
    :param tier: Gateway SKU tier. Possible values include: "Basic", "HighPerformance",
     "Standard", "UltraPerformance", "VpnGw1", "VpnGw2", "VpnGw3", "VpnGw4", "VpnGw5", "VpnGw1AZ",
     "VpnGw2AZ", "VpnGw3AZ", "VpnGw4AZ", "VpnGw5AZ", "ErGw1AZ", "ErGw2AZ", "ErGw3AZ".
    :type tier: str or ~azure.mgmt.network.v2020_04_01.models.VirtualNetworkGatewaySkuTier
    :ivar capacity: The capacity.
    :vartype capacity: int
    """

    _validation = {
        'capacity': {'readonly': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'tier': {'key': 'tier', 'type': 'str'},
        'capacity': {'key': 'capacity', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        super(VirtualNetworkGatewaySku, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        self.tier = kwargs.get('tier')
        # capacity is read-only and reported by the service.
        self.capacity = None
class VirtualNetworkListResult(msrest.serialization.Model):
    """Response for the ListVirtualNetworks API service call.

    :param value: The VirtualNetwork resources in a resource group.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.VirtualNetwork]
    :param next_link: URL that retrieves the next page of results.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[VirtualNetwork]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VirtualNetworkListResult, self).__init__(**kwargs)
        # Both fields are writable on this paged result.
        self.value = kwargs.get('value')
        self.next_link = kwargs.get('next_link')
class VirtualNetworkListUsageResult(msrest.serialization.Model):
    """Response for the virtual networks GetUsage API service call.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar value: VirtualNetwork usage stats.
    :vartype value: list[~azure.mgmt.network.v2020_04_01.models.VirtualNetworkUsage]
    :param next_link: URL that retrieves the next page of results.
    :type next_link: str
    """

    _validation = {
        'value': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[VirtualNetworkUsage]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VirtualNetworkListUsageResult, self).__init__(**kwargs)
        # Unusually for a paged result, 'value' is read-only here while
        # 'next_link' is writable.
        self.value = None
        self.next_link = kwargs.get('next_link')
class VirtualNetworkPeering(SubResource):
    """Peerings in a virtual network resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the resource, unique within its resource group; can be used to access
     the resource.
    :type name: str
    :ivar etag: Read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param allow_virtual_network_access: Whether VMs in the local virtual network space can
     access VMs in the remote virtual network space.
    :type allow_virtual_network_access: bool
    :param allow_forwarded_traffic: Whether forwarded traffic from VMs in the local virtual
     network is allowed/disallowed in the remote virtual network.
    :type allow_forwarded_traffic: bool
    :param allow_gateway_transit: If gateway links can be used in remote virtual networking to
     link to this virtual network.
    :type allow_gateway_transit: bool
    :param use_remote_gateways: If remote gateways can be used on this virtual network. If the
     flag is set to true, and allowGatewayTransit on remote peering is also true, virtual network
     will use gateways of remote virtual network for transit. Only one peering can have this flag
     set to true. This flag cannot be set if virtual network already has a gateway.
    :type use_remote_gateways: bool
    :param remote_virtual_network: The reference to the remote virtual network. The remote
     virtual network can be in the same or different region (preview). See here to register for
     the preview and learn more
     (https://docs.microsoft.com/en-us/azure/virtual-network/virtual-network-create-peering).
    :type remote_virtual_network: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param remote_address_space: The reference to the remote virtual network address space.
    :type remote_address_space: ~azure.mgmt.network.v2020_04_01.models.AddressSpace
    :param peering_state: Status of the peering. Possible values include: "Initiated",
     "Connected", "Disconnected".
    :type peering_state: str or ~azure.mgmt.network.v2020_04_01.models.VirtualNetworkPeeringState
    :ivar provisioning_state: Provisioning state of the peering resource. Possible values
     include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    _validation = {
        'etag': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'allow_virtual_network_access': {'key': 'properties.allowVirtualNetworkAccess', 'type': 'bool'},
        'allow_forwarded_traffic': {'key': 'properties.allowForwardedTraffic', 'type': 'bool'},
        'allow_gateway_transit': {'key': 'properties.allowGatewayTransit', 'type': 'bool'},
        'use_remote_gateways': {'key': 'properties.useRemoteGateways', 'type': 'bool'},
        'remote_virtual_network': {'key': 'properties.remoteVirtualNetwork', 'type': 'SubResource'},
        'remote_address_space': {'key': 'properties.remoteAddressSpace', 'type': 'AddressSpace'},
        'peering_state': {'key': 'properties.peeringState', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VirtualNetworkPeering, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        # Read-only fields are reset to None; the service populates them.
        self.etag = None
        self.allow_virtual_network_access = kwargs.get('allow_virtual_network_access')
        self.allow_forwarded_traffic = kwargs.get('allow_forwarded_traffic')
        self.allow_gateway_transit = kwargs.get('allow_gateway_transit')
        self.use_remote_gateways = kwargs.get('use_remote_gateways')
        self.remote_virtual_network = kwargs.get('remote_virtual_network')
        self.remote_address_space = kwargs.get('remote_address_space')
        self.peering_state = kwargs.get('peering_state')
        self.provisioning_state = None
class VirtualNetworkPeeringListResult(msrest.serialization.Model):
    """Response for ListSubnets API service call. Retrieves all subnets that belong to a virtual network.

    :param value: The peerings in a virtual network.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.VirtualNetworkPeering]
    :param next_link: URL that retrieves the next page of results.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[VirtualNetworkPeering]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VirtualNetworkPeeringListResult, self).__init__(**kwargs)
        # Both fields are writable on this paged result.
        self.value = kwargs.get('value')
        self.next_link = kwargs.get('next_link')
class VirtualNetworkTap(Resource):
    """Virtual Network Tap resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :ivar etag: Read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar network_interface_tap_configurations: Resource IDs of the network interface IP
     configurations that need to be tapped.
    :vartype network_interface_tap_configurations:
     list[~azure.mgmt.network.v2020_04_01.models.NetworkInterfaceTapConfiguration]
    :ivar resource_guid: Resource GUID of the virtual network tap resource.
    :vartype resource_guid: str
    :ivar provisioning_state: Provisioning state of the tap resource. Possible values include:
     "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    :param destination_network_interface_ip_configuration: The reference to the private IP
     Address of the collector nic that will receive the tap.
    :type destination_network_interface_ip_configuration:
     ~azure.mgmt.network.v2020_04_01.models.NetworkInterfaceIPConfiguration
    :param destination_load_balancer_front_end_ip_configuration: The reference to the private IP
     address on the internal Load Balancer that will receive the tap.
    :type destination_load_balancer_front_end_ip_configuration:
     ~azure.mgmt.network.v2020_04_01.models.FrontendIPConfiguration
    :param destination_port: The VXLAN destination port that will receive the tapped traffic.
    :type destination_port: int
    """

    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'etag': {'readonly': True},
        'network_interface_tap_configurations': {'readonly': True},
        'resource_guid': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'etag': {'key': 'etag', 'type': 'str'},
        'network_interface_tap_configurations': {'key': 'properties.networkInterfaceTapConfigurations', 'type': '[NetworkInterfaceTapConfiguration]'},
        'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'destination_network_interface_ip_configuration': {'key': 'properties.destinationNetworkInterfaceIPConfiguration', 'type': 'NetworkInterfaceIPConfiguration'},
        'destination_load_balancer_front_end_ip_configuration': {'key': 'properties.destinationLoadBalancerFrontEndIPConfiguration', 'type': 'FrontendIPConfiguration'},
        'destination_port': {'key': 'properties.destinationPort', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        super(VirtualNetworkTap, self).__init__(**kwargs)
        # Read-only fields are reset to None; the service populates them.
        self.etag = None
        self.network_interface_tap_configurations = None
        self.resource_guid = None
        self.provisioning_state = None
        self.destination_network_interface_ip_configuration = kwargs.get('destination_network_interface_ip_configuration')
        self.destination_load_balancer_front_end_ip_configuration = kwargs.get('destination_load_balancer_front_end_ip_configuration')
        self.destination_port = kwargs.get('destination_port')
class VirtualNetworkTapListResult(msrest.serialization.Model):
    """Response for ListVirtualNetworkTap API service call.

    :param value: The VirtualNetworkTaps in a resource group.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.VirtualNetworkTap]
    :param next_link: URL that retrieves the next page of results.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[VirtualNetworkTap]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VirtualNetworkTapListResult, self).__init__(**kwargs)
        # Both fields are writable on this paged result.
        self.value = kwargs.get('value')
        self.next_link = kwargs.get('next_link')
class VirtualNetworkUsage(msrest.serialization.Model):
    """Usage details for subnet.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar current_value: Number of IPs used from the Subnet.
    :vartype current_value: float
    :ivar id: Subnet identifier.
    :vartype id: str
    :ivar limit: Size of the subnet.
    :vartype limit: float
    :ivar name: Name containing common and localized value for usage.
    :vartype name: ~azure.mgmt.network.v2020_04_01.models.VirtualNetworkUsageName
    :ivar unit: Usage units. Returns 'Count'.
    :vartype unit: str
    """

    _validation = {
        'current_value': {'readonly': True},
        'id': {'readonly': True},
        'limit': {'readonly': True},
        'name': {'readonly': True},
        'unit': {'readonly': True},
    }

    _attribute_map = {
        'current_value': {'key': 'currentValue', 'type': 'float'},
        'id': {'key': 'id', 'type': 'str'},
        'limit': {'key': 'limit', 'type': 'float'},
        'name': {'key': 'name', 'type': 'VirtualNetworkUsageName'},
        'unit': {'key': 'unit', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VirtualNetworkUsage, self).__init__(**kwargs)
        # Every field on this model is read-only and server-populated.
        self.current_value = None
        self.id = None
        self.limit = None
        self.name = None
        self.unit = None
class VirtualNetworkUsageName(msrest.serialization.Model):
    """Usage strings container.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar localized_value: Localized subnet size and usage string.
    :vartype localized_value: str
    :ivar value: Subnet size and usage string.
    :vartype value: str
    """

    _validation = {
        'localized_value': {'readonly': True},
        'value': {'readonly': True},
    }

    _attribute_map = {
        'localized_value': {'key': 'localizedValue', 'type': 'str'},
        'value': {'key': 'value', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VirtualNetworkUsageName, self).__init__(**kwargs)
        # Both fields are read-only and server-populated.
        self.localized_value = None
        self.value = None
class VirtualRouter(Resource):
    """VirtualRouter Resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :ivar etag: Read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param virtual_router_asn: VirtualRouter ASN.
    :type virtual_router_asn: long
    :param virtual_router_ips: VirtualRouter IPs.
    :type virtual_router_ips: list[str]
    :param hosted_subnet: The Subnet on which VirtualRouter is hosted.
    :type hosted_subnet: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param hosted_gateway: The Gateway on which VirtualRouter is hosted.
    :type hosted_gateway: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :ivar peerings: References to VirtualRouterPeerings.
    :vartype peerings: list[~azure.mgmt.network.v2020_04_01.models.SubResource]
    :ivar provisioning_state: Provisioning state of the resource. Possible values include:
     "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'etag': {'readonly': True},
        'virtual_router_asn': {'maximum': 4294967295, 'minimum': 0},
        'peerings': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'etag': {'key': 'etag', 'type': 'str'},
        'virtual_router_asn': {'key': 'properties.virtualRouterAsn', 'type': 'long'},
        'virtual_router_ips': {'key': 'properties.virtualRouterIps', 'type': '[str]'},
        'hosted_subnet': {'key': 'properties.hostedSubnet', 'type': 'SubResource'},
        'hosted_gateway': {'key': 'properties.hostedGateway', 'type': 'SubResource'},
        'peerings': {'key': 'properties.peerings', 'type': '[SubResource]'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VirtualRouter, self).__init__(**kwargs)
        # Read-only fields are reset to None; the service populates them.
        self.etag = None
        self.virtual_router_asn = kwargs.get('virtual_router_asn')
        self.virtual_router_ips = kwargs.get('virtual_router_ips')
        self.hosted_subnet = kwargs.get('hosted_subnet')
        self.hosted_gateway = kwargs.get('hosted_gateway')
        self.peerings = None
        self.provisioning_state = None
class VirtualRouterListResult(msrest.serialization.Model):
    """Response for ListVirtualRouters API service call.

    :param value: The Virtual Routers found.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.VirtualRouter]
    :param next_link: URL that retrieves the next page of results.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[VirtualRouter]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VirtualRouterListResult, self).__init__(**kwargs)
        # Both fields are writable on this paged result.
        self.value = kwargs.get('value')
        self.next_link = kwargs.get('next_link')
class VirtualRouterPeering(SubResource):
    """Virtual Router Peering resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: Name of the peering, unique within the virtual router.
    :type name: str
    :ivar etag: Read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar type: Peering type.
    :vartype type: str
    :param peer_asn: Peer ASN.
    :type peer_asn: long
    :param peer_ip: Peer IP.
    :type peer_ip: str
    :ivar provisioning_state: Provisioning state of the resource. Possible values include:
     "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    _validation = {
        'etag': {'readonly': True},
        'type': {'readonly': True},
        'peer_asn': {'maximum': 4294967295, 'minimum': 0},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'peer_asn': {'key': 'properties.peerAsn', 'type': 'long'},
        'peer_ip': {'key': 'properties.peerIp', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VirtualRouterPeering, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        # Read-only fields are reset to None; the service populates them.
        self.etag = None
        self.type = None
        self.peer_asn = kwargs.get('peer_asn')
        self.peer_ip = kwargs.get('peer_ip')
        self.provisioning_state = None
class VirtualRouterPeeringListResult(msrest.serialization.Model):
    """The page of results returned by the ListVirtualRouterPeerings API service call.

    :param value: List of VirtualRouterPeerings in a VirtualRouter.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.VirtualRouterPeering]
    :param next_link: URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[VirtualRouterPeering]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VirtualRouterPeeringListResult, self).__init__(**kwargs)
        # Both properties are optional; absent keys default to None.
        self.value = kwargs.get('value')
        self.next_link = kwargs.get('next_link')
class VirtualWAN(Resource):
    """A VirtualWAN resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param disable_vpn_encryption: Vpn encryption to be disabled or not.
    :type disable_vpn_encryption: bool
    :ivar virtual_hubs: List of VirtualHubs in the VirtualWAN.
    :vartype virtual_hubs: list[~azure.mgmt.network.v2020_04_01.models.SubResource]
    :ivar vpn_sites: List of VpnSites in the VirtualWAN.
    :vartype vpn_sites: list[~azure.mgmt.network.v2020_04_01.models.SubResource]
    :param allow_branch_to_branch_traffic: True if branch to branch traffic is allowed.
    :type allow_branch_to_branch_traffic: bool
    :param allow_vnet_to_vnet_traffic: True if Vnet to Vnet traffic is allowed.
    :type allow_vnet_to_vnet_traffic: bool
    :ivar office365_local_breakout_category: The office local breakout category. Possible values
     include: "Optimize", "OptimizeAndAllow", "All", "None".
    :vartype office365_local_breakout_category: str or
     ~azure.mgmt.network.v2020_04_01.models.OfficeTrafficCategory
    :ivar provisioning_state: The provisioning state of the virtual WAN resource. Possible values
     include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    :param type_properties_type: The type of the VirtualWAN.
    :type type_properties_type: str
    """

    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'etag': {'readonly': True},
        'virtual_hubs': {'readonly': True},
        'vpn_sites': {'readonly': True},
        'office365_local_breakout_category': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'etag': {'key': 'etag', 'type': 'str'},
        'disable_vpn_encryption': {'key': 'properties.disableVpnEncryption', 'type': 'bool'},
        'virtual_hubs': {'key': 'properties.virtualHubs', 'type': '[SubResource]'},
        'vpn_sites': {'key': 'properties.vpnSites', 'type': '[SubResource]'},
        'allow_branch_to_branch_traffic': {'key': 'properties.allowBranchToBranchTraffic', 'type': 'bool'},
        'allow_vnet_to_vnet_traffic': {'key': 'properties.allowVnetToVnetTraffic', 'type': 'bool'},
        'office365_local_breakout_category': {'key': 'properties.office365LocalBreakoutCategory', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'type_properties_type': {'key': 'properties.type', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VirtualWAN, self).__init__(**kwargs)
        # Server-populated (read-only) attributes always start out as None.
        self.etag = None
        self.virtual_hubs = None
        self.vpn_sites = None
        self.office365_local_breakout_category = None
        self.provisioning_state = None
        # Caller-settable properties; absent keys default to None.
        self.disable_vpn_encryption = kwargs.get('disable_vpn_encryption')
        self.allow_branch_to_branch_traffic = kwargs.get('allow_branch_to_branch_traffic')
        self.allow_vnet_to_vnet_traffic = kwargs.get('allow_vnet_to_vnet_traffic')
        self.type_properties_type = kwargs.get('type_properties_type')
class VirtualWanSecurityProvider(msrest.serialization.Model):
    """A single Virtual WAN security provider entry.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param name: Name of the security provider.
    :type name: str
    :param url: Url of the security provider.
    :type url: str
    :ivar type: Name of the security provider. Possible values include: "External", "Native".
    :vartype type: str or ~azure.mgmt.network.v2020_04_01.models.VirtualWanSecurityProviderType
    """

    _validation = {
        'type': {'readonly': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'url': {'key': 'url', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VirtualWanSecurityProvider, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        self.url = kwargs.get('url')
        # 'type' is read-only and filled in by the service.
        self.type = None
class VirtualWanSecurityProviders(msrest.serialization.Model):
    """A collection of Virtual WAN security providers.

    :param supported_providers: List of VirtualWAN security providers.
    :type supported_providers:
     list[~azure.mgmt.network.v2020_04_01.models.VirtualWanSecurityProvider]
    """

    _attribute_map = {
        'supported_providers': {'key': 'supportedProviders', 'type': '[VirtualWanSecurityProvider]'},
    }

    def __init__(self, **kwargs):
        super(VirtualWanSecurityProviders, self).__init__(**kwargs)
        # Optional list; None when the key is absent.
        self.supported_providers = kwargs.get('supported_providers')
class VirtualWanVpnProfileParameters(msrest.serialization.Model):
    """Parameters for generating a Virtual Wan Vpn profile.

    :param vpn_server_configuration_resource_id: VpnServerConfiguration partial resource uri with
     which VirtualWan is associated to.
    :type vpn_server_configuration_resource_id: str
    :param authentication_method: VPN client authentication method. Possible values include:
     "EAPTLS", "EAPMSCHAPv2".
    :type authentication_method: str or ~azure.mgmt.network.v2020_04_01.models.AuthenticationMethod
    """

    _attribute_map = {
        'vpn_server_configuration_resource_id': {'key': 'vpnServerConfigurationResourceId', 'type': 'str'},
        'authentication_method': {'key': 'authenticationMethod', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VirtualWanVpnProfileParameters, self).__init__(**kwargs)
        # Both parameters are optional; absent keys default to None.
        self.vpn_server_configuration_resource_id = kwargs.get('vpn_server_configuration_resource_id')
        self.authentication_method = kwargs.get('authentication_method')
class VM(Resource):
    """Describes a Virtual Machine.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    """

    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
    }

    def __init__(self, **kwargs):
        # No properties beyond those inherited from the Resource base class.
        super(VM, self).__init__(**kwargs)
class VnetRoute(msrest.serialization.Model):
    """Routes controlling traffic from a VirtualHub into a virtual network connection.

    :param static_routes: List of all Static Routes.
    :type static_routes: list[~azure.mgmt.network.v2020_04_01.models.StaticRoute]
    """

    _attribute_map = {
        'static_routes': {'key': 'staticRoutes', 'type': '[StaticRoute]'},
    }

    def __init__(self, **kwargs):
        super(VnetRoute, self).__init__(**kwargs)
        # Optional list of StaticRoute models; None when not supplied.
        self.static_routes = kwargs.get('static_routes')
class VpnClientConfiguration(msrest.serialization.Model):
    """VpnClientConfiguration for P2S client.

    :param vpn_client_address_pool: The reference to the address space resource which represents
     Address space for P2S VpnClient.
    :type vpn_client_address_pool: ~azure.mgmt.network.v2020_04_01.models.AddressSpace
    :param vpn_client_root_certificates: VpnClientRootCertificate for virtual network gateway.
    :type vpn_client_root_certificates:
     list[~azure.mgmt.network.v2020_04_01.models.VpnClientRootCertificate]
    :param vpn_client_revoked_certificates: VpnClientRevokedCertificate for Virtual network
     gateway.
    :type vpn_client_revoked_certificates:
     list[~azure.mgmt.network.v2020_04_01.models.VpnClientRevokedCertificate]
    :param vpn_client_protocols: VpnClientProtocols for Virtual network gateway.
    :type vpn_client_protocols: list[str or
     ~azure.mgmt.network.v2020_04_01.models.VpnClientProtocol]
    :param vpn_client_ipsec_policies: VpnClientIpsecPolicies for virtual network gateway P2S
     client.
    :type vpn_client_ipsec_policies: list[~azure.mgmt.network.v2020_04_01.models.IpsecPolicy]
    :param radius_server_address: The radius server address property of the VirtualNetworkGateway
     resource for vpn client connection.
    :type radius_server_address: str
    :param radius_server_secret: The radius secret property of the VirtualNetworkGateway resource
     for vpn client connection.
    :type radius_server_secret: str
    :param radius_servers: The radiusServers property for multiple radius server configuration.
    :type radius_servers: list[~azure.mgmt.network.v2020_04_01.models.RadiusServer]
    :param aad_tenant: The AADTenant property of the VirtualNetworkGateway resource for vpn client
     connection used for AAD authentication.
    :type aad_tenant: str
    :param aad_audience: The AADAudience property of the VirtualNetworkGateway resource for vpn
     client connection used for AAD authentication.
    :type aad_audience: str
    :param aad_issuer: The AADIssuer property of the VirtualNetworkGateway resource for vpn client
     connection used for AAD authentication.
    :type aad_issuer: str
    """

    _attribute_map = {
        'vpn_client_address_pool': {'key': 'vpnClientAddressPool', 'type': 'AddressSpace'},
        'vpn_client_root_certificates': {'key': 'vpnClientRootCertificates', 'type': '[VpnClientRootCertificate]'},
        'vpn_client_revoked_certificates': {'key': 'vpnClientRevokedCertificates', 'type': '[VpnClientRevokedCertificate]'},
        'vpn_client_protocols': {'key': 'vpnClientProtocols', 'type': '[str]'},
        'vpn_client_ipsec_policies': {'key': 'vpnClientIpsecPolicies', 'type': '[IpsecPolicy]'},
        'radius_server_address': {'key': 'radiusServerAddress', 'type': 'str'},
        'radius_server_secret': {'key': 'radiusServerSecret', 'type': 'str'},
        'radius_servers': {'key': 'radiusServers', 'type': '[RadiusServer]'},
        'aad_tenant': {'key': 'aadTenant', 'type': 'str'},
        'aad_audience': {'key': 'aadAudience', 'type': 'str'},
        'aad_issuer': {'key': 'aadIssuer', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VpnClientConfiguration, self).__init__(**kwargs)
        # Every property is optional; copy each one from kwargs (None when absent).
        for prop in ('vpn_client_address_pool', 'vpn_client_root_certificates',
                     'vpn_client_revoked_certificates', 'vpn_client_protocols',
                     'vpn_client_ipsec_policies', 'radius_server_address',
                     'radius_server_secret', 'radius_servers', 'aad_tenant',
                     'aad_audience', 'aad_issuer'):
            setattr(self, prop, kwargs.get(prop))
class VpnClientConnectionHealth(msrest.serialization.Model):
    """VpnClientConnectionHealth properties.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar total_ingress_bytes_transferred: Total of the Ingress Bytes Transferred in this P2S Vpn
     connection.
    :vartype total_ingress_bytes_transferred: long
    :ivar total_egress_bytes_transferred: Total of the Egress Bytes Transferred in this connection.
    :vartype total_egress_bytes_transferred: long
    :param vpn_client_connections_count: The total of p2s vpn clients connected at this time to
     this P2SVpnGateway.
    :type vpn_client_connections_count: int
    :param allocated_ip_addresses: List of allocated ip addresses to the connected p2s vpn clients.
    :type allocated_ip_addresses: list[str]
    """

    _validation = {
        'total_ingress_bytes_transferred': {'readonly': True},
        'total_egress_bytes_transferred': {'readonly': True},
    }

    _attribute_map = {
        'total_ingress_bytes_transferred': {'key': 'totalIngressBytesTransferred', 'type': 'long'},
        'total_egress_bytes_transferred': {'key': 'totalEgressBytesTransferred', 'type': 'long'},
        'vpn_client_connections_count': {'key': 'vpnClientConnectionsCount', 'type': 'int'},
        'allocated_ip_addresses': {'key': 'allocatedIpAddresses', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        super(VpnClientConnectionHealth, self).__init__(**kwargs)
        # Byte counters are read-only and populated by the service.
        self.total_ingress_bytes_transferred = None
        self.total_egress_bytes_transferred = None
        # Caller-settable properties.
        self.vpn_client_connections_count = kwargs.get('vpn_client_connections_count')
        self.allocated_ip_addresses = kwargs.get('allocated_ip_addresses')
class VpnClientConnectionHealthDetail(msrest.serialization.Model):
    """VPN client connection health detail.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar vpn_connection_id: The vpn client Id.
    :vartype vpn_connection_id: str
    :ivar vpn_connection_duration: The duration time of a connected vpn client.
    :vartype vpn_connection_duration: long
    :ivar vpn_connection_time: The start time of a connected vpn client.
    :vartype vpn_connection_time: str
    :ivar public_ip_address: The public Ip of a connected vpn client.
    :vartype public_ip_address: str
    :ivar private_ip_address: The assigned private Ip of a connected vpn client.
    :vartype private_ip_address: str
    :ivar vpn_user_name: The user name of a connected vpn client.
    :vartype vpn_user_name: str
    :ivar max_bandwidth: The max band width.
    :vartype max_bandwidth: long
    :ivar egress_packets_transferred: The egress packets per second.
    :vartype egress_packets_transferred: long
    :ivar egress_bytes_transferred: The egress bytes per second.
    :vartype egress_bytes_transferred: long
    :ivar ingress_packets_transferred: The ingress packets per second.
    :vartype ingress_packets_transferred: long
    :ivar ingress_bytes_transferred: The ingress bytes per second.
    :vartype ingress_bytes_transferred: long
    :ivar max_packets_per_second: The max packets transferred per second.
    :vartype max_packets_per_second: long
    """

    _validation = {
        'vpn_connection_id': {'readonly': True},
        'vpn_connection_duration': {'readonly': True},
        'vpn_connection_time': {'readonly': True},
        'public_ip_address': {'readonly': True},
        'private_ip_address': {'readonly': True},
        'vpn_user_name': {'readonly': True},
        'max_bandwidth': {'readonly': True},
        'egress_packets_transferred': {'readonly': True},
        'egress_bytes_transferred': {'readonly': True},
        'ingress_packets_transferred': {'readonly': True},
        'ingress_bytes_transferred': {'readonly': True},
        'max_packets_per_second': {'readonly': True},
    }

    _attribute_map = {
        'vpn_connection_id': {'key': 'vpnConnectionId', 'type': 'str'},
        'vpn_connection_duration': {'key': 'vpnConnectionDuration', 'type': 'long'},
        'vpn_connection_time': {'key': 'vpnConnectionTime', 'type': 'str'},
        'public_ip_address': {'key': 'publicIpAddress', 'type': 'str'},
        'private_ip_address': {'key': 'privateIpAddress', 'type': 'str'},
        'vpn_user_name': {'key': 'vpnUserName', 'type': 'str'},
        'max_bandwidth': {'key': 'maxBandwidth', 'type': 'long'},
        'egress_packets_transferred': {'key': 'egressPacketsTransferred', 'type': 'long'},
        'egress_bytes_transferred': {'key': 'egressBytesTransferred', 'type': 'long'},
        'ingress_packets_transferred': {'key': 'ingressPacketsTransferred', 'type': 'long'},
        'ingress_bytes_transferred': {'key': 'ingressBytesTransferred', 'type': 'long'},
        'max_packets_per_second': {'key': 'maxPacketsPerSecond', 'type': 'long'},
    }

    def __init__(self, **kwargs):
        super(VpnClientConnectionHealthDetail, self).__init__(**kwargs)
        # Every attribute is read-only and populated by the service, so each
        # one simply starts out as None.
        for attr in ('vpn_connection_id', 'vpn_connection_duration',
                     'vpn_connection_time', 'public_ip_address',
                     'private_ip_address', 'vpn_user_name', 'max_bandwidth',
                     'egress_packets_transferred', 'egress_bytes_transferred',
                     'ingress_packets_transferred', 'ingress_bytes_transferred',
                     'max_packets_per_second'):
            setattr(self, attr, None)
class VpnClientConnectionHealthDetailListResult(msrest.serialization.Model):
    """A list of virtual network gateway vpn client connection health entries.

    :param value: List of vpn client connection health.
    :type value: list[~azure.mgmt.network.v2020_04_01.models.VpnClientConnectionHealthDetail]
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[VpnClientConnectionHealthDetail]'},
    }

    def __init__(self, **kwargs):
        super(VpnClientConnectionHealthDetailListResult, self).__init__(**kwargs)
        # Optional list; None when the key is absent.
        self.value = kwargs.get('value')
class VpnClientIPsecParameters(msrest.serialization.Model):
    """IPSec parameters for a virtual network gateway P2S connection.

    All required parameters must be populated in order to send to Azure.

    :param sa_life_time_seconds: Required. The IPSec Security Association (also called Quick Mode
     or Phase 2 SA) lifetime in seconds for P2S client.
    :type sa_life_time_seconds: int
    :param sa_data_size_kilobytes: Required. The IPSec Security Association (also called Quick Mode
     or Phase 2 SA) payload size in KB for P2S client..
    :type sa_data_size_kilobytes: int
    :param ipsec_encryption: Required. The IPSec encryption algorithm (IKE phase 1). Possible
     values include: "None", "DES", "DES3", "AES128", "AES192", "AES256", "GCMAES128", "GCMAES192",
     "GCMAES256".
    :type ipsec_encryption: str or ~azure.mgmt.network.v2020_04_01.models.IpsecEncryption
    :param ipsec_integrity: Required. The IPSec integrity algorithm (IKE phase 1). Possible values
     include: "MD5", "SHA1", "SHA256", "GCMAES128", "GCMAES192", "GCMAES256".
    :type ipsec_integrity: str or ~azure.mgmt.network.v2020_04_01.models.IpsecIntegrity
    :param ike_encryption: Required. The IKE encryption algorithm (IKE phase 2). Possible values
     include: "DES", "DES3", "AES128", "AES192", "AES256", "GCMAES256", "GCMAES128".
    :type ike_encryption: str or ~azure.mgmt.network.v2020_04_01.models.IkeEncryption
    :param ike_integrity: Required. The IKE integrity algorithm (IKE phase 2). Possible values
     include: "MD5", "SHA1", "SHA256", "SHA384", "GCMAES256", "GCMAES128".
    :type ike_integrity: str or ~azure.mgmt.network.v2020_04_01.models.IkeIntegrity
    :param dh_group: Required. The DH Group used in IKE Phase 1 for initial SA. Possible values
     include: "None", "DHGroup1", "DHGroup2", "DHGroup14", "DHGroup2048", "ECP256", "ECP384",
     "DHGroup24".
    :type dh_group: str or ~azure.mgmt.network.v2020_04_01.models.DhGroup
    :param pfs_group: Required. The Pfs Group used in IKE Phase 2 for new child SA. Possible values
     include: "None", "PFS1", "PFS2", "PFS2048", "ECP256", "ECP384", "PFS24", "PFS14", "PFSMM".
    :type pfs_group: str or ~azure.mgmt.network.v2020_04_01.models.PfsGroup
    """

    _validation = {
        'sa_life_time_seconds': {'required': True},
        'sa_data_size_kilobytes': {'required': True},
        'ipsec_encryption': {'required': True},
        'ipsec_integrity': {'required': True},
        'ike_encryption': {'required': True},
        'ike_integrity': {'required': True},
        'dh_group': {'required': True},
        'pfs_group': {'required': True},
    }

    _attribute_map = {
        'sa_life_time_seconds': {'key': 'saLifeTimeSeconds', 'type': 'int'},
        'sa_data_size_kilobytes': {'key': 'saDataSizeKilobytes', 'type': 'int'},
        'ipsec_encryption': {'key': 'ipsecEncryption', 'type': 'str'},
        'ipsec_integrity': {'key': 'ipsecIntegrity', 'type': 'str'},
        'ike_encryption': {'key': 'ikeEncryption', 'type': 'str'},
        'ike_integrity': {'key': 'ikeIntegrity', 'type': 'str'},
        'dh_group': {'key': 'dhGroup', 'type': 'str'},
        'pfs_group': {'key': 'pfsGroup', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VpnClientIPsecParameters, self).__init__(**kwargs)
        # All fields are mandatory: a missing kwarg raises KeyError, mirroring
        # the 'required' entries in _validation.
        for field in ('sa_life_time_seconds', 'sa_data_size_kilobytes',
                      'ipsec_encryption', 'ipsec_integrity', 'ike_encryption',
                      'ike_integrity', 'dh_group', 'pfs_group'):
            setattr(self, field, kwargs[field])
class VpnClientParameters(msrest.serialization.Model):
    """Vpn client parameters used for package generation.

    :param processor_architecture: VPN client Processor Architecture. Possible values include:
     "Amd64", "X86".
    :type processor_architecture: str or
     ~azure.mgmt.network.v2020_04_01.models.ProcessorArchitecture
    :param authentication_method: VPN client authentication method. Possible values include:
     "EAPTLS", "EAPMSCHAPv2".
    :type authentication_method: str or ~azure.mgmt.network.v2020_04_01.models.AuthenticationMethod
    :param radius_server_auth_certificate: The public certificate data for the radius server
     authentication certificate as a Base-64 encoded string. Required only if external radius
     authentication has been configured with EAPTLS authentication.
    :type radius_server_auth_certificate: str
    :param client_root_certificates: A list of client root certificates public certificate data
     encoded as Base-64 strings. Optional parameter for external radius based authentication with
     EAPTLS.
    :type client_root_certificates: list[str]
    """

    _attribute_map = {
        'processor_architecture': {'key': 'processorArchitecture', 'type': 'str'},
        'authentication_method': {'key': 'authenticationMethod', 'type': 'str'},
        'radius_server_auth_certificate': {'key': 'radiusServerAuthCertificate', 'type': 'str'},
        'client_root_certificates': {'key': 'clientRootCertificates', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        super(VpnClientParameters, self).__init__(**kwargs)
        # All parameters are optional; absent keys default to None.
        self.processor_architecture = kwargs.get('processor_architecture')
        self.authentication_method = kwargs.get('authentication_method')
        self.radius_server_auth_certificate = kwargs.get('radius_server_auth_certificate')
        self.client_root_certificates = kwargs.get('client_root_certificates')
class VpnClientRevokedCertificate(SubResource):
    """A revoked VPN client certificate of a virtual network gateway.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: The name of the resource that is unique within a resource group. This name can be
     used to access the resource.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param thumbprint: The revoked VPN client certificate thumbprint.
    :type thumbprint: str
    :ivar provisioning_state: The provisioning state of the VPN client revoked certificate
     resource. Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    _validation = {
        'etag': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'thumbprint': {'key': 'properties.thumbprint', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VpnClientRevokedCertificate, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        self.thumbprint = kwargs.get('thumbprint')
        # Read-only, server-populated attributes.
        self.etag = None
        self.provisioning_state = None
class VpnClientRootCertificate(SubResource):
    """A VPN client root certificate of a virtual network gateway.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :param id: Resource ID.
    :type id: str
    :param name: The name of the resource that is unique within a resource group. This name can be
     used to access the resource.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param public_cert_data: Required. The certificate public data.
    :type public_cert_data: str
    :ivar provisioning_state: The provisioning state of the VPN client root certificate resource.
     Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    _validation = {
        'etag': {'readonly': True},
        'public_cert_data': {'required': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'public_cert_data': {'key': 'properties.publicCertData', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VpnClientRootCertificate, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        # 'public_cert_data' is required: a missing kwarg raises KeyError.
        self.public_cert_data = kwargs['public_cert_data']
        # Read-only, server-populated attributes.
        self.etag = None
        self.provisioning_state = None
class VpnConnection(SubResource):
    """VpnConnection Resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: The name of the resource that is unique within a resource group. This name can be
     used to access the resource.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param remote_vpn_site: Id of the connected vpn site.
    :type remote_vpn_site: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param routing_weight: Routing weight for vpn connection.
    :type routing_weight: int
    :param dpd_timeout_seconds: The dead peer detection timeout for a vpn connection in seconds.
    :type dpd_timeout_seconds: int
    :ivar connection_status: The connection status. Possible values include: "Unknown",
     "Connecting", "Connected", "NotConnected".
    :vartype connection_status: str or ~azure.mgmt.network.v2020_04_01.models.VpnConnectionStatus
    :param vpn_connection_protocol_type: Connection protocol used for this connection. Possible
     values include: "IKEv2", "IKEv1".
    :type vpn_connection_protocol_type: str or
     ~azure.mgmt.network.v2020_04_01.models.VirtualNetworkGatewayConnectionProtocol
    :ivar ingress_bytes_transferred: Ingress bytes transferred.
    :vartype ingress_bytes_transferred: long
    :ivar egress_bytes_transferred: Egress bytes transferred.
    :vartype egress_bytes_transferred: long
    :param connection_bandwidth: Expected bandwidth in MBPS.
    :type connection_bandwidth: int
    :param shared_key: SharedKey for the vpn connection.
    :type shared_key: str
    :param enable_bgp: EnableBgp flag.
    :type enable_bgp: bool
    :param use_policy_based_traffic_selectors: Enable policy-based traffic selectors.
    :type use_policy_based_traffic_selectors: bool
    :param ipsec_policies: The IPSec Policies to be considered by this connection.
    :type ipsec_policies: list[~azure.mgmt.network.v2020_04_01.models.IpsecPolicy]
    :param enable_rate_limiting: EnableBgp flag.
    :type enable_rate_limiting: bool
    :param enable_internet_security: Enable internet security.
    :type enable_internet_security: bool
    :param use_local_azure_ip_address: Use local azure ip to initiate connection.
    :type use_local_azure_ip_address: bool
    :ivar provisioning_state: The provisioning state of the VPN connection resource. Possible
     values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    :param vpn_link_connections: List of all vpn site link connections to the gateway.
    :type vpn_link_connections: list[~azure.mgmt.network.v2020_04_01.models.VpnSiteLinkConnection]
    :param routing_configuration: The Routing Configuration indicating the associated and
     propagated route tables on this connection.
    :type routing_configuration: ~azure.mgmt.network.v2020_04_01.models.RoutingConfiguration
    """

    _validation = {
        'etag': {'readonly': True},
        'connection_status': {'readonly': True},
        'ingress_bytes_transferred': {'readonly': True},
        'egress_bytes_transferred': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'remote_vpn_site': {'key': 'properties.remoteVpnSite', 'type': 'SubResource'},
        'routing_weight': {'key': 'properties.routingWeight', 'type': 'int'},
        'dpd_timeout_seconds': {'key': 'properties.dpdTimeoutSeconds', 'type': 'int'},
        'connection_status': {'key': 'properties.connectionStatus', 'type': 'str'},
        'vpn_connection_protocol_type': {'key': 'properties.vpnConnectionProtocolType', 'type': 'str'},
        'ingress_bytes_transferred': {'key': 'properties.ingressBytesTransferred', 'type': 'long'},
        'egress_bytes_transferred': {'key': 'properties.egressBytesTransferred', 'type': 'long'},
        'connection_bandwidth': {'key': 'properties.connectionBandwidth', 'type': 'int'},
        'shared_key': {'key': 'properties.sharedKey', 'type': 'str'},
        'enable_bgp': {'key': 'properties.enableBgp', 'type': 'bool'},
        'use_policy_based_traffic_selectors': {'key': 'properties.usePolicyBasedTrafficSelectors', 'type': 'bool'},
        'ipsec_policies': {'key': 'properties.ipsecPolicies', 'type': '[IpsecPolicy]'},
        'enable_rate_limiting': {'key': 'properties.enableRateLimiting', 'type': 'bool'},
        'enable_internet_security': {'key': 'properties.enableInternetSecurity', 'type': 'bool'},
        'use_local_azure_ip_address': {'key': 'properties.useLocalAzureIpAddress', 'type': 'bool'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'vpn_link_connections': {'key': 'properties.vpnLinkConnections', 'type': '[VpnSiteLinkConnection]'},
        'routing_configuration': {'key': 'properties.routingConfiguration', 'type': 'RoutingConfiguration'},
    }

    def __init__(self, **kwargs):
        super(VpnConnection, self).__init__(**kwargs)
        # Read-only, server-populated attributes start out as None.
        self.etag = None
        self.connection_status = None
        self.ingress_bytes_transferred = None
        self.egress_bytes_transferred = None
        self.provisioning_state = None
        # Optional, caller-settable properties (None when not supplied).
        for prop in ('name', 'remote_vpn_site', 'routing_weight',
                     'dpd_timeout_seconds', 'vpn_connection_protocol_type',
                     'connection_bandwidth', 'shared_key', 'enable_bgp',
                     'use_policy_based_traffic_selectors', 'ipsec_policies',
                     'enable_rate_limiting', 'enable_internet_security',
                     'use_local_azure_ip_address', 'vpn_link_connections',
                     'routing_configuration'):
            setattr(self, prop, kwargs.get(prop))
class VpnDeviceScriptParameters(msrest.serialization.Model):
    """Parameters used to generate a vpn device configuration script.

    :param vendor: The vendor for the vpn device.
    :type vendor: str
    :param device_family: The device family for the vpn device.
    :type device_family: str
    :param firmware_version: The firmware version for the vpn device.
    :type firmware_version: str
    """

    _attribute_map = {
        'vendor': {'key': 'vendor', 'type': 'str'},
        'device_family': {'key': 'deviceFamily', 'type': 'str'},
        'firmware_version': {'key': 'firmwareVersion', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VpnDeviceScriptParameters, self).__init__(**kwargs)
        # All parameters are optional; absent keys default to None.
        self.vendor = kwargs.get('vendor')
        self.device_family = kwargs.get('device_family')
        self.firmware_version = kwargs.get('firmware_version')
class VpnGateway(Resource):
    """VpnGateway Resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param virtual_hub: The VirtualHub to which the gateway belongs.
    :type virtual_hub: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param connections: List of all vpn connections to the gateway.
    :type connections: list[~azure.mgmt.network.v2020_04_01.models.VpnConnection]
    :param bgp_settings: Local network gateway's BGP speaker settings.
    :type bgp_settings: ~azure.mgmt.network.v2020_04_01.models.BgpSettings
    :ivar provisioning_state: The provisioning state of the VPN gateway resource. Possible values
     include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    :param vpn_gateway_scale_unit: The scale unit for this vpn gateway.
    :type vpn_gateway_scale_unit: int
    """

    # Fields the service treats as read-only (the ':ivar' entries above).
    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'etag': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    # Maps python attribute names to their wire (JSON) keys and msrest type strings.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'etag': {'key': 'etag', 'type': 'str'},
        'virtual_hub': {'key': 'properties.virtualHub', 'type': 'SubResource'},
        'connections': {'key': 'properties.connections', 'type': '[VpnConnection]'},
        'bgp_settings': {'key': 'properties.bgpSettings', 'type': 'BgpSettings'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'vpn_gateway_scale_unit': {'key': 'properties.vpnGatewayScaleUnit', 'type': 'int'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(VpnGateway, self).__init__(**kwargs)
        # Read-only fields are always initialized to None on the client side.
        self.etag = None
        self.virtual_hub = kwargs.get('virtual_hub', None)
        self.connections = kwargs.get('connections', None)
        self.bgp_settings = kwargs.get('bgp_settings', None)
        self.provisioning_state = None  # server-populated
        self.vpn_gateway_scale_unit = kwargs.get('vpn_gateway_scale_unit', None)
class VpnLinkBgpSettings(msrest.serialization.Model):
    """BGP settings details for a link.

    :param asn: The BGP speaker's ASN.
    :type asn: long
    :param bgp_peering_address: The BGP peering address and BGP identifier of this BGP speaker.
    :type bgp_peering_address: str
    """

    _attribute_map = {
        'asn': {'key': 'asn', 'type': 'long'},
        'bgp_peering_address': {'key': 'bgpPeeringAddress', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VpnLinkBgpSettings, self).__init__(**kwargs)
        # Both settings are optional; absent keys become None.
        self.asn = kwargs.get('asn')
        self.bgp_peering_address = kwargs.get('bgp_peering_address')
class VpnLinkProviderProperties(msrest.serialization.Model):
    """Properties describing a link provider.

    :param link_provider_name: Name of the link provider.
    :type link_provider_name: str
    :param link_speed_in_mbps: Link speed.
    :type link_speed_in_mbps: int
    """

    _attribute_map = {
        'link_provider_name': {'key': 'linkProviderName', 'type': 'str'},
        'link_speed_in_mbps': {'key': 'linkSpeedInMbps', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        super(VpnLinkProviderProperties, self).__init__(**kwargs)
        # Optional inputs; missing keys default to None.
        self.link_provider_name = kwargs.get('link_provider_name')
        self.link_speed_in_mbps = kwargs.get('link_speed_in_mbps')
class VpnPacketCaptureStartParameters(msrest.serialization.Model):
    """Parameters for starting a packet capture on a virtual network gateway.

    :param filter_data: Start Packet capture parameters.
    :type filter_data: str
    """

    _attribute_map = {
        'filter_data': {'key': 'filterData', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VpnPacketCaptureStartParameters, self).__init__(**kwargs)
        # Optional capture filter; None when no filter is supplied.
        self.filter_data = kwargs.get('filter_data')
class VpnPacketCaptureStopParameters(msrest.serialization.Model):
    """Parameters for stopping a packet capture.

    :param sas_url: SAS url for packet capture on virtual network gateway.
    :type sas_url: str
    """

    _attribute_map = {
        'sas_url': {'key': 'sasUrl', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VpnPacketCaptureStopParameters, self).__init__(**kwargs)
        # Optional; defaults to None when not provided.
        self.sas_url = kwargs.get('sas_url')
class VpnProfileResponse(msrest.serialization.Model):
    """Vpn Profile Response for package generation.

    :param profile_url: URL to the VPN profile.
    :type profile_url: str
    """

    _attribute_map = {
        'profile_url': {'key': 'profileUrl', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VpnProfileResponse, self).__init__(**kwargs)
        # Optional; defaults to None when not provided.
        self.profile_url = kwargs.get('profile_url')
class VpnServerConfigRadiusClientRootCertificate(msrest.serialization.Model):
    """Properties of the Radius client root certificate of VpnServerConfiguration.

    :param name: The certificate name.
    :type name: str
    :param thumbprint: The Radius client root certificate thumbprint.
    :type thumbprint: str
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'thumbprint': {'key': 'thumbprint', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VpnServerConfigRadiusClientRootCertificate, self).__init__(**kwargs)
        # Both fields are optional; missing keys default to None.
        for _name in ('name', 'thumbprint'):
            setattr(self, _name, kwargs.get(_name))
class VpnServerConfigRadiusServerRootCertificate(msrest.serialization.Model):
    """Properties of Radius Server root certificate of VpnServerConfiguration.

    :param name: The certificate name.
    :type name: str
    :param public_cert_data: The certificate public data.
    :type public_cert_data: str
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'public_cert_data': {'key': 'publicCertData', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VpnServerConfigRadiusServerRootCertificate, self).__init__(**kwargs)
        # Both fields are optional; missing keys default to None.
        for _name in ('name', 'public_cert_data'):
            setattr(self, _name, kwargs.get(_name))
class VpnServerConfiguration(Resource):
    """VpnServerConfiguration Resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param name_properties_name: The name of the VpnServerConfiguration that is unique within a
     resource group.
    :type name_properties_name: str
    :param vpn_protocols: VPN protocols for the VpnServerConfiguration.
    :type vpn_protocols: list[str or
     ~azure.mgmt.network.v2020_04_01.models.VpnGatewayTunnelingProtocol]
    :param vpn_authentication_types: VPN authentication types for the VpnServerConfiguration.
    :type vpn_authentication_types: list[str or
     ~azure.mgmt.network.v2020_04_01.models.VpnAuthenticationType]
    :param vpn_client_root_certificates: VPN client root certificate of VpnServerConfiguration.
    :type vpn_client_root_certificates:
     list[~azure.mgmt.network.v2020_04_01.models.VpnServerConfigVpnClientRootCertificate]
    :param vpn_client_revoked_certificates: VPN client revoked certificate of
     VpnServerConfiguration.
    :type vpn_client_revoked_certificates:
     list[~azure.mgmt.network.v2020_04_01.models.VpnServerConfigVpnClientRevokedCertificate]
    :param radius_server_root_certificates: Radius Server root certificate of
     VpnServerConfiguration.
    :type radius_server_root_certificates:
     list[~azure.mgmt.network.v2020_04_01.models.VpnServerConfigRadiusServerRootCertificate]
    :param radius_client_root_certificates: Radius client root certificate of
     VpnServerConfiguration.
    :type radius_client_root_certificates:
     list[~azure.mgmt.network.v2020_04_01.models.VpnServerConfigRadiusClientRootCertificate]
    :param vpn_client_ipsec_policies: VpnClientIpsecPolicies for VpnServerConfiguration.
    :type vpn_client_ipsec_policies: list[~azure.mgmt.network.v2020_04_01.models.IpsecPolicy]
    :param radius_server_address: The radius server address property of the VpnServerConfiguration
     resource for point to site client connection.
    :type radius_server_address: str
    :param radius_server_secret: The radius secret property of the VpnServerConfiguration resource
     for point to site client connection.
    :type radius_server_secret: str
    :param radius_servers: Multiple Radius Server configuration for VpnServerConfiguration.
    :type radius_servers: list[~azure.mgmt.network.v2020_04_01.models.RadiusServer]
    :param aad_authentication_parameters: The set of aad vpn authentication parameters.
    :type aad_authentication_parameters:
     ~azure.mgmt.network.v2020_04_01.models.AadAuthenticationParameters
    :ivar provisioning_state: The provisioning state of the VpnServerConfiguration resource.
     Possible values are: 'Updating', 'Deleting', and 'Failed'.
    :vartype provisioning_state: str
    :ivar p2_s_vpn_gateways: List of references to P2SVpnGateways.
    :vartype p2_s_vpn_gateways: list[~azure.mgmt.network.v2020_04_01.models.P2SVpnGateway]
    :ivar etag_properties_etag: A unique read-only string that changes whenever the resource is
     updated.
    :vartype etag_properties_etag: str
    """

    # Fields the service treats as read-only (the ':ivar' entries above).
    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'etag': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'p2_s_vpn_gateways': {'readonly': True},
        'etag_properties_etag': {'readonly': True},
    }

    # Maps python attribute names to their wire (JSON) keys and msrest type strings.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'etag': {'key': 'etag', 'type': 'str'},
        'name_properties_name': {'key': 'properties.name', 'type': 'str'},
        'vpn_protocols': {'key': 'properties.vpnProtocols', 'type': '[str]'},
        'vpn_authentication_types': {'key': 'properties.vpnAuthenticationTypes', 'type': '[str]'},
        'vpn_client_root_certificates': {'key': 'properties.vpnClientRootCertificates', 'type': '[VpnServerConfigVpnClientRootCertificate]'},
        'vpn_client_revoked_certificates': {'key': 'properties.vpnClientRevokedCertificates', 'type': '[VpnServerConfigVpnClientRevokedCertificate]'},
        'radius_server_root_certificates': {'key': 'properties.radiusServerRootCertificates', 'type': '[VpnServerConfigRadiusServerRootCertificate]'},
        'radius_client_root_certificates': {'key': 'properties.radiusClientRootCertificates', 'type': '[VpnServerConfigRadiusClientRootCertificate]'},
        'vpn_client_ipsec_policies': {'key': 'properties.vpnClientIpsecPolicies', 'type': '[IpsecPolicy]'},
        'radius_server_address': {'key': 'properties.radiusServerAddress', 'type': 'str'},
        'radius_server_secret': {'key': 'properties.radiusServerSecret', 'type': 'str'},
        'radius_servers': {'key': 'properties.radiusServers', 'type': '[RadiusServer]'},
        'aad_authentication_parameters': {'key': 'properties.aadAuthenticationParameters', 'type': 'AadAuthenticationParameters'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'p2_s_vpn_gateways': {'key': 'properties.p2SVpnGateways', 'type': '[P2SVpnGateway]'},
        'etag_properties_etag': {'key': 'properties.etag', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(VpnServerConfiguration, self).__init__(**kwargs)
        # Read-only fields are always initialized to None on the client side.
        self.etag = None
        self.name_properties_name = kwargs.get('name_properties_name', None)
        self.vpn_protocols = kwargs.get('vpn_protocols', None)
        self.vpn_authentication_types = kwargs.get('vpn_authentication_types', None)
        self.vpn_client_root_certificates = kwargs.get('vpn_client_root_certificates', None)
        self.vpn_client_revoked_certificates = kwargs.get('vpn_client_revoked_certificates', None)
        self.radius_server_root_certificates = kwargs.get('radius_server_root_certificates', None)
        self.radius_client_root_certificates = kwargs.get('radius_client_root_certificates', None)
        self.vpn_client_ipsec_policies = kwargs.get('vpn_client_ipsec_policies', None)
        self.radius_server_address = kwargs.get('radius_server_address', None)
        self.radius_server_secret = kwargs.get('radius_server_secret', None)
        self.radius_servers = kwargs.get('radius_servers', None)
        self.aad_authentication_parameters = kwargs.get('aad_authentication_parameters', None)
        self.provisioning_state = None  # server-populated
        self.p2_s_vpn_gateways = None  # server-populated
        self.etag_properties_etag = None  # server-populated
class VpnServerConfigurationsResponse(msrest.serialization.Model):
    """VpnServerConfigurations list associated with VirtualWan Response.

    :param vpn_server_configuration_resource_ids: List of VpnServerConfigurations associated with
     VirtualWan.
    :type vpn_server_configuration_resource_ids: list[str]
    """

    _attribute_map = {
        'vpn_server_configuration_resource_ids': {'key': 'vpnServerConfigurationResourceIds', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        super(VpnServerConfigurationsResponse, self).__init__(**kwargs)
        # Optional; defaults to None when the caller supplies no id list.
        self.vpn_server_configuration_resource_ids = kwargs.get('vpn_server_configuration_resource_ids')
class VpnServerConfigVpnClientRevokedCertificate(msrest.serialization.Model):
    """Properties of the revoked VPN client certificate of VpnServerConfiguration.

    :param name: The certificate name.
    :type name: str
    :param thumbprint: The revoked VPN client certificate thumbprint.
    :type thumbprint: str
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'thumbprint': {'key': 'thumbprint', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VpnServerConfigVpnClientRevokedCertificate, self).__init__(**kwargs)
        # Both fields are optional; missing keys default to None.
        for _name in ('name', 'thumbprint'):
            setattr(self, _name, kwargs.get(_name))
class VpnServerConfigVpnClientRootCertificate(msrest.serialization.Model):
    """Properties of VPN client root certificate of VpnServerConfiguration.

    :param name: The certificate name.
    :type name: str
    :param public_cert_data: The certificate public data.
    :type public_cert_data: str
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'public_cert_data': {'key': 'publicCertData', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VpnServerConfigVpnClientRootCertificate, self).__init__(**kwargs)
        # Both fields are optional; missing keys default to None.
        for _name in ('name', 'public_cert_data'):
            setattr(self, _name, kwargs.get(_name))
class VpnSite(Resource):
    """VpnSite Resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param virtual_wan: The VirtualWAN to which the vpnSite belongs.
    :type virtual_wan: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param device_properties: The device properties.
    :type device_properties: ~azure.mgmt.network.v2020_04_01.models.DeviceProperties
    :param ip_address: The ip-address for the vpn-site.
    :type ip_address: str
    :param site_key: The key for vpn-site that can be used for connections.
    :type site_key: str
    :param address_space: The AddressSpace that contains an array of IP address ranges.
    :type address_space: ~azure.mgmt.network.v2020_04_01.models.AddressSpace
    :param bgp_properties: The set of bgp properties.
    :type bgp_properties: ~azure.mgmt.network.v2020_04_01.models.BgpSettings
    :ivar provisioning_state: The provisioning state of the VPN site resource. Possible values
     include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    :param is_security_site: IsSecuritySite flag.
    :type is_security_site: bool
    :param vpn_site_links: List of all vpn site links.
    :type vpn_site_links: list[~azure.mgmt.network.v2020_04_01.models.VpnSiteLink]
    """

    # Fields the service treats as read-only (the ':ivar' entries above).
    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'etag': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    # Maps python attribute names to their wire (JSON) keys and msrest type strings.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'etag': {'key': 'etag', 'type': 'str'},
        'virtual_wan': {'key': 'properties.virtualWan', 'type': 'SubResource'},
        'device_properties': {'key': 'properties.deviceProperties', 'type': 'DeviceProperties'},
        'ip_address': {'key': 'properties.ipAddress', 'type': 'str'},
        'site_key': {'key': 'properties.siteKey', 'type': 'str'},
        'address_space': {'key': 'properties.addressSpace', 'type': 'AddressSpace'},
        'bgp_properties': {'key': 'properties.bgpProperties', 'type': 'BgpSettings'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'is_security_site': {'key': 'properties.isSecuritySite', 'type': 'bool'},
        'vpn_site_links': {'key': 'properties.vpnSiteLinks', 'type': '[VpnSiteLink]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(VpnSite, self).__init__(**kwargs)
        # Read-only fields are always initialized to None on the client side.
        self.etag = None
        self.virtual_wan = kwargs.get('virtual_wan', None)
        self.device_properties = kwargs.get('device_properties', None)
        self.ip_address = kwargs.get('ip_address', None)
        self.site_key = kwargs.get('site_key', None)
        self.address_space = kwargs.get('address_space', None)
        self.bgp_properties = kwargs.get('bgp_properties', None)
        self.provisioning_state = None  # server-populated
        self.is_security_site = kwargs.get('is_security_site', None)
        self.vpn_site_links = kwargs.get('vpn_site_links', None)
class VpnSiteId(msrest.serialization.Model):
    """Identifies a VpnSite resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar vpn_site: The resource-uri of the vpn-site for which config is to be fetched.
    :vartype vpn_site: str
    """

    _validation = {
        'vpn_site': {'readonly': True},
    }

    _attribute_map = {
        'vpn_site': {'key': 'vpnSite', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VpnSiteId, self).__init__(**kwargs)
        # Read-only: filled in from the service response, never by callers.
        self.vpn_site = None
class VpnSiteLink(SubResource):
    """VpnSiteLink Resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param name: The name of the resource that is unique within a resource group. This name can be
     used to access the resource.
    :type name: str
    :ivar type: Resource type.
    :vartype type: str
    :param link_properties: The link provider properties.
    :type link_properties: ~azure.mgmt.network.v2020_04_01.models.VpnLinkProviderProperties
    :param ip_address: The ip-address for the vpn-site-link.
    :type ip_address: str
    :param fqdn: FQDN of vpn-site-link.
    :type fqdn: str
    :param bgp_properties: The set of bgp properties.
    :type bgp_properties: ~azure.mgmt.network.v2020_04_01.models.VpnLinkBgpSettings
    :ivar provisioning_state: The provisioning state of the VPN site link resource. Possible values
     include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    # Fields the service treats as read-only (the ':ivar' entries above).
    _validation = {
        'etag': {'readonly': True},
        'type': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    # Maps python attribute names to their wire (JSON) keys and msrest type strings.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'link_properties': {'key': 'properties.linkProperties', 'type': 'VpnLinkProviderProperties'},
        'ip_address': {'key': 'properties.ipAddress', 'type': 'str'},
        'fqdn': {'key': 'properties.fqdn', 'type': 'str'},
        'bgp_properties': {'key': 'properties.bgpProperties', 'type': 'VpnLinkBgpSettings'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(VpnSiteLink, self).__init__(**kwargs)
        # Read-only fields are always initialized to None on the client side.
        self.etag = None
        self.name = kwargs.get('name', None)
        self.type = None  # server-populated
        self.link_properties = kwargs.get('link_properties', None)
        self.ip_address = kwargs.get('ip_address', None)
        self.fqdn = kwargs.get('fqdn', None)
        self.bgp_properties = kwargs.get('bgp_properties', None)
        self.provisioning_state = None  # server-populated
class VpnSiteLinkConnection(SubResource):
    """VpnSiteLinkConnection Resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :param name: The name of the resource that is unique within a resource group. This name can be
     used to access the resource.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar type: Resource type.
    :vartype type: str
    :param vpn_site_link: Id of the connected vpn site link.
    :type vpn_site_link: ~azure.mgmt.network.v2020_04_01.models.SubResource
    :param routing_weight: Routing weight for vpn connection.
    :type routing_weight: int
    :ivar connection_status: The connection status. Possible values include: "Unknown",
     "Connecting", "Connected", "NotConnected".
    :vartype connection_status: str or ~azure.mgmt.network.v2020_04_01.models.VpnConnectionStatus
    :param vpn_connection_protocol_type: Connection protocol used for this connection. Possible
     values include: "IKEv2", "IKEv1".
    :type vpn_connection_protocol_type: str or
     ~azure.mgmt.network.v2020_04_01.models.VirtualNetworkGatewayConnectionProtocol
    :ivar ingress_bytes_transferred: Ingress bytes transferred.
    :vartype ingress_bytes_transferred: long
    :ivar egress_bytes_transferred: Egress bytes transferred.
    :vartype egress_bytes_transferred: long
    :param connection_bandwidth: Expected bandwidth in MBPS.
    :type connection_bandwidth: int
    :param shared_key: SharedKey for the vpn connection.
    :type shared_key: str
    :param enable_bgp: EnableBgp flag.
    :type enable_bgp: bool
    :param use_policy_based_traffic_selectors: Enable policy-based traffic selectors.
    :type use_policy_based_traffic_selectors: bool
    :param ipsec_policies: The IPSec Policies to be considered by this connection.
    :type ipsec_policies: list[~azure.mgmt.network.v2020_04_01.models.IpsecPolicy]
    :param enable_rate_limiting: EnableBgp flag.
    :type enable_rate_limiting: bool
    :param use_local_azure_ip_address: Use local azure ip to initiate connection.
    :type use_local_azure_ip_address: bool
    :ivar provisioning_state: The provisioning state of the VPN site link connection resource.
     Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    """

    # Fields the service treats as read-only (the ':ivar' entries above).
    _validation = {
        'etag': {'readonly': True},
        'type': {'readonly': True},
        'connection_status': {'readonly': True},
        'ingress_bytes_transferred': {'readonly': True},
        'egress_bytes_transferred': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    # Maps python attribute names to their wire (JSON) keys and msrest type strings.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'vpn_site_link': {'key': 'properties.vpnSiteLink', 'type': 'SubResource'},
        'routing_weight': {'key': 'properties.routingWeight', 'type': 'int'},
        'connection_status': {'key': 'properties.connectionStatus', 'type': 'str'},
        'vpn_connection_protocol_type': {'key': 'properties.vpnConnectionProtocolType', 'type': 'str'},
        'ingress_bytes_transferred': {'key': 'properties.ingressBytesTransferred', 'type': 'long'},
        'egress_bytes_transferred': {'key': 'properties.egressBytesTransferred', 'type': 'long'},
        'connection_bandwidth': {'key': 'properties.connectionBandwidth', 'type': 'int'},
        'shared_key': {'key': 'properties.sharedKey', 'type': 'str'},
        'enable_bgp': {'key': 'properties.enableBgp', 'type': 'bool'},
        'use_policy_based_traffic_selectors': {'key': 'properties.usePolicyBasedTrafficSelectors', 'type': 'bool'},
        'ipsec_policies': {'key': 'properties.ipsecPolicies', 'type': '[IpsecPolicy]'},
        'enable_rate_limiting': {'key': 'properties.enableRateLimiting', 'type': 'bool'},
        'use_local_azure_ip_address': {'key': 'properties.useLocalAzureIpAddress', 'type': 'bool'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(VpnSiteLinkConnection, self).__init__(**kwargs)
        # Read-only fields are always initialized to None on the client side.
        self.name = kwargs.get('name', None)
        self.etag = None
        self.type = None  # server-populated
        self.vpn_site_link = kwargs.get('vpn_site_link', None)
        self.routing_weight = kwargs.get('routing_weight', None)
        self.connection_status = None  # server-populated
        self.vpn_connection_protocol_type = kwargs.get('vpn_connection_protocol_type', None)
        self.ingress_bytes_transferred = None  # server-populated
        self.egress_bytes_transferred = None  # server-populated
        self.connection_bandwidth = kwargs.get('connection_bandwidth', None)
        self.shared_key = kwargs.get('shared_key', None)
        self.enable_bgp = kwargs.get('enable_bgp', None)
        self.use_policy_based_traffic_selectors = kwargs.get('use_policy_based_traffic_selectors', None)
        self.ipsec_policies = kwargs.get('ipsec_policies', None)
        self.enable_rate_limiting = kwargs.get('enable_rate_limiting', None)
        self.use_local_azure_ip_address = kwargs.get('use_local_azure_ip_address', None)
        self.provisioning_state = None  # server-populated
class WebApplicationFirewallCustomRule(msrest.serialization.Model):
    """Defines contents of a web application rule.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :param name: The name of the resource that is unique within a policy. This name can be used to
     access the resource.
    :type name: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param priority: Required. Priority of the rule. Rules with a lower value will be evaluated
     before rules with a higher value.
    :type priority: int
    :param rule_type: Required. The rule type. Possible values include: "MatchRule", "Invalid".
    :type rule_type: str or ~azure.mgmt.network.v2020_04_01.models.WebApplicationFirewallRuleType
    :param match_conditions: Required. List of match conditions.
    :type match_conditions: list[~azure.mgmt.network.v2020_04_01.models.MatchCondition]
    :param action: Required. Type of Actions. Possible values include: "Allow", "Block", "Log".
    :type action: str or ~azure.mgmt.network.v2020_04_01.models.WebApplicationFirewallAction
    """

    # 'required' entries enforce presence; 'etag' is server-populated.
    _validation = {
        'name': {'max_length': 128, 'min_length': 0},
        'etag': {'readonly': True},
        'priority': {'required': True},
        'rule_type': {'required': True},
        'match_conditions': {'required': True},
        'action': {'required': True},
    }

    # Maps python attribute names to their wire (JSON) keys and msrest type strings.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'priority': {'key': 'priority', 'type': 'int'},
        'rule_type': {'key': 'ruleType', 'type': 'str'},
        'match_conditions': {'key': 'matchConditions', 'type': '[MatchCondition]'},
        'action': {'key': 'action', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(WebApplicationFirewallCustomRule, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.etag = None  # server-populated
        # Required parameters use direct indexing: a missing key raises KeyError.
        self.priority = kwargs['priority']
        self.rule_type = kwargs['rule_type']
        self.match_conditions = kwargs['match_conditions']
        self.action = kwargs['action']
class WebApplicationFirewallPolicy(Resource):
    """Defines web application firewall policy.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :param policy_settings: The PolicySettings for policy.
    :type policy_settings: ~azure.mgmt.network.v2020_04_01.models.PolicySettings
    :param custom_rules: The custom rules inside the policy.
    :type custom_rules:
     list[~azure.mgmt.network.v2020_04_01.models.WebApplicationFirewallCustomRule]
    :ivar application_gateways: A collection of references to application gateways.
    :vartype application_gateways: list[~azure.mgmt.network.v2020_04_01.models.ApplicationGateway]
    :ivar provisioning_state: The provisioning state of the web application firewall policy
     resource. Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or ~azure.mgmt.network.v2020_04_01.models.ProvisioningState
    :ivar resource_state: Resource status of the policy. Possible values include: "Creating",
     "Enabling", "Enabled", "Disabling", "Disabled", "Deleting".
    :vartype resource_state: str or
     ~azure.mgmt.network.v2020_04_01.models.WebApplicationFirewallPolicyResourceState
    :param managed_rules: Describes the managedRules structure.
    :type managed_rules: ~azure.mgmt.network.v2020_04_01.models.ManagedRulesDefinition
    :ivar http_listeners: A collection of references to application gateway http listeners.
    :vartype http_listeners: list[~azure.mgmt.network.v2020_04_01.models.SubResource]
    :ivar path_based_rules: A collection of references to application gateway path rules.
    :vartype path_based_rules: list[~azure.mgmt.network.v2020_04_01.models.SubResource]
    """

    # Fields the service treats as read-only (the ':ivar' entries above).
    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'etag': {'readonly': True},
        'application_gateways': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'resource_state': {'readonly': True},
        'http_listeners': {'readonly': True},
        'path_based_rules': {'readonly': True},
    }

    # Maps python attribute names to their wire (JSON) keys and msrest type strings.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'etag': {'key': 'etag', 'type': 'str'},
        'policy_settings': {'key': 'properties.policySettings', 'type': 'PolicySettings'},
        'custom_rules': {'key': 'properties.customRules', 'type': '[WebApplicationFirewallCustomRule]'},
        'application_gateways': {'key': 'properties.applicationGateways', 'type': '[ApplicationGateway]'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'resource_state': {'key': 'properties.resourceState', 'type': 'str'},
        'managed_rules': {'key': 'properties.managedRules', 'type': 'ManagedRulesDefinition'},
        'http_listeners': {'key': 'properties.httpListeners', 'type': '[SubResource]'},
        'path_based_rules': {'key': 'properties.pathBasedRules', 'type': '[SubResource]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(WebApplicationFirewallPolicy, self).__init__(**kwargs)
        # Read-only fields are always initialized to None on the client side.
        self.etag = None
        self.policy_settings = kwargs.get('policy_settings', None)
        self.custom_rules = kwargs.get('custom_rules', None)
        self.application_gateways = None  # server-populated
        self.provisioning_state = None  # server-populated
        self.resource_state = None  # server-populated
        self.managed_rules = kwargs.get('managed_rules', None)
        self.http_listeners = None  # server-populated
        self.path_based_rules = None  # server-populated
class WebApplicationFirewallPolicyListResult(msrest.serialization.Model):
    """Result of the request to list WebApplicationFirewallPolicies. It contains a list of WebApplicationFirewallPolicy objects and a URL link to get the next set of results.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar value: List of WebApplicationFirewallPolicies within a resource group.
    :vartype value: list[~azure.mgmt.network.v2020_04_01.models.WebApplicationFirewallPolicy]
    :ivar next_link: URL to get the next set of WebApplicationFirewallPolicy objects if there are
     any.
    :vartype next_link: str
    """

    _validation = {
        'value': {'readonly': True},
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[WebApplicationFirewallPolicy]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(WebApplicationFirewallPolicyListResult, self).__init__(**kwargs)
        # Both fields are read-only and filled in from the service response.
        self.value = None
        self.next_link = None
|
self.destination_ports = kwargs.get('destination_ports', None)
self.protocols = kwargs.get('protocols', None)
|
"""
Django settings for MapaAsentamientosTecho project.
Generated by 'django-admin startproject' using Django 1.9.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'l4eg9uqh!s0w&6@2t+xdedd-7m=$1z13s7ylzi_mc^-w2m@jsk'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework', # Django API REST FRAMEWORK
'crispy_forms', #Formularios mas bonitos
'main',
'constance',
'constance.backends.database',
'import_export',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'MapaAsentamientosTecho.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'constance.context_processors.config',
],
},
},
]
WSGI_APPLICATION = 'MapaAsentamientosTecho.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
<|fim▁hole|># Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "static/")
# Django REST Framework: enable django-filter style filtering on all API
# views by default.
REST_FRAMEWORK = {
    'DEFAULT_FILTER_BACKENDS': ('rest_framework.filters.DjangoFilterBackend',)
}
# django-constance dynamic settings (editable from the admin).
# Each entry maps name -> (default value, help text, type).
CONSTANCE_CONFIG = {
    'dominio': ('localhost:8000', 'Dominio de la plataforma' , str),
    'Analytics': ('Analytics ID', 'Google Analytics' , str),
}
# Persist constance values in the database (the 'constance.backends.database'
# app is listed in INSTALLED_APPS above).
CONSTANCE_BACKEND = 'constance.backends.database.DatabaseBackend'
| |
<|file_name|>imagenator.ts<|end_file_name|><|fim▁begin|>//original code is from http://www.html5canvastutorials.com/tutorials/html5-canvas-wrap-text-tutorial/
class Imagenator {
static convertText(text: string = "표 표 고 으 로 리\r\n로 온 랑 문 로 스\r\n 점 자 죽 만 트\r\n치 과 마 더 간 스 들 도\r\n환 침 배 크 고\r\n! 꺾 표 따 경 롤 세\r\n 음 를 옴 에 하 옆 로") {
const canvas = document.createElement('canvas');
canvas.width = 578;
canvas.height = 120;
const maxWidth = 440;
const lineHeight = 28;
const context = canvas.getContext('2d');
context.font = '22px/28px Georgia, "Times New Roman", serif';
canvas.height += this._measureHeight(context, text, maxWidth, lineHeight);
const x = (canvas.width - maxWidth) / 2;
const y = 60;
//context = canvas.getContext('2d');
context.font = '22px/28px Georgia, "Times New Roman", serif';
context.fillStyle = "#FFF";
context.fillRect(0, 0, canvas.width, canvas.height);
context.fillStyle = '#000';
this._writeText(context, text, x, y, maxWidth, lineHeight);
return canvas.toDataURL('image/png');
}<|fim▁hole|> private static _writeText(context: CanvasRenderingContext2D, text: string, x: number, y: number, maxWidth: number, lineHeight: number) {
const words = text.split('');
let line = '';
for (var n = 0; n < words.length; n++) {
if (words[n] == '\n') {
y += lineHeight;
context.fillText(line, x, y);
line = '';
continue;
}
const testLine = line + words[n] + ' ';
const metrics = context.measureText(testLine);
const testWidth = metrics.width;
if (testWidth > maxWidth && n > 0) {
y += lineHeight;
context.fillText(line, x, y);
line = words[n] + ' ';
}
else {
line = testLine;
}
}
if (line.length > 0) {
y += lineHeight;
context.fillText(line, x, y);
}
}
private static _measureHeight(context: CanvasRenderingContext2D, text: string, maxWidth: number, lineHeight: number) {
const words = text.split('');
let line = '';
let height = 0;
for (var n = 0; n < words.length; n++) {
if (words[n] == '\n') {
line = '';
height += lineHeight;
continue;
}
const testLine = line + words[n] + ' ';
const metrics = context.measureText(testLine);
const testWidth = metrics.width;
if (testWidth > maxWidth && n > 0) {
line = words[n] + ' ';
height += lineHeight;
}
else {
line = testLine;
}
}
if (line.length > 0) {
height += lineHeight;
}
return height;
}
}<|fim▁end|>
| |
<|file_name|>getElementsByCache.js<|end_file_name|><|fim▁begin|><|fim▁hole|>// 如果没有缓存的话建立缓存,否则返回缓存中的标签对象
// Return the elements with the given tag name, memoizing the result on the
// function object itself (getElements.cache) so repeated lookups for the
// same tag skip the DOM query.
// NOTE(review): getElementsByTagName returns a *live* NodeList, so cached
// entries still reflect later DOM changes — presumably intentional.
function getElements(name) {
    // Lazily create the cache on first use.
    if (!getElements.cache) {
        getElements.cache = {};
    }
    // Reuse the cached NodeList if present; otherwise query, cache and return.
    return getElements.cache[name] = getElements.cache[name] || document.getElementsByTagName(name);
}
|
// 使用缓存优化标签选择
|
<|file_name|>print_with_newline.rs<|end_file_name|><|fim▁begin|>// FIXME: Ideally these suggestions would be fixed via rustfix. Blocked by rust-lang/rust#53934
// // run-rustfix
#![allow(clippy::print_literal)]
#![warn(clippy::print_with_newline)]
fn main() {
print!("Hello\n");
print!("Hello {}\n", "world");
print!("Hello {} {}\n", "world", "#2");
print!("{}\n", 1265);
print!("\n");
// these are all fine
print!("");
print!("Hello");
println!("Hello");
println!("Hello\n");
println!("Hello {}\n", "world");
print!("Issue\n{}", 1265);
print!("{}", 1265);
print!("\n{}", 1275);
print!("\n\n");
print!("like eof\n\n");
print!("Hello {} {}\n\n", "world", "#2");
println!("\ndon't\nwarn\nfor\nmultiple\nnewlines\n"); // #3126
println!("\nbla\n\n"); // #3126
// Escaping
print!("\\n"); // #3514
print!("\\\n"); // should fail
print!("\\\\n");
// Raw strings
print!(r"\n"); // #3778
// Literal newlines should also fail
print!(
"<|fim▁hole|>"
);
// Don't warn on CRLF (#4208)
print!("\r\n");
print!("foo\r\n");
print!("\\r\n"); //~ ERROR
print!("foo\rbar\n") // ~ ERROR
}<|fim▁end|>
|
"
);
print!(
r"
|
<|file_name|>dynamic_shaders.rs<|end_file_name|><|fim▁begin|>use std::time::Instant;<|fim▁hole|><|fim▁end|>
|
use super::{ShaderSetInner, ShaderSource};
|
<|file_name|>hidden-rt-injection.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.<|fim▁hole|> // The rt has been called both 'native' and 'rt'
use native; //~ ERROR unresolved import
}
fn main() { }<|fim▁end|>
|
// This is testing that users can't access the runtime crate.
mod m {
|
<|file_name|>WebConfig.java<|end_file_name|><|fim▁begin|>package com.pcalouche.spat.config;
import com.pcalouche.spat.interceptors.LoggerInterceptor;
import org.springframework.context.annotation.Configuration;
import org.springframework.web.servlet.config.annotation.CorsRegistry;
import org.springframework.web.servlet.config.annotation.InterceptorRegistry;
import org.springframework.web.servlet.config.annotation.WebMvcConfigurer;
@Configuration<|fim▁hole|>
public WebConfig(SpatProperties spatProperties,
LoggerInterceptor loggerInterceptor) {
this.spatProperties = spatProperties;
this.loggerInterceptor = loggerInterceptor;
}
@Override
public void addInterceptors(InterceptorRegistry registry) {
registry.addInterceptor(loggerInterceptor);
}
@Override
public void addCorsMappings(CorsRegistry registry) {
registry.addMapping("/**")
.allowedOriginPatterns(spatProperties.getCorsAllowedOrigins())
.allowCredentials(true)
.allowedHeaders("*")
.allowedMethods(
"OPTIONS",
"GET",
"POST",
"PUT",
"PATCH",
"DELETE"
);
}
}<|fim▁end|>
|
public class WebConfig implements WebMvcConfigurer {
private final SpatProperties spatProperties;
private final LoggerInterceptor loggerInterceptor;
|
<|file_name|>keycodes.py<|end_file_name|><|fim▁begin|>import pygame
# EXPORT
KeyCodes = {
"BACKSPACE": pygame.K_BACKSPACE,
"TAB": pygame.K_TAB,
"CLEAR": pygame.K_CLEAR,
"RETURN": pygame.K_RETURN,
"PAUSE": pygame.K_PAUSE,
"ESCAPE": pygame.K_ESCAPE,
"SPACE": pygame.K_SPACE,
"EXCLAIM": pygame.K_EXCLAIM,
"QUOTEDBL": pygame.K_QUOTEDBL,
"HASH": pygame.K_HASH,
"DOLLAR": pygame.K_DOLLAR,
"AMPERSAND": pygame.K_AMPERSAND,
"QUOTE": pygame.K_QUOTE,
"LEFTPAREN": pygame.K_LEFTPAREN,
"RIGHTPAREN": pygame.K_RIGHTPAREN,
"ASTERISK": pygame.K_ASTERISK,
"PLUS": pygame.K_PLUS,
"COMMA": pygame.K_COMMA,
"MINUS": pygame.K_MINUS,
"PERIOD": pygame.K_PERIOD,
"SLASH": pygame.K_SLASH,
"0": pygame.K_0,
"1": pygame.K_1,
"2": pygame.K_2,
"3": pygame.K_3,
"4": pygame.K_4,
"5": pygame.K_5,
"6": pygame.K_6,
"7": pygame.K_7,
"8": pygame.K_8,
"9": pygame.K_9,
"COLON": pygame.K_COLON,
"SEMICOLON": pygame.K_SEMICOLON,
"LESS": pygame.K_LESS,
"EQUALS": pygame.K_EQUALS,
"GREATER": pygame.K_GREATER,
"QUESTION": pygame.K_QUESTION,
"AT": pygame.K_AT,
"LEFTBRACKET": pygame.K_LEFTBRACKET,
"BACKSLASH": pygame.K_BACKSLASH,
"RIGHTBRACKET": pygame.K_RIGHTBRACKET,
"CARET": pygame.K_CARET,
"UNDERSCORE": pygame.K_UNDERSCORE,
"BACKQUOTE": pygame.K_BACKQUOTE,
"a": pygame.K_a,
"b": pygame.K_b,
"c": pygame.K_c,
"d": pygame.K_d,
"e": pygame.K_e,
"f": pygame.K_f,
"g": pygame.K_g,
"h": pygame.K_h,
"i": pygame.K_i,
"j": pygame.K_j,
"k": pygame.K_k,
"l": pygame.K_l,
"m": pygame.K_m,
"n": pygame.K_n,
"o": pygame.K_o,
"p": pygame.K_p,
"q": pygame.K_q,
"r": pygame.K_r,
"s": pygame.K_s,
"t": pygame.K_t,<|fim▁hole|> "y": pygame.K_y,
"z": pygame.K_z,
"DELETE": pygame.K_DELETE,
"KP0": pygame.K_KP0,
"KP1": pygame.K_KP1,
"KP2": pygame.K_KP2,
"KP3": pygame.K_KP3,
"KP4": pygame.K_KP4,
"KP5": pygame.K_KP5,
"KP6": pygame.K_KP6,
"KP7": pygame.K_KP7,
"KP8": pygame.K_KP8,
"KP9": pygame.K_KP9,
"KP_PERIOD": pygame.K_KP_PERIOD,
"KP_DIVIDE": pygame.K_KP_DIVIDE,
"KP_MULTIPLY": pygame.K_KP_MULTIPLY,
"KP_MINUS": pygame.K_KP_MINUS,
"KP_PLUS": pygame.K_KP_PLUS,
"KP_ENTER": pygame.K_KP_ENTER,
"KP_EQUALS": pygame.K_KP_EQUALS,
"UP": pygame.K_UP,
"DOWN": pygame.K_DOWN,
"RIGHT": pygame.K_RIGHT,
"LEFT": pygame.K_LEFT,
"INSERT": pygame.K_INSERT,
"HOME": pygame.K_HOME,
"END": pygame.K_END,
"PAGEUP": pygame.K_PAGEUP,
"PAGEDOWN": pygame.K_PAGEDOWN,
"F1": pygame.K_F1,
"F2": pygame.K_F2,
"F3": pygame.K_F3,
"F4": pygame.K_F4,
"F5": pygame.K_F5,
"F6": pygame.K_F6,
"F7": pygame.K_F7,
"F8": pygame.K_F8,
"F9": pygame.K_F9,
"F10": pygame.K_F10,
"F11": pygame.K_F11,
"F12": pygame.K_F12,
"F13": pygame.K_F13,
"F14": pygame.K_F14,
"F15": pygame.K_F15,
"NUMLOCK": pygame.K_NUMLOCK,
"CAPSLOCK": pygame.K_CAPSLOCK,
"SCROLLOCK": pygame.K_SCROLLOCK,
"RSHIFT": pygame.K_RSHIFT,
"LSHIFT": pygame.K_LSHIFT,
"RCTRL": pygame.K_RCTRL,
"LCTRL": pygame.K_LCTRL,
"RALT": pygame.K_RALT,
"LALT": pygame.K_LALT,
"RMETA": pygame.K_RMETA,
"LMETA": pygame.K_LMETA,
"LSUPER": pygame.K_LSUPER,
"RSUPER": pygame.K_RSUPER,
"MODE": pygame.K_MODE,
"HELP": pygame.K_HELP,
"PRINT": pygame.K_PRINT,
"SYSREQ": pygame.K_SYSREQ,
"BREAK": pygame.K_BREAK,
"MENU": pygame.K_MENU,
"POWER": pygame.K_POWER,
"EURO": pygame.K_EURO,
}<|fim▁end|>
|
"u": pygame.K_u,
"v": pygame.K_v,
"w": pygame.K_w,
"x": pygame.K_x,
|
<|file_name|>aiohttp.py<|end_file_name|><|fim▁begin|>"""Utilities to help with aiohttp."""
import json
from typing import Any, Dict, Optional
from urllib.parse import parse_qsl
from multidict import CIMultiDict, MultiDict
class MockRequest:
    """Mock an aiohttp request.

    Holds the raw body as bytes and exposes the same accessors an aiohttp
    request would: ``query``, ``json()``, ``post()`` and ``text()``.
    """

    def __init__(
        self,
        content: bytes,
        method: str = "GET",
        status: int = 200,
        headers: Optional[Dict[str, str]] = None,
        query_string: Optional[str] = None,
        url: str = "",
    ) -> None:
        """Initialize a request."""
        self._content = content
        self.method = method
        self.status = status
        self.url = url
        # Normalize None to the empty query string.
        self.query_string = query_string if query_string is not None else ""
        # Case-insensitive header map, as aiohttp provides.
        self.headers: CIMultiDict[str] = CIMultiDict(headers or {})

    @property
    def query(self) -> "MultiDict[str]":
        """Return a dictionary with the query variables."""
        pairs = parse_qsl(self.query_string, keep_blank_values=True)
        return MultiDict(pairs)

    @property
    def _text(self) -> str:
        """Return the body decoded as UTF-8 text."""
        return self._content.decode("utf-8")

    async def json(self) -> Any:
        """Return the body parsed as JSON."""
        return json.loads(self._text)

    async def post(self) -> "MultiDict[str]":
        """Return POST parameters parsed from the urlencoded body."""
        pairs = parse_qsl(self._text, keep_blank_values=True)
        return MultiDict(pairs)

    async def text(self) -> str:
        """Return the body as text."""
        return self._text
| |
<|file_name|>AgentPath.java<|end_file_name|><|fim▁begin|>/**
* This file is part of the CRISTAL-iSE kernel.
* Copyright (c) 2001-2015 The CRISTAL Consortium. All rights reserved.
<|fim▁hole|> * This library is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation; either version 3 of the License, or (at
* your option) any later version.
*
* This library is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; with out even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
* License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this library; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
*
* http://www.fsf.org/licensing/licenses/lgpl.html
*/
package org.cristalise.kernel.lookup;
import java.util.List;
import java.util.UUID;
import org.cristalise.kernel.common.ObjectNotFoundException;
import org.cristalise.kernel.common.SystemKey;
import org.cristalise.kernel.persistency.ClusterType;
import org.cristalise.kernel.process.Gateway;
/**
* Extends ItemPath with Agent specific codes
**/
public class AgentPath extends ItemPath {
    // Cached agent name; lazily resolved through Gateway lookup when null.
    private String mAgentName = null;
    // True when the agent's password is temporary and must be changed.
    private boolean mPasswordTemporary = false;
    public AgentPath() {
        super();
    }
    public AgentPath(UUID uuid, String ior, String agentName) {
        super(uuid, ior);
        mAgentName = agentName;
    }
    public AgentPath(UUID uuid, String ior, String agentName, boolean isPwdTemporary) {
        super(uuid, ior);
        mAgentName = agentName;
        mPasswordTemporary = isPwdTemporary;
    }
    public AgentPath(UUID uuid) throws InvalidAgentPathException {
        super(uuid);
        //This is commented so a AgentPath can be constructed without setting up Lookup
        //if (getAgentName() == null) throw new InvalidAgentPathException();
    }
    public AgentPath(SystemKey syskey) throws InvalidAgentPathException {
        this(new UUID(syskey.msb, syskey.lsb));
    }
    public AgentPath(ItemPath itemPath) throws InvalidAgentPathException {
        this(itemPath.getUUID());
    }
    /**
     * Builds an AgentPath from a path string whose last segment is the UUID.
     */
    public AgentPath(String path) throws InvalidItemPathException {
        //remove the '/entity/' string from the beginning if exists
        this(UUID.fromString(path.substring( (path.lastIndexOf("/") == -1 ? 0 : path.lastIndexOf("/")+1) )));
    }
    public AgentPath(ItemPath itemPath, String agentName) {
        super(itemPath.getUUID());
        mAgentName = agentName;
    }
    public AgentPath(UUID uuid, String agentName) {
        super(uuid);
        mAgentName = agentName;
    }
    public void setAgentName(String agentID) {
        mAgentName = agentID;
    }
    /**
     * Returns the agent name, lazily resolving it through the Lookup service
     * on first access.  Returns null (without caching) when the lookup fails.
     */
    public String getAgentName() {
        if (mAgentName == null) {
            try {
                mAgentName = Gateway.getLookup().getAgentName(this);
            }
            catch (ObjectNotFoundException e) {
                return null;
            }
        }
        return mAgentName;
    }
    /** Returns all Roles assigned to this agent via the Lookup service. */
    public RolePath[] getRoles() {
        return Gateway.getLookup().getRoles(this);
    }
    /**
     * Returns the first of the given roles this agent holds, or null when the
     * agent holds none of them.
     */
    public RolePath getFirstMatchingRole(List<RolePath> roles) {
        for (RolePath role : roles) {
            if (Gateway.getLookup().hasRole(this, role)) return role;
        }
        return null;
    }
    public boolean hasRole(RolePath role) {
        return Gateway.getLookup().hasRole(this, role);
    }
    /**
     * Name-based overload: resolves the role first, returning false (instead
     * of propagating) when the named role does not exist.
     */
    public boolean hasRole(String role) {
        try {
            return hasRole(Gateway.getLookup().getRolePath(role));
        }
        catch (ObjectNotFoundException ex) {
            return false;
        }
    }
    @Override
    public String getClusterPath() {
        return ClusterType.PATH + "/Agent";
    }
    @Override
    public String dump() {
        return super.dump() + "\n agentID=" + mAgentName;
    }
    public boolean isPasswordTemporary() {
        return mPasswordTemporary;
    }
}
|
*
|
<|file_name|>setup.py<|end_file_name|><|fim▁begin|># Copyright 2012 Anton Beloglazov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.<|fim▁hole|>#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The OpenStack Neat Project
==========================
OpenStack Neat is a project intended to provide an extension to
OpenStack implementing dynamic consolidation of Virtual Machines (VMs)
using live migration. The major objective of dynamic VM consolidation
is to improve the utilization of physical resources and reduce energy
consumption by re-allocating VMs using live migration according to
their real-time resource demand and switching idle hosts to the sleep
mode. Apart from consolidating VMs, the system should be able to react
to increases in the resource demand and deconsolidate VMs when
necessary to avoid performance degradation. In general, the problem of
dynamic VM consolidation includes 4 sub-problems: host underload /
overload detection, VM selection, and VM placement.
This work is conducted within the Cloud Computing and Distributed
Systems (CLOUDS) Laboratory (http://www.cloudbus.org/) at the
University of Melbourne. The problem of dynamic VM consolidation
considering Quality of Service (QoS) constraints has been studied from
the theoretical perspective and algorithms addressing the sub-problems
listed above have been proposed [1], [2]. The algorithms have been
evaluated using CloudSim (http://code.google.com/p/cloudsim/) and
real-world workload traces collected from more than a thousand
PlanetLab VMs hosted on servers located in more than 500 places around
the world.
The aim of the OpenStack Neat project is to provide an extensible
framework for dynamic consolidation of VMs based on the OpenStack
platform. The framework should provide an infrastructure enabling the
interaction of components implementing the decision-making algorithms.
The framework should allow configuration-driven switching of different
implementations of the decision-making algorithms. The implementation
of the framework will include the algorithms proposed in our previous
works [1], [2].
[1] Anton Beloglazov and Rajkumar Buyya, "Optimal Online Deterministic
Algorithms and Adaptive Heuristics for Energy and Performance
Efficient Dynamic Consolidation of Virtual Machines in Cloud Data
Centers", Concurrency and Computation: Practice and Experience (CCPE),
Volume 24, Issue 13, Pages: 1397-1420, John Wiley & Sons, Ltd, New
York, USA, 2012. Download:
http://beloglazov.info/papers/2012-optimal-algorithms-ccpe.pdf
[2] Anton Beloglazov and Rajkumar Buyya, "Managing Overloaded Hosts
for Dynamic Consolidation of Virtual Machines in Cloud Data Centers
Under Quality of Service Constraints", IEEE Transactions on Parallel
and Distributed Systems (TPDS), IEEE CS Press, USA, 2012 (in press,
accepted on August 2, 2012). Download:
http://beloglazov.info/papers/2012-host-overload-detection-tpds.pdf
"""
import distribute_setup
distribute_setup.use_setuptools()
from setuptools import setup, find_packages
# Packaging metadata for the OpenStack Neat distribution.
setup(
    name='openstack-neat',
    version='0.1',
    description='The OpenStack Neat Project',
    long_description=__doc__,
    author='Anton Beloglazov',
    author_email='[email protected]',
    url='https://github.com/beloglazov/openstack-neat',
    platforms='any',
    include_package_data=True,
    license='LICENSE',
    packages=find_packages(),
    test_suite='tests',
    tests_require=['pyqcy', 'mocktest', 'PyContracts'],
    # One console-script launcher per Neat service daemon.
    entry_points = {
        'console_scripts': [
            'neat-data-collector = neat.locals.collector:start',
            'neat-local-manager = neat.locals.manager:start',
            'neat-global-manager = neat.globals.manager:start',
            'neat-db-cleaner = neat.globals.db_cleaner:start',
        ]
    },
    # Install SysV init scripts and the default configuration file.
    data_files = [('/etc/init.d', ['init.d/openstack-neat-data-collector',
                                   'init.d/openstack-neat-local-manager',
                                   'init.d/openstack-neat-global-manager',
                                   'init.d/openstack-neat-db-cleaner']),
                  ('/etc/neat', ['neat.conf'])],
)
|
# You may obtain a copy of the License at
|
<|file_name|>cloudpipe.py<|end_file_name|><|fim▁begin|># Copyright 2011 Openstack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Connect your vlan to the world."""
import os
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova.api.openstack import extensions
from nova.auth import manager
from nova.cloudpipe import pipelib
from nova import compute
from nova.compute import vm_states
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
FLAGS = flags.FLAGS
LOG = logging.getLogger("nova.api.openstack.compute.contrib.cloudpipe")
authorize = extensions.extension_authorizer('compute', 'cloudpipe')
class CloudpipeTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for a single cloudpipe response."""
    def construct(self):
        # Serialize the flat 'cloudpipe' dict produced by the controller.
        return xmlutil.MasterTemplate(xmlutil.make_flat_dict('cloudpipe'), 1)
class CloudpipesTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for a list of cloudpipes."""
    def construct(self):
        # <cloudpipes> root containing one flat <cloudpipe> element per entry.
        root = xmlutil.TemplateElement('cloudpipes')
        elem = xmlutil.make_flat_dict('cloudpipe', selector='cloudpipes',
                                      subselector='cloudpipe')
        root.append(elem)
        return xmlutil.MasterTemplate(root, 1)
class CloudpipeController(object):
"""Handle creating and listing cloudpipe instances."""
    def __init__(self):
        """Wire up the compute/auth/cloudpipe helpers and prepare key storage."""
        self.compute_api = compute.API()
        self.auth_manager = manager.AuthManager()
        self.cloudpipe = pipelib.CloudPipe()
        self.setup()
    def setup(self):
        """Ensure the keychains and folders exist."""
        # NOTE(vish): One of the drawbacks of doing this in the api is
        #             the keys will only be on the api node that launched
        #             the cloudpipe.
        # Create the key storage directory (and any parents) on first use.
        if not os.path.exists(FLAGS.keys_path):
            os.makedirs(FLAGS.keys_path)
    def _get_cloudpipe_for_project(self, context, project_id):
        """Get the cloudpipe instance for a project ID."""
        # NOTE(todd): this should probably change to compute_api.get_all
        #             or db.instance_get_project_vpn
        # Return the first non-deleted instance built from the VPN image;
        # implicitly returns None when the project has no cloudpipe yet.
        for instance in db.instance_get_all_by_project(context, project_id):
            if (instance['image_id'] == str(FLAGS.vpn_image_id)
                and instance['vm_state'] != vm_states.DELETED):
                return instance
    def _vpn_dict(self, project, vpn_instance):
        """Build the REST view of a project's VPN.

        The 'state' field is derived as follows:
        - no instance yet               -> 'pending'
        - instance but no VPN ip/port   -> 'invalid'
        - VPN ping succeeds             -> 'running', otherwise 'down'
        """
        rv = {'project_id': project.id,
              'public_ip': project.vpn_ip,
              'public_port': project.vpn_port}
        if vpn_instance:
            rv['instance_id'] = vpn_instance['uuid']
            rv['created_at'] = utils.isotime(vpn_instance['created_at'])
            # The fixed IP may not be allocated yet, hence the None default.
            address = vpn_instance.get('fixed_ip', None)
            if address:
                rv['internal_ip'] = address['address']
            if project.vpn_ip and project.vpn_port:
                if utils.vpn_ping(project.vpn_ip, project.vpn_port):
                    rv['state'] = 'running'
                else:
                    rv['state'] = 'down'
            else:
                rv['state'] = 'invalid'
        else:
            rv['state'] = 'pending'
        return rv
@wsgi.serializers(xml=CloudpipeTemplate)
def create(self, req, body):
"""Create a new cloudpipe instance, if none exists.
Parameters: {cloudpipe: {project_id: XYZ}}
"""
ctxt = req.environ['nova.context']
authorize(ctxt)
params = body.get('cloudpipe', {})
project_id = params.get('project_id', ctxt.project_id)
instance = self._get_cloudpipe_for_project(ctxt, project_id)
if not instance:
proj = self.auth_manager.get_project(project_id)
user_id = proj.project_manager_id
try:
self.cloudpipe.launch_vpn_instance(project_id, user_id)<|fim▁hole|> instance = self._get_cloudpipe_for_project(ctxt, proj)
return {'instance_id': instance['uuid']}
    @wsgi.serializers(xml=CloudpipesTemplate)
    def index(self, req):
        """List running cloudpipe instances."""
        context = req.environ['nova.context']
        authorize(context)
        vpns = []
        # TODO(todd): could use compute_api.get_all with admin context?
        # One entry per project, whether or not a VPN instance exists yet
        # (projects without one are reported with state 'pending').
        for project in self.auth_manager.get_projects():
            instance = self._get_cloudpipe_for_project(context, project.id)
            vpns.append(self._vpn_dict(project, instance))
        return {'cloudpipes': vpns}
class Cloudpipe(extensions.ExtensionDescriptor):
    """Adds actions to create cloudpipe instances.
    When running with the Vlan network mode, you need a mechanism to route
    from the public Internet to your vlans. This mechanism is known as a
    cloudpipe.
    At the time of creating this class, only OpenVPN is supported. Support for
    a SSH Bastion host is forthcoming.
    """
    # Extension metadata consumed by the OpenStack extension framework.
    name = "Cloudpipe"
    alias = "os-cloudpipe"
    namespace = "http://docs.openstack.org/compute/ext/cloudpipe/api/v1.1"
    updated = "2011-12-16T00:00:00+00:00"
    def get_resources(self):
        # Expose the /os-cloudpipe resource backed by CloudpipeController.
        resources = []
        res = extensions.ResourceExtension('os-cloudpipe',
                                           CloudpipeController())
        resources.append(res)
        return resources
|
except db.NoMoreNetworks:
msg = _("Unable to claim IP for VPN instances, ensure it "
"isn't running, and try again in a few minutes")
raise exception.ApiError(msg)
|
<|file_name|>core_dependencies.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# Copyright 2003 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
# This tests correct handling of dependencies, specifically, on generated
# sources, and from generated sources.
import BoostBuild<|fim▁hole|>import string
t = BoostBuild.Tester(pass_toolset=0)
t.write("core-dependency-helpers", """
rule hdrrule
{
INCLUDES $(1) : $(2) ;
}
actions copy
{
cp $(>) $(<)
}
""")
code = """include core-dependency-helpers ;
DEPENDS all : a ;
DEPENDS a : b ;
actions create-b
{
echo '#include <foo.h>' > $(<)
}
copy a : b ;
create-b b ;
HDRRULE on b foo.h bar.h = hdrrule ;
HDRSCAN on b foo.h bar.h = \"#include <(.*)>\" ;
"""
# This creates 'a' which depends on 'b', which is generated. The generated 'b'
# contains '#include <foo.h>' and no rules for foo.h are given. The system
# should error out on the first invocation.
t.run_build_system("-f-", stdin=code)
t.fail_test(string.find(t.stdout(), "...skipped a for lack of foo.h...") == -1)
t.rm('b')
# Now test that if target 'c' also depends on 'b', then it will not be built, as
# well.
t.run_build_system("-f-", stdin=code + " copy c : b ; DEPENDS c : b ; DEPENDS all : c ; ")
t.fail_test(string.find(t.stdout(), "...skipped c for lack of foo.h...") == -1)
t.rm('b')
# Now add a rule for creating foo.h.
code += """
actions create-foo
{
echo // > $(<)
}
create-foo foo.h ;
"""
t.run_build_system("-f-", stdin=code)
# Run two times, adding explicit dependency from all to foo.h at the beginning
# and at the end, to make sure that foo.h is generated before 'a' in all cases.
def mk_correct_order_func(s1, s2):
    """Build a predicate checking that *s1* occurs before *s2* in a string.

    The returned function takes a string and returns True only when both
    substrings are present and the first occurrence of s1 precedes s2's.
    """
    def correct_order(s):
        # str.find returns -1 when the substring is absent.  Using the string
        # method instead of string.find (deprecated in Python 2, removed in
        # Python 3) keeps identical behaviour while remaining portable.
        n1 = s.find(s1)
        n2 = s.find(s2)
        return (n1 != -1) and (n2 != -1) and (n1 < n2)
    return correct_order
correct_order = mk_correct_order_func("create-foo", "copy a")
t.rm(["a", "b", "foo.h"])
t.run_build_system("-d+2 -f-", stdin=code + " DEPENDS all : foo.h ;")
t.fail_test(not correct_order(t.stdout()))
t.rm(["a", "b", "foo.h"])
t.run_build_system("-d+2 -f-", stdin=" DEPENDS all : foo.h ; " + code)
t.fail_test(not correct_order(t.stdout()))
# Now foo.h exists. Test include from b -> foo.h -> bar.h -> biz.h. b and foo.h
# already have updating actions.
t.rm(["a", "b"])
t.write("foo.h", "#include <bar.h>")
t.write("bar.h", "#include <biz.h>")
t.run_build_system("-d+2 -f-", stdin=code)
t.fail_test(string.find(t.stdout(), "...skipped a for lack of biz.h...") == -1)
# Add an action for biz.h.
code += """
actions create-biz
{
echo // > $(<)
}
create-biz biz.h ;
"""
t.rm(["b"])
correct_order = mk_correct_order_func("create-biz", "copy a")
t.run_build_system("-d+2 -f-", stdin=code + " DEPENDS all : biz.h ;")
t.fail_test(not correct_order(t.stdout()))
t.rm(["a", "biz.h"])
t.run_build_system("-d+2 -f-", stdin=" DEPENDS all : biz.h ; " + code)
t.fail_test(not correct_order(t.stdout()))
t.write("a", "")
code="""
DEPENDS all : main d ;
actions copy
{
cp $(>) $(<) ;
}
DEPENDS main : a ;
copy main : a ;
INCLUDES a : <1>c ;
NOCARE <1>c ;
SEARCH on <1>c = . ;
actions create-c
{
echo d > $(<)
}
actions create-d
{
echo // > $(<)
}
create-c <2>c ;
LOCATE on <2>c = . ;
create-d d ;
HDRSCAN on <1>c = (.*) ;
HDRRULE on <1>c = hdrrule ;
rule hdrrule
{
INCLUDES $(1) : d ;
}
"""
correct_order = mk_correct_order_func("create-d", "copy main")
t.run_build_system("-d2 -f-", stdin=code)
t.fail_test(not correct_order(t.stdout()))
t.cleanup()<|fim▁end|>
| |
<|file_name|>bools.py<|end_file_name|><|fim▁begin|>"""
Some utilities for bools manipulation.
<|fim▁hole|>Copyright 2013 Deepak Subburam
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
"""
def fields_test(dictionary, conditions):
    """
    Test the values in *dictionary* against *conditions*.

    Return +1 if all conditions are satisfied; 0 if at least one (but not all)
    conditions are satisfied; and -1 if no conditions are satisfied.
    (An empty *conditions* dict trivially satisfies everything and returns +1.)

    conditions:
    dictionary with keys corresponding to keys in dictionary, and values which are
    tuples of the form (+2|+1|0|-1|-2|None, val).
    +2 meaning dictionary[key] > val,
    +1 meaning dictionary[key] >= val,
    0 meaning dictionary[key] == val,
    -1 meaning dictionary[key] <= val,
    -2 meaning dictionary[key] < val,
    None meaning dictionary[key] != val.

    Raises AssertionError for any other condition code.
    """
    count = 0
    net = 0
    for key, (cond, value) in conditions.items():
        count += 1
        field_value = dictionary[key]
        if cond == 1:
            result = field_value >= value
        elif cond == -1:
            result = field_value <= value
        elif cond == 0:
            result = field_value == value
        elif cond == 2:
            result = field_value > value
        elif cond == -2:
            result = field_value < value
        elif cond is None:
            # BUG FIX: this branch previously tested 'cond == 0' again, which
            # made it unreachable and broke the documented None (!=) condition.
            result = field_value != value
        else:
            raise AssertionError('Bad condition ' + str(cond))
        # result is a bool; True counts as 1 toward the satisfied tally.
        net += result

    if net == count:
        return 1
    elif net > 0:
        return 0
    else:
        return -1
| |
<|file_name|>xgen_usb_otg_device.go<|end_file_name|><|fim▁begin|>package usb
// DO NOT EDIT THIS FILE. GENERATED BY xgen.
import (
"bits"
"mmio"
"unsafe"
"stm32/o/f411xe/mmap"
)
type USB_OTG_Device_Periph struct {
DCFG RDCFG
DCTL RDCTL
DSTS RDSTS
_ uint32
DIEPMSK RDIEPMSK
DOEPMSK RDOEPMSK
DAINT RDAINT
DAINTMSK RDAINTMSK
_ [2]uint32
DVBUSDIS RDVBUSDIS
DVBUSPULSE RDVBUSPULSE
DTHRCTL RDTHRCTL
DIEPEMPMSK RDIEPEMPMSK
DEACHINT RDEACHINT
DEACHMSK RDEACHMSK
_ uint32
DINEP1MSK RDINEP1MSK
_ [15]uint32
DOUTEP1MSK RDOUTEP1MSK
}
func (p *USB_OTG_Device_Periph) BaseAddr() uintptr {
return uintptr(unsafe.Pointer(p))
}
// DCFG is the value/mask type for bit fields of the device configuration
// register.
type DCFG uint32

// Field extracts the bit field selected by mask from b as an int.
func (b DCFG) Field(mask DCFG) int {
	return bits.Field32(uint32(b), uint32(mask))
}

// J places v into the bit positions selected by mask.
func (mask DCFG) J(v int) DCFG {
	return DCFG(bits.MakeField32(v, uint32(mask)))
}

// RDCFG is the typed MMIO register; every accessor delegates to mmio.U32.
type RDCFG struct{ mmio.U32 }

func (r *RDCFG) Bits(mask DCFG) DCFG { return DCFG(r.U32.Bits(uint32(mask))) }
func (r *RDCFG) StoreBits(mask, b DCFG) { r.U32.StoreBits(uint32(mask), uint32(b)) }
func (r *RDCFG) SetBits(mask DCFG) { r.U32.SetBits(uint32(mask)) }
func (r *RDCFG) ClearBits(mask DCFG) { r.U32.ClearBits(uint32(mask)) }
func (r *RDCFG) Load() DCFG { return DCFG(r.U32.Load()) }
func (r *RDCFG) Store(b DCFG) { r.U32.Store(uint32(b)) }
func (r *RDCFG) AtomicStoreBits(mask, b DCFG) { r.U32.AtomicStoreBits(uint32(mask), uint32(b)) }
func (r *RDCFG) AtomicSetBits(mask DCFG) { r.U32.AtomicSetBits(uint32(mask)) }
func (r *RDCFG) AtomicClearBits(mask DCFG) { r.U32.AtomicClearBits(uint32(mask)) }

// RMDCFG couples the register with a single named bit-field mask.
type RMDCFG struct{ mmio.UM32 }

func (rm RMDCFG) Load() DCFG { return DCFG(rm.UM32.Load()) }
func (rm RMDCFG) Store(b DCFG) { rm.UM32.Store(uint32(b)) }

// Named bit-field accessors of DCFG. The mask constants (DSPD, NZLSOHSK,
// ...) are presumably declared elsewhere in this package — not in view here.
func (p *USB_OTG_Device_Periph) DSPD() RMDCFG {
	return RMDCFG{mmio.UM32{&p.DCFG.U32, uint32(DSPD)}}
}
func (p *USB_OTG_Device_Periph) NZLSOHSK() RMDCFG {
	return RMDCFG{mmio.UM32{&p.DCFG.U32, uint32(NZLSOHSK)}}
}
func (p *USB_OTG_Device_Periph) DAD() RMDCFG {
	return RMDCFG{mmio.UM32{&p.DCFG.U32, uint32(DAD)}}
}
func (p *USB_OTG_Device_Periph) PFIVL() RMDCFG {
	return RMDCFG{mmio.UM32{&p.DCFG.U32, uint32(PFIVL)}}
}
func (p *USB_OTG_Device_Periph) PERSCHIVL() RMDCFG {
	return RMDCFG{mmio.UM32{&p.DCFG.U32, uint32(PERSCHIVL)}}
}
// DCTL: bit-field helper types and accessors for the device control
// register. Same generated pattern as DCFG above.
type DCTL uint32

func (b DCTL) Field(mask DCTL) int {
	return bits.Field32(uint32(b), uint32(mask))
}
func (mask DCTL) J(v int) DCTL {
	return DCTL(bits.MakeField32(v, uint32(mask)))
}

type RDCTL struct{ mmio.U32 }

func (r *RDCTL) Bits(mask DCTL) DCTL { return DCTL(r.U32.Bits(uint32(mask))) }
func (r *RDCTL) StoreBits(mask, b DCTL) { r.U32.StoreBits(uint32(mask), uint32(b)) }
func (r *RDCTL) SetBits(mask DCTL) { r.U32.SetBits(uint32(mask)) }
func (r *RDCTL) ClearBits(mask DCTL) { r.U32.ClearBits(uint32(mask)) }
func (r *RDCTL) Load() DCTL { return DCTL(r.U32.Load()) }
func (r *RDCTL) Store(b DCTL) { r.U32.Store(uint32(b)) }
func (r *RDCTL) AtomicStoreBits(mask, b DCTL) { r.U32.AtomicStoreBits(uint32(mask), uint32(b)) }
func (r *RDCTL) AtomicSetBits(mask DCTL) { r.U32.AtomicSetBits(uint32(mask)) }
func (r *RDCTL) AtomicClearBits(mask DCTL) { r.U32.AtomicClearBits(uint32(mask)) }

type RMDCTL struct{ mmio.UM32 }

func (rm RMDCTL) Load() DCTL { return DCTL(rm.UM32.Load()) }
func (rm RMDCTL) Store(b DCTL) { rm.UM32.Store(uint32(b)) }

// Named bit-field accessors of DCTL.
func (p *USB_OTG_Device_Periph) RWUSIG() RMDCTL {
	return RMDCTL{mmio.UM32{&p.DCTL.U32, uint32(RWUSIG)}}
}
func (p *USB_OTG_Device_Periph) SDIS() RMDCTL {
	return RMDCTL{mmio.UM32{&p.DCTL.U32, uint32(SDIS)}}
}
func (p *USB_OTG_Device_Periph) GINSTS() RMDCTL {
	return RMDCTL{mmio.UM32{&p.DCTL.U32, uint32(GINSTS)}}
}
func (p *USB_OTG_Device_Periph) GONSTS() RMDCTL {
	return RMDCTL{mmio.UM32{&p.DCTL.U32, uint32(GONSTS)}}
}
func (p *USB_OTG_Device_Periph) TCTL() RMDCTL {
	return RMDCTL{mmio.UM32{&p.DCTL.U32, uint32(TCTL)}}
}
func (p *USB_OTG_Device_Periph) SGINAK() RMDCTL {
	return RMDCTL{mmio.UM32{&p.DCTL.U32, uint32(SGINAK)}}
}
func (p *USB_OTG_Device_Periph) CGINAK() RMDCTL {
	return RMDCTL{mmio.UM32{&p.DCTL.U32, uint32(CGINAK)}}
}
func (p *USB_OTG_Device_Periph) SGONAK() RMDCTL {
	return RMDCTL{mmio.UM32{&p.DCTL.U32, uint32(SGONAK)}}
}
func (p *USB_OTG_Device_Periph) CGONAK() RMDCTL {
	return RMDCTL{mmio.UM32{&p.DCTL.U32, uint32(CGONAK)}}
}
func (p *USB_OTG_Device_Periph) POPRGDNE() RMDCTL {
	return RMDCTL{mmio.UM32{&p.DCTL.U32, uint32(POPRGDNE)}}
}
// DSTS: bit-field helper types and accessors for the device status
// register. Same generated pattern as DCFG above.
type DSTS uint32

func (b DSTS) Field(mask DSTS) int {
	return bits.Field32(uint32(b), uint32(mask))
}
func (mask DSTS) J(v int) DSTS {
	return DSTS(bits.MakeField32(v, uint32(mask)))
}

type RDSTS struct{ mmio.U32 }

func (r *RDSTS) Bits(mask DSTS) DSTS { return DSTS(r.U32.Bits(uint32(mask))) }
func (r *RDSTS) StoreBits(mask, b DSTS) { r.U32.StoreBits(uint32(mask), uint32(b)) }
func (r *RDSTS) SetBits(mask DSTS) { r.U32.SetBits(uint32(mask)) }
func (r *RDSTS) ClearBits(mask DSTS) { r.U32.ClearBits(uint32(mask)) }
func (r *RDSTS) Load() DSTS { return DSTS(r.U32.Load()) }
func (r *RDSTS) Store(b DSTS) { r.U32.Store(uint32(b)) }
func (r *RDSTS) AtomicStoreBits(mask, b DSTS) { r.U32.AtomicStoreBits(uint32(mask), uint32(b)) }
func (r *RDSTS) AtomicSetBits(mask DSTS) { r.U32.AtomicSetBits(uint32(mask)) }
func (r *RDSTS) AtomicClearBits(mask DSTS) { r.U32.AtomicClearBits(uint32(mask)) }

type RMDSTS struct{ mmio.UM32 }

func (rm RMDSTS) Load() DSTS { return DSTS(rm.UM32.Load()) }
func (rm RMDSTS) Store(b DSTS) { rm.UM32.Store(uint32(b)) }

// Named bit-field accessors of DSTS.
func (p *USB_OTG_Device_Periph) SUSPSTS() RMDSTS {
	return RMDSTS{mmio.UM32{&p.DSTS.U32, uint32(SUSPSTS)}}
}
func (p *USB_OTG_Device_Periph) ENUMSPD() RMDSTS {
	return RMDSTS{mmio.UM32{&p.DSTS.U32, uint32(ENUMSPD)}}
}
func (p *USB_OTG_Device_Periph) EERR() RMDSTS {
	return RMDSTS{mmio.UM32{&p.DSTS.U32, uint32(EERR)}}
}
func (p *USB_OTG_Device_Periph) FNSOF() RMDSTS {
	return RMDSTS{mmio.UM32{&p.DSTS.U32, uint32(FNSOF)}}
}
// DIEPMSK: bit-field helper types and accessors for the IN endpoint common
// interrupt mask register. Same generated pattern as DCFG above.
type DIEPMSK uint32

func (b DIEPMSK) Field(mask DIEPMSK) int {
	return bits.Field32(uint32(b), uint32(mask))
}
func (mask DIEPMSK) J(v int) DIEPMSK {
	return DIEPMSK(bits.MakeField32(v, uint32(mask)))
}

type RDIEPMSK struct{ mmio.U32 }

func (r *RDIEPMSK) Bits(mask DIEPMSK) DIEPMSK { return DIEPMSK(r.U32.Bits(uint32(mask))) }
func (r *RDIEPMSK) StoreBits(mask, b DIEPMSK) { r.U32.StoreBits(uint32(mask), uint32(b)) }
func (r *RDIEPMSK) SetBits(mask DIEPMSK) { r.U32.SetBits(uint32(mask)) }
func (r *RDIEPMSK) ClearBits(mask DIEPMSK) { r.U32.ClearBits(uint32(mask)) }
func (r *RDIEPMSK) Load() DIEPMSK { return DIEPMSK(r.U32.Load()) }
func (r *RDIEPMSK) Store(b DIEPMSK) { r.U32.Store(uint32(b)) }
func (r *RDIEPMSK) AtomicStoreBits(mask, b DIEPMSK) { r.U32.AtomicStoreBits(uint32(mask), uint32(b)) }
func (r *RDIEPMSK) AtomicSetBits(mask DIEPMSK) { r.U32.AtomicSetBits(uint32(mask)) }
func (r *RDIEPMSK) AtomicClearBits(mask DIEPMSK) { r.U32.AtomicClearBits(uint32(mask)) }

type RMDIEPMSK struct{ mmio.UM32 }

func (rm RMDIEPMSK) Load() DIEPMSK { return DIEPMSK(rm.UM32.Load()) }
func (rm RMDIEPMSK) Store(b DIEPMSK) { rm.UM32.Store(uint32(b)) }

// Named bit-field accessors of DIEPMSK.
func (p *USB_OTG_Device_Periph) XFRCM() RMDIEPMSK {
	return RMDIEPMSK{mmio.UM32{&p.DIEPMSK.U32, uint32(XFRCM)}}
}
func (p *USB_OTG_Device_Periph) EPDM() RMDIEPMSK {
	return RMDIEPMSK{mmio.UM32{&p.DIEPMSK.U32, uint32(EPDM)}}
}
func (p *USB_OTG_Device_Periph) TOM() RMDIEPMSK {
	return RMDIEPMSK{mmio.UM32{&p.DIEPMSK.U32, uint32(TOM)}}
}
func (p *USB_OTG_Device_Periph) ITTXFEMSK() RMDIEPMSK {
	return RMDIEPMSK{mmio.UM32{&p.DIEPMSK.U32, uint32(ITTXFEMSK)}}
}
func (p *USB_OTG_Device_Periph) INEPNMM() RMDIEPMSK {
	return RMDIEPMSK{mmio.UM32{&p.DIEPMSK.U32, uint32(INEPNMM)}}
}
func (p *USB_OTG_Device_Periph) INEPNEM() RMDIEPMSK {
	return RMDIEPMSK{mmio.UM32{&p.DIEPMSK.U32, uint32(INEPNEM)}}
}
func (p *USB_OTG_Device_Periph) TXFURM() RMDIEPMSK {
	return RMDIEPMSK{mmio.UM32{&p.DIEPMSK.U32, uint32(TXFURM)}}
}
func (p *USB_OTG_Device_Periph) BIM() RMDIEPMSK {
	return RMDIEPMSK{mmio.UM32{&p.DIEPMSK.U32, uint32(BIM)}}
}
// DOEPMSK: bit-field helper types and accessors for the OUT endpoint common
// interrupt mask register. Same generated pattern as DCFG above.
type DOEPMSK uint32

func (b DOEPMSK) Field(mask DOEPMSK) int {
	return bits.Field32(uint32(b), uint32(mask))
}
func (mask DOEPMSK) J(v int) DOEPMSK {
	return DOEPMSK(bits.MakeField32(v, uint32(mask)))
}

type RDOEPMSK struct{ mmio.U32 }

func (r *RDOEPMSK) Bits(mask DOEPMSK) DOEPMSK { return DOEPMSK(r.U32.Bits(uint32(mask))) }
func (r *RDOEPMSK) StoreBits(mask, b DOEPMSK) { r.U32.StoreBits(uint32(mask), uint32(b)) }
func (r *RDOEPMSK) SetBits(mask DOEPMSK) { r.U32.SetBits(uint32(mask)) }
func (r *RDOEPMSK) ClearBits(mask DOEPMSK) { r.U32.ClearBits(uint32(mask)) }
func (r *RDOEPMSK) Load() DOEPMSK { return DOEPMSK(r.U32.Load()) }
func (r *RDOEPMSK) Store(b DOEPMSK) { r.U32.Store(uint32(b)) }
func (r *RDOEPMSK) AtomicStoreBits(mask, b DOEPMSK) { r.U32.AtomicStoreBits(uint32(mask), uint32(b)) }
func (r *RDOEPMSK) AtomicSetBits(mask DOEPMSK) { r.U32.AtomicSetBits(uint32(mask)) }
func (r *RDOEPMSK) AtomicClearBits(mask DOEPMSK) { r.U32.AtomicClearBits(uint32(mask)) }

type RMDOEPMSK struct{ mmio.UM32 }

func (rm RMDOEPMSK) Load() DOEPMSK { return DOEPMSK(rm.UM32.Load()) }
func (rm RMDOEPMSK) Store(b DOEPMSK) { rm.UM32.Store(uint32(b)) }

// NOTE(review): XFRCM and EPDM below redeclare method names that are already
// defined on *USB_OTG_Device_Periph for DIEPMSK earlier in this file. Go
// forbids duplicate method names on one receiver type, so this generated
// file cannot compile as shown — the xgen generator output should be checked.
func (p *USB_OTG_Device_Periph) XFRCM() RMDOEPMSK {
	return RMDOEPMSK{mmio.UM32{&p.DOEPMSK.U32, uint32(XFRCM)}}
}
func (p *USB_OTG_Device_Periph) EPDM() RMDOEPMSK {
	return RMDOEPMSK{mmio.UM32{&p.DOEPMSK.U32, uint32(EPDM)}}
}
func (p *USB_OTG_Device_Periph) STUPM() RMDOEPMSK {
	return RMDOEPMSK{mmio.UM32{&p.DOEPMSK.U32, uint32(STUPM)}}
}
func (p *USB_OTG_Device_Periph) OTEPDM() RMDOEPMSK {
	return RMDOEPMSK{mmio.UM32{&p.DOEPMSK.U32, uint32(OTEPDM)}}
}
func (p *USB_OTG_Device_Periph) B2BSTUP() RMDOEPMSK {
	return RMDOEPMSK{mmio.UM32{&p.DOEPMSK.U32, uint32(B2BSTUP)}}
}
func (p *USB_OTG_Device_Periph) OPEM() RMDOEPMSK {
	return RMDOEPMSK{mmio.UM32{&p.DOEPMSK.U32, uint32(OPEM)}}
}
func (p *USB_OTG_Device_Periph) BOIM() RMDOEPMSK {
	return RMDOEPMSK{mmio.UM32{&p.DOEPMSK.U32, uint32(BOIM)}}
}
// DAINT: bit-field helper types and accessors for the all-endpoints
// interrupt register. Same generated pattern as DCFG above.
type DAINT uint32

func (b DAINT) Field(mask DAINT) int {
	return bits.Field32(uint32(b), uint32(mask))
}
func (mask DAINT) J(v int) DAINT {
	return DAINT(bits.MakeField32(v, uint32(mask)))
}

type RDAINT struct{ mmio.U32 }

func (r *RDAINT) Bits(mask DAINT) DAINT { return DAINT(r.U32.Bits(uint32(mask))) }
func (r *RDAINT) StoreBits(mask, b DAINT) { r.U32.StoreBits(uint32(mask), uint32(b)) }
func (r *RDAINT) SetBits(mask DAINT) { r.U32.SetBits(uint32(mask)) }
func (r *RDAINT) ClearBits(mask DAINT) { r.U32.ClearBits(uint32(mask)) }
func (r *RDAINT) Load() DAINT { return DAINT(r.U32.Load()) }
func (r *RDAINT) Store(b DAINT) { r.U32.Store(uint32(b)) }
func (r *RDAINT) AtomicStoreBits(mask, b DAINT) { r.U32.AtomicStoreBits(uint32(mask), uint32(b)) }
func (r *RDAINT) AtomicSetBits(mask DAINT) { r.U32.AtomicSetBits(uint32(mask)) }
func (r *RDAINT) AtomicClearBits(mask DAINT) { r.U32.AtomicClearBits(uint32(mask)) }

type RMDAINT struct{ mmio.UM32 }

func (rm RMDAINT) Load() DAINT { return DAINT(rm.UM32.Load()) }
func (rm RMDAINT) Store(b DAINT) { rm.UM32.Store(uint32(b)) }

// Named bit-field accessors of DAINT.
func (p *USB_OTG_Device_Periph) IEPINT() RMDAINT {
	return RMDAINT{mmio.UM32{&p.DAINT.U32, uint32(IEPINT)}}
}
func (p *USB_OTG_Device_Periph) OEPINT() RMDAINT {
	return RMDAINT{mmio.UM32{&p.DAINT.U32, uint32(OEPINT)}}
}
// DAINTMSK: bit-field helper types and accessors for the all-endpoints
// interrupt mask register. Same generated pattern as DCFG above.
type DAINTMSK uint32

func (b DAINTMSK) Field(mask DAINTMSK) int {
	return bits.Field32(uint32(b), uint32(mask))
}
func (mask DAINTMSK) J(v int) DAINTMSK {
	return DAINTMSK(bits.MakeField32(v, uint32(mask)))
}

type RDAINTMSK struct{ mmio.U32 }

func (r *RDAINTMSK) Bits(mask DAINTMSK) DAINTMSK { return DAINTMSK(r.U32.Bits(uint32(mask))) }
func (r *RDAINTMSK) StoreBits(mask, b DAINTMSK) { r.U32.StoreBits(uint32(mask), uint32(b)) }
func (r *RDAINTMSK) SetBits(mask DAINTMSK) { r.U32.SetBits(uint32(mask)) }
func (r *RDAINTMSK) ClearBits(mask DAINTMSK) { r.U32.ClearBits(uint32(mask)) }
func (r *RDAINTMSK) Load() DAINTMSK { return DAINTMSK(r.U32.Load()) }
func (r *RDAINTMSK) Store(b DAINTMSK) { r.U32.Store(uint32(b)) }
func (r *RDAINTMSK) AtomicStoreBits(mask, b DAINTMSK) { r.U32.AtomicStoreBits(uint32(mask), uint32(b)) }
func (r *RDAINTMSK) AtomicSetBits(mask DAINTMSK) { r.U32.AtomicSetBits(uint32(mask)) }
func (r *RDAINTMSK) AtomicClearBits(mask DAINTMSK) { r.U32.AtomicClearBits(uint32(mask)) }

type RMDAINTMSK struct{ mmio.UM32 }

func (rm RMDAINTMSK) Load() DAINTMSK { return DAINTMSK(rm.UM32.Load()) }
func (rm RMDAINTMSK) Store(b DAINTMSK) { rm.UM32.Store(uint32(b)) }

// Named bit-field accessors of DAINTMSK.
func (p *USB_OTG_Device_Periph) IEPM() RMDAINTMSK {
	return RMDAINTMSK{mmio.UM32{&p.DAINTMSK.U32, uint32(IEPM)}}
}
func (p *USB_OTG_Device_Periph) OEPM() RMDAINTMSK {
	return RMDAINTMSK{mmio.UM32{&p.DAINTMSK.U32, uint32(OEPM)}}
}
// DVBUSDIS: bit-field helper types and accessors for the VBUS discharge
// time register. Same generated pattern as DCFG above.
type DVBUSDIS uint32

func (b DVBUSDIS) Field(mask DVBUSDIS) int {
	return bits.Field32(uint32(b), uint32(mask))
}
func (mask DVBUSDIS) J(v int) DVBUSDIS {
	return DVBUSDIS(bits.MakeField32(v, uint32(mask)))
}

type RDVBUSDIS struct{ mmio.U32 }

func (r *RDVBUSDIS) Bits(mask DVBUSDIS) DVBUSDIS { return DVBUSDIS(r.U32.Bits(uint32(mask))) }
func (r *RDVBUSDIS) StoreBits(mask, b DVBUSDIS) { r.U32.StoreBits(uint32(mask), uint32(b)) }
func (r *RDVBUSDIS) SetBits(mask DVBUSDIS) { r.U32.SetBits(uint32(mask)) }
func (r *RDVBUSDIS) ClearBits(mask DVBUSDIS) { r.U32.ClearBits(uint32(mask)) }
func (r *RDVBUSDIS) Load() DVBUSDIS { return DVBUSDIS(r.U32.Load()) }
func (r *RDVBUSDIS) Store(b DVBUSDIS) { r.U32.Store(uint32(b)) }
func (r *RDVBUSDIS) AtomicStoreBits(mask, b DVBUSDIS) { r.U32.AtomicStoreBits(uint32(mask), uint32(b)) }
func (r *RDVBUSDIS) AtomicSetBits(mask DVBUSDIS) { r.U32.AtomicSetBits(uint32(mask)) }
func (r *RDVBUSDIS) AtomicClearBits(mask DVBUSDIS) { r.U32.AtomicClearBits(uint32(mask)) }

type RMDVBUSDIS struct{ mmio.UM32 }

func (rm RMDVBUSDIS) Load() DVBUSDIS { return DVBUSDIS(rm.UM32.Load()) }
func (rm RMDVBUSDIS) Store(b DVBUSDIS) { rm.UM32.Store(uint32(b)) }

// Named bit-field accessor of DVBUSDIS.
func (p *USB_OTG_Device_Periph) VBUSDT() RMDVBUSDIS {
	return RMDVBUSDIS{mmio.UM32{&p.DVBUSDIS.U32, uint32(VBUSDT)}}
}
// DVBUSPULSE: bit-field helper types and accessors for the VBUS pulsing
// time register. Same generated pattern as DCFG above.
type DVBUSPULSE uint32

func (b DVBUSPULSE) Field(mask DVBUSPULSE) int {
	return bits.Field32(uint32(b), uint32(mask))
}
func (mask DVBUSPULSE) J(v int) DVBUSPULSE {
	return DVBUSPULSE(bits.MakeField32(v, uint32(mask)))
}

type RDVBUSPULSE struct{ mmio.U32 }

func (r *RDVBUSPULSE) Bits(mask DVBUSPULSE) DVBUSPULSE { return DVBUSPULSE(r.U32.Bits(uint32(mask))) }
func (r *RDVBUSPULSE) StoreBits(mask, b DVBUSPULSE) { r.U32.StoreBits(uint32(mask), uint32(b)) }
func (r *RDVBUSPULSE) SetBits(mask DVBUSPULSE) { r.U32.SetBits(uint32(mask)) }
func (r *RDVBUSPULSE) ClearBits(mask DVBUSPULSE) { r.U32.ClearBits(uint32(mask)) }
func (r *RDVBUSPULSE) Load() DVBUSPULSE { return DVBUSPULSE(r.U32.Load()) }
func (r *RDVBUSPULSE) Store(b DVBUSPULSE) { r.U32.Store(uint32(b)) }
func (r *RDVBUSPULSE) AtomicStoreBits(mask, b DVBUSPULSE) {
	r.U32.AtomicStoreBits(uint32(mask), uint32(b))
}
func (r *RDVBUSPULSE) AtomicSetBits(mask DVBUSPULSE) { r.U32.AtomicSetBits(uint32(mask)) }
func (r *RDVBUSPULSE) AtomicClearBits(mask DVBUSPULSE) { r.U32.AtomicClearBits(uint32(mask)) }

type RMDVBUSPULSE struct{ mmio.UM32 }

func (rm RMDVBUSPULSE) Load() DVBUSPULSE { return DVBUSPULSE(rm.UM32.Load()) }
func (rm RMDVBUSPULSE) Store(b DVBUSPULSE) { rm.UM32.Store(uint32(b)) }

// Named bit-field accessor of DVBUSPULSE.
func (p *USB_OTG_Device_Periph) DVBUSP() RMDVBUSPULSE {
	return RMDVBUSPULSE{mmio.UM32{&p.DVBUSPULSE.U32, uint32(DVBUSP)}}
}
// DTHRCTL: bit-field helper types and accessors for the threshold control
// register. Same generated pattern as DCFG above.
type DTHRCTL uint32

func (b DTHRCTL) Field(mask DTHRCTL) int {
	return bits.Field32(uint32(b), uint32(mask))
}
func (mask DTHRCTL) J(v int) DTHRCTL {
	return DTHRCTL(bits.MakeField32(v, uint32(mask)))
}

type RDTHRCTL struct{ mmio.U32 }

func (r *RDTHRCTL) Bits(mask DTHRCTL) DTHRCTL { return DTHRCTL(r.U32.Bits(uint32(mask))) }
func (r *RDTHRCTL) StoreBits(mask, b DTHRCTL) { r.U32.StoreBits(uint32(mask), uint32(b)) }
func (r *RDTHRCTL) SetBits(mask DTHRCTL) { r.U32.SetBits(uint32(mask)) }
func (r *RDTHRCTL) ClearBits(mask DTHRCTL) { r.U32.ClearBits(uint32(mask)) }
func (r *RDTHRCTL) Load() DTHRCTL { return DTHRCTL(r.U32.Load()) }
func (r *RDTHRCTL) Store(b DTHRCTL) { r.U32.Store(uint32(b)) }
func (r *RDTHRCTL) AtomicStoreBits(mask, b DTHRCTL) { r.U32.AtomicStoreBits(uint32(mask), uint32(b)) }
func (r *RDTHRCTL) AtomicSetBits(mask DTHRCTL) { r.U32.AtomicSetBits(uint32(mask)) }
func (r *RDTHRCTL) AtomicClearBits(mask DTHRCTL) { r.U32.AtomicClearBits(uint32(mask)) }

type RMDTHRCTL struct{ mmio.UM32 }

func (rm RMDTHRCTL) Load() DTHRCTL { return DTHRCTL(rm.UM32.Load()) }
func (rm RMDTHRCTL) Store(b DTHRCTL) { rm.UM32.Store(uint32(b)) }

// Named bit-field accessors of DTHRCTL.
func (p *USB_OTG_Device_Periph) NONISOTHREN() RMDTHRCTL {
	return RMDTHRCTL{mmio.UM32{&p.DTHRCTL.U32, uint32(NONISOTHREN)}}
}
func (p *USB_OTG_Device_Periph) ISOTHREN() RMDTHRCTL {
	return RMDTHRCTL{mmio.UM32{&p.DTHRCTL.U32, uint32(ISOTHREN)}}
}
func (p *USB_OTG_Device_Periph) TXTHRLEN() RMDTHRCTL {
	return RMDTHRCTL{mmio.UM32{&p.DTHRCTL.U32, uint32(TXTHRLEN)}}
}
func (p *USB_OTG_Device_Periph) RXTHREN() RMDTHRCTL {
	return RMDTHRCTL{mmio.UM32{&p.DTHRCTL.U32, uint32(RXTHREN)}}
}
func (p *USB_OTG_Device_Periph) RXTHRLEN() RMDTHRCTL {
	return RMDTHRCTL{mmio.UM32{&p.DTHRCTL.U32, uint32(RXTHRLEN)}}
}
func (p *USB_OTG_Device_Periph) ARPEN() RMDTHRCTL {
	return RMDTHRCTL{mmio.UM32{&p.DTHRCTL.U32, uint32(ARPEN)}}
}
// DIEPEMPMSK: bit-field helper types and accessors for the IN endpoint FIFO
// empty interrupt mask register. Same generated pattern as DCFG above.
type DIEPEMPMSK uint32

func (b DIEPEMPMSK) Field(mask DIEPEMPMSK) int {
	return bits.Field32(uint32(b), uint32(mask))
}
func (mask DIEPEMPMSK) J(v int) DIEPEMPMSK {
	return DIEPEMPMSK(bits.MakeField32(v, uint32(mask)))
}

type RDIEPEMPMSK struct{ mmio.U32 }

func (r *RDIEPEMPMSK) Bits(mask DIEPEMPMSK) DIEPEMPMSK { return DIEPEMPMSK(r.U32.Bits(uint32(mask))) }
func (r *RDIEPEMPMSK) StoreBits(mask, b DIEPEMPMSK) { r.U32.StoreBits(uint32(mask), uint32(b)) }
func (r *RDIEPEMPMSK) SetBits(mask DIEPEMPMSK) { r.U32.SetBits(uint32(mask)) }
func (r *RDIEPEMPMSK) ClearBits(mask DIEPEMPMSK) { r.U32.ClearBits(uint32(mask)) }
func (r *RDIEPEMPMSK) Load() DIEPEMPMSK { return DIEPEMPMSK(r.U32.Load()) }
func (r *RDIEPEMPMSK) Store(b DIEPEMPMSK) { r.U32.Store(uint32(b)) }
func (r *RDIEPEMPMSK) AtomicStoreBits(mask, b DIEPEMPMSK) {
	r.U32.AtomicStoreBits(uint32(mask), uint32(b))
}
func (r *RDIEPEMPMSK) AtomicSetBits(mask DIEPEMPMSK) { r.U32.AtomicSetBits(uint32(mask)) }
func (r *RDIEPEMPMSK) AtomicClearBits(mask DIEPEMPMSK) { r.U32.AtomicClearBits(uint32(mask)) }

type RMDIEPEMPMSK struct{ mmio.UM32 }

func (rm RMDIEPEMPMSK) Load() DIEPEMPMSK { return DIEPEMPMSK(rm.UM32.Load()) }
func (rm RMDIEPEMPMSK) Store(b DIEPEMPMSK) { rm.UM32.Store(uint32(b)) }

// Named bit-field accessor of DIEPEMPMSK.
func (p *USB_OTG_Device_Periph) INEPTXFEM() RMDIEPEMPMSK {
	return RMDIEPEMPMSK{mmio.UM32{&p.DIEPEMPMSK.U32, uint32(INEPTXFEM)}}
}
// DEACHINT: bit-field helper types and accessors for the each-endpoint
// interrupt register. Same generated pattern as DCFG above.
type DEACHINT uint32

func (b DEACHINT) Field(mask DEACHINT) int {
	return bits.Field32(uint32(b), uint32(mask))
}
func (mask DEACHINT) J(v int) DEACHINT {
	return DEACHINT(bits.MakeField32(v, uint32(mask)))
}

type RDEACHINT struct{ mmio.U32 }

func (r *RDEACHINT) Bits(mask DEACHINT) DEACHINT { return DEACHINT(r.U32.Bits(uint32(mask))) }
func (r *RDEACHINT) StoreBits(mask, b DEACHINT) { r.U32.StoreBits(uint32(mask), uint32(b)) }
func (r *RDEACHINT) SetBits(mask DEACHINT) { r.U32.SetBits(uint32(mask)) }
func (r *RDEACHINT) ClearBits(mask DEACHINT) { r.U32.ClearBits(uint32(mask)) }
func (r *RDEACHINT) Load() DEACHINT { return DEACHINT(r.U32.Load()) }
func (r *RDEACHINT) Store(b DEACHINT) { r.U32.Store(uint32(b)) }
func (r *RDEACHINT) AtomicStoreBits(mask, b DEACHINT) { r.U32.AtomicStoreBits(uint32(mask), uint32(b)) }
func (r *RDEACHINT) AtomicSetBits(mask DEACHINT) { r.U32.AtomicSetBits(uint32(mask)) }
func (r *RDEACHINT) AtomicClearBits(mask DEACHINT) { r.U32.AtomicClearBits(uint32(mask)) }

type RMDEACHINT struct{ mmio.UM32 }

func (rm RMDEACHINT) Load() DEACHINT { return DEACHINT(rm.UM32.Load()) }
func (rm RMDEACHINT) Store(b DEACHINT) { rm.UM32.Store(uint32(b)) }

// Named bit-field accessors of DEACHINT.
func (p *USB_OTG_Device_Periph) IEP1INT() RMDEACHINT {
	return RMDEACHINT{mmio.UM32{&p.DEACHINT.U32, uint32(IEP1INT)}}
}
func (p *USB_OTG_Device_Periph) OEP1INT() RMDEACHINT {
	return RMDEACHINT{mmio.UM32{&p.DEACHINT.U32, uint32(OEP1INT)}}
}
// DEACHMSK: bit-field helper types for the each-endpoint interrupt mask
// register. Same generated pattern as DCFG above; no named bit-field
// accessors were generated for this register.
type DEACHMSK uint32

func (b DEACHMSK) Field(mask DEACHMSK) int {
	return bits.Field32(uint32(b), uint32(mask))
}
func (mask DEACHMSK) J(v int) DEACHMSK {
	return DEACHMSK(bits.MakeField32(v, uint32(mask)))
}

type RDEACHMSK struct{ mmio.U32 }

func (r *RDEACHMSK) Bits(mask DEACHMSK) DEACHMSK { return DEACHMSK(r.U32.Bits(uint32(mask))) }
func (r *RDEACHMSK) StoreBits(mask, b DEACHMSK) { r.U32.StoreBits(uint32(mask), uint32(b)) }
func (r *RDEACHMSK) SetBits(mask DEACHMSK) { r.U32.SetBits(uint32(mask)) }
func (r *RDEACHMSK) ClearBits(mask DEACHMSK) { r.U32.ClearBits(uint32(mask)) }
func (r *RDEACHMSK) Load() DEACHMSK { return DEACHMSK(r.U32.Load()) }
func (r *RDEACHMSK) Store(b DEACHMSK) { r.U32.Store(uint32(b)) }
func (r *RDEACHMSK) AtomicStoreBits(mask, b DEACHMSK) { r.U32.AtomicStoreBits(uint32(mask), uint32(b)) }
func (r *RDEACHMSK) AtomicSetBits(mask DEACHMSK) { r.U32.AtomicSetBits(uint32(mask)) }
func (r *RDEACHMSK) AtomicClearBits(mask DEACHMSK) { r.U32.AtomicClearBits(uint32(mask)) }

type RMDEACHMSK struct{ mmio.UM32 }

func (rm RMDEACHMSK) Load() DEACHMSK { return DEACHMSK(rm.UM32.Load()) }
func (rm RMDEACHMSK) Store(b DEACHMSK) { rm.UM32.Store(uint32(b)) }
// DINEP1MSK: bit-field helper types for the dedicated IN endpoint 1
// interrupt mask register. Same generated pattern as DCFG above; no named
// bit-field accessors were generated for this register.
type DINEP1MSK uint32

func (b DINEP1MSK) Field(mask DINEP1MSK) int {
	return bits.Field32(uint32(b), uint32(mask))
}
func (mask DINEP1MSK) J(v int) DINEP1MSK {
	return DINEP1MSK(bits.MakeField32(v, uint32(mask)))
}

type RDINEP1MSK struct{ mmio.U32 }

func (r *RDINEP1MSK) Bits(mask DINEP1MSK) DINEP1MSK { return DINEP1MSK(r.U32.Bits(uint32(mask))) }
func (r *RDINEP1MSK) StoreBits(mask, b DINEP1MSK) { r.U32.StoreBits(uint32(mask), uint32(b)) }
func (r *RDINEP1MSK) SetBits(mask DINEP1MSK) { r.U32.SetBits(uint32(mask)) }
func (r *RDINEP1MSK) ClearBits(mask DINEP1MSK) { r.U32.ClearBits(uint32(mask)) }
func (r *RDINEP1MSK) Load() DINEP1MSK { return DINEP1MSK(r.U32.Load()) }
func (r *RDINEP1MSK) Store(b DINEP1MSK) { r.U32.Store(uint32(b)) }
func (r *RDINEP1MSK) AtomicStoreBits(mask, b DINEP1MSK) {
	r.U32.AtomicStoreBits(uint32(mask), uint32(b))
}
func (r *RDINEP1MSK) AtomicSetBits(mask DINEP1MSK) { r.U32.AtomicSetBits(uint32(mask)) }
func (r *RDINEP1MSK) AtomicClearBits(mask DINEP1MSK) { r.U32.AtomicClearBits(uint32(mask)) }

type RMDINEP1MSK struct{ mmio.UM32 }

func (rm RMDINEP1MSK) Load() DINEP1MSK { return DINEP1MSK(rm.UM32.Load()) }
func (rm RMDINEP1MSK) Store(b DINEP1MSK) { rm.UM32.Store(uint32(b)) }
// DOUTEP1MSK: bit-field helper types for the dedicated OUT endpoint 1
// interrupt mask register. Same generated pattern as DCFG above; no named
// bit-field accessors were generated for this register.
type DOUTEP1MSK uint32

func (b DOUTEP1MSK) Field(mask DOUTEP1MSK) int {
	return bits.Field32(uint32(b), uint32(mask))
}
func (mask DOUTEP1MSK) J(v int) DOUTEP1MSK {
	return DOUTEP1MSK(bits.MakeField32(v, uint32(mask)))
}

type RDOUTEP1MSK struct{ mmio.U32 }

func (r *RDOUTEP1MSK) Bits(mask DOUTEP1MSK) DOUTEP1MSK { return DOUTEP1MSK(r.U32.Bits(uint32(mask))) }
func (r *RDOUTEP1MSK) StoreBits(mask, b DOUTEP1MSK) { r.U32.StoreBits(uint32(mask), uint32(b)) }
func (r *RDOUTEP1MSK) SetBits(mask DOUTEP1MSK) { r.U32.SetBits(uint32(mask)) }
func (r *RDOUTEP1MSK) ClearBits(mask DOUTEP1MSK) { r.U32.ClearBits(uint32(mask)) }
func (r *RDOUTEP1MSK) Load() DOUTEP1MSK { return DOUTEP1MSK(r.U32.Load()) }
func (r *RDOUTEP1MSK) Store(b DOUTEP1MSK) { r.U32.Store(uint32(b)) }
func (r *RDOUTEP1MSK) AtomicStoreBits(mask, b DOUTEP1MSK) {
	r.U32.AtomicStoreBits(uint32(mask), uint32(b))
}
func (r *RDOUTEP1MSK) AtomicSetBits(mask DOUTEP1MSK) { r.U32.AtomicSetBits(uint32(mask)) }
func (r *RDOUTEP1MSK) AtomicClearBits(mask DOUTEP1MSK) { r.U32.AtomicClearBits(uint32(mask)) }

type RMDOUTEP1MSK struct{ mmio.UM32 }

func (rm RMDOUTEP1MSK) Load() DOUTEP1MSK { return DOUTEP1MSK(rm.UM32.Load()) }
func (rm RMDOUTEP1MSK) Store(b DOUTEP1MSK) { rm.UM32.Store(uint32(b)) }
| |
<|file_name|>test_worker.js<|end_file_name|><|fim▁begin|>importScripts('../js/examples.js', './solutions.js', '../js/Puzzle.js');
// Worker entry point: the page posts 'start' to begin the test run.
onmessage = function (msg) {
    if (msg.data !== 'start') {
        return;
    }
    runTests();
};
// Run every named solution case in order, numbering them from 1,
// then tell the page the whole run has finished.
function runTests() {
    var index = 0;
    for (var name in solutions) {
        index += 1;
        testCase(index, name);
    }
    postMessage({
        type: 'finished'
    });
}
// Forward an HTML fragment to the page so it can append it to the report.
function append_text(html) {
    postMessage({ type: 'append', text: html });
}
// Run one named example through the solver, compare every produced solution
// against the expected set for that example, report per-solution results and
// a final pass/fail verdict, and post the pass statistic back to the page.
function testCase(count, name) {
    append_text("<h2> " + count + ". " + examples[name].width + " x " + examples[name].height + " " + name + " </h2>");
    append_text("<p>Solving . . .</p>");
    var start = new Date().getTime();
    var puzzle = new Puzzle(examples[name]);
    puzzle.solve(void 0);
    var duration = (new Date().getTime() - start) / 1000;
    append_text("<p>Took " + duration + " seconds</p>");
    var passed = true;
    if (puzzle.solutions.length !== solutions[name].length) {
        append_text("<p>Number of solutions are not equal (" + solutions[name].length + " should be " + puzzle.solutions.length + ")</p>");
        passed = false;
    }
    else {
        for (var i = 0; i < puzzle.solutions.length; i++) {
            var solution_passed = false;
            for (var j = 0; j < solutions[name].length; j++) {
                // BUG FIX: solutions[name] is itself the array of expected
                // solutions (see the .length checks above), so index it
                // directly; the old `solutions[name].solutions[j]` was always
                // undefined, making compare_states throw a TypeError.
                if (compare_states(puzzle.solutions[i], solutions[name][j]))
                    solution_passed = true;
            }
            if (solution_passed) {
                append_text("<p>Solution " + (i + 1) + " passed.</p>");
            }
            else {
                append_text("<p>Solution " + (i + 1) + " did not pass.</p>");
                passed = false;
            }
        }
    }
    if (passed) {
        append_text('<p style="font-weight: bold; color: green;">Testcase passed!</p>');
    }
    else {
        append_text('<p style="font-weight: bold; color: red;">Testcase did not pass!</p>');
    }
    postMessage({
        type: 'statistics',
        passed: passed
    });
    append_text('<hr>');
}
// Deep-compare two 2-D solution grids; true only when both have identical
// dimensions and every cell is strictly equal.
function compare_states(state1, state2) {
    if (state1.length !== state2.length) {
        return false;
    }
    return state1.every(function (row, i) {
        var other = state2[i];
        if (row.length !== other.length) {
            return false;
        }
        return row.every(function (cell, j) {
            return cell === other[j];
        });
    });
}
| |
<|file_name|>KafkaST.java<|end_file_name|><|fim▁begin|>/*
* Copyright Strimzi authors.
* License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
*/
package io.strimzi.systemtest.kafka;
import io.fabric8.kubernetes.api.model.ConfigMap;
import io.fabric8.kubernetes.api.model.HasMetadata;
import io.fabric8.kubernetes.api.model.PersistentVolumeClaim;
import io.fabric8.kubernetes.api.model.Pod;
import io.fabric8.kubernetes.api.model.Quantity;
import io.fabric8.kubernetes.api.model.ResourceRequirementsBuilder;
import io.fabric8.kubernetes.api.model.Secret;
import io.fabric8.kubernetes.api.model.SecurityContextBuilder;
import io.fabric8.kubernetes.api.model.Service;
import io.fabric8.kubernetes.api.model.apps.StatefulSet;
import io.fabric8.kubernetes.client.dsl.base.CustomResourceDefinitionContext;
import io.strimzi.api.kafka.Crds;
import io.strimzi.api.kafka.KafkaTopicList;
import io.strimzi.api.kafka.model.EntityOperatorSpec;
import io.strimzi.api.kafka.model.EntityTopicOperatorSpec;
import io.strimzi.api.kafka.model.EntityUserOperatorSpec;
import io.strimzi.api.kafka.model.Kafka;
import io.strimzi.api.kafka.model.KafkaClusterSpec;
import io.strimzi.api.kafka.model.KafkaResources;
import io.strimzi.api.kafka.model.KafkaTopic;
import io.strimzi.api.kafka.model.SystemProperty;
import io.strimzi.api.kafka.model.SystemPropertyBuilder;
import io.strimzi.api.kafka.model.ZookeeperClusterSpec;
import io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListener;
import io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerBuilder;
import io.strimzi.api.kafka.model.listener.arraylistener.KafkaListenerType;
import io.strimzi.api.kafka.model.storage.JbodStorage;
import io.strimzi.api.kafka.model.storage.JbodStorageBuilder;
import io.strimzi.api.kafka.model.storage.PersistentClaimStorageBuilder;
import io.strimzi.operator.common.model.Labels;
import io.strimzi.systemtest.AbstractST;
import io.strimzi.systemtest.Constants;
import io.strimzi.systemtest.Environment;
import io.strimzi.systemtest.resources.operator.SetupClusterOperator;
import io.strimzi.systemtest.annotations.OpenShiftOnly;
import io.strimzi.systemtest.annotations.ParallelNamespaceTest;
import io.strimzi.systemtest.cli.KafkaCmdClient;
import io.strimzi.systemtest.kafkaclients.internalClients.InternalKafkaClient;
import io.strimzi.systemtest.resources.ResourceOperation;
import io.strimzi.systemtest.resources.crd.KafkaResource;
import io.strimzi.systemtest.resources.crd.KafkaTopicResource;
import io.strimzi.systemtest.templates.crd.KafkaClientsTemplates;
import io.strimzi.systemtest.templates.crd.KafkaTemplates;
import io.strimzi.systemtest.templates.crd.KafkaTopicTemplates;
import io.strimzi.systemtest.templates.crd.KafkaUserTemplates;
import io.strimzi.systemtest.utils.StUtils;
import io.strimzi.systemtest.utils.kafkaUtils.KafkaTopicUtils;
import io.strimzi.systemtest.utils.kafkaUtils.KafkaUtils;
import io.strimzi.systemtest.utils.kubeUtils.controllers.ConfigMapUtils;
import io.strimzi.systemtest.utils.kubeUtils.controllers.DeploymentUtils;
import io.strimzi.systemtest.utils.kubeUtils.controllers.StatefulSetUtils;
import io.strimzi.systemtest.utils.kubeUtils.objects.PersistentVolumeClaimUtils;
import io.strimzi.systemtest.utils.kubeUtils.objects.PodUtils;
import io.strimzi.systemtest.utils.kubeUtils.objects.ServiceUtils;
import io.strimzi.test.TestUtils;
import io.strimzi.test.executor.ExecResult;
import io.strimzi.test.timemeasuring.Operation;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.hamcrest.CoreMatchers;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.extension.ExtensionContext;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Properties;
import java.util.stream.Collectors;
import static io.strimzi.api.kafka.model.KafkaResources.kafkaStatefulSetName;
import static io.strimzi.api.kafka.model.KafkaResources.zookeeperStatefulSetName;
import static io.strimzi.systemtest.Constants.CRUISE_CONTROL;
import static io.strimzi.systemtest.Constants.INTERNAL_CLIENTS_USED;
import static io.strimzi.systemtest.Constants.LOADBALANCER_SUPPORTED;
import static io.strimzi.systemtest.Constants.REGRESSION;
import static io.strimzi.systemtest.Constants.STATEFUL_SET;
import static io.strimzi.systemtest.utils.StUtils.configMap2Properties;
import static io.strimzi.systemtest.utils.StUtils.stringToProperties;
import static io.strimzi.test.TestUtils.fromYamlString;
import static io.strimzi.test.TestUtils.map;
import static io.strimzi.test.k8s.KubeClusterResource.cmdKubeClient;
import static io.strimzi.test.k8s.KubeClusterResource.kubeClient;
import static java.util.Arrays.asList;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.CoreMatchers.nullValue;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.anyOf;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.emptyOrNullString;
import static org.hamcrest.Matchers.hasItem;
import static org.hamcrest.Matchers.hasItems;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.notNullValue;
import static org.junit.jupiter.api.Assumptions.assumeFalse;
@Tag(REGRESSION)
@SuppressWarnings("checkstyle:ClassFanOutComplexity")
class KafkaST extends AbstractST {
private static final Logger LOGGER = LogManager.getLogger(KafkaST.class);
private static final String TEMPLATE_PATH = TestUtils.USER_PATH + "/../packaging/examples/templates/cluster-operator";
public static final String NAMESPACE = "kafka-cluster-test";
private static final String OPENSHIFT_CLUSTER_NAME = "openshift-my-cluster";
    /**
     * Deploys an ephemeral Kafka cluster from the "strimzi-ephemeral" OpenShift
     * template, verifies the docker images and labels of the resulting workloads,
     * then deletes the Kafka resource and waits until the Kafka and ZooKeeper
     * StatefulSets and the Entity Operator deployment are gone.
     */
    @ParallelNamespaceTest
    @OpenShiftOnly
    void testDeployKafkaClusterViaTemplate(ExtensionContext extensionContext) {
        final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
        cluster.createCustomResources(extensionContext, TEMPLATE_PATH);
        String templateName = "strimzi-ephemeral";
        cmdKubeClient(namespaceName).createResourceAndApply(templateName, map("CLUSTER_NAME", OPENSHIFT_CLUSTER_NAME));
        // Wait for the 3+3 broker/ZooKeeper pods and the Entity Operator created by the template.
        StatefulSetUtils.waitForAllStatefulSetPodsReady(namespaceName, KafkaResources.zookeeperStatefulSetName(OPENSHIFT_CLUSTER_NAME), 3, ResourceOperation.getTimeoutForResourceReadiness(STATEFUL_SET));
        StatefulSetUtils.waitForAllStatefulSetPodsReady(namespaceName, KafkaResources.kafkaStatefulSetName(OPENSHIFT_CLUSTER_NAME), 3, ResourceOperation.getTimeoutForResourceReadiness(STATEFUL_SET));
        DeploymentUtils.waitForDeploymentAndPodsReady(namespaceName, KafkaResources.entityOperatorDeploymentName(OPENSHIFT_CLUSTER_NAME), 1);
        //Testing docker images
        testDockerImagesForKafkaCluster(OPENSHIFT_CLUSTER_NAME, NAMESPACE, namespaceName, 3, 3, false);
        //Testing labels
        verifyLabelsForKafkaCluster(NAMESPACE, namespaceName, OPENSHIFT_CLUSTER_NAME, templateName);
        LOGGER.info("Deleting Kafka cluster {} after test", OPENSHIFT_CLUSTER_NAME);
        cmdKubeClient(namespaceName).deleteByName("Kafka", OPENSHIFT_CLUSTER_NAME);
        //Wait for kafka deletion
        cmdKubeClient(namespaceName).waitForResourceDeletion(Kafka.RESOURCE_KIND, OPENSHIFT_CLUSTER_NAME);
        // Force-delete any remaining cluster pods so the StatefulSet deletion below can complete.
        // NOTE(review): unlike the namespaced calls above, this overload takes no namespace
        // argument — confirm it targets namespaceName and not the default namespace.
        kubeClient(namespaceName).listPods(namespaceName).stream()
            .filter(p -> p.getMetadata().getName().startsWith(OPENSHIFT_CLUSTER_NAME))
            .forEach(p -> PodUtils.deletePodWithWait(p.getMetadata().getName()));
        StatefulSetUtils.waitForStatefulSetDeletion(namespaceName, KafkaResources.kafkaStatefulSetName(OPENSHIFT_CLUSTER_NAME));
        StatefulSetUtils.waitForStatefulSetDeletion(namespaceName, KafkaResources.zookeeperStatefulSetName(OPENSHIFT_CLUSTER_NAME));
        DeploymentUtils.waitForDeploymentDeletion(namespaceName, KafkaResources.entityOperatorDeploymentName(OPENSHIFT_CLUSTER_NAME));
    }
/**
 * Verifies that setting {@code spec.entityOperator} to null on the Kafka resource removes
 * the Entity Operator deployment and its pod.
 */
@ParallelNamespaceTest
void testEODeletion(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());

    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3).build());

    // Get pod name to check termination process
    Pod pod = kubeClient(namespaceName).listPods(namespaceName).stream()
        .filter(p -> p.getMetadata().getName().startsWith(KafkaResources.entityOperatorDeploymentName(clusterName)))
        .findAny()
        .orElseThrow();

    assertThat("Entity operator pod does not exist", pod, notNullValue());

    LOGGER.info("Setting entity operator to null");
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, kafka -> kafka.getSpec().setEntityOperator(null), namespaceName);

    // Wait when EO (UO + TO) will be removed
    DeploymentUtils.waitForDeploymentDeletion(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName));
    PodUtils.deletePodWithWait(namespaceName, pod.getMetadata().getName());

    LOGGER.info("Entity operator was deleted");
}
@ParallelNamespaceTest
@SuppressWarnings({"checkstyle:MethodLength", "checkstyle:JavaNCSS"})
void testCustomAndUpdatedValues(ExtensionContext extensionContext) {
final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
LinkedHashMap<String, String> envVarGeneral = new LinkedHashMap<>();
envVarGeneral.put("TEST_ENV_1", "test.env.one");
envVarGeneral.put("TEST_ENV_2", "test.env.two");
LinkedHashMap<String, String> envVarUpdated = new LinkedHashMap<>();
envVarUpdated.put("TEST_ENV_2", "updated.test.env.two");
envVarUpdated.put("TEST_ENV_3", "test.env.three");
// Kafka Broker config
Map<String, Object> kafkaConfig = new HashMap<>();
kafkaConfig.put("offsets.topic.replication.factor", "1");
kafkaConfig.put("transaction.state.log.replication.factor", "1");
kafkaConfig.put("default.replication.factor", "1");
Map<String, Object> updatedKafkaConfig = new HashMap<>();
updatedKafkaConfig.put("offsets.topic.replication.factor", "2");
updatedKafkaConfig.put("transaction.state.log.replication.factor", "2");
updatedKafkaConfig.put("default.replication.factor", "2");
// Zookeeper Config
Map<String, Object> zookeeperConfig = new HashMap<>();
zookeeperConfig.put("tickTime", "2000");
zookeeperConfig.put("initLimit", "5");
zookeeperConfig.put("syncLimit", "2");
zookeeperConfig.put("autopurge.purgeInterval", "1");
Map<String, Object> updatedZookeeperConfig = new HashMap<>();
updatedZookeeperConfig.put("tickTime", "2500");
updatedZookeeperConfig.put("initLimit", "3");
updatedZookeeperConfig.put("syncLimit", "5");
final int initialDelaySeconds = 30;
final int timeoutSeconds = 10;
final int updatedInitialDelaySeconds = 31;
final int updatedTimeoutSeconds = 11;
final int periodSeconds = 10;
final int successThreshold = 1;
final int failureThreshold = 3;
final int updatedPeriodSeconds = 5;
final int updatedFailureThreshold = 1;
resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 2)
.editSpec()
.editKafka()
.withNewReadinessProbe()
.withInitialDelaySeconds(initialDelaySeconds)
.withTimeoutSeconds(timeoutSeconds)
.withPeriodSeconds(periodSeconds)
.withSuccessThreshold(successThreshold)
.withFailureThreshold(failureThreshold)
.endReadinessProbe()
.withNewLivenessProbe()
.withInitialDelaySeconds(initialDelaySeconds)
.withTimeoutSeconds(timeoutSeconds)
.withPeriodSeconds(periodSeconds)
.withSuccessThreshold(successThreshold)
.withFailureThreshold(failureThreshold)
.endLivenessProbe()
.withConfig(kafkaConfig)
.withNewTemplate()
.withNewKafkaContainer()
.withEnv(StUtils.createContainerEnvVarsFromMap(envVarGeneral))
.endKafkaContainer()<|fim▁hole|> .endKafka()
.editZookeeper()
.withReplicas(2)
.withNewReadinessProbe()
.withInitialDelaySeconds(initialDelaySeconds)
.withTimeoutSeconds(timeoutSeconds)
.endReadinessProbe()
.withNewLivenessProbe()
.withInitialDelaySeconds(initialDelaySeconds)
.withTimeoutSeconds(timeoutSeconds)
.endLivenessProbe()
.withConfig(zookeeperConfig)
.withNewTemplate()
.withNewZookeeperContainer()
.withEnv(StUtils.createContainerEnvVarsFromMap(envVarGeneral))
.endZookeeperContainer()
.endTemplate()
.endZookeeper()
.editEntityOperator()
.withNewTemplate()
.withNewTopicOperatorContainer()
.withEnv(StUtils.createContainerEnvVarsFromMap(envVarGeneral))
.endTopicOperatorContainer()
.withNewUserOperatorContainer()
.withEnv(StUtils.createContainerEnvVarsFromMap(envVarGeneral))
.endUserOperatorContainer()
.withNewTlsSidecarContainer()
.withEnv(StUtils.createContainerEnvVarsFromMap(envVarGeneral))
.endTlsSidecarContainer()
.endTemplate()
.editUserOperator()
.withNewReadinessProbe()
.withInitialDelaySeconds(initialDelaySeconds)
.withTimeoutSeconds(timeoutSeconds)
.withPeriodSeconds(periodSeconds)
.withSuccessThreshold(successThreshold)
.withFailureThreshold(failureThreshold)
.endReadinessProbe()
.withNewLivenessProbe()
.withInitialDelaySeconds(initialDelaySeconds)
.withTimeoutSeconds(timeoutSeconds)
.withPeriodSeconds(periodSeconds)
.withSuccessThreshold(successThreshold)
.withFailureThreshold(failureThreshold)
.endLivenessProbe()
.endUserOperator()
.editTopicOperator()
.withNewReadinessProbe()
.withInitialDelaySeconds(initialDelaySeconds)
.withTimeoutSeconds(timeoutSeconds)
.withPeriodSeconds(periodSeconds)
.withSuccessThreshold(successThreshold)
.withFailureThreshold(failureThreshold)
.endReadinessProbe()
.withNewLivenessProbe()
.withInitialDelaySeconds(initialDelaySeconds)
.withTimeoutSeconds(timeoutSeconds)
.withPeriodSeconds(periodSeconds)
.withSuccessThreshold(successThreshold)
.withFailureThreshold(failureThreshold)
.endLivenessProbe()
.endTopicOperator()
.withNewTlsSidecar()
.withNewReadinessProbe()
.withInitialDelaySeconds(initialDelaySeconds)
.withTimeoutSeconds(timeoutSeconds)
.withPeriodSeconds(periodSeconds)
.withSuccessThreshold(successThreshold)
.withFailureThreshold(failureThreshold)
.endReadinessProbe()
.withNewLivenessProbe()
.withInitialDelaySeconds(initialDelaySeconds)
.withTimeoutSeconds(timeoutSeconds)
.withPeriodSeconds(periodSeconds)
.withSuccessThreshold(successThreshold)
.withFailureThreshold(failureThreshold)
.endLivenessProbe()
.endTlsSidecar()
.endEntityOperator()
.endSpec()
.build());
final Map<String, String> kafkaSnapshot = StatefulSetUtils.ssSnapshot(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName));
final Map<String, String> zkSnapshot = StatefulSetUtils.ssSnapshot(namespaceName, KafkaResources.zookeeperStatefulSetName(clusterName));
final Map<String, String> eoPod = DeploymentUtils.depSnapshot(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName));
LOGGER.info("Verify values before update");
checkReadinessLivenessProbe(namespaceName, kafkaStatefulSetName(clusterName), "kafka", initialDelaySeconds, timeoutSeconds,
periodSeconds, successThreshold, failureThreshold);
checkKafkaConfiguration(namespaceName, kafkaStatefulSetName(clusterName), kafkaConfig, clusterName);
checkSpecificVariablesInContainer(namespaceName, kafkaStatefulSetName(clusterName), "kafka", envVarGeneral);
String kafkaConfiguration = kubeClient().getConfigMap(namespaceName, KafkaResources.kafkaMetricsAndLogConfigMapName(clusterName)).getData().get("server.config");
assertThat(kafkaConfiguration, containsString("offsets.topic.replication.factor=1"));
assertThat(kafkaConfiguration, containsString("transaction.state.log.replication.factor=1"));
assertThat(kafkaConfiguration, containsString("default.replication.factor=1"));
String kafkaConfigurationFromPod = cmdKubeClient(namespaceName).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "cat", "/tmp/strimzi.properties").out();
assertThat(kafkaConfigurationFromPod, containsString("offsets.topic.replication.factor=1"));
assertThat(kafkaConfigurationFromPod, containsString("transaction.state.log.replication.factor=1"));
assertThat(kafkaConfigurationFromPod, containsString("default.replication.factor=1"));
LOGGER.info("Testing Zookeepers");
checkReadinessLivenessProbe(namespaceName, zookeeperStatefulSetName(clusterName), "zookeeper", initialDelaySeconds, timeoutSeconds,
periodSeconds, successThreshold, failureThreshold);
checkComponentConfiguration(namespaceName, zookeeperStatefulSetName(clusterName), "zookeeper", "ZOOKEEPER_CONFIGURATION", zookeeperConfig);
checkSpecificVariablesInContainer(namespaceName, zookeeperStatefulSetName(clusterName), "zookeeper", envVarGeneral);
LOGGER.info("Checking configuration of TO and UO");
checkReadinessLivenessProbe(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "topic-operator", initialDelaySeconds, timeoutSeconds,
periodSeconds, successThreshold, failureThreshold);
checkSpecificVariablesInContainer(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "topic-operator", envVarGeneral);
checkReadinessLivenessProbe(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "user-operator", initialDelaySeconds, timeoutSeconds,
periodSeconds, successThreshold, failureThreshold);
checkSpecificVariablesInContainer(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "user-operator", envVarGeneral);
checkReadinessLivenessProbe(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "tls-sidecar", initialDelaySeconds, timeoutSeconds,
periodSeconds, successThreshold, failureThreshold);
checkSpecificVariablesInContainer(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "tls-sidecar", envVarGeneral);
LOGGER.info("Updating configuration of Kafka cluster");
KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> {
KafkaClusterSpec kafkaClusterSpec = k.getSpec().getKafka();
kafkaClusterSpec.getLivenessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds);
kafkaClusterSpec.getReadinessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds);
kafkaClusterSpec.getLivenessProbe().setTimeoutSeconds(updatedTimeoutSeconds);
kafkaClusterSpec.getReadinessProbe().setTimeoutSeconds(updatedTimeoutSeconds);
kafkaClusterSpec.getLivenessProbe().setPeriodSeconds(updatedPeriodSeconds);
kafkaClusterSpec.getReadinessProbe().setPeriodSeconds(updatedPeriodSeconds);
kafkaClusterSpec.getLivenessProbe().setFailureThreshold(updatedFailureThreshold);
kafkaClusterSpec.getReadinessProbe().setFailureThreshold(updatedFailureThreshold);
kafkaClusterSpec.setConfig(updatedKafkaConfig);
kafkaClusterSpec.getTemplate().getKafkaContainer().setEnv(StUtils.createContainerEnvVarsFromMap(envVarUpdated));
ZookeeperClusterSpec zookeeperClusterSpec = k.getSpec().getZookeeper();
zookeeperClusterSpec.getLivenessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds);
zookeeperClusterSpec.getReadinessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds);
zookeeperClusterSpec.getLivenessProbe().setTimeoutSeconds(updatedTimeoutSeconds);
zookeeperClusterSpec.getReadinessProbe().setTimeoutSeconds(updatedTimeoutSeconds);
zookeeperClusterSpec.getLivenessProbe().setPeriodSeconds(updatedPeriodSeconds);
zookeeperClusterSpec.getReadinessProbe().setPeriodSeconds(updatedPeriodSeconds);
zookeeperClusterSpec.getLivenessProbe().setFailureThreshold(updatedFailureThreshold);
zookeeperClusterSpec.getReadinessProbe().setFailureThreshold(updatedFailureThreshold);
zookeeperClusterSpec.setConfig(updatedZookeeperConfig);
zookeeperClusterSpec.getTemplate().getZookeeperContainer().setEnv(StUtils.createContainerEnvVarsFromMap(envVarUpdated));
// Configuring TO and UO to use new values for InitialDelaySeconds and TimeoutSeconds
EntityOperatorSpec entityOperatorSpec = k.getSpec().getEntityOperator();
entityOperatorSpec.getTopicOperator().getLivenessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds);
entityOperatorSpec.getTopicOperator().getReadinessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds);
entityOperatorSpec.getTopicOperator().getLivenessProbe().setTimeoutSeconds(updatedTimeoutSeconds);
entityOperatorSpec.getTopicOperator().getReadinessProbe().setTimeoutSeconds(updatedTimeoutSeconds);
entityOperatorSpec.getTopicOperator().getLivenessProbe().setPeriodSeconds(updatedPeriodSeconds);
entityOperatorSpec.getTopicOperator().getReadinessProbe().setPeriodSeconds(updatedPeriodSeconds);
entityOperatorSpec.getTopicOperator().getLivenessProbe().setFailureThreshold(updatedFailureThreshold);
entityOperatorSpec.getTopicOperator().getReadinessProbe().setFailureThreshold(updatedFailureThreshold);
entityOperatorSpec.getUserOperator().getLivenessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds);
entityOperatorSpec.getUserOperator().getReadinessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds);
entityOperatorSpec.getUserOperator().getLivenessProbe().setTimeoutSeconds(updatedTimeoutSeconds);
entityOperatorSpec.getUserOperator().getReadinessProbe().setTimeoutSeconds(updatedTimeoutSeconds);
entityOperatorSpec.getUserOperator().getLivenessProbe().setPeriodSeconds(updatedPeriodSeconds);
entityOperatorSpec.getUserOperator().getReadinessProbe().setPeriodSeconds(updatedPeriodSeconds);
entityOperatorSpec.getUserOperator().getLivenessProbe().setFailureThreshold(updatedFailureThreshold);
entityOperatorSpec.getUserOperator().getReadinessProbe().setFailureThreshold(updatedFailureThreshold);
entityOperatorSpec.getTlsSidecar().getLivenessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds);
entityOperatorSpec.getTlsSidecar().getReadinessProbe().setInitialDelaySeconds(updatedInitialDelaySeconds);
entityOperatorSpec.getTlsSidecar().getLivenessProbe().setTimeoutSeconds(updatedTimeoutSeconds);
entityOperatorSpec.getTlsSidecar().getReadinessProbe().setTimeoutSeconds(updatedTimeoutSeconds);
entityOperatorSpec.getTlsSidecar().getLivenessProbe().setPeriodSeconds(updatedPeriodSeconds);
entityOperatorSpec.getTlsSidecar().getReadinessProbe().setPeriodSeconds(updatedPeriodSeconds);
entityOperatorSpec.getTlsSidecar().getLivenessProbe().setFailureThreshold(updatedFailureThreshold);
entityOperatorSpec.getTlsSidecar().getReadinessProbe().setFailureThreshold(updatedFailureThreshold);
entityOperatorSpec.getTemplate().getTopicOperatorContainer().setEnv(StUtils.createContainerEnvVarsFromMap(envVarUpdated));
entityOperatorSpec.getTemplate().getUserOperatorContainer().setEnv(StUtils.createContainerEnvVarsFromMap(envVarUpdated));
entityOperatorSpec.getTemplate().getTlsSidecarContainer().setEnv(StUtils.createContainerEnvVarsFromMap(envVarUpdated));
}, namespaceName);
StatefulSetUtils.waitTillSsHasRolled(namespaceName, KafkaResources.zookeeperStatefulSetName(clusterName), 2, zkSnapshot);
StatefulSetUtils.waitTillSsHasRolled(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName), 2, kafkaSnapshot);
DeploymentUtils.waitTillDepHasRolled(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), 1, eoPod);
KafkaUtils.waitForKafkaReady(namespaceName, clusterName);
LOGGER.info("Verify values after update");
checkReadinessLivenessProbe(namespaceName, kafkaStatefulSetName(clusterName), "kafka", updatedInitialDelaySeconds, updatedTimeoutSeconds,
updatedPeriodSeconds, successThreshold, updatedFailureThreshold);
checkKafkaConfiguration(namespaceName, kafkaStatefulSetName(clusterName), updatedKafkaConfig, clusterName);
checkSpecificVariablesInContainer(namespaceName, kafkaStatefulSetName(clusterName), "kafka", envVarUpdated);
kafkaConfiguration = kubeClient(namespaceName).getConfigMap(namespaceName, KafkaResources.kafkaMetricsAndLogConfigMapName(clusterName)).getData().get("server.config");
assertThat(kafkaConfiguration, containsString("offsets.topic.replication.factor=2"));
assertThat(kafkaConfiguration, containsString("transaction.state.log.replication.factor=2"));
assertThat(kafkaConfiguration, containsString("default.replication.factor=2"));
kafkaConfigurationFromPod = cmdKubeClient(namespaceName).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "cat", "/tmp/strimzi.properties").out();
assertThat(kafkaConfigurationFromPod, containsString("offsets.topic.replication.factor=2"));
assertThat(kafkaConfigurationFromPod, containsString("transaction.state.log.replication.factor=2"));
assertThat(kafkaConfigurationFromPod, containsString("default.replication.factor=2"));
LOGGER.info("Testing Zookeepers");
checkReadinessLivenessProbe(namespaceName, zookeeperStatefulSetName(clusterName), "zookeeper", updatedInitialDelaySeconds, updatedTimeoutSeconds,
updatedPeriodSeconds, successThreshold, updatedFailureThreshold);
checkComponentConfiguration(namespaceName, zookeeperStatefulSetName(clusterName), "zookeeper", "ZOOKEEPER_CONFIGURATION", updatedZookeeperConfig);
checkSpecificVariablesInContainer(namespaceName, zookeeperStatefulSetName(clusterName), "zookeeper", envVarUpdated);
LOGGER.info("Getting entity operator to check configuration of TO and UO");
checkReadinessLivenessProbe(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "topic-operator", updatedInitialDelaySeconds, updatedTimeoutSeconds,
updatedPeriodSeconds, successThreshold, updatedFailureThreshold);
checkSpecificVariablesInContainer(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "topic-operator", envVarUpdated);
checkReadinessLivenessProbe(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "user-operator", updatedInitialDelaySeconds, updatedTimeoutSeconds,
updatedPeriodSeconds, successThreshold, updatedFailureThreshold);
checkSpecificVariablesInContainer(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "user-operator", envVarUpdated);
checkReadinessLivenessProbe(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "tls-sidecar", updatedInitialDelaySeconds, updatedTimeoutSeconds,
updatedPeriodSeconds, successThreshold, updatedFailureThreshold);
checkSpecificVariablesInContainer(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), "tls-sidecar", envVarUpdated);
}
/**
 * Deploys a cluster with explicit JVM options (heap sizes, -XX flags, system properties)
 * and resource requests/limits for Kafka, ZooKeeper, TO and UO, verifies they are applied
 * to the containers, and checks that no rolling update is triggered afterwards.
 */
@ParallelNamespaceTest
void testJvmAndResources(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());

    ArrayList<SystemProperty> javaSystemProps = new ArrayList<>();
    javaSystemProps.add(new SystemPropertyBuilder().withName("javax.net.debug")
        .withValue("verbose").build());

    Map<String, String> jvmOptionsXX = new HashMap<>();
    jvmOptionsXX.put("UseG1GC", "true");

    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 1, 1)
        .editSpec()
            .editKafka()
                .withResources(new ResourceRequirementsBuilder()
                    .addToLimits("memory", new Quantity("1.5Gi"))
                    .addToLimits("cpu", new Quantity("1"))
                    .addToRequests("memory", new Quantity("1Gi"))
                    .addToRequests("cpu", new Quantity("50m"))
                    .build())
                .withNewJvmOptions()
                    .withXmx("1g")
                    .withXms("512m")
                    .withXx(jvmOptionsXX)
                .endJvmOptions()
            .endKafka()
            .editZookeeper()
                .withResources(
                    new ResourceRequirementsBuilder()
                        .addToLimits("memory", new Quantity("1G"))
                        .addToLimits("cpu", new Quantity("0.5"))
                        .addToRequests("memory", new Quantity("0.5G"))
                        .addToRequests("cpu", new Quantity("25m"))
                        .build())
                .withNewJvmOptions()
                    .withXmx("1G")
                    .withXms("512M")
                    .withXx(jvmOptionsXX)
                .endJvmOptions()
            .endZookeeper()
            .withNewEntityOperator()
                .withNewTopicOperator()
                    .withResources(
                        new ResourceRequirementsBuilder()
                            .addToLimits("memory", new Quantity("1024Mi"))
                            .addToLimits("cpu", new Quantity("500m"))
                            .addToRequests("memory", new Quantity("384Mi"))
                            .addToRequests("cpu", new Quantity("0.025"))
                            .build())
                    .withNewJvmOptions()
                        .withXmx("2G")
                        .withXms("1024M")
                        .withJavaSystemProperties(javaSystemProps)
                    .endJvmOptions()
                .endTopicOperator()
                .withNewUserOperator()
                    .withResources(
                        new ResourceRequirementsBuilder()
                            .addToLimits("memory", new Quantity("512M"))
                            .addToLimits("cpu", new Quantity("300m"))
                            .addToRequests("memory", new Quantity("256M"))
                            .addToRequests("cpu", new Quantity("30m"))
                            .build())
                    .withNewJvmOptions()
                        .withXmx("1G")
                        .withXms("512M")
                        .withJavaSystemProperties(javaSystemProps)
                    .endJvmOptions()
                .endUserOperator()
            .endEntityOperator()
        .endSpec()
        .build());

    // Make snapshots for Kafka cluster to make sure that there is no rolling update after CO reconciliation
    final String zkStsName = KafkaResources.zookeeperStatefulSetName(clusterName);
    final String kafkaStsName = kafkaStatefulSetName(clusterName);
    final String eoDepName = KafkaResources.entityOperatorDeploymentName(clusterName);
    final Map<String, String> zkPods = StatefulSetUtils.ssSnapshot(namespaceName, zkStsName);
    final Map<String, String> kafkaPods = StatefulSetUtils.ssSnapshot(namespaceName, kafkaStsName);
    final Map<String, String> eoPods = DeploymentUtils.depSnapshot(namespaceName, eoDepName);

    // Expected values are the canonical forms of the requested quantities (e.g. 1.5Gi == 1536Mi).
    assertResources(namespaceName, KafkaResources.kafkaPodName(clusterName, 0), "kafka",
        "1536Mi", "1", "1Gi", "50m");
    assertExpectedJavaOpts(namespaceName, KafkaResources.kafkaPodName(clusterName, 0), "kafka",
        "-Xmx1g", "-Xms512m", "-XX:+UseG1GC");

    assertResources(namespaceName, KafkaResources.zookeeperPodName(clusterName, 0), "zookeeper",
        "1G", "500m", "500M", "25m");
    assertExpectedJavaOpts(namespaceName, KafkaResources.zookeeperPodName(clusterName, 0), "zookeeper",
        "-Xmx1G", "-Xms512M", "-XX:+UseG1GC");

    Optional<Pod> pod = kubeClient(namespaceName).listPods(namespaceName)
        .stream().filter(p -> p.getMetadata().getName().startsWith(KafkaResources.entityOperatorDeploymentName(clusterName)))
        .findFirst();
    assertThat("EO pod does not exist", pod.isPresent(), is(true));

    assertResources(namespaceName, pod.get().getMetadata().getName(), "topic-operator",
        "1Gi", "500m", "384Mi", "25m");
    assertResources(namespaceName, pod.get().getMetadata().getName(), "user-operator",
        "512M", "300m", "256M", "30m");
    assertExpectedJavaOpts(namespaceName, pod.get().getMetadata().getName(), "topic-operator",
        "-Xmx2G", "-Xms1024M", null);
    assertExpectedJavaOpts(namespaceName, pod.get().getMetadata().getName(), "user-operator",
        "-Xmx1G", "-Xms512M", null);

    // Verify the -D system properties and heap opts landed in the TO/UO container env vars.
    String eoPod = eoPods.keySet().toArray()[0].toString();
    kubeClient(namespaceName).getPod(namespaceName, eoPod).getSpec().getContainers().forEach(container -> {
        if (!container.getName().equals("tls-sidecar")) {
            LOGGER.info("Check if -D java options are present in {}", container.getName());

            String javaSystemProp = container.getEnv().stream().filter(envVar ->
                envVar.getName().equals("STRIMZI_JAVA_SYSTEM_PROPERTIES")).findFirst().orElseThrow().getValue();
            String javaOpts = container.getEnv().stream().filter(envVar ->
                envVar.getName().equals("STRIMZI_JAVA_OPTS")).findFirst().orElseThrow().getValue();

            assertThat(javaSystemProp, is("-Djavax.net.debug=verbose"));

            if (container.getName().equals("topic-operator")) {
                assertThat(javaOpts, is("-Xms1024M -Xmx2G"));
            }

            if (container.getName().equals("user-operator")) {
                assertThat(javaOpts, is("-Xms512M -Xmx1G"));
            }
        }
    });

    LOGGER.info("Checking no rolling update for Kafka cluster");
    StatefulSetUtils.waitForNoRollingUpdate(namespaceName, zkStsName, zkPods);
    StatefulSetUtils.waitForNoRollingUpdate(namespaceName, kafkaStsName, kafkaPods);
    DeploymentUtils.waitForNoRollingUpdate(namespaceName, eoDepName, eoPods);
}
/**
 * Verifies the Topic Operator's bidirectional sync: topics created, updated and deleted
 * via the KafkaTopic CR are reflected in Kafka (checked through the pod CLI) and vice
 * versa.
 *
 * Fix vs. previous revision: the first KafkaTopic lookup used the default-namespace
 * {@code cmdKubeClient()} — in a parallel-namespace run that can read the wrong
 * namespace; it now uses {@code cmdKubeClient(namespaceName)} like the second lookup.
 */
@ParallelNamespaceTest
void testForTopicOperator(ExtensionContext extensionContext) throws InterruptedException {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());

    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3).build());

    final String topicName = KafkaTopicUtils.generateRandomNameOfTopic();
    final String cliTopicName = "topic-from-cli";

    // Creating topics for testing: one via the KafkaTopic CR, one via the pod CLI.
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());
    KafkaTopicUtils.waitForKafkaTopicReady(namespaceName, topicName);

    assertThat(KafkaTopicResource.kafkaTopicClient().inNamespace(namespaceName).withName(topicName).get().getMetadata().getName(), is(topicName));
    assertThat(KafkaCmdClient.listTopicsUsingPodCli(namespaceName, clusterName, 0), hasItem(topicName));

    KafkaCmdClient.createTopicUsingPodCli(namespaceName, clusterName, 0, cliTopicName, 1, 1);
    assertThat(KafkaCmdClient.listTopicsUsingPodCli(namespaceName, clusterName, 0), hasItems(topicName, cliTopicName));
    assertThat(cmdKubeClient(namespaceName).list(KafkaTopic.RESOURCE_KIND), hasItems(cliTopicName, topicName));

    // Updating first topic using pod CLI
    KafkaCmdClient.updateTopicPartitionsCountUsingPodCli(namespaceName, clusterName, 0, topicName, 2);
    KafkaUtils.waitForKafkaReady(namespaceName, clusterName);

    assertThat(KafkaCmdClient.describeTopicUsingPodCli(namespaceName, clusterName, 0, topicName),
        hasItems("PartitionCount:2"));
    // Query the test namespace explicitly (bug fix — used to be the default-namespace client).
    KafkaTopic testTopic = fromYamlString(cmdKubeClient(namespaceName).get(KafkaTopic.RESOURCE_KIND, topicName), KafkaTopic.class);
    assertThat(testTopic, is(CoreMatchers.notNullValue()));
    assertThat(testTopic.getSpec(), is(CoreMatchers.notNullValue()));
    assertThat(testTopic.getSpec().getPartitions(), is(Integer.valueOf(2)));

    // Updating second topic via KafkaTopic update
    KafkaTopicResource.replaceTopicResourceInSpecificNamespace(cliTopicName, topic -> topic.getSpec().setPartitions(2), namespaceName);
    KafkaUtils.waitForKafkaReady(namespaceName, clusterName);

    assertThat(KafkaCmdClient.describeTopicUsingPodCli(namespaceName, clusterName, 0, cliTopicName),
        hasItems("PartitionCount:2"));
    testTopic = fromYamlString(cmdKubeClient(namespaceName).get(KafkaTopic.RESOURCE_KIND, cliTopicName), KafkaTopic.class);
    assertThat(testTopic, is(CoreMatchers.notNullValue()));
    assertThat(testTopic.getSpec(), is(CoreMatchers.notNullValue()));
    assertThat(testTopic.getSpec().getPartitions(), is(Integer.valueOf(2)));

    // Deleting first topic by deletion of CM
    cmdKubeClient(namespaceName).deleteByName(KafkaTopic.RESOURCE_KIND, cliTopicName);

    // Deleting another topic using pod CLI
    KafkaCmdClient.deleteTopicUsingPodCli(namespaceName, clusterName, 0, topicName);
    KafkaTopicUtils.waitForKafkaTopicDeletion(namespaceName, topicName);

    // Checking all topics were deleted; give the operator time to finish the deletions.
    Thread.sleep(Constants.TIMEOUT_TEARDOWN);
    List<String> topics = KafkaCmdClient.listTopicsUsingPodCli(namespaceName, clusterName, 0);
    assertThat(topics, not(hasItems(topicName)));
    assertThat(topics, not(hasItems(cliTopicName)));
}
/**
 * Removes the Topic Operator from the Entity Operator via the Kafka CR, checks that the EO
 * pod is recreated without the topic-operator container, then adds it back and checks the
 * container reappears.
 */
@ParallelNamespaceTest
void testRemoveTopicOperatorFromEntityOperator(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());

    LOGGER.info("Deploying Kafka cluster {}", clusterName);
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3).build());

    String eoPodName = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName))
        .get(0).getMetadata().getName();

    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> k.getSpec().getEntityOperator().setTopicOperator(null), namespaceName);

    // Waiting when EO pod will be recreated without TO (2 containers left: UO + TLS sidecar)
    PodUtils.deletePodWithWait(namespaceName, eoPodName);
    DeploymentUtils.waitForDeploymentAndPodsReady(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), 1);
    PodUtils.waitUntilPodContainersCount(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), 2);

    // Checking that TO was removed
    kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName)).forEach(pod -> {
        pod.getSpec().getContainers().forEach(container -> {
            assertThat(container.getName(), not(containsString("topic-operator")));
        });
    });

    eoPodName = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName))
        .get(0).getMetadata().getName();

    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> k.getSpec().getEntityOperator().setTopicOperator(new EntityTopicOperatorSpec()), namespaceName);

    // Waiting when EO pod will be recreated with TO
    PodUtils.deletePodWithWait(namespaceName, eoPodName);
    DeploymentUtils.waitForDeploymentAndPodsReady(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), 1);

    // Checking that TO was created; every container must be one of the known EO containers.
    kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName)).forEach(pod -> {
        pod.getSpec().getContainers().forEach(container -> {
            assertThat(container.getName(), anyOf(
                containsString("topic-operator"),
                containsString("user-operator"),
                containsString("tls-sidecar"))
            );
        });
    });
}
/**
 * Removes the User Operator from the Entity Operator via the Kafka CR, checks that the EO
 * pod is recreated without the user-operator container, then adds it back and checks the
 * container reappears. Also measures the operation and asserts no CO errors were logged.
 */
@ParallelNamespaceTest
void testRemoveUserOperatorFromEntityOperator(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());

    LOGGER.info("Deploying Kafka cluster {}", clusterName)    ;
    String operationId = timeMeasuringSystem.startTimeMeasuring(Operation.CLUSTER_DEPLOYMENT, extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName());
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3).build());

    String eoPodName = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName))
        .get(0).getMetadata().getName();

    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> k.getSpec().getEntityOperator().setUserOperator(null), namespaceName);

    // Waiting when EO pod will be recreated without UO (2 containers left: TO + TLS sidecar)
    PodUtils.deletePodWithWait(namespaceName, eoPodName);
    DeploymentUtils.waitForDeploymentAndPodsReady(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), 1);
    PodUtils.waitUntilPodContainersCount(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), 2);

    // Checking that UO was removed
    kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName)).forEach(pod -> {
        pod.getSpec().getContainers().forEach(container -> {
            assertThat(container.getName(), not(containsString("user-operator")));
        });
    });

    eoPodName = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName))
        .get(0).getMetadata().getName();

    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> k.getSpec().getEntityOperator().setUserOperator(new EntityUserOperatorSpec()), namespaceName);

    // Waiting when EO pod will be recreated with UO
    PodUtils.deletePodWithWait(namespaceName, eoPodName);
    DeploymentUtils.waitForDeploymentAndPodsReady(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName), 1);

    // Checking that UO was created; every container must be one of the known EO containers.
    kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName)).forEach(pod -> {
        pod.getSpec().getContainers().forEach(container -> {
            assertThat(container.getName(), anyOf(
                containsString("topic-operator"),
                containsString("user-operator"),
                containsString("tls-sidecar"))
            );
        });
    });

    timeMeasuringSystem.stopOperation(operationId, extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName());
    assertNoCoErrorsLogged(NAMESPACE, timeMeasuringSystem.getDurationInSeconds(extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName(), operationId));
}
/**
 * Removes both the Topic Operator and the User Operator from the Kafka CR at once and
 * verifies the Entity Operator deployment scales down to zero pods, then restores both
 * operators and verifies the EO deployment becomes ready again with the expected containers.
 */
@ParallelNamespaceTest
void testRemoveUserAndTopicOperatorsFromEntityOperator(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    // TODO issue #4152 - temporarily disabled for Namespace RBAC scoped
    assumeFalse(Environment.isNamespaceRbacScope());

    LOGGER.info("Deploying Kafka cluster {}", clusterName);
    String operationId = timeMeasuringSystem.startTimeMeasuring(Operation.CLUSTER_DEPLOYMENT, extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName());

    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3).build());

    String eoDeploymentName = KafkaResources.entityOperatorDeploymentName(clusterName);

    // Drop both operators from the EO section; the EO deployment should scale to 0 replicas
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> {
        k.getSpec().getEntityOperator().setTopicOperator(null);
        k.getSpec().getEntityOperator().setUserOperator(null);
    }, namespaceName);

    PodUtils.waitUntilPodStabilityReplicasCount(namespaceName, eoDeploymentName, 0);

    // Restore both operators and wait for the EO deployment to come back up
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, k -> {
        k.getSpec().getEntityOperator().setTopicOperator(new EntityTopicOperatorSpec());
        k.getSpec().getEntityOperator().setUserOperator(new EntityUserOperatorSpec());
    }, namespaceName);
    DeploymentUtils.waitForDeploymentReady(namespaceName, eoDeploymentName);

    //Checking that EO was created
    // FIX: use the namespace-scoped client (was bare kubeClient() without a namespace),
    // consistent with the other @ParallelNamespaceTest methods in this class
    kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, eoDeploymentName).forEach(pod -> {
        pod.getSpec().getContainers().forEach(container -> {
            assertThat(container.getName(), anyOf(
                containsString("topic-operator"),
                containsString("user-operator"),
                containsString("tls-sidecar"))
            );
        });
    });

    timeMeasuringSystem.stopOperation(operationId, extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName());
    assertNoCoErrorsLogged(NAMESPACE, timeMeasuringSystem.getDurationInSeconds(extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName(), operationId));
}
/**
 * Deploys a Kafka cluster whose Entity Operator declares only the User Operator and
 * verifies that no topic-operator container ends up in the EO pod.
 */
@ParallelNamespaceTest
void testEntityOperatorWithoutTopicOperator(ExtensionContext extensionContext) {
    final String namespace = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
    final String cluster = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String testClass = extensionContext.getRequiredTestClass().getName();
    final String testName = extensionContext.getDisplayName();

    LOGGER.info("Deploying Kafka cluster without TO in EO");

    String opId = timeMeasuringSystem.startTimeMeasuring(Operation.CLUSTER_DEPLOYMENT, testClass, testName);
    resourceManager.createResource(extensionContext,
        KafkaTemplates.kafkaEphemeral(cluster, 3)
            .editSpec()
                .withNewEntityOperator()
                    .withNewUserOperator()
                    .endUserOperator()
                .endEntityOperator()
            .endSpec()
            .build());
    timeMeasuringSystem.stopOperation(opId, testClass, testName);
    assertNoCoErrorsLogged(NAMESPACE, timeMeasuringSystem.getDurationInSeconds(testClass, testName, opId));

    //Checking that TO was not deployed
    // Flatten every container of every EO pod and assert none of them is the topic-operator
    kubeClient(namespace).listPodsByPrefixInName(namespace, KafkaResources.entityOperatorDeploymentName(cluster)).stream()
        .flatMap(pod -> pod.getSpec().getContainers().stream())
        .forEach(container -> assertThat(container.getName(), not(containsString("topic-operator"))));
}
/**
 * Deploys a Kafka cluster whose Entity Operator declares only the Topic Operator and
 * verifies that no user-operator container ends up in the EO pod.
 */
@ParallelNamespaceTest
void testEntityOperatorWithoutUserOperator(ExtensionContext extensionContext) {
    final String namespace = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
    final String cluster = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String testClass = extensionContext.getRequiredTestClass().getName();
    final String testName = extensionContext.getDisplayName();

    LOGGER.info("Deploying Kafka cluster without UO in EO");

    String opId = timeMeasuringSystem.startTimeMeasuring(Operation.CLUSTER_DEPLOYMENT, testClass, testName);
    resourceManager.createResource(extensionContext,
        KafkaTemplates.kafkaEphemeral(cluster, 3)
            .editSpec()
                .withNewEntityOperator()
                    .withNewTopicOperator()
                    .endTopicOperator()
                .endEntityOperator()
            .endSpec()
            .build());
    timeMeasuringSystem.stopOperation(opId, testClass, testName);
    assertNoCoErrorsLogged(NAMESPACE, timeMeasuringSystem.getDurationInSeconds(testClass, testName, opId));

    //Checking that UO was not deployed
    // Flatten every container of every EO pod and assert none of them is the user-operator
    kubeClient(namespace).listPodsByPrefixInName(namespace, KafkaResources.entityOperatorDeploymentName(cluster)).stream()
        .flatMap(pod -> pod.getSpec().getContainers().stream())
        .forEach(container -> assertThat(container.getName(), not(containsString("user-operator"))));
}
/**
 * Deploys a Kafka cluster whose Entity Operator section is empty (no TO, no UO) and
 * verifies that no Entity Operator pod is deployed at all.
 */
@ParallelNamespaceTest
void testEntityOperatorWithoutUserAndTopicOperators(ExtensionContext extensionContext) {
    // FIX: derive the per-test namespace like the sibling @ParallelNamespaceTest methods do —
    // the original listed pods via the client's default namespace, which is not the test's own
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());

    LOGGER.info("Deploying Kafka cluster without UO and TO in EO");
    String operationId = timeMeasuringSystem.startTimeMeasuring(Operation.CLUSTER_DEPLOYMENT, extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName());

    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3)
        .editSpec()
            .withNewEntityOperator()
            .endEntityOperator()
        .endSpec()
        .build());

    timeMeasuringSystem.stopOperation(operationId, extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName());
    assertNoCoErrorsLogged(NAMESPACE, timeMeasuringSystem.getDurationInSeconds(extensionContext.getRequiredTestClass().getName(), extensionContext.getDisplayName(), operationId));

    //Checking that EO was not deployed
    // FIX: namespace-scoped lookup (was kubeClient() + single-arg overload on the default namespace)
    assertThat("EO should not be deployed",
        kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.entityOperatorDeploymentName(clusterName)).size(), is(0));
}
/**
 * Negative scenario: a KafkaTopic created without any labels must be ignored by the
 * Topic Operator — the CR exists in Kubernetes, but no Kafka topic is created and the
 * TO log contains no creation record for it.
 */
@ParallelNamespaceTest
void testTopicWithoutLabels(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    // FIX: single definition of the topic name — it was previously repeated as a literal seven times
    final String topicName = "topic-without-labels";

    // Negative scenario: creating topic without any labels and make sure that TO can't handle this topic
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3).build());

    // Creating topic without any label
    // NOTE(review): the boolean arg appears to skip the readiness wait for this resource — confirm
    resourceManager.createResource(extensionContext, false, KafkaTopicTemplates.topic(clusterName, topicName, 1, 1, 1)
        .editMetadata()
            .withLabels(null)
        .endMetadata()
        .build());

    // Checking that resource was created
    assertThat(cmdKubeClient(namespaceName).list("kafkatopic"), hasItems(topicName));

    // Checking that TO didn't handle new topic and zk pods don't contain new topic
    assertThat(KafkaCmdClient.listTopicsUsingPodCli(namespaceName, clusterName, 0), not(hasItems(topicName)));

    // Checking TO logs
    String tOPodName = cmdKubeClient(namespaceName).listResourcesByLabel("pod", Labels.STRIMZI_NAME_LABEL + "=" + clusterName + "-entity-operator").get(0);
    String tOlogs = kubeClient(namespaceName).logsInSpecificNamespace(namespaceName, tOPodName, "topic-operator");
    assertThat(tOlogs, not(containsString("Created topic '" + topicName + "'")));

    //Deleting topic
    cmdKubeClient(namespaceName).deleteByName("kafkatopic", topicName);
    KafkaTopicUtils.waitForKafkaTopicDeletion(namespaceName, topicName);

    //Checking all topics were deleted
    List<String> topics = KafkaCmdClient.listTopicsUsingPodCli(namespaceName, clusterName, 0);
    assertThat(topics, not(hasItems(topicName)));
}
/**
 * Deploys a JBOD Kafka cluster with one deleteClaim=false and one deleteClaim=true volume,
 * deletes the cluster and waits for the PVC cleanup matching those per-volume flags.
 */
@ParallelNamespaceTest
void testKafkaJBODDeleteClaimsTrueFalse(ExtensionContext extensionContext) {
    final String namespace = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
    final String cluster = mapWithClusterNames.get(extensionContext.getDisplayName());
    final int kafkaReplicas = 2;
    final String diskSizeGi = "10";

    // Volume 0 keeps its claim on cluster deletion, volume 1 releases it
    JbodStorageBuilder storageBuilder = new JbodStorageBuilder().withVolumes(
        new PersistentClaimStorageBuilder().withDeleteClaim(false).withId(0).withSize(diskSizeGi + "Gi").build(),
        new PersistentClaimStorageBuilder().withDeleteClaim(true).withId(1).withSize(diskSizeGi + "Gi").build());
    JbodStorage jbodStorage = storageBuilder.build();

    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaJBOD(cluster, kafkaReplicas, jbodStorage).build());

    // kafka cluster already deployed
    verifyVolumeNamesAndLabels(namespace, cluster, kafkaReplicas, 2, diskSizeGi);

    final int volumesCount = kubeClient(namespace).listPersistentVolumeClaims(namespace, cluster).size();

    LOGGER.info("Deleting cluster");
    cmdKubeClient(namespace).deleteByName("kafka", cluster);

    LOGGER.info("Waiting for PVC deletion");
    PersistentVolumeClaimUtils.waitForPVCDeletion(namespace, volumesCount, jbodStorage, cluster);
}
/**
 * Deploys a JBOD Kafka cluster where both volumes are created with deleteClaim=true,
 * deletes the cluster and waits for the corresponding PVC cleanup.
 */
@ParallelNamespaceTest
void testKafkaJBODDeleteClaimsTrue(ExtensionContext extensionContext) {
    final String namespace = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
    final String cluster = mapWithClusterNames.get(extensionContext.getDisplayName());
    final int kafkaReplicas = 2;
    final String diskSizeGi = "10";

    // Both volumes release their claims on cluster deletion
    JbodStorageBuilder storageBuilder = new JbodStorageBuilder().withVolumes(
        new PersistentClaimStorageBuilder().withDeleteClaim(true).withId(0).withSize(diskSizeGi + "Gi").build(),
        new PersistentClaimStorageBuilder().withDeleteClaim(true).withId(1).withSize(diskSizeGi + "Gi").build());
    JbodStorage jbodStorage = storageBuilder.build();

    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaJBOD(cluster, kafkaReplicas, jbodStorage).build());

    // kafka cluster already deployed
    verifyVolumeNamesAndLabels(namespace, cluster, kafkaReplicas, 2, diskSizeGi);

    final int volumesCount = kubeClient(namespace).listPersistentVolumeClaims(namespace, cluster).size();

    LOGGER.info("Deleting cluster");
    cmdKubeClient(namespace).deleteByName("kafka", cluster);

    LOGGER.info("Waiting for PVC deletion");
    PersistentVolumeClaimUtils.waitForPVCDeletion(namespace, volumesCount, jbodStorage, cluster);
}
/**
 * Deploys a JBOD Kafka cluster where both volumes are created with deleteClaim=false,
 * deletes the cluster and waits for the corresponding PVC cleanup behavior.
 */
@ParallelNamespaceTest
void testKafkaJBODDeleteClaimsFalse(ExtensionContext extensionContext) {
    final String namespace = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
    final String cluster = mapWithClusterNames.get(extensionContext.getDisplayName());
    final int kafkaReplicas = 2;
    final String diskSizeGi = "10";

    // Both volumes keep their claims on cluster deletion
    JbodStorageBuilder storageBuilder = new JbodStorageBuilder().withVolumes(
        new PersistentClaimStorageBuilder().withDeleteClaim(false).withId(0).withSize(diskSizeGi + "Gi").build(),
        new PersistentClaimStorageBuilder().withDeleteClaim(false).withId(1).withSize(diskSizeGi + "Gi").build());
    JbodStorage jbodStorage = storageBuilder.build();

    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaJBOD(cluster, kafkaReplicas, jbodStorage).build());

    // kafka cluster already deployed
    verifyVolumeNamesAndLabels(namespace, cluster, kafkaReplicas, 2, diskSizeGi);

    final int volumesCount = kubeClient(namespace).listPersistentVolumeClaims(namespace, cluster).size();

    LOGGER.info("Deleting cluster");
    cmdKubeClient(namespace).deleteByName("kafka", cluster);

    LOGGER.info("Waiting for PVC deletion");
    PersistentVolumeClaimUtils.waitForPVCDeletion(namespace, volumesCount, jbodStorage, cluster);
}
/**
 * Deploys a JBOD cluster with two differently-sized persistent volumes, checks that the
 * created PVCs report the requested sizes, and verifies plain produce/consume still works.
 */
@ParallelNamespaceTest
@Tag(INTERNAL_CLIENTS_USED)
void testPersistentStorageSize(ExtensionContext extensionContext) {
    final String namespace = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
    final String cluster = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String topic = mapWithTestTopics.get(extensionContext.getDisplayName());
    final String[] diskSizes = {"70Gi", "20Gi"};
    final int kafkaRepl = 2;
    final int diskCount = 2;

    JbodStorage storage = new JbodStorageBuilder()
        .withVolumes(
            new PersistentClaimStorageBuilder().withDeleteClaim(false).withId(0).withSize(diskSizes[0]).build(),
            new PersistentClaimStorageBuilder().withDeleteClaim(false).withId(1).withSize(diskSizes[1]).build()
        ).build();

    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(cluster, kafkaRepl)
        .editSpec()
            .editKafka()
                .withStorage(storage)
            .endKafka()
            .editZookeeper()
                .withReplicas(1)
            .endZookeeper()
        .endSpec()
        .build());
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(cluster, topic).build());
    resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, cluster + "-" + Constants.KAFKA_CLIENTS).build());

    // Only PVCs belonging to this cluster are relevant for the size verification
    List<PersistentVolumeClaim> clusterPvcs = kubeClient(namespace).listPersistentVolumeClaims(namespace, cluster).stream()
        .filter(pvc -> pvc.getMetadata().getName().contains(cluster))
        .collect(Collectors.toList());
    checkStorageSizeForVolumes(clusterPvcs, diskSizes, kafkaRepl, diskCount);

    String clientsPodName = kubeClient(namespace).listPodsByPrefixInName(namespace, cluster + "-" + Constants.KAFKA_CLIENTS).get(0).getMetadata().getName();

    InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
        .withUsingPodName(clientsPodName)
        .withTopicName(topic)
        .withNamespaceName(namespace)
        .withClusterName(cluster)
        .withMessageCount(MESSAGE_COUNT)
        .withListenerName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
        .build();

    LOGGER.info("Checking produced and consumed messages to pod:{}", clientsPodName);
    internalKafkaClient.checkProducedAndConsumedMessages(
        internalKafkaClient.sendMessagesPlain(),
        internalKafkaClient.receiveMessagesPlain()
    );
}
/**
 * Verifies that broker certificates are regenerated when an external (loadbalancer)
 * listener is added to an existing cluster: the per-pod .crt/.key entries in the
 * brokers secret must differ before and after the listener change.
 */
@ParallelNamespaceTest
@Tag(LOADBALANCER_SUPPORTED)
void testRegenerateCertExternalAddressChange(ExtensionContext extensionContext) {
    final String namespace = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
    final String cluster = mapWithClusterNames.get(extensionContext.getDisplayName());

    LOGGER.info("Creating kafka without external listener");
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(cluster, 3, 1).build());

    final String brokerSecret = cluster + "-kafka-brokers";
    Secret certsBeforeChange = kubeClient(namespace).getSecret(namespace, brokerSecret);

    LOGGER.info("Editing kafka with external listener");
    KafkaResource.replaceKafkaResourceInSpecificNamespace(cluster, kafka -> {
        GenericKafkaListener plainListener = new GenericKafkaListenerBuilder()
            .withName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
            .withPort(9092)
            .withType(KafkaListenerType.INTERNAL)
            .withTls(false)
            .build();
        GenericKafkaListener externalListener = new GenericKafkaListenerBuilder()
            .withName(Constants.EXTERNAL_LISTENER_DEFAULT_NAME)
            .withPort(9094)
            .withType(KafkaListenerType.LOADBALANCER)
            .withTls(true)
            .withNewConfiguration()
                .withFinalizers(LB_FINALIZERS)
            .endConfiguration()
            .build();
        kafka.getSpec().getKafka().setListeners(asList(plainListener, externalListener));
    }, namespace);

    // Adding the listener must roll all three broker pods
    Map<String, String> kafkaSnapshot = StatefulSetUtils.ssSnapshot(namespace, kafkaStatefulSetName(cluster));
    StatefulSetUtils.waitTillSsHasRolled(namespace, kafkaStatefulSetName(cluster), 3, kafkaSnapshot);

    Secret certsAfterChange = kubeClient(namespace).getSecret(namespace, brokerSecret);

    LOGGER.info("Checking secrets");
    kubeClient(namespace).listPodsByPrefixInName(namespace, KafkaResources.kafkaStatefulSetName(cluster)).forEach(kafkaPod -> {
        String podName = kafkaPod.getMetadata().getName();
        assertThat(certsAfterChange.getData().get(podName + ".crt"), is(not(certsBeforeChange.getData().get(podName + ".crt"))));
        assertThat(certsAfterChange.getData().get(podName + ".key"), is(not(certsBeforeChange.getData().get(podName + ".key"))));
    });
}
/**
 * Exercises label propagation from the Kafka CR: labels set on the CR must be applied to,
 * updated on, and removed from the derived StatefulSet, brokers Service, metrics ConfigMap
 * and kafka pods, while plain produce/consume keeps working after the rolling updates.
 */
@ParallelNamespaceTest
@Tag(INTERNAL_CLIENTS_USED)
void testLabelModificationDoesNotBreakCluster(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());

    // Third slot of each array is intentionally empty; it is filled in later when a
    // brand-new label is added mid-test
    Map<String, String> labels = new HashMap<>();
    final String[] labelKeys = {"label-name-1", "label-name-2", ""};
    final String[] labelValues = {"name-of-the-label-1", "name-of-the-label-2", ""};

    labels.put(labelKeys[0], labelValues[0]);
    labels.put(labelKeys[1], labelValues[1]);

    // Deploy the cluster with the two initial labels already on the Kafka CR metadata
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 1)
        .editMetadata()
            .withLabels(labels)
        .endMetadata()
        .build());
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());
    resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, clusterName + "-" + Constants.KAFKA_CLIENTS).build());

    final String kafkaClientsPodName = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, clusterName + "-" + Constants.KAFKA_CLIENTS).get(0).getMetadata().getName();

    InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
        .withUsingPodName(kafkaClientsPodName)
        .withTopicName(topicName)
        .withNamespaceName(namespaceName)
        .withClusterName(clusterName)
        .withMessageCount(MESSAGE_COUNT)
        .withListenerName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
        .build();

    // Snapshot of the kafka pods, used later to detect the rolling updates triggered
    // by the label changes
    Map<String, String> kafkaPods = StatefulSetUtils.ssSnapshot(namespaceName, kafkaStatefulSetName(clusterName));

    LOGGER.info("Waiting for kafka stateful set labels changed {}", labels);
    StatefulSetUtils.waitForStatefulSetLabelsChange(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName), labels);

    LOGGER.info("Getting labels from stateful set resource");
    StatefulSet statefulSet = kubeClient(namespaceName).getStatefulSet(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName));
    LOGGER.info("Verifying default labels in the Kafka CR");

    // Initial labels must have propagated into the pod template of the StatefulSet
    assertThat("Label exists in stateful set with concrete value",
        labelValues[0].equals(statefulSet.getSpec().getTemplate().getMetadata().getLabels().get(labelKeys[0])));
    assertThat("Label exists in stateful set with concrete value",
        labelValues[1].equals(statefulSet.getSpec().getTemplate().getMetadata().getLabels().get(labelKeys[1])));

    // Phase 2: change both existing label values and introduce a third, new label
    labelValues[0] = "new-name-of-the-label-1";
    labelValues[1] = "new-name-of-the-label-2";
    labelKeys[2] = "label-name-3";
    labelValues[2] = "name-of-the-label-3";
    LOGGER.info("Setting new values of labels from {} to {} | from {} to {} and adding one {} with value {}",
        "name-of-the-label-1", labelValues[0], "name-of-the-label-2", labelValues[1], labelKeys[2], labelValues[2]);

    LOGGER.info("Edit kafka labels in Kafka CR");
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, resource -> {
        resource.getMetadata().getLabels().put(labelKeys[0], labelValues[0]);
        resource.getMetadata().getLabels().put(labelKeys[1], labelValues[1]);
        resource.getMetadata().getLabels().put(labelKeys[2], labelValues[2]);
    }, namespaceName);

    // Keep the local expectation map in sync with what was written to the CR
    labels.put(labelKeys[0], labelValues[0]);
    labels.put(labelKeys[1], labelValues[1]);
    labels.put(labelKeys[2], labelValues[2]);

    LOGGER.info("Waiting for kafka service labels changed {}", labels);
    ServiceUtils.waitForServiceLabelsChange(namespaceName, KafkaResources.brokersServiceName(clusterName), labels);

    LOGGER.info("Verifying kafka labels via services");
    Service service = kubeClient(namespaceName).getService(namespaceName, KafkaResources.brokersServiceName(clusterName));
    verifyPresentLabels(labels, service);

    LOGGER.info("Waiting for kafka config map labels changed {}", labels);
    ConfigMapUtils.waitForConfigMapLabelsChange(namespaceName, KafkaResources.kafkaMetricsAndLogConfigMapName(clusterName), labels);

    LOGGER.info("Verifying kafka labels via config maps");
    ConfigMap configMap = kubeClient(namespaceName).getConfigMap(namespaceName, KafkaResources.kafkaMetricsAndLogConfigMapName(clusterName));
    verifyPresentLabels(labels, configMap);

    LOGGER.info("Waiting for kafka stateful set labels changed {}", labels);
    StatefulSetUtils.waitForStatefulSetLabelsChange(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName), labels);

    LOGGER.info("Verifying kafka labels via stateful set");
    statefulSet = kubeClient(namespaceName).getStatefulSet(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName));
    verifyPresentLabels(labels, statefulSet);

    // Label update rolls the kafka pods; wait for the roll to finish before checking pods
    StatefulSetUtils.waitTillSsHasRolled(namespaceName, kafkaStatefulSetName(clusterName), 3, kafkaPods);

    LOGGER.info("Verifying via kafka pods");
    labels = kubeClient(namespaceName).getPod(namespaceName, KafkaResources.kafkaPodName(clusterName, 0)).getMetadata().getLabels();

    assertThat("Label exists in kafka pods", labelValues[0].equals(labels.get(labelKeys[0])));
    assertThat("Label exists in kafka pods", labelValues[1].equals(labels.get(labelKeys[1])));
    assertThat("Label exists in kafka pods", labelValues[2].equals(labels.get(labelKeys[2])));

    // Phase 3: remove all three labels from the CR and verify they disappear everywhere
    LOGGER.info("Removing labels: {} -> {}, {} -> {}, {} -> {}", labelKeys[0], labels.get(labelKeys[0]),
        labelKeys[1], labels.get(labelKeys[1]), labelKeys[2], labels.get(labelKeys[2]));
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, resource -> {
        resource.getMetadata().getLabels().remove(labelKeys[0]);
        resource.getMetadata().getLabels().remove(labelKeys[1]);
        resource.getMetadata().getLabels().remove(labelKeys[2]);
    }, namespaceName);

    labels.remove(labelKeys[0]);
    labels.remove(labelKeys[1]);
    labels.remove(labelKeys[2]);

    LOGGER.info("Waiting for kafka service labels deletion {}", labels.toString());
    ServiceUtils.waitForServiceLabelsDeletion(namespaceName, KafkaResources.brokersServiceName(clusterName), labelKeys[0], labelKeys[1], labelKeys[2]);

    LOGGER.info("Verifying kafka labels via services");
    service = kubeClient(namespaceName).getService(namespaceName, KafkaResources.brokersServiceName(clusterName));
    verifyNullLabels(labelKeys, service);

    LOGGER.info("Verifying kafka labels via config maps");
    ConfigMapUtils.waitForConfigMapLabelsDeletion(namespaceName, KafkaResources.kafkaMetricsAndLogConfigMapName(clusterName), labelKeys[0], labelKeys[1], labelKeys[2]);

    configMap = kubeClient(namespaceName).getConfigMap(namespaceName, KafkaResources.kafkaMetricsAndLogConfigMapName(clusterName));
    verifyNullLabels(labelKeys, configMap);

    LOGGER.info("Waiting for kafka stateful set labels changed {}", labels);
    String statefulSetName = kubeClient(namespaceName).getStatefulSet(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName)).getMetadata().getName();
    StatefulSetUtils.waitForStatefulSetLabelsDeletion(namespaceName, statefulSetName, labelKeys[0], labelKeys[1], labelKeys[2]);

    statefulSet = kubeClient(namespaceName).getStatefulSet(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName));

    LOGGER.info("Verifying kafka labels via stateful set");
    verifyNullLabels(labelKeys, statefulSet);

    // Label removal rolls the pods again
    StatefulSetUtils.waitTillSsHasRolled(namespaceName, kafkaStatefulSetName(clusterName), 3, kafkaPods);

    LOGGER.info("Waiting for kafka pod labels deletion {}", labels.toString());
    PodUtils.waitUntilPodLabelsDeletion(namespaceName, KafkaResources.kafkaPodName(clusterName, 0), labelKeys[0], labelKeys[1], labelKeys[2]);

    labels = kubeClient(namespaceName).getPod(namespaceName, KafkaResources.kafkaPodName(clusterName, 0)).getMetadata().getLabels();

    LOGGER.info("Verifying via kafka pods");
    verifyNullLabels(labelKeys, labels);

    // Finally verify the cluster still serves plain traffic after two rolling updates
    internalKafkaClient.checkProducedAndConsumedMessages(
        internalKafkaClient.sendMessagesPlain(),
        internalKafkaClient.receiveMessagesPlain()
    );
}
/**
 * Checks that every resource created for the cluster (pods, stateful sets, services,
 * Opaque secrets and config maps) carries the expected app-domain labels, and that
 * plain produce/consume still works afterwards.
 */
@ParallelNamespaceTest
@Tag(INTERNAL_CLIENTS_USED)
void testAppDomainLabels(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());

    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3, 1).build());
    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());
    resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, clusterName + "-" + Constants.KAFKA_CLIENTS).build());

    final String kafkaClientsPodName =
        kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, clusterName + "-" + Constants.KAFKA_CLIENTS).get(0).getMetadata().getName();

    InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
        .withUsingPodName(kafkaClientsPodName)
        .withTopicName(topicName)
        .withNamespaceName(namespaceName)
        .withClusterName(clusterName)
        .withMessageCount(MESSAGE_COUNT)
        .withListenerName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
        .build();

    Map<String, String> labels;

    LOGGER.info("---> PODS <---");

    // The kafka-clients pod is excluded: only cluster-owned pods are label-checked
    List<Pod> pods = kubeClient(namespaceName).listPods(namespaceName, clusterName).stream()
        .filter(pod -> pod.getMetadata().getName().startsWith(clusterName))
        .filter(pod -> !pod.getMetadata().getName().startsWith(clusterName + "-" + Constants.KAFKA_CLIENTS))
        .collect(Collectors.toList());

    for (Pod pod : pods) {
        LOGGER.info("Getting labels from {} pod", pod.getMetadata().getName());
        verifyAppLabels(pod.getMetadata().getLabels());
    }

    LOGGER.info("---> STATEFUL SETS <---");

    LOGGER.info("Getting labels from stateful set of kafka resource");
    labels = kubeClient(namespaceName).getStatefulSet(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName)).getMetadata().getLabels();
    verifyAppLabels(labels);

    LOGGER.info("Getting labels from stateful set of zookeeper resource");
    labels = kubeClient(namespaceName).getStatefulSet(namespaceName, KafkaResources.zookeeperStatefulSetName(clusterName)).getMetadata().getLabels();
    verifyAppLabels(labels);

    LOGGER.info("---> SERVICES <---");

    List<Service> services = kubeClient(namespaceName).listServices(namespaceName).stream()
        .filter(service -> service.getMetadata().getName().startsWith(clusterName))
        .collect(Collectors.toList());

    for (Service service : services) {
        LOGGER.info("Getting labels from {} service", service.getMetadata().getName());
        verifyAppLabels(service.getMetadata().getLabels());
    }

    LOGGER.info("---> SECRETS <---");

    // Only Opaque secrets of this cluster are checked
    // NOTE(review): presumably non-Opaque (cert/token) secrets follow a different label
    // contract — confirm against verifyAppLabelsForSecretsAndConfigMaps
    List<Secret> secrets = kubeClient(namespaceName).listSecrets(namespaceName).stream()
        .filter(secret -> secret.getMetadata().getName().startsWith(clusterName) && secret.getType().equals("Opaque"))
        .collect(Collectors.toList());

    for (Secret secret : secrets) {
        LOGGER.info("Getting labels from {} secret", secret.getMetadata().getName());
        verifyAppLabelsForSecretsAndConfigMaps(secret.getMetadata().getLabels());
    }

    LOGGER.info("---> CONFIG MAPS <---");

    List<ConfigMap> configMaps = kubeClient(namespaceName).listConfigMapsInSpecificNamespace(namespaceName, clusterName);

    for (ConfigMap configMap : configMaps) {
        LOGGER.info("Getting labels from {} config map", configMap.getMetadata().getName());
        verifyAppLabelsForSecretsAndConfigMaps(configMap.getMetadata().getLabels());
    }

    // The cluster must remain usable for messaging after all label checks
    internalKafkaClient.checkProducedAndConsumedMessages(
        internalKafkaClient.sendMessagesPlain(),
        internalKafkaClient.receiveMessagesPlain()
    );
}
/**
 * Verifies that a User Operator only reconciles KafkaUsers belonging to its own cluster:
 * a tls user created for the first cluster appears as ADDED in the first cluster's UO log,
 * does not appear in the second cluster's UO log, and carries the first cluster's
 * strimzi.io/cluster label.
 */
@ParallelNamespaceTest
void testUOListeningOnlyUsersInSameCluster(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
    // FIX: removed unused local `clusterName` — the test only works with the two
    // explicitly named clusters below
    final String userName = mapWithTestUsers.get(extensionContext.getDisplayName());
    final String firstClusterName = "my-cluster-1";
    final String secondClusterName = "my-cluster-2";

    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(firstClusterName, 3, 1).build());
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(secondClusterName, 3, 1).build());
    resourceManager.createResource(extensionContext, KafkaUserTemplates.tlsUser(firstClusterName, userName).build());

    LOGGER.info("Verifying that user {} in cluster {} is created", userName, firstClusterName);
    String entityOperatorPodName = kubeClient(namespaceName).listPodNamesInSpecificNamespace(namespaceName, Labels.STRIMZI_NAME_LABEL, KafkaResources.entityOperatorDeploymentName(firstClusterName)).get(0);
    String uOLogs = kubeClient(namespaceName).logsInSpecificNamespace(namespaceName, entityOperatorPodName, "user-operator");
    assertThat(uOLogs, containsString("User " + userName + " in namespace " + namespaceName + " was ADDED"));

    LOGGER.info("Verifying that user {} in cluster {} is not created", userName, secondClusterName);
    entityOperatorPodName = kubeClient(namespaceName).listPodNamesInSpecificNamespace(namespaceName, Labels.STRIMZI_NAME_LABEL, KafkaResources.entityOperatorDeploymentName(secondClusterName)).get(0);
    uOLogs = kubeClient(namespaceName).logsInSpecificNamespace(namespaceName, entityOperatorPodName, "user-operator");
    assertThat(uOLogs, not(containsString("User " + userName + " in namespace " + namespaceName + " was ADDED")));

    LOGGER.info("Verifying that user belongs to {} cluster", firstClusterName);
    String kafkaUserResource = cmdKubeClient(namespaceName).getResourceAsYaml("kafkauser", userName);
    assertThat(kafkaUserResource, containsString(Labels.STRIMZI_CLUSTER_LABEL + ": " + firstClusterName));
}
/**
 * Verifies that produced messages actually land in the broker's on-disk log segment
 * (/var/lib/kafka/data/kafka-log0/&lt;topic-dir&gt;/00000000000000000000.log): the segment is
 * empty before producing, non-empty after, and still non-empty after the broker pod is
 * deleted and recreated.
 */
@ParallelNamespaceTest
@Tag(INTERNAL_CLIENTS_USED)
void testMessagesAreStoredInDisk(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String topicName = KafkaTopicUtils.generateRandomNameOfTopic();

    // Single-broker cluster so the topic's only replica is on pod 0
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 1, 1).build());

    // Snapshot taken before the pod deletion below, to detect the restart
    Map<String, String> kafkaPodsSnapshot = StatefulSetUtils.ssSnapshot(namespaceName, kafkaStatefulSetName(clusterName));

    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName, 1, 1).build());
    resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, clusterName + "-" + Constants.KAFKA_CLIENTS).build());

    final String kafkaClientsPodName =
        kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, clusterName + "-" + Constants.KAFKA_CLIENTS).get(0).getMetadata().getName();

    InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
        .withUsingPodName(kafkaClientsPodName)
        .withTopicName(topicName)
        .withNamespaceName(namespaceName)
        .withClusterName(clusterName)
        .withMessageCount(MESSAGE_COUNT)
        .withListenerName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
        .build();

    // Wait until the topic's log directory shows up inside the broker's data dir
    TestUtils.waitFor("KafkaTopic creation inside kafka pod", Constants.GLOBAL_POLL_INTERVAL, Constants.GLOBAL_TIMEOUT,
        () -> cmdKubeClient(namespaceName).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash",
            "-c", "cd /var/lib/kafka/data/kafka-log0; ls -1").out().contains(topicName));

    // Resolve the exact directory name (topic name plus partition suffix)
    String topicDirNameInPod = cmdKubeClient(namespaceName).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash",
        "-c", "cd /var/lib/kafka/data/kafka-log0; ls -1 | sed -n '/" + topicName + "/p'").out();

    String commandToGetDataFromTopic =
        "cd /var/lib/kafka/data/kafka-log0/" + topicDirNameInPod + "/;cat 00000000000000000000.log";

    LOGGER.info("Executing command {} in {}", commandToGetDataFromTopic, KafkaResources.kafkaPodName(clusterName, 0));
    String topicData = cmdKubeClient(namespaceName).execInPod(KafkaResources.kafkaPodName(clusterName, 0),
        "/bin/bash", "-c", commandToGetDataFromTopic).out();

    // Before producing, the first log segment must be empty
    LOGGER.info("Topic {} is present in kafka broker {} with no data", topicName, KafkaResources.kafkaPodName(clusterName, 0));
    assertThat("Topic contains data", topicData, emptyOrNullString());

    internalKafkaClient.checkProducedAndConsumedMessages(
        internalKafkaClient.sendMessagesPlain(),
        internalKafkaClient.receiveMessagesPlain()
    );

    LOGGER.info("Executing command {} in {}", commandToGetDataFromTopic, KafkaResources.kafkaPodName(clusterName, 0));
    topicData = cmdKubeClient(namespaceName).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c",
        commandToGetDataFromTopic).out();

    // After producing, the segment must hold data
    assertThat("Topic has no data", topicData, notNullValue());

    // Delete every broker pod and let the StatefulSet recreate them
    List<Pod> kafkaPods = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName));

    for (Pod kafkaPod : kafkaPods) {
        LOGGER.info("Deleting kafka pod {}", kafkaPod.getMetadata().getName());
        kubeClient(namespaceName).deletePod(namespaceName, kafkaPod);
    }

    LOGGER.info("Wait for kafka to rolling restart ...");
    StatefulSetUtils.waitTillSsHasRolled(namespaceName, kafkaStatefulSetName(clusterName), 1, kafkaPodsSnapshot);

    LOGGER.info("Executing command {} in {}", commandToGetDataFromTopic, KafkaResources.kafkaPodName(clusterName, 0));
    topicData = cmdKubeClient(namespaceName).execInPod(KafkaResources.kafkaPodName(clusterName, 0), "/bin/bash", "-c",
        commandToGetDataFromTopic).out();

    // NOTE(review): ephemeral storage does not survive a pod restart — this check passing
    // presumably relies on the command output being non-null rather than data durability;
    // notNullValue() also passes for an empty string, so this assertion is weak — confirm
    assertThat("Topic has no data", topicData, notNullValue());
}
// Verifies that the __consumer_offsets topic is created with the configured
// number of partitions (100): once a consumer has committed offsets, the
// broker's log dir must contain directories __consumer_offsets-0..99.
@ParallelNamespaceTest
@Tag(INTERNAL_CLIENTS_USED)
void testConsumerOffsetFiles(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
    final Map<String, Object> kafkaConfig = new HashMap<>();
    kafkaConfig.put("offsets.topic.replication.factor", "3");
    kafkaConfig.put("offsets.topic.num.partitions", "100");

    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaEphemeral(clusterName, 3, 1)
        .editSpec()
            .editKafka()
                .withConfig(kafkaConfig)
            .endKafka()
        .endSpec()
        .build());

    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName, 3, 1).build());
    resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, clusterName + "-" + Constants.KAFKA_CLIENTS).build());

    final String kafkaClientsPodName =
        kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, clusterName + "-" + Constants.KAFKA_CLIENTS).get(0).getMetadata().getName();

    InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
        .withUsingPodName(kafkaClientsPodName)
        .withTopicName(topicName)
        .withNamespaceName(namespaceName)
        .withClusterName(clusterName)
        .withMessageCount(MESSAGE_COUNT)
        .withListenerName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
        .build();

    // Print the numeric suffix of every __consumer_offsets-<N> directory in
    // the broker's log dir, numerically sorted, so it can be compared against
    // the sequence 0..99 below.
    String commandToGetFiles = "cd /var/lib/kafka/data/kafka-log0/;" +
        "ls -1 | sed -n \"s#__consumer_offsets-\\([0-9]*\\)#\\1#p\" | sort -V";

    LOGGER.info("Executing command {} in {}", commandToGetFiles, KafkaResources.kafkaPodName(clusterName, 0));
    String result = cmdKubeClient(namespaceName).execInPod(KafkaResources.kafkaPodName(clusterName, 0),
        "/bin/bash", "-c", commandToGetFiles).out();

    // TODO / FIXME
    //assertThat("Folder kafka-log0 has data in files:\n" + result, result.equals(""));

    LOGGER.info("Result: \n" + result);

    // Producing and consuming makes the consumer commit offsets, which is
    // what materialises the __consumer_offsets partition directories on disk.
    internalKafkaClient.checkProducedAndConsumedMessages(
        internalKafkaClient.sendMessagesPlain(),
        internalKafkaClient.receiveMessagesPlain()
    );

    LOGGER.info("Executing command {} in {}", commandToGetFiles, KafkaResources.kafkaPodName(clusterName, 0));
    result = cmdKubeClient(namespaceName).execInPod(KafkaResources.kafkaPodName(clusterName, 0),
        "/bin/bash", "-c", commandToGetFiles).out();

    // Expect one partition directory per configured partition: 0 through 99.
    StringBuilder stringToMatch = new StringBuilder();
    for (int i = 0; i < 100; i++) {
        stringToMatch.append(i).append("\n");
    }

    assertThat("Folder kafka-log0 doesn't contain 100 files", result, containsString(stringToMatch.toString()));
}
// Verifies that user-supplied labels/annotations on the Kafka and ZooKeeper
// PVC templates (and labels on the StatefulSet template) are applied to the
// created resources, and that later edits of the Kafka CR propagate to the
// already-existing PVCs.
@ParallelNamespaceTest
void testLabelsAndAnnotationForPVC(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String labelAnnotationKey = "testKey";
    final String firstValue = "testValue";
    final String changedValue = "editedTestValue";

    Map<String, String> pvcLabel = new HashMap<>();
    pvcLabel.put(labelAnnotationKey, firstValue);
    // NOTE(review): pvcAnnotation aliases pvcLabel — both names refer to the
    // same map instance, so each mutation below changes labels and
    // annotations together; presumably intentional, but confirm.
    Map<String, String> pvcAnnotation = pvcLabel;

    Map<String, String> statefulSetLabels = new HashMap<>();
    statefulSetLabels.put("app.kubernetes.io/part-of", "some-app");
    statefulSetLabels.put("app.kubernetes.io/managed-by", "some-app");

    // 3 Kafka replicas x 2 JBOD volumes + 1 ZooKeeper replica x 1 volume
    // -> 7 PVCs expected in total (asserted below).
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 1)
        .editSpec()
            .editKafka()
                .withNewTemplate()
                    .withNewStatefulset()
                        .withNewMetadata()
                            .withLabels(statefulSetLabels)
                        .endMetadata()
                    .endStatefulset()
                    .withNewPersistentVolumeClaim()
                        .withNewMetadata()
                            .addToLabels(pvcLabel)
                            .addToAnnotations(pvcAnnotation)
                        .endMetadata()
                    .endPersistentVolumeClaim()
                .endTemplate()
                .withStorage(new JbodStorageBuilder().withVolumes(
                    new PersistentClaimStorageBuilder()
                        .withDeleteClaim(false)
                        .withId(0)
                        .withSize("20Gi")
                        .build(),
                    new PersistentClaimStorageBuilder()
                        .withDeleteClaim(true)
                        .withId(1)
                        .withSize("10Gi")
                        .build())
                    .build())
            .endKafka()
            .editZookeeper()
                .withNewTemplate()
                    .withNewPersistentVolumeClaim()
                        .withNewMetadata()
                            .addToLabels(pvcLabel)
                            .addToAnnotations(pvcAnnotation)
                        .endMetadata()
                    .endPersistentVolumeClaim()
                .endTemplate()
                .withNewPersistentClaimStorage()
                    .withDeleteClaim(false)
                    .withId(0)
                    .withSize("3Gi")
                .endPersistentClaimStorage()
            .endZookeeper()
        .endSpec()
        .build());

    LOGGER.info("Check if Kubernetes labels are applied");
    Map<String, String> actualStatefulSetLabels = kubeClient(namespaceName).getStatefulSet(namespaceName, KafkaResources.kafkaStatefulSetName(clusterName)).getMetadata().getLabels();
    assertThat(actualStatefulSetLabels.get("app.kubernetes.io/part-of"), is("some-app"));
    assertThat(actualStatefulSetLabels.get("app.kubernetes.io/managed-by"), is("some-app"));
    LOGGER.info("Kubernetes labels are correctly set and present");

    List<PersistentVolumeClaim> pvcs = kubeClient(namespaceName).listPersistentVolumeClaims(namespaceName, clusterName).stream().filter(
        persistentVolumeClaim -> persistentVolumeClaim.getMetadata().getName().contains(clusterName)).collect(Collectors.toList());

    assertThat(pvcs.size(), is(7));

    // Every PVC must carry the initial label and annotation value.
    for (PersistentVolumeClaim pvc : pvcs) {
        LOGGER.info("Verifying that PVC label {} - {} = {}", pvc.getMetadata().getName(), firstValue, pvc.getMetadata().getLabels().get(labelAnnotationKey));
        assertThat(firstValue, is(pvc.getMetadata().getLabels().get(labelAnnotationKey)));
        assertThat(firstValue, is(pvc.getMetadata().getAnnotations().get(labelAnnotationKey)));
    }

    // Change the value and push the updated maps into the Kafka CR.
    pvcLabel.put(labelAnnotationKey, changedValue);
    pvcAnnotation.put(labelAnnotationKey, changedValue);

    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, kafka -> {
        LOGGER.info("Replacing kafka && zookeeper labels and annotations from {} to {}", labelAnnotationKey, changedValue);
        kafka.getSpec().getKafka().getTemplate().getPersistentVolumeClaim().getMetadata().setLabels(pvcLabel);
        kafka.getSpec().getKafka().getTemplate().getPersistentVolumeClaim().getMetadata().setAnnotations(pvcAnnotation);
        kafka.getSpec().getZookeeper().getTemplate().getPersistentVolumeClaim().getMetadata().setLabels(pvcLabel);
        kafka.getSpec().getZookeeper().getTemplate().getPersistentVolumeClaim().getMetadata().setAnnotations(pvcAnnotation);
    }, namespaceName);

    PersistentVolumeClaimUtils.waitUntilPVCLabelsChange(namespaceName, clusterName, pvcLabel, labelAnnotationKey);
    PersistentVolumeClaimUtils.waitUntilPVCAnnotationChange(namespaceName, clusterName, pvcAnnotation, labelAnnotationKey);
    KafkaUtils.waitForKafkaReady(namespaceName, clusterName);

    pvcs = kubeClient(namespaceName).listPersistentVolumeClaims(namespaceName, clusterName).stream().filter(
        persistentVolumeClaim -> persistentVolumeClaim.getMetadata().getName().contains(clusterName)).collect(Collectors.toList());

    LOGGER.info(pvcs.toString());

    assertThat(pvcs.size(), is(7));

    // After the CR edit, every PVC must carry the changed value.
    for (PersistentVolumeClaim pvc : pvcs) {
        LOGGER.info("Verifying replaced PVC label {} - {} = {}", pvc.getMetadata().getName(), firstValue, pvc.getMetadata().getLabels().get(labelAnnotationKey));
        assertThat(pvc.getMetadata().getLabels().get(labelAnnotationKey), is(changedValue));
        assertThat(pvc.getMetadata().getAnnotations().get(labelAnnotationKey), is(changedValue));
    }
}
// Negative test: offsets/transaction-log replication settings higher than the
// number of brokers (3) must be rejected by the operator with a descriptive
// status-condition message instead of deploying a broken cluster.
@ParallelNamespaceTest
void testKafkaOffsetsReplicationFactorHigherThanReplicas(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());

    // NOTE(review): the extra 'false' argument presumably disables waiting
    // for readiness, since this Kafka CR is expected to fail validation —
    // confirm against resourceManager.createResource's signature.
    resourceManager.createResource(extensionContext, false, KafkaTemplates.kafkaEphemeral(clusterName, 3, 1)
        .editSpec()
            .editKafka()
                .addToConfig("offsets.topic.replication.factor", 4)
                .addToConfig("transaction.state.log.min.isr", 4)
                .addToConfig("transaction.state.log.replication.factor", 4)
            .endKafka()
        .endSpec().build());

    KafkaUtils.waitUntilKafkaStatusConditionContainsMessage(clusterName, namespaceName,
        "Kafka configuration option .* should be set to " + 3 + " or less because 'spec.kafka.replicas' is " + 3);
}
// Deploys a cluster where every container (Kafka, ZooKeeper, Entity Operator
// sidecars/operators, Kafka Exporter, Cruise Control) runs with
// readOnlyRootFilesystem=true and verifies the cluster still becomes ready
// and can produce/consume messages.
@ParallelNamespaceTest
@Tag(INTERNAL_CLIENTS_USED)
@Tag(CRUISE_CONTROL)
void testReadOnlyRootFileSystem(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());

    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 3)
        .editSpec()
            .editKafka()
                .withNewTemplate()
                    .withNewKafkaContainer()
                        .withSecurityContext(new SecurityContextBuilder().withReadOnlyRootFilesystem(true).build())
                    .endKafkaContainer()
                .endTemplate()
            .endKafka()
            .editZookeeper()
                .withNewTemplate()
                    .withNewZookeeperContainer()
                        .withSecurityContext(new SecurityContextBuilder().withReadOnlyRootFilesystem(true).build())
                    .endZookeeperContainer()
                .endTemplate()
            .endZookeeper()
            .editEntityOperator()
                .withNewTemplate()
                    .withNewTlsSidecarContainer()
                        .withSecurityContext(new SecurityContextBuilder().withReadOnlyRootFilesystem(true).build())
                    .endTlsSidecarContainer()
                    .withNewTopicOperatorContainer()
                        .withSecurityContext(new SecurityContextBuilder().withReadOnlyRootFilesystem(true).build())
                    .endTopicOperatorContainer()
                    .withNewUserOperatorContainer()
                        .withSecurityContext(new SecurityContextBuilder().withReadOnlyRootFilesystem(true).build())
                    .endUserOperatorContainer()
                .endTemplate()
            .endEntityOperator()
            .editOrNewKafkaExporter()
                .withNewTemplate()
                    .withNewContainer()
                        .withSecurityContext(new SecurityContextBuilder().withReadOnlyRootFilesystem(true).build())
                    .endContainer()
                .endTemplate()
            .endKafkaExporter()
            .editOrNewCruiseControl()
                .withNewTemplate()
                    .withNewTlsSidecarContainer()
                        .withSecurityContext(new SecurityContextBuilder().withReadOnlyRootFilesystem(true).build())
                    .endTlsSidecarContainer()
                    .withNewCruiseControlContainer()
                        .withSecurityContext(new SecurityContextBuilder().withReadOnlyRootFilesystem(true).build())
                    .endCruiseControlContainer()
                .endTemplate()
            .endCruiseControl()
        .endSpec()
        .build());

    KafkaUtils.waitForKafkaReady(namespaceName, clusterName);

    resourceManager.createResource(extensionContext, KafkaTopicTemplates.topic(clusterName, topicName).build());
    resourceManager.createResource(extensionContext, KafkaClientsTemplates.kafkaClients(false, clusterName + "-" + Constants.KAFKA_CLIENTS).build());

    final String kafkaClientsPodName = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, clusterName + "-" + Constants.KAFKA_CLIENTS).get(0).getMetadata().getName();

    InternalKafkaClient internalKafkaClient = new InternalKafkaClient.Builder()
        .withUsingPodName(kafkaClientsPodName)
        .withTopicName(topicName)
        .withNamespaceName(namespaceName)
        .withClusterName(clusterName)
        .withMessageCount(MESSAGE_COUNT)
        .withListenerName(Constants.PLAIN_LISTENER_DEFAULT_NAME)
        .build();

    LOGGER.info("Checking produced and consumed messages to pod:{}", kafkaClientsPodName);
    internalKafkaClient.checkProducedAndConsumedMessages(
        internalKafkaClient.sendMessagesPlain(),
        internalKafkaClient.receiveMessagesPlain()
    );
}
/**
 * Asserts that the expected Kafka configuration appears both in the generated
 * {@code <cluster>-kafka-config} ConfigMap and in {@code /tmp/strimzi.properties}
 * inside every broker pod matching {@code podNamePrefix}.
 *
 * @param namespaceName namespace the cluster runs in
 * @param podNamePrefix prefix used to list the broker pods
 * @param config        expected configuration entries; values are compared as strings
 * @param clusterName   name of the Kafka cluster
 */
protected void checkKafkaConfiguration(String namespaceName, String podNamePrefix, Map<String, Object> config, String clusterName) {
    LOGGER.info("Checking kafka configuration");
    List<Pod> pods = kubeClient(namespaceName).listPodsByPrefixInName(namespaceName, podNamePrefix);

    Properties properties = configMap2Properties(kubeClient(namespaceName).getConfigMap(namespaceName, clusterName + "-kafka-config"));

    for (Map.Entry<String, Object> property : config.entrySet()) {
        String key = property.getKey();
        Object val = property.getValue();

        assertThat(properties.containsKey(key), is(true));
        // Properties values are always Strings; compare against the string
        // form of the expected value, otherwise a non-String expectation
        // (e.g. Integer 3 vs "3") could never match.
        assertThat(properties.getProperty(key), is(String.valueOf(val)));
    }

    for (Pod pod: pods) {
        ExecResult result = cmdKubeClient(namespaceName).execInPod(pod.getMetadata().getName(), "/bin/bash", "-c", "cat /tmp/strimzi.properties");
        Properties execProperties = stringToProperties(result.out());

        for (Map.Entry<String, Object> property : config.entrySet()) {
            String key = property.getKey();
            Object val = property.getValue();

            assertThat(execProperties.containsKey(key), is(true));
            assertThat(execProperties.getProperty(key), is(String.valueOf(val)));
        }
    }
}
/**
 * Checks that the PVCs in {@code volumes} request the expected storage size.
 * The list is walked in replica-major order: each replica contributes
 * {@code diskCount} consecutive volumes, and {@code diskSizes} holds one
 * expected size per replica.
 */
void checkStorageSizeForVolumes(List<PersistentVolumeClaim> volumes, String[] diskSizes, int kafkaRepl, int diskCount) {
    final int totalVolumes = kafkaRepl * diskCount;
    for (int index = 0; index < totalVolumes; index++) {
        final PersistentVolumeClaim volume = volumes.get(index);
        final Quantity requestedStorage = volume.getSpec().getResources().getRequests().get("storage");
        LOGGER.info("Checking volume {} and size of storage {}", volume.getMetadata().getName(), requestedStorage);
        assertThat(requestedStorage, is(new Quantity(diskSizes[index / diskCount])));
    }
}
/**
 * Verifies the PVCs of a JBOD Kafka cluster: Strimzi labels and requested size
 * on each volume, the expected {@code data-<disk>-<cluster>-kafka-<replica>}
 * naming, and that each broker pod mounts the matching data sources and PVCs.
 */
void verifyVolumeNamesAndLabels(String namespaceName, String clusterName, int kafkaReplicas, int diskCountPerReplica, String diskSizeGi) {
    ArrayList<String> pvcs = new ArrayList<>();

    kubeClient(namespaceName).listPersistentVolumeClaims(namespaceName, clusterName).stream()
        .filter(pvc -> pvc.getMetadata().getName().contains(clusterName + "-kafka"))
        .forEach(volume -> {
            String volumeName = volume.getMetadata().getName();
            pvcs.add(volumeName);
            LOGGER.info("Checking labels for volume:" + volumeName);
            assertThat(volume.getMetadata().getLabels().get(Labels.STRIMZI_CLUSTER_LABEL), is(clusterName));
            assertThat(volume.getMetadata().getLabels().get(Labels.STRIMZI_KIND_LABEL), is(Kafka.RESOURCE_KIND));
            assertThat(volume.getMetadata().getLabels().get(Labels.STRIMZI_NAME_LABEL), is(clusterName.concat("-kafka")));
            assertThat(volume.getSpec().getResources().getRequests().get("storage"), is(new Quantity(diskSizeGi, "Gi")));
        });

    LOGGER.info("Checking PVC names included in JBOD array");
    for (int i = 0; i < kafkaReplicas; i++) {
        for (int j = 0; j < diskCountPerReplica; j++) {
            assertThat(pvcs.contains("data-" + j + "-" + clusterName + "-kafka-" + i), is(true));
        }
    }

    LOGGER.info("Checking PVC on Kafka pods");
    for (int i = 0; i < kafkaReplicas; i++) {
        ArrayList<String> dataSourcesOnPod = new ArrayList<>();
        ArrayList<String> pvcsOnPod = new ArrayList<>();

        LOGGER.info("Getting list of mounted data sources and PVCs on Kafka pod " + i);
        // Fetch the pod once per replica instead of re-querying the API
        // server twice for every volume — the pod does not change inside
        // the inner loop.
        Pod kafkaPod = kubeClient(namespaceName).getPod(namespaceName, clusterName.concat("-kafka-" + i));
        for (int j = 0; j < diskCountPerReplica; j++) {
            dataSourcesOnPod.add(kafkaPod.getSpec().getVolumes().get(j).getName());
            pvcsOnPod.add(kafkaPod.getSpec().getVolumes().get(j).getPersistentVolumeClaim().getClaimName());
        }

        LOGGER.info("Verifying mounted data sources and PVCs on Kafka pod " + i);
        for (int j = 0; j < diskCountPerReplica; j++) {
            assertThat(dataSourcesOnPod.contains("data-" + j), is(true));
            assertThat(pvcsOnPod.contains("data-" + j + "-" + clusterName + "-kafka-" + i), is(true));
        }
    }
}
/**
 * Asserts that every entry of {@code labels} is present with the same value
 * on the given resource's metadata.
 */
void verifyPresentLabels(Map<String, String> labels, HasMetadata resources) {
    final Map<String, String> actualLabels = resources.getMetadata().getLabels();
    for (Map.Entry<String, String> expected : labels.entrySet()) {
        assertThat("Label exists with concrete value in HasMetadata(Services, CM, STS) resources",
            expected.getValue().equals(actualLabels.get(expected.getKey())));
    }
}
/**
 * Asserts that none of the given keys is present in the label map.
 */
void verifyNullLabels(String[] labelKeys, Map<String, String> labels) {
    for (int i = 0; i < labelKeys.length; i++) {
        assertThat(labels.get(labelKeys[i]), nullValue());
    }
}
/**
 * Asserts that none of the given keys is present on the resource's labels.
 * Delegates to the {@code Map} overload.
 */
void verifyNullLabels(String[] labelKeys, HasMetadata resources) {
    verifyNullLabels(labelKeys, resources.getMetadata().getLabels());
}
/**
 * Asserts that the three mandatory Strimzi labels (cluster, kind, name) are
 * present in the given label map.
 */
void verifyAppLabels(Map<String, String> labels) {
    LOGGER.info("Verifying labels {}", labels);
    for (String requiredKey : new String[]{Labels.STRIMZI_CLUSTER_LABEL, Labels.STRIMZI_KIND_LABEL, Labels.STRIMZI_NAME_LABEL}) {
        assertThat("Label " + requiredKey + " is not present", labels.containsKey(requiredKey));
    }
}
/**
 * Asserts that the Strimzi cluster and kind labels are present; Secrets and
 * ConfigMaps do not carry the name label, so it is not checked here.
 */
void verifyAppLabelsForSecretsAndConfigMaps(Map<String, String> labels) {
    LOGGER.info("Verifying labels {}", labels);
    for (String requiredKey : new String[]{Labels.STRIMZI_CLUSTER_LABEL, Labels.STRIMZI_KIND_LABEL}) {
        assertThat("Label " + requiredKey + " is not present", labels.containsKey(requiredKey));
    }
}
// Installs the Cluster Operator (watching all namespaces) once before any
// test in this class runs.
@BeforeAll
void setup(ExtensionContext extensionContext) {
    final SetupClusterOperator.SetupClusterOperatorBuilder installationBuilder =
        new SetupClusterOperator.SetupClusterOperatorBuilder()
            .withExtensionContext(extensionContext)
            .withNamespace(NAMESPACE)
            .withWatchingNamespaces(Constants.WATCH_ALL_NAMESPACES);
    install = installationBuilder.createInstallation().runInstallation();
}
/**
 * Per-test cleanup hook; subclasses may override. Beyond the default resource
 * deletion it also removes leftovers of the templated/OpenShift cluster
 * ({@code OPENSHIFT_CLUSTER_NAME}) plus any remaining KafkaTopic CRs and PVCs
 * in the test namespace.
 */
protected void afterEachMayOverride(ExtensionContext extensionContext) throws Exception {
    resourceManager.deleteResources(extensionContext);

    final String namespaceName = StUtils.getNamespaceBasedOnRbac(NAMESPACE, extensionContext);

    // Remove resources created from the OpenShift template, if it was deployed.
    if (cluster.getListOfDeployedResources().contains(TEMPLATE_PATH)) {
        cluster.deleteCustomResources(extensionContext, TEMPLATE_PATH);
    }

    // Delete the templated Kafka CR and its pods, if present.
    if (KafkaResource.kafkaClient().inNamespace(namespaceName).withName(OPENSHIFT_CLUSTER_NAME).get() != null) {
        cmdKubeClient(namespaceName).deleteByName(Kafka.RESOURCE_KIND, OPENSHIFT_CLUSTER_NAME);
    }

    kubeClient(namespaceName).listPods(namespaceName).stream()
        .filter(p -> p.getMetadata().getName().startsWith(OPENSHIFT_CLUSTER_NAME))
        .forEach(p -> PodUtils.deletePodWithWait(p.getMetadata().getName()));

    // Finally drop every KafkaTopic CR and PVC left behind in the namespace.
    kubeClient(namespaceName).getClient().customResources(CustomResourceDefinitionContext.fromCrd(Crds.kafkaTopic()), KafkaTopic.class, KafkaTopicList.class).inNamespace(namespaceName).delete();
    kubeClient(namespaceName).getClient().persistentVolumeClaims().inNamespace(namespaceName).delete();
}
}<|fim▁end|>
|
.endTemplate()
|
<|file_name|>gwcred.py<|end_file_name|><|fim▁begin|>###
# Copyright (c) 2005, Ali Afshar
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
from twisted.cred import portal, checkers, credentials, error
from twisted.conch.checkers import SSHPublicKeyDatabase
from twisted.conch.credentials import ISSHPrivateKey
from twisted.python import failure
from twisted.internet import defer
from twisted.conch.ssh import keys
class SBCredChecker(object):
    """SSH username/password credential checker.

    Registered with a ``twisted.cred`` portal; delegates the actual user
    lookup to ``self.cb.getUser`` (``cb`` is attached externally by the
    service that creates the checker).
    """
    # This __implements__ line tells the portal that we can handle
    # username/password credentials (old-style Twisted interface declaration).
    __implements__ = (checkers.ICredentialsChecker,)
    credentialInterfaces = (credentials.IUsernamePassword,)

    def requestAvatarId(self, credentials):
        """ Return an avatar id or return an error """
        # Reassembled: the body of this method was scrambled by extraction
        # artifacts in the original source; the displaced fragment declared
        # the getUser() call shown here.
        self.cb.log.debug('twisted checker checking %s',
                          credentials.username)
        a = self.cb.getUser(protocol=self.cb.PROTOCOL,
                            username=credentials.username,
                            password=credentials.password,
                            peer=credentials.peer)
        if a:
            return a
        else:
            return failure.Failure(error.UnauthorizedLogin())
class SBPublicKeyChecker(object):
    """SSH public-key credential checker.

    Delegates the key lookup to ``self.cb.getUser`` (``cb`` is attached
    externally by the owning service).
    """
    __implements__ = (checkers.ICredentialsChecker,)
    credentialInterfaces = (ISSHPrivateKey,)

    def requestAvatarId(self, credentials):
        """Return an avatar id for a known key, else an UnauthorizedLogin failure."""
        avatar = self.cb.getUser(protocol=self.cb.PROTOCOL,
                                 username=credentials.username,
                                 blob=credentials.blob,
                                 peer=credentials.peer)
        if not avatar:
            return failure.Failure(error.UnauthorizedLogin())
        return avatar
#class SBPublicKeyChecker(SSHPublicKeyDatabase):
# credentialInterfaces = ISSHPrivateKey,
# __implements__ = ICredentialsChecker
#
# def requestAvatarId(self, credentials):
# if not self.checkKey(credentials):
# return defer.fail(UnauthorizedLogin())
# if not credentials.signature:
# return defer.fail(error.ValidPublicKey())
# else:
# try:
# pubKey = keys.getPublicKeyObject(data = credentials.blob)
# if keys.verifySignature(pubKey, credentials.signature,
# credentials.sigData):
# return defer.succeed(credentials.username)
# except:
# pass
# return defer.fail(UnauthorizedLogin())
#
# def checkKey(self, credentials):
# sshDir = os.path.expanduser('~%s/.ssh/' % credentials.username)
# if sshDir.startswith('~'): # didn't expand
# return 0
# uid, gid = os.geteuid(), os.getegid()
# ouid, ogid = pwd.getpwnam(credentials.username)[2:4]
# os.setegid(0)
# os.seteuid(0)
# os.setegid(ogid)
# os.seteuid(ouid)
# for name in ['authorized_keys2', 'authorized_keys']:
# if not os.path.exists(sshDir+name):
# continue
# lines = open(sshDir+name).xreadlines()
# os.setegid(0)
# os.seteuid(0)
# os.setegid(gid)
# os.seteuid(uid)
# for l in lines:
# l2 = l.split()
# if len(l2) < 2:
# continue
# try:
# if base64.decodestring(l2[1]) == credentials.blob:
# return 1
# except binascii.Error:
# continue
# return 0
class SBPortal(portal.Portal):
    """Thin subclass of ``twisted.cred.portal.Portal``; adds no behaviour."""
    pass
class SBRealm:
    """twisted.cred realm that wraps avatar ids in ``userclass`` instances."""
    __implements__ = portal.IRealm

    def __init__(self, userclass):
        # Class used to build avatar objects from avatar ids.
        self.userclass = userclass

    def requestAvatar(self, avatarId, mind, *interfaces):
        """Build an avatar for *avatarId*; return (interface, avatar, logout)."""
        self.cb.cb.log.critical('%s', interfaces)
        avatar = self.userclass(avatarId)
        avatar.cb = self.cb
        # No teardown is required, hence the no-op logout callable.
        return interfaces[0], avatar, lambda: None
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:<|fim▁end|>
|
""" Return an avatar id or return an error """
a = self.cb.getUser(protocol=self.cb.PROTOCOL,
username=credentials.username,
|
<|file_name|>drive_api_util.cc<|end_file_name|><|fim▁begin|>// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chrome/browser/drive/drive_api_util.h"
#include <string>
#include "base/files/file.h"
#include "base/logging.h"
#include "base/md5.h"
#include "base/strings/string16.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
#include "base/values.h"
#include "content/public/browser/browser_thread.h"
#include "google_apis/drive/drive_api_parser.h"
#include "google_apis/drive/gdata_wapi_parser.h"
#include "net/base/escape.h"
#include "third_party/re2/re2/re2.h"
#include "url/gurl.h"
namespace drive {
namespace util {
namespace {
// Google Apps MIME types:
const char kGoogleDocumentMimeType[] = "application/vnd.google-apps.document";
const char kGoogleDrawingMimeType[] = "application/vnd.google-apps.drawing";
const char kGooglePresentationMimeType[] =
"application/vnd.google-apps.presentation";
const char kGoogleSpreadsheetMimeType[] =
"application/vnd.google-apps.spreadsheet";
const char kGoogleTableMimeType[] = "application/vnd.google-apps.table";
const char kGoogleFormMimeType[] = "application/vnd.google-apps.form";
const char kDriveFolderMimeType[] = "application/vnd.google-apps.folder";
// Maps a WAPI entry kind to the corresponding Google Apps MIME type.
// Returns the empty string for kinds without a hosted-document MIME type.
std::string GetMimeTypeFromEntryKind(google_apis::DriveEntryKind kind) {
  if (kind == google_apis::ENTRY_KIND_DOCUMENT)
    return kGoogleDocumentMimeType;
  if (kind == google_apis::ENTRY_KIND_SPREADSHEET)
    return kGoogleSpreadsheetMimeType;
  if (kind == google_apis::ENTRY_KIND_PRESENTATION)
    return kGooglePresentationMimeType;
  if (kind == google_apis::ENTRY_KIND_DRAWING)
    return kGoogleDrawingMimeType;
  if (kind == google_apis::ENTRY_KIND_TABLE)
    return kGoogleTableMimeType;
  if (kind == google_apis::ENTRY_KIND_FORM)
    return kGoogleFormMimeType;
  return std::string();
}
// Returns a deep copy of |source|.  ScopedVector owns its elements, so every
// string has to be duplicated on the heap rather than shared.
ScopedVector<std::string> CopyScopedVectorString(
    const ScopedVector<std::string>& source) {
  ScopedVector<std::string> result;
  result.reserve(source.size());
  for (size_t i = 0; i < source.size(); ++i)
    result.push_back(new std::string(*source[i]));

  return result.Pass();
}
// Converts AppIcon (of GData WAPI) to DriveAppIcon (of Drive API v2).
// The icon category enum is mapped one-to-one; an unexpected category value
// hits NOTREACHED().
scoped_ptr<google_apis::DriveAppIcon>
ConvertAppIconToDriveAppIcon(const google_apis::AppIcon& app_icon) {
  scoped_ptr<google_apis::DriveAppIcon> resource(
      new google_apis::DriveAppIcon);
  switch (app_icon.category()) {
    case google_apis::AppIcon::ICON_UNKNOWN:
      resource->set_category(google_apis::DriveAppIcon::UNKNOWN);
      break;
    case google_apis::AppIcon::ICON_DOCUMENT:
      resource->set_category(google_apis::DriveAppIcon::DOCUMENT);
      break;
    case google_apis::AppIcon::ICON_APPLICATION:
      resource->set_category(google_apis::DriveAppIcon::APPLICATION);
      break;
    case google_apis::AppIcon::ICON_SHARED_DOCUMENT:
      resource->set_category(google_apis::DriveAppIcon::SHARED_DOCUMENT);
      break;
    default:
      NOTREACHED();
  }
  resource->set_icon_side_length(app_icon.icon_side_length());
  resource->set_icon_url(app_icon.GetIconURL());
  return resource.Pass();
}
// Converts InstalledApp (of GData WAPI) to AppResource (of Drive API v2).
scoped_ptr<google_apis::AppResource>
ConvertInstalledAppToAppResource(
    const google_apis::InstalledApp& installed_app) {
  scoped_ptr<google_apis::AppResource> resource(new google_apis::AppResource);
  resource->set_application_id(installed_app.app_id());
  resource->set_name(installed_app.app_name());
  resource->set_object_type(installed_app.object_type());
  resource->set_supports_create(installed_app.supports_create());

  // Each braced scope limits the lifetime of the deep-copied vector until
  // ownership is moved into |resource| via Pass().
  {
    ScopedVector<std::string> primary_mimetypes(
        CopyScopedVectorString(installed_app.primary_mimetypes()));
    resource->set_primary_mimetypes(primary_mimetypes.Pass());
  }
  {
    ScopedVector<std::string> secondary_mimetypes(
        CopyScopedVectorString(installed_app.secondary_mimetypes()));
    resource->set_secondary_mimetypes(secondary_mimetypes.Pass());
  }
  {
    ScopedVector<std::string> primary_file_extensions(
        CopyScopedVectorString(installed_app.primary_extensions()));
    resource->set_primary_file_extensions(primary_file_extensions.Pass());
  }
  {
    ScopedVector<std::string> secondary_file_extensions(
        CopyScopedVectorString(installed_app.secondary_extensions()));
    resource->set_secondary_file_extensions(secondary_file_extensions.Pass());
  }

  {
    const ScopedVector<google_apis::AppIcon>& app_icons =
        installed_app.app_icons();
    ScopedVector<google_apis::DriveAppIcon> icons;
    icons.reserve(app_icons.size());
    for (size_t i = 0; i < app_icons.size(); ++i) {
      icons.push_back(ConvertAppIconToDriveAppIcon(*app_icons[i]).release());
    }
    resource->set_icons(icons.Pass());
  }

  // supports_import, installed and authorized are not supported in
  // InstalledApp.

  return resource.Pass();
}
// Identity canonicalizer: returns its argument unchanged (Drive API v2
// resource ids need no rewriting).
std::string Identity(const std::string& resource_id) {
  return resource_id;
}
} // namespace
// Escapes a string value for embedding into a single-quoted Drive search
// query: prefixes every backslash and single quote with a backslash.
std::string EscapeQueryStringValue(const std::string& str) {
  std::string escaped;
  escaped.reserve(str.size());
  for (std::string::size_type pos = 0; pos < str.size(); ++pos) {
    const char ch = str[pos];
    if (ch == '\\' || ch == '\'')
      escaped.push_back('\\');
    escaped.push_back(ch);
  }
  return escaped;
}
// Translates a user-typed search query into Drive API v2 query syntax
// ("fullText contains '...'").  Tokens are separated by (possibly non-ASCII)
// whitespace, may be double-quoted, a leading '-' negates a token, and
// multiple tokens are joined with "and".
std::string TranslateQuery(const std::string& original_query) {
  // In order to handle non-ascii white spaces correctly, convert to UTF16.
  base::string16 query = base::UTF8ToUTF16(original_query);
  // A token ends at whitespace or at a double quote.
  const base::string16 kDelimiter(
      base::kWhitespaceUTF16 +
      base::string16(1, static_cast<base::char16>('"')));

  std::string result;
  for (size_t index = query.find_first_not_of(base::kWhitespaceUTF16);
       index != base::string16::npos;
       index = query.find_first_not_of(base::kWhitespaceUTF16, index)) {
    bool is_exclusion = (query[index] == '-');
    if (is_exclusion)
      ++index;
    if (index == query.length()) {
      // Here, the token is '-' and it should be ignored.
      continue;
    }

    size_t begin_token = index;
    base::string16 token;
    if (query[begin_token] == '"') {
      // Quoted query.
      ++begin_token;
      size_t end_token = query.find('"', begin_token);
      if (end_token == base::string16::npos) {
        // This is kind of syntax error, since quoted string isn't finished.
        // However, the query is built by user manually, so here we treat
        // whole remaining string as a token as a fallback, by appending
        // a missing double-quote character.
        end_token = query.length();
        query.push_back('"');
      }

      token = query.substr(begin_token, end_token - begin_token);
      index = end_token + 1;  // Consume last '"', too.
    } else {
      size_t end_token = query.find_first_of(kDelimiter, begin_token);
      if (end_token == base::string16::npos) {
        end_token = query.length();
      }

      token = query.substr(begin_token, end_token - begin_token);
      index = end_token;
    }

    if (token.empty()) {
      // Just ignore an empty token.
      continue;
    }

    if (!result.empty()) {
      // If there are two or more tokens, need to connect with "and".
      result.append(" and ");
    }

    // The meaning of "fullText" should include title, description and content.
    base::StringAppendF(
        &result,
        "%sfullText contains \'%s\'",
        is_exclusion ? "not " : "",
        EscapeQueryStringValue(base::UTF16ToUTF8(token)).c_str());
  }

  return result;
}
// Extracts the resource id from |url|: the URL's last path component with
// URL-special characters unescaped.
std::string ExtractResourceIdFromUrl(const GURL& url) {
  return net::UnescapeURLComponent(url.ExtractFileName(),
                                   net::UnescapeRule::URL_SPECIAL_CHARS);
}
// Returns |resource_id| in its canonical (Drive API v2) form.
std::string CanonicalizeResourceId(const std::string& resource_id) {
  // If resource ID is in the old WAPI format starting with a prefix like
  // "document:", strip it and return the remaining part.  The pattern also
  // accepts "%3A", the URL-escaped form of ':'.
  std::string stripped_resource_id;
  if (RE2::FullMatch(resource_id, "^[a-z-]+(?::|%3A)([\\w-]+)$",
                     &stripped_resource_id))
    return stripped_resource_id;
  return resource_id;
}
// Returns a canonicalizer callback that leaves resource ids untouched.
ResourceIdCanonicalizer GetIdentityResourceIdCanonicalizer() {
  return base::Bind(&Identity);
}
// OAuth2 scope for the GData WAPI documents-list feed.
const char kDocsListScope[] = "https://docs.google.com/feeds/";
// OAuth2 scope for the Drive apps API.
const char kDriveAppsScope[] = "https://www.googleapis.com/auth/drive.apps";
// Parses |value| as a WAPI ResourceEntry and runs |callback| with the entry's
// "share" link (or an empty GURL when |value| is missing, fails to parse, or
// has no share link).  Must be called on the UI thread.
void ParseShareUrlAndRun(const google_apis::GetShareUrlCallback& callback,
                         google_apis::GDataErrorCode error,
                         scoped_ptr<base::Value> value) {
  DCHECK(content::BrowserThread::CurrentlyOn(content::BrowserThread::UI));

  if (!value) {
    callback.Run(error, GURL());
    return;
  }

  // Parsing ResourceEntry is cheap enough to do on UI thread.
  scoped_ptr<google_apis::ResourceEntry> entry =
      google_apis::ResourceEntry::ExtractAndParse(*value);
  if (!entry) {
    callback.Run(google_apis::GDATA_PARSE_ERROR, GURL());
    return;
  }

  const google_apis::Link* share_link =
      entry->GetLinkByType(google_apis::Link::LINK_SHARE);
  callback.Run(error, share_link ? share_link->href() : GURL());
}
// Converts AccountMetadata (of GData WAPI) to AboutResource (of Drive API
// v2).  |root_resource_id| is passed in separately because AccountMetadata
// has no equivalent field.
scoped_ptr<google_apis::AboutResource>
ConvertAccountMetadataToAboutResource(
    const google_apis::AccountMetadata& account_metadata,
    const std::string& root_resource_id) {
  scoped_ptr<google_apis::AboutResource> resource(
      new google_apis::AboutResource);
  resource->set_largest_change_id(account_metadata.largest_changestamp());
  resource->set_quota_bytes_total(account_metadata.quota_bytes_total());
  resource->set_quota_bytes_used(account_metadata.quota_bytes_used());
  resource->set_root_folder_id(root_resource_id);
  return resource.Pass();
}
// Converts AccountMetadata (of GData WAPI) to AppList (of Drive API v2).
scoped_ptr<google_apis::AppList>
ConvertAccountMetadataToAppList(
    const google_apis::AccountMetadata& account_metadata) {
  // Allocate the result; the original body used |resource| without ever
  // declaring it (the declaration was lost from the source), so restore it
  // here before it is populated and returned.
  scoped_ptr<google_apis::AppList> resource(new google_apis::AppList);

  const ScopedVector<google_apis::InstalledApp>& installed_apps =
      account_metadata.installed_apps();
  ScopedVector<google_apis::AppResource> app_resources;
  app_resources.reserve(installed_apps.size());
  for (size_t i = 0; i < installed_apps.size(); ++i) {
    app_resources.push_back(
        ConvertInstalledAppToAppResource(*installed_apps[i]).release());
  }
  resource->set_items(app_resources.Pass());

  // etag is not supported in AccountMetadata.

  return resource.Pass();
}
// Converts a GData WAPI ResourceEntry into its Drive API v2 FileResource
// equivalent. Fields with no WAPI counterpart are synthesized (e.g. the
// shared-with-me date) or left at their protobuf-style defaults.
scoped_ptr<google_apis::FileResource> ConvertResourceEntryToFileResource(
    const google_apis::ResourceEntry& entry) {
  scoped_ptr<google_apis::FileResource> file(new google_apis::FileResource);

  file->set_file_id(entry.resource_id());
  file->set_title(entry.title());
  file->set_created_date(entry.published_time());

  if (std::find(entry.labels().begin(), entry.labels().end(),
                "shared-with-me") != entry.labels().end()) {
    // Set current time to mark the file is shared_with_me, since ResourceEntry
    // doesn't have |shared_with_me_date| equivalent.
    file->set_shared_with_me_date(base::Time::Now());
  }

  file->set_shared(std::find(entry.labels().begin(), entry.labels().end(),
                             "shared") != entry.labels().end());

  if (entry.is_folder()) {
    file->set_mime_type(kDriveFolderMimeType);
  } else {
    // Prefer the MIME type implied by the entry kind (hosted documents);
    // fall back to the entry's own content MIME type for regular files.
    std::string mime_type = GetMimeTypeFromEntryKind(entry.kind());
    if (mime_type.empty())
      mime_type = entry.content_mime_type();
    file->set_mime_type(mime_type);
  }

  file->set_md5_checksum(entry.file_md5());
  file->set_file_size(entry.file_size());

  // WAPI models trashing via the "deleted" flag; v2 models it as a label.
  file->mutable_labels()->set_trashed(entry.deleted());
  file->set_etag(entry.etag());

  google_apis::ImageMediaMetadata* image_media_metadata =
    file->mutable_image_media_metadata();
  image_media_metadata->set_width(entry.image_width());
  image_media_metadata->set_height(entry.image_height());
  image_media_metadata->set_rotation(entry.image_rotation());

  // Parent references and the alternate link both come from WAPI links.
  std::vector<google_apis::ParentReference>* parents = file->mutable_parents();
  for (size_t i = 0; i < entry.links().size(); ++i) {
    using google_apis::Link;
    const Link& link = *entry.links()[i];
    switch (link.type()) {
      case Link::LINK_PARENT: {
        google_apis::ParentReference parent;
        parent.set_parent_link(link.href());

        std::string file_id =
            drive::util::ExtractResourceIdFromUrl(link.href());
        parent.set_file_id(file_id);
        parent.set_is_root(file_id == kWapiRootDirectoryResourceId);
        parents->push_back(parent);
        break;
      }
      case Link::LINK_ALTERNATE:
        file->set_alternate_link(link.href());
        break;
      default:
        break;
    }
  }

  file->set_modified_date(entry.updated_time());
  file->set_last_viewed_by_me_date(entry.last_viewed_time());

  return file.Pass();
}
// Maps a Drive API v2 FileResource onto the WAPI DriveEntryKind enum.
// Folders are recognized structurally; hosted-document flavors (and PDF)
// are distinguished purely by MIME type; everything else is a plain file.
google_apis::DriveEntryKind GetKind(
    const google_apis::FileResource& file_resource) {
  if (file_resource.IsDirectory())
    return google_apis::ENTRY_KIND_FOLDER;

  static const struct {
    const char* mime_type;
    google_apis::DriveEntryKind kind;
  } kMimeTypeToKind[] = {
    { kGoogleDocumentMimeType, google_apis::ENTRY_KIND_DOCUMENT },
    { kGoogleSpreadsheetMimeType, google_apis::ENTRY_KIND_SPREADSHEET },
    { kGooglePresentationMimeType, google_apis::ENTRY_KIND_PRESENTATION },
    { kGoogleDrawingMimeType, google_apis::ENTRY_KIND_DRAWING },
    { kGoogleTableMimeType, google_apis::ENTRY_KIND_TABLE },
    { kGoogleFormMimeType, google_apis::ENTRY_KIND_FORM },
    { "application/pdf", google_apis::ENTRY_KIND_PDF },
  };

  const std::string& mime_type = file_resource.mime_type();
  for (size_t i = 0;
       i < sizeof(kMimeTypeToKind) / sizeof(kMimeTypeToKind[0]); ++i) {
    if (mime_type == kMimeTypeToKind[i].mime_type)
      return kMimeTypeToKind[i].kind;
  }
  return google_apis::ENTRY_KIND_FILE;
}
// Converts a Drive API v2 FileResource into the WAPI ResourceEntry
// representation: labels become the "shared"/"shared-with-me" strings,
// parents and the alternate link become typed Link objects, and the
// trashed label maps back onto the WAPI "deleted" flag.
scoped_ptr<google_apis::ResourceEntry>
ConvertFileResourceToResourceEntry(
    const google_apis::FileResource& file_resource) {
  scoped_ptr<google_apis::ResourceEntry> entry(new google_apis::ResourceEntry);

  // ResourceEntry
  entry->set_resource_id(file_resource.file_id());
  entry->set_id(file_resource.file_id());
  entry->set_kind(GetKind(file_resource));
  entry->set_title(file_resource.title());
  entry->set_published_time(file_resource.created_date());

  // Labels are the WAPI encoding of the v2 boolean sharing flags.
  std::vector<std::string> labels;
  if (!file_resource.shared_with_me_date().is_null())
    labels.push_back("shared-with-me");
  if (file_resource.shared())
    labels.push_back("shared");
  entry->set_labels(labels);

  // This should be the url to download the file_resource.
  {
    google_apis::Content content;
    content.set_mime_type(file_resource.mime_type());
    entry->set_content(content);
  }
  // TODO(kochi): entry->resource_links_

  // For file entries
  entry->set_filename(file_resource.title());
  entry->set_suggested_filename(file_resource.title());
  entry->set_file_md5(file_resource.md5_checksum());
  entry->set_file_size(file_resource.file_size());

  // If file is removed completely, that information is only available in
  // ChangeResource, and is reflected in |removed_|. If file is trashed, the
  // file entry still exists but with its "trashed" label true.
  entry->set_deleted(file_resource.labels().is_trashed());

  // ImageMediaMetadata
  entry->set_image_width(file_resource.image_media_metadata().width());
  entry->set_image_height(file_resource.image_media_metadata().height());
  entry->set_image_rotation(file_resource.image_media_metadata().rotation());

  // CommonMetadata
  entry->set_etag(file_resource.etag());
  // entry->authors_
  // entry->links_.
  ScopedVector<google_apis::Link> links;
  for (size_t i = 0; i < file_resource.parents().size(); ++i) {
    google_apis::Link* link = new google_apis::Link;
    link->set_type(google_apis::Link::LINK_PARENT);
    link->set_href(file_resource.parents()[i].parent_link());
    links.push_back(link);
  }
  if (!file_resource.alternate_link().is_empty()) {
    google_apis::Link* link = new google_apis::Link;
    link->set_type(google_apis::Link::LINK_ALTERNATE);
    link->set_href(file_resource.alternate_link());
    links.push_back(link);
  }
  entry->set_links(links.Pass());
  // entry->categories_
  entry->set_updated_time(file_resource.modified_date());
  entry->set_last_viewed_time(file_resource.last_viewed_by_me_date());

  entry->FillRemainingFields();
  return entry.Pass();
}
// Builds a WAPI ResourceEntry out of a Drive API v2 ChangeResource.
// A deleted change carries no FileResource, so it yields a mostly-empty
// entry that only records the id, the removal flag and the change stamp.
scoped_ptr<google_apis::ResourceEntry>
ConvertChangeResourceToResourceEntry(
    const google_apis::ChangeResource& change_resource) {
  const google_apis::FileResource* file = change_resource.file();
  scoped_ptr<google_apis::ResourceEntry> entry(
      file ? ConvertFileResourceToResourceEntry(*file).release()
           : new google_apis::ResourceEntry);

  entry->set_resource_id(change_resource.file_id());
  // If |is_deleted()| returns true, the file is removed from Drive.
  entry->set_removed(change_resource.is_deleted());
  entry->set_changestamp(change_resource.change_id());
  entry->set_modification_date(change_resource.modification_date());

  return entry.Pass();
}
// Wraps a Drive API v2 FileList into the WAPI ResourceList ("feed")
// representation, converting every item and carrying over the next-page
// link when one exists.
scoped_ptr<google_apis::ResourceList>
ConvertFileListToResourceList(const google_apis::FileList& file_list) {
  scoped_ptr<google_apis::ResourceList> resource_list(
      new google_apis::ResourceList);

  ScopedVector<google_apis::ResourceEntry> converted_entries;
  const ScopedVector<google_apis::FileResource>& files = file_list.items();
  for (size_t index = 0; index < files.size(); ++index) {
    converted_entries.push_back(
        ConvertFileResourceToResourceEntry(*files[index]).release());
  }
  resource_list->set_entries(converted_entries.Pass());

  ScopedVector<google_apis::Link> feed_links;
  if (!file_list.next_link().is_empty()) {
    google_apis::Link* next = new google_apis::Link;
    next->set_type(google_apis::Link::LINK_NEXT);
    next->set_href(file_list.next_link());
    feed_links.push_back(next);
  }
  resource_list->set_links(feed_links.Pass());

  return resource_list.Pass();
}
// Wraps a Drive API v2 ChangeList into the WAPI ResourceList ("feed")
// representation: every change becomes an entry, and the largest change
// id plus the next-page link are carried over.
scoped_ptr<google_apis::ResourceList>
ConvertChangeListToResourceList(const google_apis::ChangeList& change_list) {
  scoped_ptr<google_apis::ResourceList> feed(new google_apis::ResourceList);

  const ScopedVector<google_apis::ChangeResource>& items = change_list.items();
  ScopedVector<google_apis::ResourceEntry> entries;
  for (size_t i = 0; i < items.size(); ++i) {
    entries.push_back(
        ConvertChangeResourceToResourceEntry(*items[i]).release());
  }
  feed->set_entries(entries.Pass());
  feed->set_largest_changestamp(change_list.largest_change_id());

  ScopedVector<google_apis::Link> links;
  if (!change_list.next_link().is_empty()) {
    google_apis::Link* link = new google_apis::Link;
    link->set_type(google_apis::Link::LINK_NEXT);
    link->set_href(change_list.next_link());
    links.push_back(link);
  }
  feed->set_links(links.Pass());

  return feed.Pass();
}
// Computes the MD5 digest of the file at |file_path| by streaming it in
// 512 kB chunks. Returns the lower-case hex digest, or the empty string
// when the file cannot be opened or a read fails.
std::string GetMd5Digest(const base::FilePath& file_path) {
  const int kBufferSize = 512 * 1024;  // 512kB.

  base::File file(file_path, base::File::FLAG_OPEN | base::File::FLAG_READ);
  if (!file.IsValid())
    return std::string();

  base::MD5Context context;
  base::MD5Init(&context);

  scoped_ptr<char[]> chunk(new char[kBufferSize]);
  for (int64 position = 0;;) {
    const int bytes_read = file.Read(position, chunk.get(), kBufferSize);
    if (bytes_read < 0)
      return std::string();  // Read error.
    if (bytes_read == 0)
      break;  // End of file.
    position += bytes_read;
    base::MD5Update(&context, base::StringPiece(chunk.get(), bytes_read));
  }

  base::MD5Digest digest;
  base::MD5Final(&digest, &context);
  return MD5DigestToBase16(digest);
}
// Resource id of the root directory in the GData WAPI feeds.
const char kWapiRootDirectoryResourceId[] = "folder:root";
} // namespace util
} // namespace drive<|fim▁end|>
|
scoped_ptr<google_apis::AppList> resource(new google_apis::AppList);
|
<|file_name|>multi_product.rs<|end_file_name|><|fim▁begin|>#![cfg(feature = "use_std")]
use crate::size_hint;
use crate::Itertools;
#[derive(Clone)]
/// An iterator adaptor that iterates over the cartesian product of
/// multiple iterators of type `I`.
///
/// An iterator element type is `Vec<I>`.
///
/// See [`.multi_cartesian_product()`](../trait.Itertools.html#method.multi_cartesian_product)
/// for more information.
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
// Invariant: one MultiProductIter per input iterator, in input order; the
// rightmost one advances fastest (odometer-style).
pub struct MultiProduct<I>(Vec<MultiProductIter<I>>)
    where I: Iterator + Clone,
          I::Item: Clone;
/// Create a new cartesian product iterator over an arbitrary number
/// of iterators of the same type.
///
/// Iterator element is of type `Vec<H::Item::Item>`.
pub fn multi_cartesian_product<H>(iters: H) -> MultiProduct<<H::Item as IntoIterator>::IntoIter>
    where H: Iterator,
          H::Item: IntoIterator,
          <H::Item as IntoIterator>::IntoIter: Clone,
          <H::Item as IntoIterator>::Item: Clone
{
    // Wrap every input iterator in its own resettable state holder.
    MultiProduct(iters.map(|i| MultiProductIter::new(i.into_iter())).collect())
}
#[derive(Clone, Debug)]
/// Holds the state of a single iterator within a MultiProduct.
struct MultiProductIter<I>
    where I: Iterator + Clone,
          I::Item: Clone
{
    /// Element most recently yielded by `iter`; `None` before the first
    /// advance and again once the iterator is exhausted.
    cur: Option<I::Item>,
    /// The working iterator, advanced during iteration.
    iter: I,
    /// Pristine copy of the iterator used by `reset()`.
    iter_orig: I,
}
/// Holds the current state during an iteration of a MultiProduct.
#[derive(Debug)]
enum MultiProductIterState {
    /// No advance has been decided yet for this `next()` call.
    StartOfIter,
    /// A decision was made at the rightmost position; `on_first_iter`
    /// records whether this is the very first advance of the product.
    MidIter { on_first_iter: bool },
}
impl<I> MultiProduct<I>
    where I: Iterator + Clone,
          I::Item: Clone
{
    /// Iterates the rightmost iterator, then recursively iterates iterators
    /// to the left if necessary.
    ///
    /// Returns true if the iteration succeeded, else false.
    fn iterate_last(
        multi_iters: &mut [MultiProductIter<I>],
        mut state: MultiProductIterState
    ) -> bool {
        use self::MultiProductIterState::*;

        if let Some((last, rest)) = multi_iters.split_last_mut() {
            let on_first_iter = match state {
                StartOfIter => {
                    // Decide lazily at the rightmost position: it is the
                    // first pass iff that iterator holds no value yet.
                    let on_first_iter = !last.in_progress();
                    state = MidIter { on_first_iter };
                    on_first_iter
                },
                MidIter { on_first_iter } => on_first_iter
            };

            if !on_first_iter {
                last.iterate();
            }

            if last.in_progress() {
                true
            } else if MultiProduct::iterate_last(rest, state) {
                // This position rolled over: restart it while the carry
                // propagated left succeeded.
                last.reset();
                last.iterate();
                // If iterator is None twice consecutively, then iterator is
                // empty; whole product is empty.
                last.in_progress()
            } else {
                false
            }
        } else {
            // Reached end of iterator list. On initialisation, return true.
            // At end of iteration (final iterator finishes), finish.
            match state {
                StartOfIter => false,
                MidIter { on_first_iter } => on_first_iter
            }
        }
    }

    /// Returns the unwrapped value of the next iteration.
    fn curr_iterator(&self) -> Vec<I::Item> {
        self.0.iter().map(|multi_iter| {
            multi_iter.cur.clone().unwrap()
        }).collect()
    }

    /// Returns true if iteration has started and has not yet finished; false
    /// otherwise.
    fn in_progress(&self) -> bool {
        if let Some(last) = self.0.last() {
            last.in_progress()
        } else {
            false
        }
    }
}
impl<I> MultiProductIter<I>
where I: Iterator + Clone,
I::Item: Clone
{
fn new(iter: I) -> Self {
MultiProductIter {
cur: None,
iter: iter.clone(),
iter_orig: iter
}
}
/// Iterate the managed iterator.
fn iterate(&mut self) {
self.cur = self.iter.next();
}
/// Reset the managed iterator.
fn reset(&mut self) {
self.iter = self.iter_orig.clone();
}
/// Returns true if the current iterator has been started and has not yet<|fim▁hole|> self.cur.is_some()
}
}
impl<I> Iterator for MultiProduct<I>
    where I: Iterator + Clone,
          I::Item: Clone
{
    type Item = Vec<I::Item>;

    fn next(&mut self) -> Option<Self::Item> {
        // Advance the odometer of sub-iterators; when it rolls over
        // completely the product is exhausted.
        if MultiProduct::iterate_last(
            &mut self.0,
            MultiProductIterState::StartOfIter
        ) {
            Some(self.curr_iterator())
        } else {
            None
        }
    }

    fn count(self) -> usize {
        if self.0.len() == 0 {
            return 0;
        }

        // Before the first `next()`: the total is the product of the
        // sub-iterator lengths.
        if !self.in_progress() {
            return self.0.into_iter().fold(1, |acc, multi_iter| {
                acc * multi_iter.iter.count()
            });
        }

        // Mid-iteration: mixed-radix arithmetic on the remaining counts,
        // each position's base being its iterator's full length.
        self.0.into_iter().fold(
            0,
            |acc, MultiProductIter { iter, iter_orig, cur: _ }| {
                let total_count = iter_orig.count();
                let cur_count = iter.count();
                acc * total_count + cur_count
            }
        )
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        // Not ExactSizeIterator because size may be larger than usize
        if self.0.len() == 0 {
            return (0, Some(0));
        }

        // Same two cases as count(), expressed on (lower, upper) hints.
        if !self.in_progress() {
            return self.0.iter().fold((1, Some(1)), |acc, multi_iter| {
                size_hint::mul(acc, multi_iter.iter.size_hint())
            });
        }

        self.0.iter().fold(
            (0, Some(0)),
            |acc, &MultiProductIter { ref iter, ref iter_orig, cur: _ }| {
                let cur_size = iter.size_hint();
                let total_size = iter_orig.size_hint();
                size_hint::add(size_hint::mul(acc, total_size), cur_size)
            }
        )
    }

    fn last(self) -> Option<Self::Item> {
        let iter_count = self.0.len();

        // The last product element pairs the last item of every
        // sub-iterator; if any sub-iterator is empty there is none.
        let lasts: Self::Item = self.0.into_iter()
            .map(|multi_iter| multi_iter.iter.last())
            .while_some()
            .collect();

        if lasts.len() == iter_count {
            Some(lasts)
        } else {
            None
        }
    }
}
|
/// finished; false otherwise.
fn in_progress(&self) -> bool {
|
<|file_name|>gamification.js<|end_file_name|><|fim▁begin|>openerp.gamification = function(instance) {
var QWeb = instance.web.qweb;
instance.gamification.Sidebar = instance.web.Widget.extend({
template: 'gamification.UserWallSidebar',
init: function (parent, action) {
var self = this;
this._super(parent, action);
this.deferred = $.Deferred();
this.goals_info = {};
this.challenge_suggestions = {};
$(document).off('keydown.klistener');
},
        // DOM event handlers for the sidebar; each handler calls the
        // server and then refreshes the goal summary display.
        events: {
            // update a challenge and related goals
            'click a.oe_update_challenge': function(event) {
                var self = this;
                var challenge_id = parseInt(event.currentTarget.id, 10);
                var goals_updated = new instance.web.Model('gamification.challenge').call('quick_update', [challenge_id]);
                $.when(goals_updated).done(function() {
                    // Re-render once the server has recomputed the goals.
                    self.get_goal_todo_info();
                });
            },
            // action to modify a goal
            'click a.oe_goal_action': function(event) {
                var self = this;
                var goal_id = parseInt(event.currentTarget.id, 10);
                var goal_action = new instance.web.Model('gamification.goal').call('get_action', [goal_id]).then(function(res) {
                    goal_action['action'] = res;
                });
                $.when(goal_action).done(function() {
                    var action = self.do_action(goal_action.action);
                    $.when(action).done(function () {
                        // Recompute the goal after the user closed the action.
                        new instance.web.Model('gamification.goal').call('update', [[goal_id]]).then(function(res) {
                            self.get_goal_todo_info();
                        });
                    });
                });
            },
            // get more info about a challenge request
            'click a.oe_challenge_reply': function(event) {
                var self = this;
                var challenge_id = parseInt(event.currentTarget.id, 10);
                var challenge_action = new instance.web.Model('gamification.challenge').call('reply_challenge_wizard', [challenge_id]).then(function(res) {
                    challenge_action['action'] = res;
                });
                $.when(challenge_action).done(function() {
                    self.do_action(challenge_action.action).done(function () {
                        self.get_goal_todo_info();
                    });
                });
            }
        },
start: function() {
var self = this;
this._super.apply(this, arguments);
self.get_goal_todo_info();
self.get_challenge_suggestions();
},
        // Fetch the serialized goal summary for the current user and
        // re-render the challenge list (hidden when there is nothing).
        get_goal_todo_info: function() {
            var self = this;
            var challenges = new instance.web.Model('res.users').call('get_serialised_gamification_summary', []).then(function(result) {
                if (result.length === 0) {
                    self.$el.find(".oe_gamification_challenge_list").hide();
                } else {
                    self.$el.find(".oe_gamification_challenge_list").empty();
                    _.each(result, function(item){
                        var $item = $(QWeb.render("gamification.ChallengeSummary", {challenge: item}));
                        // Post-process the rendered template: money widgets
                        // and user avatars are filled client-side.
                        self.render_money_fields($item);
                        self.render_user_avatars($item);
                        self.$el.find('.oe_gamification_challenge_list').append($item);
                    });
                }
            });
        },
        // Fetch challenge suggestions for the current user and render
        // them into the suggestion area (hidden when there are none).
        get_challenge_suggestions: function() {
            var self = this;
            var challenge_suggestions = new instance.web.Model('res.users').call('get_challenge_suggestions', []).then(function(result) {
                if (result.length === 0) {
                    self.$el.find(".oe_gamification_suggestion").hide();
                } else {
                    var $item = $(QWeb.render("gamification.ChallengeSuggestion", {challenges: result}));
                    self.$el.find('.oe_gamification_suggestion').append($item);
                }
            });
        },
render_money_fields: function(item) {
var self = this;
self.dfm = new instance.web.form.DefaultFieldManager(self);
// Generate a FieldMonetary for each .oe_goal_field_monetary
item.find(".oe_goal_field_monetary").each(function() {
var currency_id = parseInt( $(this).attr('data-id'), 10);
money_field = new instance.web.form.FieldMonetary(self.dfm, {<|fim▁hole|> attrs: {
modifiers: '{"readonly": true}'
}
});
money_field.set('currency', currency_id);
money_field.get_currency_info();
money_field.set('value', parseInt($(this).text(), 10));
money_field.replace($(this));
});
},
render_user_avatars: function(item) {
var self = this;
item.find(".oe_user_avatar").each(function() {
var user_id = parseInt( $(this).attr('data-id'), 10);
var url = instance.session.url('/web/binary/image', {model: 'res.users', field: 'image_small', id: user_id});
$(this).attr("src", url);
});
}
});
instance.web.WebClient.include({
to_kitten: function() {
this._super();
new instance.web.Model('gamification.badge').call('check_progress', []);
}
});
instance.mail.Wall.include({
start: function() {
this._super();
var sidebar = new instance.gamification.Sidebar(this);
sidebar.appendTo($('.oe_mail_wall_aside'));
},
});
};<|fim▁end|>
| |
<|file_name|>core.py<|end_file_name|><|fim▁begin|># Copyright (c) 2014 by Ecreall under licence AGPL terms
# available on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
import colander
import venusian
from BTrees.OOBTree import OOBTree
from persistent.list import PersistentList
from persistent.dict import PersistentDict
from webob.multidict import MultiDict
from zope.interface import implementer
from pyramid.threadlocal import get_current_request
from substanced.util import get_oid
from substanced.util import renamer
from substanced.content import content
from dace.objectofcollaboration.principal.role import DACE_ROLES
from dace.objectofcollaboration.principal.util import get_access_keys
from dace.objectofcollaboration.entity import Entity
from dace.descriptors import (
SharedUniqueProperty,
CompositeUniqueProperty,
SharedMultipleProperty,
CompositeMultipleProperty)
from dace.util import getSite, get_obj, find_catalog
from pontus.schema import Schema
from pontus.core import VisualisableElement
from pontus.widget import (
Select2Widget)
from novaideo import _, ACCESS_ACTIONS
from novaideo.content.interface import (
IVersionableEntity,
IDuplicableEntity,
ISearchableEntity,
ICommentable,
IPrivateChannel,
IChannel,
ICorrelableEntity,
IPresentableEntity,
INode,
IEmojiable,
IPerson,
ISignalableEntity,
ISustainable,
IDebatable,
ITokenable)
# Default number of items per results page.
BATCH_DEFAULT_SIZE = 8

# Module-level registries populated at configuration/scan time.
SEARCHABLE_CONTENTS = {}
SUSTAINABLE_CONTENTS = {}
NOVAIDO_ACCES_ACTIONS = {}
ADVERTISING_CONTAINERS = {}
# Fix: a stray extraction marker glued to this assignment was removed.
ON_LOAD_VIEWS = {}
class AnonymisationKinds(object):
    """Enumeration of the supported anonymisation modes."""

    # Identifier values as stored in the database.
    anonymity = 'anonymity'
    pseudonymity = 'pseudonymity'

    @classmethod
    def get_items(cls):
        """Return a mapping of mode identifier -> translated label."""
        return {
            cls.anonymity: _('Anonymity'),
            cls.pseudonymity: _('Pseudonymity')
        }

    @classmethod
    def get_title(cls, item):
        """Return the translated label for ``item``, or None if unknown."""
        return cls.get_items().get(item, None)
class Evaluations():
    """Evaluation kinds: a user may support or oppose a content."""

    support = 'support'
    oppose = 'oppose'
def get_searchable_content(request=None):
    """Return the registry of searchable content types.

    The registry is attached to the (current) Pyramid request; an empty
    dict is returned when the request carries none.
    """
    req = request if request is not None else get_current_request()
    return getattr(req, 'searchable_contents', {})
class advertising_banner_config(object):
    """ A function, class or method decorator which allows a
    developer to create advertising banner registrations.

    Advertising banner is a panel. See pyramid_layout.panel_config.
    Registration is deferred via venusian: nothing happens until the
    package is scanned by a configurator.
    """
    def __init__(self, name='', context=None, renderer=None, attr=None):
        self.name = name
        self.context = context
        self.renderer = renderer
        self.attr = attr

    def __call__(self, wrapped):
        settings = self.__dict__.copy()

        def callback(context, name, ob):
            # Runs at scan time; `info` is bound below, before any scan
            # can happen, so the closure reference is safe.
            config = context.config.with_package(info.module)
            config.add_panel(panel=ob, **settings)
            # Record the banner's metadata in the module registry.
            ADVERTISING_CONTAINERS[self.name] = {'title': ob.title,
                                                 'description': ob.description,
                                                 'order': ob.order,
                                                 'validator': ob.validator,
                                                 'tags': ob.tags
                                                 #TODO add validator ob.validator
                                                 }

        info = venusian.attach(wrapped, callback, category='pyramid_layout')
        if info.scope == 'class':
            # if the decorator was attached to a method in a class, or
            # otherwise executed at class scope, we need to set an
            # 'attr' into the settings if one isn't already in there
            if settings['attr'] is None:
                settings['attr'] = wrapped.__name__
        settings['_info'] = info.codeinfo # fbo "action_method"
        return wrapped
class access_action(object):
    """Decorator registering an access action for a content type.

    An access action allows to view an object; registrations are
    collected in ``ACCESS_ACTIONS`` (keyed by the action's context
    interface) at venusian scan time.
    """
    def __init__(self, access_key=None):
        self.access_key = access_key

    def __call__(self, wrapped):
        def callback(scanner, name, ob):
            # Group registrations by the action's context interface.
            if ob.context in ACCESS_ACTIONS:
                ACCESS_ACTIONS[ob.context].append({'action': ob,
                                                   'access_key': self.access_key})
            else:
                ACCESS_ACTIONS[ob.context] = [{'action': ob,
                                               'access_key': self.access_key}]

        venusian.attach(wrapped, callback)
        return wrapped
def can_access(user, context, request=None, root=None):
    """Return True if at least one access action grants ``context``.

    ``user``, ``request`` and ``root`` are kept for signature
    compatibility; the check only depends on the context's declared
    interface and the registered access actions.
    """
    provides = getattr(context, '__provides__', None)
    declared = getattr(provides, 'declared', [None])[0]
    return any(
        entry['action'].processsecurity_validation(None, context)
        for entry in ACCESS_ACTIONS.get(declared, []))
# Unique sentinel used to detect "missing" lookups without clashing with
# legitimate None values.
_marker = object()
def serialize_roles(roles, root=None):
    """Flatten ``roles`` into lowercase access-key strings.

    Each role is suffixed with the oid of the object it applies to
    (tuple roles) or of ``root``; superior roles from the DACE role
    hierarchy are expanded as well. Returns a deduplicated list.
    """
    result = []
    principal_root = getSite()
    if principal_root is None:
        return []

    if root is None:
        root = principal_root

    root_oid = str(get_oid(root, ''))
    principal_root_oid = str(get_oid(principal_root, ''))
    for role in roles:
        if isinstance(role, tuple):
            # Local role: (role_name, context_object).
            obj_oid = str(get_oid(role[1], ''))
            result.append((role[0]+'_'+obj_oid).lower())
            superiors = getattr(DACE_ROLES.get(role[0], _marker),
                                'all_superiors', [])
            result.extend([(r.name+'_'+obj_oid).lower()
                           for r in superiors])
        else:
            # Global role, scoped to the root object.
            result.append(role.lower()+'_'+root_oid)
            superiors = getattr(DACE_ROLES.get(role, _marker),
                                'all_superiors', [])
            result.extend([(r.name+'_'+root_oid).lower() for r in
                           superiors])
            for superior in superiors:
                if superior.name == 'Admin':
                    # Admin is always registered on the principal root.
                    result.append('admin_'+principal_root_oid)
                    break

    return list(set(result))
def generate_access_keys(user, root):
    """Compute the security access keys of ``user`` rooted at ``root``."""
    return get_access_keys(user, root=root)
@implementer(ICommentable)
class Commentable(VisualisableElement, Entity):
    """An entity that can be commented on."""
    name = renamer()
    comments = CompositeMultipleProperty('comments')

    def __init__(self, **kwargs):
        super(Commentable, self).__init__(**kwargs)
        # Denormalized count of comments in the subtree; kept in sync by
        # addtoproperty/delfromproperty and update_len_comments.
        self.len_comments = 0

    def update_len_comments(self):
        # Recompute the count recursively (direct comments plus every
        # nested answer), refreshing the cached value on the way.
        result = len(self.comments)
        result += sum([c.update_len_comments() for c in self.comments])
        self.len_comments = result
        return self.len_comments

    def addtoproperty(self, name, value, moving=None):
        super(Commentable, self).addtoproperty(name, value, moving)
        if name == 'comments':
            # Keep the channel's global counter in sync; fall back to
            # self when this entity has no channel (i.e. it *is* one).
            channel = getattr(self, 'channel', self)
            channel.len_comments += 1
            if self is not channel:
                self.len_comments += 1

    def delfromproperty(self, name, value, moving=None):
        super(Commentable, self).delfromproperty(name, value, moving)
        if name == 'comments':
            channel = getattr(self, 'channel', self)
            channel.len_comments -= 1
            if self is not channel:
                self.len_comments -= 1
@implementer(IDebatable)
class Debatable(VisualisableElement, Entity):
    """An entity that can be debated: it carries discussion channels."""
    channels = CompositeMultipleProperty('channels', 'subject')

    def __init__(self, **kwargs):
        super(Debatable, self).__init__(**kwargs)

    @property
    def channel(self):
        # The first channel is the general discussion channel.
        channels = getattr(self, 'channels', [])
        return channels[0] if channels else None

    def get_channel(self, user):
        # `user` is unused here; subclasses may return per-user channels.
        return self.channel

    def get_title(self, user=None):
        return getattr(self, 'title', '')

    def subscribe_to_channel(self, user):
        # Idempotent: only adds the user when not already a member.
        channel = getattr(self, 'channel', None)
        if channel and (user not in channel.members):
            channel.addtoproperty('members', user)

    def add_new_channel(self):
        self.addtoproperty('channels', Channel())
@content(
    'channel',
    icon='icon novaideo-icon icon-idea',
    )
@implementer(IChannel)
class Channel(Commentable):
    """A discussion channel attached to a subject entity."""
    type_title = _('Channel')
    icon = 'icon novaideo-icon icon-idea'
    templates = {'default': 'novaideo:views/templates/channel_result.pt'}
    name = renamer()
    members = SharedMultipleProperty('members', 'following_channels')
    subject = SharedUniqueProperty('subject', 'channels')

    def __init__(self, **kwargs):
        super(Channel, self).__init__(**kwargs)
        self.set_data(kwargs)
        # BTree index of comment oids keyed by creation date, used for
        # efficient range queries.
        self._comments_at = OOBTree()

    def add_comment(self, comment):
        self._comments_at[comment.created_at] = get_oid(comment)

    def remove_comment(self, comment):
        self._comments_at.pop(comment.created_at)

    def get_comments_between(self, start, end):
        # Inclusive BTree range scan on creation date.
        return list(self._comments_at.values(
            min=start, max=end))

    def get_subject(self, user=None):
        subject = self.subject
        return subject if subject else getattr(self, '__parent__', None)

    def get_title(self, user=None):
        # Fall back to the subject's title when the channel has none.
        title = getattr(self, 'title', '')
        if not title:
            return getattr(self.get_subject(user), 'title', None)

        return title

    def is_discuss(self):
        # True when this is a user-to-user discussion channel.
        return self.subject.__class__.__name__.lower() == 'person'
@implementer(IEmojiable)
class Emojiable(Entity):
    """An entity users can react to with (at most one) emoji each.

    ``emojis`` maps emoji -> list of user oids; ``users_emoji`` maps
    user oid -> emoji, keeping the two structures mirror images.
    """

    def __init__(self, **kwargs):
        super(Emojiable, self).__init__(**kwargs)
        self.emojis = OOBTree()
        self.users_emoji = OOBTree()

    def add_emoji(self, emoji, user):
        user_oid = get_oid(user)
        current_emoji = self.get_user_emoji(user)
        # Replace any previous reaction before recording the new one.
        if current_emoji:
            self.remove_emoji(current_emoji, user)

        if emoji:
            self.emojis.setdefault(emoji, PersistentList())
            self.emojis[emoji].append(user_oid)
            self.users_emoji[user_oid] = emoji

    def remove_emoji(self, emoji, user):
        user_oid = get_oid(user)
        if emoji in self.emojis and \
           user_oid in self.emojis[emoji]:
            self.emojis[emoji].remove(user_oid)
            self.users_emoji.pop(user_oid)

    def get_user_emoji(self, user):
        user_oid = get_oid(user)
        return self.users_emoji.get(user_oid, None)

    def can_add_reaction(self, user, process):
        # Reactions are disabled by default; subclasses opt in.
        return False
@content(
    'privatechannel',
    icon='icon novaideo-icon icon-idea',
    )
@implementer(IPrivateChannel)
class PrivateChannel(Channel):
    """A one-to-one discussion channel between two members."""

    def __init__(self, **kwargs):
        super(PrivateChannel, self).__init__(**kwargs)
        self.set_data(kwargs)

    def get_subject(self, user=None):
        # From `user`'s point of view the subject is the other member.
        subject = None
        for member in self.members:
            if member is not user:
                subject = member
                break

        return subject if subject else getattr(self, '__parent__', None)

    def get_title(self, user=None):
        # Fall back to the other member's title when no title is set.
        title = getattr(self, 'title', '')
        if not title:
            return getattr(self.get_subject(user), 'title', None)

        return title
@implementer(IVersionableEntity)
class VersionableEntity(Entity):
    """An entity that can be versioned (a linked list of revisions)."""

    # Previous revision (owned) and next revision (shared back-pointer).
    version = CompositeUniqueProperty('version', 'nextversion')
    nextversion = SharedUniqueProperty('nextversion', 'version')

    @property
    def current_version(self):
        """Follow the ``nextversion`` chain to the newest revision."""
        entity = self
        while entity.nextversion is not None:
            entity = entity.nextversion

        return entity

    @property
    def history(self):
        """Return this revision followed by all older revisions."""
        revisions = [self]
        older = self.version
        while older is not None:
            revisions.append(older)
            older = older.version

        return revisions

    def destroy(self):
        """Remove this branch: destroy older revisions, then detach."""
        if self.version:
            self.version.destroy()

        if self.nextversion:
            self.nextversion.delfromproperty('version', self)
@implementer(IDuplicableEntity)
class DuplicableEntity(Entity):
    """ A Duplicable entity is an entity that can be duplicated"""
    # Symmetric links between a duplicate and its original.
    originalentity = SharedUniqueProperty('originalentity', 'duplicates')
    duplicates = SharedMultipleProperty('duplicates', 'originalentity')
@colander.deferred
def keywords_choice(node, kw):
    # Deferred widget: built at schema-bind time so the choices reflect
    # the keywords currently known by the site root.
    root = getSite()
    values = [(i, i) for i in sorted(root.keywords)]
    create = getattr(root, 'can_add_keywords', True)
    return Select2Widget(max_len=5,
                         values=values,
                         create=create,
                         multiple=True)
class SearchableEntitySchema(Schema):
    # Shared schema fragment: a multi-valued, user-extensible keywords field.
    keywords = colander.SchemaNode(
        colander.Set(),
        widget=keywords_choice,
        title=_('Keywords'),
        description=_("To add keywords, you need to separate them by commas "
                      "and then tap the « Enter » key to validate your selection.")
        )
@implementer(ISearchableEntity)
class SearchableEntity(VisualisableElement, Entity):
    """ A Searchable entity is an entity that can be searched"""
    templates = {'default': 'novaideo:templates/views/default_result.pt',
                 'bloc': 'novaideo:templates/views/default_result.pt'}

    def __init__(self, **kwargs):
        super(SearchableEntity, self).__init__(**kwargs)
        self.keywords = PersistentList()

    @property
    def is_published(self):
        return 'published' in self.state

    @property
    def is_workable(self):
        # By default an entity is workable as soon as it is published.
        return self.is_published

    @property
    def relevant_data(self):
        # Text fields fed to the full-text index.
        return [getattr(self, 'title', ''),
                getattr(self, 'description', ''),
                ', '.join(getattr(self, 'keywords', []))]

    def set_source_data(self, source_data):
        # Store per-application import metadata, keyed by app name.
        if not hasattr(self, 'source_data'):
            self.source_data = PersistentDict({})

        app_name = source_data.get('app_name')
        self.source_data.setdefault(app_name, {})
        self.source_data[app_name] = source_data

    def get_source_data(self, app_id):
        if not hasattr(self, 'source_data'):
            return {}

        return self.source_data.get(app_id, {})

    def is_managed(self, root):
        return True

    def get_title(self, user=None):
        return getattr(self, 'title', '')

    def _init_presentation_text(self):
        pass

    def get_release_date(self):
        return getattr(self, 'release_date', self.modified_at)

    def presentation_text(self, nb_characters=400):
        # NOTE(review): the ellipsis is appended even when the description
        # is shorter than nb_characters.
        return getattr(self, 'description', "")[:nb_characters]+'...'

    def get_more_contents_criteria(self):
        "return specific query, filter values"
        return None, {
            'metadata_filter': {
                'states': ['published'],
                'keywords': list(self.keywords)
            }
        }
@implementer(IPresentableEntity)
class PresentableEntity(Entity):
    """ A Presentable entity is an entity that can be presented"""

    def __init__(self, **kwargs):
        super(PresentableEntity, self).__init__(**kwargs)
        # E-mail addresses this entity has been presented to.
        self._email_persons_contacted = PersistentList()

    @property
    def len_contacted(self):
        return len(self._email_persons_contacted)

    @property
    def persons_contacted(self):
        """ Return all contacted persons"""
        # Resolve each contacted e-mail to a Person via the catalogs;
        # unknown addresses degrade to their local-part (before '@'/'+').
        dace_catalog = find_catalog('dace')
        novaideo_catalog = find_catalog('novaideo')
        identifier_index = novaideo_catalog['identifier']
        object_provides_index = dace_catalog['object_provides']
        result = []
        for email in self._email_persons_contacted:
            query = object_provides_index.any([IPerson.__identifier__]) &\
                    identifier_index.any([email])
            users = list(query.execute().all())
            user = users[0] if users else None
            if user is not None:
                result.append(user)
            else:
                result.append(email.split('@')[0].split('+')[0])

        return set(result)
@implementer(ICorrelableEntity)
class CorrelableEntity(Entity):
    """
    A Correlable entity is an entity that can be correlated.
    A correlation is an abstract association between source entity
    and targets entities.
    """
    source_correlations = SharedMultipleProperty('source_correlations',
                                                 'source')
    target_correlations = SharedMultipleProperty('target_correlations',
                                                 'targets')

    @property
    def correlations(self):
        """Return all source correlations and target correlations"""
        result = [c.target for c in self.source_correlations]
        result.extend([c.source for c in self.target_correlations])
        return list(set(result))

    @property
    def all_source_related_contents(self):
        # (target, correlation) pairs for correlations starting here.
        lists_targets = [(c.targets, c) for c in self.source_correlations]
        return [(target, c) for targets, c in lists_targets
                for target in targets]

    @property
    def all_target_related_contents(self):
        # (source, correlation) pairs for correlations pointing here.
        return [(c.source, c) for c in self.target_correlations]

    @property
    def all_related_contents(self):
        related_contents = self.all_source_related_contents
        related_contents.extend(self.all_target_related_contents)
        return related_contents

    @property
    def contextualized_contents(self):
        lists_contents = [(c.targets, c) for c in
                          self.contextualized_correlations]
        lists_contents = [(target, c) for targets, c in lists_contents
                          for target in targets]
        lists_contents.extend([(c.source, c) for c in
                               self.contextualized_correlations])
        return lists_contents

    def get_related_contents(self, type_=None, tags=[]):
        # NOTE(review): the mutable default ``tags=[]`` is only read,
        # never mutated, so it is harmless here.
        if type_ is None and not tags:
            return self.all_related_contents

        return [(content, c) for content, c in self.all_related_contents
                if (type_ is None or c.type == type_) and
                (not tags or any(t in tags for t in c.tags))]
class ExaminableEntity(Entity):
    """An examinable entity is an entity that can be examined."""

    # Mapping from opinion identifier to its value; subclasses override it.
    opinions_base = {}

    @property
    def opinion_value(self):
        """Return the value associated with the current opinion, or None."""
        current = getattr(self, 'opinion', {}).get('opinion', '')
        return self.opinions_base.get(current, None)
@implementer(INode)
class Node(Entity):
    """An entity that can be rendered as a node of a correlation graph."""

    def __init__(self, **kwargs):
        super(Node, self).__init__(**kwargs)
        # node id -> {'oid', 'title', 'descriminator', 'targets'}
        self.graph = PersistentDict()

    def get_node_id(self):
        """Return a graph-safe identifier derived from the object oid."""
        return str(self.__oid__).replace('-', '_')

    def get_node_descriminator(self):
        """Return the node discriminator; subclasses may override."""
        return 'node'

    def init_graph(self, calculated=()):
        """(Re)build this node's graph and recurse into unvisited neighbours.

        ``calculated`` holds node ids already processed. The previous
        mutable-list default is replaced by an immutable tuple (same
        semantics, no shared-default pitfall).
        """
        result = self.get_nodes_data()
        self.graph = PersistentDict(result[0])
        oid = self.get_node_id()
        newcalculated = list(calculated)
        newcalculated.append(oid)
        for node in self.graph:
            if node not in newcalculated:
                node_obj = get_obj(self.graph[node]['oid'])
                if node_obj:
                    # The neighbour stores its own sub-graph; only the
                    # bookkeeping list is needed here.
                    _, newcalculated = node_obj.init_graph(newcalculated)

        return self.graph, newcalculated

    def get_nodes_data(self, calculated=()):
        """Return ``({node_id: data}, visited_ids)`` for this node and relations."""
        oid = self.get_node_id()
        newcalculated = list(calculated)
        if oid in calculated:
            return {}, newcalculated

        all_target_contents = [r for r in self.all_target_related_contents
                               if isinstance(r[0], Node)]
        targets = [{'id': t.get_node_id(),
                    'type': c.type_name,
                    'oid': getattr(t, '__oid__', 0)}
                   for (t, c) in all_target_contents]
        # NOTE(review): ``r[0] not in all_target_contents`` compares a node
        # against (node, correlation) tuples and so never matches; duplicate
        # targets may be emitted. Kept as-is to preserve behavior — confirm
        # intent before changing.
        all_source_contents = [r for r in self.all_source_related_contents
                               if r[0] not in all_target_contents
                               and isinstance(r[0], Node)]
        targets.extend([{'id': t.get_node_id(),
                         'type': c.type_name,
                         'oid': getattr(t, '__oid__', 0)}
                        for (t, c) in all_source_contents])
        result = {oid: {
            'oid': self.__oid__,
            'title': self.title,
            'descriminator': self.get_node_descriminator(),
            'targets': targets
        }}
        all_source_contents.extend(all_target_contents)
        newcalculated.append(oid)
        for r_content in all_source_contents:
            sub_result, newcalculated = r_content[0].get_nodes_data(newcalculated)
            result.update(sub_result)

        return result, newcalculated

    def get_all_sub_nodes(self):
        """Return every node reachable in the stored graph, excluding self."""
        oid = self.get_node_id()
        return set([get_obj(self.graph[id_]['oid']) for id_ in self.graph
                    if id_ != oid])

    def get_sub_nodes(self):
        """Return only this node's direct targets."""
        oid = self.get_node_id()
        return set([get_obj(node['oid']) for
                    node in self.graph[oid]['targets']])
@implementer(ISignalableEntity)
class SignalableEntity(Entity):
    """An entity on which abuse reports can be filed."""

    reports = CompositeMultipleProperty('reports')
    censoring_reason = CompositeUniqueProperty('censoring_reason')

    def __init__(self, **kwargs):
        super(SignalableEntity, self).__init__(**kwargs)
        self.len_reports = 0
        self.init_len_current_reports()

    @property
    def subject(self):
        """The reported content is this entity's parent."""
        return self.__parent__

    def init_len_current_reports(self):
        """Reset the per-cycle report counter."""
        self.len_current_reports = 0

    def addtoproperty(self, name, value, moving=None):
        """Track report counts whenever a report is attached."""
        super(SignalableEntity, self).addtoproperty(name, value, moving)
        if name != 'reports':
            return

        # Older persisted instances may predate the counters: default them
        # with getattr before incrementing.
        self.len_current_reports = getattr(self, 'len_current_reports', 0) + 1
        self.len_reports = getattr(self, 'len_reports', 0) + 1
@implementer(ISustainable)
class Sustainable(Entity):
    """Entity that users can support or oppose by voting."""

    def __init__(self, **kwargs):
        super(Sustainable, self).__init__(**kwargs)
        self.set_data(kwargs)
        # user oid -> vote date
        self.votes_positive = OOBTree()
        self.votes_negative = OOBTree()

    @property
    def len_support(self):
        """Number of positive votes."""
        return len(self.votes_positive)

    @property
    def len_opposition(self):
        """Number of negative votes."""
        return len(self.votes_negative)

    def add_vote(self, user, date, kind='positive'):
        """Record the user's vote; any ``kind`` other than 'positive' opposes."""
        key = get_oid(user)
        ballot = self.votes_positive if kind == 'positive' else self.votes_negative
        ballot[key] = date

    def withdraw_vote(self, user):
        """Remove the user's vote from whichever side it was cast on."""
        key = get_oid(user)
        if key in self.votes_positive:
            self.votes_positive.pop(key)
        elif key in self.votes_negative:
            self.votes_negative.pop(key)

    def has_vote(self, user):
        """Return True if the user voted on either side."""
        key = get_oid(user)
        return key in self.votes_positive or key in self.votes_negative

    def has_negative_vote(self, user):
        """Return True if the user voted against."""
        return get_oid(user) in self.votes_negative

    def has_positive_vote(self, user):
        """Return True if the user voted for."""
        return get_oid(user) in self.votes_positive
@implementer(ITokenable)
class Tokenable(Entity):
    """Entity on which users can allocate evaluation tokens."""

    tokens_opposition = CompositeMultipleProperty('tokens_opposition')
    tokens_support = CompositeMultipleProperty('tokens_support')

    def __init__(self, **kwargs):
        super(Tokenable, self).__init__(**kwargs)
        self.set_data(kwargs)
        # user oid -> evaluation type
        self.allocated_tokens = OOBTree()
        # evaluation type -> number of allocated tokens
        self.len_allocated_tokens = PersistentDict({})

    def add_token(self, user, evaluation_type):
        """Allocate the user's token, replacing any previous allocation."""
        user_oid = get_oid(user)
        if user_oid in self.allocated_tokens:
            self.remove_token(user)

        self.allocated_tokens[user_oid] = evaluation_type
        count = self.len_allocated_tokens.setdefault(evaluation_type, 0)
        self.len_allocated_tokens[evaluation_type] = count + 1

    def remove_token(self, user):
        """Withdraw the user's token, if any, and update the counters."""
        user_oid = get_oid(user)
        if user_oid not in self.allocated_tokens:
            return

        evaluation_type = self.allocated_tokens.pop(user_oid)
        count = self.len_allocated_tokens.setdefault(evaluation_type, 0)
        self.len_allocated_tokens[evaluation_type] = count - 1

    def evaluators(self, evaluation_type=None):
        """Return the users holding a token, optionally for one evaluation type."""
        if evaluation_type:
            return [get_obj(key) for value, key
                    in self.allocated_tokens.byValue(evaluation_type)]

        return [get_obj(key) for key
                in self.allocated_tokens.keys()]

    def evaluation(self, user):
        """Return the evaluation type the user allocated, or None."""
        return self.allocated_tokens.get(get_oid(user, None), None)

    def remove_tokens(self, force=False):
        """Ask every evaluator to take back its token.

        With ``force``, the allocation is also cleared on this side even if
        the user-side removal did not do it.
        """
        for user in self.evaluators():
            user.remove_token(self)
            if force:
                self.remove_token(user)

    def user_has_token(self, user, root=None):
        """Return True if the user still has a token available for this content."""
        if hasattr(user, 'has_token'):
            return user.has_token(self, root)
        return False

    def init_support_history(self):
        """Lazily create the support history list.

        Entries are ``(user_oid, date, support_type)`` where support_type is
        1 for support, 0 for oppose and -1 for withdraw.
        """
        if not hasattr(self, '_support_history'):
            setattr(self, '_support_history', PersistentList())

    @property
    def len_support(self):
        """Number of support tokens."""
        return self.len_allocated_tokens.get(Evaluations.support, 0)

    @property
    def len_opposition(self):
        """Number of opposition tokens."""
        return self.len_allocated_tokens.get(Evaluations.oppose, 0)
| |
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use document_loader::{DocumentLoader, LoadType};
use dom::bindings::cell::DomRefCell;
use dom::bindings::codegen::Bindings::DocumentBinding::{DocumentMethods, DocumentReadyState};
use dom::bindings::codegen::Bindings::HTMLImageElementBinding::HTMLImageElementMethods;
use dom::bindings::codegen::Bindings::HTMLTemplateElementBinding::HTMLTemplateElementMethods;
use dom::bindings::codegen::Bindings::NodeBinding::NodeMethods;
use dom::bindings::codegen::Bindings::ServoParserBinding;
use dom::bindings::inheritance::Castable;
use dom::bindings::refcounted::Trusted;
use dom::bindings::reflector::{Reflector, reflect_dom_object};
use dom::bindings::root::{Dom, DomRoot, MutNullableDom, RootedReference};
use dom::bindings::str::DOMString;
use dom::characterdata::CharacterData;
use dom::comment::Comment;
use dom::document::{Document, DocumentSource, HasBrowsingContext, IsHTMLDocument};
use dom::documenttype::DocumentType;
use dom::element::{Element, ElementCreator, CustomElementCreationMode};
use dom::globalscope::GlobalScope;
use dom::htmlformelement::{FormControlElementHelpers, HTMLFormElement};
use dom::htmlimageelement::HTMLImageElement;
use dom::htmlscriptelement::{HTMLScriptElement, ScriptResult};
use dom::htmltemplateelement::HTMLTemplateElement;
use dom::node::Node;
use dom::processinginstruction::ProcessingInstruction;
use dom::text::Text;
use dom::virtualmethods::vtable_for;
use dom_struct::dom_struct;
use html5ever::{Attribute, ExpandedName, LocalName, QualName};
use html5ever::buffer_queue::BufferQueue;
use html5ever::tendril::{StrTendril, ByteTendril, IncompleteUtf8};
use html5ever::tree_builder::{NodeOrText, TreeSink, NextParserState, QuirksMode, ElementFlags};
use hyper::header::ContentType;
use hyper::mime::{Mime, SubLevel, TopLevel};
use hyper_serde::Serde;
use msg::constellation_msg::PipelineId;
use net_traits::{FetchMetadata, FetchResponseListener, Metadata, NetworkError};
use network_listener::PreInvoke;
use profile_traits::time::{TimerMetadata, TimerMetadataFrameType};
use profile_traits::time::{TimerMetadataReflowType, ProfilerCategory, profile};
use script_thread::ScriptThread;
use script_traits::DocumentActivity;
use servo_config::prefs::PREFS;
use servo_config::resource_files::read_resource_file;
use servo_url::ServoUrl;
use std::ascii::AsciiExt;
use std::borrow::Cow;
use std::cell::Cell;
use std::mem;
use style::context::QuirksMode as ServoQuirksMode;
mod async_html;
mod html;
mod xml;
#[dom_struct]
/// The parser maintains two input streams: one for input from script through
/// document.write(), and one for input from network.
///
/// There is no concrete representation of the insertion point, instead it
/// always points to just before the next character from the network input,
/// with all of the script input before itself.
///
/// ```text
/// ... script input ... | ... network input ...
/// ^
/// insertion point
/// ```
pub struct ServoParser {
    /// DOM reflector boilerplate shared by all DOM structs.
    reflector: Reflector,
    /// The document associated with this parser.
    document: Dom<Document>,
    /// Input received from network.
    #[ignore_malloc_size_of = "Defined in html5ever"]
    network_input: DomRefCell<BufferQueue>,
    /// Part of an UTF-8 code point spanning input chunks
    #[ignore_malloc_size_of = "Defined in html5ever"]
    incomplete_utf8: DomRefCell<Option<IncompleteUtf8>>,
    /// Input received from script. Used only to support document.write().
    #[ignore_malloc_size_of = "Defined in html5ever"]
    script_input: DomRefCell<BufferQueue>,
    /// The tokenizer of this parser.
    tokenizer: DomRefCell<Tokenizer>,
    /// Whether to expect any further input from the associated network request.
    last_chunk_received: Cell<bool>,
    /// Whether this parser should avoid passing any further data to the tokenizer.
    suspended: Cell<bool>,
    /// <https://html.spec.whatwg.org/multipage/#script-nesting-level>
    script_nesting_level: Cell<usize>,
    /// <https://html.spec.whatwg.org/multipage/#abort-a-parser>
    aborted: Cell<bool>,
    /// <https://html.spec.whatwg.org/multipage/#script-created-parser>
    script_created_parser: bool,
}
/// Whether the final network chunk had already been received when the
/// parser was constructed (fragment parsing receives all input up front).
#[derive(PartialEq)]
enum LastChunkState {
    Received,
    NotReceived,
}
impl ServoParser {
pub fn parse_html_document(document: &Document, input: DOMString, url: ServoUrl) {
let parser = if PREFS.get("dom.servoparser.async_html_tokenizer.enabled").as_boolean().unwrap() {
ServoParser::new(document,
Tokenizer::AsyncHtml(self::async_html::Tokenizer::new(document, url, None)),
LastChunkState::NotReceived,
ParserKind::Normal)
} else {
ServoParser::new(document,
Tokenizer::Html(self::html::Tokenizer::new(document, url, None)),
LastChunkState::NotReceived,
ParserKind::Normal)
};
parser.parse_string_chunk(String::from(input));
}
// https://html.spec.whatwg.org/multipage/#parsing-html-fragments
pub fn parse_html_fragment(context: &Element, input: DOMString) -> impl Iterator<Item=DomRoot<Node>> {
let context_node = context.upcast::<Node>();
let context_document = context_node.owner_doc();
let window = context_document.window();
let url = context_document.url();
// Step 1.
let loader = DocumentLoader::new_with_threads(context_document.loader().resource_threads().clone(),
Some(url.clone()));
let document = Document::new(window,
HasBrowsingContext::No,
Some(url.clone()),
context_document.origin().clone(),
IsHTMLDocument::HTMLDocument,
None,
None,
DocumentActivity::Inactive,
DocumentSource::FromParser,
loader,
None,
None);
// Step 2.
document.set_quirks_mode(context_document.quirks_mode());
// Step 11.
let form = context_node.inclusive_ancestors()
.find(|element| element.is::<HTMLFormElement>());
let fragment_context = FragmentContext {
context_elem: context_node,
form_elem: form.r(),
};
let parser = ServoParser::new(&document,
Tokenizer::Html(self::html::Tokenizer::new(&document,
url,
Some(fragment_context))),
LastChunkState::Received,
ParserKind::Normal);
parser.parse_string_chunk(String::from(input));
// Step 14.
let root_element = document.GetDocumentElement().expect("no document element");
FragmentParsingResult {
inner: root_element.upcast::<Node>().children(),
}
}
    /// Set up a script-created parser (document.open()); non-HTML types are
    /// rendered as plain text inside a `<pre>` element.
    pub fn parse_html_script_input(document: &Document, url: ServoUrl, type_: &str) {
        let parser = ServoParser::new(document,
                                      Tokenizer::Html(self::html::Tokenizer::new(document, url, None)),
                                      LastChunkState::NotReceived,
                                      ParserKind::ScriptCreated);
        document.set_current_parser(Some(&parser));
        if !type_.eq_ignore_ascii_case("text/html") {
            // Not HTML: switch the tokenizer to PLAINTEXT after the <pre>.
            parser.parse_string_chunk("<pre>\n".to_owned());
            parser.tokenizer.borrow_mut().set_plaintext_state();
        }
    }
pub fn parse_xml_document(document: &Document, input: DOMString, url: ServoUrl) {
let parser = ServoParser::new(document,<|fim▁hole|> }
    /// Current script nesting level (non-zero while a parser-inserted script runs).
    pub fn script_nesting_level(&self) -> usize {
        self.script_nesting_level.get()
    }

    /// Whether this parser was created by script (document.open()).
    pub fn is_script_created(&self) -> bool {
        self.script_created_parser
    }
    /// Corresponds to the latter part of the "Otherwise" branch of the 'An end
    /// tag whose tag name is "script"' of
    /// <https://html.spec.whatwg.org/multipage/#parsing-main-incdata>
    ///
    /// This first moves everything from the script input to the beginning of
    /// the network input, effectively resetting the insertion point to just
    /// before the next character to be consumed.
    ///
    ///
    /// ```text
    /// | ... script input ... network input ...
    /// ^
    /// insertion point
    /// ```
    pub fn resume_with_pending_parsing_blocking_script(&self, script: &HTMLScriptElement, result: ScriptResult) {
        assert!(self.suspended.get());
        self.suspended.set(false);

        // Swap so the (former) script input is drained first, then append the
        // (former) network input after it — restoring overall input order.
        mem::swap(&mut *self.script_input.borrow_mut(),
                  &mut *self.network_input.borrow_mut());
        while let Some(chunk) = self.script_input.borrow_mut().pop_front() {
            self.network_input.borrow_mut().push_back(chunk);
        }

        // Execute the blocked script at nesting level 1.
        let script_nesting_level = self.script_nesting_level.get();
        assert_eq!(script_nesting_level, 0);

        self.script_nesting_level.set(script_nesting_level + 1);
        script.execute(result);
        self.script_nesting_level.set(script_nesting_level);

        if !self.suspended.get() {
            self.parse_sync();
        }
    }
    /// document.write() is only honoured for script-created parsers or while
    /// a parser-inserted script is executing.
    pub fn can_write(&self) -> bool {
        self.script_created_parser || self.script_nesting_level.get() > 0
    }
    /// Steps 6-8 of https://html.spec.whatwg.org/multipage/#document.write()
    pub fn write(&self, text: Vec<DOMString>) {
        assert!(self.can_write());

        if self.document.has_pending_parsing_blocking_script() {
            // There is already a pending parsing blocking script so the
            // parser is suspended, we just append everything to the
            // script input and abort these steps.
            for chunk in text {
                self.script_input.borrow_mut().push_back(String::from(chunk).into());
            }
            return;
        }

        // There is no pending parsing blocking script, so all previous calls
        // to document.write() should have seen their entire input tokenized
        // and process, with nothing pushed to the parser script input.
        assert!(self.script_input.borrow().is_empty());

        // Tokenize the written text immediately (possibly re-entrantly
        // running further scripts).
        let mut input = BufferQueue::new();
        for chunk in text {
            input.push_back(String::from(chunk).into());
        }

        self.tokenize(|tokenizer| tokenizer.feed(&mut input));

        if self.suspended.get() {
            // Parser got suspended, insert remaining input at end of
            // script input, following anything written by scripts executed
            // reentrantly during this call.
            while let Some(chunk) = input.pop_front() {
                self.script_input.borrow_mut().push_back(chunk);
            }
            return;
        }

        assert!(input.is_empty());
    }
    // Steps 4-6 of https://html.spec.whatwg.org/multipage/#dom-document-close
    pub fn close(&self) {
        assert!(self.script_created_parser);

        // Step 4: mark the input stream as complete.
        self.last_chunk_received.set(true);
        if self.suspended.get() {
            // Step 5: a blocking script will resume parsing later.
            return;
        }

        // Step 6.
        self.parse_sync();
    }
// https://html.spec.whatwg.org/multipage/#abort-a-parser
pub fn abort(&self) {
assert!(!self.aborted.get());
self.aborted.set(true);
// Step 1.
*self.script_input.borrow_mut() = BufferQueue::new();
*self.network_input.borrow_mut() = BufferQueue::new();
// Step 2.
self.document.set_ready_state(DocumentReadyState::Interactive);
// Step 3.
self.tokenizer.borrow_mut().end();
self.document.set_current_parser(None);
// Step 4.
self.document.set_ready_state(DocumentReadyState::Interactive);
}
    /// Build the plain (unreflected) parser state.
    #[allow(unrooted_must_root)]
    fn new_inherited(document: &Document,
                     tokenizer: Tokenizer,
                     last_chunk_state: LastChunkState,
                     kind: ParserKind)
                     -> Self {
        ServoParser {
            reflector: Reflector::new(),
            document: Dom::from_ref(document),
            incomplete_utf8: DomRefCell::new(None),
            network_input: DomRefCell::new(BufferQueue::new()),
            script_input: DomRefCell::new(BufferQueue::new()),
            tokenizer: DomRefCell::new(tokenizer),
            last_chunk_received: Cell::new(last_chunk_state == LastChunkState::Received),
            suspended: Default::default(),
            script_nesting_level: Default::default(),
            aborted: Default::default(),
            script_created_parser: kind == ParserKind::ScriptCreated,
        }
    }

    /// Allocate the parser and wrap it in a DOM reflector.
    #[allow(unrooted_must_root)]
    fn new(document: &Document,
           tokenizer: Tokenizer,
           last_chunk_state: LastChunkState,
           kind: ParserKind)
           -> DomRoot<Self> {
        reflect_dom_object(Box::new(ServoParser::new_inherited(document, tokenizer, last_chunk_state, kind)),
                           document.window(),
                           ServoParserBinding::Wrap)
    }
    /// Decode a network byte chunk as UTF-8 (lossily) and queue it,
    /// stitching any code point split across chunk boundaries.
    fn push_bytes_input_chunk(&self, chunk: Vec<u8>) {
        let mut chunk = ByteTendril::from(&*chunk);
        let mut network_input = self.network_input.borrow_mut();
        let mut incomplete_utf8 = self.incomplete_utf8.borrow_mut();

        if let Some(mut incomplete) = incomplete_utf8.take() {
            let result = incomplete.try_complete(chunk, |s| network_input.push_back(s));
            match result {
                Err(()) => {
                    // Still not enough bytes to finish the code point.
                    *incomplete_utf8 = Some(incomplete);
                    return
                }
                Ok(remaining) => {
                    chunk = remaining
                }
            }
        }

        // Any trailing partial code point is stashed for the next chunk.
        *incomplete_utf8 = chunk.decode_utf8_lossy(|s| network_input.push_back(s));
    }

    /// Queue an already-decoded string chunk from the network side.
    fn push_string_input_chunk(&self, chunk: String) {
        self.network_input.borrow_mut().push_back(chunk.into());
    }
    /// Run the synchronous parse under the time profiler.
    fn parse_sync(&self) {
        let metadata = TimerMetadata {
            url: self.document.url().as_str().into(),
            iframe: TimerMetadataFrameType::RootWindow,
            incremental: TimerMetadataReflowType::FirstReflow,
        };
        let profiler_category = self.tokenizer.borrow().profiler_category();
        profile(profiler_category,
                Some(metadata),
                self.document.window().upcast::<GlobalScope>().time_profiler_chan().clone(),
                || self.do_parse_sync())
    }

    /// Feed all queued network input to the tokenizer; finish the document
    /// if the last chunk was received and the parser was not suspended.
    fn do_parse_sync(&self) {
        assert!(self.script_input.borrow().is_empty());

        // This parser will continue to parse while there is either pending input or
        // the parser remains unsuspended.

        if self.last_chunk_received.get() {
            // A dangling partial code point at EOF becomes U+FFFD.
            if let Some(_) = self.incomplete_utf8.borrow_mut().take() {
                self.network_input.borrow_mut().push_back(StrTendril::from("\u{FFFD}"))
            }
        }
        self.tokenize(|tokenizer| tokenizer.feed(&mut *self.network_input.borrow_mut()));

        if self.suspended.get() {
            return;
        }

        assert!(self.network_input.borrow().is_empty());

        if self.last_chunk_received.get() {
            self.finish();
        }
    }
    /// Queue a string chunk and parse immediately unless suspended.
    fn parse_string_chunk(&self, input: String) {
        self.document.set_current_parser(Some(self));
        self.push_string_input_chunk(input);
        if !self.suspended.get() {
            self.parse_sync();
        }
    }

    /// Queue a byte chunk and parse immediately unless suspended.
    fn parse_bytes_chunk(&self, input: Vec<u8>) {
        self.document.set_current_parser(Some(self));
        self.push_bytes_input_chunk(input);
        if !self.suspended.get() {
            self.parse_sync();
        }
    }
    /// Drive `feed` until input is exhausted, preparing each script the
    /// tokenizer surfaces; suspend if a script blocks parsing.
    fn tokenize<F>(&self, mut feed: F)
        where F: FnMut(&mut Tokenizer) -> Result<(), DomRoot<HTMLScriptElement>>,
    {
        loop {
            assert!(!self.suspended.get());
            assert!(!self.aborted.get());

            self.document.reflow_if_reflow_timer_expired();
            let script = match feed(&mut *self.tokenizer.borrow_mut()) {
                Ok(()) => return,
                Err(script) => script,
            };

            // Prepare the script with the nesting level bumped for its duration.
            let script_nesting_level = self.script_nesting_level.get();

            self.script_nesting_level.set(script_nesting_level + 1);
            script.prepare();
            self.script_nesting_level.set(script_nesting_level);

            if self.document.has_pending_parsing_blocking_script() {
                self.suspended.set(true);
                return;
            }
        }
    }
    // https://html.spec.whatwg.org/multipage/#the-end
    fn finish(&self) {
        assert!(!self.suspended.get());
        assert!(self.last_chunk_received.get());
        assert!(self.script_input.borrow().is_empty());
        assert!(self.network_input.borrow().is_empty());
        assert!(self.incomplete_utf8.borrow().is_none());

        // Step 1.
        self.document.set_ready_state(DocumentReadyState::Interactive);

        // Step 2.
        self.tokenizer.borrow_mut().end();
        self.document.set_current_parser(None);

        // Steps 3-12 are in another castle, namely finish_load.
        let url = self.tokenizer.borrow().url().clone();
        self.document.finish_load(LoadType::PageSource(url));
    }
}
/// Iterator returned by fragment parsing; detaches each parsed child from
/// the temporary document as it is yielded.
struct FragmentParsingResult<I>
    where I: Iterator<Item=DomRoot<Node>>
{
    inner: I,
}

impl<I> Iterator for FragmentParsingResult<I>
    where I: Iterator<Item=DomRoot<Node>>
{
    type Item = DomRoot<Node>;

    fn next(&mut self) -> Option<DomRoot<Node>> {
        let next = self.inner.next()?;
        // Detach before handing the node to the caller.
        next.remove_self();
        Some(next)
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        self.inner.size_hint()
    }
}
/// Whether the parser was created for normal network loading or by script
/// (document.open()).
#[derive(JSTraceable, MallocSizeOf, PartialEq)]
enum ParserKind {
    Normal,
    ScriptCreated,
}

/// Dispatch wrapper over the concrete tokenizer implementations.
#[derive(JSTraceable, MallocSizeOf)]
#[must_root]
enum Tokenizer {
    Html(self::html::Tokenizer),
    AsyncHtml(self::async_html::Tokenizer),
    Xml(self::xml::Tokenizer),
}
impl Tokenizer {
    /// Feed queued input; `Err` carries a script that must run before
    /// tokenization can continue.
    fn feed(&mut self, input: &mut BufferQueue) -> Result<(), DomRoot<HTMLScriptElement>> {
        match *self {
            Tokenizer::Html(ref mut tokenizer) => tokenizer.feed(input),
            Tokenizer::AsyncHtml(ref mut tokenizer) => tokenizer.feed(input),
            Tokenizer::Xml(ref mut tokenizer) => tokenizer.feed(input),
        }
    }

    /// Signal end of input to the underlying tokenizer.
    fn end(&mut self) {
        match *self {
            Tokenizer::Html(ref mut tokenizer) => tokenizer.end(),
            Tokenizer::AsyncHtml(ref mut tokenizer) => tokenizer.end(),
            Tokenizer::Xml(ref mut tokenizer) => tokenizer.end(),
        }
    }

    fn url(&self) -> &ServoUrl {
        match *self {
            Tokenizer::Html(ref tokenizer) => tokenizer.url(),
            Tokenizer::AsyncHtml(ref tokenizer) => tokenizer.url(),
            Tokenizer::Xml(ref tokenizer) => tokenizer.url(),
        }
    }

    /// Switch to PLAINTEXT tokenization (HTML only).
    fn set_plaintext_state(&mut self) {
        match *self {
            Tokenizer::Html(ref mut tokenizer) => tokenizer.set_plaintext_state(),
            Tokenizer::AsyncHtml(ref mut tokenizer) => tokenizer.set_plaintext_state(),
            Tokenizer::Xml(_) => unimplemented!(),
        }
    }

    fn profiler_category(&self) -> ProfilerCategory {
        match *self {
            Tokenizer::Html(_) => ProfilerCategory::ScriptParseHTML,
            Tokenizer::AsyncHtml(_) => ProfilerCategory::ScriptParseHTML,
            Tokenizer::Xml(_) => ProfilerCategory::ScriptParseXML,
        }
    }
}
/// The context required for asynchronously fetching a document
/// and parsing it progressively.
/// The context required for asynchronously fetching a document
/// and parsing it progressively.
#[derive(JSTraceable)]
pub struct ParserContext {
    /// The parser that initiated the request.
    parser: Option<Trusted<ServoParser>>,
    /// Is this a synthesized document
    is_synthesized_document: bool,
    /// The pipeline associated with this document.
    id: PipelineId,
    /// The URL for this document.
    url: ServoUrl,
}
impl ParserContext {
    /// Create a context for progressively fetching and parsing the document
    /// of pipeline `id` loaded from `url`.
    pub fn new(id: PipelineId, url: ServoUrl) -> ParserContext {
        ParserContext {
            parser: None,
            is_synthesized_document: false,
            // Field-init shorthand instead of redundant `id: id, url: url`.
            id,
            url,
        }
    }
}
impl FetchResponseListener for ParserContext {
    fn process_request_body(&mut self) {}

    fn process_request_eof(&mut self) {}

    /// Inspect the response metadata and, based on the content type, either
    /// hand further chunks to the parser or synthesize a replacement page
    /// (image wrapper, plaintext <pre>, SSL/network error page, warning).
    fn process_response(&mut self, meta_result: Result<FetchMetadata, NetworkError>) {
        let mut ssl_error = None;
        let mut network_error = None;
        let metadata = match meta_result {
            Ok(meta) => {
                Some(match meta {
                    FetchMetadata::Unfiltered(m) => m,
                    FetchMetadata::Filtered { unsafe_, .. } => unsafe_,
                })
            },
            Err(NetworkError::SslValidation(url, reason)) => {
                // Pretend we got text/html so the error page can be parsed.
                ssl_error = Some(reason);
                let mut meta = Metadata::default(url);
                let mime: Option<Mime> = "text/html".parse().ok();
                meta.set_content_type(mime.as_ref());
                Some(meta)
            },
            Err(NetworkError::Internal(reason)) => {
                network_error = Some(reason);
                let mut meta = Metadata::default(self.url.clone());
                let mime: Option<Mime> = "text/html".parse().ok();
                meta.set_content_type(mime.as_ref());
                Some(meta)
            },
            Err(_) => None,
        };
        let content_type = metadata.clone().and_then(|meta| meta.content_type).map(Serde::into_inner);
        let parser = match ScriptThread::page_headers_available(&self.id, metadata) {
            Some(parser) => parser,
            None => return,
        };
        if parser.aborted.get() {
            return;
        }

        self.parser = Some(Trusted::new(&*parser));

        match content_type {
            Some(ContentType(Mime(TopLevel::Image, _, _))) => {
                // Synthesize an HTML page embedding the fetched image.
                self.is_synthesized_document = true;
                let page = "<html><body></body></html>".into();
                parser.push_string_input_chunk(page);
                parser.parse_sync();

                let doc = &parser.document;
                let doc_body = DomRoot::upcast::<Node>(doc.GetBody().unwrap());
                let img = HTMLImageElement::new(local_name!("img"), None, doc);
                img.SetSrc(DOMString::from(self.url.to_string()));
                doc_body.AppendChild(&DomRoot::upcast::<Node>(img)).expect("Appending failed");
            },
            Some(ContentType(Mime(TopLevel::Text, SubLevel::Plain, _))) => {
                // https://html.spec.whatwg.org/multipage/#read-text
                let page = "<pre>\n".into();
                parser.push_string_input_chunk(page);
                parser.parse_sync();
                parser.tokenizer.borrow_mut().set_plaintext_state();
            },
            Some(ContentType(Mime(TopLevel::Text, SubLevel::Html, _))) => {
                // Handle text/html
                if let Some(reason) = ssl_error {
                    self.is_synthesized_document = true;
                    let page_bytes = read_resource_file("badcert.html").unwrap();
                    let page = String::from_utf8(page_bytes).unwrap();
                    let page = page.replace("${reason}", &reason);
                    parser.push_string_input_chunk(page);
                    parser.parse_sync();
                }
                if let Some(reason) = network_error {
                    self.is_synthesized_document = true;
                    let page_bytes = read_resource_file("neterror.html").unwrap();
                    let page = String::from_utf8(page_bytes).unwrap();
                    let page = page.replace("${reason}", &reason);
                    parser.push_string_input_chunk(page);
                    parser.parse_sync();
                }
            },
            Some(ContentType(Mime(TopLevel::Text, SubLevel::Xml, _))) => {}, // Handle text/xml
            Some(ContentType(Mime(toplevel, sublevel, _))) => {
                if toplevel.as_str() == "application" && sublevel.as_str() == "xhtml+xml" {
                    // Handle xhtml (application/xhtml+xml).
                    return;
                }

                // Show warning page for unknown mime types.
                let page = format!("<html><body><p>Unknown content type ({}/{}).</p></body></html>",
                                   toplevel.as_str(),
                                   sublevel.as_str());
                self.is_synthesized_document = true;
                parser.push_string_input_chunk(page);
                parser.parse_sync();
            },
            None => {
                // No content-type header.
                // Merge with #4212 when fixed.
            },
        }
    }

    /// Forward a body chunk to the parser unless a synthetic page replaced it.
    fn process_response_chunk(&mut self, payload: Vec<u8>) {
        if self.is_synthesized_document {
            return;
        }
        let parser = match self.parser.as_ref() {
            Some(parser) => parser.root(),
            None => return,
        };
        if parser.aborted.get() {
            return;
        }
        parser.parse_bytes_chunk(payload);
    }

    /// Mark the input complete and finish parsing unless suspended.
    fn process_response_eof(&mut self, status: Result<(), NetworkError>) {
        let parser = match self.parser.as_ref() {
            Some(parser) => parser.root(),
            None => return,
        };
        if parser.aborted.get() {
            return;
        }

        if let Err(err) = status {
            // TODO(Savago): we should send a notification to callers #5463.
            debug!("Failed to load page URL {}, error: {:?}", self.url, err);
        }

        parser.last_chunk_received.set(true);
        if !parser.suspended.get() {
            parser.parse_sync();
        }
    }
}
// No pre-invoke checks needed for parser network callbacks.
impl PreInvoke for ParserContext {}

/// Context element (and optional form owner) for fragment parsing.
pub struct FragmentContext<'a> {
    pub context_elem: &'a Node,
    pub form_elem: Option<&'a Node>,
}
/// Insert a node or text before `reference_child` (or append when `None`),
/// merging text into an adjacent preceding Text node when possible.
#[allow(unrooted_must_root)]
fn insert(parent: &Node, reference_child: Option<&Node>, child: NodeOrText<Dom<Node>>) {
    match child {
        NodeOrText::AppendNode(n) => {
            parent.InsertBefore(&n, reference_child).unwrap();
        },
        NodeOrText::AppendText(t) => {
            // Prefer extending the Text node just before the insertion point.
            let text = reference_child
                .and_then(Node::GetPreviousSibling)
                .or_else(|| parent.GetLastChild())
                .and_then(DomRoot::downcast::<Text>);

            if let Some(text) = text {
                text.upcast::<CharacterData>().append_data(&t);
            } else {
                let text = Text::new(String::from(t).into(), &parent.owner_doc());
                parent.InsertBefore(text.upcast(), reference_child).unwrap();
            }
        },
    }
}
/// html5ever tree sink that builds the Servo DOM.
#[derive(JSTraceable, MallocSizeOf)]
#[must_root]
pub struct Sink {
    /// Base URL used to resolve relative references during parsing.
    base_url: ServoUrl,
    /// The document being constructed.
    document: Dom<Document>,
    /// Source line currently being tokenized (for parser-created elements).
    current_line: u64,
    /// The last script element handed back by `complete_script`, if any.
    script: MutNullableDom<HTMLScriptElement>,
}

impl Sink {
    /// Whether two element nodes live in the same home subtree.
    fn same_tree(&self, x: &Dom<Node>, y: &Dom<Node>) -> bool {
        let x = x.downcast::<Element>().expect("Element node expected");
        let y = y.downcast::<Element>().expect("Element node expected");

        x.is_in_same_home_subtree(y)
    }

    fn has_parent_node(&self, node: &Dom<Node>) -> bool {
        node.GetParentNode().is_some()
    }
}
#[allow(unrooted_must_root)] // FIXME: really?
impl TreeSink for Sink {
type Output = Self;
fn finish(self) -> Self { self }
type Handle = Dom<Node>;
fn get_document(&mut self) -> Dom<Node> {
Dom::from_ref(self.document.upcast())
}
fn get_template_contents(&mut self, target: &Dom<Node>) -> Dom<Node> {
let template = target.downcast::<HTMLTemplateElement>()
.expect("tried to get template contents of non-HTMLTemplateElement in HTML parsing");
Dom::from_ref(template.Content().upcast())
}
fn same_node(&self, x: &Dom<Node>, y: &Dom<Node>) -> bool {
x == y
}
fn elem_name<'a>(&self, target: &'a Dom<Node>) -> ExpandedName<'a> {
let elem = target.downcast::<Element>()
.expect("tried to get name of non-Element in HTML parsing");
ExpandedName {
ns: elem.namespace(),
local: elem.local_name(),
}
}
fn create_element(&mut self, name: QualName, attrs: Vec<Attribute>, _flags: ElementFlags)
-> Dom<Node> {
let is = attrs.iter()
.find(|attr| attr.name.local.eq_str_ignore_ascii_case("is"))
.map(|attr| LocalName::from(&*attr.value));
let elem = Element::create(name,
is,
&*self.document,
ElementCreator::ParserCreated(self.current_line),
CustomElementCreationMode::Synchronous);
for attr in attrs {
elem.set_attribute_from_parser(attr.name, DOMString::from(String::from(attr.value)), None);
}
Dom::from_ref(elem.upcast())
}
fn create_comment(&mut self, text: StrTendril) -> Dom<Node> {
let comment = Comment::new(DOMString::from(String::from(text)), &*self.document);
Dom::from_ref(comment.upcast())
}
fn create_pi(&mut self, target: StrTendril, data: StrTendril) -> Dom<Node> {
let doc = &*self.document;
let pi = ProcessingInstruction::new(
DOMString::from(String::from(target)), DOMString::from(String::from(data)),
doc);
Dom::from_ref(pi.upcast())
}
fn associate_with_form(&mut self, target: &Dom<Node>, form: &Dom<Node>, nodes: (&Dom<Node>, Option<&Dom<Node>>)) {
let (element, prev_element) = nodes;
let tree_node = prev_element.map_or(element, |prev| {
if self.has_parent_node(element) { element } else { prev }
});
if !self.same_tree(tree_node, form) {
return;
}
let node = target;
let form = DomRoot::downcast::<HTMLFormElement>(DomRoot::from_ref(&**form))
.expect("Owner must be a form element");
let elem = node.downcast::<Element>();
let control = elem.and_then(|e| e.as_maybe_form_control());
if let Some(control) = control {
control.set_form_owner_from_parser(&form);
} else {
// TODO remove this code when keygen is implemented.
assert!(node.NodeName() == "KEYGEN", "Unknown form-associatable element");
}
}
fn append_before_sibling(&mut self,
sibling: &Dom<Node>,
new_node: NodeOrText<Dom<Node>>) {
let parent = sibling.GetParentNode()
.expect("append_before_sibling called on node without parent");
insert(&parent, Some(&*sibling), new_node);
}
fn parse_error(&mut self, msg: Cow<'static, str>) {
debug!("Parse error: {}", msg);
}
fn set_quirks_mode(&mut self, mode: QuirksMode) {
let mode = match mode {
QuirksMode::Quirks => ServoQuirksMode::Quirks,
QuirksMode::LimitedQuirks => ServoQuirksMode::LimitedQuirks,
QuirksMode::NoQuirks => ServoQuirksMode::NoQuirks,
};
self.document.set_quirks_mode(mode);
}
fn append(&mut self, parent: &Dom<Node>, child: NodeOrText<Dom<Node>>) {
insert(&parent, None, child);
}
fn append_based_on_parent_node(
&mut self,
elem: &Dom<Node>,
prev_elem: &Dom<Node>,
child: NodeOrText<Dom<Node>>,
) {
if self.has_parent_node(elem) {
self.append_before_sibling(elem, child);
} else {
self.append(prev_elem, child);
}
}
fn append_doctype_to_document(&mut self, name: StrTendril, public_id: StrTendril,
system_id: StrTendril) {
let doc = &*self.document;
let doctype = DocumentType::new(
DOMString::from(String::from(name)), Some(DOMString::from(String::from(public_id))),
Some(DOMString::from(String::from(system_id))), doc);
doc.upcast::<Node>().AppendChild(doctype.upcast()).expect("Appending failed");
}
// Sets the given attributes on `target`, which must be an Element.
// The "if missing" semantics are handled by `set_attribute_from_parser`
// (presumably it skips attributes already present — TODO confirm at its
// definition; not visible from here).
fn add_attrs_if_missing(&mut self, target: &Dom<Node>, attrs: Vec<Attribute>) {
let elem = target.downcast::<Element>()
.expect("tried to set attrs on non-Element in HTML parsing");
for attr in attrs {
elem.set_attribute_from_parser(attr.name, DOMString::from(String::from(attr.value)), None);
}
}
fn remove_from_parent(&mut self, target: &Dom<Node>) {
if let Some(ref parent) = target.GetParentNode() {
parent.RemoveChild(&*target).unwrap();
}
}
/// Marks a script element as "already started" so it will not execute
/// (again) when later inserted or re-encountered. Non-script nodes are
/// silently ignored.
fn mark_script_already_started(&mut self, node: &Dom<Node>) {
    // `if let` instead of `Option::map` for a pure side effect: the old
    // `script.map(|s| ...)` discarded its result and trips clippy's
    // `option_map_unit_fn` lint.
    if let Some(script) = node.downcast::<HTMLScriptElement>() {
        script.set_already_started(true);
    }
}
// Called when the parser finishes a script element. If the node really
// is a script, stash it in `self.script` and suspend parsing so the
// script can run; otherwise let the parser continue.
fn complete_script(&mut self, node: &Dom<Node>) -> NextParserState {
if let Some(script) = node.downcast() {
self.script.set(Some(script));
NextParserState::Suspend
} else {
NextParserState::Continue
}
}
// Moves every child of `node` under `new_parent`, preserving order.
fn reparent_children(&mut self, node: &Dom<Node>, new_parent: &Dom<Node>) {
// The loop terminates because AppendChild detaches `child` from `node`
// as a side effect, so GetFirstChild eventually returns None.
while let Some(ref child) = node.GetFirstChild() {
new_parent.AppendChild(&child).unwrap();
}
}
/// <https://html.spec.whatwg.org/multipage/#html-integration-point>
/// Specifically, the <annotation-xml> cases.
// Returns true when the element's `encoding` attribute is (case-
// insensitively) "text/html" or "application/xhtml+xml"; a missing
// attribute yields false. Panics if `handle` is not an Element — the
// parser only calls this for <annotation-xml> elements.
fn is_mathml_annotation_xml_integration_point(&self, handle: &Dom<Node>) -> bool {
let elem = handle.downcast::<Element>().unwrap();
elem.get_attribute(&ns!(), &local_name!("encoding")).map_or(false, |attr| {
attr.value().eq_ignore_ascii_case("text/html")
|| attr.value().eq_ignore_ascii_case("application/xhtml+xml")
})
}
// Records the tokenizer's current source line (used for error reporting
// and for scripts' line-number bookkeeping).
fn set_current_line(&mut self, line_number: u64) {
self.current_line = line_number;
}
// Notifies the node's virtual-method table that the element was popped
// off the parser's stack of open elements.
fn pop(&mut self, node: &Dom<Node>) {
let node = DomRoot::from_ref(&**node);
vtable_for(&node).pop();
}
}<|fim▁end|>
|
Tokenizer::Xml(self::xml::Tokenizer::new(document, url)),
LastChunkState::NotReceived,
ParserKind::Normal);
parser.parse_string_chunk(String::from(input));
|
<|file_name|>PopupWindow.java<|end_file_name|><|fim▁begin|>/*
* Copyright (c) 2010, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package javafx.stage;
import com.sun.javafx.util.Utils;
import com.sun.javafx.event.DirectEvent;
import java.util.ArrayList;
import java.util.List;
import javafx.beans.InvalidationListener;
import javafx.beans.Observable;
import javafx.beans.property.BooleanProperty;
import javafx.beans.property.BooleanPropertyBase;
import javafx.beans.property.ObjectProperty;
import javafx.beans.property.ReadOnlyDoubleProperty;
import javafx.beans.property.ReadOnlyDoubleWrapper;
import javafx.beans.property.SimpleBooleanProperty;
import javafx.beans.property.SimpleObjectProperty;
import javafx.beans.value.ChangeListener;
import javafx.collections.ObservableList;
import javafx.event.Event;
import javafx.event.EventHandler;
import javafx.geometry.BoundingBox;
import javafx.geometry.Bounds;
import javafx.geometry.Rectangle2D;
import javafx.scene.Group;
import javafx.scene.Node;
import javafx.scene.Parent;
import javafx.scene.Scene;
import com.sun.javafx.event.EventHandlerManager;
import com.sun.javafx.event.EventRedirector;
import com.sun.javafx.event.EventUtil;
import com.sun.javafx.perf.PerformanceTracker;
import com.sun.javafx.scene.SceneHelper;
import com.sun.javafx.stage.FocusUngrabEvent;
import com.sun.javafx.stage.PopupWindowPeerListener;
import com.sun.javafx.stage.WindowCloseRequestHandler;
import com.sun.javafx.stage.WindowEventDispatcher;
import com.sun.javafx.tk.Toolkit;
import static com.sun.javafx.FXPermissions.CREATE_TRANSPARENT_WINDOW_PERMISSION;
import com.sun.javafx.scene.NodeHelper;
import com.sun.javafx.stage.PopupWindowHelper;
import com.sun.javafx.stage.WindowHelper;
import javafx.beans.property.ObjectPropertyBase;
import javafx.beans.property.ReadOnlyObjectProperty;
import javafx.beans.property.ReadOnlyObjectWrapper;
import javafx.beans.value.WeakChangeListener;
import javafx.event.EventTarget;
import javafx.event.EventType;
import javafx.scene.input.KeyCombination;
import javafx.scene.input.KeyEvent;
import javafx.scene.input.MouseEvent;
import javafx.scene.input.ScrollEvent;
import javafx.scene.layout.Background;
import javafx.scene.layout.Pane;
/**
* PopupWindow is the parent for a variety of different types of popup
* based windows including {@link Popup} and {@link javafx.scene.control.Tooltip}
* and {@link javafx.scene.control.ContextMenu}.
* <p>
* A PopupWindow is a secondary window which has no window decorations or title bar.
* It doesn't show up in the OS as a top-level window. It is typically
* used for tool tip like notification, drop down boxes, menus, and so forth.
* <p>
* The PopupWindow <strong>cannot be shown without an owner</strong>.
* PopupWindows require that an owner window exist in order to be shown. However,
* it is possible to create a PopupWindow ahead of time and simply set the owner
* (or change the owner) before first being made visible. Attempting to change
* the owner while the PopupWindow is visible will result in an IllegalStateException.
* <p>
* The PopupWindow encapsulates much of the behavior and functionality common to popups,
* such as the ability to close when the "esc" key is pressed, or the ability to
* hide all child popup windows whenever this window is hidden. These abilities can
* be enabled or disabled via properties.
* @since JavaFX 2.0
*/
public abstract class PopupWindow extends Window {
// Wire up the PopupWindowHelper accessor so internal JavaFX code (in
// com.sun.javafx.*) can invoke PopupWindow's private hooks without
// widening the public API.
static {
PopupWindowHelper.setPopupWindowAccessor(new PopupWindowHelper.PopupWindowAccessor() {
// Forwards the peer's "about to change visibility" callback.
@Override public void doVisibleChanging(Window window, boolean visible) {
((PopupWindow) window).doVisibleChanging(visible);
}
// Forwards the peer's "visibility changed" callback.
@Override public void doVisibleChanged(Window window, boolean visible) {
((PopupWindow) window).doVisibleChanged(visible);
}
@Override
public ObservableList<Node> getContent(PopupWindow popupWindow) {
return popupWindow.getContent();
}
});
}
/**
 * A private list of all child popups.
 */
private final List<PopupWindow> children = new ArrayList<PopupWindow>();
/**
 * Keeps track of the bounds of the content, and adjust the position and
 * size of the popup window accordingly. This way as the popup content
 * changes, the window will be changed to match.
 */
private final InvalidationListener popupWindowUpdater =
new InvalidationListener() {
@Override
public void invalidated(final Observable observable) {
// Drop the memoized bounds so updateWindow recomputes them from
// the (now changed) root node bounds.
cachedExtendedBounds = null;
cachedAnchorBounds = null;
updateWindow(getAnchorX(), getAnchorY());
}
};
/**
 * RT-28454: When a parent node or parent window we are associated with is not
 * visible anymore, possibly because the scene was not valid anymore, we should hide.
 */
private ChangeListener<Boolean> changeListener = (observable, oldValue, newValue) -> {
    // Only react to a true -> false transition (owner became hidden).
    if (oldValue && !newValue) {
        hide();
    }
};
// Weak wrapper so registering this listener on owner properties does not
// keep this popup strongly reachable. The raw-typed constructor call was
// an unchecked conversion; use the generic constructor instead.
private WeakChangeListener<Boolean> weakOwnerNodeListener = new WeakChangeListener<>(changeListener);
/**
 * Creates the popup with a transparent Pane root styled as "popup",
 * and keeps the window geometry in sync with the root's bounds.
 */
public PopupWindow() {
final Pane popupRoot = new Pane();
popupRoot.setBackground(Background.EMPTY);
popupRoot.getStyleClass().add("popup");
final Scene scene = SceneHelper.createPopupScene(popupRoot);
scene.setFill(null);
// Bypass this class's setScene override, which always throws.
super.setScene(scene);
// Resize/reposition the window whenever the root's bounds change.
popupRoot.layoutBoundsProperty().addListener(popupWindowUpdater);
popupRoot.boundsInLocalProperty().addListener(popupWindowUpdater);
// If the scene root is replaced, migrate the bounds listeners and the
// "popup" style class from the old root to the new one.
scene.rootProperty().addListener(
new InvalidationListener() {
private Node oldRoot = scene.getRoot();
@Override
public void invalidated(final Observable observable) {
final Node newRoot = scene.getRoot();
if (oldRoot != newRoot) {
if (oldRoot != null) {
oldRoot.layoutBoundsProperty()
.removeListener(popupWindowUpdater);
oldRoot.boundsInLocalProperty()
.removeListener(popupWindowUpdater);
oldRoot.getStyleClass().remove("popup");
}
if (newRoot != null) {
newRoot.layoutBoundsProperty()
.addListener(popupWindowUpdater);
newRoot.boundsInLocalProperty()
.addListener(popupWindowUpdater);
newRoot.getStyleClass().add("popup");
}
oldRoot = newRoot;
// Invalidate cached geometry and re-anchor the window.
cachedExtendedBounds = null;
cachedAnchorBounds = null;
updateWindow(getAnchorX(), getAnchorY());
}
}
});
PopupWindowHelper.initHelper(this);
}
/*
 * Gets the observable, modifiable list of children which are placed in this
 * PopupWindow.
 *
 * @return the PopupWindow content
 */
ObservableList<Node> getContent() {
    final Parent root = getScene().getRoot();
    // The constructor installs a Pane root, but a Group root (set later
    // through the scene) is tolerated too; Pane and Group are disjoint
    // types, so check order does not matter.
    if (root instanceof Pane) {
        return ((Pane) root).getChildren();
    }
    if (root instanceof Group) {
        return ((Group) root).getChildren();
    }
    throw new IllegalStateException(
            "The content of the Popup can't be accessed");
}
/**
 * The window which is the parent of this popup. All popups must have an
 * owner window.
 */
// Read-only to callers; set internally by showImpl().
private ReadOnlyObjectWrapper<Window> ownerWindow =
new ReadOnlyObjectWrapper<Window>(this, "ownerWindow");
public final Window getOwnerWindow() {
return ownerWindow.get();
}
public final ReadOnlyObjectProperty<Window> ownerWindowProperty() {
return ownerWindow.getReadOnlyProperty();
}
/**
 * The node which is the owner of this popup. All popups must have an
 * owner window but are not required to be associated with an owner node.
 * If an autohide Popup has an owner node, mouse press inside the owner node
 * doesn't cause the Popup to hide.
 */
// Read-only to callers; set internally by show(Node, double, double).
private ReadOnlyObjectWrapper<Node> ownerNode =
new ReadOnlyObjectWrapper<Node>(this, "ownerNode");
public final Node getOwnerNode() {
return ownerNode.get();
}
public final ReadOnlyObjectProperty<Node> ownerNodeProperty() {
return ownerNode.getReadOnlyProperty();
}
/**
 * Note to subclasses: the scene used by PopupWindow is very specifically
 * managed by PopupWindow. This method is overridden to throw
 * UnsupportedOperationException. You cannot specify your own scene.
 *
 * @param scene the scene to be rendered on this window
 * @throws UnsupportedOperationException always; the popup scene is
 *         created internally in the constructor via super.setScene
 */
@Override protected final void setScene(Scene scene) {
throw new UnsupportedOperationException();
}
/**
 * This convenience variable indicates whether, when the popup is shown,
 * it should automatically correct its position such that it doesn't end
 * up positioned off the screen.
 * @defaultValue true
 */
private BooleanProperty autoFix =
new BooleanPropertyBase(true) {
@Override
protected void invalidated() {
// (De)activate autofix immediately, but only while showing.
handleAutofixActivation(isShowing(), get());
}
@Override
public Object getBean() {
return PopupWindow.this;
}
@Override
public String getName() {
return "autoFix";
}
};
public final void setAutoFix(boolean value) { autoFix.set(value); }
public final boolean isAutoFix() { return autoFix.get(); }
public final BooleanProperty autoFixProperty() { return autoFix; }
/**
 * Specifies whether Popups should auto hide. If a popup loses focus and
 * autoHide is true, then the popup will be hidden automatically.
 * <p>
 * The only exception is when owner Node is specified using {@link #show(javafx.scene.Node, double, double)}.
 * Focusing owner Node will not hide the PopupWindow.
 * </p>
 * @defaultValue false
 */
private BooleanProperty autoHide =
new BooleanPropertyBase() {
@Override
protected void invalidated() {
// (De)activate the focus-grab machinery immediately, but only
// while showing.
handleAutohideActivation(isShowing(), get());
}
@Override
public Object getBean() {
return PopupWindow.this;
}
@Override
public String getName() {
return "autoHide";
}
};
public final void setAutoHide(boolean value) { autoHide.set(value); }
public final boolean isAutoHide() { return autoHide.get(); }
public final BooleanProperty autoHideProperty() { return autoHide; }
/**
 * Called after autoHide is run.
 */
// Invoked from doAutoHide() after the popup has been hidden.
private ObjectProperty<EventHandler<Event>> onAutoHide =
new SimpleObjectProperty<EventHandler<Event>>(this, "onAutoHide");
public final void setOnAutoHide(EventHandler<Event> value) { onAutoHide.set(value); }
public final EventHandler<Event> getOnAutoHide() { return onAutoHide.get(); }
public final ObjectProperty<EventHandler<Event>> onAutoHideProperty() { return onAutoHide; }
/**
 * Specifies whether the PopupWindow should be hidden when an unhandled escape key
 * is pressed while the popup has focus.
 * @defaultValue true
 */
// Consulted by PopupEventRedirector.handleEscapeKeyPressedEvent.
private BooleanProperty hideOnEscape =
new SimpleBooleanProperty(this, "hideOnEscape", true);
public final void setHideOnEscape(boolean value) { hideOnEscape.set(value); }
public final boolean isHideOnEscape() { return hideOnEscape.get(); }
public final BooleanProperty hideOnEscapeProperty() { return hideOnEscape; }
/**
 * Specifies whether the event, which caused the Popup to hide, should be
 * consumed. Having the event consumed prevents it from triggering some
 * additional UI response in the Popup's owner window.
 * @defaultValue true
 * @since JavaFX 2.2
 */
private BooleanProperty consumeAutoHidingEvents =
new SimpleBooleanProperty(this, "consumeAutoHidingEvents",
true);
public final void setConsumeAutoHidingEvents(boolean value) {
consumeAutoHidingEvents.set(value);
}
public final boolean getConsumeAutoHidingEvents() {
return consumeAutoHidingEvents.get();
}
public final BooleanProperty consumeAutoHidingEventsProperty() {
return consumeAutoHidingEvents;
}
/**
 * Show the popup.
 * @param owner The owner of the popup. This must not be null.
 * @throws NullPointerException if owner is null
 * @throws IllegalArgumentException if the specified owner window would
 * create cycle in the window hierarchy
 */
// Anchor coordinates are left untouched; use the 3-arg overloads to
// position the popup explicitly.
public void show(Window owner) {
validateOwnerWindow(owner);
showImpl(owner);
}
/**
 * Shows the popup at the specified location on the screen. The popup window
 * is positioned in such way that its anchor point ({@link #anchorLocation})
 * is displayed at the specified {@code anchorX} and {@code anchorY}
 * coordinates.
 * <p>
 * The popup is associated with the specified owner node. The {@code Window}
 * which contains the owner node at the time of the call becomes an owner
 * window of the displayed popup.
 * </p>
 * <p>
 * Note that when {@link #autoHideProperty()} is set to true, mouse press on the owner Node
 * will not hide the PopupWindow.
 * </p>
 *
 * @param ownerNode The owner Node of the popup. It must not be null
 *        and must be associated with a Window.
 * @param anchorX the x position of the popup anchor in screen coordinates
 * @param anchorY the y position of the popup anchor in screen coordinates
 * @throws NullPointerException if ownerNode is null
 * @throws IllegalArgumentException if the specified owner node is not
 *      associated with a Window or when the window would create cycle
 *      in the window hierarchy
 */
public void show(Node ownerNode, double anchorX, double anchorY) {
    if (ownerNode == null) {
        throw new NullPointerException("The owner node must not be null");
    }
    final Scene ownerNodeScene = ownerNode.getScene();
    if ((ownerNodeScene == null)
            || (ownerNodeScene.getWindow() == null)) {
        throw new IllegalArgumentException(
                "The owner node needs to be associated with a window");
    }
    final Window newOwnerWindow = ownerNodeScene.getWindow();
    validateOwnerWindow(newOwnerWindow);
    this.ownerNode.set(ownerNode);
    // PopupWindow should disappear when the owner node is not visible.
    // (ownerNode is guaranteed non-null by the check above, so the old
    // redundant null guard here has been removed.)
    NodeHelper.treeShowingProperty(ownerNode).addListener(weakOwnerNodeListener);
    updateWindow(anchorX, anchorY);
    showImpl(newOwnerWindow);
}
/**
 * Shows the popup at the specified location on the screen. The popup window
 * is positioned in such way that its anchor point ({@link #anchorLocation})
 * is displayed at the specified {@code anchorX} and {@code anchorY}
 * coordinates.
 *
 * @param ownerWindow The owner of the popup. This must not be null.
 * @param anchorX the x position of the popup anchor in screen coordinates
 * @param anchorY the y position of the popup anchor in screen coordinates
 * @throws NullPointerException if ownerWindow is null
 * @throws IllegalArgumentException if the specified owner window would
 * create cycle in the window hierarchy
 */
public void show(Window ownerWindow, double anchorX, double anchorY) {
validateOwnerWindow(ownerWindow);
// Position first so the window shows up at the requested anchor.
updateWindow(anchorX, anchorY);
showImpl(ownerWindow);
}
// Common show path: records the owner, registers as a child of a popup
// owner, inherits styling from the root window's scene, and finally
// makes the window visible (only if the root window is showing).
private void showImpl(final Window owner) {
// Update the owner field
this.ownerWindow.set(owner);
if (owner instanceof PopupWindow) {
// Track parent/child popups so hide() can cascade.
((PopupWindow)owner).children.add(this);
}
// PopupWindow should disappear when owner node is not visible
if (owner != null) {
owner.showingProperty().addListener(weakOwnerNodeListener);
}
final Scene sceneValue = getScene();
SceneHelper.parentEffectiveOrientationInvalidated(sceneValue);
// RT-28447
// Inherit user-agent stylesheet, stylesheets and cursor from the root
// window's scene so the popup matches its owner's look.
final Scene ownerScene = getRootWindow(owner).getScene();
if (ownerScene != null) {
if (ownerScene.getUserAgentStylesheet() != null) {
sceneValue.setUserAgentStylesheet(ownerScene.getUserAgentStylesheet());
}
sceneValue.getStylesheets().setAll(ownerScene.getStylesheets());
if (sceneValue.getCursor() == null) {
sceneValue.setCursor(ownerScene.getCursor());
}
}
// It is required that the root window exist and be visible to show the popup.
if (getRootWindow(owner).isShowing()) {
// We do show() first so that the width and height of the
// popup window are initialized. This way the x,y location of the
// popup calculated below uses the right width and height values for
// its calculation. (fix for part of RT-10675).
show();
}
}
/**
 * Hide this Popup and all its children
 */
@Override public void hide() {
// Cascade the hide to child popups first, then detach them.
for (PopupWindow c : children) {
if (c.isShowing()) {
c.hide();
}
}
children.clear();
super.hide();
// When popup hides, remove listeners; these are added when the popup shows.
if (getOwnerWindow() != null) getOwnerWindow().showingProperty().removeListener(weakOwnerNodeListener);
if (getOwnerNode() != null) NodeHelper.treeShowingProperty(getOwnerNode()).removeListener(weakOwnerNodeListener);
}
/*
 * This can be replaced by listening for the onShowing/onHiding events
 * Note: This method MUST only be called via its accessor method.
 */
// Lazily creates the toolkit peer on first show. A transparent stage
// style requires the CREATE_TRANSPARENT_WINDOW permission; without it
// the popup falls back to an undecorated (opaque) stage.
private void doVisibleChanging(boolean visible) {
PerformanceTracker.logEvent("PopupWindow.storeVisible for [PopupWindow]");
Toolkit toolkit = Toolkit.getToolkit();
if (visible && (getPeer() == null)) {
// Setup the peer
StageStyle popupStyle;
try {
final SecurityManager securityManager =
System.getSecurityManager();
if (securityManager != null) {
securityManager.checkPermission(CREATE_TRANSPARENT_WINDOW_PERMISSION);
}
popupStyle = StageStyle.TRANSPARENT;
} catch (final SecurityException e) {
popupStyle = StageStyle.UNDECORATED;
}
setPeer(toolkit.createTKPopupStage(this, popupStyle, getOwnerWindow().getPeer(), acc));
setPeerListener(new PopupWindowPeerListener(PopupWindow.this));
}
}
// Root (non-popup) window of the owner chain, cached while showing; used
// by the focus-grab bookkeeping in handleAutohideActivation.
private Window rootWindow;
/*
 * This can be replaced by listening for the onShown/onHidden events
 * Note: This method MUST only be called via its accessor method.
 */
private void doVisibleChanged(boolean visible) {
final Window ownerWindowValue = getOwnerWindow();
if (visible) {
rootWindow = getRootWindow(ownerWindowValue);
startMonitorOwnerEvents(ownerWindowValue);
// currently we consider popup window to be focused when it is
// visible and its owner window is focused (we need to track
// that through listener on owner window focused property)
// a better solution would require some focus manager, which can
// track focus state across multiple windows
bindOwnerFocusedProperty(ownerWindowValue);
WindowHelper.setFocused(this, ownerWindowValue.isFocused());
handleAutofixActivation(true, isAutoFix());
handleAutohideActivation(true, isAutoHide());
} else {
// Tear down everything set up on show, in reverse.
stopMonitorOwnerEvents(ownerWindowValue);
unbindOwnerFocusedProperty(ownerWindowValue);
WindowHelper.setFocused(this, false);
handleAutofixActivation(false, isAutoFix());
handleAutohideActivation(false, isAutoHide());
rootWindow = null;
}
PerformanceTracker.logEvent("PopupWindow.storeVisible for [PopupWindow] finished");
}
/**
 * Specifies the x coordinate of the popup anchor point on the screen. If
 * the {@code anchorLocation} is set to {@code WINDOW_TOP_LEFT} or
 * {@code WINDOW_BOTTOM_LEFT} the {@code x} and {@code anchorX} values will
 * be identical.
 *
 * @since JavaFX 8.0
 */
// NaN means "not positioned yet"; updateWindow skips explicit placement
// while the coordinate is NaN.
private final ReadOnlyDoubleWrapper anchorX =
new ReadOnlyDoubleWrapper(this, "anchorX", Double.NaN);
public final void setAnchorX(final double value) {
// Route through updateWindow so window x/y stay consistent.
updateWindow(value, getAnchorY());
}
public final double getAnchorX() {
return anchorX.get();
}
public final ReadOnlyDoubleProperty anchorXProperty() {
return anchorX.getReadOnlyProperty();
}
/**
 * Specifies the y coordinate of the popup anchor point on the screen. If
 * the {@code anchorLocation} is set to {@code WINDOW_TOP_LEFT} or
 * {@code WINDOW_TOP_RIGHT} the {@code y} and {@code anchorY} values will
 * be identical.
 *
 * @since JavaFX 8.0
 */
// NaN means "not positioned yet"; see anchorX.
private final ReadOnlyDoubleWrapper anchorY =
new ReadOnlyDoubleWrapper(this, "anchorY", Double.NaN);
public final void setAnchorY(final double value) {
// Route through updateWindow so window x/y stay consistent.
updateWindow(getAnchorX(), value);
}
public final double getAnchorY() {
return anchorY.get();
}
public final ReadOnlyDoubleProperty anchorYProperty() {
return anchorY.getReadOnlyProperty();
}
/**
 * Specifies the popup anchor point which is used in popup positioning. The
 * point can be set to a corner of the popup window or a corner of its
 * content. In this context the content corners are derived from the popup
 * root node's layout bounds.
 * <p>
 * In general changing of the anchor location won't change the current
 * window position. Instead of that, the {@code anchorX} and {@code anchorY}
 * values are recalculated to correspond to the new anchor point.
 * </p>
 * @since JavaFX 8.0
 */
private final ObjectProperty<AnchorLocation> anchorLocation =
new ObjectPropertyBase<AnchorLocation>(
AnchorLocation.WINDOW_TOP_LEFT) {
@Override
protected void invalidated() {
// Recompute anchorX/anchorY from the current window position so
// the window itself does not move when the anchor changes.
cachedAnchorBounds = null;
updateWindow(windowToAnchorX(getX()),
windowToAnchorY(getY()));
}
@Override
public Object getBean() {
return PopupWindow.this;
}
@Override
public String getName() {
return "anchorLocation";
}
};
public final void setAnchorLocation(final AnchorLocation value) {
anchorLocation.set(value);
}
public final AnchorLocation getAnchorLocation() {
return anchorLocation.get();
}
public final ObjectProperty<AnchorLocation> anchorLocationProperty() {
return anchorLocation;
}
/**
 * Anchor location constants for popup anchor point selection.
 *
 * @since JavaFX 8.0
 */
public enum AnchorLocation {
    /** Represents top left window corner. */
    WINDOW_TOP_LEFT(0, 0, false),
    /** Represents top right window corner. */
    WINDOW_TOP_RIGHT(1, 0, false),
    /** Represents bottom left window corner. */
    WINDOW_BOTTOM_LEFT(0, 1, false),
    /** Represents bottom right window corner. */
    WINDOW_BOTTOM_RIGHT(1, 1, false),
    /** Represents top left content corner. */
    CONTENT_TOP_LEFT(0, 0, true),
    /** Represents top right content corner. */
    CONTENT_TOP_RIGHT(1, 0, true),
    /** Represents bottom left content corner. */
    CONTENT_BOTTOM_LEFT(0, 1, true),
    /** Represents bottom right content corner. */
    CONTENT_BOTTOM_RIGHT(1, 1, true);

    // Horizontal coefficient: 0 selects the left edge, 1 the right edge.
    private final double xCoef;
    // Vertical coefficient: 0 selects the top edge, 1 the bottom edge.
    private final double yCoef;
    // true: coefficients apply to the content (root layout) bounds;
    // false: they apply to the window's extended bounds.
    private final boolean contentLocation;

    private AnchorLocation(final double xCoef, final double yCoef,
            final boolean contentLocation) {
        this.xCoef = xCoef;
        this.yCoef = yCoef;
        this.contentLocation = contentLocation;
    }

    double getXCoef() {
        return xCoef;
    }

    double getYCoef() {
        return yCoef;
    }

    boolean isContentLocation() {
        return contentLocation;
    }
// Note: the stray semicolon that previously followed this closing brace
// has been removed; it was a redundant empty declaration.
}
@Override
// Window x was set externally: translate it into anchor coordinates and
// reposition through updateWindow to keep anchorX/anchorY in sync.
void setXInternal(final double value) {
updateWindow(windowToAnchorX(value), getAnchorY());
}
@Override
// Window y was set externally: translate it into anchor coordinates and
// reposition through updateWindow to keep anchorX/anchorY in sync.
void setYInternal(final double value) {
updateWindow(getAnchorX(), windowToAnchorY(value));
}
@Override
void notifyLocationChanged(final double newX, final double newY) {<|fim▁hole|>
// Memoized geometry; both caches are nulled whenever the root node's
// bounds, the scene root, or the anchor location change.
private Bounds cachedExtendedBounds;
private Bounds cachedAnchorBounds;
// Union of the root's layout bounds and bounds-in-local: the full area
// the popup window must cover, including effects/overflow.
private Bounds getExtendedBounds() {
if (cachedExtendedBounds == null) {
final Parent rootNode = getScene().getRoot();
cachedExtendedBounds = union(rootNode.getLayoutBounds(),
rootNode.getBoundsInLocal());
}
return cachedExtendedBounds;
}
// Bounds the anchor coefficients apply to: the root's layout bounds for
// CONTENT_* anchor locations, the extended bounds for WINDOW_* ones.
private Bounds getAnchorBounds() {
if (cachedAnchorBounds == null) {
cachedAnchorBounds = getAnchorLocation().isContentLocation()
? getScene().getRoot()
.getLayoutBounds()
: getExtendedBounds();
}
return cachedAnchorBounds;
}
// Core positioning routine: given the desired anchor point in screen
// coordinates, computes the window origin and size, optionally clamps
// to the screen (autofix), and writes back the resulting anchor values.
private void updateWindow(final double newAnchorX,
final double newAnchorY) {
final AnchorLocation anchorLocationValue = getAnchorLocation();
final Parent rootNode = getScene().getRoot();
final Bounds extendedBounds = getExtendedBounds();
final Bounds anchorBounds = getAnchorBounds();
final double anchorXCoef = anchorLocationValue.getXCoef();
final double anchorYCoef = anchorLocationValue.getYCoef();
// Offset from the anchor-bounds origin to the selected anchor corner.
final double anchorDeltaX = anchorXCoef * anchorBounds.getWidth();
final double anchorDeltaY = anchorYCoef * anchorBounds.getHeight();
// Screen position of the anchor-bounds top-left corner.
double anchorScrMinX = newAnchorX - anchorDeltaX;
double anchorScrMinY = newAnchorY - anchorDeltaY;
if (autofixActive) {
final Screen currentScreen =
Utils.getScreenForPoint(newAnchorX, newAnchorY);
final Rectangle2D screenBounds =
Utils.hasFullScreenStage(currentScreen)
? currentScreen.getBounds()
: currentScreen.getVisualBounds();
// Clamp order decides which edge wins when the popup is larger
// than the screen: the second clamp takes precedence.
if (anchorXCoef <= 0.5) {
// left side of the popup is more important, try to keep it
// visible if the popup width is larger than screen width
anchorScrMinX = Math.min(anchorScrMinX,
screenBounds.getMaxX()
- anchorBounds.getWidth());
anchorScrMinX = Math.max(anchorScrMinX, screenBounds.getMinX());
} else {
// right side of the popup is more important
anchorScrMinX = Math.max(anchorScrMinX, screenBounds.getMinX());
anchorScrMinX = Math.min(anchorScrMinX,
screenBounds.getMaxX()
- anchorBounds.getWidth());
}
if (anchorYCoef <= 0.5) {
// top side of the popup is more important
anchorScrMinY = Math.min(anchorScrMinY,
screenBounds.getMaxY()
- anchorBounds.getHeight());
anchorScrMinY = Math.max(anchorScrMinY, screenBounds.getMinY());
} else {
// bottom side of the popup is more important
anchorScrMinY = Math.max(anchorScrMinY, screenBounds.getMinY());
anchorScrMinY = Math.min(anchorScrMinY,
screenBounds.getMaxY()
- anchorBounds.getHeight());
}
}
// Window origin: shift from anchor-bounds space into extended-bounds
// space (the window covers the extended bounds).
final double windowScrMinX =
anchorScrMinX - anchorBounds.getMinX()
+ extendedBounds.getMinX();
final double windowScrMinY =
anchorScrMinY - anchorBounds.getMinY()
+ extendedBounds.getMinY();
// update popup dimensions
setWidth(extendedBounds.getWidth());
setHeight(extendedBounds.getHeight());
// update transform
rootNode.setTranslateX(-extendedBounds.getMinX());
rootNode.setTranslateY(-extendedBounds.getMinY());
// update popup position
// don't set Window.xExplicit unnecessarily
if (!Double.isNaN(windowScrMinX)) {
super.setXInternal(windowScrMinX);
}
// don't set Window.yExplicit unnecessarily
if (!Double.isNaN(windowScrMinY)) {
super.setYInternal(windowScrMinY);
}
// set anchor x, anchor y
anchorX.set(anchorScrMinX + anchorDeltaX);
anchorY.set(anchorScrMinY + anchorDeltaY);
}
// Returns the smallest axis-aligned box that contains both inputs.
private Bounds union(final Bounds bounds1, final Bounds bounds2) {
    final double left = Math.min(bounds1.getMinX(), bounds2.getMinX());
    final double top = Math.min(bounds1.getMinY(), bounds2.getMinY());
    final double right = Math.max(bounds1.getMaxX(), bounds2.getMaxX());
    final double bottom = Math.max(bounds1.getMaxY(), bounds2.getMaxY());
    return new BoundingBox(left, top, right - left, bottom - top);
}
// Inverse of the x part of updateWindow: converts a window x coordinate
// back into the corresponding anchor x coordinate.
private double windowToAnchorX(final double windowX) {
final Bounds anchorBounds = getAnchorBounds();
return windowX - getExtendedBounds().getMinX()
+ anchorBounds.getMinX()
+ getAnchorLocation().getXCoef()
* anchorBounds.getWidth();
}
// Inverse of the y part of updateWindow: converts a window y coordinate
// back into the corresponding anchor y coordinate.
private double windowToAnchorY(final double windowY) {
final Bounds anchorBounds = getAnchorBounds();
return windowY - getExtendedBounds().getMinY()
+ anchorBounds.getMinY()
+ getAnchorLocation().getYCoef()
* anchorBounds.getHeight();
}
/**
 *
 * Gets the root (non PopupWindow) Window for the provided window.
 *
 * @param win the Window for which to get the root window
 */
private static Window getRootWindow(Window win) {
    // should be enough to traverse PopupWindow hierarchy here to get to the
    // first non-popup focusable window
    Window current = win;
    while (current instanceof PopupWindow) {
        current = ((PopupWindow) current).getOwnerWindow();
    }
    return current;
}
// Hides the popup as part of auto-hiding and fires the onAutoHide
// handler afterwards (even if the popup was already hidden — see the
// note below about why the visibility guard is commented out).
void doAutoHide() {
// There is a timing problem here. I would like to have this isVisible
// check, such that we don't send an onAutoHide event if it was already
// invisible. However, visible is already false by the time this method
// gets called, when done by certain code paths.
// if (isVisible()) {
// hide this popup
hide();
if (getOnAutoHide() != null) {
getOnAutoHide().handle(new Event(this, this, Event.ANY));
}
// }
}
@Override
// Popup-specific dispatcher chain: PopupEventRedirector handles ESC /
// auto-hide triggers before the usual close-request handling.
WindowEventDispatcher createInternalEventDispatcher() {
return new WindowEventDispatcher(new PopupEventRedirector(this),
new WindowCloseRequestHandler(this),
new EventHandlerManager(this));
}
@Override
// For popups the generic window owner is the popup's owner window.
Window getWindowOwner() {
return getOwnerWindow();
}
// While showing, redirect the owner window's events into this popup's
// dispatcher so the popup can observe key/mouse activity on its owner.
private void startMonitorOwnerEvents(final Window ownerWindowValue) {
    final EventRedirector redirector = ownerWindowValue
            .getInternalEventDispatcher()
            .getEventRedirector();
    redirector.addEventDispatcher(getEventDispatcher());
}
// Counterpart of startMonitorOwnerEvents: detach this popup's dispatcher
// from the owner window's event redirector when hiding.
private void stopMonitorOwnerEvents(final Window ownerWindowValue) {
    final EventRedirector redirector = ownerWindowValue
            .getInternalEventDispatcher()
            .getEventRedirector();
    redirector.removeEventDispatcher(getEventDispatcher());
}
// Listener mirroring the owner window's focused state onto this popup;
// non-null only while the popup is showing.
private ChangeListener<Boolean> ownerFocusedListener;
private void bindOwnerFocusedProperty(final Window ownerWindowValue) {
ownerFocusedListener =
(observable, oldValue, newValue) -> WindowHelper.setFocused(this, newValue);
ownerWindowValue.focusedProperty().addListener(ownerFocusedListener);
}
private void unbindOwnerFocusedProperty(final Window ownerWindowValue) {
ownerWindowValue.focusedProperty().removeListener(ownerFocusedListener);
ownerFocusedListener = null;
}
// True while autofix clamping is in effect (popup showing && autoFix).
private boolean autofixActive;
// Transitions autofix on/off: while active, screen-configuration changes
// also retrigger repositioning via popupWindowUpdater.
private void handleAutofixActivation(final boolean visible,
final boolean autofix) {
final boolean newAutofixActive = visible && autofix;
if (autofixActive != newAutofixActive) {
autofixActive = newAutofixActive;
if (newAutofixActive) {
Screen.getScreens().addListener(popupWindowUpdater);
updateWindow(getAnchorX(), getAnchorY());
} else {
Screen.getScreens().removeListener(popupWindowUpdater);
}
}
}
// True while auto-hide focus-grabbing is in effect (showing && autoHide).
private boolean autohideActive;
// Transitions auto-hide on/off by adjusting the root window's focus-grab
// counter, which is what ultimately produces FOCUS_UNGRAB events.
private void handleAutohideActivation(final boolean visible,
final boolean autohide) {
final boolean newAutohideActive = visible && autohide;
if (autohideActive != newAutohideActive) {
// assert rootWindow != null;
autohideActive = newAutohideActive;
if (newAutohideActive) {
rootWindow.increaseFocusGrabCounter();
} else {
rootWindow.decreaseFocusGrabCounter();
}
}
}
// Validates a candidate owner: non-null, would not close an ownership
// cycle, and — if the popup is already showing — identical to the
// current owner.
private void validateOwnerWindow(final Window owner) {
if (owner == null) {
throw new NullPointerException("Owner window must not be null");
}
if (wouldCreateCycle(owner, this)) {
throw new IllegalArgumentException(
"Specified owner window would create cycle"
+ " in the window hierarchy");
}
if (isShowing() && (getOwnerWindow() != owner)) {
throw new IllegalStateException(
"Popup is already shown with different owner window");
}
}
// Returns true when `child` is `parent` itself or one of its transitive
// window owners — i.e. making `parent` own `child` would close a loop
// in the ownership chain.
private static boolean wouldCreateCycle(Window parent, final Window child) {
    for (Window ancestor = parent; ancestor != null; ancestor = ancestor.getWindowOwner()) {
        if (ancestor == child) {
            return true;
        }
    }
    return false;
}
static class PopupEventRedirector extends EventRedirector {
// Matches the Escape key; used to decide when to auto-hide on ESC.
private static final KeyCombination ESCAPE_KEY_COMBINATION =
KeyCombination.keyCombination("Esc");
// The popup whose events this redirector filters.
private final PopupWindow popupWindow;
public PopupEventRedirector(final PopupWindow popupWindow) {
super(popupWindow);
this.popupWindow = popupWindow;
}
@Override
// Dispatches redirected owner-window events to the appropriate handler:
// key events, auto-hide triggers (mouse press / scroll), and ungrab.
protected void handleRedirectedEvent(final Object eventSource,
final Event event) {
if (event instanceof KeyEvent) {
handleKeyEvent((KeyEvent) event);
return;
}
final EventType<?> eventType = event.getEventType();
if (eventType == MouseEvent.MOUSE_PRESSED
|| eventType == ScrollEvent.SCROLL) {
handleAutoHidingEvents(eventSource, event);
return;
}
if (eventType == FocusUngrabEvent.FOCUS_UNGRAB) {
handleFocusUngrabEvent();
return;
}
}
private void handleKeyEvent(final KeyEvent event) {
if (event.isConsumed()) {
return;
}
final Scene scene = popupWindow.getScene();
if (scene != null) {
final Node sceneFocusOwner = scene.getFocusOwner();
final EventTarget eventTarget =
(sceneFocusOwner != null) ? sceneFocusOwner : scene;
if (EventUtil.fireEvent(eventTarget, new DirectEvent(event.copyFor(popupWindow, eventTarget)))
== null) {
event.consume();
return;
}
}
if ((event.getEventType() == KeyEvent.KEY_PRESSED)
&& ESCAPE_KEY_COMBINATION.match(event)) {
handleEscapeKeyPressedEvent(event);
}
}
private void handleEscapeKeyPressedEvent(final Event event) {
if (popupWindow.isHideOnEscape()) {
popupWindow.doAutoHide();
if (popupWindow.getConsumeAutoHidingEvents()) {
event.consume();
}
}
}
private void handleAutoHidingEvents(final Object eventSource,
final Event event) {
// we handle mouse pressed only for the immediate parent window,
// where we can check whether the mouse press is inside of the owner
// control or not, we will force possible child popups to close
// by sending the FOCUS_UNGRAB event
if (popupWindow.getOwnerWindow() != eventSource) {
return;
}
if (popupWindow.isAutoHide() && !isOwnerNodeEvent(event)) {
// the mouse press is outside of the owner control,
// fire FOCUS_UNGRAB to child popups
Event.fireEvent(popupWindow, new FocusUngrabEvent());
popupWindow.doAutoHide();
if (popupWindow.getConsumeAutoHidingEvents()) {
event.consume();
}
}
}
private void handleFocusUngrabEvent() {
if (popupWindow.isAutoHide()) {
popupWindow.doAutoHide();
}
}
private boolean isOwnerNodeEvent(final Event event) {
final Node ownerNode = popupWindow.getOwnerNode();
if (ownerNode == null) {
return false;
}
final EventTarget eventTarget = event.getTarget();
if (!(eventTarget instanceof Node)) {
return false;
}
Node node = (Node) eventTarget;
do {
if (node == ownerNode) {
return true;
}
node = node.getParent();
} while (node != null);
return false;
}
}
}<|fim▁end|>
|
super.notifyLocationChanged(newX, newY);
anchorX.set(windowToAnchorX(newX));
anchorY.set(windowToAnchorY(newY));
}
|
<|file_name|>FSRepositoryTest.java<|end_file_name|><|fim▁begin|>/**
* Copyright (c) 2005-2011 Juan F. Codagnone <http://juan.zaubersoftware.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ar.com.leak.iolsucker.view.common;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.io.Reader;
import java.util.Arrays;
import java.util.Calendar;
import java.util.Collection;
import java.util.Date;
import java.util.GregorianCalendar;
import java.util.Iterator;
import java.util.Observable;
import java.util.Observer;
import junit.framework.TestCase;
import org.apache.commons.lang.Validate;
import ar.com.leak.common.fs.FilesystemUtils;
import ar.com.leak.iolsucker.impl.common.RelativeLocationValidator;
import ar.com.leak.iolsucker.impl.mock.MockCourse;
import ar.com.leak.iolsucker.impl.mock.MockIolDao;
import ar.com.leak.iolsucker.impl.mock.MockMaterialDir;
import ar.com.leak.iolsucker.impl.mock.MockMaterialFile;
import ar.com.leak.iolsucker.model.Course;
import ar.com.leak.iolsucker.model.IolDAO;
import ar.com.leak.iolsucker.model.Material;
import ar.com.leak.iolsucker.view.Repository;
import ar.com.leak.iolsucker.view.Repository.ObservableAction;
/**
* testeo de unidad para FSRepository
*
* @author Juan F. Codagnone
* @since Apr 30, 2005
*/
public final class FSRepositoryTest extends TestCase {
/**
 * Verifies that hostile/relative material paths ("..", deep "../" escapes)
 * do not make the repository write outside its root directory.
 *
 * @throws Exception on error
 */
public void testEvilPaths() throws Exception {
    final IolDAO dao = new MockIolDao(new Course[] {
        new MockCourse("testeo", "10.1", -3,
            Arrays.asList(new Material [] {
                /* data is null, so if the repository tries to read one of
                 * these entries an exception will be thrown */
                new MockMaterialFile("../pepe", null),
                new MockMaterialDir(".."),
                new MockMaterialDir("soy/malo/../../../../../proc/version"),
                new MockMaterialFile("soy/malo/../../../../test", null),
                new MockMaterialFile("soy/malo/../../../../", null),
                new MockMaterialFile("directorio_valido/archivo", "buu"),
                new MockMaterialDir("otro_directorio_valido"),
            }))
    }, null, null);

    // TODO: do not hard-code the path
    final File location = new File(getTmpDirectory(), "evilPaths");
    if(!location.exists()) {
        location.mkdirs();
    }
    final Repository repository = new FSRepository(location,
        new NullDownloadMeter(), new RelativeLocationValidator(),
        new NullRepublishRepositoryStrategy(), 1);
    // TODO: assert for the correct number of emitted events
    repository.addRepositoryListener(new Observer() {
        public void update(final Observable o, final Object arg) {
            Repository.ObservableAction action = (ObservableAction)arg;
            System.out.println(action.getType() + " -- " + action.getMsg());
        }
    });
    final Collection evilCourses = dao.getUserCourses();
    for(Iterator i = evilCourses.iterator(); i.hasNext();) {
        Course evilCourse = (Course)i.next();
        repository.syncMaterial(evilCourse);
    }
}
/**
 * Verifies that a course whose name contains a slash ("Estadistica K/V")
 * syncs without the repository recording any exception.
 *
 * @throws Exception on error
 */
public void testSlashMateriaOkPaths() throws Exception {
    final IolDAO dao = new MockIolDao(new Course[] {
        new MockCourse("Estadistica K/V", "10.1", -3,
            Arrays.asList(new Material [] {
                new MockMaterialDir("otro_directorio_valido"),
            }))
    }, null, null);

    // TODO: do not hard-code the path
    final File location = new File(getTmpDirectory(), "evilPaths");
    if(!location.exists()) {
        location.mkdirs();
    }
    final FSRepository repository = new FSRepository(location,
        new NullDownloadMeter(), new RelativeLocationValidator(),
        new NullRepublishRepositoryStrategy(), 1);
    for(Iterator i = dao.getUserCourses().iterator(); i.hasNext();) {
        Course evilCourse = (Course)i.next();
        repository.syncMaterial(evilCourse);
    }
    // No sync error may have been recorded for the slash-named course.
    assertEquals(0, repository.getExceptions().size());
}
/**
* testea que nos comportemos correctamente en una republicación de un
* archivo
*
* @throws Exception on error
*/
public void testRepublish() throws Exception {
final File location = new File(getTmpDirectory(), "jiol-testRepublish");
if(location.exists()) {
FilesystemUtils.removeDir(location);
}
final FSRepository repository = new FSRepository(location,
new NullDownloadMeter(), new RelativeLocationValidator(),
new TagRepublishRepositoryStrategy(), 1);
// armo un curso que tiene un archivo. dos horas mas tarde se sube
// otro archivo, con mismo nombre
final String courseName = "TESTS-101";
final String courseCode = "1.23";
final int courseLevel = Course.LEVEL_GRADO;
final String fileName = "file1.txt";
final Calendar calendar = GregorianCalendar.getInstance();
final Date beforeDate = calendar.getTime();
calendar.add(Calendar.HOUR, 2);
final Date afterDate = calendar.getTime();
final Date epochDate = new Date(0);
final String []content = {
"contenido 0",
"contenido 1",
"contenido 2"
};
<|fim▁hole|> final Course afterCourse = new MockCourse(courseName, courseCode,
courseLevel, Arrays.asList(new Material[] {
new MockMaterialFile(fileName, content[1], afterDate)
}));
final Course epochCourse = new MockCourse(courseName, courseCode,
courseLevel, Arrays.asList(new Material[] {
new MockMaterialFile(fileName, content[2], epochDate)
}));
try {
repository.syncMaterial(beforeCourse);
repository.syncMaterial(afterCourse);
repository.syncMaterial(epochCourse);
assertEquals(repository.getDestDir(beforeCourse).listFiles().length,
2);
assertEquals(content[1], // tuvo que haber quedado el contendo 1
getFileContent(new File(
repository.getDestDir(beforeCourse), fileName)));
} finally {
if(location.exists()) {
FilesystemUtils.removeDir(location);
}
}
}
/** @return the system temporary directory (java.io.tmpdir) */
private File getTmpDirectory() {
    final String tmpDirPath = System.getProperty("java.io.tmpdir");
    Validate.notNull(tmpDirPath, "java.io.tmpdir is not set!!");
    return new File(tmpDirPath);
}
/**
 * Reads the whole contents of a text file using the platform default
 * charset.
 *
 * @param f the file to open
 * @return the contents of f
 * @throws IOException on error
 */
private String getFileContent(final File f) throws IOException {
    final StringBuilder sb = new StringBuilder();
    final Reader reader = new FileReader(f);
    try {
        final int bufferSize = 1024;
        final char []buff = new char[bufferSize];
        int len;
        while((len = reader.read(buff)) != -1) {
            sb.append(buff, 0, len);
        }
    } finally {
        // Close even when read() throws; the original leaked the handle
        // on an I/O error.
        reader.close();
    }
    return sb.toString();
}
}<|fim▁end|>
|
final Course beforeCourse = new MockCourse(courseName, courseCode,
courseLevel , Arrays.asList(new Material[] {
new MockMaterialFile(fileName, content[0], beforeDate)
}));
|
<|file_name|>expression_test.go<|end_file_name|><|fim▁begin|>// Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package core
import (
"fmt"
. "github.com/pingcap/check"
"github.com/pingcap/parser"
"github.com/pingcap/parser/ast"
"github.com/pingcap/parser/charset"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/mock"
"github.com/pingcap/tidb/util/testleak"
"github.com/pingcap/tidb/util/testutil"
)
var _ = Suite(&testExpressionSuite{})
type testExpressionSuite struct {
*parser.Parser
ctx sessionctx.Context
}
func (s *testExpressionSuite) SetUpSuite(c *C) {
s.Parser = parser.New()
s.ctx = mock.NewContext()
}
func (s *testExpressionSuite) TearDownSuite(c *C) {
}
func (s *testExpressionSuite) parseExpr(c *C, expr string) ast.ExprNode {
st, err := s.ParseOneStmt("select "+expr, "", "")
c.Assert(err, IsNil)<|fim▁hole|>type testCase struct {
exprStr string
resultStr string
}
// runTests evaluates each expression with evalAstExpr and checks that the
// stringified result equals the expected resultStr.
func (s *testExpressionSuite) runTests(c *C, tests []testCase) {
	for _, tt := range tests {
		expr := s.parseExpr(c, tt.exprStr)
		val, err := evalAstExpr(s.ctx, expr)
		c.Assert(err, IsNil)
		// %v renders a NULL datum as "<nil>", matching the expectations.
		valStr := fmt.Sprintf("%v", val.GetValue())
		c.Assert(valStr, Equals, tt.resultStr, Commentf("for %s", tt.exprStr))
	}
}
// TestBetween checks evaluation of BETWEEN / NOT BETWEEN predicates.
func (s *testExpressionSuite) TestBetween(c *C) {
	defer testleak.AfterTest(c)()
	tests := []testCase{
		{exprStr: "1 between 2 and 3", resultStr: "0"},
		{exprStr: "1 not between 2 and 3", resultStr: "1"},
	}
	s.runTests(c, tests)
}
// TestCaseWhen checks CASE ... WHEN ... THEN ... [ELSE ...] END evaluation:
// matching branches, the ELSE branch, and the implicit NULL result when no
// branch matches.
func (s *testExpressionSuite) TestCaseWhen(c *C) {
	defer testleak.AfterTest(c)()
	tests := []testCase{
		{
			exprStr:   "case 1 when 1 then 'str1' when 2 then 'str2' end",
			resultStr: "str1",
		},
		{
			exprStr:   "case 2 when 1 then 'str1' when 2 then 'str2' end",
			resultStr: "str2",
		},
		{
			exprStr:   "case 3 when 1 then 'str1' when 2 then 'str2' end",
			resultStr: "<nil>",
		},
		{
			exprStr:   "case 4 when 1 then 'str1' when 2 then 'str2' else 'str3' end",
			resultStr: "str3",
		},
	}
	s.runTests(c, tests)
	// When expression value changed, result set back to null.
	valExpr := ast.NewValueExpr(1)
	whenClause := &ast.WhenClause{Expr: ast.NewValueExpr(1), Result: ast.NewValueExpr(1)}
	caseExpr := &ast.CaseExpr{
		Value:       valExpr,
		WhenClauses: []*ast.WhenClause{whenClause},
	}
	v, err := evalAstExpr(s.ctx, caseExpr)
	c.Assert(err, IsNil)
	c.Assert(v, testutil.DatumEquals, types.NewDatum(int64(1)))
	valExpr.SetValue(4)
	v, err = evalAstExpr(s.ctx, caseExpr)
	c.Assert(err, IsNil)
	// No WHEN matches 4 and there is no ELSE branch, so the result is NULL.
	c.Assert(v.Kind(), Equals, types.KindNull)
}
// TestCast checks CAST evaluation across target types: signed/unsigned
// integers, binary strings, utf8 strings, and casting NULL.
func (s *testExpressionSuite) TestCast(c *C) {
	defer testleak.AfterTest(c)()
	f := types.NewFieldType(mysql.TypeLonglong)

	expr := &ast.FuncCastExpr{
		Expr: ast.NewValueExpr(1),
		Tp:   f,
	}
	ast.SetFlag(expr)
	v, err := evalAstExpr(s.ctx, expr)
	c.Assert(err, IsNil)
	c.Assert(v, testutil.DatumEquals, types.NewDatum(int64(1)))

	// Same cast with the unsigned flag set yields a uint64 datum.
	f.Flag |= mysql.UnsignedFlag
	v, err = evalAstExpr(s.ctx, expr)
	c.Assert(err, IsNil)
	c.Assert(v, testutil.DatumEquals, types.NewDatum(uint64(1)))

	// Casting to a binary string yields raw bytes.
	f.Tp = mysql.TypeString
	f.Charset = charset.CharsetBin
	v, err = evalAstExpr(s.ctx, expr)
	c.Assert(err, IsNil)
	c.Assert(v, testutil.DatumEquals, types.NewDatum([]byte("1")))

	// Casting to a utf8 string yields a Go string.
	f.Tp = mysql.TypeString
	f.Charset = "utf8"
	v, err = evalAstExpr(s.ctx, expr)
	c.Assert(err, IsNil)
	c.Assert(v, testutil.DatumEquals, types.NewDatum("1"))

	// CAST(NULL AS ...) stays NULL.
	expr.Expr = ast.NewValueExpr(nil)
	v, err = evalAstExpr(s.ctx, expr)
	c.Assert(err, IsNil)
	c.Assert(v.Kind(), Equals, types.KindNull)
}
// TestPatternIn checks IN / NOT IN evaluation, including MySQL's three-valued
// logic when NULL appears on either side of the predicate.
func (s *testExpressionSuite) TestPatternIn(c *C) {
	defer testleak.AfterTest(c)()
	tests := []testCase{
		{
			exprStr:   "1 not in (1, 2, 3)",
			resultStr: "0",
		},
		{
			exprStr:   "1 in (1, 2, 3)",
			resultStr: "1",
		},
		{
			exprStr:   "1 in (2, 3)",
			resultStr: "0",
		},
		{
			exprStr:   "NULL in (2, 3)",
			resultStr: "<nil>",
		},
		{
			exprStr:   "NULL not in (2, 3)",
			resultStr: "<nil>",
		},
		{
			exprStr:   "NULL in (NULL, 3)",
			resultStr: "<nil>",
		},
		{
			exprStr:   "1 in (1, NULL)",
			resultStr: "1",
		},
		{
			exprStr:   "1 in (NULL, 1)",
			resultStr: "1",
		},
		{
			exprStr:   "2 in (1, NULL)",
			resultStr: "<nil>",
		},
		{
			exprStr:   "(-(23)++46/51*+51) in (+23)",
			resultStr: "0",
		},
	}
	s.runTests(c, tests)
}
// TestIsNull checks IS NULL / IS NOT NULL evaluation.
func (s *testExpressionSuite) TestIsNull(c *C) {
	defer testleak.AfterTest(c)()
	tests := []testCase{
		{
			exprStr:   "1 IS NULL",
			resultStr: "0",
		},
		{
			exprStr:   "1 IS NOT NULL",
			resultStr: "1",
		},
		{
			exprStr:   "NULL IS NULL",
			resultStr: "1",
		},
		{
			exprStr:   "NULL IS NOT NULL",
			resultStr: "0",
		},
	}
	s.runTests(c, tests)
}
// TestCompareRow checks row-constructor comparisons (=, <>, <, >=, <=),
// including cases where a NULL component makes the result NULL or where the
// first component already decides the comparison.
func (s *testExpressionSuite) TestCompareRow(c *C) {
	defer testleak.AfterTest(c)()
	tests := []testCase{
		{
			exprStr:   "row(1,2,3)=row(1,2,3)",
			resultStr: "1",
		},
		{
			exprStr:   "row(1,2,3)=row(1+3,2,3)",
			resultStr: "0",
		},
		{
			exprStr:   "row(1,2,3)<>row(1,2,3)",
			resultStr: "0",
		},
		{
			exprStr:   "row(1,2,3)<>row(1+3,2,3)",
			resultStr: "1",
		},
		{
			exprStr:   "row(1+3,2,3)<>row(1+3,2,3)",
			resultStr: "0",
		},
		{
			exprStr:   "row(1,2,3)<row(1,NULL,3)",
			resultStr: "<nil>",
		},
		{
			exprStr:   "row(1,2,3)<row(2,NULL,3)",
			resultStr: "1",
		},
		{
			exprStr:   "row(1,2,3)>=row(0,NULL,3)",
			resultStr: "1",
		},
		{
			exprStr:   "row(1,2,3)<=row(2,NULL,3)",
			resultStr: "1",
		},
	}
	s.runTests(c, tests)
}
func (s *testExpressionSuite) TestIsTruth(c *C) {
defer testleak.AfterTest(c)()
tests := []testCase{
{
exprStr: "1 IS TRUE",
resultStr: "1",
},
{
exprStr: "2 IS TRUE",
resultStr: "1",
},
{
exprStr: "0 IS TRUE",
resultStr: "0",
},
{
exprStr: "NULL IS TRUE",
resultStr: "0",
},
{
exprStr: "1 IS FALSE",
resultStr: "0",
},
{
exprStr: "2 IS FALSE",
resultStr: "0",
},
{
exprStr: "0 IS FALSE",
resultStr: "1",
},
{
exprStr: "NULL IS NOT FALSE",
resultStr: "1",
},
{
exprStr: "1 IS NOT TRUE",
resultStr: "0",
},
{
exprStr: "2 IS NOT TRUE",
resultStr: "0",
},
{
exprStr: "0 IS NOT TRUE",
resultStr: "1",
},
{
exprStr: "NULL IS NOT TRUE",
resultStr: "1",
},
{
exprStr: "1 IS NOT FALSE",
resultStr: "1",
},
{
exprStr: "2 IS NOT FALSE",
resultStr: "1",
},
{
exprStr: "0 IS NOT FALSE",
resultStr: "0",
},
{
exprStr: "NULL IS NOT FALSE",
resultStr: "1",
},
}
s.runTests(c, tests)
}<|fim▁end|>
|
stmt := st.(*ast.SelectStmt)
return stmt.Fields.Fields[0].Expr
}
|
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>from setuptools import setup, find_packages
setup(
name="tomorrow",
version="0.2.4",
author="Madison May",
author_email="[email protected]",
packages=find_packages(
exclude=[
'tests'
]
),
install_requires=[
"futures >= 2.2.0"
],
description="""<|fim▁hole|> url="https://github.com/madisonmay/tomorrow"
)<|fim▁end|>
|
Magic decorator syntax for asynchronous code.
""",
license="MIT License (See LICENSE)",
long_description=open("README.rst").read(),
|
<|file_name|>trakt_lookup.py<|end_file_name|><|fim▁begin|>from __future__ import unicode_literals, division, absolute_import
import logging
from flexget import plugin
from flexget.event import event
from flexget.manager import Session
try:
from flexget.plugins.api_trakt import ApiTrakt
lookup_series = ApiTrakt.lookup_series
lookup_episode = ApiTrakt.lookup_episode
except ImportError:
raise plugin.DependencyError(issued_by='trakt_lookup', missing='api_trakt',
message='trakt_lookup requires the `api_trakt` plugin')
log = logging.getLogger('trakt_lookup')<|fim▁hole|>
class PluginTraktLookup(object):
    """Retrieves trakt information for entries. Uses series_name,
    series_season, series_episode from series plugin.

    Example:

    trakt_lookup: yes

    Primarily used for passing trakt information to other plugins.
    Among these is the IMDB url for the series.

    This information is provided (via entry):

    series info:
      trakt_series_name
      trakt_series_runtime
      trakt_series_first_aired_epoch
      trakt_series_first_aired_iso
      trakt_series_air_time
      trakt_series_content_rating
      trakt_series_genres
      trakt_series_network
      trakt_series_banner_url
      trakt_series_fanart_url
      trakt_series_poster_url
      imdb_url
      trakt_series_url
      trakt_series_imdb_id
      trakt_series_tvdb_id
      trakt_series_actors
      trakt_series_country
      trakt_series_year
      trakt_series_tvrage_id
      trakt_series_status
      trakt_series_overview

    episode info: (only available if seen)
      trakt_ep_name
      trakt_season
      trakt_episode
      trakt_ep_overview
      trakt_ep_first_aired_epoch
      trakt_ep_first_aired_iso
      trakt_ep_image_url
      trakt_ep_id
      trakt_ep_tvdb_id
    """

    # Series info: maps entry field name -> series attribute name, or a
    # callable that extracts the value from the series object.
    series_map = {
        'trakt_series_name': 'title',
        'trakt_series_runtime': 'runtime',
        'trakt_series_first_aired_epoch': 'first_aired',
        'trakt_series_first_aired_iso': 'first_aired_iso',
        'trakt_series_air_time': 'air_time',
        'trakt_series_content_rating': 'certification',
        'trakt_series_genres': lambda series: [genre.name for genre in series.genre],
        'trakt_series_network': 'network',
        'trakt_series_banner_url': 'banner',
        'trakt_series_fanart_url': 'fanart',
        'trakt_series_poster_url': 'poster',
        'imdb_url': lambda series: series.imdb_id and 'http://www.imdb.com/title/%s' % series.imdb_id,
        'trakt_series_url': 'url',
        'trakt_series_imdb_id': 'imdb_id',
        'trakt_series_tvdb_id': 'tvdb_id',
        'trakt_series_actors': lambda series: [actors.name for actors in series.actors],
        'trakt_series_country': 'country',
        'trakt_series_year': 'year',
        'trakt_series_tvrage_id': 'tvrage_id',
        'trakt_series_status': 'status',
        'trakt_series_overview': 'overview'}

    # Episode info: same mapping convention as series_map.
    episode_map = {
        'trakt_ep_name': 'episode_name',
        'trakt_ep_first_aired_epoch': 'first_aired',
        'trakt_ep_first_aired_iso': 'first_aired_iso',
        'trakt_ep_image_url': 'screen',
        'trakt_ep_overview': 'overview',
        'trakt_season': 'season',
        'trakt_episode': 'number',
        'trakt_ep_id': lambda ep: 'S%02dE%02d' % (ep.season, ep.number),
        'trakt_ep_tvdb_id': 'tvdb_id'}

    # Config schema: the plugin is enabled with a plain boolean.
    schema = {'type': 'boolean'}

    def lazy_series_lookup(self, entry):
        """Does the lookup for this entry and populates the entry fields."""
        with Session(expire_on_commit=False) as session:
            try:
                series = lookup_series(entry.get('series_name', eval_lazy=False),
                                       tvdb_id=entry.get('tvdb_id', eval_lazy=False), session=session)
            except LookupError as e:
                log.debug(e.message)
            else:
                entry.update_using_map(self.series_map, series)

    def lazy_episode_lookup(self, entry):
        """Looks up a single episode and populates the episode entry fields."""
        with Session(expire_on_commit=False) as session:
            lookupargs = {'title': entry.get('series_name', eval_lazy=False),
                          'tvdb_id': entry.get('tvdb_id', eval_lazy=False),
                          'seasonnum': entry['series_season'],
                          'episodenum': entry['series_episode'],
                          'session': session}
            try:
                episode = lookup_episode(**lookupargs)
            except LookupError as e:
                log.debug('Error looking up trakt episode information for %s: %s' % (entry['title'], e.args[0]))
            else:
                entry.update_using_map(self.episode_map, episode)

    # Run after series and metainfo series
    @plugin.priority(110)
    def on_task_metainfo(self, task, config):
        # Register lazy lookups so the (potentially slow) trakt queries only
        # run when another plugin actually reads one of the mapped fields.
        if not config:
            return

        for entry in task.entries:
            if entry.get('series_name') or entry.get('tvdb_id', eval_lazy=False):
                entry.register_lazy_func(self.lazy_series_lookup, self.series_map)
                if 'series_season' in entry and 'series_episode' in entry:
                    entry.register_lazy_func(self.lazy_episode_lookup, self.episode_map)
@event('plugin.register')
def register_plugin():
    # api_ver=2: FlexGet passes the plugin config to the task handlers.
    plugin.register(PluginTraktLookup, 'trakt_lookup', api_ver=2)
| |
<|file_name|>__main__.py<|end_file_name|><|fim▁begin|>#
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 by Anselm Kruis
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
=====================
Pyheapdump.__main__
=====================
Debug heap dumps.
.. warning::
This is alpha quality code.
.. autofunction:: main
"""<|fim▁hole|>
from __future__ import absolute_import, print_function, unicode_literals, division
import argparse
import sys
import os
from pyheapdump import debug_dump
def main(argv=None):
    """Debug a Python heap dump file.

    You can invoke this function using the following command::

       python -m pyheapdump [OPTIONS] pyheapdump

    Use the option '-h' to get help::

       python -m pyheapdump -h

    :param argv: command-line arguments; defaults to ``sys.argv[1:]``.
    :return: the result of :func:`pyheapdump.debug_dump`.
    """
    if argv is None:
        argv = sys.argv[1:]
    parser = argparse.ArgumentParser(description='debug a Python heap dump', prog=os.path.basename(sys.executable) + " -m pyheapdump")
    parser.add_argument('--debugger', '-d', choices=['auto', 'pdb', 'pydevd'], default="auto", help="select the debugger, default is 'auto'")
    parser.add_argument('--debugger-dir', help='pydevd only: path to the Python files of PyDev, usually <ECLIPSE_INSTALATION_DIR>/plugins/org.python.pydev_<VERSION>/pysrc/')
    parser.add_argument('--host', help='pydevd only: the user may specify another host, if the debug server is not in the same machine')
    parser.add_argument('--port', type=int, default=5678, help='pydevd only: specifies which port to use for communicating with the server. Default is port 5678')
    parser.add_argument('--stdout', choices=['server', 'console'], default='server', help='pydevd only: pass the stdout to the debug server so that it is printed in its console or to this process console')
    parser.add_argument('--stderr', choices=['server', 'console'], default='server', help='pydevd only: pass the stderr to the debug server so that it is printed in its console or to this process console')
    # Hidden option: debug pyheapdump itself with a remote pydevd session.
    parser.add_argument('--debug-pyheapdump', action='store_true', help=argparse.SUPPRESS)
    parser.add_argument('dumpfile', type=argparse.FileType(mode='rb'), help="the heap dump file")
    namespace = parser.parse_args(argv)
    if namespace.debug_pyheapdump:
        # It is better to use remote debugging, because of the debugger specific code later on
        sys.path.append(namespace.debugger_dir)
        import pydevd  # @UnresolvedImport
        pydevd.settrace(stdoutToServer=True, stderrToServer=True, suspend=True, trace_only_current_thread=True)
    return debug_dump(dumpfile=namespace.dumpfile, debugger_options=vars(namespace))
# Script entry point: ``python -m pyheapdump ...``.
if __name__ == '__main__':
    sys.exit(main())
| |
<|file_name|>deploy_symbols_s3.py<|end_file_name|><|fim▁begin|>import sys
import os.path
import lzma
import time
import os
import subprocess
if "APPVEYOR_REPO_TAG" not in os.environ or os.environ["APPVEYOR_REPO_TAG"] != "true":
print("Not uploading symbols since build is not tagged")
sys.exit()
symbol_filename = sys.argv[1]<|fim▁hole|>
tokens = first_line.split()
expected_tokens = ['MODULE', 'windows', 'x86']
if tokens[0:3] != expected_tokens:
raise RuntimeError("Expected first tokens to be " + str(expected_tokens) + ", but was: " + str(tokens[0:3]))
file_hash = tokens[3]
file_name = tokens[4]
basename = os.path.basename(symbol_filename)
target_path = "%s/%s/%s.xz" % (file_name, file_hash, basename)
# Compress symbols with LZMA to save bandwidth
print("Compressing symbol file...")
t_start = time.perf_counter()
with open(symbol_filename, "rb") as fh:
symbol_data = fh.read()
symbol_data_len = len(symbol_data)
compressed_symbols = lzma.compress(symbol_data)
compression_ratio = len(compressed_symbols) * 100 / symbol_data_len
print("Compressed symbol data (ratio %d%%) in %fs" % (compression_ratio, time.perf_counter() - t_start))
print("Uploading symbols to ", target_path)
with open("TemplePlus.sym.xz", "wb") as fh:
fh.write(compressed_symbols)
subprocess.run(["aws", "s3", "cp", "TemplePlus.sym.xz", "s3://templeplus-symbols/" + target_path], check=True, shell=True)
print("Uploaded symbols to S3.")<|fim▁end|>
|
with open(symbol_filename, "rt") as fh:
first_line = next(fh).strip()
|
<|file_name|>forEach.js<|end_file_name|><|fim▁begin|>describe("The ot object has a forEach method, which allows you: ", function () {
// Values and indices are both passed to the iterator, and the optional
// third argument becomes the iterator's `this`.
it("To iterate over an array", function () {
    var array = [1, 2, 4, 8, 16];
    var sum = 0;
    var sumIndex = 0;
    ot.forEach(array, function (value, index) {
        sum += value;
        sumIndex += index;
        expect(this.context).toBe(true);
    }, {context: true});
    expect(sum).toBe(1 + 2 + 4 + 8 + 16);
    expect(sumIndex).toBe(1 + 2 + 3 + 4);
});
it("To iterate over an object's properties", function () {
var obj = {
prop1: false,
prop2: false,
prop3: false
};
<|fim▁hole|> obj[key] = !value;
expect(this.context).toBe(true);
}, {context: true});
expect(obj.prop1).toBe(true);
expect(obj.prop2).toBe(true);
expect(obj.prop3).toBe(true);
});
// Functions are objects too: properties set on a function are visited.
it("To iterate over user set function properties", function () {
    var fnWithProps = function aName() {
    };
    fnWithProps.prop1 = false;
    fnWithProps.prop2 = false;
    fnWithProps.prop3 = false;
    ot.forEach(fnWithProps, function (value, key) {
        fnWithProps[key] = !value;
        expect(this.context).toBe(true);
    }, {context: true});
    expect(fnWithProps.prop1).toBe(true);
    expect(fnWithProps.prop2).toBe(true);
    expect(fnWithProps.prop3).toBe(true);
});
// When the target supplies its own forEach method, ot.forEach delegates
// to it instead of enumerating properties.
it("To iterate over an object with a forEach method", function () {
    var objectWithForEach = {
        forEach: function (iterator, context) {
            iterator.call(context, true);
        }
    };

    ot.forEach(objectWithForEach, function(calledFromForEach) {
        expect(calledFromForEach).toBe(true);
        expect(this.context).toBe(true);
    }, {context: true});
});
});<|fim▁end|>
|
ot.forEach(obj, function (value, key) {
|
<|file_name|>api_tests.rs<|end_file_name|><|fim▁begin|>// Copyright (C) 2017 Red Hat, Inc.
//
// This file is part of bdcs-api-server.
//
// bdcs-api-server is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// bdcs-api-server is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with bdcs-api-server. If not, see <http://www.gnu.org/licenses/>.
#![feature(plugin)]
#![plugin(rocket_codegen)]
extern crate bdcs;
#[macro_use] extern crate lazy_static;
#[macro_use] extern crate pretty_assertions;
extern crate rocket;
extern crate serde_json;
extern crate toml;
use std::fs::{File, remove_dir_all};
use std::io::Write;
use bdcs::{RocketToml, RocketConfig};
use bdcs::api::v0;
use bdcs::db::DBPool;
use bdcs::recipe::{self, RecipeRepo};
use rocket::http::{ContentType, Method, Status};
use rocket::testing::MockRequest;
use serde_json::Value;
const DB_PATH: &'static str = "./tests/metadata.db";
// XXX This path is REMOVED on each run.
const RECIPE_PATH: &'static str = "/var/tmp/bdcs-recipes-test/";
/// Shared test environment, built once at runtime via lazy_static.
struct TestFramework {
    /// Set to true by `setup`; tests assert it to force initialization.
    initialized: bool,
    /// The Rocket instance with all v0 API routes mounted.
    rocket: rocket::Rocket
}
impl TestFramework {
    /// Build the test environment: write Rocket.toml, initialize the recipe
    /// git repository, and mount every v0 API route on a Rocket instance.
    fn setup() -> TestFramework {
        write_config();
        setup_repo();

        let db_pool = DBPool::new(DB_PATH);
        let recipe_repo = RecipeRepo::new(RECIPE_PATH);

        // Mount the API and run a request against it
        let rocket = rocket::ignite().mount("/",
                             routes![v0::test,
                                     v0::isos,
                                     v0::compose_types,
                                     v0::projects_list_default, v0::projects_list_filter,
                                     v0::projects_info,
                                     v0::projects_depsolve,
                                     v0::modules_info,
                                     v0::modules_list_noargs_default, v0::modules_list_noargs_filter,
                                     v0::recipes_list_default, v0::recipes_list_filter,
                                     v0::recipes_info,
                                     v0::recipes_freeze,
                                     v0::recipes_changes_default, v0::recipes_changes_filter,
                                     v0::recipes_diff,
                                     v0::recipes_new_json, v0::recipes_new_toml,
                                     v0::recipes_workspace_json, v0::recipes_workspace_toml,
                                     v0::recipes_delete,
                                     v0::recipes_undo,
                                     v0::recipes_depsolve,
                                     v0::recipes_tag])
                            .manage(db_pool)
                            .manage(recipe_repo);
        TestFramework {
            initialized: true,
            rocket: rocket
        }
    }
}
// Lazily-initialized singleton: the first test to touch FRAMEWORK triggers
// TestFramework::setup(); all tests then share the same Rocket instance.
lazy_static! {
    static ref FRAMEWORK: TestFramework = {
        TestFramework::setup()
    };
}
/// Write Rocket.toml
///
/// The tests need access to a directory for recipes and a copy of the BDCS database
/// They cannot be passed on the cmdline, so for now they are hard-coded here.
///
/// # TODO
///
/// Setup the test environment properly.
///
fn write_config() {
    // Remove any stale recipe directory from a previous run.
    // Ignore ENOENT, fail on anything else
    match remove_dir_all(RECIPE_PATH) {
        Ok(_) => (),
        Err(e) => match e.kind() {
            std::io::ErrorKind::NotFound => (),
            _ => panic!("Unable to remove {}: {}", RECIPE_PATH, e)
        }
    };

    // Write out the config to a Rocket.toml (this is easier than using rocket::custom)
    let rocket_config = RocketToml {
        global: RocketConfig {
            address: "127.0.0.1".to_string(),
            port: 4000,
            bdcs_path: "".to_string(),
            db_path: DB_PATH.to_string(),
            recipe_path: RECIPE_PATH.to_string(),
            log_path: "/var/log/bdcs-api.log".to_string(),
            mockfiles_path: "./tests/results/v0/".to_string()
        }
    };

    // Write out a Rocket.toml config with [global] settings
    let rocket_toml = toml::to_string(&rocket_config).unwrap();
    File::create("Rocket.toml").unwrap()
        .write_all(rocket_toml.as_bytes()).unwrap();
}
/// Setup the Recipe git repo and import example recipes into it.
fn setup_repo() {
    let repo = recipe::init_repo(RECIPE_PATH).unwrap();
    // Import the example recipes onto the master branch.
    recipe::add_dir(&repo, "./examples/recipes/", "master", false).unwrap();
}
#[test]
// GET /test returns the fixed API self-test string.
fn test_v0_test() {
    assert_eq!(FRAMEWORK.initialized, true);
    let rocket = &FRAMEWORK.rocket;

    let mut req = MockRequest::new(Method::Get, "/test");
    let mut response = req.dispatch_with(rocket);

    assert_eq!(response.status(), Status::Ok);
    let body_str = response.body().and_then(|b| b.into_string());
    assert_eq!(body_str, Some("API v0 test".to_string()));
}
#[test]
// GET /isos is a stub endpoint; it currently reports "Unimplemented".
fn test_v0_isos() {
    assert_eq!(FRAMEWORK.initialized, true);
    let rocket = &FRAMEWORK.rocket;

    // v0_isos()
    let mut req = MockRequest::new(Method::Get, "/isos");
    let mut response = req.dispatch_with(rocket);

    assert_eq!(response.status(), Status::Ok);
    let body_str = response.body().and_then(|b| b.into_string());
    assert_eq!(body_str, Some("Unimplemented".to_string()));
}
#[test]
// GET /compose/types must match the recorded JSON fixture.
fn test_v0_compose_types() {
    assert_eq!(FRAMEWORK.initialized, true);
    let rocket = &FRAMEWORK.rocket;

    // v0_compose_types()
    let expected = include_str!("results/v0/compose-types.json").trim_right();

    let mut req = MockRequest::new(Method::Get, "/compose/types");
    let mut response = req.dispatch_with(rocket);

    assert_eq!(response.status(), Status::Ok);
    let body_str = response.body().and_then(|b| b.into_string());
    assert_eq!(body_str, Some(expected.to_string()));
}
#[test]
// GET /projects/list with and without offset/limit filtering, compared
// against the recorded JSON fixtures.
fn test_v0_projects_list() {
    assert_eq!(FRAMEWORK.initialized, true);
    let rocket = &FRAMEWORK.rocket;

    // v0_projects_list()
    let expected_default = include_str!("results/v0/projects-list.json").trim_right();
    let expected_filter = include_str!("results/v0/projects-list-filter.json").trim_right();

    let mut req = MockRequest::new(Method::Get, "/projects/list");
    let mut response = req.dispatch_with(rocket);

    assert_eq!(response.status(), Status::Ok);
    let body_str = response.body().and_then(|b| b.into_string());
    assert_eq!(body_str, Some(expected_default.to_string()));

    // Paging: skip the first two projects and return at most two.
    let mut req = MockRequest::new(Method::Get, "/projects/list?offset=2&limit=2");
    let mut response = req.dispatch_with(rocket);

    assert_eq!(response.status(), Status::Ok);
    let body_str = response.body().and_then(|b| b.into_string());
    assert_eq!(body_str, Some(expected_filter.to_string()));
}
// NOTE the minimal database doesn't depsolve, so this checks for an empty response
#[test]
fn test_projects_depsolve() {
assert_eq!(FRAMEWORK.initialized, true);
let rocket = &FRAMEWORK.rocket;
// v0_projects_depsolve()
let expected = include_str!("results/v0/projects-depsolve.json").trim_right();
let mut req = MockRequest::new(Method::Get, "/projects/depsolve/bash");
let mut response = req.dispatch_with(rocket);
assert_eq!(response.status(), Status::Ok);
let body_str = response.body().and_then(|b| b.into_string());
assert_eq!(body_str, Some(expected.to_string()));
}
#[test]
fn test_v0_projects_info() {
assert_eq!(FRAMEWORK.initialized, true);
let rocket = &FRAMEWORK.rocket;
// v0_projects_info()
let expected_default = include_str!("results/v0/projects-info.json").trim_right();
let mut req = MockRequest::new(Method::Get, "/projects/info/bash");
let mut response = req.dispatch_with(rocket);
assert_eq!(response.status(), Status::Ok);
let body_str = response.body().and_then(|b| b.into_string());
assert_eq!(body_str, Some(expected_default.to_string()));
}
#[test]
fn test_v0_modules_info() {
assert_eq!(FRAMEWORK.initialized, true);
let rocket = &FRAMEWORK.rocket;
// v0_modules_info()
let expected_default = include_str!("results/v0/modules-info.json").trim_right();
let mut req = MockRequest::new(Method::Get, "/modules/info/basesystem");
let mut response = req.dispatch_with(rocket);
assert_eq!(response.status(), Status::Ok);
let body_str = response.body().and_then(|b| b.into_string());
assert_eq!(body_str, Some(expected_default.to_string()));
}
#[test]
fn test_v0_modules_list_noargs() {
assert_eq!(FRAMEWORK.initialized, true);
let rocket = &FRAMEWORK.rocket;
// v0_modules_list_noargs()
let expected_default = include_str!("results/v0/modules-list.json").trim_right();
let expected_filter = include_str!("results/v0/modules-list-filter.json").trim_right();
let mut req = MockRequest::new(Method::Get, "/modules/list");
let mut response = req.dispatch_with(rocket);
assert_eq!(response.status(), Status::Ok);
let body_str = response.body().and_then(|b| b.into_string());
assert_eq!(body_str, Some(expected_default.to_string()));
let mut req = MockRequest::new(Method::Get, "/modules/list?offset=2&limit=2");
let mut response = req.dispatch_with(rocket);
assert_eq!(response.status(), Status::Ok);
let body_str = response.body().and_then(|b| b.into_string());
assert_eq!(body_str, Some(expected_filter.to_string()));
}
#[test]
fn test_v0_recipes_info() {
assert_eq!(FRAMEWORK.initialized, true);
let rocket = &FRAMEWORK.rocket;
// v0_recipes_info()
let expected_default = include_str!("results/v0/recipes-info.json").trim_right();
let mut req = MockRequest::new(Method::Get, "/recipes/info/jboss,kubernetes");
let mut response = req.dispatch_with(rocket);
assert_eq!(response.status(), Status::Ok);
let body_str = response.body().and_then(|b| b.into_string());
assert_eq!(body_str, Some(expected_default.to_string()));
}
#[test]
fn test_v0_recipes_changes() {
assert_eq!(FRAMEWORK.initialized, true);
let rocket = &FRAMEWORK.rocket;
// v0_recipes_changes()
let mut req = MockRequest::new(Method::Get, "/recipes/changes/atlas,kubernetes");
let mut response = req.dispatch_with(rocket);
assert_eq!(response.status(), Status::Ok);
let body_str = response.body().and_then(|b| b.into_string()).unwrap_or_default();
let j: Value = serde_json::from_str(&body_str).unwrap();
assert_eq!(j["recipes"][0]["name"], "atlas".to_string());
assert_eq!(j["recipes"][0]["changes"][0]["message"], "Recipe atlas, version 0.0.1 saved".to_string());
assert_eq!(j["recipes"][0]["total"], Value::from(1));
assert_eq!(j["recipes"][1]["name"], "kubernetes".to_string());
assert_eq!(j["recipes"][1]["changes"][0]["message"], "Recipe kubernetes, version 0.0.1 saved".to_string());
assert_eq!(j["recipes"][1]["total"], Value::from(1));
let mut req = MockRequest::new(Method::Get, "/recipes/changes/atlas?limit=1");
let mut response = req.dispatch_with(rocket);
assert_eq!(response.status(), Status::Ok);
let body_str = response.body().and_then(|b| b.into_string()).unwrap_or_default();
let j: Value = serde_json::from_str(&body_str).unwrap();
assert_eq!(j["recipes"][0]["name"], "atlas".to_string());
assert_eq!(j["recipes"][0]["changes"][0]["message"], "Recipe atlas, version 0.0.1 saved".to_string());
let mut req = MockRequest::new(Method::Get, "/recipes/changes/atlas?offset=1&limit=1");
let mut response = req.dispatch_with(rocket);
assert_eq!(response.status(), Status::Ok);
let body_str = response.body().and_then(|b| b.into_string()).unwrap_or_default();
let j: Value = serde_json::from_str(&body_str).unwrap();
assert_eq!(j["recipes"][0]["name"], "atlas".to_string());
assert_eq!(j["recipes"][0]["changes"], Value::Array(vec![]));
assert_eq!(j["recipes"][0]["total"], Value::from(1));
}
// NOTE the minimal database doesn't depsolve, so this checks for an empty response
#[test]
fn test_recipes_depsolve() {
assert_eq!(FRAMEWORK.initialized, true);
let rocket = &FRAMEWORK.rocket;
// v0_recipes_depsolve()
let expected = include_str!("results/v0/recipes-depsolve.json").trim_right();
let mut req = MockRequest::new(Method::Get, "/recipes/depsolve/kubernetes");
let mut response = req.dispatch_with(rocket);
assert_eq!(response.status(), Status::Ok);
let body_str = response.body().and_then(|b| b.into_string());
assert_eq!(body_str, Some(expected.to_string()));
}
// NOTE the minimal database doesn't depsolve, so this checks for an empty response
#[test]
fn test_recipes_freeze() {
assert_eq!(FRAMEWORK.initialized, true);
let rocket = &FRAMEWORK.rocket;
// v0_recipes_freeze()
let expected = include_str!("results/v0/recipes-freeze.json").trim_right();
let mut req = MockRequest::new(Method::Get, "/recipes/freeze/http-server");
let mut response = req.dispatch_with(rocket);
assert_eq!(response.status(), Status::Ok);
let body_str = response.body().and_then(|b| b.into_string());
assert_eq!(body_str, Some(expected.to_string()));
}
#[test]
fn test_v0_recipes() {
// NOTE All the recipe tests need to be in the same thread, otherwise they will
// interfere with each other
assert_eq!(FRAMEWORK.initialized, true);
let rocket = &FRAMEWORK.rocket;
// v0_recipes_list()
// TODO Copy ./examples/recipes/ to a temporary directory
let expected_default = include_str!("results/v0/recipes-list.json").trim_right();
let expected_filter = include_str!("results/v0/recipes-list-filter.json").trim_right();
let mut req = MockRequest::new(Method::Get, "/recipes/list/");
let mut response = req.dispatch_with(rocket);
assert_eq!(response.status(), Status::Ok);
let body_str = response.body().and_then(|b| b.into_string());
assert_eq!(body_str, Some(expected_default.to_string()));
let mut req = MockRequest::new(Method::Get, "/recipes/list?limit=2");
let mut response = req.dispatch_with(rocket);
assert_eq!(response.status(), Status::Ok);
let body_str = response.body().and_then(|b| b.into_string());
assert_eq!(body_str, Some(expected_filter.to_string()));
// v0_recipes_new()
let recipe_json = include_str!("results/v0/recipes-new.json").trim_right();
let mut req = MockRequest::new(Method::Post, "/recipes/new")
.header(ContentType::JSON)
.body(recipe_json);
let mut response = req.dispatch_with(rocket);
assert_eq!(response.status(), Status::Ok);
let body_str = response.body().and_then(|b| b.into_string());
assert_eq!(body_str, Some("{\"status\":true}".to_string()));
// v0_recipes_delete
// Delete the test recipe created above
let mut req = MockRequest::new(Method::Delete, "/recipes/delete/recipe-test");
let mut response = req.dispatch_with(rocket);
assert_eq!(response.status(), Status::Ok);
let body_str = response.body().and_then(|b| b.into_string());
assert_eq!(body_str, Some("{\"status\":true}".to_string()));
// v0_recipes_new_toml()
// Update the example http-server recipe with some changes.
let recipe_toml = include_str!("results/v0/http-server.toml").trim_right();
let mut req = MockRequest::new(Method::Post, "/recipes/new")
.header(ContentType::new("text", "x-toml"))
.body(recipe_toml);
let mut response = req.dispatch_with(rocket);
assert_eq!(response.status(), Status::Ok);
let body_str = response.body().and_then(|b| b.into_string());
assert_eq!(body_str, Some("{\"status\":true}".to_string()));
// v0_recipes_diff()
// Need the commit id from the change to http-server for the next test
let mut req = MockRequest::new(Method::Get, "/recipes/changes/http-server");
let mut response = req.dispatch_with(rocket);
assert_eq!(response.status(), Status::Ok);
let body_str = response.body().and_then(|b| b.into_string()).unwrap_or_default();
let j: Value = serde_json::from_str(&body_str).unwrap();
assert_eq!(j["recipes"][0]["name"], "http-server".to_string());
assert_eq!(j["recipes"][0]["changes"][1]["message"], "Recipe http-server, version 0.0.1 saved".to_string());
// Convert serde::Value to a &str
let commit_id = match j["recipes"][0]["changes"][1]["commit"].as_str() {
Some(str) => str,
None => ""
};
// Check the diff
let expected_diff = include_str!("results/v0/recipes-diff.json").trim_right();
let mut req = MockRequest::new(Method::Get, format!("/recipes/diff/http-server/{}/NEWEST", commit_id));
let mut response = req.dispatch_with(rocket);
assert_eq!(response.status(), Status::Ok);
let body_str = response.body().and_then(|b| b.into_string());
assert_eq!(body_str, Some(expected_diff.to_string()));
// First write some changes to the recipe
let recipe_json = include_str!("results/v0/recipes-new-v2.json").trim_right();
// v0_recipes_workspace
// Write the new recipe to the workspace first, confirm recipes/info returns changed:true
let mut req = MockRequest::new(Method::Post, "/recipes/workspace")
.header(ContentType::JSON)
.body(recipe_json);
let mut response = req.dispatch_with(rocket);
assert_eq!(response.status(), Status::Ok);
let body_str = response.body().and_then(|b| b.into_string());
assert_eq!(body_str, Some("{\"status\":true}".to_string()));
let mut req = MockRequest::new(Method::Get, "/recipes/info/recipe-test");
let mut response = req.dispatch_with(rocket);
assert_eq!(response.status(), Status::Ok);
let body_str = response.body().and_then(|b| b.into_string()).unwrap_or_default();
let j: Value = serde_json::from_str(&body_str).unwrap();
assert_eq!(j["changes"][0]["name"], "recipe-test".to_string());
assert_eq!(j["changes"][0]["changed"], Value::Bool(true));
// Test workspace changes booleans
let mut req = MockRequest::new(Method::Post, "/recipes/new")
.header(ContentType::JSON)
.body(recipe_json);
let mut response = req.dispatch_with(rocket);
assert_eq!(response.status(), Status::Ok);
let body_str = response.body().and_then(|b| b.into_string());
assert_eq!(body_str, Some("{\"status\":true}".to_string()));
// Confirm that info now shows changed:false
let mut req = MockRequest::new(Method::Get, "/recipes/info/recipe-test");
let mut response = req.dispatch_with(rocket);
assert_eq!(response.status(), Status::Ok);
let body_str = response.body().and_then(|b| b.into_string()).unwrap_or_default();
let j: Value = serde_json::from_str(&body_str).unwrap();
assert_eq!(j["changes"][0]["name"], "recipe-test".to_string());
assert_eq!(j["changes"][0]["changed"], Value::Bool(false));
// Tag the latest recipe commit (it will be revision 1)
let mut req = MockRequest::new(Method::Post, "/recipes/tag/recipe-test");
let mut response = req.dispatch_with(rocket);
assert_eq!(response.status(), Status::Ok);
let body_str = response.body().and_then(|b| b.into_string());
assert_eq!(body_str, Some("{\"status\":true}".to_string()));
// Write some new changes to the workspace
let recipe_json = include_str!("results/v0/recipes-new-v3.json").trim_right();
let mut req = MockRequest::new(Method::Post, "/recipes/workspace")
.header(ContentType::JSON)
.body(recipe_json);
let mut response = req.dispatch_with(rocket);
assert_eq!(response.status(), Status::Ok);
let body_str = response.body().and_then(|b| b.into_string());
assert_eq!(body_str, Some("{\"status\":true}".to_string()));
// Check the diff between the NEWEST and WORKSPACE
let expected_diff = include_str!("results/v0/recipes-diff-workspace.json").trim_right();
<|fim▁hole|> let mut req = MockRequest::new(Method::Get, "/recipes/diff/recipe-test/NEWEST/WORKSPACE");
let mut response = req.dispatch_with(rocket);
assert_eq!(response.status(), Status::Ok);
let body_str = response.body().and_then(|b| b.into_string());
assert_eq!(body_str, Some(expected_diff.to_string()));
// Get the original commit
let mut req = MockRequest::new(Method::Get, "/recipes/changes/recipe-test");
let mut response = req.dispatch_with(rocket);
assert_eq!(response.status(), Status::Ok);
let body_str = response.body().and_then(|b| b.into_string()).unwrap_or_default();
let j: Value = serde_json::from_str(&body_str).unwrap();
assert_eq!(j["recipes"][0]["name"], "recipe-test".to_string());
assert_eq!(j["recipes"][0]["changes"][0]["revision"], 1);
// Convert serde::Value to a &str
let commit_id = match j["recipes"][0]["changes"][1]["commit"].as_str() {
Some(str) => str,
None => ""
};
// Undo the change
let mut req = MockRequest::new(Method::Post, format!("/recipes/undo/recipe-test/{}", commit_id));
let mut response = req.dispatch_with(rocket);
assert_eq!(response.status(), Status::Ok);
let body_str = response.body().and_then(|b| b.into_string());
assert_eq!(body_str, Some("{\"status\":true}".to_string()));
// Confirm that info has reverted to the previous commit and has no revision
let mut req = MockRequest::new(Method::Get, "/recipes/info/recipe-test");
let mut response = req.dispatch_with(rocket);
assert_eq!(response.status(), Status::Ok);
let body_str = response.body().and_then(|b| b.into_string()).unwrap_or_default();
let j: Value = serde_json::from_str(&body_str).unwrap();
assert_eq!(j["recipes"][0]["name"], "recipe-test".to_string());
assert_eq!(j["recipes"][0]["version"], "0.3.12".to_string());
assert_eq!(j["recipes"][0]["changes"][0]["revision"], Value::Null);
}<|fim▁end|>
| |
<|file_name|>test_watcher.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
""" watcher test cases """
<|fim▁hole|>from zk_shell.watcher import ChildWatcher
class WatcherTestCase(ShellTestCase):
""" test watcher """
def test_add_update(self):
watcher = ChildWatcher(self.client, print_func=self.shell.show_output)
path = "%s/watch" % self.tests_path
self.shell.onecmd("create %s ''" % path)
watcher.add(path, True)
# update() calls remove() as well, if the path exists.
watcher.update(path)
expected = "\n/tests/watch:\n\n"
self.assertEquals(expected, self.output.getvalue())<|fim▁end|>
|
from .shell_test_case import ShellTestCase
|
<|file_name|>MailAlarm.java<|end_file_name|><|fim▁begin|>package com.taobao.zeus.broadcast.alarm;
import java.net.InetAddress;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Properties;
import javax.mail.Message;
import javax.mail.MessagingException;
import javax.mail.NoSuchProviderException;
import javax.mail.Session;
import javax.mail.Transport;
import javax.mail.internet.InternetAddress;
import javax.mail.internet.MimeMessage;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import com.taobao.zeus.model.LogDescriptor;
import com.taobao.zeus.store.UserManager;
import com.taobao.zeus.store.mysql.MysqlLogManager;
import com.taobao.zeus.store.mysql.persistence.ZeusUser;
import com.taobao.zeus.util.Environment;
public class MailAlarm extends AbstractZeusAlarm {
private static Logger log = LoggerFactory.getLogger(MailAlarm.class);
@Autowired
private UserManager userManager;
@Autowired
private MysqlLogManager zeusLogManager;
private static String host = Environment.getHost();// 邮件服务器
private static String port = Environment.getPort();// 端口
private static String from = Environment.getSendFrom();// 发送者
private static String user = Environment.getUsername();// 用户名
private static String password = Environment.getPassword();// 密码
@Override
public void alarm(String jobId, List<String> users, String title, String content)
throws Exception {
List<ZeusUser> userList = userManager.findListByUidByOrder(users);
List<String> emails = new ArrayList<String>();
if (userList != null && userList.size() > 0) {
for (ZeusUser user : userList) {
String userEmail = user.getEmail();
if (userEmail != null && !userEmail.isEmpty()
&& userEmail.contains("@")) {
if (userEmail.contains(";")) {
String[] userEmails = userEmail.split(";");
for (String ems : userEmails) {
if (ems.contains("@")) {
emails.add(ems);
}
}
} else {
emails.add(userEmail);
}
}
}
if (emails.size() > 0) {
content = content.replace("<br/>", "\r\n");
sendEmail(jobId, emails, title, content);
/*try{
LogDescriptor logDescriptor = new LogDescriptor();
logDescriptor.setLogType("email");
logDescriptor.setIp(InetAddress.getLocalHost().getHostAddress());
logDescriptor.setUserName("zeus");
logDescriptor.setUrl(jobId);
logDescriptor.setRpc(emails.toString());
logDescriptor.setDelegate(title);
logDescriptor.setMethod("");
// logDescriptor.setDescription((content.length()>4000 ? content.substring(4000) : content));
logDescriptor.setDescription("");
zeusLogManager.addLog(logDescriptor);
}catch(Exception ex){
log.error(ex.toString());
}*/
}
}
}
public void sendEmail(String jobId, List<String> emails, String subject,
String body) {
try {<|fim▁hole|> props.put("mail.smtp.port", port);
props.put("mail.smtp.auth", "true");
Transport transport = null;
Session session = Session.getDefaultInstance(props, null);
transport = session.getTransport("smtp");
transport.connect(host, user, password);
MimeMessage msg = new MimeMessage(session);
msg.setSentDate(new Date());
InternetAddress fromAddress = new InternetAddress(from);
msg.setFrom(fromAddress);
InternetAddress[] toAddress = new InternetAddress[emails.size()];
for (int i = 0; i < emails.size(); i++) {
toAddress[i] = new InternetAddress(emails.get(i));
}
msg.setRecipients(Message.RecipientType.TO, toAddress);
msg.setSubject(subject, "UTF-8");
msg.setText(body, "UTF-8");
msg.saveChanges();
transport.sendMessage(msg, msg.getAllRecipients());
log.info("jobId: " + jobId + " send email: " + emails + "; from: " + from + " subject: "
+ subject + ", send success!");
} catch (NoSuchProviderException e) {
log.error("jobId: " + jobId + " fail to send the mail. ", e);
} catch (MessagingException e) {
log.error("jobId: " + jobId + " fail to send the mail. ", e);
} catch (Exception e) {
log.error("jobId: " + jobId + " fail to send the mail. ", e);
}
}
}<|fim▁end|>
|
log.info( "jobId: " + jobId +" begin to send the email!");
Properties props = new Properties();
props.put("mail.smtp.host", host);
|
<|file_name|>index.tsx<|end_file_name|><|fim▁begin|>import * as React from 'react';
import PropTypes from 'prop-types';
import RcSelect, { Option, OptGroup } from 'rc-select';
import classNames from 'classnames';
import LocaleReceiver from '../locale-provider/LocaleReceiver';
import defaultLocale from '../locale-provider/default';
export interface AbstractSelectProps {
prefixCls?: string;
className?: string;
size?: 'default' | 'large' | 'small';
notFoundContent?: React.ReactNode | null;
transitionName?: string;
choiceTransitionName?: string;
showSearch?: boolean;
allowClear?: boolean;
disabled?: boolean;
style?: React.CSSProperties;
placeholder?: string;
dropdownClassName?: string;
dropdownStyle?: React.CSSProperties;
dropdownMenuStyle?: React.CSSProperties;
onSearch?: (value: string) => any;
filterOption?: boolean | ((inputValue: string, option: React.ReactElement<OptionProps>) => any);
}
export interface LabeledValue {
key: string;
label: React.ReactNode;
}
export type SelectValue = string | any[] | LabeledValue | LabeledValue[];
export interface SelectProps extends AbstractSelectProps {
value?: SelectValue;
defaultValue?: SelectValue;
mode?: 'default' | 'multiple' | 'tags' | 'combobox';
optionLabelProp?: string;
onChange?: (value: SelectValue) => void;
onSelect?: (value: SelectValue, option: Object) => any;
onDeselect?: (value: SelectValue) => any;
onBlur?: () => any;
onFocus?: () => any;
dropdownMatchSelectWidth?: boolean;
optionFilterProp?: string;
defaultActiveFirstOption?: boolean;
labelInValue?: boolean;
getPopupContainer?: (triggerNode: Element) => HTMLElement;
tokenSeparators?: string[];
getInputElement?: () => React.ReactElement<any>;
autoFocus?: boolean;
}
export interface OptionProps {
disabled?: boolean;
value?: any;
title?: string;
children?: React.ReactNode;
}
export interface OptGroupProps {
label?: React.ReactNode;
}
export interface SelectLocale {
notFoundContent?: string;
}
const SelectPropTypes = {
prefixCls: PropTypes.string,
className: PropTypes.string,
size: PropTypes.oneOf(['default', 'large', 'small']),
combobox: PropTypes.bool,
notFoundContent: PropTypes.any,
showSearch: PropTypes.bool,
optionLabelProp: PropTypes.string,
transitionName: PropTypes.string,
choiceTransitionName: PropTypes.string,
};
// => It is needless to export the declaration of below two inner components.
// export { Option, OptGroup };
export default class Select extends React.Component<SelectProps, {}> {
static Option = Option as React.ClassicComponentClass<OptionProps>;
static OptGroup = OptGroup as React.ClassicComponentClass<OptGroupProps>;
static defaultProps = {
prefixCls: 'ant-select',
showSearch: false,
transitionName: 'slide-up',
choiceTransitionName: 'zoom',
};
static propTypes = SelectPropTypes;
private rcSelect: any;
focus() {
this.rcSelect.focus();
}
blur() {
this.rcSelect.blur();
}
saveSelect = (node: any) => {
this.rcSelect = node;
}
renderSelect = (locale: SelectLocale) => {
const {
prefixCls,
className = '',
size,
mode,
...restProps,
} = this.props;
const cls = classNames({
[`${prefixCls}-lg`]: size === 'large',
[`${prefixCls}-sm`]: size === 'small',
}, className);
let { notFoundContent, optionLabelProp } = this.props;
const isCombobox = mode === 'combobox';
if (isCombobox) {
// children 带 dom 结构时,无法填入输入框
optionLabelProp = optionLabelProp || 'value';
}
const modeConfig = {
multiple: mode === 'multiple',
tags: mode === 'tags',
combobox: isCombobox,
};
const notFoundContentLocale = isCombobox ?
null : notFoundContent || locale.notFoundContent;
return (
<RcSelect
{...restProps}
{...modeConfig}
prefixCls={prefixCls}
className={cls}
optionLabelProp={optionLabelProp || 'children'}
notFoundContent={notFoundContentLocale}
ref={this.saveSelect}
/><|fim▁hole|> render() {
return (
<LocaleReceiver
componentName="Select"
defaultLocale={defaultLocale.Select}
>
{this.renderSelect}
</LocaleReceiver>
);
}
}<|fim▁end|>
|
);
}
|
<|file_name|>notparallel_chain_fit.py<|end_file_name|><|fim▁begin|># Copied from LMC documentation
# Modified to use MPI (but not enable parallelization), to increase the parameter degeneracy, and to disperse the start points
# Here is a simple example. As shown it will run in non-parallel mode; comments indicate what to do for parallelization.
from lmc import *
## for MPI
from mpi4py import MPI
mpi_rank = MPI.COMM_WORLD.Get_rank()
from numpy.random import rand
### Define some parameters.
startx = [-10.0, -10.0, 10.0, 10.0]
starty = [-10.0, 10.0, -10.0, 10.0]
x = Parameter(name='x', value=startx[mpi_rank], width=0.1)
y = Parameter(name='y', value=starty[mpi_rank], width=0.1)
### This is the object that will be passed to the likelihood function.
### In this simple case, it just holds the parameter objects, but in general it could be anything.
### E.g., usually it would also contain or point to the data being used to constrain the model. A good idea is to write the state of any updaters to a file after each adaptation (using the on_adapt functionality), in which case keeping pointers to the updaters here is convenient. Also commonly useful: a DerivedParameter which holds the value of the posterior log-density for each sample.
class Thing:
def __init__(self, x, y):
self.x = x
self.y = y
thing = Thing(x, y)
### The log-posterior function. Here we just assume a bivariate Gaussian posterior with marginal standard deviations s(x)=2 and s(y)=3, correlation coefficient 0.75, and means <x>=-1, <y>=1.
def post(thing):
r = 0.99
sx = 2.0
sy = 3.0
mx = -1.0
my = 1.0
return -0.5/(1.0-r**2)*( (thing.x()-mx)**2/sx**2 + (thing.y()-my)**2/sy**2 - 2.0*r*(thing.x()-mx)/sx*(thing.y()-my)/sy )
### Create a parameter space consisting of x and y, and associate the log-posterior function with it.
space = ParameterSpace([thing.x, thing.y], post)
### If we'd bothered to define a DerivedParameter in Thing which would hold the posterior density, we might want to define a larger ParameterSpace and pass it to the Engine later on to be saved in the Backends (instead of space).
#trace = ParameterSpace([thing.x, thing.y, thing.logP])
### Use slice sampling for robustness. Adapt the proposal distribution every 100 iterations starting with the 100th.
step = Metropolis()
parallel = None
## for MPI parallelization
#parallel = MPI.COMM_WORLD
## for parallelization via the filesystem, this would have to be set to a different value for each concurrently running instance
#parallel = 1
updater = MultiDimSequentialUpdater(space, step, 100, 100, parallel=parallel)
### Create an Engine and tell it to drive this Updater and to store the values of the free parameters.
engine = Engine([updater], space)
### Store the chain in a text file.
#chainfile = open("chain.txt", 'w')
## For filesystem parallelization, each instance should write to a different file.
## For MPI, the same is true, e.g.
chainfile = open("notparallel" + str(MPI.COMM_WORLD.Get_rank()) + ".txt", 'w')
backends = [ textBackend(chainfile) ]<|fim▁hole|>### Run the chain for 10000 iterations
engine(10000, thing, backends)
### Close the text file to clean up.
chainfile.close()
## If this was a parallel run, print the convergence criterion for each parameter.
# print updater.R<|fim▁end|>
|
### Print the chain to the terminal as well
#backends.append( stdoutBackend() )
|
<|file_name|>lint-non-uppercase-associated-const.rs<|end_file_name|><|fim▁begin|>#![deny(non_upper_case_globals)]
#![allow(dead_code)]
<|fim▁hole|>impl Foo {
const not_upper: bool = true;
}
//~^^ ERROR associated constant `not_upper` should have an upper case name
fn main() {}<|fim▁end|>
|
struct Foo;
|
<|file_name|>CardImg.d.ts<|end_file_name|><|fim▁begin|><|fim▁hole|>import { CSSModule } from './utils';
export interface CardImgProps
extends React.ImgHTMLAttributes<HTMLImageElement> {
[key: string]: any;
tag?: React.ElementType;
top?: boolean;
bottom?: boolean;
cssModule?: CSSModule;
}
declare class CardImg extends React.Component<CardImgProps> {}
export default CardImg;<|fim▁end|>
|
import * as React from 'react';
|
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>//
// imag - the personal information management suite for the commandline
// Copyright (C) 2015-2020 Matthias Beyer <[email protected]> and contributors
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; version
// 2.1 of the License.<|fim▁hole|>// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
//
pub mod field_eq;
pub mod field_exists;
pub mod field_grep;
pub mod field_gt;
pub mod field_isempty;
pub mod field_istype;
pub mod field_lt;
pub mod field_path;
pub mod field_predicate;
pub mod version;<|fim▁end|>
|
//
// This library is distributed in the hope that it will be useful,
|
<|file_name|>endpoint_handler.py<|end_file_name|><|fim▁begin|>"""
HTTP handeler to serve specific endpoint request like
http://myserver:9004/endpoints/mymodel
For how generic endpoints requests is served look
at endpoints_handler.py
"""
import json
import logging
import shutil
from tabpy.tabpy_server.common.util import format_exception
from tabpy.tabpy_server.handlers import ManagementHandler
from tabpy.tabpy_server.handlers.base_handler import STAGING_THREAD
from tabpy.tabpy_server.management.state import get_query_object_path
from tabpy.tabpy_server.psws.callbacks import on_state_change
from tabpy.tabpy_server.handlers.util import AuthErrorStates
from tornado import gen
class EndpointHandler(ManagementHandler):
def initialize(self, app):
super(EndpointHandler, self).initialize(app)
def get(self, endpoint_name):
if self.should_fail_with_auth_error() != AuthErrorStates.NONE:
self.fail_with_auth_error()
return
self.logger.log(logging.DEBUG, f"Processing GET for /endpoints/{endpoint_name}")
self._add_CORS_header()
if not endpoint_name:
self.write(json.dumps(self.tabpy_state.get_endpoints()))
else:
if endpoint_name in self.tabpy_state.get_endpoints():
self.write(json.dumps(self.tabpy_state.get_endpoints()[endpoint_name]))
else:
self.error_out(
404,
"Unknown endpoint",
info=f"Endpoint {endpoint_name} is not found",
)
@gen.coroutine
def put(self, name):
if self.should_fail_with_auth_error() != AuthErrorStates.NONE:
self.fail_with_auth_error()
return
self.logger.log(logging.DEBUG, f"Processing PUT for /endpoints/{name}")
try:
if not self.request.body:
self.error_out(400, "Input body cannot be empty")
self.finish()
return
try:
request_data = json.loads(self.request.body.decode("utf-8"))
except BaseException as ex:
self.error_out(
400, log_message="Failed to decode input body", info=str(ex)
)
self.finish()
return
# check if endpoint exists
endpoints = self.tabpy_state.get_endpoints(name)
if len(endpoints) == 0:
self.error_out(404, f"endpoint {name} does not exist.")
self.finish()
return
new_version = int(endpoints[name]["version"]) + 1
self.logger.log(logging.INFO, f"Endpoint info: {request_data}")
err_msg = yield self._add_or_update_endpoint(
"update", name, new_version, request_data
)
if err_msg:
self.error_out(400, err_msg)
self.finish()
else:
self.write(self.tabpy_state.get_endpoints(name))
self.finish()
except Exception as e:
err_msg = format_exception(e, "update_endpoint")
self.error_out(500, err_msg)
self.finish()
@gen.coroutine
def delete(self, name):
if self.should_fail_with_auth_error() != AuthErrorStates.NONE:
self.fail_with_auth_error()
return
self.logger.log(logging.DEBUG, f"Processing DELETE for /endpoints/{name}")
try:
endpoints = self.tabpy_state.get_endpoints(name)
if len(endpoints) == 0:
self.error_out(404, f"endpoint {name} does not exist.")
self.finish()
return
# update state
try:
endpoint_info = self.tabpy_state.delete_endpoint(name)
except Exception as e:
self.error_out(400, f"Error when removing endpoint: {e.message}")
self.finish()
return
<|fim▁hole|> self.settings["state_file_path"], name, None
)
try:
yield self._delete_po_future(delete_path)
except Exception as e:
self.error_out(400, f"Error while deleting: {e}")
self.finish()
return
self.set_status(204)
self.finish()
except Exception as e:
err_msg = format_exception(e, "delete endpoint")
self.error_out(500, err_msg)
self.finish()
on_state_change(
self.settings, self.tabpy_state, self.python_service, self.logger
)
@gen.coroutine
def _delete_po_future(self, delete_path):
future = STAGING_THREAD.submit(shutil.rmtree, delete_path)
ret = yield future
raise gen.Return(ret)<|fim▁end|>
|
# delete files
if endpoint_info["type"] != "alias":
delete_path = get_query_object_path(
|
<|file_name|>app.ts<|end_file_name|><|fim▁begin|>import {Component} from 'angular2/core';
import {RouteConfig, ROUTER_DIRECTIVES} from 'angular2/router';
import {ManagmentFormComponent} from '/app/component/managment/form';<|fim▁hole|>
@Component({
selector: 'my-app',
template: `
<!-- Static navbar -->
<nav class="navbar navbar-default navbar-static-top">
<div class="container">
<div class="navbar-header">
<button type="button" class="navbar-toggle collapsed" data-toggle="collapse" data-target="#navbar" aria-expanded="false" aria-controls="navbar">
<span class="sr-only">Toggle navigation</span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
</button>
<a class="navbar-brand" href="#">Bookkeeping</a>
</div>
<div id="navbar" class="navbar-collapse collapse">
<ul class="nav navbar-nav">
<li class="dropdown">
<a href="#" class="dropdown-toggle" data-toggle="dropdown" role="button" aria-haspopup="true" aria-expanded="false">Forms <span class="caret"></span></a>
<ul class="dropdown-menu">
<li><a href="/questionnaire/proposal_benefit">todo</a></li>
</ul>
</li>
</ul>
<ul class="nav navbar-nav">
<li class="dropdown">
<a href="#" class="dropdown-toggle" data-toggle="dropdown" role="button" aria-haspopup="true" aria-expanded="false">Managment <span class="caret"></span></a>
<ul class="dropdown-menu">
<li><a [routerLink]="['ManagmentForm']">Forms</a></li>
</ul>
</li>
</ul>
</div><!--/.nav-collapse -->
</div>
</nav>
<div class="container">
<router-outlet></router-outlet>
</div> <!-- /container -->
`,
// providers: [DialogService, HeroService],
directives: [ROUTER_DIRECTIVES]
})
@RouteConfig([
{
path: '/managment/form',
name: 'ManagmentForm',
component: ManagmentFormComponent,
}
,{
path: '/managment/form/add',
name: 'ManagmentFormAdd',
component: ManagmentFormAddComponent,
}
])
export class AppComponent { }
/*
Copyright 2016 Google Inc. All Rights Reserved.
Use of this source code is governed by an MIT-style license that
can be found in the LICENSE file at http://angular.io/license
*/<|fim▁end|>
|
import {ManagmentFormAddComponent} from '/app/component/managment/form.add';
|
<|file_name|>wallet.py<|end_file_name|><|fim▁begin|># Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Wallet classes:
# - Imported_Wallet: imported address, no keystore
# - Standard_Wallet: one keystore, P2PKH
# - Multisig_Wallet: several keystores, P2SH
import os
import threading
import random
import time
import json
import copy
import errno
import traceback
from functools import partial
from collections import defaultdict
from numbers import Number
from decimal import Decimal
import sys
from .i18n import _
from .util import (NotEnoughFunds, PrintError, UserCancelled, profiler,
format_satoshis, NoDynamicFeeEstimates)
from .bitcoin import *
from .version import *
from .keystore import load_keystore, Hardware_KeyStore
from .storage import multisig_type, STO_EV_PLAINTEXT, STO_EV_USER_PW, STO_EV_XPUB_PW
from . import transaction
from .transaction import Transaction
from .plugins import run_hook
from . import bitcoin
from . import coinchooser
from .synchronizer import Synchronizer
from .verifier import SPV
from . import paymentrequest
from .paymentrequest import PR_PAID, PR_UNPAID, PR_UNKNOWN, PR_EXPIRED
from .paymentrequest import InvoiceStore
from .contacts import Contacts
# Human-readable labels for the small status codes computed by
# Abstract_Wallet.get_tx_status(); indexed by that code (0..4).
TX_STATUS = [
    _('Replaceable'),
    _('Unconfirmed parent'),
    _('Unconfirmed'),
    _('Not Verified'),
    _('Local only'),
]
# Sentinel "heights" for transactions that are not in a block yet.
TX_HEIGHT_LOCAL = -2          # known only to this wallet, never seen on the network
TX_HEIGHT_UNCONF_PARENT = -1  # in mempool, but spends an unconfirmed parent
TX_HEIGHT_UNCONFIRMED = 0     # in mempool
def relayfee(network):
    """Return the relay fee (sat/kB) reported by the network, clamped.

    Falls back to FEERATE_DEFAULT_RELAY when offline or when the server
    reports nothing; capped so a misbehaving server cannot inflate fees.
    """
    from .simple_config import FEERATE_DEFAULT_RELAY
    ceiling = 50000  # hard upper bound on the accepted relay fee
    if network and network.relay_fee:
        fee = network.relay_fee
    else:
        fee = FEERATE_DEFAULT_RELAY
    return min(fee, ceiling)
def dust_threshold(network):
    """Satoshi value at or below which an output counts as dust.

    Change <= this threshold is folded into the tx fee instead of being
    created as an output.
    """
    spend_size = 182  # vbytes of a typical P2PKH input spend; x3 per relay policy
    return spend_size * 3 * relayfee(network) / 1000
def append_utxos_to_inputs(inputs, network, pubkey, txin_type, imax):
    """Query the server for UTXOs controlled by `pubkey` and append them,
    in place, to `inputs` as txin dicts, stopping at `imax` total inputs."""
    if txin_type != 'p2pk':
        address = bitcoin.pubkey_to_address(txin_type, pubkey)
        sh = bitcoin.address_to_scripthash(address)
    else:
        # p2pk outputs have no address; look them up by script hash directly
        script = bitcoin.public_key_to_p2pk_script(pubkey)
        sh = bitcoin.script_to_scripthash(script)
        address = '(pubkey)'
    u = network.synchronous_get(('blockchain.scripthash.listunspent', [sh]))
    for item in u:
        if len(inputs) >= imax:
            break
        # reshape the server's listunspent entry into a signable txin dict
        item['address'] = address
        item['type'] = txin_type
        item['prevout_hash'] = item['tx_hash']
        item['prevout_n'] = item['tx_pos']
        item['pubkeys'] = [pubkey]
        item['x_pubkeys'] = [pubkey]
        item['signatures'] = [None]
        item['num_sig'] = 1
        inputs.append(item)
def sweep_preparations(privkeys, network, imax=100):
    """Find all UTXOs spendable by the given serialized private keys.

    Returns (inputs, keypairs) where `inputs` are txin dicts ready for
    signing and `keypairs` maps pubkey -> (privkey, compressed).
    Raises if nothing spendable is found.
    """
    def find_utxos_for_privkey(txin_type, privkey, compressed):
        pubkey = bitcoin.public_key_from_private_key(privkey, compressed)
        append_utxos_to_inputs(inputs, network, pubkey, txin_type, imax)
        keypairs[pubkey] = privkey, compressed
    inputs = []
    keypairs = {}
    for sec in privkeys:
        txin_type, privkey, compressed = bitcoin.deserialize_privkey(sec)
        find_utxos_for_privkey(txin_type, privkey, compressed)
        # do other lookups to increase support coverage
        if is_minikey(sec):
            # minikeys don't have a compressed byte
            # we lookup both compressed and uncompressed pubkeys
            find_utxos_for_privkey(txin_type, privkey, not compressed)
        elif txin_type == 'p2pkh':
            # WIF serialization does not distinguish p2pkh and p2pk
            # we also search for pay-to-pubkey outputs
            find_utxos_for_privkey('p2pk', privkey, compressed)
    if not inputs:
        raise BaseException(_('No inputs found. (Note that inputs need to be confirmed)'))
    return inputs, keypairs
def sweep(privkeys, network, config, recipient, fee=None, imax=100):
    """Build a signed, RBF-enabled tx sending everything spendable by
    `privkeys` (minus `fee`, estimated when None) to `recipient`."""
    inputs, keypairs = sweep_preparations(privkeys, network, imax)
    total = sum(i.get('value') for i in inputs)
    if fee is None:
        # build a throwaway full-value tx only to estimate its size
        outputs = [(TYPE_ADDRESS, recipient, total)]
        tx = Transaction.from_io(inputs, outputs)
        fee = config.estimate_fee(tx.estimated_size())
    if total - fee < 0:
        raise BaseException(_('Not enough funds on address.') + '\nTotal: %d satoshis\nFee: %d'%(total, fee))
    if total - fee < dust_threshold(network):
        raise BaseException(_('Not enough funds on address.') + '\nTotal: %d satoshis\nFee: %d\nDust Threshold: %d'%(total, fee, dust_threshold(network)))
    outputs = [(TYPE_ADDRESS, recipient, total - fee)]
    # lock to the current tip height to discourage fee sniping
    locktime = network.get_local_height()
    tx = Transaction.from_io(inputs, outputs, locktime=locktime)
    tx.BIP_LI01_sort()
    tx.set_rbf(True)
    tx.sign(keypairs)
    return tx
class AddTransactionException(Exception):
    """Base class for errors raised while adding a tx to the wallet history."""
class UnrelatedTransactionException(AddTransactionException):
    """Raised when a tx touches no address belonging to this wallet."""
    def __str__(self):
        return _("Transaction is unrelated to this wallet.")
class NotIsMineTransactionException(AddTransactionException):
    """Raised for a local tx whose inputs are not owned by the wallet."""
    def __str__(self):
        return _("Only transactions with inputs owned by the wallet can be added.")
class Abstract_Wallet(PrintError):
    """
    Wallet classes are created to handle various address generation methods.
    Completion states (watching-only, single account, no seed, etc) are handled inside classes.
    """
    max_change_outputs = 3  # hard cap on change outputs created per transaction
    def __init__(self, storage):
        """Load all persisted wallet state from `storage`.

        No network activity happens here; synchronizer/verifier stay None
        until start_threads() is called with a live network.
        """
        self.electrum_version = ELECTRUM_VERSION
        self.storage = storage
        self.network = None
        # verifier (SPV) and synchronizer are started in start_threads
        self.synchronizer = None
        self.verifier = None
        self.gap_limit_for_change = 6 # constant
        # saved fields
        self.use_change = storage.get('use_change', True)
        self.multiple_change = storage.get('multiple_change', False)
        self.labels = storage.get('labels', {})
        self.frozen_addresses = set(storage.get('frozen_addresses',[]))
        self.history = storage.get('addr_history',{})  # address -> list(txid, height)
        self.fiat_value = storage.get('fiat_value', {})
        self.load_keystore()
        self.load_addresses()
        self.load_transactions()
        self.build_spent_outpoints()
        # load requests
        self.receive_requests = self.storage.get('payment_requests', {})
        # Transactions pending verification. A map from tx hash to transaction
        # height. Access is not contended so no lock is needed.
        self.unverified_tx = defaultdict(int)
        # Verified transactions. Each value is a (height, timestamp, block_pos) tuple. Access with self.lock.
        self.verified_tx = storage.get('verified_tx3', {})
        # there is a difference between wallet.up_to_date and interface.is_up_to_date()
        # interface.is_up_to_date() returns true when all requests have been answered and processed
        # wallet.up_to_date is true when the wallet is synchronized (stronger requirement)
        self.up_to_date = False
        # locks: if you need to take multiple ones, acquire them in the order they are defined here!
        self.lock = threading.RLock()
        self.transaction_lock = threading.RLock()
        self.check_history()
        # save wallet type the first time
        if self.storage.get('wallet_type') is None:
            self.storage.put('wallet_type', self.wallet_type)
        # invoices and contacts
        self.invoices = InvoiceStore(self.storage)
        self.contacts = Contacts(self.storage)
    def diagnostic_name(self):
        # PrintError hook: prefix log lines with the wallet file name.
        return self.basename()
    def __str__(self):
        # A wallet prints as its storage file name.
        return self.basename()
    def get_master_public_key(self):
        # Overridden by deterministic wallets; the base wallet has no xpub.
        return None
    @profiler
    def load_transactions(self):
        """Load txi/txo/fee/pruned maps and raw txs from storage; drop any
        stored tx that none of those indexes reference."""
        self.txi = self.storage.get('txi', {})
        self.txo = self.storage.get('txo', {})
        self.tx_fees = self.storage.get('tx_fees', {})
        self.pruned_txo = self.storage.get('pruned_txo', {})
        tx_list = self.storage.get('transactions', {})
        self.transactions = {}
        for tx_hash, raw in tx_list.items():
            tx = Transaction(raw)
            self.transactions[tx_hash] = tx
            if self.txi.get(tx_hash) is None and self.txo.get(tx_hash) is None \
                    and (tx_hash not in self.pruned_txo.values()):
                self.print_error("removing unreferenced tx", tx_hash)
                self.transactions.pop(tx_hash)
    @profiler
    def save_transactions(self, write=False):
        """Serialize all tx-related maps into storage; flush to disk only
        when `write` is True."""
        with self.transaction_lock:
            tx = {}
            for k,v in self.transactions.items():
                tx[k] = str(v)  # raw hex serialization of the tx
            self.storage.put('transactions', tx)
            self.storage.put('txi', self.txi)
            self.storage.put('txo', self.txo)
            self.storage.put('tx_fees', self.tx_fees)
            self.storage.put('pruned_txo', self.pruned_txo)
            self.storage.put('addr_history', self.history)
            if write:
                self.storage.write()
def clear_history(self):
with self.lock:
with self.transaction_lock:
self.txi = {}
self.txo = {}
self.tx_fees = {}
self.pruned_txo = {}
self.spent_outpoints = {}
self.history = {}
self.verified_tx = {}
self.transactions = {}
self.save_transactions()
@profiler
def build_spent_outpoints(self):
self.spent_outpoints = {}
for txid, items in self.txi.items():
for addr, l in items.items():
for ser, v in l:
self.spent_outpoints[ser] = txid
    @profiler
    def check_history(self):
        """Repair history loaded from storage: drop addresses that are not
        ours, and re-index txs whose txi/txo entries are missing."""
        save = False
        hist_addrs_mine = list(filter(lambda k: self.is_mine(k), self.history.keys()))
        hist_addrs_not_mine = list(filter(lambda k: not self.is_mine(k), self.history.keys()))
        for addr in hist_addrs_not_mine:
            self.history.pop(addr)
            save = True
        for addr in hist_addrs_mine:
            hist = self.history[addr]
            for tx_hash, tx_height in hist:
                # already indexed (or pruned) -> nothing to repair
                if tx_hash in self.pruned_txo.values() or self.txi.get(tx_hash) or self.txo.get(tx_hash):
                    continue
                tx = self.transactions.get(tx_hash)
                if tx is not None:
                    self.add_transaction(tx_hash, tx)
                    save = True
        if save:
            self.save_transactions()
def basename(self):
return os.path.basename(self.storage.path)
    def save_addresses(self):
        # Persist both address chains under a single storage key.
        self.storage.put('addresses', {'receiving':self.receiving_addresses, 'change':self.change_addresses})
    def load_addresses(self):
        """Load receiving/change address lists from storage."""
        d = self.storage.get('addresses', {})
        if type(d) != dict: d={}  # guard against corrupted storage
        self.receiving_addresses = d.get('receiving', [])
        self.change_addresses = d.get('change', [])
    def synchronize(self):
        # No-op here; deterministic wallets override this to roll the gap limit.
        pass
    def is_deterministic(self):
        # Delegates to the keystore (True for seed/xpub based wallets).
        return self.keystore.is_deterministic()
    def set_up_to_date(self, up_to_date):
        """Record sync state; persist everything when we become synchronized."""
        with self.lock:
            self.up_to_date = up_to_date
        if up_to_date:
            self.save_transactions(write=True)
    def is_up_to_date(self):
        # True once the wallet is fully synchronized with the server.
        with self.lock: return self.up_to_date
def set_label(self, name, text = None):
changed = False
old_text = self.labels.get(name)
if text:
text = text.replace("\n", " ")
if old_text != text:
self.labels[name] = text
changed = True
else:
if old_text:
self.labels.pop(name)
changed = True
if changed:
run_hook('set_label', self, name, text)
self.storage.put('labels', self.labels)
return changed
    def set_fiat_value(self, txid, ccy, text):
        """Record (or clear, when `text` is falsy) a user-entered fiat value
        for a wallet tx; silently ignores unknown txids and non-numeric text."""
        if txid not in self.transactions:
            return
        if not text:
            d = self.fiat_value.get(ccy, {})
            if d and txid in d:
                d.pop(txid)
            else:
                return  # nothing stored, nothing to clear
        else:
            try:
                Decimal(text)
            except:
                return  # reject values Decimal cannot parse
            if ccy not in self.fiat_value:
                self.fiat_value[ccy] = {}
            self.fiat_value[ccy][txid] = text
        self.storage.put('fiat_value', self.fiat_value)
def get_fiat_value(self, txid, ccy):
fiat_value = self.fiat_value.get(ccy, {}).get(txid)
try:
return Decimal(fiat_value)
except:
return
    def is_mine(self, address):
        # Membership test over all wallet addresses (receiving + change).
        return address in self.get_addresses()
    def is_change(self, address):
        """True if `address` belongs to the wallet's change chain."""
        if not self.is_mine(address):
            return False
        # index is (chain, n); chain 1 is change, 0 is receiving
        return self.get_address_index(address)[0]
    def get_address_index(self, address):
        # Subclasses map an address to its (chain, index) derivation tuple.
        raise NotImplementedError()
    def get_redeem_script(self, address):
        # Only script-hash wallets (e.g. multisig) have a redeem script.
        return None
    def export_private_key(self, address, password):
        """Return (WIF-serialized privkey, redeem script or None) for one of
        our addresses; an empty list for watching-only wallets."""
        if self.is_watching_only():
            return []
        index = self.get_address_index(address)
        pk, compressed = self.keystore.get_private_key(index, password)
        txin_type = self.get_txin_type(address)
        redeem_script = self.get_redeem_script(address)
        serialized_privkey = bitcoin.serialize_privkey(pk, compressed, txin_type)
        return serialized_privkey, redeem_script
    def get_public_keys(self, address):
        # Single-sig default; multisig wallets override with several keys.
        return [self.get_public_key(address)]
    def add_unverified_tx(self, tx_hash, tx_height):
        """Register a tx as pending SPV verification at `tx_height`.

        A previously verified tx that falls back into the mempool (e.g. after
        a reorg) is demoted: its record and cached merkle root are dropped.
        """
        if tx_height in (TX_HEIGHT_UNCONFIRMED, TX_HEIGHT_UNCONF_PARENT) \
                and tx_hash in self.verified_tx:
            self.verified_tx.pop(tx_hash)
            if self.verifier:
                self.verifier.merkle_roots.pop(tx_hash, None)
        # tx will be verified only if height > 0
        if tx_hash not in self.verified_tx:
            self.unverified_tx[tx_hash] = tx_height
    def add_verified_tx(self, tx_hash, info):
        """Promote a tx to SPV-verified; `info` is (height, timestamp, pos)."""
        # Remove from the unverified map and add to the verified map and
        self.unverified_tx.pop(tx_hash, None)
        with self.lock:
            self.verified_tx[tx_hash] = info  # (tx_height, timestamp, pos)
        height, conf, timestamp = self.get_tx_height(tx_hash)
        self.network.trigger_callback('verified', tx_hash, height, conf, timestamp)
    def get_unverified_txs(self):
        '''Returns a map from tx hash to transaction height'''
        return self.unverified_tx
    def undo_verifications(self, blockchain, height):
        '''Used by the verifier when a reorg has happened'''
        txs = set()
        with self.lock:
            for tx_hash, item in list(self.verified_tx.items()):
                tx_height, timestamp, pos = item
                if tx_height >= height:
                    # tx was in a block at/after the reorg point; re-verify
                    header = blockchain.read_header(tx_height)
                    # fixme: use block hash, not timestamp
                    if not header or header.get('timestamp') != timestamp:
                        self.verified_tx.pop(tx_hash, None)
                        txs.add(tx_hash)
        return txs
    def get_local_height(self):
        """ return last known height if we are offline """
        return self.network.get_local_height() if self.network else self.storage.get('stored_height', 0)
    def get_tx_height(self, tx_hash):
        """ Given a transaction, returns (height, conf, timestamp) """
        with self.lock:
            if tx_hash in self.verified_tx:
                height, timestamp, pos = self.verified_tx[tx_hash]
                # confirmations counted from the locally known tip
                conf = max(self.get_local_height() - height + 1, 0)
                return height, conf, timestamp
            elif tx_hash in self.unverified_tx:
                height = self.unverified_tx[tx_hash]
                return height, 0, None
            else:
                # local transaction
                return TX_HEIGHT_LOCAL, 0, None
    def get_txpos(self, tx_hash):
        "return position, even if the tx is unverified"
        with self.lock:
            if tx_hash in self.verified_tx:
                height, timestamp, pos = self.verified_tx[tx_hash]
                return height, pos
            elif tx_hash in self.unverified_tx:
                height = self.unverified_tx[tx_hash]
                # negative sentinel heights map above 1e9 so unconfirmed
                # txs sort after everything confirmed
                return (height, 0) if height > 0 else ((1e9 - height), 0)
            else:
                return (1e9+1, 0)
def is_found(self):
return self.history.values() != [[]] * len(self.history)
def get_num_tx(self, address):
""" return number of transactions where address is involved """
return len(self.history.get(address, []))
    def get_tx_delta(self, tx_hash, address):
        """Net effect (satoshis) of `tx_hash` on one address; None if pruned."""
        # pruned
        if tx_hash in self.pruned_txo.values():
            return None
        delta = 0
        # substract the value of coins sent from address
        d = self.txi.get(tx_hash, {}).get(address, [])
        for n, v in d:
            delta -= v
        # add the value of the coins received at address
        d = self.txo.get(tx_hash, {}).get(address, [])
        for n, v, cb in d:
            delta += v
        return delta
def get_tx_value(self, txid):
" effect of tx on the entire domain"
delta = 0
for addr, d in self.txi.get(txid, {}).items():
for n, v in d:
delta -= v
for addr, d in self.txo.get(txid, {}).items():
for n, v, cb in d:
delta += v
return delta
    def get_wallet_delta(self, tx):
        """Classify `tx` relative to this wallet.

        Returns (is_relevant, is_mine, v, fee):
        is_relevant -- tx touches at least one wallet address
        is_mine     -- at least one input is ours
        v           -- net value change for the wallet, in satoshis
        fee         -- tx fee when computable (all inputs ours), else None
        """
        addresses = self.get_addresses()
        is_relevant = False
        is_mine = False
        is_pruned = False
        is_partial = False
        v_in = v_out = v_out_mine = 0
        for item in tx.inputs():
            addr = item.get('address')
            if addr in addresses:
                is_mine = True
                is_relevant = True
                # recover the spent value from our txo index
                d = self.txo.get(item['prevout_hash'], {}).get(addr, [])
                for n, v, cb in d:
                    if n == item['prevout_n']:
                        value = v
                        break
                else:
                    value = None
                if value is None:
                    is_pruned = True
                else:
                    v_in += value
            else:
                is_partial = True
        if not is_mine:
            is_partial = False
        for addr, value in tx.get_outputs():
            v_out += value
            if addr in addresses:
                v_out_mine += value
                is_relevant = True
        if is_pruned:
            # some inputs are mine:
            fee = None
            if is_mine:
                v = v_out_mine - v_out
            else:
                # no input is mine
                v = v_out_mine
        else:
            v = v_out_mine - v_in
            if is_partial:
                # some inputs are mine, but not all
                fee = None
            else:
                # all inputs are mine
                fee = v_in - v_out
        if not is_mine:
            fee = None
        return is_relevant, is_mine, v, fee
    def get_tx_info(self, tx):
        """Summarize `tx` for display.

        Returns (tx_hash, status, label, can_broadcast, can_bump, amount,
        fee, height, conf, timestamp, exp_n) where exp_n is an ETA estimate
        (blocks) for unconfirmed fee-bumpable txs.
        """
        is_relevant, is_mine, v, fee = self.get_wallet_delta(tx)
        exp_n = None
        can_broadcast = False
        can_bump = False
        label = ''
        height = conf = timestamp = None
        tx_hash = tx.txid()
        if tx.is_complete():
            if tx_hash in self.transactions.keys():
                label = self.get_label(tx_hash)
                height, conf, timestamp = self.get_tx_height(tx_hash)
                if height > 0:
                    if conf:
                        status = _("{} confirmations").format(conf)
                    else:
                        status = _('Not verified')
                elif height in (TX_HEIGHT_UNCONF_PARENT, TX_HEIGHT_UNCONFIRMED):
                    status = _('Unconfirmed')
                    if fee is None:
                        fee = self.tx_fees.get(tx_hash)
                    if fee and self.network.config.has_fee_etas():
                        size = tx.estimated_size()
                        fee_per_kb = fee * 1000 / size
                        exp_n = self.network.config.fee_to_eta(fee_per_kb)
                    # RBF: only non-final txs of ours can be fee-bumped
                    can_bump = is_mine and not tx.is_final()
                else:
                    status = _('Local')
                    can_broadcast = self.network is not None
            else:
                status = _("Signed")
                can_broadcast = self.network is not None
        else:
            s, r = tx.signature_count()
            status = _("Unsigned") if s == 0 else _('Partially signed') + ' (%d/%d)'%(s,r)
        if is_relevant:
            if is_mine:
                if fee is not None:
                    amount = v + fee
                else:
                    amount = v
            else:
                amount = v
        else:
            amount = None
        return tx_hash, status, label, can_broadcast, can_bump, amount, fee, height, conf, timestamp, exp_n
    def get_addr_io(self, address):
        """Return (received, sent) for an address:

        received -- 'txid:n' -> (height, value, is_coinbase) per output paid
                    to the address
        sent     -- outpoint 'txid:n' -> height at which it was spent
        """
        h = self.get_address_history(address)
        received = {}
        sent = {}
        for tx_hash, height in h:
            l = self.txo.get(tx_hash, {}).get(address, [])
            for n, v, is_cb in l:
                received[tx_hash + ':%d'%n] = (height, v, is_cb)
        for tx_hash, height in h:
            l = self.txi.get(tx_hash, {}).get(address, [])
            for txi, v in l:
                sent[txi] = height
        return received, sent
def get_addr_utxo(self, address):
coins, spent = self.get_addr_io(address)
for txi in spent:
coins.pop(txi)
out = {}
for txo, v in coins.items():
tx_height, value, is_cb = v
prevout_hash, prevout_n = txo.split(':')
x = {
'address':address,
'value':value,
'prevout_n':int(prevout_n),
'prevout_hash':prevout_hash,
'height':tx_height,
'coinbase':is_cb
}
out[txo] = x
return out
# return the total amount ever received by an address
def get_addr_received(self, address):
received, sent = self.get_addr_io(address)
return sum([v for height, v, is_cb in received.values()])
    # return the balance of a bitcoin address: confirmed and matured, unconfirmed, unmatured
    def get_addr_balance(self, address):
        """Return (confirmed, unconfirmed, unmatured) balances in satoshis."""
        received, sent = self.get_addr_io(address)
        c = u = x = 0
        for txo, (tx_height, v, is_cb) in received.items():
            if is_cb and tx_height + COINBASE_MATURITY > self.get_local_height():
                x += v  # immature coinbase output
            elif tx_height > 0:
                c += v  # confirmed
            else:
                u += v  # still in mempool
            if txo in sent:
                if sent[txo] > 0:
                    c -= v  # spent by a confirmed tx
                else:
                    u -= v  # spent by an unconfirmed tx
        return c, u, x
def get_spendable_coins(self, domain, config):
confirmed_only = config.get('confirmed_only', False)
return self.get_utxos(domain, exclude_frozen=True, mature=True, confirmed_only=confirmed_only)
def get_utxos(self, domain = None, exclude_frozen = False, mature = False, confirmed_only = False):
coins = []
if domain is None:
domain = self.get_addresses()
if exclude_frozen:
domain = set(domain) - self.frozen_addresses
for addr in domain:
utxos = self.get_addr_utxo(addr)
for x in utxos.values():
if confirmed_only and x['height'] <= 0:
continue
if mature and x['coinbase'] and x['height'] + COINBASE_MATURITY > self.get_local_height():
continue
coins.append(x)
continue
return coins
    def dummy_address(self):
        # Any wallet address; used as a placeholder when sizing/building txs.
        return self.get_receiving_addresses()[0]
def get_addresses(self):
out = []
out += self.get_receiving_addresses()
out += self.get_change_addresses()
return out
    def get_frozen_balance(self):
        # (confirmed, unconfirmed, unmatured) restricted to frozen addresses.
        return self.get_balance(self.frozen_addresses)
def get_balance(self, domain=None):
if domain is None:
domain = self.get_addresses()
cc = uu = xx = 0
for addr in domain:
c, u, x = self.get_addr_balance(addr)
cc += c
uu += u
xx += x
return cc, uu, xx
    def get_address_history(self, addr):
        """List of (tx_hash, height) for every wallet tx touching `addr`."""
        h = []
        # we need self.transaction_lock but get_tx_height will take self.lock
        # so we need to take that too here, to enforce order of locks
        with self.lock, self.transaction_lock:
            for tx_hash in self.transactions:
                if addr in self.txi.get(tx_hash, []) or addr in self.txo.get(tx_hash, []):
                    tx_height = self.get_tx_height(tx_hash)[0]
                    h.append((tx_hash, tx_height))
        return h
    def get_txin_address(self, txi):
        """Best-effort address for a tx input; resolves '(pubkey)' inputs by
        matching the spent outpoint in our txo index (may return None)."""
        addr = txi.get('address')
        if addr != "(pubkey)":
            return addr
        prevout_hash = txi.get('prevout_hash')
        prevout_n = txi.get('prevout_n')
        dd = self.txo.get(prevout_hash, {})
        for addr, l in dd.items():
            for n, v, is_cb in l:
                if n == prevout_n:
                    self.print_error("found pay-to-pubkey address:", addr)
                    return addr
    def get_txout_address(self, txo):
        """Address of a (type, x, value) output triple; None when the output
        pays to a raw script we cannot express as an address."""
        _type, x, v = txo
        if _type == TYPE_ADDRESS:
            addr = x
        elif _type == TYPE_PUBKEY:
            # derive the P2PKH address from the raw pubkey
            addr = bitcoin.public_key_to_p2pkh(bfh(x))
        else:
            addr = None
        return addr
    def get_conflicting_transactions(self, tx):
        """Returns a set of transaction hashes from the wallet history that are
        directly conflicting with tx, i.e. they have common outpoints being
        spent with tx. If the tx is already in wallet history, that will not be
        reported as a conflict.
        """
        conflicting_txns = set()
        with self.transaction_lock:
            for txi in tx.inputs():
                ser = Transaction.get_outpoint_from_txin(txi)
                if ser is None:
                    continue
                spending_tx_hash = self.spent_outpoints.get(ser, None)
                if spending_tx_hash is None:
                    continue
                # this outpoint (ser) has already been spent, by spending_tx
                # (spent_outpoints only ever references txs we still store)
                assert spending_tx_hash in self.transactions
                conflicting_txns |= {spending_tx_hash}
            txid = tx.txid()
            if txid in conflicting_txns:
                # this tx is already in history, so it conflicts with itself
                if len(conflicting_txns) > 1:
                    raise Exception('Found conflicting transactions already in wallet history.')
                conflicting_txns -= {txid}
            return conflicting_txns
    def add_transaction(self, tx_hash, tx):
        """Index `tx` into txi/txo/pruned_txo/spent_outpoints and store it.

        Returns True when the tx is kept, False when it is dropped in favor
        of conflicting txs already in history.  Raises
        NotIsMineTransactionException / UnrelatedTransactionException.
        """
        # we need self.transaction_lock but get_tx_height will take self.lock
        # so we need to take that too here, to enforce order of locks
        with self.lock, self.transaction_lock:
            # NOTE: returning if tx in self.transactions might seem like a good idea
            # BUT we track is_mine inputs in a txn, and during subsequent calls
            # of add_transaction tx, we might learn of more-and-more inputs of
            # being is_mine, as we roll the gap_limit forward
            is_coinbase = tx.inputs()[0]['type'] == 'coinbase'
            tx_height = self.get_tx_height(tx_hash)[0]
            is_mine = any([self.is_mine(txin['address']) for txin in tx.inputs()])
            # do not save if tx is local and not mine
            if tx_height == TX_HEIGHT_LOCAL and not is_mine:
                # FIXME the test here should be for "not all is_mine"; cannot detect conflict in some cases
                raise NotIsMineTransactionException()
            # raise exception if unrelated to wallet
            is_for_me = any([self.is_mine(self.get_txout_address(txo)) for txo in tx.outputs()])
            if not is_mine and not is_for_me:
                raise UnrelatedTransactionException()
            # Find all conflicting transactions.
            # In case of a conflict,
            #     1. confirmed > mempool > local
            #     2. this new txn has priority over existing ones
            # When this method exits, there must NOT be any conflict, so
            # either keep this txn and remove all conflicting (along with dependencies)
            #     or drop this txn
            conflicting_txns = self.get_conflicting_transactions(tx)
            if conflicting_txns:
                existing_mempool_txn = any(
                    self.get_tx_height(tx_hash2)[0] in (TX_HEIGHT_UNCONFIRMED, TX_HEIGHT_UNCONF_PARENT)
                    for tx_hash2 in conflicting_txns)
                existing_confirmed_txn = any(
                    self.get_tx_height(tx_hash2)[0] > 0
                    for tx_hash2 in conflicting_txns)
                if existing_confirmed_txn and tx_height <= 0:
                    # this is a non-confirmed tx that conflicts with confirmed txns; drop.
                    return False
                if existing_mempool_txn and tx_height == TX_HEIGHT_LOCAL:
                    # this is a local tx that conflicts with non-local txns; drop.
                    return False
                # keep this txn and remove all conflicting
                to_remove = set()
                to_remove |= conflicting_txns
                for conflicting_tx_hash in conflicting_txns:
                    to_remove |= self.get_depending_transactions(conflicting_tx_hash)
                for tx_hash2 in to_remove:
                    self.remove_transaction(tx_hash2)
            # add inputs
            self.txi[tx_hash] = d = {}
            for txi in tx.inputs():
                addr = self.get_txin_address(txi)
                if txi['type'] != 'coinbase':
                    prevout_hash = txi['prevout_hash']
                    prevout_n = txi['prevout_n']
                    ser = prevout_hash + ':%d'%prevout_n
                # find value from prev output
                if addr and self.is_mine(addr):
                    dd = self.txo.get(prevout_hash, {})
                    for n, v, is_cb in dd.get(addr, []):
                        if n == prevout_n:
                            if d.get(addr) is None:
                                d[addr] = []
                            d[addr].append((ser, v))
                            # we only track is_mine spends
                            self.spent_outpoints[ser] = tx_hash
                            break
                    else:
                        # prev output not indexed yet; remember for later
                        self.pruned_txo[ser] = tx_hash
            # add outputs
            self.txo[tx_hash] = d = {}
            for n, txo in enumerate(tx.outputs()):
                v = txo[2]
                ser = tx_hash + ':%d'%n
                addr = self.get_txout_address(txo)
                if addr and self.is_mine(addr):
                    if d.get(addr) is None:
                        d[addr] = []
                    d[addr].append((n, v, is_coinbase))
                    # give v to txi that spends me
                    next_tx = self.pruned_txo.get(ser)
                    if next_tx is not None:
                        self.pruned_txo.pop(ser)
                        dd = self.txi.get(next_tx, {})
                        if dd.get(addr) is None:
                            dd[addr] = []
                        dd[addr].append((ser, v))
            # save
            self.transactions[tx_hash] = tx
            return True
    def remove_transaction(self, tx_hash):
        """Remove a tx from history, undoing every index entry it created
        and re-pruning the inputs of txs that spent its outputs."""
        def undo_spend(outpoint_to_txid_map):
            # forget that this tx spent its input outpoints
            for addr, l in self.txi[tx_hash].items():
                for ser, v in l:
                    outpoint_to_txid_map.pop(ser, None)
        with self.transaction_lock:
            self.print_error("removing tx from history", tx_hash)
            self.transactions.pop(tx_hash, None)
            undo_spend(self.pruned_txo)
            undo_spend(self.spent_outpoints)
            # add tx to pruned_txo, and undo the txi addition
            for next_tx, dd in self.txi.items():
                for addr, l in list(dd.items()):
                    ll = l[:]
                    for item in ll:
                        ser, v = item
                        prev_hash, prev_n = ser.split(':')
                        if prev_hash == tx_hash:
                            l.remove(item)
                            self.pruned_txo[ser] = next_tx
                    if l == []:
                        dd.pop(addr)
                    else:
                        dd[addr] = l
            try:
                self.txi.pop(tx_hash)
                self.txo.pop(tx_hash)
            except KeyError:
                self.print_error("tx was not in history", tx_hash)
    def receive_tx_callback(self, tx_hash, tx, tx_height):
        # Network callback: register the tx for verification and index it.
        self.add_unverified_tx(tx_hash, tx_height)
        self.add_transaction(tx_hash, tx)
    def receive_history_callback(self, addr, hist, tx_fees):
        """Network callback: replace our history for `addr` with the
        server-provided `hist` and reconcile tx state accordingly."""
        with self.lock:
            old_hist = self.get_address_history(addr)
            for tx_hash, height in old_hist:
                if (tx_hash, height) not in hist:
                    # make tx local
                    self.unverified_tx.pop(tx_hash, None)
                    self.verified_tx.pop(tx_hash, None)
                    self.verifier.merkle_roots.pop(tx_hash, None)
                    # but remove completely if not is_mine
                    if self.txi[tx_hash] == {}:
                        # FIXME the test here should be for "not all is_mine"; cannot detect conflict in some cases
                        self.remove_transaction(tx_hash)
            self.history[addr] = hist
        for tx_hash, tx_height in hist:
            # add it in case it was previously unconfirmed
            self.add_unverified_tx(tx_hash, tx_height)
            # if addr is new, we have to recompute txi and txo
            tx = self.transactions.get(tx_hash)
            if tx is not None and self.txi.get(tx_hash, {}).get(addr) is None and self.txo.get(tx_hash, {}).get(addr) is None:
                self.add_transaction(tx_hash, tx)
        # Store fees
        self.tx_fees.update(tx_fees)
    def get_history(self, domain=None):
        """Sorted (oldest first) list of (tx_hash, height, conf, timestamp,
        delta, balance) tuples over `domain` (all addresses when None)."""
        # get domain
        if domain is None:
            domain = self.get_addresses()
        # 1. Get the history of each address in the domain, maintain the
        #    delta of a tx as the sum of its deltas on domain addresses
        tx_deltas = defaultdict(int)
        for addr in domain:
            h = self.get_address_history(addr)
            for tx_hash, height in h:
                delta = self.get_tx_delta(tx_hash, addr)
                if delta is None or tx_deltas[tx_hash] is None:
                    tx_deltas[tx_hash] = None  # pruned: delta unknowable
                else:
                    tx_deltas[tx_hash] += delta
        # 2. create sorted history
        history = []
        for tx_hash in tx_deltas:
            delta = tx_deltas[tx_hash]
            height, conf, timestamp = self.get_tx_height(tx_hash)
            history.append((tx_hash, height, conf, timestamp, delta))
        history.sort(key = lambda x: self.get_txpos(x[0]))
        history.reverse()
        # 3. add balance
        c, u, x = self.get_balance(domain)
        balance = c + u + x
        h2 = []
        # walk newest-to-oldest, assigning the running balance, then flip
        for tx_hash, height, conf, timestamp, delta in history:
            h2.append((tx_hash, height, conf, timestamp, delta, balance))
            if balance is None or delta is None:
                balance = None
            else:
                balance -= delta
        h2.reverse()
        # fixme: this may happen if history is incomplete
        if balance not in [None, 0]:
            self.print_error("Error: history not synchronized")
            return []
        return h2
def balance_at_timestamp(self, domain, target_timestamp):
h = self.get_history(domain)
for tx_hash, height, conf, timestamp, value, balance in h:
if timestamp > target_timestamp:
return balance - value
# return last balance
return balance
    @profiler
    def get_full_history(self, domain=None, from_timestamp=None, to_timestamp=None, fx=None, show_addresses=False):
        """Detailed history with optional fiat valuation (`fx`) and address
        lists; returns {'transactions': [...], 'summary': {...}}."""
        from .util import timestamp_to_datetime, Satoshis, Fiat
        out = []
        capital_gains = 0
        fiat_income = 0
        h = self.get_history(domain)
        for tx_hash, height, conf, timestamp, value, balance in h:
            if from_timestamp and timestamp < from_timestamp:
                continue
            if to_timestamp and timestamp >= to_timestamp:
                continue
            item = {
                'txid':tx_hash,
                'height':height,
                'confirmations':conf,
                'timestamp':timestamp,
                'value': Satoshis(value),
                'balance': Satoshis(balance)
            }
            item['date'] = timestamp_to_datetime(timestamp)
            item['label'] = self.get_label(tx_hash)
            if show_addresses:
                tx = self.transactions.get(tx_hash)
                tx.deserialize()
                input_addresses = []
                output_addresses = []
                for x in tx.inputs():
                    if x['type'] == 'coinbase': continue
                    addr = self.get_txin_address(x)
                    if addr is None:
                        continue
                    input_addresses.append(addr)
                for addr, v in tx.get_outputs():
                    output_addresses.append(addr)
                item['input_addresses'] = input_addresses
                item['output_addresses'] = output_addresses
            if fx is not None:
                # unconfirmed txs are valued at "now"
                date = timestamp_to_datetime(time.time() if conf <= 0 else timestamp)
                fiat_value = self.get_fiat_value(tx_hash, fx.ccy)
                if fiat_value is None:
                    fiat_value = fx.historical_value(value, date)
                    fiat_default = True
                else:
                    fiat_default = False
                item['fiat_value'] = Fiat(fiat_value, fx.ccy)
                item['fiat_default'] = fiat_default
                if value is not None and value < 0:
                    # outgoing tx: realize capital gains
                    ap, lp = self.capital_gain(tx_hash, fx.timestamp_rate, fx.ccy)
                    cg = lp - ap
                    item['acquisition_price'] = Fiat(ap, fx.ccy)
                    item['capital_gain'] = Fiat(cg, fx.ccy)
                    capital_gains += cg
                else:
                    if fiat_value is not None:
                        fiat_income += fiat_value
            out.append(item)
        # add summary
        if out:
            b, v = out[0]['balance'].value, out[0]['value'].value
            start_balance = None if b is None or v is None else b - v
            end_balance = out[-1]['balance'].value
            if from_timestamp is not None and to_timestamp is not None:
                start_date = timestamp_to_datetime(from_timestamp)
                end_date = timestamp_to_datetime(to_timestamp)
            else:
                start_date = out[0]['date']
                end_date = out[-1]['date']
            summary = {
                'start_date': start_date,
                'end_date': end_date,
                'start_balance': Satoshis(start_balance),
                'end_balance': Satoshis(end_balance)
            }
            if fx:
                unrealized = self.unrealized_gains(domain, fx.timestamp_rate, fx.ccy)
                summary['capital_gains'] = Fiat(capital_gains, fx.ccy)
                summary['fiat_income'] = Fiat(fiat_income, fx.ccy)
                summary['unrealized_gains'] = Fiat(unrealized, fx.ccy)
                if start_date:
                    summary['start_fiat_balance'] = Fiat(fx.historical_value(start_balance, start_date), fx.ccy)
                if end_date:
                    summary['end_fiat_balance'] = Fiat(fx.historical_value(end_balance, end_date), fx.ccy)
        else:
            summary = {}
        return {
            'transactions': out,
            'summary': summary
        }
def get_label(self, tx_hash):
    """Return the user-assigned label for `tx_hash`, or a derived default.

    Falls back to get_default_label() when no label is stored.
    """
    label = self.labels.get(tx_hash, '')
    # Fix: test for the empty string with equality, not identity.
    # `label is ''` relies on CPython string interning (an implementation
    # detail) and raises a SyntaxWarning on modern Python.
    if label == '':
        label = self.get_default_label(tx_hash)
    return label
def get_default_label(self, tx_hash):
    """Derive a default label for `tx_hash`.

    Only incoming transactions (no wallet inputs recorded in self.txi)
    get a default label: the comma-joined labels of their output
    addresses. Anything else yields the empty string.
    """
    if self.txi.get(tx_hash) != {}:
        # Outgoing or unknown transaction: no default label.
        return ''
    outputs = self.txo.get(tx_hash, {})
    addr_labels = [self.labels.get(addr) for addr in outputs.keys()]
    return ', '.join(lbl for lbl in addr_labels if lbl)
def get_tx_status(self, tx_hash, height, conf, timestamp):
    """Return (status, status_str) for a transaction.

    status < 5 indexes the module-level TX_STATUS strings; for 5 and
    above the string is the formatted timestamp (confirmed txs).
    """
    from .util import format_time
    # exp_n: estimated mempool depth at this tx's fee level, or False
    # when no estimate is available.
    exp_n = False
    if conf == 0:
        tx = self.transactions.get(tx_hash)
        if not tx:
            return 2, 'unknown'
        is_final = tx and tx.is_final()
        fee = self.tx_fees.get(tx_hash)
        if fee and self.network and self.network.config.has_fee_mempool():
            size = tx.estimated_size()
            fee_per_kb = fee * 1000 / size
            exp_n = self.network.config.fee_to_depth(fee_per_kb//1000)
        if height == TX_HEIGHT_LOCAL:
            status = 4
        elif height == TX_HEIGHT_UNCONF_PARENT:
            status = 1
        elif height == TX_HEIGHT_UNCONFIRMED and not is_final:
            status = 0
        elif height == TX_HEIGHT_UNCONFIRMED:
            status = 2
        else:
            status = 3
    else:
        # Confirmed: status grows with confirmations, capped at 6.
        status = 4 + min(conf, 6)
    time_str = format_time(timestamp) if timestamp else _("unknown")
    status_str = TX_STATUS[status] if status < 5 else time_str
    if exp_n:
        # Append fee rate and estimated mempool bytes ahead of this tx.
        status_str += ' [%d sat/b, %.2f MB]'%(fee_per_kb//1000, exp_n/1000000)
    return status, status_str
def relayfee(self):
    # Minimum relay fee for the connected network (module-level helper).
    return relayfee(self.network)

def dust_threshold(self):
    # Smallest output value the wallet will create (module-level helper).
    return dust_threshold(self.network)
def make_unsigned_transaction(self, inputs, outputs, config, fixed_fee=None,
                              change_addr=None, is_sweep=False):
    """Build an unsigned Transaction paying `outputs` from `inputs`.

    outputs: list of (type, data, value) triples; a value of '!' marks
    at most one output as "spend max".
    fixed_fee: None (dynamic estimate), a number, or a fee-estimator
    callable taking a tx size.
    Raises NotEnoughFunds / NoDynamicFeeEstimates / BaseException on
    invalid arguments.
    """
    # check outputs
    i_max = None
    for i, o in enumerate(outputs):
        # Fix: unpack the output tuple before using its fields; the
        # unpacking statement was missing, so _type/data/value were
        # unbound (NameError on the first iteration).
        _type, data, value = o
        if _type == TYPE_ADDRESS:
            if not is_address(data):
                raise BaseException("Invalid bitcoin address:" + data)
        if value == '!':
            if i_max is not None:
                raise BaseException("More than one output set to spend max")
            i_max = i
    # Avoid index-out-of-range with inputs[0] below
    if not inputs:
        raise NotEnoughFunds()
    if fixed_fee is None and config.fee_per_kb() is None:
        raise NoDynamicFeeEstimates()
    if not is_sweep:
        for item in inputs:
            self.add_input_info(item)
    # change address
    if change_addr:
        change_addrs = [change_addr]
    else:
        addrs = self.get_change_addresses()[-self.gap_limit_for_change:]
        if self.use_change and addrs:
            # New change addresses are created only after a few
            # confirmations. Select the unused addresses within the
            # gap limit; if none take one at random
            change_addrs = [addr for addr in addrs if
                            self.get_num_tx(addr) == 0]
            if not change_addrs:
                change_addrs = [random.choice(addrs)]
        else:
            # coin_chooser will set change address
            change_addrs = []
    # Fee estimator
    if fixed_fee is None:
        fee_estimator = config.estimate_fee
    elif isinstance(fixed_fee, Number):
        fee_estimator = lambda size: fixed_fee
    elif callable(fixed_fee):
        fee_estimator = fixed_fee
    else:
        raise BaseException('Invalid argument fixed_fee: %s' % fixed_fee)
    if i_max is None:
        # Let the coin chooser select the coins to spend
        max_change = self.max_change_outputs if self.multiple_change else 1
        coin_chooser = coinchooser.get_coin_chooser(config)
        tx = coin_chooser.make_tx(inputs, outputs, change_addrs[:max_change],
                                  fee_estimator, self.dust_threshold())
    else:
        # "Spend max": send everything minus the fee to output i_max.
        # FIXME?? this might spend inputs with negative effective value...
        sendable = sum(map(lambda x: x['value'], inputs))
        _type, data, value = outputs[i_max]
        outputs[i_max] = (_type, data, 0)
        tx = Transaction.from_io(inputs, outputs[:])
        fee = fee_estimator(tx.estimated_size())
        amount = max(0, sendable - tx.output_value() - fee)
        outputs[i_max] = (_type, data, amount)
        tx = Transaction.from_io(inputs, outputs[:])
    # Sort the inputs and outputs deterministically
    tx.BIP_LI01_sort()
    # Timelock tx to current height.
    tx.locktime = self.get_local_height()
    run_hook('make_unsigned_transaction', self, tx)
    return tx
def mktx(self, outputs, password, config, fee=None, change_addr=None, domain=None):
    """Build, fund and sign a transaction paying `outputs`."""
    spendable = self.get_spendable_coins(domain, config)
    tx = self.make_unsigned_transaction(spendable, outputs, config, fee, change_addr)
    self.sign_transaction(tx, password)
    return tx
def is_frozen(self, addr):
    """Whether `addr` is currently frozen (excluded from spending)."""
    return addr in self.frozen_addresses

def set_frozen_state(self, addrs, freeze):
    '''Set frozen state of the addresses to FREEZE, True or False'''
    if not all(self.is_mine(addr) for addr in addrs):
        # Refuse to (un)freeze addresses that are not ours.
        return False
    addr_set = set(addrs)
    if freeze:
        self.frozen_addresses |= addr_set
    else:
        self.frozen_addresses -= addr_set
    self.storage.put('frozen_addresses', list(self.frozen_addresses))
    return True
def prepare_for_verifier(self):
    """Queue every transaction in the address history for SPV verification."""
    # Re-add each tx in case it was previously unconfirmed.
    for hist in self.history.values():
        for tx_hash, tx_height in hist:
            self.add_unverified_tx(tx_hash, tx_height)
def start_threads(self, network):
    """Attach to `network` and start the SPV verifier and synchronizer jobs."""
    self.network = network
    if self.network is not None:
        self.prepare_for_verifier()
        self.verifier = SPV(self.network, self)
        self.synchronizer = Synchronizer(self, network)
        network.add_jobs([self.verifier, self.synchronizer])
    else:
        # Offline mode: no background jobs run.
        self.verifier = None
        self.synchronizer = None
def stop_threads(self):
    """Detach from the network and flush all wallet state to storage."""
    if self.network:
        self.network.remove_jobs([self.synchronizer, self.verifier])
        self.synchronizer.release()
        self.synchronizer = None
        self.verifier = None
        # Now no references to the syncronizer or verifier
        # remain so they will be GC-ed
        self.storage.put('stored_height', self.get_local_height())
    self.save_transactions()
    self.storage.put('verified_tx3', self.verified_tx)
    self.storage.write()
def wait_until_synchronized(self, callback=None):
    """Block until the wallet has generated and synchronized its addresses.

    `callback`, when given, receives human-readable progress messages.
    """
    def wait_for_wallet():
        # Poll until the synchronizer marks the wallet up to date.
        self.set_up_to_date(False)
        while not self.is_up_to_date():
            if callback:
                msg = "%s\n%s %d"%(
                    _("Please wait..."),
                    _("Addresses generated:"),
                    len(self.addresses(True)))
                callback(msg)
            time.sleep(0.1)
    def wait_for_network():
        while not self.network.is_connected():
            if callback:
                msg = "%s \n" % (_("Connecting..."))
                callback(msg)
            time.sleep(0.1)
    # wait until we are connected, because the user
    # might have selected another server
    if self.network:
        wait_for_network()
        wait_for_wallet()
    else:
        # Offline: generate addresses synchronously.
        self.synchronize()
def can_export(self):
    """Private keys can be exported iff the keystore holds them."""
    has_privkeys = hasattr(self.keystore, 'get_private_key')
    return (not self.is_watching_only()) and has_privkeys

def is_used(self, address):
    """An address is 'used' once it has history and a zero balance."""
    if not self.history.get(address, []):
        return False
    confirmed, unconfirmed, unmatured = self.get_addr_balance(address)
    return confirmed + unconfirmed + unmatured == 0

def is_empty(self, address):
    """Whether the address currently holds no funds at all."""
    confirmed, unconfirmed, unmatured = self.get_addr_balance(address)
    return confirmed + unconfirmed + unmatured == 0
def address_is_old(self, address, age_limit=2):
    """Whether the oldest tx touching `address` is more than `age_limit`
    blocks deep. Addresses without history are never old."""
    oldest_age = -1
    for tx_hash, tx_height in self.history.get(address, []):
        # Unconfirmed / local txs (height <= 0) count as age 0.
        tx_age = 0 if tx_height <= 0 else self.get_local_height() - tx_height + 1
        oldest_age = max(oldest_age, tx_age)
    return oldest_age > age_limit
def bump_fee(self, tx, delta):
    """Return a copy of `tx` paying `delta` satoshis more in fees (RBF).

    The extra fee is taken from the wallet's own outputs, lowest value
    first; outputs that would fall below the dust threshold are dropped
    entirely. Raises BaseException when the tx is final or when the fee
    cannot be covered.
    """
    if tx.is_final():
        raise BaseException(_("Cannot bump fee: transaction is final"))
    inputs = copy.deepcopy(tx.inputs())
    outputs = copy.deepcopy(tx.outputs())
    # Discard existing signatures: outputs are about to change.
    for txin in inputs:
        txin['signatures'] = [None] * len(txin['signatures'])
        self.add_input_info(txin)
    # use own outputs
    s = list(filter(lambda x: self.is_mine(x[1]), outputs))
    # ... unless there is none
    if not s:
        s = outputs
        x_fee = run_hook('get_tx_extra_fee', self, tx)
        if x_fee:
            # Never take the bump from a plugin's extra-fee output.
            x_fee_address, x_fee_amount = x_fee
            s = filter(lambda x: x[1]!=x_fee_address, s)
    # prioritize low value outputs, to get rid of dust
    s = sorted(s, key=lambda x: x[2])
    for o in s:
        i = outputs.index(o)
        otype, address, value = o
        if value - delta >= self.dust_threshold():
            # This output can absorb the whole remaining delta.
            outputs[i] = otype, address, value - delta
            delta = 0
            break
        else:
            # Drop the output and keep collecting the remainder.
            # NOTE(review): if delta goes negative here, the excess is
            # returned via `value - delta` on the next iteration.
            del outputs[i]
            delta -= value
            if delta > 0:
                continue
    if delta > 0:
        raise BaseException(_('Cannot bump fee: could not find suitable outputs'))
    locktime = self.get_local_height()
    tx_new = Transaction.from_io(inputs, outputs, locktime=locktime)
    tx_new.BIP_LI01_sort()
    return tx_new
def cpfp(self, tx, fee):
    """Build a child-pays-for-parent tx spending our output of `tx`.

    Returns None when `tx` has no output to one of our addresses, or
    when that output is no longer in our UTXO set.
    """
    txid = tx.txid()
    # Find the first output of `tx` that pays one of our addresses.
    for i, o in enumerate(tx.outputs()):
        otype, address, value = o
        if otype == TYPE_ADDRESS and self.is_mine(address):
            break
    else:
        return
    coins = self.get_addr_utxo(address)
    item = coins.get(txid+':%d'%i)
    if not item:
        return
    self.add_input_info(item)
    inputs = [item]
    # Send the value minus the chosen fee back to the same address.
    outputs = [(TYPE_ADDRESS, address, value - fee)]
    locktime = self.get_local_height()
    # note: no need to call tx.BIP_LI01_sort() here - single input/output
    return Transaction.from_io(inputs, outputs, locktime=locktime)
def add_input_info(self, txin):
    """Fill `txin` in place with script type, value and signing info."""
    address = txin['address']
    if self.is_mine(address):
        txin['type'] = self.get_txin_type(address)
        # segwit needs value to sign
        if txin.get('value') is None and Transaction.is_segwit_input(txin):
            received, spent = self.get_addr_io(address)
            item = received.get(txin['prevout_hash']+':%d'%txin['prevout_n'])
            tx_height, value, is_cb = item
            txin['value'] = value
        self.add_input_sig_info(txin, address)
def can_sign(self, tx):
    """Whether any of our keystores can add a signature to `tx`."""
    if tx.is_complete():
        # Fully signed already: nothing left to do.
        return False
    return any(keystore.can_sign(tx) for keystore in self.get_keystores())
def get_input_tx(self, tx_hash):
    """Return the Transaction `tx_hash`, fetching it over the network
    when the wallet does not have it locally."""
    # First look up an input transaction in the wallet where it
    # will likely be. If co-signing a transaction it may not have
    # all the input txs, in which case we ask the network.
    tx = self.transactions.get(tx_hash)
    if not tx and self.network:
        request = ('blockchain.transaction.get', [tx_hash])
        tx = Transaction(self.network.synchronous_get(request))
    return tx
def add_hw_info(self, tx):
    """Attach the extra metadata hardware wallets need to sign `tx`."""
    # add previous tx for hw wallets
    for txin in tx.inputs():
        tx_hash = txin['prevout_hash']
        txin['prev_tx'] = self.get_input_tx(tx_hash)
    # add output info for hw wallets
    info = {}
    xpubs = self.get_master_public_keys()
    for txout in tx.outputs():
        _type, addr, amount = txout
        if self.is_change(addr):
            index = self.get_address_index(addr)
            pubkeys = self.get_public_keys(addr)
            # sort xpubs using the order of pubkeys
            sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
            # (index, xpubs, m) -- m is only meaningful for multisig.
            info[addr] = index, sorted_xpubs, self.m if isinstance(self, Multisig_Wallet) else None
    tx.output_info = info
def sign_transaction(self, tx, password):
    """Sign `tx` in place with every keystore able to do so.

    A keystore raising UserCancelled (e.g. a hardware device) is
    skipped; signing continues with the remaining keystores.
    """
    if self.is_watching_only():
        return
    # hardware wallets require extra info
    if any([(isinstance(k, Hardware_KeyStore) and k.can_sign(tx)) for k in self.get_keystores()]):
        self.add_hw_info(tx)
    # sign
    for k in self.get_keystores():
        try:
            if k.can_sign(tx):
                k.sign_transaction(tx, password)
        except UserCancelled:
            continue
def get_unused_addresses(self):
    # fixme: use slots from expired requests
    """Receiving addresses with no history and no pending payment request."""
    requested = self.receive_requests.keys()
    return [addr for addr in self.get_receiving_addresses()
            if not self.history.get(addr) and addr not in requested]

def get_unused_address(self):
    """First unused receiving address, or None when none are free."""
    unused = self.get_unused_addresses()
    return unused[0] if unused else None
def get_receiving_address(self):
    """Return a receiving address, preferring fresh ones.

    Preference order: no history and no payment request; then no
    history but requested; then the first address. Returns None only
    when the wallet has no receiving addresses at all.
    """
    domain = self.get_receiving_addresses()
    if not domain:
        return None
    fallback = domain[0]
    for addr in domain:
        if self.history.get(addr):
            continue
        if addr not in self.receive_requests.keys():
            return addr
        # Fresh but already requested: remember it as a fallback.
        fallback = addr
    return fallback
def get_payment_status(self, address, amount):
    """Check whether at least `amount` satoshis were received at `address`.

    Returns (True, conf) where conf is the confirmation count of the
    least-confirmed output needed to reach `amount`, else (False, None).
    """
    local_height = self.get_local_height()
    received, sent = self.get_addr_io(address)
    conf_values = []
    for txo, (tx_height, value, is_cb) in received.items():
        txid, n = txo.split(':')
        info = self.verified_tx.get(txid)
        if info:
            height, timestamp, pos = info
            confirmations = local_height - height
        else:
            # Unverified txs count as unconfirmed.
            confirmations = 0
        conf_values.append((confirmations, value))
    # Accumulate the most-confirmed outputs first.
    total = 0
    for confirmations, value in reversed(sorted(conf_values)):
        total += value
        if total >= amount:
            return True, confirmations
    return False, None
def get_payment_request(self, addr, config):
    """Return a serializable payment-request dict for `addr`, or None.

    Adds a BIP21 URI, live status/confirmations, and — when a BIP70
    request file exists under config['requests_dir'] — the request and
    index URLs plus websocket announcement settings.
    """
    r = self.receive_requests.get(addr)
    if not r:
        return
    out = copy.copy(r)
    out['URI'] = 'bitcoin:' + addr + '?amount=' + format_satoshis(out.get('amount'))
    status, conf = self.get_request_status(addr)
    out['status'] = status
    if conf is not None:
        out['confirmations'] = conf
    # check if bip70 file exists
    rdir = config.get('requests_dir')
    if rdir:
        key = out.get('id', addr)
        # Requests are sharded by the first two characters of the key.
        path = os.path.join(rdir, 'req', key[0], key[1], key)
        if os.path.exists(path):
            baseurl = 'file://' + rdir
            rewrite = config.get('url_rewrite')
            if rewrite:
                baseurl = baseurl.replace(*rewrite)
            out['request_url'] = os.path.join(baseurl, 'req', key[0], key[1], key, key)
            out['URI'] += '&r=' + out['request_url']
            out['index_url'] = os.path.join(baseurl, 'index.html') + '?id=' + key
            websocket_server_announce = config.get('websocket_server_announce')
            if websocket_server_announce:
                out['websocket_server'] = websocket_server_announce
            else:
                out['websocket_server'] = config.get('websocket_server', 'localhost')
            websocket_port_announce = config.get('websocket_port_announce')
            if websocket_port_announce:
                out['websocket_port'] = websocket_port_announce
            else:
                out['websocket_port'] = config.get('websocket_port', 9999)
    return out
def get_request_status(self, key):
    """Return (status, conf) for the payment request `key`.

    status is one of the PR_* constants; conf is the confirmation count
    when the request is paid and verifiable, else None.
    """
    r = self.receive_requests.get(key)
    if r is None:
        return PR_UNKNOWN
    address = r['address']
    amount = r.get('amount')
    timestamp = r.get('time', 0)
    # Defend against corrupted/legacy records with non-int timestamps.
    if timestamp and type(timestamp) != int:
        timestamp = 0
    expiration = r.get('exp')
    if expiration and type(expiration) != int:
        expiration = 0
    conf = None
    if amount:
        if self.up_to_date:
            paid, conf = self.get_payment_status(address, amount)
            status = PR_PAID if paid else PR_UNPAID
            if status == PR_UNPAID and expiration is not None and time.time() > timestamp + expiration:
                status = PR_EXPIRED
        else:
            # Cannot decide while the wallet is still synchronizing.
            status = PR_UNKNOWN
    else:
        status = PR_UNKNOWN
    return status, conf
def make_payment_request(self, addr, amount, message, expiration):
    """Build a payment-request dict; its id is a short hash of
    address plus creation time."""
    timestamp = int(time.time())
    _id = bh2u(Hash(addr + "%d"%timestamp))[0:10]
    r = {'time':timestamp, 'amount':amount, 'exp':expiration, 'address':addr, 'memo':message, 'id':_id}
    return r
def sign_payment_request(self, key, alias, alias_addr, password):
    """Sign the BIP70 request `key` with the private key of `alias_addr`
    and persist the signature and pki data."""
    req = self.receive_requests.get(key)
    alias_privkey = self.export_private_key(alias_addr, password)[0]
    pr = paymentrequest.make_unsigned_request(req)
    paymentrequest.sign_request_with_alias(pr, alias, alias_privkey)
    req['name'] = pr.pki_data
    req['sig'] = bh2u(pr.signature)
    self.receive_requests[key] = req
    self.storage.put('payment_requests', self.receive_requests)
def add_payment_request(self, req, config):
    """Store payment request `req`, optionally writing BIP70 files.

    When config['requests_dir'] is set and the request has an amount,
    a serialized request and its JSON form are written into a sharded
    directory tree under that folder.
    """
    addr = req['address']
    amount = req.get('amount')
    message = req.get('memo')
    self.receive_requests[addr] = req
    self.storage.put('payment_requests', self.receive_requests)
    self.set_label(addr, message) # should be a default label
    rdir = config.get('requests_dir')
    if rdir and amount is not None:
        key = req.get('id', addr)
        pr = paymentrequest.make_request(config, req)
        path = os.path.join(rdir, 'req', key[0], key[1], key)
        if not os.path.exists(path):
            try:
                os.makedirs(path)
            except OSError as exc:
                # Another process may have created it concurrently.
                if exc.errno != errno.EEXIST:
                    raise
        with open(os.path.join(path, key), 'wb') as f:
            f.write(pr.SerializeToString())
        # reload
        req = self.get_payment_request(addr, config)
        with open(os.path.join(path, key + '.json'), 'w') as f:
            f.write(json.dumps(req))
    return req
def remove_payment_request(self, addr, config):
    """Delete the payment request for `addr` (and its BIP70 files).

    Returns False when no such request exists.
    """
    if addr not in self.receive_requests:
        return False
    r = self.receive_requests.pop(addr)
    rdir = config.get('requests_dir')
    if rdir:
        key = r.get('id', addr)
        # Remove both the serialized request and its .json sibling.
        for s in ['.json', '']:
            n = os.path.join(rdir, 'req', key[0], key[1], key, key + s)
            if os.path.exists(n):
                os.unlink(n)
    self.storage.put('payment_requests', self.receive_requests)
    return True
def get_sorted_requests(self, config):
    """Return all payment requests, sorted by address derivation index.

    Requests whose address has no index (or whose lookup fails) fall
    back to sorting by the address value itself.
    """
    def sort_key(req):
        # Fix: the original used a bare `except:` returning a possibly
        # unbound local `addr`, which raised NameError whenever
        # `req.get` itself failed (e.g. req is None).
        addr = req.get('address') if req else None
        try:
            return self.get_address_index(addr) or addr
        except Exception:
            return addr
    requests = [self.get_payment_request(addr, config)
                for addr in self.receive_requests.keys()]
    return sorted(requests, key=sort_key)
def get_fingerprint(self):
    # Subclasses return a string identifying the wallet's keys.
    raise NotImplementedError()

def can_import_privkey(self):
    # Overridden by wallets that support importing private keys.
    return False

def can_import_address(self):
    # Overridden by wallets that support importing watch-only addresses.
    return False

def can_delete_address(self):
    # Overridden by wallets whose addresses can be removed.
    return False
def add_address(self, address):
    """Start tracking `address`: give it an empty history entry and
    subscribe it with the synchronizer when one is running."""
    self.history.setdefault(address, [])
    if self.synchronizer:
        self.synchronizer.add(address)
def has_password(self):
    """Whether any password protection is active (keystore or storage)."""
    keystore_pw = self.has_keystore_encryption()
    return keystore_pw or self.has_storage_encryption()

def can_have_keystore_encryption(self):
    """Whether a keystore exists and supports password protection."""
    if not self.keystore:
        return self.keystore
    return self.keystore.may_have_password()
def get_available_storage_encryption_version(self):
    """Returns the type of storage encryption offered to the user.
    A wallet file (storage) is either encrypted with this version
    or is stored in plaintext.
    """
    if isinstance(self.keystore, Hardware_KeyStore):
        # Hardware wallets encrypt storage with an xpub-derived key
        # instead of a user password.
        return STO_EV_XPUB_PW
    else:
        return STO_EV_USER_PW
def has_keystore_encryption(self):
    """Returns whether encryption is enabled for the keystore.
    If True, e.g. signing a transaction will require a password.
    """
    if not self.can_have_keystore_encryption():
        return False
    return self.storage.get('use_encryption', False)

def has_storage_encryption(self):
    """Returns whether encryption is enabled for the wallet file on disk."""
    return self.storage.is_encrypted()

@classmethod
def may_have_password(cls):
    # Subclasses that can never have a password override this.
    return True
def check_password(self, password):
    """Raise an exception unless `password` unlocks both the keystore
    (when it is encrypted) and the wallet storage."""
    keystore_is_encrypted = self.has_keystore_encryption()
    if keystore_is_encrypted:
        self.keystore.check_password(password)
    self.storage.check_password(password)
def update_password(self, old_pw, new_pw, encrypt_storage=False):
    """Change the wallet password from `old_pw` to `new_pw`.

    encrypt_storage selects whether the on-disk file is also encrypted.
    Raises InvalidPassword when `old_pw` is wrong or missing.
    """
    if old_pw is None and self.has_password():
        raise InvalidPassword()
    self.check_password(old_pw)
    if encrypt_storage:
        enc_version = self.get_available_storage_encryption_version()
    else:
        enc_version = STO_EV_PLAINTEXT
    self.storage.set_password(new_pw, enc_version)
    # note: Encrypting storage with a hw device is currently only
    #       allowed for non-multisig wallets. Further,
    #       Hardware_KeyStore.may_have_password() == False.
    #       If these were not the case,
    #       extra care would need to be taken when encrypting keystores.
    self._update_password_for_keystore(old_pw, new_pw)
    encrypt_keystore = self.can_have_keystore_encryption()
    self.storage.set_keystore_encryption(bool(new_pw) and encrypt_keystore)
    self.storage.write()
def sign_message(self, address, message, password):
    """Sign `message` with the private key belonging to `address`."""
    sequence = self.get_address_index(address)
    return self.keystore.sign_message(sequence, message, password)

def decrypt_message(self, pubkey, message, password):
    """Decrypt `message` with the private key behind `pubkey`."""
    address = self.pubkeys_to_address(pubkey)
    sequence = self.get_address_index(address)
    return self.keystore.decrypt_message(sequence, message, password)
def get_depending_transactions(self, tx_hash):
    """Returns all (grand-)children of tx_hash in this wallet."""
    descendants = set()
    for candidate_hash, candidate_tx in self.transactions.items():
        spends_parent = any(txin["prevout_hash"] == tx_hash
                            for txin in candidate_tx.inputs())
        if spends_parent:
            descendants.add(candidate_hash)
            # Recurse to collect grandchildren as well.
            descendants |= self.get_depending_transactions(candidate_hash)
    return descendants
def txin_value(self, txin):
    """Value in satoshis of the output spent by `txin`.

    Raises BaseException when the spent output is not one of ours.
    """
    spent_txid = txin['prevout_hash']
    spent_index = txin['prevout_n']
    for address, outputs in self.txo[spent_txid].items():
        for out_n, out_value, is_coinbase in outputs:
            if out_n == spent_index:
                return out_value
    raise BaseException('unknown txin value')
def price_at_timestamp(self, txid, price_func):
    """Fiat price at the time `txid` confirmed; uses the current time
    when the tx has no timestamp yet."""
    height, conf, timestamp = self.get_tx_height(txid)
    when = timestamp if timestamp else time.time()
    return price_func(when)
def unrealized_gains(self, domain, price_func, ccy):
    """Fiat gain of the current UTXOs versus their acquisition price."""
    coins = self.get_utxos(domain)
    now = time.time()
    p = price_func(now)
    # ap: total acquisition price of the coins;
    # lp: their liquidation value at the current exchange rate.
    ap = sum(self.coin_price(coin['prevout_hash'], price_func, ccy, self.txin_value(coin)) for coin in coins)
    lp = sum([coin['value'] for coin in coins]) * p / Decimal(COIN)
    return lp - ap
def capital_gain(self, txid, price_func, ccy):
    """
    Difference between the fiat price of coins leaving the wallet because of transaction txid,
    and the price of these coins when they entered the wallet.
    price_func: function that returns the fiat price given a timestamp
    """
    # get_tx_value is negative for outgoing txs; flip the sign.
    out_value = - self.get_tx_value(txid)/Decimal(COIN)
    fiat_value = self.get_fiat_value(txid, ccy)
    # Prefer the user-recorded fiat value; fall back to historical rate.
    liquidation_price = - fiat_value if fiat_value else out_value * self.price_at_timestamp(txid, price_func)
    acquisition_price = out_value * self.average_price(txid, price_func, ccy)
    return acquisition_price, liquidation_price
def average_price(self, txid, price_func, ccy):
    """ Average acquisition price of the inputs of a transaction """
    input_value = 0
    total_price = 0
    for addr, d in self.txi.get(txid, {}).items():
        for ser, v in d:
            input_value += v
            # ser is "txid:n"; price each input by its funding tx.
            total_price += self.coin_price(ser.split(':')[0], price_func, ccy, v)
    return total_price / (input_value/Decimal(COIN))
def coin_price(self, txid, price_func, ccy, txin_value):
    """
    Acquisition price of a coin.
    This assumes that either all inputs are mine, or no input is mine.
    """
    if self.txi.get(txid, {}) != {}:
        # Self-transfer: price recursively from the funding inputs.
        return self.average_price(txid, price_func, ccy) * txin_value/Decimal(COIN)
    else:
        # Incoming coin: use the recorded fiat value if any, otherwise
        # the historical exchange rate at confirmation time.
        fiat_value = self.get_fiat_value(txid, ccy)
        if fiat_value is not None:
            return fiat_value
        else:
            p = self.price_at_timestamp(txid, price_func)
            return p * txin_value/Decimal(COIN)
class Simple_Wallet(Abstract_Wallet):
    """Wallet backed by exactly one keystore."""

    def get_keystore(self):
        """The wallet's single keystore."""
        return self.keystore

    def get_keystores(self):
        """All keystores, as a one-element list."""
        return [self.keystore]

    def is_watching_only(self):
        """Delegate to the keystore: watch-only iff it holds no secrets."""
        return self.keystore.is_watching_only()

    def _update_password_for_keystore(self, old_pw, new_pw):
        """Re-encrypt the keystore under `new_pw` and persist it."""
        keystore = self.keystore
        if keystore and keystore.may_have_password():
            keystore.update_password(old_pw, new_pw)
            self.save_keystore()

    def save_keystore(self):
        """Write the serialized keystore to wallet storage."""
        self.storage.put('keystore', self.keystore.dump())
class Imported_Wallet(Simple_Wallet):
    # wallet made of imported addresses
    wallet_type = 'imported'
    txin_type = 'address'

    def __init__(self, storage):
        Abstract_Wallet.__init__(self, storage)

    def is_watching_only(self):
        # Watch-only unless private keys were imported into a keystore.
        return self.keystore is None

    def get_keystores(self):
        return [self.keystore] if self.keystore else []

    def can_import_privkey(self):
        return bool(self.keystore)

    def load_keystore(self):
        self.keystore = load_keystore(self.storage, 'keystore') if self.storage.get('keystore') else None

    def save_keystore(self):
        self.storage.put('keystore', self.keystore.dump())

    def load_addresses(self):
        # self.addresses maps address -> {'type', 'pubkey', 'redeem_script'}.
        self.addresses = self.storage.get('addresses', {})
        # fixme: a reference to addresses is needed
        if self.keystore:
            self.keystore.addresses = self.addresses

    def save_addresses(self):
        self.storage.put('addresses', self.addresses)

    def can_import_address(self):
        # Watch-only wallets accept bare addresses; key wallets do not.
        return self.is_watching_only()

    def can_delete_address(self):
        return True

    def has_seed(self):
        return False

    def is_deterministic(self):
        return False

    def is_change(self, address):
        # Imported wallets have no change chain.
        return False

    def get_master_public_keys(self):
        return []

    def is_beyond_limit(self, address):
        # No gap limit for imported wallets.
        return False

    def is_mine(self, address):
        return address in self.addresses

    def get_fingerprint(self):
        return ''

    def get_addresses(self, include_change=False):
        return sorted(self.addresses.keys())

    def get_receiving_addresses(self):
        return self.get_addresses()

    def get_change_addresses(self):
        return []

    def import_address(self, address):
        """Add a watch-only address; returns it, or '' when invalid/known."""
        if not bitcoin.is_address(address):
            return ''
        if address in self.addresses:
            return ''
        self.addresses[address] = {}
        self.storage.put('addresses', self.addresses)
        self.storage.write()
        self.add_address(address)
        return address

    def delete_address(self, address):
        """Remove `address` and every tx referenced only by it."""
        if address not in self.addresses:
            return
        transactions_to_remove = set()  # only referred to by this address
        transactions_new = set()  # txs that are not only referred to by address
        with self.lock:
            for addr, details in self.history.items():
                if addr == address:
                    for tx_hash, height in details:
                        transactions_to_remove.add(tx_hash)
                else:
                    for tx_hash, height in details:
                        transactions_new.add(tx_hash)
            # Keep txs that other addresses still reference.
            transactions_to_remove -= transactions_new
            self.history.pop(address, None)
            for tx_hash in transactions_to_remove:
                self.remove_transaction(tx_hash)
                self.tx_fees.pop(tx_hash, None)
                self.verified_tx.pop(tx_hash, None)
                self.unverified_tx.pop(tx_hash, None)
                self.transactions.pop(tx_hash, None)
            # FIXME: what about pruned_txo?
            self.storage.put('verified_tx3', self.verified_tx)
        self.save_transactions()
        self.set_label(address, None)
        self.remove_payment_request(address, {})
        self.set_frozen_state([address], False)
        pubkey = self.get_public_key(address)
        self.addresses.pop(address)
        if pubkey:
            # Drop the matching imported key from the keystore too.
            self.keystore.delete_imported_key(pubkey)
            self.save_keystore()
        self.storage.put('addresses', self.addresses)
        self.storage.write()

    def get_address_index(self, address):
        # Imported addresses are indexed by their pubkey, not a path.
        return self.get_public_key(address)

    def get_public_key(self, address):
        return self.addresses[address].get('pubkey')

    def import_private_key(self, sec, pw, redeem_script=None):
        """Import WIF key `sec`; returns the derived address.

        redeem_script is required for (and only valid with) p2sh-type keys.
        """
        try:
            txin_type, pubkey = self.keystore.import_privkey(sec, pw)
        except Exception:
            raise BaseException('Invalid private key', sec)
        if txin_type in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
            if redeem_script is not None:
                raise BaseException('Cannot use redeem script with', txin_type, sec)
            addr = bitcoin.pubkey_to_address(txin_type, pubkey)
        elif txin_type in ['p2sh', 'p2wsh', 'p2wsh-p2sh']:
            if redeem_script is None:
                raise BaseException('Redeem script required for', txin_type, sec)
            addr = bitcoin.redeem_script_to_address(txin_type, redeem_script)
        else:
            raise NotImplementedError(txin_type)
        self.addresses[addr] = {'type':txin_type, 'pubkey':pubkey, 'redeem_script':redeem_script}
        self.save_keystore()
        self.save_addresses()
        self.storage.write()
        self.add_address(addr)
        return addr

    def get_redeem_script(self, address):
        d = self.addresses[address]
        redeem_script = d['redeem_script']
        return redeem_script

    def get_txin_type(self, address):
        return self.addresses[address].get('type', 'address')

    def add_input_sig_info(self, txin, address):
        """Populate x_pubkeys / signatures slots for signing `txin`."""
        if self.is_watching_only():
            # 'fd'-prefixed x_pubkey encodes a bare script (no key known).
            x_pubkey = 'fd' + address_to_script(address)
            txin['x_pubkeys'] = [x_pubkey]
            txin['signatures'] = [None]
            return
        if txin['type'] in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
            pubkey = self.addresses[address]['pubkey']
            txin['num_sig'] = 1
            txin['x_pubkeys'] = [pubkey]
            txin['signatures'] = [None]
        else:
            # NOTE(review): num_sig/num_keys are hard-coded 2-of-3 here
            # for script-hash inputs -- confirm against callers.
            redeem_script = self.addresses[address]['redeem_script']
            num_sig = 2
            num_keys = 3
            txin['num_sig'] = num_sig
            txin['redeem_script'] = redeem_script
            txin['signatures'] = [None] * num_keys

    def pubkeys_to_address(self, pubkey):
        # Reverse lookup: find the stored address for `pubkey`.
        for addr, v in self.addresses.items():
            if v.get('pubkey') == pubkey:
                return addr
class Deterministic_Wallet(Abstract_Wallet):
    """Wallet whose addresses are derived from a seed/xpub, using a
    receiving chain and a change chain with a gap limit."""

    def __init__(self, storage):
        Abstract_Wallet.__init__(self, storage)
        self.gap_limit = storage.get('gap_limit', 20)

    def has_seed(self):
        return self.keystore.has_seed()

    def get_receiving_addresses(self):
        return self.receiving_addresses

    def get_change_addresses(self):
        return self.change_addresses

    def get_seed(self, password):
        return self.keystore.get_seed(password)

    def add_seed(self, seed, pw):
        self.keystore.add_seed(seed, pw)

    def change_gap_limit(self, value):
        '''This method is not called in the code, it is kept for console use'''
        if value >= self.gap_limit:
            self.gap_limit = value
            self.storage.put('gap_limit', self.gap_limit)
            return True
        elif value >= self.min_acceptable_gap():
            # Shrink: drop unused trailing receiving addresses.
            addresses = self.get_receiving_addresses()
            k = self.num_unused_trailing_addresses(addresses)
            n = len(addresses) - k + value
            self.receiving_addresses = self.receiving_addresses[0:n]
            self.gap_limit = value
            self.storage.put('gap_limit', self.gap_limit)
            self.save_addresses()
            return True
        else:
            return False

    def num_unused_trailing_addresses(self, addresses):
        # Count addresses with no history at the end of the list.
        k = 0
        for a in addresses[::-1]:
            if self.history.get(a):break
            k = k + 1
        return k

    def min_acceptable_gap(self):
        # fixme: this assumes wallet is synchronized
        # Longest run of unused addresses among the used ones, plus one.
        n = 0
        nmax = 0
        addresses = self.get_receiving_addresses()
        k = self.num_unused_trailing_addresses(addresses)
        for a in addresses[0:-k]:
            if self.history.get(a):
                n = 0
            else:
                n += 1
                if n > nmax: nmax = n
        return nmax + 1

    def load_addresses(self):
        super().load_addresses()
        self._addr_to_addr_index = {}  # key: address, value: (is_change, index)
        for i, addr in enumerate(self.receiving_addresses):
            self._addr_to_addr_index[addr] = (False, i)
        for i, addr in enumerate(self.change_addresses):
            self._addr_to_addr_index[addr] = (True, i)

    def create_new_address(self, for_change=False):
        """Derive and register the next address on the given chain."""
        assert type(for_change) is bool
        with self.lock:
            addr_list = self.change_addresses if for_change else self.receiving_addresses
            n = len(addr_list)
            x = self.derive_pubkeys(for_change, n)
            address = self.pubkeys_to_address(x)
            addr_list.append(address)
            self._addr_to_addr_index[address] = (for_change, n)
            self.save_addresses()
            self.add_address(address)
            return address

    def synchronize_sequence(self, for_change):
        # Keep generating addresses until the last `limit` are all fresh.
        limit = self.gap_limit_for_change if for_change else self.gap_limit
        while True:
            addresses = self.get_change_addresses() if for_change else self.get_receiving_addresses()
            if len(addresses) < limit:
                self.create_new_address(for_change)
                continue
            if list(map(lambda a: self.address_is_old(a), addresses[-limit:] )) == limit*[False]:
                break
            else:
                self.create_new_address(for_change)

    def synchronize(self):
        # Maintain the gap limit on both chains.
        with self.lock:
            self.synchronize_sequence(False)
            self.synchronize_sequence(True)

    def is_beyond_limit(self, address):
        # True when all `limit` addresses preceding this one are unused.
        is_change, i = self.get_address_index(address)
        addr_list = self.get_change_addresses() if is_change else self.get_receiving_addresses()
        limit = self.gap_limit_for_change if is_change else self.gap_limit
        if i < limit:
            return False
        prev_addresses = addr_list[max(0, i - limit):max(0, i)]
        for addr in prev_addresses:
            if self.history.get(addr):
                return False
        return True

    def is_mine(self, address):
        return address in self._addr_to_addr_index

    def get_address_index(self, address):
        # Returns (is_change, index); raises KeyError for foreign addresses.
        return self._addr_to_addr_index[address]

    def get_master_public_keys(self):
        return [self.get_master_public_key()]

    def get_fingerprint(self):
        return self.get_master_public_key()

    def get_txin_type(self, address):
        return self.txin_type
class Simple_Deterministic_Wallet(Simple_Wallet, Deterministic_Wallet):
    """ Deterministic Wallet with a single pubkey per address """

    def __init__(self, storage):
        Deterministic_Wallet.__init__(self, storage)

    def get_public_key(self, address):
        # Derive the single pubkey from the address's (change, index).
        sequence = self.get_address_index(address)
        pubkey = self.get_pubkey(*sequence)
        return pubkey

    def load_keystore(self):
        self.keystore = load_keystore(self.storage, 'keystore')
        try:
            xtype = bitcoin.xpub_type(self.keystore.xpub)
        except:
            # Keystores without an xpub default to the standard type.
            xtype = 'standard'
        self.txin_type = 'p2pkh' if xtype == 'standard' else xtype

    def get_pubkey(self, c, i):
        # c: change-chain flag, i: address index.
        return self.derive_pubkeys(c, i)

    def add_input_sig_info(self, txin, address):
        # Single-sig: one extended pubkey, one signature slot.
        derivation = self.get_address_index(address)
        x_pubkey = self.keystore.get_xpubkey(*derivation)
        txin['x_pubkeys'] = [x_pubkey]
        txin['signatures'] = [None]
        txin['num_sig'] = 1

    def get_master_public_key(self):
        return self.keystore.get_master_public_key()

    def derive_pubkeys(self, c, i):
        return self.keystore.derive_pubkey(c, i)
class Standard_Wallet(Simple_Deterministic_Wallet):
    # Default single-signature deterministic wallet.
    wallet_type = 'standard'

    def pubkeys_to_address(self, pubkey):
        # Map a derived pubkey to an address of this wallet's script type.
        return bitcoin.pubkey_to_address(self.txin_type, pubkey)
class Multisig_Wallet(Deterministic_Wallet):
# generic m of n
gap_limit = 20
def __init__(self, storage):
self.wallet_type = storage.get('wallet_type')
self.m, self.n = multisig_type(self.wallet_type)
Deterministic_Wallet.__init__(self, storage)
def get_pubkeys(self, c, i):
return self.derive_pubkeys(c, i)
def get_public_keys(self, address):
sequence = self.get_address_index(address)
return self.get_pubkeys(*sequence)
def pubkeys_to_address(self, pubkeys):
redeem_script = self.pubkeys_to_redeem_script(pubkeys)
return bitcoin.redeem_script_to_address(self.txin_type, redeem_script)
def pubkeys_to_redeem_script(self, pubkeys):
return transaction.multisig_script(sorted(pubkeys), self.m)
def get_redeem_script(self, address):
pubkeys = self.get_public_keys(address)
redeem_script = self.pubkeys_to_redeem_script(pubkeys)
return redeem_script
def derive_pubkeys(self, c, i):
return [k.derive_pubkey(c, i) for k in self.get_keystores()]
def load_keystore(self):
self.keystores = {}
for i in range(self.n):
name = 'x%d/'%(i+1)
self.keystores[name] = load_keystore(self.storage, name)
self.keystore = self.keystores['x1/']
xtype = bitcoin.xpub_type(self.keystore.xpub)
self.txin_type = 'p2sh' if xtype == 'standard' else xtype
def save_keystore(self):
for name, k in self.keystores.items():
self.storage.put(name, k.dump())
def get_keystore(self):
return self.keystores.get('x1/')
def get_keystores(self):
return [self.keystores[i] for i in sorted(self.keystores.keys())]
def can_have_keystore_encryption(self):
return any([k.may_have_password() for k in self.get_keystores()])
def _update_password_for_keystore(self, old_pw, new_pw):
for name, keystore in self.keystores.items():
if keystore.may_have_password():
keystore.update_password(old_pw, new_pw)
self.storage.put(name, keystore.dump())
def check_password(self, password):
for name, keystore in self.keystores.items():
if keystore.may_have_password():
keystore.check_password(password)
self.storage.check_password(password)
def get_available_storage_encryption_version(self):
# multisig wallets are not offered hw device encryption
return STO_EV_USER_PW
def has_seed(self):
return self.keystore.has_seed()
def is_watching_only(self):
return not any([not k.is_watching_only() for k in self.get_keystores()])
    def get_master_public_key(self):
        # Master public key of the wallet's own keystore only; see
        # get_master_public_keys() for all cosigners.
        return self.keystore.get_master_public_key()
def get_master_public_keys(self):
return [k.get_master_public_key() for k in self.get_keystores()]
def get_fingerprint(self):
return ''.join(sorted(self.get_master_public_keys()))
def add_input_sig_info(self, txin, address):
# x_pubkeys are not sorted here because it would be too slow
# they are sorted in transaction.get_sorted_pubkeys
# pubkeys is set to None to signal that x_pubkeys are unsorted
derivation = self.get_address_index(address)
txin['x_pubkeys'] = [k.get_xpubkey(*derivation) for k in self.get_keystores()]
txin['pubkeys'] = None
# we need n place holders
txin['signatures'] = [None] * self.n
txin['num_sig'] = self.m
# Registry of known wallet type names; plugins may extend it at runtime
# via register_wallet_type().
wallet_types = ['standard', 'multisig', 'imported']

def register_wallet_type(category):
    """Register an additional wallet type name (used by plugins)."""
    wallet_types.append(category)
# Maps a stored wallet_type string to the class that implements it;
# plugins may extend this table via register_constructor().
wallet_constructors = {
    'standard': Standard_Wallet,
    'old': Standard_Wallet,
    'xpub': Standard_Wallet,
    'imported': Imported_Wallet,
}

def register_constructor(wallet_type, constructor):
    """Register the constructor for an additional wallet type (plugins)."""
    wallet_constructors[wallet_type] = constructor
# former WalletFactory
class Wallet(object):
    """The main wallet "entry point".

    This class is actually a factory that will return a wallet of the
    correct type when passed a WalletStorage instance.
    """

    def __new__(cls, storage):
        # The first argument of __new__ is the class itself; the original
        # code named it 'self', which was misleading.
        wallet_type = storage.get('wallet_type')
        WalletClass = Wallet.wallet_class(wallet_type)
        wallet = WalletClass(storage)
        # Convert hardware wallets restored with older versions of
        # Electrum to BIP44 wallets. A hardware wallet does not have
        # a seed and plugins do not need to handle having one.
        rwc = getattr(wallet, 'restore_wallet_class', None)
        if rwc and storage.get('seed', ''):
            storage.print_error("converting wallet type to " + rwc.wallet_type)
            storage.put('wallet_type', rwc.wallet_type)
            wallet = rwc(storage)
        return wallet

    @staticmethod
    def wallet_class(wallet_type):
        """Map a stored wallet_type string to a wallet class.

        Multisig types (e.g. '2of3') take precedence; otherwise the
        registered constructor table is consulted.  Raises RuntimeError
        for unknown types.
        """
        if multisig_type(wallet_type):
            return Multisig_Wallet
        if wallet_type in wallet_constructors:
            return wallet_constructors[wallet_type]
        raise RuntimeError("Unknown wallet type: " + wallet_type)
|
_type, data, value = o
|
<|file_name|>RoleTreeView.java<|end_file_name|><|fim▁begin|>package org.ovirt.engine.ui.uicommonweb.models.configure.roles_ui;
import java.util.ArrayList;
import org.ovirt.engine.core.common.businessentities.ActionGroup;
import org.ovirt.engine.core.common.mode.ApplicationMode;
import org.ovirt.engine.ui.uicommonweb.models.ApplicationModeHelper;
import org.ovirt.engine.ui.uicommonweb.models.common.SelectionTreeNodeModel;
import org.ovirt.engine.ui.uicompat.ConstantsManager;
@SuppressWarnings("unused")
public class RoleTreeView
{
    /**
     * Builds the three-level selection tree shown in the role dialog.
     *
     * Level 1 = object category (System, Data Center, ...), level 2 = operation
     * group, level 3 = individual ActionGroup checkboxes.  For non-admin roles,
     * leaves are filtered against the white-list from GetUserActionGroups();
     * branches left with no children are pruned from the result.
     *
     * @param isReadOnly when true, every node is rendered non-changeable
     * @param isAdmin    when false, only user-level action groups are shown
     * @return the root-level tree nodes to display
     */
    public static ArrayList<SelectionTreeNodeModel> GetRoleTreeView(boolean isReadOnly, boolean isAdmin)
    {
        RoleNode tree = initTreeView();
        ArrayList<ActionGroup> userActionGroups = null;
        if (isAdmin == false)
        {
            // Only needed for filtering user (non-admin) roles.
            userActionGroups = GetUserActionGroups();
        }
        ArrayList<SelectionTreeNodeModel> roleTreeView = new ArrayList<SelectionTreeNodeModel>();
        SelectionTreeNodeModel firstNode = null, secondNode = null, thirdNode = null;
        for (RoleNode first : tree.getLeafRoles())
        {
            firstNode = new SelectionTreeNodeModel();
            firstNode.setTitle(first.getName());
            firstNode.setDescription(first.getName());
            firstNode.setIsChangable(!isReadOnly);
            for (RoleNode second : first.getLeafRoles())
            {
                secondNode = new SelectionTreeNodeModel();
                secondNode.setTitle(second.getName());
                secondNode.setDescription(second.getName());
                secondNode.setIsChangable(!isReadOnly);
                secondNode.setTooltip(second.getTooltip());
                for (RoleNode third : second.getLeafRoles())
                {
                    // Leaf node: the title is the ActionGroup enum name.
                    thirdNode = new SelectionTreeNodeModel();
                    thirdNode.setTitle(third.getName());
                    thirdNode.setDescription(third.getDesc());
                    thirdNode.setIsSelectedNotificationPrevent(true);
                    // thirdNode.IsSelected =
                    // attachedActions.Contains((VdcActionType) Enum.Parse(typeof (VdcActionType), name)); //TODO:
                    // suppose to be action group
                    thirdNode.setIsChangable(!isReadOnly);
                    thirdNode.setIsSelectedNullable(false);
                    thirdNode.setTooltip(third.getTooltip());
                    if (!isAdmin)
                    {
                        // Non-admin roles only see white-listed action groups.
                        if (userActionGroups.contains(ActionGroup.valueOf(thirdNode.getTitle())))
                        {
                            secondNode.getChildren().add(thirdNode);
                        }
                    }
                    else
                    {
                        secondNode.getChildren().add(thirdNode);
                    }
                }
                // Prune operation groups that lost all their leaves to filtering.
                if (secondNode.getChildren().size() > 0)
                {
                    firstNode.getChildren().add(secondNode);
                }
            }
            // Prune categories that ended up empty.
            if (firstNode.getChildren().size() > 0)
            {
                roleTreeView.add(firstNode);
            }
        }
        return roleTreeView;
    }
    /**
     * Returns the white-list of ActionGroups that may appear in a
     * user-level (non-admin) role; used by GetRoleTreeView() to filter
     * the full tree.
     */
    private static ArrayList<ActionGroup> GetUserActionGroups() {
        ArrayList<ActionGroup> array = new ArrayList<ActionGroup>();
        // VM operations
        array.add(ActionGroup.CREATE_VM);
        array.add(ActionGroup.DELETE_VM);
        array.add(ActionGroup.EDIT_VM_PROPERTIES);
        array.add(ActionGroup.VM_BASIC_OPERATIONS);
        array.add(ActionGroup.CHANGE_VM_CD);
        array.add(ActionGroup.MIGRATE_VM);
        array.add(ActionGroup.CONNECT_TO_VM);
        array.add(ActionGroup.CONFIGURE_VM_NETWORK);
        array.add(ActionGroup.CONFIGURE_VM_STORAGE);
        array.add(ActionGroup.MOVE_VM);
        array.add(ActionGroup.MANIPULATE_VM_SNAPSHOTS);
        // Template operations
        array.add(ActionGroup.CREATE_TEMPLATE);
        array.add(ActionGroup.EDIT_TEMPLATE_PROPERTIES);
        array.add(ActionGroup.DELETE_TEMPLATE);
        array.add(ActionGroup.COPY_TEMPLATE);
        array.add(ActionGroup.CONFIGURE_TEMPLATE_NETWORK);
        // VM pool operations
        array.add(ActionGroup.CREATE_VM_POOL);
        array.add(ActionGroup.EDIT_VM_POOL_CONFIGURATION);
        array.add(ActionGroup.DELETE_VM_POOL);
        array.add(ActionGroup.VM_POOL_BASIC_OPERATIONS);
        array.add(ActionGroup.MANIPULATE_PERMISSIONS);
        // Disk operations
        array.add(ActionGroup.CREATE_DISK);
        array.add(ActionGroup.ATTACH_DISK);
        array.add(ActionGroup.DELETE_DISK);
        array.add(ActionGroup.CONFIGURE_DISK_STORAGE);
        array.add(ActionGroup.EDIT_DISK_PROPERTIES);
        // Misc
        array.add(ActionGroup.LOGIN);
        array.add(ActionGroup.CHANGE_VM_CUSTOM_PROPERTIES);
        array.add(ActionGroup.PORT_MIRRORING);
        return array;
    }
private static RoleNode initTreeView()
{
RoleNode tree =
new RoleNode(ConstantsManager.getInstance().getConstants().rootRoleTree(),
new RoleNode[] {
new RoleNode(ConstantsManager.getInstance().getConstants().systemRoleTree(),
new RoleNode(ConstantsManager.getInstance()
.getConstants()
.configureSystemRoleTree(),
new RoleNode[] {
new RoleNode(ActionGroup.MANIPULATE_USERS,
ConstantsManager.getInstance()
.getConstants()
.allowToAddRemoveUsersFromTheSystemRoleTreeTooltip()),
new RoleNode(ActionGroup.MANIPULATE_PERMISSIONS,
ConstantsManager.getInstance()
.getConstants()
.allowToAddRemovePermissionsForUsersOnObjectsInTheSystemRoleTreeTooltip()),
new RoleNode(ActionGroup.MANIPULATE_ROLES,
ConstantsManager.getInstance()
.getConstants()
.allowToDefineConfigureRolesInTheSystemRoleTreeTooltip()),
new RoleNode(ActionGroup.LOGIN,
ConstantsManager.getInstance()
.getConstants()
.allowToLoginToTheSystemRoleTreeTooltip()),
new RoleNode(ActionGroup.CONFIGURE_ENGINE,
ConstantsManager.getInstance()
.getConstants()
.allowToGetOrSetSystemConfigurationRoleTreeTooltip()) })),
new RoleNode(ConstantsManager.getInstance().getConstants().dataCenterRoleTree(),
new RoleNode(ConstantsManager.getInstance()
.getConstants()
.configureDataCenterRoleTree(),
new RoleNode[] {
new RoleNode(ActionGroup.CREATE_STORAGE_POOL,
ConstantsManager.getInstance()
.getConstants()
.allowToCreateDataCenterRoleTreeTooltip()),
new RoleNode(ActionGroup.DELETE_STORAGE_POOL,
ConstantsManager.getInstance()
.getConstants()
.allowToRemoveDataCenterRoleTreeTooltip()),
new RoleNode(ActionGroup.EDIT_STORAGE_POOL_CONFIGURATION,
ConstantsManager.getInstance()
.getConstants()
.allowToModifyDataCenterPropertiesRoleTreeTooltip()),
new RoleNode(ActionGroup.CONFIGURE_STORAGE_POOL_NETWORK,
ConstantsManager.getInstance()
.getConstants()
.allowToConfigureLogicalNetworkPerDataCenterRoleTreeTooltip()) })),
new RoleNode(ConstantsManager.getInstance().getConstants().storageDomainRoleTree(),
new RoleNode(ConstantsManager.getInstance()
.getConstants()
.configureStorageDomainRoleTree(),
new RoleNode[] {
new RoleNode(ActionGroup.CREATE_STORAGE_DOMAIN,
ConstantsManager.getInstance()
.getConstants()
.allowToCreateStorageDomainRoleTreeTooltip()),
new RoleNode(ActionGroup.DELETE_STORAGE_DOMAIN,
ConstantsManager.getInstance()
.getConstants()
.allowToDeleteStorageDomainRoleTreeTooltip()),
new RoleNode(ActionGroup.EDIT_STORAGE_DOMAIN_CONFIGURATION,
ConstantsManager.getInstance()
.getConstants()
.allowToModifyStorageDomainPropertiesRoleTreeTooltip()),
new RoleNode(ActionGroup.MANIPULATE_STORAGE_DOMAIN,<|fim▁hole|> new RoleNode(ConstantsManager.getInstance().getConstants().clusterRoleTree(),
new RoleNode(ConstantsManager.getInstance()
.getConstants()
.configureClusterRoleTree(),
new RoleNode[] {
new RoleNode(ActionGroup.CREATE_CLUSTER,
ConstantsManager.getInstance()
.getConstants()
.allowToCreateNewClusterRoleTreeTooltip()),
new RoleNode(ActionGroup.DELETE_CLUSTER,
ConstantsManager.getInstance()
.getConstants()
.allowToRemoveClusterRoleTreeTooltip()),
new RoleNode(ActionGroup.EDIT_CLUSTER_CONFIGURATION,
ConstantsManager.getInstance()
.getConstants()
.allowToEditClusterPropertiesRoleTreeTooltip()),
new RoleNode(ActionGroup.CONFIGURE_CLUSTER_NETWORK,
ConstantsManager.getInstance()
.getConstants()
.allowToAddRemoveLogicalNetworksForTheClusterRoleTreeTooltip()) })),
new RoleNode(ConstantsManager.getInstance().getConstants().glusterRoleTree(),
new RoleNode(ConstantsManager.getInstance()
.getConstants()
.configureVolumesRoleTree(),
new RoleNode[] {
new RoleNode(ActionGroup.CREATE_GLUSTER_VOLUME,
ConstantsManager.getInstance()
.getConstants()
.allowToCreateGlusterVolumesRoleTree()),
new RoleNode(ActionGroup.MANIPULATE_GLUSTER_VOLUME,
ConstantsManager.getInstance()
.getConstants()
.allowToManipulateGlusterVolumesRoleTree()) })),
new RoleNode(ConstantsManager.getInstance().getConstants().hostRoleTree(),
new RoleNode(ConstantsManager.getInstance()
.getConstants()
.configureHostRoleTree(),
new RoleNode[] {
new RoleNode(ActionGroup.CREATE_HOST,
ConstantsManager.getInstance()
.getConstants()
.allowToAddNewHostToTheClusterRoleTreeTooltip()),
new RoleNode(ActionGroup.DELETE_HOST,
ConstantsManager.getInstance()
.getConstants()
.allowToRemoveExistingHostFromTheClusterRoleTreeTooltip()),
new RoleNode(ActionGroup.EDIT_HOST_CONFIGURATION,
ConstantsManager.getInstance()
.getConstants()
.allowToEditHostPropertiesRoleTreeTooltip()),
new RoleNode(ActionGroup.MANIPUTLATE_HOST,
ConstantsManager.getInstance()
.getConstants()
.allowToChangeHostStatusRoleTreeTooltip()),
new RoleNode(ActionGroup.CONFIGURE_HOST_NETWORK,
ConstantsManager.getInstance()
.getConstants()
.allowToConfigureHostsNetworkPhysicalInterfacesRoleTreeTooltip()) })),
new RoleNode(ConstantsManager.getInstance().getConstants().templateRoleTree(),
new RoleNode[] {
new RoleNode(ConstantsManager.getInstance()
.getConstants()
.basicOperationsRoleTree(),
new RoleNode[] {
new RoleNode(ActionGroup.EDIT_TEMPLATE_PROPERTIES,
ConstantsManager.getInstance()
.getConstants()
.allowToChangeTemplatePropertiesRoleTreeTooltip()),
new RoleNode(ActionGroup.CONFIGURE_TEMPLATE_NETWORK,
ConstantsManager.getInstance()
.getConstants()
.allowToConfigureTemlateNetworkRoleTreeTooltip()) }),
new RoleNode(ConstantsManager.getInstance()
.getConstants()
.provisioningOperationsRoleTree(),
ConstantsManager.getInstance()
.getConstants()
.notePermissionsContainigTheseOperationsShuoldAssociatSdOrAboveRoleTreeTooltip(),
new RoleNode[] {
new RoleNode(ActionGroup.CREATE_TEMPLATE,
ConstantsManager.getInstance()
.getConstants()
.allowToCreateNewTemplateRoleTreeTooltip()),
new RoleNode(ActionGroup.DELETE_TEMPLATE,
ConstantsManager.getInstance()
.getConstants()
.allowToRemoveExistingTemplateRoleTreeTooltip()),
new RoleNode(ActionGroup.IMPORT_EXPORT_VM,
ConstantsManager.getInstance()
.getConstants()
.allowImportExportOperationsRoleTreeTooltip()),
new RoleNode(ActionGroup.COPY_TEMPLATE,
ConstantsManager.getInstance()
.getConstants()
.allowToCopyTemplateBetweenStorageDomainsRoleTreeTooltip()) }) }),
new RoleNode(ConstantsManager.getInstance().getConstants().vmRoleTree(),
new RoleNode[] {
new RoleNode(ConstantsManager.getInstance()
.getConstants()
.basicOperationsRoleTree(),
new RoleNode[] {
new RoleNode(ActionGroup.VM_BASIC_OPERATIONS,
ConstantsManager.getInstance()
.getConstants()
.allowBasicVmOperationsRoleTreeTooltip()),
new RoleNode(ActionGroup.CHANGE_VM_CD,
ConstantsManager.getInstance()
.getConstants()
.allowToAttachCdToTheVmRoleTreeTooltip()),
new RoleNode(ActionGroup.CONNECT_TO_VM,
ConstantsManager.getInstance()
.getConstants()
.allowViewingTheVmConsoleScreenRoleTreeTooltip()) }),
new RoleNode(ConstantsManager.getInstance()
.getConstants()
.provisioningOperationsRoleTree(),
ConstantsManager.getInstance()
.getConstants()
.notePermissionsContainigTheseOperationsShuoldAssociatSdOrAboveRoleTreeTooltip(),
new RoleNode[] {
new RoleNode(ActionGroup.EDIT_VM_PROPERTIES,
ConstantsManager.getInstance()
.getConstants()
.allowChangeVmPropertiesRoleTreeTooltip()),
new RoleNode(ActionGroup.CREATE_VM,
ConstantsManager.getInstance()
.getConstants()
.allowToCreateNewVmsRoleTreeTooltip()),
new RoleNode(ActionGroup.DELETE_VM,
ConstantsManager.getInstance()
.getConstants()
.allowToRemoveVmsFromTheSystemRoleTreeTooltip()),
new RoleNode(ActionGroup.IMPORT_EXPORT_VM,
ConstantsManager.getInstance()
.getConstants()
.allowImportExportOperationsRoleTreeTooltip()),
new RoleNode(ActionGroup.CONFIGURE_VM_NETWORK,
ConstantsManager.getInstance()
.getConstants()
.allowToConfigureVMsNetworkRoleTreeTooltip()),
new RoleNode(ActionGroup.CONFIGURE_VM_STORAGE,
ConstantsManager.getInstance()
.getConstants()
.allowToAddRemoveDiskToTheVmRoleTreeTooltip()),
new RoleNode(ActionGroup.MANIPULATE_VM_SNAPSHOTS,
ConstantsManager.getInstance()
.getConstants()
.allowToCreateDeleteSnapshotsOfTheVmRoleTreeTooltip()) }),
new RoleNode(ConstantsManager.getInstance()
.getConstants()
.administrationOperationsRoleTree(),
ConstantsManager.getInstance()
.getConstants()
.notePermissionsContainigTheseOperationsShuoldAssociatDcOrEqualRoleTreeTooltip(),
new RoleNode[] {
new RoleNode(ActionGroup.MOVE_VM,
ConstantsManager.getInstance()
.getConstants()
.allowToMoveVmImageToAnotherStorageDomainRoleTreeTooltip()),
new RoleNode(ActionGroup.MIGRATE_VM,
ConstantsManager.getInstance()
.getConstants()
.allowMigratingVmBetweenHostsInClusterRoleTreeTooltip()),
new RoleNode(ActionGroup.CHANGE_VM_CUSTOM_PROPERTIES,
ConstantsManager.getInstance()
.getConstants()
.allowMigratingVmBetweenHostsInClusterRoleTreeTooltip()),
new RoleNode(ActionGroup.PORT_MIRRORING,
ConstantsManager.getInstance()
.getConstants()
.allowVmNetworkPortMirroringRoleTreeTooltip()) }) }),
new RoleNode(ConstantsManager.getInstance().getConstants().vmPoolRoleTree(),
new RoleNode[] {
new RoleNode(ConstantsManager.getInstance()
.getConstants()
.basicOperationsRoleTree(),
new RoleNode[] { new RoleNode(ActionGroup.VM_POOL_BASIC_OPERATIONS,
ConstantsManager.getInstance()
.getConstants()
.allowToRunPauseStopVmFromVmPoolRoleTreeTooltip()) }),
new RoleNode(ConstantsManager.getInstance()
.getConstants()
.provisioningOperationsRoleTree(),
ConstantsManager.getInstance()
.getConstants()
.notePermissionsContainigTheseOperationsShuoldAssociatSdOrAboveRoleTreeTooltip(),
new RoleNode[] {
new RoleNode(ActionGroup.CREATE_VM_POOL,
ConstantsManager.getInstance()
.getConstants()
.allowToCreateVmPoolRoleTreeTooltip()),
new RoleNode(ActionGroup.DELETE_VM_POOL,
ConstantsManager.getInstance()
.getConstants()
.allowToDeleteVmPoolRoleTreeTooltip()),
new RoleNode(ActionGroup.EDIT_VM_POOL_CONFIGURATION,
ConstantsManager.getInstance()
.getConstants()
.allowToChangePropertiesOfTheVmPoolRoleTreeTooltip()) }) }),
new RoleNode(ConstantsManager.getInstance().getConstants().diskRoleTree(),
new RoleNode[] {
new RoleNode(ConstantsManager.getInstance()
.getConstants()
.provisioningOperationsRoleTree(),
ConstantsManager.getInstance()
.getConstants()
.notePermissionsContainingOperationsRoleTreeTooltip(),
new RoleNode[] {
new RoleNode(ActionGroup.CREATE_DISK,
ConstantsManager.getInstance()
.getConstants()
.allowToCreateDiskRoleTreeTooltip()),
new RoleNode(ActionGroup.DELETE_DISK,
ConstantsManager.getInstance()
.getConstants()
.allowToDeleteDiskRoleTreeTooltip()),
new RoleNode(ActionGroup.CONFIGURE_DISK_STORAGE,
ConstantsManager.getInstance()
.getConstants()
.allowToMoveDiskToAnotherStorageDomainRoleTreeTooltip()),
new RoleNode(ActionGroup.ATTACH_DISK,
ConstantsManager.getInstance()
.getConstants()
.allowToAttachDiskToVmRoleTreeTooltip()),
new RoleNode(ActionGroup.EDIT_DISK_PROPERTIES,
ConstantsManager.getInstance()
.getConstants()
.allowToChangePropertiesOfTheDiskRoleTreeTooltip()) }) }) });
// nothing to filter
if (!ApplicationModeHelper.getUiMode().equals(ApplicationMode.AllModes)) {
ApplicationModeHelper.filterActionGroupTreeByApplictionMode(tree);
}
return tree;
}
}<|fim▁end|>
|
ConstantsManager.getInstance()
.getConstants()
.allowToChangeStorageDomainStatusRoleTreeTooltip()) })),
|
<|file_name|>name-list.service.spec.ts<|end_file_name|><|fim▁begin|>import { NameListService } from "./name-list.service";
<|fim▁hole|> beforeEach(() => {
nameListService = new NameListService;
});
it("should return the list of names", () => {
let names = nameListService.get();
expect(names).toEqual(jasmine.any(Array));
});
});
}<|fim▁end|>
|
export function main() {
describe("NameList Service", () => {
let nameListService: NameListService;
|
<|file_name|>messages.cpp<|end_file_name|><|fim▁begin|>/*****************************************************************************
* messages.cpp : Information about an item
****************************************************************************
* Copyright (C) 2006-2011 the VideoLAN team
* $Id$
*
* Authors: Jean-Baptiste Kempf <jb (at) videolan.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
*****************************************************************************/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include "dialogs/messages.hpp"
#include <QPlainTextEdit>
#include <QTextCursor>
#include <QTextBlock>
#include <QFileDialog>
#include <QTextStream>
#include <QMessageBox>
#include <QTabWidget>
#include <QTreeWidget>
#include <QTreeWidgetItem>
#include <QMutex>
#include <QLineEdit>
#include <QScrollBar>
#include <QMutex>
#include <QMutexLocker>
#include <assert.h>
enum {
    /* Custom Qt event type used to marshal libvlc log messages onto
     * the GUI thread via QApplication::postEvent(). */
    MsgEvent_Type = QEvent::User + MsgEventTypeOffset + 1,
};
/* Carrier object for one log message, posted from the libvlc message
 * callback (arbitrary thread) to MessagesDialog through the Qt event
 * loop; consumed in MessagesDialog::customEvent(). */
class MsgEvent : public QEvent
{
public:
    MsgEvent( int, const msg_item_t *, const char * );

    int priority;        /* VLC_MSG_INFO / ERR / WARN / DBG */
    uintptr_t object_id;
    QString object_type;
    QString header;
    QString module;      /* emitting module name */
    QString text;        /* formatted message body */
};
/* Copy all message fields into Qt types (qfu = UTF-8 -> QString) so the
 * event owns its data independently of the libvlc message lifetime. */
MsgEvent::MsgEvent( int type, const msg_item_t *msg, const char *text )
    : QEvent( (QEvent::Type)MsgEvent_Type ),
      priority( type ),
      object_id( msg->i_object_id ),
      object_type( qfu(msg->psz_object_type) ),
      header( qfu(msg->psz_header) ),
      module( qfu(msg->psz_module) ),
      text( qfu(text) )
{
}
MessagesDialog::MessagesDialog( intf_thread_t *_p_intf)
: QVLCFrame( _p_intf )
{
setWindowTitle( qtr( "Messages" ) );
setWindowRole( "vlc-messages" );
/* Build Ui */
ui.setupUi( this );
ui.bottomButtonsBox->addButton( new QPushButton( qtr("&Close"), this ),
QDialogButtonBox::RejectRole );
/* Modules tree */
ui.modulesTree->setHeaderHidden( true );
/* Buttons and general layout */
ui.saveLogButton->setToolTip( qtr( "Saves all the displayed logs to a file" ) );
int i_verbosity = var_InheritInteger( p_intf, "verbose" );
changeVerbosity( i_verbosity );
ui.verbosityBox->setValue( qMin( i_verbosity, 2 ) );
getSettings()->beginGroup( "Messages" );
ui.filterEdit->setText( getSettings()->value( "messages-filter" ).toString() );
getSettings()->endGroup();<|fim▁hole|> updateButton->setFlat( true );
ui.mainTab->setCornerWidget( updateButton );
#ifndef NDEBUG
QWidget *pldebugTab = new QWidget();
QVBoxLayout *pldebugTabLayout = new QVBoxLayout();
pldebugTab->setLayout( pldebugTabLayout );
ui.mainTab->addTab( pldebugTab, "Playlist Tree" );
pldebugTree = new QTreeWidget();
pldebugTree->headerItem()->setText( 0, "Name" );
pldebugTree->headerItem()->setText( 1, "PL id" );
pldebugTree->headerItem()->setText( 2, "Item id" );
pldebugTree->headerItem()->setText( 3, "PL flags" );
pldebugTree->headerItem()->setText( 4, "Item flags" );
pldebugTree->setColumnCount( 5 );
pldebugTabLayout->addWidget( pldebugTree );
#endif
tabChanged(0);
BUTTONACT( updateButton, updateOrClear() );
BUTTONACT( ui.saveLogButton, save() );
CONNECT( ui.filterEdit, editingFinished(), this, updateConfig() );
CONNECT( ui.filterEdit, textChanged(QString), this, filterMessages() );
CONNECT( ui.bottomButtonsBox, rejected(), this, hide() );
CONNECT( ui.verbosityBox, valueChanged( int ),
this, changeVerbosity( int ) );
CONNECT( ui.mainTab, currentChanged( int ), this, tabChanged( int ) );
/* General action */
restoreWidgetPosition( "Messages", QSize( 600, 450 ) );
/* Hook up to LibVLC messaging */
vlc_Subscribe( &sub, MsgCallback, this );
buildTree( NULL, VLC_OBJECT( p_intf->p_libvlc ) );
}
MessagesDialog::~MessagesDialog()
{
    /* Persist window geometry and stop receiving libvlc log callbacks
     * before the dialog is destroyed. */
    saveWidgetPosition( "Messages" );
    vlc_Unsubscribe( &sub );
}; /* NOTE(review): stray ';' after the definition — harmless but spurious */
void MessagesDialog::changeVerbosity( int i_verbosity )
{
    /* Atomic store: the value is read concurrently by MsgCallback(),
     * which runs on libvlc threads. */
    vlc_atomic_set( &this->verbosity, i_verbosity );
}
/* Persist the current filter text so it is restored next session
 * (read back in the constructor from the "Messages" settings group). */
void MessagesDialog::updateConfig()
{
    getSettings()->beginGroup( "Messages" );
    getSettings()->setValue( "messages-filter", ui.filterEdit->text() );
    getSettings()->endGroup();
}
/* Re-apply the filter to every already-displayed message: each QTextBlock
 * (one per message) is shown or hidden depending on matchFilter(). */
void MessagesDialog::filterMessages()
{
    QMutexLocker locker( &messageLocker );
    QPlainTextEdit *messages = ui.messages;
    QTextBlock block = messages->document()->firstBlock();

    while( block.isValid() )
    {
        /* Filtering is case-insensitive: block text is lowered here,
         * and matchFilter() lowers the filter string. */
        block.setVisible( matchFilter( block.text().toLower() ) );
        block = block.next();
    }

    /* Consider the whole QTextDocument as dirty now */
    messages->document()->markContentsDirty( 0, messages->document()->characterCount() );

    /* FIXME This solves a bug (Qt?) with the viewport not resizing the
       vertical scroll bar when one or more QTextBlock are hidden */
    QSize vsize = messages->viewport()->size();
    messages->viewport()->resize( vsize + QSize( 1, 1 ) );
    messages->viewport()->resize( vsize );
}
/* A message passes the filter when no filter is set, or when the text
 * (expected already lower-cased by the caller) contains the lower-cased
 * filter string. */
bool MessagesDialog::matchFilter( const QString& text )
{
    const QString& filter = ui.filterEdit->text();
    return filter.isEmpty() || text.contains( filter.toLower() );
}
/* Append one log message to the message view: color-code it by priority,
 * insert it as its own QTextBlock (so it can be hidden by the filter),
 * and keep the view pinned to the bottom if it was there already. */
void MessagesDialog::sinkMessage( const MsgEvent *msg )
{
    QMutexLocker locker( &messageLocker );

    QPlainTextEdit *messages = ui.messages;
    /* Only scroll if the viewport is at the end.
       Don't bug user by auto-changing/losing viewport on insert(). */
    bool b_autoscroll = ( messages->verticalScrollBar()->value()
                          + messages->verticalScrollBar()->pageStep()
                          >= messages->verticalScrollBar()->maximum() );

    /* Copy selected text to the clipboard */
    if( messages->textCursor().hasSelection() )
        messages->copy();

    /* Fix selected text bug */
    if( !messages->textCursor().atEnd() ||
         messages->textCursor().anchor() != messages->textCursor().position() )
         messages->moveCursor( QTextCursor::End );

    /* Start a new logic block so we can hide it on-demand */
    messages->textCursor().insertBlock();
    QString buf = QString( "<i><font color='darkblue'>%1</font>" ).arg( msg->module );

    /* Color the severity label by message priority. */
    switch ( msg->priority )
    {
        case VLC_MSG_INFO:
            buf += "<font color='blue'> info: </font>";
            break;
        case VLC_MSG_ERR:
            buf += "<font color='red'> error: </font>";
            break;
        case VLC_MSG_WARN:
            buf += "<font color='green'> warning: </font>";
            break;
        case VLC_MSG_DBG:
        default:
            buf += "<font color='grey'> debug: </font>";
            break;
    }

    /* Insert the prefix */
    messages->textCursor().insertHtml( buf /* + "</i>" */ );

    /* Insert the message */
    messages->textCursor().insertHtml( msg->text );

    /* Pass the new message thru the filter */
    QTextBlock b = messages->document()->lastBlock();
    b.setVisible( matchFilter( b.text() ) );

    /* Tell the QTextDocument to recompute the size of the given area */
    messages->document()->markContentsDirty( b.position(), b.length() );

    if ( b_autoscroll ) messages->ensureCursorVisible();
}
/* GUI-thread entry point for MsgEvents posted by MsgCallback();
 * simply forwards the message to sinkMessage(). */
void MessagesDialog::customEvent( QEvent *event )
{
    MsgEvent *msge = static_cast<MsgEvent *>(event);

    /* Events of other types should not reach this handler. */
    assert( msge );
    sinkMessage( msge );
}
/* Save the currently *visible* (i.e. filter-matching) log lines to a
 * user-chosen file.  Returns true on success, false if the user
 * cancelled or the file could not be opened for writing. */
bool MessagesDialog::save()
{
    QString saveLogFileName = QFileDialog::getSaveFileName(
            this, qtr( "Save log file as..." ),
            QVLCUserDir( VLC_DOCUMENTS_DIR ),
            qtr( "Texts / Logs (*.log *.txt);; All (*.*) ") );

    if( !saveLogFileName.isNull() )
    {
        QFile file( saveLogFileName );
        if ( !file.open( QFile::WriteOnly | QFile::Text ) ) {
            QMessageBox::warning( this, qtr( "Application" ),
                    qtr( "Cannot write to file %1:\n%2." )
                    .arg( saveLogFileName )
                    .arg( file.errorString() ) );
            return false;
        }

        QTextStream out( &file );

        /* Walk all blocks, writing only those left visible by the filter. */
        QTextBlock block = ui.messages->document()->firstBlock();
        while( block.isValid() )
        {
            if( block.isVisible() )
                out << block.text() << "\n";
            block = block.next();
        }
        return true;
    }
    return false;
}
/* Recursively populate the modules tree with the libvlc object
 * hierarchy rooted at p_obj.  When parentItem is NULL, the item is
 * added at the top level of the tree widget. */
void MessagesDialog::buildTree( QTreeWidgetItem *parentItem,
                                vlc_object_t *p_obj )
{
    QTreeWidgetItem *item;

    if( parentItem )
        item = new QTreeWidgetItem( parentItem );
    else
        item = new QTreeWidgetItem( ui.modulesTree );

    char *name = vlc_object_get_name( p_obj );
    /* Label: object type, optional quoted name, and pointer value in hex. */
    item->setText( 0, QString("%1%2 (0x%3)")
                   .arg( qfu( p_obj->psz_object_type ) )
                   .arg( ( name != NULL )
                         ? QString( " \"%1\"" ).arg( qfu( name ) )
                             : "" )
                   .arg( (uintptr_t)p_obj, 0, 16 )
                 );
    free( name );
    item->setExpanded( true );

    vlc_list_t *l = vlc_list_children( p_obj );
    for( int i=0; i < l->i_count; i++ )
        buildTree( item, l->p_values[i].p_object );
    vlc_list_release( l );
}
/* Corner-button action; its meaning depends on the active tab:
 * tab 0 = clear the message log, tab 1 = rebuild the modules tree,
 * other (debug builds) = refresh the playlist debug tree. */
void MessagesDialog::updateOrClear()
{
    if( ui.mainTab->currentIndex() == 1)
    {
        ui.modulesTree->clear();
        buildTree( NULL, VLC_OBJECT( p_intf->p_libvlc ) );
    }
    else if( ui.mainTab->currentIndex() == 0 )
        ui.messages->clear();
#ifndef NDEBUG
    else
        updatePLTree();
#endif
}
/* Keep the corner button's icon/tooltip in sync with the active tab:
 * "clear" on the messages tab (index 0), "update" on the tree tabs. */
void MessagesDialog::tabChanged( int i )
{
    updateButton->setIcon( i != 0 ? QIcon(":/update") : QIcon(":/toolbar/clear") );
    updateButton->setToolTip( i != 0 ? qtr("Update the tree")
                                     : qtr("Clear the messages") );
}
/* libvlc log subscription callback — runs on arbitrary libvlc threads.
 * Formats the message and posts it to the dialog as a MsgEvent so the
 * actual UI update happens on the GUI thread. */
void MessagesDialog::MsgCallback( void *self, int type, const msg_item_t *item,
                                  const char *format, va_list ap )
{
    MessagesDialog *dialog = (MessagesDialog *)self;
    char *str;
    int verbosity = vlc_atomic_get( &dialog->verbosity );

    /* Drop messages above the configured verbosity, or on formatting
     * failure.  (type - VLC_MSG_ERR) appears to map priority onto the
     * verbosity scale — NOTE(review): confirm against VLC_MSG_* ordering. */
    if( verbosity < 0 || verbosity < (type - VLC_MSG_ERR)
     || unlikely(vasprintf( &str, format, ap ) == -1) )
        return;

    /* Disable thread cancellation while interacting with Qt. */
    int canc = vlc_savecancel();
    QApplication::postEvent( dialog, new MsgEvent( type, item, str ) );
    vlc_restorecancel( canc );
    free( str );
}
#ifndef NDEBUG
/* Debug helper: recursively convert a playlist subtree into tree-widget
 * items (name, playlist id, input id, and both flag fields in hex). */
static QTreeWidgetItem * PLWalk( playlist_item_t *p_node )
{
    QTreeWidgetItem *current = new QTreeWidgetItem();
    current->setText( 0, qfu( p_node->p_input->psz_name ) );
    current->setToolTip( 0, qfu( p_node->p_input->psz_uri ) );
    current->setText( 1, QString("%1").arg( p_node->i_id ) );
    current->setText( 2, QString("%1").arg( p_node->p_input->i_id ) );
    current->setText( 3, QString("0x%1").arg( p_node->i_flags, 0, 16 ) );
    current->setText( 4, QString("0x%1").arg( p_node->p_input->i_type, 0, 16 ) );
    for ( int i = 0; p_node->i_children > 0 && i < p_node->i_children; i++ )
        current->addChild( PLWalk( p_node->pp_children[ i ] ) );
    return current;
}
/* Debug-build only: rebuild the playlist debug tab from the live
 * playlist, holding the playlist lock while walking it. */
void MessagesDialog::updatePLTree()
{
    playlist_t *p_playlist = THEPL;
    pldebugTree->clear();

    PL_LOCK;
    pldebugTree->addTopLevelItem( PLWalk( p_playlist->p_root_category ) );
    PL_UNLOCK;

    pldebugTree->expandAll();
    for ( int i=0; i< 5; i++ )
        pldebugTree->resizeColumnToContents( i );
}
#endif<|fim▁end|>
|
updateButton = new QPushButton( QIcon(":/update"), "" );
|
<|file_name|>helper_blackbox_test.go<|end_file_name|><|fim▁begin|>/*
Copyright 2015 The Kubernetes Authors.
<|fim▁hole|> http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package discovery_test
import (
"bytes"
"encoding/json"
"errors"
"io"
"io/ioutil"
"net/http"
"strings"
"testing"
"k8s.io/client-go/discovery"
"k8s.io/client-go/pkg/api"
"k8s.io/client-go/pkg/api/testapi"
"k8s.io/client-go/pkg/apimachinery/registered"
uapi "k8s.io/client-go/pkg/apis/meta/v1"
"k8s.io/client-go/pkg/runtime"
"k8s.io/client-go/pkg/runtime/schema"
"k8s.io/client-go/rest"
"k8s.io/client-go/rest/fake"
)
func objBody(object interface{}) io.ReadCloser {
output, err := json.MarshalIndent(object, "", "")
if err != nil {
panic(err)
}
return ioutil.NopCloser(bytes.NewReader([]byte(output)))
}
// TestNegotiateVersion exercises discovery.NegotiateVersion against a fake
// REST client, covering: the happy paths (server supports the client default
// or falls back to a mutually supported version), explicit required versions
// that are unsupported on either side, transport failures, and discovery
// endpoints returning 403/404 (in which case negotiation falls back to a
// client-side default or the requested version).
func TestNegotiateVersion(t *testing.T) {
	tests := []struct {
		name            string
		requiredVersion *schema.GroupVersion   // version the caller demands, nil = no requirement
		expectedVersion *schema.GroupVersion   // version NegotiateVersion should return on success
		serverVersions  []string               // versions advertised by the fake server
		clientVersions  []schema.GroupVersion  // versions the client claims to support
		expectErr       func(err error) bool   // nil = no error expected; else must match the error
		sendErr         error                  // transport-level error injected into the fake client
		statusCode      int                    // HTTP status the fake discovery endpoint returns
	}{
		{
			name:            "server supports client default",
			serverVersions:  []string{"version1", registered.GroupOrDie(api.GroupName).GroupVersion.String()},
			clientVersions:  []schema.GroupVersion{{Version: "version1"}, registered.GroupOrDie(api.GroupName).GroupVersion},
			expectedVersion: &schema.GroupVersion{Version: "version1"},
			statusCode:      http.StatusOK,
		},
		{
			name:            "server falls back to client supported",
			serverVersions:  []string{"version1"},
			clientVersions:  []schema.GroupVersion{{Version: "version1"}, registered.GroupOrDie(api.GroupName).GroupVersion},
			expectedVersion: &schema.GroupVersion{Version: "version1"},
			statusCode:      http.StatusOK,
		},
		{
			name:            "explicit version supported",
			requiredVersion: &schema.GroupVersion{Version: "v1"},
			serverVersions:  []string{"/version1", registered.GroupOrDie(api.GroupName).GroupVersion.String()},
			clientVersions:  []schema.GroupVersion{{Version: "version1"}, registered.GroupOrDie(api.GroupName).GroupVersion},
			expectedVersion: &schema.GroupVersion{Version: "v1"},
			statusCode:      http.StatusOK,
		},
		{
			name:            "explicit version not supported on server",
			requiredVersion: &schema.GroupVersion{Version: "v1"},
			serverVersions:  []string{"version1"},
			clientVersions:  []schema.GroupVersion{{Version: "version1"}, registered.GroupOrDie(api.GroupName).GroupVersion},
			expectErr:       func(err error) bool { return strings.Contains(err.Error(), `server does not support API version "v1"`) },
			statusCode:      http.StatusOK,
		},
		{
			name:            "explicit version not supported on client",
			requiredVersion: &schema.GroupVersion{Version: "v1"},
			serverVersions:  []string{"v1"},
			clientVersions:  []schema.GroupVersion{{Version: "version1"}},
			expectErr:       func(err error) bool { return strings.Contains(err.Error(), `client does not support API version "v1"`) },
			statusCode:      http.StatusOK,
		},
		{
			name:           "connection refused error",
			serverVersions: []string{"version1"},
			clientVersions: []schema.GroupVersion{{Version: "version1"}, registered.GroupOrDie(api.GroupName).GroupVersion},
			sendErr:        errors.New("connection refused"),
			expectErr:      func(err error) bool { return strings.Contains(err.Error(), "connection refused") },
			statusCode:     http.StatusOK,
		},
		{
			name:            "discovery fails due to 403 Forbidden errors and thus serverVersions is empty, use default GroupVersion",
			clientVersions:  []schema.GroupVersion{{Version: "version1"}, registered.GroupOrDie(api.GroupName).GroupVersion},
			expectedVersion: &schema.GroupVersion{Version: "version1"},
			statusCode:      http.StatusForbidden,
		},
		{
			name:            "discovery fails due to 404 Not Found errors and thus serverVersions is empty, use requested GroupVersion",
			requiredVersion: &schema.GroupVersion{Version: "version1"},
			clientVersions:  []schema.GroupVersion{{Version: "version1"}, registered.GroupOrDie(api.GroupName).GroupVersion},
			expectedVersion: &schema.GroupVersion{Version: "version1"},
			statusCode:      http.StatusNotFound,
		},
		{
			name:       "discovery fails due to 403 Forbidden errors and thus serverVersions is empty, no fallback GroupVersion",
			expectErr:  func(err error) bool { return strings.Contains(err.Error(), "failed to negotiate an api version;") },
			statusCode: http.StatusForbidden,
		},
	}

	for _, test := range tests {
		// Fake client that replies with the per-case status code and the
		// advertised server versions, or fails outright with sendErr.
		fakeClient := &fake.RESTClient{
			NegotiatedSerializer: testapi.Default.NegotiatedSerializer(),
			Resp: &http.Response{
				StatusCode: test.statusCode,
				Body:       objBody(&uapi.APIVersions{Versions: test.serverVersions}),
			},
			Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
				if test.sendErr != nil {
					return nil, test.sendErr
				}
				header := http.Header{}
				header.Set("Content-Type", runtime.ContentTypeJSON)
				return &http.Response{StatusCode: test.statusCode, Header: header, Body: objBody(&uapi.APIVersions{Versions: test.serverVersions})}, nil
			}),
		}
		// Build a real discovery client, then swap in the fake transport.
		c := discovery.NewDiscoveryClientForConfigOrDie(&rest.Config{})
		c.RESTClient().(*rest.RESTClient).Client = fakeClient.Client
		response, err := discovery.NegotiateVersion(c, test.requiredVersion, test.clientVersions)
		if err == nil && test.expectErr != nil {
			t.Errorf("expected error, got nil for [%s].", test.name)
		}
		if err != nil {
			if test.expectErr == nil || !test.expectErr(err) {
				t.Errorf("unexpected error for [%s]: %v.", test.name, err)
			}
			// Error path: no version to compare, move to the next case.
			continue
		}
		if *response != *test.expectedVersion {
			t.Errorf("%s: expected version %s, got %s.", test.name, test.expectedVersion, response)
		}
	}
}
|
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.