ext (stringclasses, 9 values) | sha (stringlengths, 40) | content (stringlengths, 3–1.04M)
---|---|---|
py | 1a3afd1765517480de2e082aab7ed0b326b450fa | import pandas as pd
import pickle
print('\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n')
print('Starting data labelling...')
with open('cache/data_labeler_vars.pickle', 'rb') as f:
lastIndex, datajobs = pickle.load(f)
print('Starting from index: ' + str(lastIndex))
for i in range(lastIndex, datajobs.shape[0]):
print('\n\n\n\n\n\n\n')
print('Current Index: ' + str(i))
print("JOB TITLE: " + datajobs.loc[i, "job_title"])
print("JOB DESCRIPTION:")
print(*datajobs.loc[i, "job_description"].split('.'), sep='\n')
print('Entry level job? (Y/n) \n Type Q to quit.')
while True:
response = input()
if response == 'Y':
#Save response as 1
datajobs.loc[i, "entry_level_q"] = 1
print('Response saved as yes.')
break
elif response == 'n':
#Save response as 0
datajobs.loc[i, "entry_level_q"] = 0
print('Response saved as no.')
break
elif response == 'Q':
print('Quitting...')
break
else:
print('Input not recognized. Please enter either Y for yes, n for no, or Q to quit.')
if response == 'Q':
break
lastIndex += 1
#Save updated lastIndex
with open('cache/data_labeler_vars.pickle', 'wb') as f:
pickle.dump((lastIndex, datajobs), f)
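# --- Illustrative sketch (not part of the original file) ---
# The labeler above assumes 'cache/data_labeler_vars.pickle' already exists.
# A hypothetical bootstrap helper that could create it from a CSV of job
# postings is shown below; the 'jobs.csv' path and its column names are
# assumptions, not something defined elsewhere in this script.
def bootstrap_label_cache(csv_path='jobs.csv'):
    import os
    os.makedirs('cache', exist_ok=True)
    datajobs = pd.read_csv(csv_path)  # expected to provide job_title / job_description columns
    datajobs['entry_level_q'] = None  # column the labelling loop fills in
    with open('cache/data_labeler_vars.pickle', 'wb') as f:
        pickle.dump((0, datajobs), f)  # start labelling from index 0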
|
py | 1a3afdadfeb371be20d7a8e73d35853db2e87e5c | ##################
# Nim game #
# Mehdi #
# & #
# #
# #
##################
def affichage_tas(liste):
pass
def choix_allumettes(joueur):
# adapt the prompt to the player's name
return input(joueur+" choisis un nombre d'allumettes: ")
def choix_tas(joueur):
# adapt the prompt to the player's name
return input(joueur+" choisis un tas")
def affichage_allumettes(liste):
pass
# initialization function
def initialisation(liste=[1,2,3,4,5,6], liste2=[1,2,3,4,5,6]):
joueur1=input("nom joueur1: ")
joueur2=input('nom joueur2: ')
total_alu=0
for i in range(len(liste)):
total_alu = total_alu+ liste[i]*liste2[i]
# return a list with the data [joueur1, joueur2, liste, liste2, total_alu]
return [joueur1, joueur2, liste, liste2, total_alu]
# main function
def main():
joueur1, joueur2, liste, liste2, total_alu = initialisation()
# 0 means joueur1, 1 means joueur2
# set to 1 so that joueur1 plays first
dernier_joueur = 1
joueur=0
while total_alu > 0:
if dernier_joueur==1:
joueur=joueur1
dernier_joueur = 0
else:
joueur = joueur2
dernier_joueur=1
affichage_tas(liste)
affichage_allumettes(liste2)
tas = choix_tas(joueur)
nb_alu = choix_allumettes(joueur)
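# --- Illustrative sketch (not part of the original file) ---
# main() above never removes matches or updates total_alu, so its loop cannot
# terminate. A hypothetical helper for applying a move is sketched here; it
# assumes 'tas' is a 1-based pile number and 'liste2' holds the match count
# per pile, which is how the rest of the code appears to use them.
def appliquer_coup(liste2, tas, nb_alu):
    """Remove nb_alu matches from pile number tas and return how many were actually removed."""
    index = int(tas) - 1
    retire = min(int(nb_alu), liste2[index])  # cannot take more matches than the pile holds
    liste2[index] -= retire
    return retire
# Inside the while loop the move would then be applied with, roughly:
#     total_alu -= appliquer_coup(liste2, tas, nb_alu)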
|
py | 1a3aff778c7a94d22deeab8e97a6f726acb83b03 | import sublime
import sublime_plugin
import os
from collections import OrderedDict, namedtuple
from hyperhelp.common import log, hh_syntax
from hyperhelp.core import help_index_list, lookup_help_topic
###----------------------------------------------------------------------------
# A representation of what is going to be linted.
LintTarget = namedtuple("LintTarget", [
"target_type", "pkg_info", "files"
])
# Linters produce an array of these tuples to indicate problems found in files.
# type can be one of "info", "warning" or "error".
LintResult = namedtuple("LintResult", [
"type", "file", "line", "column", "message"
])
###----------------------------------------------------------------------------
class LinterBase():
"""
The base class for all lint operations in the help linter.
"""
def __init__(self, pkg_info):
self.pkg_info = pkg_info
self.issues = list()
self.index_file = os.path.relpath(
pkg_info.index_file,
"Packages/%s/" % (self.pkg_info.doc_root))
def lint(self, view, file_name):
"""
This is invoked with a view that contains raw help text from the help
file, which is contained in the help index given in the constructor.
This will be invoked for each file to be linted.
"""
pass
def add(self, view, m_type, file, point, msg, *args):
"""
Add a result to the internal result list. point is the location that is
the focus of the error. If view is None, the point is ignored and the
issue is added at line 1, column 1.
"""
pos = view.rowcol(point) if view is not None else (0, 0)
msg = msg % args
self.issues.append(LintResult(m_type, file, pos[0] + 1, pos[1]+1, msg))
def add_index(self, m_type, msg, *args):
"""
Add a result that is focused on the help index. As there is no way to
know the proper location except by hand parsing the index, no view is
needed and the position of the issue is always row 1, column 1.
"""
return self.add(None, m_type, self.index_file, 0, msg, *args)
def results(self):
"""
This is invoked after all calls to the lint() method have finished to
collect the final results of the lint operation.
This should return a list of LintResult tuples that indicate the issues
that have been found or an empty list if there are no issues.
The default is to return the issues instance variable.
"""
return self.issues
###----------------------------------------------------------------------------
def can_lint_view(view):
"""
Determine if the provided view can be the source of a lint. To be valid
the view must represent a hyperhelp data file that has a path rooted in the
Packages folder inside of a package whose help index is known.
"""
if (view is not None and view.file_name() is not None and
view.file_name().startswith(sublime.packages_path()) and
view.match_selector(0, "text.hyperhelp")):
name = os.path.relpath(view.file_name(), sublime.packages_path())
pkg_name = name[:name.index(os.sep)]
return pkg_name in help_index_list()
return False
def find_lint_target(view):
"""
Examine a given view and return a LintTarget that describes what is being
linted. None is returned if the view is not a valid lint target.
"""
if not can_lint_view(view):
return None
name = view.file_name()
parts = os.path.relpath(name, sublime.packages_path()).split(os.sep)
pkg_name = parts[0]
target = parts[-1]
pkg_info = help_index_list().get(pkg_name)
if view.match_selector(0, "text.hyperhelp.help"):
return LintTarget("single", pkg_info, [target])
return LintTarget("package", pkg_info, list(pkg_info.help_files))
def get_linters(target):
"""
Given a LintTarget, return back an array of all of the linters that should
be run for that target.
Some targets may only be run on the package as a whole while others may be
allowed on a file by file basis. The returned linters may also be affected
by user settings.
"""
linters = []
linters.append(MissingLinkAnchorLinter(target.pkg_info))
if target.target_type == "package":
linters.append(MissingHelpSourceLinter(target.pkg_info))
return linters
def get_lint_file(filename):
"""
Return a view that contains the contents of the provided file name.
If the file is not already loaded, it is loaded into a hidden view and that
is returned instead.
Can return None if the file is not open and cannot be loaded.
"""
for window in sublime.windows():
view = window.find_open_file(filename)
if view is not None:
return view
content = None
try:
with open(filename, 'r') as file:
content = file.read()
except (OSError, UnicodeDecodeError):
pass
if content:
view = sublime.active_window().create_output_panel("_hha_tmp", True)
view.run_command("select_all")
view.run_command("left_delete")
view.run_command("append", {"characters": content})
view.assign_syntax(hh_syntax("HyperHelp.sublime-syntax"))
return view
return None
def format_lint(pkg_info, issues, window=None):
"""
Takes a list of LintResult issues for a package and returns back output
suitable for passing to display_lint().
If a window is provided, display_lint() is called prior to returning in
order to display the output first.
"""
files = OrderedDict()
for issue in issues:
if issue.file not in files:
files[issue.file] = []
files[issue.file].append(issue)
output = ["Linting in help package: %s\n" % pkg_info.package]
warn = 0
err = 0
for file in files:
output.append("%s:" % file)
for issue in files[file]:
issue_pos = "%d:%d" % (issue.line, issue.column)
output.append(" %-7s @ %-7s %s" % (
issue.type, issue_pos, issue.message))
if issue.type == "warning":
warn += 1
elif issue.type == "error":
err += 1
output.append("")
output.append("%d warning%s, %d error%s" % (
warn,
"" if warn == 1 else "s",
err,
"" if err == 1 else "s"))
if window:
display_lint(window, pkg_info, output)
return output
def display_lint(window, pkg_info, output):
"""
Display the lint output provided into the given window. The output is
assumed to have been generated from the provided package, which is used to
know where the help files are located.
"""
view = window.create_output_panel("HyperHelpAuthor Lint", False)
basedir = os.path.join(sublime.packages_path(), pkg_info.doc_root)
if not isinstance(output, str):
output = "\n".join(output)
view.assign_syntax(hh_syntax("HyperHelpLinter.sublime-syntax"))
settings = view.settings()
settings.set("result_base_dir", basedir)
settings.set("result_file_regex", r"^([^:]+):$")
settings.set("result_line_regex", r"^.*?@ (\d+):(\d+)\s+(.*)$")
view.set_read_only(False)
view.run_command("append", {"characters": output})
view.set_read_only(True)
window.run_command("show_panel", {"panel": "output.HyperHelpAuthor Lint"})
###----------------------------------------------------------------------------
class MissingLinkAnchorLinter(LinterBase):
"""
Lint one or more help files to find all links that are currently broken
because their targets are not known.
"""
def lint(self, view, file_name):
topics = self.pkg_info.help_topics
regions = view.find_by_selector("meta.link, meta.anchor")
for pos in regions:
link = view.substr(pos)
if lookup_help_topic(self.pkg_info, link) is not None:
continue
stub = "link references unknown anchor '%s'"
if view.match_selector(pos.begin(), "meta.anchor"):
stub = "anchor '%s' is not in the help index"
self.add(view, "warning", file_name, pos.begin(),
stub % link.replace("\t", " "))
class MissingHelpSourceLinter(LinterBase):
"""
Lint the help index to determine if the list of help files listed in the
index matches the list of help files that exist for the package.
"""
def __init__(self, pkg_info):
super().__init__(pkg_info)
root = "Packages/%s/" % (self.pkg_info.doc_root)
d_files = {file[len(root):] for file in sublime.find_resources("*.txt")
if file.startswith(root)}
i_files = {key for key in self.pkg_info.help_files.keys()}
for file in d_files - i_files:
self.add_index(
"warning",
"Help file '%s' is in Packages/%s/ but missing from the index",
file, self.pkg_info.doc_root)
for file in i_files - d_files:
self.add_index(
"error",
"Help file '%s' is in the index but not in Packages/%s/",
file, self.pkg_info.doc_root)
###----------------------------------------------------------------------------
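# --- Illustrative sketch (not part of the original file) ---
# The module defines the linting pieces but no driver; a hypothetical command
# that strings them together for the active view might look roughly like the
# class below. Its name, the path handling and the overall wiring are
# assumptions, not the actual HyperHelpAuthor command implementation.
class HypotheticalLintViewCommand(sublime_plugin.WindowCommand):
    def run(self):
        target = find_lint_target(self.window.active_view())
        if target is None:
            return
        linters = get_linters(target)
        for name in target.files:
            view = get_lint_file(os.path.join(sublime.packages_path(),
                                              target.pkg_info.doc_root, name))
            if view is not None:
                for linter in linters:
                    linter.lint(view, name)
        issues = [issue for linter in linters for issue in linter.results()]
        format_lint(target.pkg_info, issues, window=self.window)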
|
py | 1a3b012315509cc1270a39f424c533698297fea0 | #!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# create two boxes and interpolate between them
#
pts = vtk.vtkPoints()
pts.InsertNextPoint(-1,-1,-1)
pts.InsertNextPoint(1,-1,-1)
pts.InsertNextPoint(1,1,-1)
pts.InsertNextPoint(-1,1,-1)
pts.InsertNextPoint(-1,-1,1)
pts.InsertNextPoint(1,-1,1)
pts.InsertNextPoint(1,1,1)
pts.InsertNextPoint(-1,1,1)
faces = vtk.vtkCellArray()
faces.InsertNextCell(4)
faces.InsertCellPoint(0)
faces.InsertCellPoint(3)
faces.InsertCellPoint(2)
faces.InsertCellPoint(1)
faces.InsertNextCell(4)
faces.InsertCellPoint(4)
faces.InsertCellPoint(5)
faces.InsertCellPoint(6)
faces.InsertCellPoint(7)
faces.InsertNextCell(4)
faces.InsertCellPoint(0)
faces.InsertCellPoint(1)
faces.InsertCellPoint(5)
faces.InsertCellPoint(4)
faces.InsertNextCell(4)
faces.InsertCellPoint(1)
faces.InsertCellPoint(2)
faces.InsertCellPoint(6)
faces.InsertCellPoint(5)
faces.InsertNextCell(4)
faces.InsertCellPoint(2)
faces.InsertCellPoint(3)
faces.InsertCellPoint(7)
faces.InsertCellPoint(6)
faces.InsertNextCell(4)
faces.InsertCellPoint(3)
faces.InsertCellPoint(0)
faces.InsertCellPoint(4)
faces.InsertCellPoint(7)
faceColors = vtk.vtkUnsignedCharArray()
faceColors.SetNumberOfComponents(3)
faceColors.SetNumberOfTuples(6) # one RGB tuple per face of the cube
faceColors.InsertComponent(0,0,255)
faceColors.InsertComponent(0,1,0)
faceColors.InsertComponent(0,2,0)
faceColors.InsertComponent(1,0,0)
faceColors.InsertComponent(1,1,255)
faceColors.InsertComponent(1,2,0)
faceColors.InsertComponent(2,0,255)
faceColors.InsertComponent(2,1,255)
faceColors.InsertComponent(2,2,0)
faceColors.InsertComponent(3,0,0)
faceColors.InsertComponent(3,1,0)
faceColors.InsertComponent(3,2,255)
faceColors.InsertComponent(4,0,255)
faceColors.InsertComponent(4,1,0)
faceColors.InsertComponent(4,2,255)
faceColors.InsertComponent(5,0,0)
faceColors.InsertComponent(5,1,255)
faceColors.InsertComponent(5,2,255)
cube = vtk.vtkPolyData()
cube.SetPoints(pts)
cube.SetPolys(faces)
cube.GetCellData().SetScalars(faceColors)
t1 = vtk.vtkTransform()
t1.Translate(1,2,3)
t1.RotateX(15)
t1.Scale(4,2,1)
tpdf1 = vtk.vtkTransformPolyDataFilter()
tpdf1.SetInputData(cube)
tpdf1.SetTransform(t1)
cube1Mapper = vtk.vtkPolyDataMapper()
cube1Mapper.SetInputConnection(tpdf1.GetOutputPort())
cube1 = vtk.vtkActor()
cube1.SetMapper(cube1Mapper)
t2 = vtk.vtkTransform()
t2.Translate(5,10,15)
t2.RotateX(22.5)
t2.RotateY(15)
t2.RotateZ(85)
t2.Scale(1,2,4)
tpdf2 = vtk.vtkTransformPolyDataFilter()
tpdf2.SetInputData(cube)
tpdf2.SetTransform(t2)
cube2Mapper = vtk.vtkPolyDataMapper()
cube2Mapper.SetInputConnection(tpdf2.GetOutputPort())
cube2 = vtk.vtkActor()
cube2.SetMapper(cube2Mapper)
t3 = vtk.vtkTransform()
t3.Translate(5,-10,15)
t3.RotateX(13)
t3.RotateY(72)
t3.RotateZ(-15)
t3.Scale(2,4,1)
tpdf3 = vtk.vtkTransformPolyDataFilter()
tpdf3.SetInputData(cube)
tpdf3.SetTransform(t3)
cube3Mapper = vtk.vtkPolyDataMapper()
cube3Mapper.SetInputConnection(tpdf3.GetOutputPort())
cube3 = vtk.vtkActor()
cube3.SetMapper(cube3Mapper)
t4 = vtk.vtkTransform()
t4.Translate(10,-5,5)
t4.RotateX(66)
t4.RotateY(19)
t4.RotateZ(24)
t4.Scale(2,.5,1)
tpdf4 = vtk.vtkTransformPolyDataFilter()
tpdf4.SetInputData(cube)
tpdf4.SetTransform(t4)
cube4Mapper = vtk.vtkPolyDataMapper()
cube4Mapper.SetInputConnection(tpdf4.GetOutputPort())
cube4 = vtk.vtkActor()
cube4.SetMapper(cube4Mapper)
# Interpolate the transformation
cubeMapper = vtk.vtkPolyDataMapper()
cubeMapper.SetInputData(cube)
cubeActor = vtk.vtkActor()
cubeActor.SetMapper(cubeMapper)
# Interpolate some transformations, test along the way
interpolator = vtk.vtkTransformInterpolator()
#interpolator SetInterpolationTypeToLinear
interpolator.SetInterpolationTypeToSpline()
interpolator.AddTransform(0.0,cube1)
interpolator.AddTransform(8.0,cube2)
interpolator.AddTransform(18.2,cube3)
interpolator.AddTransform(24.4,cube4)
interpolator.Initialize()
#puts [interpolator GetNumberOfTransforms]
interpolator.AddTransform(0.0,t1)
interpolator.AddTransform(8.0,t2)
interpolator.AddTransform(18.2,t3)
interpolator.AddTransform(24.4,t4)
#puts [interpolator GetNumberOfTransforms]
# Create the RenderWindow, Renderer and both Actors
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer, set the background and size
#
ren1.AddActor(cube1)
ren1.AddActor(cube2)
ren1.AddActor(cube3)
ren1.AddActor(cube4)
ren1.AddActor(cubeActor)
ren1.SetBackground(0,0,0)
renWin.SetSize(300,300)
ren1.SetBackground(0.1,0.2,0.4)
# render the image
#
camera = vtk.vtkCamera()
camera.SetClippingRange(31.2977,81.697)
camera.SetFocalPoint(3.0991,-2.00445,9.78648)
camera.SetPosition(-44.8481,-25.871,10.0645)
camera.SetViewAngle(30)
camera.SetViewUp(-0.0356378,0.0599728,-0.997564)
ren1.SetActiveCamera(camera)
renWin.Render()
# prevent the tk window from showing up then start the event loop
xform = vtk.vtkTransform()
def animate():
numSteps = 250
tMin = interpolator.GetMinimumT()  # avoid shadowing the built-in min/max
tMax = interpolator.GetMaximumT()
i = 0
while i <= numSteps:
t = tMin + float(i)*(tMax-tMin)/float(numSteps)
interpolator.InterpolateTransform(t,xform)
cubeActor.SetUserMatrix(xform.GetMatrix())
renWin.Render()
i = i + 1
interpolator.InterpolateTransform(13.2,xform)
cubeActor.SetUserMatrix(xform.GetMatrix())
renWin.Render()
#animate()
# --- end of script --
|
py | 1a3b019dc1c4eacadc8b32c71fe8be029b7ee3c2 | import factory
import app.factories.common as common
from app.factories.event import EventFactoryBasic
from app.models.tax import db, Tax
class TaxFactory(factory.alchemy.SQLAlchemyModelFactory):
class Meta:
model = Tax
sqlalchemy_session = db.session
event = factory.RelatedFactory(EventFactoryBasic)
country = common.country_
name = common.string_
rate = common.float_
tax_id = "123456789"
should_send_invoice = False
registered_company = common.string_
address = common.string_
city = common.string_
state = common.string_
zip = "123456"
invoice_footer = common.string_
is_tax_included_in_price = False
event_id = 1
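# --- Illustrative usage sketch (not part of the original file) ---
# A hypothetical test showing how the factory might be exercised; the Flask
# app context and database fixtures it relies on are assumptions and are not
# part of this module.
def _example_tax_factory_usage():
    tax = TaxFactory(rate=7.5)  # also builds the related event via EventFactoryBasic
    db.session.commit()         # the factory uses the SQLAlchemy session declared in Meta
    assert tax.tax_id == "123456789"
    return tax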
|
py | 1a3b031605e135056b9f1a36d6dc3974a43b8846 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.23.6
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from kubernetes_asyncio.client.configuration import Configuration
class V1EndpointSlice(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'address_type': 'str',
'api_version': 'str',
'endpoints': 'list[V1Endpoint]',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'ports': 'list[DiscoveryV1EndpointPort]'
}
attribute_map = {
'address_type': 'addressType',
'api_version': 'apiVersion',
'endpoints': 'endpoints',
'kind': 'kind',
'metadata': 'metadata',
'ports': 'ports'
}
def __init__(self, address_type=None, api_version=None, endpoints=None, kind=None, metadata=None, ports=None, local_vars_configuration=None): # noqa: E501
"""V1EndpointSlice - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self._address_type = None
self._api_version = None
self._endpoints = None
self._kind = None
self._metadata = None
self._ports = None
self.discriminator = None
self.address_type = address_type
if api_version is not None:
self.api_version = api_version
self.endpoints = endpoints
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
if ports is not None:
self.ports = ports
@property
def address_type(self):
"""Gets the address_type of this V1EndpointSlice. # noqa: E501
addressType specifies the type of address carried by this EndpointSlice. All addresses in this slice must be the same type. This field is immutable after creation. The following address types are currently supported: * IPv4: Represents an IPv4 Address. * IPv6: Represents an IPv6 Address. * FQDN: Represents a Fully Qualified Domain Name. # noqa: E501
:return: The address_type of this V1EndpointSlice. # noqa: E501
:rtype: str
"""
return self._address_type
@address_type.setter
def address_type(self, address_type):
"""Sets the address_type of this V1EndpointSlice.
addressType specifies the type of address carried by this EndpointSlice. All addresses in this slice must be the same type. This field is immutable after creation. The following address types are currently supported: * IPv4: Represents an IPv4 Address. * IPv6: Represents an IPv6 Address. * FQDN: Represents a Fully Qualified Domain Name. # noqa: E501
:param address_type: The address_type of this V1EndpointSlice. # noqa: E501
:type address_type: str
"""
if self.local_vars_configuration.client_side_validation and address_type is None: # noqa: E501
raise ValueError("Invalid value for `address_type`, must not be `None`") # noqa: E501
self._address_type = address_type
@property
def api_version(self):
"""Gets the api_version of this V1EndpointSlice. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1EndpointSlice. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1EndpointSlice.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1EndpointSlice. # noqa: E501
:type api_version: str
"""
self._api_version = api_version
@property
def endpoints(self):
"""Gets the endpoints of this V1EndpointSlice. # noqa: E501
endpoints is a list of unique endpoints in this slice. Each slice may include a maximum of 1000 endpoints. # noqa: E501
:return: The endpoints of this V1EndpointSlice. # noqa: E501
:rtype: list[V1Endpoint]
"""
return self._endpoints
@endpoints.setter
def endpoints(self, endpoints):
"""Sets the endpoints of this V1EndpointSlice.
endpoints is a list of unique endpoints in this slice. Each slice may include a maximum of 1000 endpoints. # noqa: E501
:param endpoints: The endpoints of this V1EndpointSlice. # noqa: E501
:type endpoints: list[V1Endpoint]
"""
if self.local_vars_configuration.client_side_validation and endpoints is None: # noqa: E501
raise ValueError("Invalid value for `endpoints`, must not be `None`") # noqa: E501
self._endpoints = endpoints
@property
def kind(self):
"""Gets the kind of this V1EndpointSlice. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1EndpointSlice. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1EndpointSlice.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1EndpointSlice. # noqa: E501
:type kind: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1EndpointSlice. # noqa: E501
:return: The metadata of this V1EndpointSlice. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1EndpointSlice.
:param metadata: The metadata of this V1EndpointSlice. # noqa: E501
:type metadata: V1ObjectMeta
"""
self._metadata = metadata
@property
def ports(self):
"""Gets the ports of this V1EndpointSlice. # noqa: E501
ports specifies the list of network ports exposed by each endpoint in this slice. Each port must have a unique name. When ports is empty, it indicates that there are no defined ports. When a port is defined with a nil port value, it indicates \"all ports\". Each slice may include a maximum of 100 ports. # noqa: E501
:return: The ports of this V1EndpointSlice. # noqa: E501
:rtype: list[DiscoveryV1EndpointPort]
"""
return self._ports
@ports.setter
def ports(self, ports):
"""Sets the ports of this V1EndpointSlice.
ports specifies the list of network ports exposed by each endpoint in this slice. Each port must have a unique name. When ports is empty, it indicates that there are no defined ports. When a port is defined with a nil port value, it indicates \"all ports\". Each slice may include a maximum of 100 ports. # noqa: E501
:param ports: The ports of this V1EndpointSlice. # noqa: E501
:type ports: list[DiscoveryV1EndpointPort]
"""
self._ports = ports
def to_dict(self, serialize=False):
"""Returns the model properties as a dict"""
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = getfullargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1EndpointSlice):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1EndpointSlice):
return True
return self.to_dict() != other.to_dict()
|
py | 1a3b035b45027d0b18146e6675147a54f4f87694 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.20.7
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import kubernetes.client
from kubernetes.client.models.io_cert_manager_v1beta1_certificate_list import IoCertManagerV1beta1CertificateList # noqa: E501
from kubernetes.client.rest import ApiException
class TestIoCertManagerV1beta1CertificateList(unittest.TestCase):
"""IoCertManagerV1beta1CertificateList unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test IoCertManagerV1beta1CertificateList
include_option is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = kubernetes.client.models.io_cert_manager_v1beta1_certificate_list.IoCertManagerV1beta1CertificateList() # noqa: E501
if include_optional :
return IoCertManagerV1beta1CertificateList(
api_version = '0',
items = [
kubernetes.client.models.io/cert_manager/v1beta1/certificate.io.cert-manager.v1beta1.Certificate(
api_version = '0',
kind = '0',
metadata = kubernetes.client.models.v1/object_meta_v2.v1.ObjectMeta_v2(
annotations = {
'key' : '0'
},
cluster_name = '0',
creation_timestamp = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
deletion_grace_period_seconds = 56,
deletion_timestamp = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
finalizers = [
'0'
],
generate_name = '0',
generation = 56,
labels = {
'key' : '0'
},
managed_fields = [
kubernetes.client.models.v1/managed_fields_entry.v1.ManagedFieldsEntry(
api_version = '0',
fields_type = '0',
fields_v1 = kubernetes.client.models.fields_v1.fieldsV1(),
manager = '0',
operation = '0',
time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), )
],
name = '0',
namespace = '0',
owner_references = [
kubernetes.client.models.v1/owner_reference_v2.v1.OwnerReference_v2(
api_version = '0',
block_owner_deletion = True,
controller = True,
kind = '0',
name = '0',
uid = '0', )
],
resource_version = '0',
self_link = '0',
uid = '0', ),
spec = kubernetes.client.models.io_cert_manager_v1beta1_certificate_spec.io_cert_manager_v1beta1_Certificate_spec(
common_name = '0',
dns_names = [
'0'
],
duration = '0',
email_sa_ns = [
'0'
],
encode_usages_in_request = True,
ip_addresses = [
'0'
],
is_ca = True,
issuer_ref = kubernetes.client.models.io_cert_manager_v1_certificate_spec_issuer_ref.io_cert_manager_v1_Certificate_spec_issuerRef(
group = '0',
kind = '0',
name = '0', ),
keystores = kubernetes.client.models.io_cert_manager_v1alpha2_certificate_spec_keystores.io_cert_manager_v1alpha2_Certificate_spec_keystores(
jks = kubernetes.client.models.io_cert_manager_v1alpha2_certificate_spec_keystores_jks.io_cert_manager_v1alpha2_Certificate_spec_keystores_jks(
create = True,
password_secret_ref = kubernetes.client.models.io_cert_manager_v1_certificate_spec_keystores_jks_password_secret_ref.io_cert_manager_v1_Certificate_spec_keystores_jks_passwordSecretRef(
key = '0',
name = '0', ), ),
pkcs12 = kubernetes.client.models.io_cert_manager_v1alpha2_certificate_spec_keystores_pkcs12.io_cert_manager_v1alpha2_Certificate_spec_keystores_pkcs12(
create = True,
password_secret_ref = kubernetes.client.models.io_cert_manager_v1_certificate_spec_keystores_pkcs12_password_secret_ref.io_cert_manager_v1_Certificate_spec_keystores_pkcs12_passwordSecretRef(
key = '0',
name = '0', ), ), ),
private_key = kubernetes.client.models.io_cert_manager_v1beta1_certificate_spec_private_key.io_cert_manager_v1beta1_Certificate_spec_privateKey(
algorithm = 'RSA',
encoding = 'PKCS1',
rotation_policy = '0',
size = 56, ),
renew_before = '0',
revision_history_limit = 56,
secret_name = '0',
secret_template = kubernetes.client.models.io_cert_manager_v1_certificate_spec_secret_template.io_cert_manager_v1_Certificate_spec_secretTemplate(),
subject = kubernetes.client.models.io_cert_manager_v1_certificate_spec_subject.io_cert_manager_v1_Certificate_spec_subject(
countries = [
'0'
],
localities = [
'0'
],
organizational_units = [
'0'
],
organizations = [
'0'
],
postal_codes = [
'0'
],
provinces = [
'0'
],
serial_number = '0',
street_addresses = [
'0'
], ),
uri_sa_ns = [
'0'
],
usages = [
'signing'
], ),
status = kubernetes.client.models.io_cert_manager_v1_certificate_status.io_cert_manager_v1_Certificate_status(
conditions = [
kubernetes.client.models.io_cert_manager_v1_certificate_status_conditions.io_cert_manager_v1_Certificate_status_conditions(
last_transition_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
message = '0',
observed_generation = 56,
reason = '0',
status = 'True',
type = '0', )
],
last_failure_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
next_private_key_secret_name = '0',
not_after = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
not_before = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
renewal_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
revision = 56, ), )
],
kind = '0',
metadata = kubernetes.client.models.v1/list_meta.v1.ListMeta(
continue = '0',
remaining_item_count = 56,
resource_version = '0',
self_link = '0', )
)
else :
return IoCertManagerV1beta1CertificateList(
items = [
kubernetes.client.models.io/cert_manager/v1beta1/certificate.io.cert-manager.v1beta1.Certificate(
api_version = '0',
kind = '0',
metadata = kubernetes.client.models.v1/object_meta_v2.v1.ObjectMeta_v2(
annotations = {
'key' : '0'
},
cluster_name = '0',
creation_timestamp = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
deletion_grace_period_seconds = 56,
deletion_timestamp = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
finalizers = [
'0'
],
generate_name = '0',
generation = 56,
labels = {
'key' : '0'
},
managed_fields = [
kubernetes.client.models.v1/managed_fields_entry.v1.ManagedFieldsEntry(
api_version = '0',
fields_type = '0',
fields_v1 = kubernetes.client.models.fields_v1.fieldsV1(),
manager = '0',
operation = '0',
time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), )
],
name = '0',
namespace = '0',
owner_references = [
kubernetes.client.models.v1/owner_reference_v2.v1.OwnerReference_v2(
api_version = '0',
block_owner_deletion = True,
controller = True,
kind = '0',
name = '0',
uid = '0', )
],
resource_version = '0',
self_link = '0',
uid = '0', ),
spec = kubernetes.client.models.io_cert_manager_v1beta1_certificate_spec.io_cert_manager_v1beta1_Certificate_spec(
common_name = '0',
dns_names = [
'0'
],
duration = '0',
email_sa_ns = [
'0'
],
encode_usages_in_request = True,
ip_addresses = [
'0'
],
is_ca = True,
issuer_ref = kubernetes.client.models.io_cert_manager_v1_certificate_spec_issuer_ref.io_cert_manager_v1_Certificate_spec_issuerRef(
group = '0',
kind = '0',
name = '0', ),
keystores = kubernetes.client.models.io_cert_manager_v1alpha2_certificate_spec_keystores.io_cert_manager_v1alpha2_Certificate_spec_keystores(
jks = kubernetes.client.models.io_cert_manager_v1alpha2_certificate_spec_keystores_jks.io_cert_manager_v1alpha2_Certificate_spec_keystores_jks(
create = True,
password_secret_ref = kubernetes.client.models.io_cert_manager_v1_certificate_spec_keystores_jks_password_secret_ref.io_cert_manager_v1_Certificate_spec_keystores_jks_passwordSecretRef(
key = '0',
name = '0', ), ),
pkcs12 = kubernetes.client.models.io_cert_manager_v1alpha2_certificate_spec_keystores_pkcs12.io_cert_manager_v1alpha2_Certificate_spec_keystores_pkcs12(
create = True,
password_secret_ref = kubernetes.client.models.io_cert_manager_v1_certificate_spec_keystores_pkcs12_password_secret_ref.io_cert_manager_v1_Certificate_spec_keystores_pkcs12_passwordSecretRef(
key = '0',
name = '0', ), ), ),
private_key = kubernetes.client.models.io_cert_manager_v1beta1_certificate_spec_private_key.io_cert_manager_v1beta1_Certificate_spec_privateKey(
algorithm = 'RSA',
encoding = 'PKCS1',
rotation_policy = '0',
size = 56, ),
renew_before = '0',
revision_history_limit = 56,
secret_name = '0',
secret_template = kubernetes.client.models.io_cert_manager_v1_certificate_spec_secret_template.io_cert_manager_v1_Certificate_spec_secretTemplate(),
subject = kubernetes.client.models.io_cert_manager_v1_certificate_spec_subject.io_cert_manager_v1_Certificate_spec_subject(
countries = [
'0'
],
localities = [
'0'
],
organizational_units = [
'0'
],
organizations = [
'0'
],
postal_codes = [
'0'
],
provinces = [
'0'
],
serial_number = '0',
street_addresses = [
'0'
], ),
uri_sa_ns = [
'0'
],
usages = [
'signing'
], ),
status = kubernetes.client.models.io_cert_manager_v1_certificate_status.io_cert_manager_v1_Certificate_status(
conditions = [
kubernetes.client.models.io_cert_manager_v1_certificate_status_conditions.io_cert_manager_v1_Certificate_status_conditions(
last_transition_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
message = '0',
observed_generation = 56,
reason = '0',
status = 'True',
type = '0', )
],
last_failure_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
next_private_key_secret_name = '0',
not_after = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
not_before = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
renewal_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
revision = 56, ), )
],
)
def testIoCertManagerV1beta1CertificateList(self):
"""Test IoCertManagerV1beta1CertificateList"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
|
py | 1a3b0364b77c3415bf6f9be676f40ca8ea091de4 | #!/usr/bin/env python3
"""Shows the crash in the faucet log produced by given input"""
import logging
import os
import sys
from faucet import faucet
from ryu.controller import dpset
from faucet import faucet_experimental_api
import Fake
def main():
# go through all files in directory
# read file and store in string
with open(sys.argv[1]) as pkt:
packet_data = pkt.read()
# start faucet
application = faucet.Faucet(dpset=dpset.DPSet(), faucet_experimental_api=faucet_experimental_api.FaucetExperimentalAPI())
application.start()
# make sure dps are running
for dp_id, valve in list(application.valves.items()):
valve.dp.running = True
# create data from read file
byte_data = None
try:
byte_data = bytearray.fromhex(packet_data)
except (ValueError, TypeError):
pass
if byte_data is not None:
# create fake packet
dp = Fake.Datapath(1)
msg = Fake.Message(datapath=dp, cookie=1524372928, port=1, data=byte_data, in_port=1)
pkt = Fake.RyuEvent(msg)
# send packet to faucet and display error produced
application.packet_in_handler(pkt)
if __name__ == "__main__":
# make sure user specifies the afl crash folder
if len(sys.argv) == 2:
main()
else:
print('USAGE: python3 display_packet_crash.py <AFL_CRASH_FILE>')
os._exit(0)
|
py | 1a3b0415dc97031a15aa0da89aa13a260bdaf957 | # Copyright 2018 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional, Tuple
from mlrun.api import schemas
class DBError(Exception):
pass
class DBInterface(ABC):
@abstractmethod
def initialize(self, session):
pass
@abstractmethod
def store_log(
self, session, uid, project="", body=None, append=False,
):
pass
@abstractmethod
def get_log(self, session, uid, project="", offset=0, size=0):
pass
@abstractmethod
def store_run(
self, session, struct, uid, project="", iter=0,
):
pass
@abstractmethod
def update_run(self, session, updates: dict, uid, project="", iter=0):
pass
@abstractmethod
def read_run(self, session, uid, project="", iter=0):
pass
@abstractmethod
def list_runs(
self,
session,
name="",
uid=None,
project="",
labels=None,
states=None,
sort=True,
last=0,
iter=False,
start_time_from=None,
start_time_to=None,
last_update_time_from=None,
last_update_time_to=None,
partition_by: schemas.RunPartitionByField = None,
rows_per_partition: int = 1,
partition_sort_by: schemas.SortField = None,
partition_order: schemas.OrderType = schemas.OrderType.desc,
):
pass
@abstractmethod
def del_run(self, session, uid, project="", iter=0):
pass
@abstractmethod
def del_runs(self, session, name="", project="", labels=None, state="", days_ago=0):
pass
@abstractmethod
def store_artifact(
self, session, key, artifact, uid, iter=None, tag="", project="",
):
pass
@abstractmethod
def read_artifact(self, session, key, tag="", iter=None, project=""):
pass
@abstractmethod
def list_artifacts(
self,
session,
name="",
project="",
tag="",
labels=None,
since=None,
until=None,
kind=None,
category: schemas.ArtifactCategories = None,
iter: int = None,
best_iteration: bool = False,
):
pass
@abstractmethod
def del_artifact(self, session, key, tag="", project=""):
pass
@abstractmethod
def del_artifacts(self, session, name="", project="", tag="", labels=None):
pass
# TODO: Make these abstract once filedb implements them
def store_metric(
self, session, uid, project="", keyvals=None, timestamp=None, labels=None
):
warnings.warn("store_metric not implemented yet")
def read_metric(self, session, keys, project="", query=""):
warnings.warn("store_metric not implemented yet")
@abstractmethod
def store_function(
self, session, function, name, project="", tag="", versioned=False,
) -> str:
pass
@abstractmethod
def get_function(self, session, name, project="", tag="", hash_key=""):
pass
@abstractmethod
def delete_function(self, session, project: str, name: str):
pass
@abstractmethod
def list_functions(self, session, name=None, project="", tag="", labels=None):
pass
@abstractmethod
def create_schedule(
self,
session,
project: str,
name: str,
kind: schemas.ScheduleKinds,
scheduled_object: Any,
cron_trigger: schemas.ScheduleCronTrigger,
concurrency_limit: int,
labels: Dict = None,
):
pass
@abstractmethod
def update_schedule(
self,
session,
project: str,
name: str,
scheduled_object: Any = None,
cron_trigger: schemas.ScheduleCronTrigger = None,
labels: Dict = None,
last_run_uri: str = None,
concurrency_limit: int = None,
):
pass
@abstractmethod
def list_schedules(
self,
session,
project: str = None,
name: str = None,
labels: str = None,
kind: schemas.ScheduleKinds = None,
) -> List[schemas.ScheduleRecord]:
pass
@abstractmethod
def get_schedule(self, session, project: str, name: str) -> schemas.ScheduleRecord:
pass
@abstractmethod
def delete_schedule(self, session, project: str, name: str):
pass
@abstractmethod
def delete_schedules(self, session, project: str):
pass
@abstractmethod
def generate_projects_summaries(
self, session, projects: List[str]
) -> List[schemas.ProjectSummary]:
pass
@abstractmethod
def delete_project_related_resources(self, session, name: str):
pass
@abstractmethod
def verify_project_has_no_related_resources(self, session, name: str):
pass
@abstractmethod
# adding **kwargs to leave room for other projects store implementations see mlrun.api.crud.projects.delete_project
# for explanations
def is_project_exists(self, session, name: str, **kwargs):
pass
@abstractmethod
def list_projects(
self,
session,
owner: str = None,
format_: schemas.ProjectsFormat = schemas.ProjectsFormat.full,
labels: List[str] = None,
state: schemas.ProjectState = None,
names: Optional[List[str]] = None,
) -> schemas.ProjectsOutput:
pass
@abstractmethod
def get_project(
self, session, name: str = None, project_id: int = None
) -> schemas.Project:
pass
@abstractmethod
async def get_project_resources_counters(
self,
) -> Tuple[
Dict[str, int],
Dict[str, int],
Dict[str, int],
Dict[str, int],
Dict[str, int],
Dict[str, int],
]:
pass
@abstractmethod
def create_project(self, session, project: schemas.Project):
pass
@abstractmethod
def store_project(self, session, name: str, project: schemas.Project):
pass
@abstractmethod
def patch_project(
self,
session,
name: str,
project: dict,
patch_mode: schemas.PatchMode = schemas.PatchMode.replace,
):
pass
@abstractmethod
def delete_project(
self,
session,
name: str,
deletion_strategy: schemas.DeletionStrategy = schemas.DeletionStrategy.default(),
):
pass
@abstractmethod
def create_feature_set(
self, session, project, feature_set: schemas.FeatureSet, versioned=True,
) -> str:
pass
@abstractmethod
def store_feature_set(
self,
session,
project,
name,
feature_set: schemas.FeatureSet,
tag=None,
uid=None,
versioned=True,
always_overwrite=False,
) -> str:
pass
@abstractmethod
def get_feature_set(
self, session, project: str, name: str, tag: str = None, uid: str = None
) -> schemas.FeatureSet:
pass
@abstractmethod
def list_features(
self,
session,
project: str,
name: str = None,
tag: str = None,
entities: List[str] = None,
labels: List[str] = None,
) -> schemas.FeaturesOutput:
pass
@abstractmethod
def list_entities(
self,
session,
project: str,
name: str = None,
tag: str = None,
labels: List[str] = None,
) -> schemas.EntitiesOutput:
pass
@abstractmethod
def list_feature_sets(
self,
session,
project: str,
name: str = None,
tag: str = None,
state: str = None,
entities: List[str] = None,
features: List[str] = None,
labels: List[str] = None,
partition_by: schemas.FeatureStorePartitionByField = None,
rows_per_partition: int = 1,
partition_sort_by: schemas.SortField = None,
partition_order: schemas.OrderType = schemas.OrderType.desc,
) -> schemas.FeatureSetsOutput:
pass
@abstractmethod
def list_feature_sets_tags(
self, session, project: str,
) -> List[Tuple[str, str, str]]:
"""
:return: a list of Tuple of (project, feature_set.name, tag)
"""
pass
@abstractmethod
def patch_feature_set(
self,
session,
project,
name,
feature_set_patch: dict,
tag=None,
uid=None,
patch_mode: schemas.PatchMode = schemas.PatchMode.replace,
) -> str:
pass
@abstractmethod
def delete_feature_set(self, session, project, name, tag=None, uid=None):
pass
@abstractmethod
def create_feature_vector(
self, session, project, feature_vector: schemas.FeatureVector, versioned=True,
) -> str:
pass
@abstractmethod
def get_feature_vector(
self, session, project: str, name: str, tag: str = None, uid: str = None
) -> schemas.FeatureVector:
pass
@abstractmethod
def list_feature_vectors(
self,
session,
project: str,
name: str = None,
tag: str = None,
state: str = None,
labels: List[str] = None,
partition_by: schemas.FeatureStorePartitionByField = None,
rows_per_partition: int = 1,
partition_sort_by: schemas.SortField = None,
partition_order: schemas.OrderType = schemas.OrderType.desc,
) -> schemas.FeatureVectorsOutput:
pass
@abstractmethod
def list_feature_vectors_tags(
self, session, project: str,
) -> List[Tuple[str, str, str]]:
"""
:return: a list of Tuple of (project, feature_vector.name, tag)
"""
pass
@abstractmethod
def store_feature_vector(
self,
session,
project,
name,
feature_vector: schemas.FeatureVector,
tag=None,
uid=None,
versioned=True,
always_overwrite=False,
) -> str:
pass
@abstractmethod
def patch_feature_vector(
self,
session,
project,
name,
feature_vector_update: dict,
tag=None,
uid=None,
patch_mode: schemas.PatchMode = schemas.PatchMode.replace,
) -> str:
pass
@abstractmethod
def delete_feature_vector(
self, session, project, name, tag=None, uid=None,
):
pass
def list_artifact_tags(self, session, project):
return []
def create_marketplace_source(
self, session, ordered_source: schemas.IndexedMarketplaceSource
):
pass
def store_marketplace_source(
self, session, name, ordered_source: schemas.IndexedMarketplaceSource
):
pass
def list_marketplace_sources(
self, session
) -> List[schemas.IndexedMarketplaceSource]:
pass
def delete_marketplace_source(self, session, name):
pass
def get_marketplace_source(self, session, name) -> schemas.IndexedMarketplaceSource:
pass
|
py | 1a3b051e3cb272b857cb77efcdd0ef9b70dfe7b1 | # Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, Any, Optional
from prometheus_client import Counter
from synapse.logging.context import make_deferred_yieldable
from synapse.util import json_decoder, json_encoder
if TYPE_CHECKING:
from synapse.server import HomeServer
set_counter = Counter(
"synapse_external_cache_set",
"Number of times we set a cache",
labelnames=["cache_name"],
)
get_counter = Counter(
"synapse_external_cache_get",
"Number of times we get a cache",
labelnames=["cache_name", "hit"],
)
logger = logging.getLogger(__name__)
class ExternalCache:
"""A cache backed by an external Redis. Does nothing if no Redis is
configured.
"""
def __init__(self, hs: "HomeServer"):
self._redis_connection = hs.get_outbound_redis_connection()
def _get_redis_key(self, cache_name: str, key: str) -> str:
return "cache_v1:%s:%s" % (cache_name, key)
def is_enabled(self) -> bool:
"""Whether the external cache is used or not.
It's safe to use the cache when this returns false, the methods will
just no-op, but the function is useful to avoid doing unnecessary work.
"""
return self._redis_connection is not None
async def set(self, cache_name: str, key: str, value: Any, expiry_ms: int) -> None:
"""Add the key/value to the named cache, with the expiry time given."""
if self._redis_connection is None:
return
set_counter.labels(cache_name).inc()
# txredisapi requires the value to be string, bytes or numbers, so we
# encode stuff in JSON.
encoded_value = json_encoder.encode(value)
logger.debug("Caching %s %s: %r", cache_name, key, encoded_value)
return await make_deferred_yieldable(
self._redis_connection.set(
self._get_redis_key(cache_name, key),
encoded_value,
pexpire=expiry_ms,
)
)
async def get(self, cache_name: str, key: str) -> Optional[Any]:
"""Look up a key/value in the named cache."""
if self._redis_connection is None:
return None
result = await make_deferred_yieldable(
self._redis_connection.get(self._get_redis_key(cache_name, key))
)
logger.debug("Got cache result %s %s: %r", cache_name, key, result)
get_counter.labels(cache_name, result is not None).inc()
if not result:
return None
# For some reason the integers get magically converted back to integers
if isinstance(result, int):
return result
return json_decoder.decode(result)
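# --- Illustrative usage sketch (not part of the original file) ---
# A hypothetical caller inside a handler might use the cache as below; the
# cache name, key, value and the surrounding coroutine are assumptions.
async def _example_external_cache_usage(hs: "HomeServer") -> None:
    cache = ExternalCache(hs)
    if not cache.is_enabled():
        return  # no Redis configured; set/get would just no-op
    await cache.set("example_cache", "some-key", {"answer": 42}, expiry_ms=60_000)
    value = await cache.get("example_cache", "some-key")
    logger.debug("cached value: %r", value)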
|
py | 1a3b05d93297931d9a07f0d409ff4332d52b0801 | import logging
from django.contrib import auth
from django.urls import reverse
from django.utils.deprecation import MiddlewareMixin # https://stackoverflow.com/questions/42232606/django
# -exception-middleware-typeerror-object-takes-no-parameters
from django.conf import settings
from ...permissions import is_authenticated
from ...utils import set_session_key
from ... import app_settings
logger = logging.getLogger('django_sso_app')
ADMIN_URL = '/{}'.format(getattr(settings, 'ADMIN_URL', 'admin/'))
PROFILE_INCOMPLETE_ENABLED_PATHS = [
reverse('javascript-catalog'),
reverse('profile.complete'),
]
USER_TO_SUBSCRIBE_ENABLED_PATHS = PROFILE_INCOMPLETE_ENABLED_PATHS
class DjangoSsoAppAuthenticationBaseMiddleware(MiddlewareMixin):
"""
See django.contrib.auth.middleware.RemoteUserMiddleware.
"""
# Name of request header to grab username from. This will be the key as
# used in the request.META dictionary, i.e. the normalization of headers to
# all uppercase and the addition of "HTTP_" prefix apply.
consumer_id_header = app_settings.APIGATEWAY_CONSUMER_CUSTOM_ID_HEADER
anonymous_consumer_custom_ids = app_settings.APIGATEWAY_ANONYMOUS_CONSUMER_IDS
anonymous_consumer_header = app_settings.APIGATEWAY_ANONYMOUS_CONSUMER_HEADER
anonymous_consumer_header_value = app_settings.APIGATEWAY_ANONYMOUS_CONSUMER_HEADER_VALUE
@staticmethod
def _clear_response_jwt(request):
set_session_key(request, '__dssoa__clear_response_jwt', True)
@staticmethod
def _remove_invalid_user(request):
"""
Removes the current authenticated user in the request which is invalid.
"""
if is_authenticated(request.user):
logger.info('removing invalid user "{}"'.format(request.user))
auth.logout(request)
@staticmethod
def _is_admin_path(request):
return request.path.startswith(ADMIN_URL)
@staticmethod
def _request_path_is_disabled_for_incomplete_users(request):
request_path = request.path
return (request_path not in PROFILE_INCOMPLETE_ENABLED_PATHS) and \
not (request_path.startswith('/static/')) and \
not (request_path.startswith('/media/')) and \
not (request_path.startswith('/logout/')) and \
not (request_path.startswith('/password/reset/')) and \
not (request_path.startswith('/confirm-email/')) and \
not (request_path.startswith('/__debug__/')) and \
not (request_path.startswith('/api/v1/')) # keep api endpoints enabled
@staticmethod
def _request_path_is_disabled_for_users_to_subscribe(request):
request_path = request.path
return (request_path not in USER_TO_SUBSCRIBE_ENABLED_PATHS) and \
not (request_path.startswith('/static/')) and \
not (request_path.startswith('/media/')) and \
not (request_path.startswith('/logout/')) and \
not (request_path.startswith('/password/reset/')) and \
not (request_path.startswith('/confirm-email/')) and \
not (request_path.startswith('/api/v1/')) # keep api endpoints enabled
def process_request(self, request):
raise NotImplementedError('process_request')
def process_response(self, request, response):
raise NotImplementedError('process_response')
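# --- Illustrative sketch (not part of the original file) ---
# A hypothetical concrete middleware built on the base class above; how it
# inspects the consumer header and what it does with anonymous consumers are
# assumptions, not the package's actual backend behaviour.
class ExampleHeaderMiddleware(DjangoSsoAppAuthenticationBaseMiddleware):
    def process_request(self, request):
        consumer_id = request.META.get(self.consumer_id_header)
        if not consumer_id or consumer_id in self.anonymous_consumer_custom_ids:
            # no trusted consumer id: make sure no stale authenticated user survives
            self._remove_invalid_user(request)
        return None
    def process_response(self, request, response):
        return response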
|
py | 1a3b06540f2911ea8cdd67a75499f7f0910346fe |
import sys, os.path
sys.path.insert( 0, os.path.join( os.path.dirname( __file__ ), '..' ) )
import options
import builtin_options
del sys.path[0]
_Options = options.Options
_StrOption = options.StrOption
_IntOption = options.IntOption
_PathOption = options.PathOption
_EnumOption = options.EnumOption
_VersionOption = options.VersionOption
_BoolOption = options.BoolOption
_BuiltinOptions = builtin_options.BuiltinOptions
_EnvLinkedOptions = options.EnvLinkedOptions
options = _Options()
bool_opt = _BoolOption( 'false' )
options.bool_opt = bool_opt
options.bool_opt = bool_opt
options.bool_opt = bool_opt
options.bool_opt = 'true'
assert bool_opt == 'true'
a = 1
def a_condition2( options ):
global a
if a == 2:
return 1
else:
return 0
bool_opt.SetIf( a_condition2, 'false' )
def a_condition3( options ):
global a
if a == 3:
return 1
else:
return 0
bool_opt.SetIf( a_condition3, 'false' )
assert bool_opt == 'true'
a = 2
assert bool_opt == 'false'
a = 3
assert bool_opt == 'false'
a = 4
assert bool_opt == 'true'
#//---------------------------------------------------------------------------//
int_opt = _IntOption( 0, min = 0, max = 5, is_list = 1 )
options.int_opt = int_opt
options.int_opt = int_opt
int_opt.Append( 3 );
int_opt.Append( 5 );
int_opt.AppendIf( a_condition2, 4 )
int_opt.AppendIf( a_condition3, 1 )
int_opt.RemoveIf( a_condition3, 3 )
assert 3 in int_opt
assert 5 in int_opt
assert 4 not in int_opt
a = 2
assert 3 in int_opt
assert 4 in int_opt
a = 3
assert 1 in int_opt
assert 4 not in int_opt
assert 3 not in int_opt
#//===========================================================================//
#//===========================================================================//
opt = _BuiltinOptions()
assert opt.build_variant == 'debug'
opt.optim = 'size'
opt.If().optim['size'].debug_symbols = 'off'
assert opt.debug_symbols == 'off'
opt.If().optim['off'].inline = 'off'
c = opt.If().optim['size']
c.debug_symbols['off'].inline = 'on'
assert opt.inline == 'on'
c.lint = 'single'
assert opt.lint == 'single'
opt.If().optim['speed'].inline = 'full'
#~ opt.If().warn.ge(3).warn_err = 'true'
#~ opt.If().warn.ge(2).warn.le(4).warn_err = 'true'
#~ opt.If().warn(2,4).warn_err = 'true'
opt.If().warn_err['true'].warn = 4
opt.If().warn_err.eq('true').warn = 4
opt.If()['warn_err'].eq('true').warn = 4
opt.optim = 'off'
assert opt.debug_symbols == 'on'
assert opt.inline == 'off'
assert opt.lint == 'off'
opt.optim = 'size'
assert opt.inline == 'on'
assert opt.lint == 'single'
opt.optim = 'speed'
assert opt.inline == 'full'
assert opt.lint == 'off'
opt.defines = _StrOption( is_list = 1 )
opt.If().optim.ne('off').defines += 'RELEASE'
opt.If().optim['off'].defines += [ 'DEBUG' ]
opt.defines += 'PROF'
opt.optim = 'off'
assert opt.defines == [ 'DEBUG', 'PROF' ]
opt.optim = 'size'
assert opt.defines == [ 'RELEASE', 'PROF' ]
opt.optim = 'speed'
assert opt.defines == [ 'RELEASE', 'PROF' ]
opt.path = _PathOption( [], is_list = 1 )
opt.path += '../test/../src;test/..'
opt.path += '../test/../src/lib'
assert opt.path == [ '../src', '.', '../src/lib' ]
opt.build_variant = 'debug'
env1 = opt.LinkToKW( ccflags = '-O9', defines = 'USE_LOCAL_1', build_variant = 'release' )
env2 = opt.LinkToKW( ccflags = '-g', defines = 'USE_LOCAL_2' )
assert '-O9' not in opt.ccflags
assert '-g' not in opt.ccflags
assert 'USE_LOCAL_1' not in opt.defines
assert 'USE_LOCAL_2' not in opt.defines
assert opt.build_variant == 'debug'
env = env1.copy()
env.update( env2 )
env['_AQL_OPTIONS'] = opt
env_opt = _EnvLinkedOptions( env )
assert '-O9' in env_opt.ccflags
assert '-g' in env_opt.ccflags
assert 'USE_LOCAL_1' in env_opt.defines
assert 'USE_LOCAL_2' in env_opt.defines
assert env_opt.build_variant == 'release'
#~ assert opt.build_variant == 'debug'
#//===========================================================================//
opt.runtime_linking = 'shared'
opt.lint_passes = 3
env1 = {}
opt.LinkToEnv( env1 )
opt.runtime_linking = 'static'
assert opt.runtime_linking == 'shared'
env2 = env1.copy()
opt.LinkToEnv( env2 )
opt.lint_passes = 4
assert opt.runtime_linking == 'shared'
assert opt.lint_passes == 3
opt.UnlinkToEnv()
assert opt.runtime_linking == 'shared'
assert opt.lint_passes == 3
env2['_AQL_OPTIONS'] = opt
env_opt = _EnvLinkedOptions( env2 )
assert env_opt.runtime_linking == 'static'
assert env_opt.lint_passes == 4
assert opt.runtime_linking == 'shared'
assert opt.lint_passes == 3
opt.UnlinkToEnv()
assert opt.runtime_linking == 'shared'
assert opt.lint_passes == 3
#//===========================================================================//
assert '-O9' not in opt.ccflags
assert '-g' not in opt.ccflags
assert 'USE_LOCAL_1' not in opt.defines
assert 'USE_LOCAL_2' not in opt.defines
assert opt.build_variant == 'debug'
opt.defines += opt.build_variant
print opt.defines.Get()
assert opt.defines == 'RELEASE PROF debug'
opt.build_variant = 'release'
assert opt.defines == 'RELEASE PROF release_speed'
print opt.build_dir
assert str(opt.build_dir) == 'build//release_speed'
opt.cc_name = 'gcc'
opt.cc_ver = '4.2.1'
opt.target_os = 'linux'
opt.target_machine = 'arm'
assert str(opt.build_dir) == 'build/linux_arm_gcc-4.2.1/release_speed'
opt.target_os = 'linux'
opt.target_cpu = 'arm1136j-s'
assert str(opt.build_dir) == 'build/linux_arm-arm1136j-s_gcc-4.2.1/release_speed'
def BuildDir( options, bv_id ):
import os.path
bv = options.build_variant
bv.Set( bv_id )
build_dir = os.path.normpath( str( options.build_dir ) )
bv.Undo()
return build_dir
print opt.build_variant
print BuildDir( opt, 'debug' )
print BuildDir( opt, 'release_speed' )
print BuildDir( opt, 'release_size' )
print opt.build_variant
|
py | 1a3b0726792888d152edc1ac528ec7559542b36f | from ripsaw.genetics.selection import roulette
from ripsaw.genetics.crossovers import point_crossover
from ripsaw.genetics.genotype import Chromosome
from ripsaw.util.logging import Logger
import math
import time
import logging
import multiprocessing as mp
from datetime import datetime
class Optimiser:
def __init__(self, population_size, chromosome_function,
num_xovers, num_xover_points,
p_gene_mutate, p_total_mutate,
cwd, parallel_exe, exe_file_path, target_dir_path,
input_file_path, region_identifier,
output_score_func, output_file_path,
output_log_func, output_log_file,
target_score=math.inf, num_epochs=math.inf, max_time=math.inf,
population=list()):
# Object parameterisation
self.population_size = population_size
self.chromosome_function = chromosome_function
self.num_xovers = num_xovers
self.num_xover_points = num_xover_points
self.p_gene_mutate = p_gene_mutate
self.p_total_mutate = p_total_mutate
self.cwd = cwd
self.exe_file_path = exe_file_path
self.parallel_exe = parallel_exe
self.target_dir_path = target_dir_path
self.input_file_path = input_file_path
self.region_identifier = region_identifier
self.output_score_func = output_score_func
self.output_file_path = output_file_path
self.output_log_func = output_log_func
self.output_log_file = output_log_file
self.target_score = target_score
self.num_epochs = num_epochs
self.max_time = max_time
self.population = population
# Internal Fields
self.epoch_number = None
self.logger = None
self.best_score = None
self.mean_score = None
self.std_dev_score = None
self.internal_dict = {"epoch_num": 0}
@staticmethod
def evaluate(chromosome):
""" For the purposes of multiprocessing, this is a mapped function for a list of chromosomes."""
chromosome.evaluate()
return chromosome
@staticmethod
def sort_chromosome_key(chromosome):
""" Designed to put None before lowest fitness. None at the end was interfering with immortal logic on sort."""
fitness = chromosome.get_fitness()
if fitness is None:
return -math.inf
else:
return fitness
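    # e.g. a chromosome whose fitness is still None is keyed as -inf, so it sorts before every
    # evaluated chromosome instead of landing at the end of the list.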
@staticmethod
def stopping_criteria_met(start_time, max_time, current_epoch, max_epochs, best_score, target_score):
""" We go through some stopping criteria. If any are met, True is returned."""
if best_score >= target_score:
return True
if time.time() - start_time > max_time:
return True
if current_epoch >= max_epochs:
return True
return False
def epoch(self, chromosomes=list()):
""" Going through the Evaluate -> Selection -> Crossover -> Mutation process once as an epoch."""
logging.debug("At start of epoch - Chromo fitness in order:" +
str([chromosome.fitness for chromosome in chromosomes]))
# 1. Generate new chromosomes for each missing
to_generate = self.population_size - len(chromosomes)
for _ in range(to_generate):
chromosomes.append(Chromosome(chromosome_function=self.chromosome_function))
# 2. Evaluate every chromosome which doesn't have a fitness.
for chromosome in chromosomes:
chromosome.setup(self.cwd, self.exe_file_path, self.target_dir_path,
self.input_file_path, self.region_identifier,
self.output_score_func, self.output_file_path,
self.output_log_func, self.output_log_file,
self.internal_dict
)
if self.parallel_exe:
with mp.Pool(int(mp.cpu_count())-2) as p:
chromosomes = p.map(Optimiser.evaluate, chromosomes)
else:
for chromosome in chromosomes:
chromosome.evaluate()
# 3. Logging
for chromosome in chromosomes:
optimiser_log = [self.internal_dict["epoch_num"]]
optimiser_log.extend(chromosome.get_log_row())
self.logger.log_to_csv(optimiser_log)
chromosomes.sort(key=Optimiser.sort_chromosome_key)
logging.debug("Before Crossover - Chromo fitness in order:" +
str([chromosome.fitness for chromosome in chromosomes]))
scores = [chromosome.get_fitness() for chromosome in chromosomes]
self.best_score = max(scores)
        self.mean_score = sum(scores) / self.population_size
self.std_dev_score = sum([abs(self.mean_score - score) for score in scores]) / self.population_size
# 4. Crossovers
selection = roulette(population=chromosomes, num_samples=self.num_xovers)
offspring = point_crossover(chromosomes=selection, num_points=self.num_xover_points)
chromosomes = chromosomes[len(offspring):] # Cull the weakest.
chromosomes.extend(offspring)
# 5. Mutate
chromosomes.sort(key=Optimiser.sort_chromosome_key)
logging.debug("After Crossover - Chromo fitness in order:" +
str([chromosome.fitness for chromosome in chromosomes]))
for i, chromosome in enumerate(chromosomes):
            if chromosome.fitness != self.best_score:  # Protect the current best (the "immortal") from mutation.
chromosome.mutate(p_gene_mutate=self.p_gene_mutate,
p_total_mutate=self.p_total_mutate)
else:
logging.debug("Immortal protected, fitness:" + str(chromosomes[i].get_fitness()))
return chromosomes
def run(self):
""" In Charge of running epochs until a stopping criteria is met."""
self.best_score = -math.inf
start_time_s = time.time()
start_time_dt = datetime.now()
start_time_hhmmss = start_time_dt.strftime("%H:%M:%S")
self.logger = Logger()
print("Starting the optimiser...")
print("\tStart Time: ", start_time_hhmmss)
while Optimiser.stopping_criteria_met(start_time=start_time_s, max_time=self.max_time,
current_epoch=self.internal_dict["epoch_num"], max_epochs=self.num_epochs,
best_score=self.best_score, target_score=self.target_score) is not True:
self.population = self.epoch(chromosomes=self.population)
self.internal_dict["epoch_num"] += 1
current_time_dt = datetime.now()
print("Epoch", str(self.internal_dict["epoch_num"]), "done.")
print("\tBest score: ", self.best_score)
print("\tAverage Score: ", self.mean_score)
print("\tStandard Deviation: ", self.std_dev_score)
print("\tTime elapsed: ", current_time_dt - start_time_dt)
|
py | 1a3b0787fc921827c71bfc1793b4bcb183141860 | #!/usr/bin/env python
# Python benchmark for gamq
import time
import socket
import threading
# Global variables
HostAddress = "localhost"
HostPort = 48879
Protocol = ""
AckMessages = False
NumberOfMessages = 0
# Helper function to check if a number is valid
def isNumber(givenObject):
try:
int(givenObject)
return True
except:
return False
def getSocket(protocol):
if protocol == "tcp":
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
elif protocol == "udp":
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
else:
print "Invalid protocol: {}".format(protocol)
exit(-1)
s.connect((HostAddress, HostPort))
return s
def writeThread():
s = getSocket(Protocol)
if AckMessages:
s.sendall("setack on\n")
startTime = time.clock()
for i in range(0, int(NumberOfMessages), 1):
s.sendall("pub abc\n")
s.sendall("{}\n".format(i))
s.sendall(".\r\n")
if AckMessages:
response = s.recv(8)
if response[:6] != "PUBACK":
print "Error whilst publishing {}, got response: {}".format(i, response)
endTime = time.clock()
s.close()
print "Took {} seconds to write {} messages".format((endTime - startTime), NumberOfMessages)
def readThread():
s = getSocket("tcp")
startTime = time.clock()
s.sendall("sub abc\n")
for i in range(0, int(NumberOfMessages), 1):
response = ""
while response[-3:] != ".\r\n":
response += s.recv(1)
response = response.translate(None, ".\r\n")
        if int(response) != int(i):
print "Expected {}, got {}".format(i, response)
endTime = time.clock()
s.close()
print "Took {} seconds to read {} messages".format((endTime - startTime), NumberOfMessages)
def readConfig():
global AckMessages, NumberOfMessages, HostAddress, HostPort, Protocol
# Get benchmark parameters
protocol = raw_input("Protocol to use (tcp/udp): ")
if protocol not in ["tcp", "udp"]:
print "Invalid protocol"
exit(-1)
else:
Protocol = protocol
numberOfMessages = raw_input("Number of messages to send: ")
if not isNumber(numberOfMessages):
print "Invalid number"
exit(-1)
else:
NumberOfMessages = int(numberOfMessages)
ackMessages = raw_input("Ack messages (y/n): ")
AckMessages = (ackMessages == "y")
hostAddress = raw_input("Host to connect to: ")
if hostAddress == "":
print "Defaulting to localhost"
else:
HostAddress = hostAddress
hostPort = raw_input("Port to connect to: ")
if hostPort == "":
print "Defaulting to 48879"
elif isNumber(hostPort):
        HostPort = int(hostPort)  # connect() requires an integer port; raw_input returns a string
else:
print "Invalid number"
exit(-1)
readConfig()
writeThread = threading.Thread(target=writeThread)
readThread = threading.Thread(target=readThread)
readThread.daemon = True
writeThread.daemon = True
writeThread.start()
readThread.start()
while threading.active_count() > 1:
time.sleep(1)
|
py | 1a3b0795754e381ffa624a94c2e20746cd75c549 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from botbuilder.ai.luis import LuisApplication, LuisRecognizer
from botbuilder.core import Recognizer, RecognizerResult, TurnContext
class FlightBookingRecognizer(Recognizer):
def __init__(self, configuration: object):
self._recognizer = None
luis_is_configured = (
configuration.luis_app_id
and configuration.luis_api_key
and configuration.luis_api_host_name
)
if luis_is_configured:
luis_application = LuisApplication(
configuration.luis_app_id,
configuration.luis_api_key,
"https://" + configuration.luis_api_host_name,
)
self._recognizer = LuisRecognizer(luis_application)
@property
def is_configured(self) -> bool:
# Returns true if luis is configured in the config.py and initialized.
return self._recognizer is not None
async def recognize(self, turn_context: TurnContext) -> RecognizerResult:
return await self._recognizer.recognize(turn_context)
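# Minimal wiring sketch (not part of the original sample): the config object below is a stand-in
# for the bot's settings class and only needs the three attributes the recognizer reads. With the
# placeholder empty values, LUIS is never initialised and is_configured stays False.
if __name__ == "__main__":
    from types import SimpleNamespace
    placeholder_config = SimpleNamespace(
        luis_app_id="",          # placeholder -- supply a real LUIS app id to enable recognition
        luis_api_key="",
        luis_api_host_name="",   # e.g. "westus.api.cognitive.microsoft.com" (illustrative)
    )
    recognizer = FlightBookingRecognizer(placeholder_config)
    assert recognizer.is_configured is False  # no credentials supplied, so no LuisRecognizer was built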
|
py | 1a3b07a414398df522c7309d9e5663f8aaf3e81c | # Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This script is intended for use as a GYP_GENERATOR. It takes as input (by way of
the generator flag config_path) the path of a json file that dictates the files
and targets to search for. The following keys are supported:
files: list of paths (relative) of the files to search for.
targets: list of targets to search for. The target names are unqualified.
The following is output:
error: only supplied if there is an error.
targets: the set of targets passed in via targets that either directly or
indirectly depend upon the set of paths supplied in files.
build_targets: minimal set of targets that directly depend on the changed
files and need to be built. The expectation is this set of targets is passed
into a build step.
status: outputs one of three values: none of the supplied files were found,
one of the include files changed so that it should be assumed everything
changed (in this case targets and build_targets are not output) or at
least one file was found.
invalid_targets: list of supplied targets that were not found.
If the generator flag analyzer_output_path is specified, output is written
there. Otherwise output is written to stdout.
"""
import gyp.common
import gyp.ninja_syntax as ninja_syntax
import json
import os
import posixpath
import sys
debug = False
found_dependency_string = 'Found dependency'
no_dependency_string = 'No dependencies'
# Status when it should be assumed that everything has changed.
all_changed_string = 'Found dependency (all)'
# MatchStatus is used indicate if and how a target depends upon the supplied
# sources.
# The target's sources contain one of the supplied paths.
MATCH_STATUS_MATCHES = 1
# The target has a dependency on another target that contains one of the
# supplied paths.
MATCH_STATUS_MATCHES_BY_DEPENDENCY = 2
# The target's sources weren't in the supplied paths and none of the target's
# dependencies depend upon a target that matched.
MATCH_STATUS_DOESNT_MATCH = 3
# The target doesn't contain the source, but the dependent targets have not yet
# been visited to determine a more specific status yet.
MATCH_STATUS_TBD = 4
generator_supports_multiple_toolsets = gyp.common.CrossCompileRequested()
generator_wants_static_library_dependencies_adjusted = False
generator_default_variables = {
}
for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR',
'LIB_DIR', 'SHARED_LIB_DIR']:
generator_default_variables[dirname] = '!!!'
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
'CONFIGURATION_NAME']:
generator_default_variables[unused] = ''
def _ToGypPath(path):
"""Converts a path to the format used by gyp."""
if os.sep == '\\' and os.altsep == '/':
return path.replace('\\', '/')
return path
def _ResolveParent(path, base_path_components):
"""Resolves |path|, which starts with at least one '../'. Returns an empty
string if the path shouldn't be considered. See _AddSources() for a
description of |base_path_components|."""
depth = 0
while path.startswith('../'):
depth += 1
path = path[3:]
# Relative includes may go outside the source tree. For example, an action may
# have inputs in /usr/include, which are not in the source tree.
if depth > len(base_path_components):
return ''
if depth == len(base_path_components):
return path
return '/'.join(base_path_components[0:len(base_path_components) - depth]) + \
'/' + path
def _AddSources(sources, base_path, base_path_components, result):
"""Extracts valid sources from |sources| and adds them to |result|. Each
source file is relative to |base_path|, but may contain '..'. To make
resolving '..' easier |base_path_components| contains each of the
directories in |base_path|. Additionally each source may contain variables.
Such sources are ignored as it is assumed dependencies on them are expressed
and tracked in some other means."""
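  # For example, with base_path 'foo/bar/' (components ['foo', 'bar']) a plain 'a.cc' becomes
  # 'foo/bar/a.cc', '../baz.cc' resolves to 'foo/baz.cc', and '$(SomeVar)/x.cc' is skipped below.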
# NOTE: gyp paths are always posix style.
for source in sources:
if not len(source) or source.startswith('!!!') or source.startswith('$'):
continue
# variable expansion may lead to //.
org_source = source
source = source[0] + source[1:].replace('//', '/')
if source.startswith('../'):
source = _ResolveParent(source, base_path_components)
if len(source):
result.append(source)
continue
result.append(base_path + source)
if debug:
print 'AddSource', org_source, result[len(result) - 1]
def _ExtractSourcesFromAction(action, base_path, base_path_components,
results):
if 'inputs' in action:
_AddSources(action['inputs'], base_path, base_path_components, results)
def _ToLocalPath(toplevel_dir, path):
"""Converts |path| to a path relative to |toplevel_dir|."""
if path == toplevel_dir:
return ''
if path.startswith(toplevel_dir + '/'):
return path[len(toplevel_dir) + len('/'):]
return path
def _ExtractSources(target, target_dict, toplevel_dir):
# |target| is either absolute or relative and in the format of the OS. Gyp
# source paths are always posix. Convert |target| to a posix path relative to
# |toplevel_dir_|. This is done to make it easy to build source paths.
base_path = posixpath.dirname(_ToLocalPath(toplevel_dir, _ToGypPath(target)))
base_path_components = base_path.split('/')
# Add a trailing '/' so that _AddSources() can easily build paths.
if len(base_path):
base_path += '/'
if debug:
print 'ExtractSources', target, base_path
results = []
if 'sources' in target_dict:
_AddSources(target_dict['sources'], base_path, base_path_components,
results)
# Include the inputs from any actions. Any changes to these affect the
# resulting output.
if 'actions' in target_dict:
for action in target_dict['actions']:
_ExtractSourcesFromAction(action, base_path, base_path_components,
results)
if 'rules' in target_dict:
for rule in target_dict['rules']:
_ExtractSourcesFromAction(rule, base_path, base_path_components, results)
return results
class Target(object):
"""Holds information about a particular target:
deps: set of Targets this Target depends upon. This is not recursive, only the
direct dependent Targets.
match_status: one of the MatchStatus values.
back_deps: set of Targets that have a dependency on this Target.
visited: used during iteration to indicate whether we've visited this target.
This is used for two iterations, once in building the set of Targets and
again in _GetBuildTargets().
name: fully qualified name of the target.
requires_build: True if the target type is such that it needs to be built.
See _DoesTargetTypeRequireBuild for details.
added_to_compile_targets: used when determining if the target was added to the
set of targets that needs to be built.
in_roots: true if this target is a descendant of one of the root nodes.
is_executable: true if the type of target is executable."""
def __init__(self, name):
self.deps = set()
self.match_status = MATCH_STATUS_TBD
self.back_deps = set()
self.name = name
# TODO(sky): I don't like hanging this off Target. This state is specific
# to certain functions and should be isolated there.
self.visited = False
self.requires_build = False
self.added_to_compile_targets = False
self.in_roots = False
self.is_executable = False
class Config(object):
"""Details what we're looking for
files: set of files to search for
targets: see file description for details."""
def __init__(self):
self.files = []
self.targets = set()
def Init(self, params):
"""Initializes Config. This is a separate method as it raises an exception
if there is a parse error."""
generator_flags = params.get('generator_flags', {})
config_path = generator_flags.get('config_path', None)
if not config_path:
return
try:
f = open(config_path, 'r')
config = json.load(f)
f.close()
except IOError:
raise Exception('Unable to open file ' + config_path)
except ValueError as e:
raise Exception('Unable to parse config file ' + config_path + str(e))
if not isinstance(config, dict):
raise Exception('config_path must be a JSON file containing a dictionary')
self.files = config.get('files', [])
self.targets = set(config.get('targets', []))
def _WasBuildFileModified(build_file, data, files, toplevel_dir):
"""Returns true if the build file |build_file| is either in |files| or
one of the files included by |build_file| is in |files|. |toplevel_dir| is
the root of the source tree."""
if _ToLocalPath(toplevel_dir, _ToGypPath(build_file)) in files:
if debug:
print 'gyp file modified', build_file
return True
# First element of included_files is the file itself.
if len(data[build_file]['included_files']) <= 1:
return False
for include_file in data[build_file]['included_files'][1:]:
# |included_files| are relative to the directory of the |build_file|.
rel_include_file = \
_ToGypPath(gyp.common.UnrelativePath(include_file, build_file))
if _ToLocalPath(toplevel_dir, rel_include_file) in files:
if debug:
print 'included gyp file modified, gyp_file=', build_file, \
'included file=', rel_include_file
return True
return False
def _GetOrCreateTargetByName(targets, target_name):
"""Creates or returns the Target at targets[target_name]. If there is no
Target for |target_name| one is created. Returns a tuple of whether a new
Target was created and the Target."""
if target_name in targets:
return False, targets[target_name]
target = Target(target_name)
targets[target_name] = target
return True, target
def _DoesTargetTypeRequireBuild(target_dict):
"""Returns true if the target type is such that it needs to be built."""
# If a 'none' target has rules or actions we assume it requires a build.
return target_dict['type'] != 'none' or \
target_dict.get('actions') or target_dict.get('rules')
def _GenerateTargets(data, target_list, target_dicts, toplevel_dir, files,
build_files):
"""Returns a tuple of the following:
. A dictionary mapping from fully qualified name to Target.
. A list of the targets that have a source file in |files|.
  . Set of root Targets reachable from the files |build_files|.
This sets the |match_status| of the targets that contain any of the source
files in |files| to MATCH_STATUS_MATCHES.
|toplevel_dir| is the root of the source tree."""
# Maps from target name to Target.
targets = {}
# Targets that matched.
matching_targets = []
# Queue of targets to visit.
targets_to_visit = target_list[:]
# Maps from build file to a boolean indicating whether the build file is in
# |files|.
build_file_in_files = {}
# Root targets across all files.
roots = set()
# Set of Targets in |build_files|.
build_file_targets = set()
while len(targets_to_visit) > 0:
target_name = targets_to_visit.pop()
created_target, target = _GetOrCreateTargetByName(targets, target_name)
if created_target:
roots.add(target)
elif target.visited:
continue
target.visited = True
target.requires_build = _DoesTargetTypeRequireBuild(
target_dicts[target_name])
target.is_executable = target_dicts[target_name]['type'] == 'executable'
build_file = gyp.common.ParseQualifiedTarget(target_name)[0]
if not build_file in build_file_in_files:
build_file_in_files[build_file] = \
_WasBuildFileModified(build_file, data, files, toplevel_dir)
if build_file in build_files:
build_file_targets.add(target)
# If a build file (or any of its included files) is modified we assume all
# targets in the file are modified.
if build_file_in_files[build_file]:
print 'matching target from modified build file', target_name
target.match_status = MATCH_STATUS_MATCHES
matching_targets.append(target)
else:
sources = _ExtractSources(target_name, target_dicts[target_name],
toplevel_dir)
for source in sources:
if source in files:
print 'target', target_name, 'matches', source
target.match_status = MATCH_STATUS_MATCHES
matching_targets.append(target)
break
# Add dependencies to visit as well as updating back pointers for deps.
for dep in target_dicts[target_name].get('dependencies', []):
targets_to_visit.append(dep)
created_dep_target, dep_target = _GetOrCreateTargetByName(targets, dep)
if not created_dep_target:
roots.discard(dep_target)
target.deps.add(dep_target)
dep_target.back_deps.add(target)
return targets, matching_targets, roots & build_file_targets
def _GetUnqualifiedToTargetMapping(all_targets, to_find):
"""Returns a mapping (dictionary) from unqualified name to Target for all the
Targets in |to_find|."""
result = {}
if not to_find:
return result
to_find = set(to_find)
for target_name in all_targets.keys():
extracted = gyp.common.ParseQualifiedTarget(target_name)
if len(extracted) > 1 and extracted[1] in to_find:
to_find.remove(extracted[1])
result[extracted[1]] = all_targets[target_name]
if not to_find:
return result
return result
def _DoesTargetDependOn(target):
"""Returns true if |target| or any of its dependencies matches the supplied
  set of paths. This updates |match_status| of the Targets as it recurses.
target: the Target to look for."""
if target.match_status == MATCH_STATUS_DOESNT_MATCH:
return False
if target.match_status == MATCH_STATUS_MATCHES or \
target.match_status == MATCH_STATUS_MATCHES_BY_DEPENDENCY:
return True
for dep in target.deps:
if _DoesTargetDependOn(dep):
target.match_status = MATCH_STATUS_MATCHES_BY_DEPENDENCY
return True
target.match_status = MATCH_STATUS_DOESNT_MATCH
return False
def _GetTargetsDependingOn(possible_targets):
"""Returns the list of Targets in |possible_targets| that depend (either
  directly or indirectly) on the matched targets.
possible_targets: targets to search from."""
found = []
for target in possible_targets:
if _DoesTargetDependOn(target):
found.append(target)
return found
def _AddBuildTargets(target, roots, add_if_no_ancestor, result):
"""Recurses through all targets that depend on |target|, adding all targets
that need to be built (and are in |roots|) to |result|.
roots: set of root targets.
add_if_no_ancestor: If true and there are no ancestors of |target| then add
|target| to |result|. |target| must still be in |roots|.
result: targets that need to be built are added here."""
if target.visited:
return
target.visited = True
target.in_roots = not target.back_deps and target in roots
for back_dep_target in target.back_deps:
_AddBuildTargets(back_dep_target, roots, False, result)
target.added_to_compile_targets |= back_dep_target.added_to_compile_targets
target.in_roots |= back_dep_target.in_roots
# Always add 'executable' targets. Even though they may be built by other
# targets that depend upon them it makes detection of what is going to be
# built easier.
if target.in_roots and \
(target.is_executable or
(not target.added_to_compile_targets and
(add_if_no_ancestor or target.requires_build))):
result.add(target)
target.added_to_compile_targets = True
def _GetBuildTargets(matching_targets, roots):
"""Returns the set of Targets that require a build.
matching_targets: targets that changed and need to be built.
roots: set of root targets in the build files to search from."""
result = set()
for target in matching_targets:
_AddBuildTargets(target, roots, True, result)
return result
def _WriteOutput(params, **values):
"""Writes the output, either to stdout or a file is specified."""
if 'error' in values:
print 'Error:', values['error']
if 'status' in values:
print values['status']
if 'targets' in values:
values['targets'].sort()
print 'Supplied targets that depend on changed files:'
for target in values['targets']:
print '\t', target
if 'invalid_targets' in values:
values['invalid_targets'].sort()
print 'The following targets were not found:'
for target in values['invalid_targets']:
print '\t', target
if 'build_targets' in values:
values['build_targets'].sort()
print 'Targets that require a build:'
for target in values['build_targets']:
print '\t', target
output_path = params.get('generator_flags', {}).get(
'analyzer_output_path', None)
if not output_path:
print json.dumps(values)
return
try:
f = open(output_path, 'w')
f.write(json.dumps(values) + '\n')
f.close()
except IOError as e:
print 'Error writing to output file', output_path, str(e)
def _WasGypIncludeFileModified(params, files):
"""Returns true if one of the files in |files| is in the set of included
files."""
if params['options'].includes:
for include in params['options'].includes:
if _ToGypPath(include) in files:
print 'Include file modified, assuming all changed', include
return True
return False
def _NamesNotIn(names, mapping):
"""Returns a list of the values in |names| that are not in |mapping|."""
return [name for name in names if name not in mapping]
def _LookupTargets(names, mapping):
"""Returns a list of the mapping[name] for each value in |names| that is in
|mapping|."""
return [mapping[name] for name in names if name in mapping]
def CalculateVariables(default_variables, params):
"""Calculate additional variables for use in the build (called by gyp)."""
flavor = gyp.common.GetFlavor(params)
if flavor == 'mac':
default_variables.setdefault('OS', 'mac')
elif flavor == 'win':
default_variables.setdefault('OS', 'win')
# Copy additional generator configuration data from VS, which is shared
# by the Windows Ninja generator.
import gyp.generator.msvs as msvs_generator
generator_additional_non_configuration_keys = getattr(msvs_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(msvs_generator,
'generator_additional_path_sections', [])
gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
else:
operating_system = flavor
if flavor == 'android':
operating_system = 'linux' # Keep this legacy behavior for now.
default_variables.setdefault('OS', operating_system)
def GenerateOutput(target_list, target_dicts, data, params):
"""Called by gyp as the final stage. Outputs results."""
config = Config()
try:
config.Init(params)
if not config.files:
raise Exception('Must specify files to analyze via config_path generator '
'flag')
toplevel_dir = _ToGypPath(os.path.abspath(params['options'].toplevel_dir))
if debug:
print 'toplevel_dir', toplevel_dir
if _WasGypIncludeFileModified(params, config.files):
result_dict = { 'status': all_changed_string,
'targets': list(config.targets) }
_WriteOutput(params, **result_dict)
return
all_targets, matching_targets, roots = _GenerateTargets(
data, target_list, target_dicts, toplevel_dir, frozenset(config.files),
params['build_files'])
unqualified_mapping = _GetUnqualifiedToTargetMapping(all_targets,
config.targets)
invalid_targets = None
if len(unqualified_mapping) != len(config.targets):
invalid_targets = _NamesNotIn(config.targets, unqualified_mapping)
if matching_targets:
search_targets = _LookupTargets(config.targets, unqualified_mapping)
matched_search_targets = _GetTargetsDependingOn(search_targets)
# Reset the visited status for _GetBuildTargets.
for target in all_targets.itervalues():
target.visited = False
build_targets = _GetBuildTargets(matching_targets, roots)
matched_search_targets = [gyp.common.ParseQualifiedTarget(target.name)[1]
for target in matched_search_targets]
build_targets = [gyp.common.ParseQualifiedTarget(target.name)[1]
for target in build_targets]
else:
matched_search_targets = []
build_targets = []
result_dict = { 'targets': matched_search_targets,
'status': found_dependency_string if matching_targets else
no_dependency_string,
'build_targets': build_targets}
if invalid_targets:
result_dict['invalid_targets'] = invalid_targets
_WriteOutput(params, **result_dict)
except Exception as e:
_WriteOutput(params, error=str(e))
|
py | 1a3b07f64e45908cff6d42ab3116b1383c430681 | """
This script shows how to count all files in a specific directory.
"""
from argparse import ArgumentParser
import os
from collections import Counter
parser = ArgumentParser()
parser.add_argument('dir', type=str, help='target path')
args = parser.parse_args()
def get_extention(file_name=None):
"""
    Return the file name extension, or None if the file doesn't have one.
"""
crumbs = file_name.split(".")
crumbs_num = len(crumbs)
if crumbs_num == 1:
return None
else:
return crumbs[-1]
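# e.g. get_extention("report.tar.gz") returns "gz", while get_extention("Makefile") returns None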
def count_files(directory=None):
"""
    Count all files in the directory and return a dict containing the result.
"""
file_extentions = []
none_extentions_num = 0
for _, _, files in os.walk(directory):
for file in files:
extention = get_extention(file)
if extention is None:
none_extentions_num += 1
else:
file_extentions.append(extention)
ext_counter = Counter(file_extentions)
if none_extentions_num != 0:
ext_counter.update({"None": none_extentions_num})
return ext_counter
def main():
"""
    The main entry point.
"""
extention_dict = dict(count_files(args.dir))
total_count = sum(extention_dict.values())
print("Total files:", total_count)
print(extention_dict)
print("Done!")
if __name__ == '__main__':
main()
|
py | 1a3b07fa8a348f3877e0040a39a7481f3b0a6770 | from __future__ import absolute_import, unicode_literals
from django.conf.urls import url
from rankings.views import athlete_redirect_athlete_id_to_slug, athlete_redirect_event_id_to_slug, \
redirect_event_id_to_slug, add_result, report_duplicate, request_competition
from . import views
urlpatterns = [
url(
regex=r'^athlete/(?P<slug>[a-z0-9\-]+)/(?P<event_id>[0-9]+)$',
view=athlete_redirect_event_id_to_slug,
name='athlete-event-redirect'
),
url(
regex=r'^athlete/add-time/(?P<athlete_slug>[a-z0-9\-]+)/',
view=add_result,
name='athlete-add-time'
),
url(
regex=r'^athlete/(?P<slug>[a-z0-9\-]+)/(?P<event_name>[a-z0-9\-()]+)/(?P<pool_length>25|50)$',
view=views.EventByAthlete.as_view(),
name='athlete-event'
),
url(
regex=r'^athlete/(?P<athlete_id>[0-9]+)',
view=athlete_redirect_athlete_id_to_slug,
name='athlete-redirect'
),
url(
regex=r'^athlete/(?P<slug>[a-z0-9\-]+)',
view=views.PersonalBests.as_view(),
name='athlete-overview'
),
url(
regex=r'^events',
view=views.EventOverview.as_view(),
name='event-overview'
),
url(
regex=r'^top/(?P<event_id>[0-9]+)/(?P<gender>\bmen\b|\bwomen\b)',
view=redirect_event_id_to_slug,
name='best-by-event-redirect'
),
url(
regex=r'^top/(?P<event_name>[a-z0-9\-()]+)/(?P<gender>\bmen\b|\bwomen\b)',
view=views.BestByEvent.as_view(),
name='best-by-event'
),
url(
regex=r'^merge-athletes',
view=views.merge_athletes,
name='merge-athletes'
),
url(
regex=r'^list-empty-athletes',
view=views.DeleteEmptyAthletes.as_view(),
name='list-empty-athletes'
),
url(
regex=r'^delete-empty-athletes',
view=views.delete_empty_athletes,
name='delete-empty-athletes'
),
url(
regex=r'^search',
view=views.Search.as_view(),
name='search'
),
url(
regex=r'^competitions/request',
view=request_competition,
name='request-competition'
),
url(
regex=r'^competitions',
view=views.CompetitionListView.as_view(),
name='competition-list'
),
url(
regex=r'^competition/(?P<competition_slug>[a-z0-9\-]+)/(?P<event_name>[a-z0-9\-()]+)/(?P<gender>\bmen\b|\bwomen\b)',
view=views.CompetitionEvent.as_view(),
name='competition-event'
),
url(
regex=r'^competition/(?P<competition_slug>[a-z0-9\-]+)',
view=views.CompetitionOverview.as_view(),
name='competition-overview'
),
url(
regex=r'^report-duplicate',
view=report_duplicate
)
]
|
py | 1a3b08b57b065081231d31303175f2f7d8435367 | import logging
from typing import Collection, Container, Iterable, Optional, Tuple, Type
from eth.abc import AtomicDatabaseAPI
from eth.exceptions import BlockNotFound
from eth_utils import to_dict, toolz
from eth2._utils.ssz import validate_imported_block_unchanged
from eth2.beacon.chains.abc import BaseBeaconChain
from eth2.beacon.chains.exceptions import ParentNotFoundError, SlashableBlockError
from eth2.beacon.constants import FAR_FUTURE_SLOT, GENESIS_SLOT
from eth2.beacon.db.abc import BaseBeaconChainDB
from eth2.beacon.db.chain2 import BeaconChainDB, StateNotFound
from eth2.beacon.fork_choice.abc import BaseForkChoice, BlockSink
from eth2.beacon.state_machines.forks.medalla.eth2fastspec import get_attesting_indices
from eth2.beacon.state_machines.forks.medalla.state_machine import (
MedallaStateMachineFast,
MedallaStateMachineTest,
)
from eth2.beacon.tools.misc.ssz_vector import override_lengths
from eth2.beacon.types.attestations import Attestation
from eth2.beacon.types.blocks import (
BaseBeaconBlock,
BaseSignedBeaconBlock,
BeaconBlock,
SignedBeaconBlock,
)
from eth2.beacon.types.checkpoints import Checkpoint, default_checkpoint
from eth2.beacon.types.states import BeaconState
from eth2.beacon.typing import Root, Slot, ValidatorIndex
from eth2.clock import Tick
StateMachineConfiguration = Tuple[Tuple[Slot, Type[MedallaStateMachineFast]], ...]
def _sm_configuration_has_increasing_slot(
sm_configuration: StateMachineConfiguration
) -> bool:
last_slot = GENESIS_SLOT
for (slot, _state_machine_class) in sm_configuration:
if slot < last_slot:
return False
else:
last_slot = slot
return True
def _validate_sm_configuration(sm_configuration: StateMachineConfiguration) -> None:
if not sm_configuration:
raise ValueError(
"The Chain class cannot be instantiated with an empty `sm_configuration`"
)
if not _sm_configuration_has_increasing_slot(sm_configuration):
raise ValueError(
"The Chain class requires a state machine configuration"
" with monotonically increasing slot number"
)
@to_dict
def _load_state_machines(
sm_configuration: StateMachineConfiguration
) -> Iterable[Tuple[Container[int], MedallaStateMachineFast]]:
sm_configuration += ((FAR_FUTURE_SLOT, None),)
for (first_fork, second_fork) in toolz.sliding_window(2, sm_configuration):
valid_range = range(first_fork[0], second_fork[0])
valid_sm = first_fork[1]()
yield (valid_range, valid_sm)
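# With the default configuration ((GENESIS_SLOT, MedallaStateMachineFast),) this yields a single
# entry, range(GENESIS_SLOT, FAR_FUTURE_SLOT) -> MedallaStateMachineFast(), i.e. one state machine
# covering every slot until a later fork is added to the configuration.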
class ChainDBBlockSink(BlockSink):
def __init__(self, chain_db: BaseBeaconChainDB) -> None:
self._chain_db = chain_db
def on_pruned_block(self, block: BaseBeaconBlock, canonical: bool) -> None:
if canonical:
self._chain_db.mark_canonical_block(block)
class BeaconChain(BaseBeaconChain):
logger = logging.getLogger("eth2.beacon.chains.BeaconChain")
_chain_db: BaseBeaconChainDB
_sm_configuration = ((GENESIS_SLOT, MedallaStateMachineFast),)
_fork_choice: BaseForkChoice
_current_head: BeaconBlock
_justified_checkpoint: Checkpoint = default_checkpoint
_finalized_checkpoint: Checkpoint = default_checkpoint
def __init__(
self, chain_db: BaseBeaconChainDB, fork_choice: BaseForkChoice
) -> None:
self._chain_db = chain_db
_validate_sm_configuration(self._sm_configuration)
self._state_machines_by_range = _load_state_machines(self._sm_configuration)
self._fork_choice = fork_choice
self._current_head = fork_choice.find_head()
head_state = self._chain_db.get_state_by_root(
self._current_head.state_root, BeaconState
)
self._reconcile_justification_and_finality(head_state)
@classmethod
def from_genesis(
cls, base_db: AtomicDatabaseAPI, genesis_state: BeaconState
) -> "BeaconChain":
for starting_slot, state_machine_class in cls._sm_configuration:
if starting_slot == GENESIS_SLOT:
signed_block_class = state_machine_class.signed_block_class
fork_choice_class = state_machine_class.fork_choice_class
config = state_machine_class.config
# NOTE: important this happens as soon as it can...
override_lengths(config)
break
else:
raise Exception("state machine configuration missing genesis era")
assert genesis_state.slot == GENESIS_SLOT
chain_db = BeaconChainDB.from_genesis(
base_db, genesis_state, signed_block_class, config
)
block_sink = ChainDBBlockSink(chain_db)
fork_choice = fork_choice_class.from_genesis(genesis_state, config, block_sink)
return cls(chain_db, fork_choice)
@property
def db(self) -> BaseBeaconChainDB:
return self._chain_db
def _get_fork_choice(self, slot: Slot) -> BaseForkChoice:
# NOTE: ignoring slot polymorphism for now...
expected_class = self.get_state_machine(slot).fork_choice_class
if expected_class == self._fork_choice.__class__:
return self._fork_choice
else:
raise NotImplementedError(
"a fork choice different than the one implemented was requested by slot"
)
def get_state_machine(self, slot: Slot) -> MedallaStateMachineFast:
"""
Return the ``StateMachine`` instance for the given slot number.
"""
# TODO iterate over ``reversed(....items())`` once we support only >=py3.8
for (slot_range, state_machine) in self._state_machines_by_range.items():
if slot in slot_range:
return state_machine
else:
raise Exception("state machine configuration was incorrect")
def get_canonical_head(self) -> BeaconBlock:
return self._current_head
def get_canonical_head_state(self) -> BeaconState:
head = self.get_canonical_head()
return self._chain_db.get_state_by_root(head.state_root, BeaconState)
def on_tick(self, tick: Tick) -> None:
if tick.is_first_in_slot():
fork_choice = self._get_fork_choice(tick.slot)
head = fork_choice.find_head()
self._update_head_if_new(head)
def get_block_by_slot(self, slot: Slot) -> Optional[BaseSignedBeaconBlock]:
state_machine = self.get_state_machine(slot)
return self._get_block_by_slot(slot, state_machine.signed_block_class)
def _get_block_by_slot(
self, slot: Slot, block_class: Type[SignedBeaconBlock]
) -> Optional[SignedBeaconBlock]:
# check in db first, implying a finalized chain
block = self._chain_db.get_block_by_slot(slot, block_class.block_class)
if block:
signature = self._chain_db.get_block_signature_by_root(block.hash_tree_root)
return SignedBeaconBlock.create(message=block, signature=signature)
else:
# check in the canonical chain according to fork choice
# NOTE: likely want a more efficient way to determine block by slot...
for block in self._fork_choice.get_canonical_chain():
if block.slot == slot:
signature = self._chain_db.get_block_signature_by_root(
block.hash_tree_root
)
return SignedBeaconBlock.create(message=block, signature=signature)
else:
return None
def _import_block(
self, block: BaseSignedBeaconBlock, perform_validation: bool = True
) -> BaseSignedBeaconBlock:
try:
# NOTE: would need to introduce a "root to slot" look up here for polymorphism
parent_block = self._chain_db.get_block_by_root(
block.parent_root, BeaconBlock
)
except BlockNotFound:
raise ParentNotFoundError(
f"attempt to import block {block} but missing parent block"
)
# NOTE: check if block is in the canonical chain
# First, see if we have a block already at that slot...
existing_block = self._get_block_by_slot(block.slot, block.__class__)
if existing_block:
if existing_block != block:
# NOTE: we want to keep the block but avoid heavy state transition for now...
# Rationale: this block may simply be a slashable block. It could also be on
# a fork. Choose to defer the state materialization until we re-org via fork choice.
self._chain_db.persist_block(block)
raise SlashableBlockError(
block,
f"attempt to import {block} but canonical chain"
" already has a block at this slot",
)
else:
# NOTE: block already imported!
return block
else:
head = self.get_canonical_head()
extension_of_head = block.parent_root == head.hash_tree_root
if not extension_of_head:
# NOTE: this arm implies we received a block for a slot _ahead_ of our head
# on the canonical chain...
# NOTE: block validity _should_ reject a block before it gets to this layer
# but we will double-check in the event that invariant is violated or does not hold
                # NOTE: we elect to keep the block in the event of a
# re-org later, but do no further processing.
self._chain_db.persist_block(block)
raise SlashableBlockError(
block,
f"attempt to import {block} but canonical chain is not as far ahead",
)
state_machine = self.get_state_machine(block.slot)
state_class = state_machine.state_class
pre_state = self._chain_db.get_state_by_root(
parent_block.state_root, state_class
)
state, imported_block = state_machine.apply_state_transition(
pre_state, block, check_proposer_signature=perform_validation
)
if perform_validation:
validate_imported_block_unchanged(imported_block, block)
# NOTE: if we have a valid block/state, then record in the database.
self._chain_db.persist_block(block)
self._chain_db.persist_state(state, state_machine.config)
self._reconcile_justification_and_finality(state)
return imported_block
def _reconcile_justification_and_finality(self, state: BeaconState) -> None:
justified_checkpoint = state.current_justified_checkpoint
finalized_checkpoint = state.finalized_checkpoint
if justified_checkpoint.epoch > self._justified_checkpoint.epoch:
self._justified_checkpoint = justified_checkpoint
self._fork_choice.update_justified(state)
if finalized_checkpoint.epoch > self._finalized_checkpoint.epoch:
self._finalized_checkpoint = finalized_checkpoint
finalized_head = self._chain_db.get_block_by_root(
self._finalized_checkpoint.root, BeaconBlock
)
self._chain_db.mark_finalized_head(finalized_head)
def _update_head_if_new(self, block: BeaconBlock) -> None:
if block != self._current_head:
self._current_head = block
self.logger.debug("new head of chain: %s", block)
def _update_fork_choice_with_block(self, block: BeaconBlock) -> None:
"""
        NOTE: it is assumed that ``_import_block`` has successfully been called
before this method is run as the fork choice shares state with the underlying
chain db.
Adding a new ``block`` likely updates the head so we also call
``_update_head_if_new`` after registering the new data with the
fork choice module.
"""
fork_choice = self._get_fork_choice(block.slot)
fork_choice.on_block(block)
for attestation in block.body.attestations:
self._update_fork_choice_with_attestation(fork_choice, attestation)
head = fork_choice.find_head()
self._update_head_if_new(head)
def _update_fork_choice_with_attestation(
self, fork_choice: BaseForkChoice, attestation: Attestation
) -> None:
block_root = attestation.data.beacon_block_root
target_epoch = attestation.data.target.epoch
indices = self._get_indices_from_attestation(attestation)
fork_choice.on_attestation(block_root, target_epoch, *indices)
def _find_present_ancestor_state(
self, block_root: Root
) -> Tuple[BeaconState, Tuple[SignedBeaconBlock, ...]]:
"""
Find the first state we have persisted that is an ancestor of ``target_block``.
"""
try:
block = self._chain_db.get_block_by_root(block_root, BeaconBlock)
blocks: Tuple[SignedBeaconBlock, ...] = ()
# NOTE: re: bounds here; worst case, we return the genesis state.
for slot in range(block.slot, GENESIS_SLOT - 1, -1):
try:
state_machine = self.get_state_machine(Slot(slot))
state = self._chain_db.get_state_by_root(
block.state_root, state_machine.state_class
)
return (state, blocks)
except StateNotFound:
signature = self._chain_db.get_block_signature_by_root(
block.hash_tree_root
)
blocks += (
SignedBeaconBlock.create(message=block, signature=signature),
)
block = self._chain_db.get_block_by_root(
block.parent_root, BeaconBlock
)
except BlockNotFound:
raise Exception(
"invariant violated: querying a block that has not been persisted"
)
# NOTE: `mypy` complains without this although execution should never get here...
return (None, ())
def _compute_missing_state(self, target_block: BeaconBlock) -> BeaconState:
"""
Calculate the state for the ``target_block``.
        The chain persists states for canonical blocks.
        In the event that we need a state that has not been persisted,
for example, if we are executing a re-org, then we will
need to compute it.
NOTE: this method will persist the new (potentially non-canonical) states.
"""
state, blocks = self._find_present_ancestor_state(target_block.parent_root)
for block in reversed(blocks):
state_machine = self.get_state_machine(block.slot)
state, _ = state_machine.apply_state_transition(state, block)
self._chain_db.persist_state(state, state_machine.config)
return state
def _get_indices_from_attestation(
self, attestation: Attestation
) -> Collection[ValidatorIndex]:
sm = self.get_state_machine(attestation.data.slot)
return get_attesting_indices(
sm._epochs_ctx, attestation.data, attestation.aggregation_bits
)
def on_block(
self, block: BaseSignedBeaconBlock, perform_validation: bool = True
) -> None:
self.logger.debug("attempting import of block %s", block)
try:
imported_block = self._import_block(block, perform_validation)
self.logger.debug("imported new block: %s", imported_block)
self._update_fork_choice_with_block(block.message)
except SlashableBlockError:
# still register a block if it is a duplicate, in event of a re-org
# other exceptions should not add the block to the fork choice
self._update_fork_choice_with_block(block.message)
raise
def on_attestation(self, attestation: Attestation) -> None:
"""
This method expects ``attestation`` to come from the wire, not one in a
(valid) block; attestations in blocks are handled in ``on_block``
"""
fork_choice = self._get_fork_choice(attestation.data.slot)
self._update_fork_choice_with_attestation(fork_choice, attestation)
class BeaconChainTest(BeaconChain):
_sm_configuration = ((GENESIS_SLOT, MedallaStateMachineTest),) # type: ignore
|
py | 1a3b0a0c21b7d34292ed8fd5f2d28e63873029cd | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- Project information -----------------------------------------------------
project = 'kg_keycloak'
copyright = '2020, Rangel Reale'
author = 'Rangel Reale'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc', 'sphinx_rtd_theme', 'm2r2']
# source_suffix = '.rst'
source_suffix = ['.rst', '.md']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_style = 'css/style.css'
|
py | 1a3b0a702b70cd10837deab8246160e069916c53 | from django.contrib.syndication.views import Feed
from django.core.urlresolvers import reverse
from linkpile.models import Link
class LinksFeed( Feed ):
title = 'Linkpile Links'
link = '/linkpile/'
description = 'A pile of links from around the web'
def items( self ):
return Link.get_recent(10)
def item_title(self, item):
return item.title
def item_description(self, item):
return item.description
def item_link(self, item):
return reverse('linkpile-link', args=[item.id])
|
py | 1a3b0a8b2c32f7f5de667cdac6f3bfe2ac763891 | import urllib.request
import os, sys
content = ''
attempts = 0
protocol = 'http'
subdomain = 'web135/'
domain = 'j3'
'''
lvl3 = ['']
lvl4 = ['']
lvl5 = ['']
lvl6 = ['']
lvl7 = ['']
lvl8 = ['']'''
root_dir = '../inetpub/wwwroot/wwwlive'
links = ['',
'news',
'emergency-info',
'county-departments',
'contact-us',
'elected-officials',
'commissioners-and-directors',
'other-appointed-officials',
'advisory-boards',
'directions',
'subscribe',
'foilform',
'contact-us/e-mail-ce',
'emergency-info/emergency-alert-sign-up',
]
'''while attempts < 3:
try:
for link in links:
final_link = protocol +'://'+ subdomain + '.' + domain +'/'+ link
print('Downloading Webpage: ' + final_link)
response = urllib.request.urlopen( final_link , timeout = 5 )
content = str(response.read().decode('utf-8'))
f = open( link, 'w' )
print('Writing to: ' + link)
f.write( content )
f.close()
break
except urllib.request.URLError as e:
attempts += 1
print (type(e))'''
for link in links:
dir = root_dir +'/'+ link
file = dir + '/'+ 'index.html'
final_link = protocol +'://'+ subdomain + domain +'/'+ link
print('Downloading Webpage: ' + final_link)
try:
response = urllib.request.urlopen( final_link , timeout = 15 )
content = str(response.read().decode('utf-8'))
    except urllib.request.URLError as e:
        print (str(e))
        continue  # skip writing this page; otherwise stale/empty content from earlier is saved
if not os.path.isdir(dir):
os.mkdir( dir)
print ("Created " + dir + " Folder")
f = open( file , 'w' )
    print('Writing to: ' + file)
f.write( content )
f.close()
|
py | 1a3b0bdef17bbab8c06fbe19ba5b2de1118923d1 | import time
from basetestcase import BaseTestCase
from remote.remote_util import RemoteMachineShellConnection
from membase.api.rest_client import RestConnection, Bucket, RestHelper
from membase.api.exception import BucketCreationException
from membase.helper.bucket_helper import BucketOperationHelper
from couchbase_helper.documentgenerator import BlobGenerator
from testconstants import STANDARD_BUCKET_PORT
from testconstants import LINUX_COUCHBASE_BIN_PATH
from testconstants import LINUX_COUCHBASE_SAMPLE_PATH
from testconstants import WIN_COUCHBASE_BIN_PATH
from testconstants import WIN_COUCHBASE_SAMPLE_PATH_C
from testconstants import COUCHBASE_FROM_WATSON, COUCHBASE_FROM_4DOT6,\
COUCHBASE_FROM_SPOCK, COUCHBASE_FROM_VULCAN,\
COUCHBASE_FROM_CHESHIRE_CAT
from scripts.install import InstallerJob
class CreateBucketTests(BaseTestCase):
def setUp(self):
super(CreateBucketTests, self).setUp()
self._init_parameters()
def _init_parameters(self):
self.bucket_name = self.input.param("bucket_name", 'default')
self.bucket_type = self.input.param("bucket_type", 'sasl')
self.reset_node_services = self.input.param("reset_node_services", True)
self.bucket_size = self.quota
self.password = 'password'
self.server = self.master
self.rest = RestConnection(self.server)
self.node_version = self.rest.get_nodes_version()
self.total_items_travel_sample = 31569
if self.node_version[:5] in COUCHBASE_FROM_WATSON:
if self.node_version[:5] in COUCHBASE_FROM_CHESHIRE_CAT:
self.total_items_travel_sample = 63288
else:
self.total_items_travel_sample = 63182
shell = RemoteMachineShellConnection(self.master)
type = shell.extract_remote_info().distribution_type
shell.disconnect()
self.sample_path = LINUX_COUCHBASE_SAMPLE_PATH
self.bin_path = LINUX_COUCHBASE_BIN_PATH
if self.nonroot:
self.sample_path = "/home/%s%s" % (self.master.ssh_username,
LINUX_COUCHBASE_SAMPLE_PATH)
self.bin_path = "/home/%s%s" % (self.master.ssh_username,
LINUX_COUCHBASE_BIN_PATH)
if type.lower() == 'windows':
self.sample_path = WIN_COUCHBASE_SAMPLE_PATH_C
self.bin_path = WIN_COUCHBASE_BIN_PATH
elif type.lower() == "mac":
self.sample_path = MAC_COUCHBASE_SAMPLE_PATH
self.bin_path = MAC_COUCHBASE_BIN_PATH
def tearDown(self):
super(CreateBucketTests, self).tearDown()
# Bucket creation with names as mentioned in MB-5844(.delete, _replicator.couch.1, _users.couch.1)
def test_banned_bucket_name(self, password='password'):
try:
if self.bucket_type == 'sasl':
self.rest.create_bucket(self.bucket_name, ramQuotaMB=200)
elif self.bucket_type == 'standard':
self.rest.create_bucket(self.bucket_name, ramQuotaMB=200, proxyPort=STANDARD_BUCKET_PORT + 1)
elif self.bucket_type == 'memcached':
self.rest.create_bucket(self.bucket_name, ramQuotaMB=200, proxyPort=STANDARD_BUCKET_PORT + 1, bucketType='memcached')
else:
self.log.error('Bucket type not specified')
return
self.fail('created a bucket with invalid name {0}'.format(self.bucket_name))
except BucketCreationException as ex:
self.log.info(ex)
def test_win_specific_names(self):
version = self._get_cb_version()
if self._get_cb_os() != 'windows':
self.log.warning('This test is windows specific')
return
try:
self.test_banned_bucket_name()
finally:
try:
self.log.info('Will check if ns_server is running')
rest = RestConnection(self.master)
self.assertTrue(RestHelper(rest).is_ns_server_running(timeout_in_seconds=60))
except:
self._reinstall(version)
self.fail("ns_server is not running after bucket '%s' creation" %(
self.bucket_name))
# Bucket creation with names as mentioned in MB-5844(isasl.pw, ns_log)
def test_valid_bucket_name(self, password='password'):
tasks = []
shared_params = self._create_bucket_params(server=self.server, size=self.bucket_size,
replicas=self.num_replicas)
if self.bucket_type == 'sasl':
self.cluster.create_sasl_bucket(name=self.bucket_name, password=password, bucket_params=shared_params)
self.buckets.append(Bucket(name=self.bucket_name, num_replicas=self.num_replicas,
bucket_size=self.bucket_size, master_id=self.server))
elif self.bucket_type == 'standard':
self.cluster.create_standard_bucket(name=self.bucket_name, port=STANDARD_BUCKET_PORT+1,
bucket_params=shared_params)
self.buckets.append(Bucket(name=self.bucket_name, num_replicas=self.num_replicas,
bucket_size=self.bucket_size, port=STANDARD_BUCKET_PORT + 1, master_id=self.server))
elif self.bucket_type == "memcached":
tasks.append(self.cluster.async_create_memcached_bucket(name=self.bucket_name,
port=STANDARD_BUCKET_PORT+1,
bucket_params=shared_params))
self.buckets.append(Bucket(name=self.bucket_name,
num_replicas=self.num_replicas, bucket_size=self.bucket_size,
port=STANDARD_BUCKET_PORT + 1, master_id=self.server, type='memcached'))
for task in tasks:
task.result()
else:
self.log.error('Bucket type not specified')
return
self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(self.bucket_name, self.rest),
msg='failed to start up bucket with name "{0}'.format(self.bucket_name))
gen_load = BlobGenerator('buckettest', 'buckettest-', self.value_size, start=0, end=self.num_items)
self._load_all_buckets(self.server, gen_load, "create", 0)
self.cluster.bucket_delete(self.server, self.bucket_name)
self.assertTrue(BucketOperationHelper.wait_for_bucket_deletion(self.bucket_name, self.rest, timeout_in_seconds=60),
msg='bucket "{0}" was not deleted even after waiting for 30 seconds'.format(self.bucket_name))
""" put param like -p log_message="Created bucket". If test need a cluster,
put nodes_init=x in param to create cluster """
def test_log_message_in_log_page(self):
if self.log_message is not None:
self._load_doc_data_all_buckets(data_op="create", batch_size=5000)
serverInfo = self.servers[0]
shell = RemoteMachineShellConnection(serverInfo)
time.sleep(5)
output, error = shell.execute_command("curl -g -v -u Administrator:password \
http://{0}:8091/logs | grep '{1}'".format(serverInfo.ip,
self.log_message))
if not output:
self.log.info("message {0} is not in log".format(self.log_message))
elif output:
raise Exception("The message %s is in log." % self.log_message)
else:
raise Exception("No thing to test. You need to put log_message=something_to_test")
def test_travel_sample_bucket(self):
sample = "travel-sample"
if self.reset_node_services:
""" reset node to set services correctly: index,kv,n1ql """
self.rest.force_eject_node()
status = False
try:
status = self.rest.init_node_services(hostname=self.master.ip,
services= ["index,kv,n1ql,fts"])
init_node = self.cluster.async_init_node(self.master,
services = ["index,kv,n1ql,fts"])
except Exception as e:
if e:
print(e)
self.sleep(10)
self.log.info("Add new user after reset node! ")
self.add_built_in_server_user(node=self.master)
if status:
if self.node_version[:5] in COUCHBASE_FROM_WATSON:
self.rest.set_indexer_storage_mode(storageMode="memory_optimized")
shell = RemoteMachineShellConnection(self.master)
shell.execute_command("""curl -g -v -u Administrator:password \
-X POST http://{0}:8091/sampleBuckets/install \
-d '["travel-sample"]'""".format(self.master.ip))
shell.disconnect()
buckets = RestConnection(self.master).get_buckets()
for bucket in buckets:
if bucket.name != "travel-sample":
                self.fail("travel-sample bucket was not created")
""" check for load data into travel-sample bucket """
end_time = time.time() + 120
while time.time() < end_time:
self.sleep(10)
num_actual = self.get_item_count(self.master, "travel-sample")
if int(num_actual) == self.total_items_travel_sample:
break
self.assertTrue(int(num_actual) == self.total_items_travel_sample,
"Items number expected %s, actual %s" % (
self.total_items_travel_sample, num_actual))
""" check all indexes are completed """
index_name = []
index_count = 8
if self.cb_version[:5] in COUCHBASE_FROM_4DOT6:
index_count = 10
result = self.rest.index_tool_stats(False)
self.log.info("check if all %s indexes built." % index_count)
end_time_i = time.time() + 60
while time.time() < end_time_i and len(index_name) < index_count:
for map in result:
if result["indexes"]:
for x in result["indexes"]:
if x["bucket"] == "travel-sample":
if x["progress"] < 100:
self.sleep(7, "waiting for indexing {0} complete"
.format(x["index"]))
result = self.rest.index_tool_stats(False)
elif x["progress"] == 100:
if x["index"] not in index_name:
index_name.append(x["index"])
self.sleep(7, "waiting for other indexing complete")
result = self.rest.index_tool_stats(False)
else:
self.sleep(7, "waiting for indexing start")
result = self.rest.index_tool_stats()
if time.time() >= end_time_i and len(index_name) < index_count:
self.log.info("index list {0}".format(index_name))
self.fail("some indexing may not complete")
elif len(index_name) == index_count:
self.log.info("travel-sample bucket is created and complete indexing")
self.log.info("index list in travel-sample bucket: {0}"
.format(index_name))
else:
self.log.info("There is extra index %s" % index_name)
def test_cli_travel_sample_bucket(self):
sample = "travel-sample"
""" couchbase-cli does not have option to reset the node yet
use rest to reset node to set services correctly: index,kv,n1ql """
self.rest.force_eject_node()
shell = RemoteMachineShellConnection(self.master)
set_index_storage_type = ""
if self.node_version[:5] in COUCHBASE_FROM_WATSON:
set_index_storage_type = " --index-storage-setting=memopt "
options = ' --cluster-port=8091 \
--cluster-ramsize=1000 \
--cluster-index-ramsize=300 \
--services=data,index,query,fts %s ' % set_index_storage_type
o, e = shell.execute_couchbase_cli(cli_command="cluster-init", options=options)
if self.node_version[:5] in COUCHBASE_FROM_SPOCK:
self.assertTrue(self._check_output("SUCCESS: Cluster initialized", o),
"Failed to initialize cluster")
else:
self.assertTrue(self._check_output("SUCCESS: init/edit localhost", o),
"Failed to init/edit localhost")
self.sleep(7, "wait for services up completely")
self.log.info("Add new user after reset node! ")
self.add_built_in_server_user(node=self.master)
shell = RemoteMachineShellConnection(self.master)
cluster_flag = "-n"
bucket_quota_flag = "-s"
data_set_location_flag = " "
if self.node_version[:5] in COUCHBASE_FROM_SPOCK:
cluster_flag = "-c"
bucket_quota_flag = "-m"
data_set_location_flag = "-d"
shell.execute_command("{0}cbdocloader -u Administrator -p password \
{3} {1}:{6} -b travel-sample {4} 200 {5} {2}travel-sample.zip" \
.format(self.bin_path,
self.master.ip,
self.sample_path,
cluster_flag,
bucket_quota_flag,
data_set_location_flag,
self.master.port))
shell.disconnect()
buckets = RestConnection(self.master).get_buckets()
for bucket in buckets:
if bucket.name != "travel-sample":
                self.fail("travel-sample bucket was not created")
""" check for load data into travel-sample bucket """
end_time = time.time() + 120
while time.time() < end_time:
self.sleep(10)
num_actual = self.get_item_count(self.master, "travel-sample")
if int(num_actual) == self.total_items_travel_sample:
break
self.assertTrue(int(num_actual) == self.total_items_travel_sample,
"Items number expected %s, actual %s" % (
self.total_items_travel_sample, num_actual))
self.log.info("Total items %s " % num_actual)
""" check all indexes are completed """
index_name = []
index_count = 8
if self.cb_version[:5] in COUCHBASE_FROM_4DOT6:
index_count = 10
result = self.rest.index_tool_stats(False)
""" check all indexes are completed """
self.log.info("check if all %s indexes built." % index_count)
end_time_i = time.time() + 180
while time.time() < end_time_i and len(index_name) < index_count:
if result["indexes"]:
for x in result["indexes"]:
if x["bucket"] == "travel-sample":
if x["progress"] == 100 and \
x["index"] not in index_name:
index_name.append(x["index"])
self.sleep(7, "waiting for indexing complete")
result = self.rest.index_tool_stats(False)
else:
self.sleep(2, "waiting for indexing start")
result = self.rest.index_tool_stats(False)
if time.time() >= end_time_i and len(index_name) < index_count:
self.log.info("index list {0}".format(index_name))
self.fail("some indexing may not complete")
elif len(index_name) == index_count:
self.log.info("travel-sample bucket is created and complete indexing")
self.log.info("index list in travel-sample bucket: {0}"
.format(index_name))
else:
self.log.info("There is extra index %s" % index_name)
def test_cli_bucket_maxttl_setting(self):
""" couchbase-cli does not have option to reset the node yet
use rest to reset node to set services correctly: index,kv,n1ql """
if self.node_version[:5] in COUCHBASE_FROM_VULCAN:
self.rest.force_eject_node()
shell = RemoteMachineShellConnection(self.master)
set_index_storage_type = " --index-storage-setting=memopt "
options = ' --cluster-port=8091 \
--cluster-ramsize=300 \
--cluster-index-ramsize=300 \
--services=data,index,query %s ' \
% set_index_storage_type
o, e = shell.execute_couchbase_cli(cli_command="cluster-init",
options=options)
self.assertEqual(o[0], 'SUCCESS: Cluster initialized')
self.log.info("Add new user after reset node! ")
self.add_built_in_server_user(node=self.master)
shell = RemoteMachineShellConnection(self.master)
bucket_type = self.input.param("bucket_type", "couchbase")
options = ' --bucket=default \
--bucket-type={0} \
--bucket-ramsize=200 \
--max-ttl=400 \
--wait '.format(bucket_type)
o, e = shell.execute_couchbase_cli(cli_command="bucket-create",
options=options)
self.assertEqual(o[0], 'SUCCESS: Bucket created')
self.sleep(30, "Sleep before loading doc using cbdocloader")
cluster_flag = "-c"
bucket_quota_flag = "-m"
data_set_location_flag = "-d"
shell.execute_command(
"{0}cbdocloader -u Administrator -p password "
"{3} {1} -b default {4} 100 {5} {2}travel-sample.zip"
.format(self.bin_path, self.master.ip, self.sample_path,
cluster_flag, bucket_quota_flag,
data_set_location_flag))
shell.disconnect()
buckets = RestConnection(self.master).get_buckets()
for bucket in buckets:
if bucket.name != "default":
self.fail("default bucket did not get created")
""" check for load data into travel-sample bucket """
end_time = time.time() + 120
num_actual = 0
while time.time() < end_time:
self.sleep(10)
num_actual = self.get_item_count(self.master, "default")
if int(num_actual) == self.total_items_travel_sample:
break
self.assertTrue(int(num_actual) == self.total_items_travel_sample,
"Items number expected %s, actual %s"
% (self.total_items_travel_sample, num_actual))
self.log.info("Total items %s " % num_actual)
self.sleep(400, "Waiting for docs to expire as per maxttl")
self.expire_pager([self.master])
self.sleep(20, "Wait for expiry_purger to run")
num_actual = self.get_item_count(self.master, "default")
if int(num_actual) != 0:
self.fail("Item count is not 0 after maxttl has elapsed")
else:
self.log.info("SUCCESS: Item count is 0 after maxttl has elapsed")
else:
self.log.info("This test is not designed to run in pre-vulcan(5.5.0) versions")
# Start of tests for ephemeral buckets
#
def test_ephemeral_buckets(self):
eviction_policy = self.input.param("eviction_policy", 'noEviction')
shared_params = self._create_bucket_params(server=self.server, size=100,
replicas=self.num_replicas, bucket_type='ephemeral',
eviction_policy=eviction_policy)
# just do sasl for now, pending decision on support of non-sasl buckets in 5.0
self.cluster.create_sasl_bucket(name=self.bucket_name, password=self.sasl_password, bucket_params=shared_params)
self.buckets.append(Bucket(name=self.bucket_name,
num_replicas=self.num_replicas,
bucket_size=self.bucket_size, master_id=self.server))
self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(self.bucket_name, self.rest),
                        msg='failed to start up bucket with name "{0}"'.format(self.bucket_name))
gen_load = BlobGenerator('buckettest', 'buckettest-', self.value_size, start=0, end=self.num_items)
self._load_all_buckets(self.server, gen_load, "create", 0)
self.cluster.bucket_delete(self.server, self.bucket_name)
self.assertTrue(BucketOperationHelper.wait_for_bucket_deletion(self.bucket_name, self.rest, timeout_in_seconds=60),
                            msg='bucket "{0}" was not deleted even after waiting for 60 seconds'.format(self.bucket_name))
def _get_cb_version(self):
rest = RestConnection(self.master)
version = rest.get_nodes_self().version
return version[:version.rfind('-')]
def _get_cb_os(self):
rest = RestConnection(self.master)
return rest.get_nodes_self().os
def _reinstall(self, version):
servs = self.servers[:self.nodes_init]
params = {}
params['num_nodes'] = len(servs)
params['product'] = 'cb'
params['version'] = version
params['vbuckets'] = [self.input.param('vbuckets', 1024)]
self.log.info("will install {0} on {1}".format(version, [s.ip for s in servs]))
InstallerJob().parallel_install(servs, params)
if params['product'] in ["couchbase", "couchbase-server", "cb"]:
success = True
for server in servs:
success &= RemoteMachineShellConnection(server).is_couchbase_installed()
if not success:
self.input.test_params["stop-on-failure"] = True
self.log.error("Couchbase wasn't recovered. All downstream tests will be skipped")
self.fail("some nodes were not install successfully!")
def _check_output(self, word_check, output):
found = False
if len(output) >= 1:
if isinstance(word_check, list):
for ele in word_check:
for x in output:
if ele.lower() in str(x.lower()):
                            self.log.info("Found '{0}' in CLI output".format(ele))
found = True
break
elif isinstance(word_check, str):
for x in output:
if word_check.lower() in str(x.lower()):
self.log.info("Found '{0}' in CLI output".format(word_check))
found = True
break
else:
self.log.error("invalid {0}".format(word_check))
return found |
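    # Illustrative checks (values are made up): matching is case-insensitive and,
    # for a list, any element matching any output line is enough.
    #   self._check_output("SUCCESS: Bucket created", ["SUCCESS: Bucket created"])  -> True
    #   self._check_output(["created", "deleted"], ["no matching output here"])     -> False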
py | 1a3b0cf2c7f8ac8ebffcc183d929b522553647e6 |
import math
import numpy as np
from numpy import sqrt
from direct_kinematic import DirectKinematic
class FitnessFunctions(object):
def __init__(self, parameters={}):
self.energy_constants = [31.1,21.1,26.6,8.3,5.00,2.6]
self.velocity_constants = [90,90,90,120,120,190]
return
def evaluate_energy(self, array_of_joints_coordinates, verbose=False):
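        # Energy model used below: for every consecutive pair of waypoints, sum
        # |delta joint angle| * the per-joint energy constant over all six joints.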
trajectory_len=len(array_of_joints_coordinates)
total_energy=0
all_data=[0]*trajectory_len
for coo in range(0, trajectory_len):
for joint in range(0, 6):
angle_in_rad_tmp=abs(array_of_joints_coordinates[coo][joint]-array_of_joints_coordinates[(coo-1+trajectory_len)%trajectory_len][joint])
#if verbose:
# print(tmp)
all_data[coo]=all_data[coo]+angle_in_rad_tmp*self.energy_constants[joint]
total_energy=np.sum(all_data)
if verbose:
return total_energy, all_data
return total_energy
def evaluate_operation_time(self, array_of_joints_coordinates, verbose=False):
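        # Operation-time model used below: per segment the slowest joint dominates,
        # i.e. time = max joint rotation (in degrees) / that joint's velocity constant.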
trajectory_len=len(array_of_joints_coordinates)
total_operation_time = 0
angles_in_rad_tmp= [0] * 6
all_data=[0]*trajectory_len
for coo in range(0, trajectory_len):
angles_in_rad=[]
for joint in range(0, 6):
# rotation=|joint_b-joint_a|
angles_in_rad.append(abs(array_of_joints_coordinates[coo][joint]-array_of_joints_coordinates[(coo-1+trajectory_len)%trajectory_len][joint]))
            # NOTE: possible unit mismatch between degrees and radians here
all_data[coo]=math.degrees(max(angles_in_rad))/self.velocity_constants[angles_in_rad.index(max(angles_in_rad))]
total_operation_time=np.sum(all_data)
if verbose:
return total_operation_time, all_data
return total_operation_time
def evaluate_rotations(self, array_of_joints_coordinates, verbose=False):
trajectory_len=len(array_of_joints_coordinates)
total_rotations = 0
all_data=[0]*trajectory_len
for joint in range(0, 6):
for coo in range(0, trajectory_len):
# rotation=|joint_b-joint_a|
all_data[coo]=all_data[coo]+abs(array_of_joints_coordinates[coo][joint]-array_of_joints_coordinates[(coo-1+trajectory_len)%trajectory_len][joint])
#print(str(array_of_joints_coordinates[coo][joint])+" - "+ str(array_of_joints_coordinates[(coo-1+trajectory_len)%trajectory_len][joint])+" = "+ str(abs(array_of_joints_coordinates[coo][joint]-array_of_joints_coordinates[(coo-1+trajectory_len)%trajectory_len][joint])))
total_rotations=np.sum(all_data)
if verbose:
return total_rotations, all_data
return total_rotations
def evaluate_position_accuracy(self, array_of_joints_coordinates, points, verbose=False):
trajectory_len=len(array_of_joints_coordinates)
directKinematics = DirectKinematic()
total_accuracy = 0
all_data=[]
for i in range(0, trajectory_len):
homogenousPred = directKinematics.evaluate(array_of_joints_coordinates[i]) # This is in homogenous coordinates
#homogenousPred = directKinematics.evaluate([0.345,0.720,-0.153, 2.120,0.874,1.620]) # This is in homogenous coordinates
predictedPosition = np.array([[homogenousPred[0][0] / homogenousPred[3][0]],
[homogenousPred[1][0] / homogenousPred[3][0]],
[homogenousPred[2][0] / homogenousPred[3][0]]]) #This is cartesian coordinates
x_diff = (points[i][0] - predictedPosition[0])**2
y_diff = (points[i][1] - predictedPosition[1])**2
z_diff = (points[i][2] - predictedPosition[2])**2
differences = x_diff + y_diff + z_diff
all_data.append(sqrt(differences)[0])
total_accuracy += all_data[i]
if verbose:
return total_accuracy, all_data
return total_accuracy
if __name__ == '__main__':
import os
import pickle
import pandas as pd
trajectory_points = [[2.25, 1.1, 0.25],
[0.9, 1.5, 0.25],
[-0.85, 1.14, 2.22],
[-1.8, 1.25, 1.17],
[1.8, 1.25, 1.17],
[-1.25, -1.1, 0.25],
[-2.25, -1.48, 0.25],
[0.45, -1.14, 2.22],
[0.8, -1.25, 2.35],
[0.8, -1.25, -1.35]]
outputs = [[0.473, 0.592, -0.230, 0.130, 0.008,-0.617],
[1.026, 0.293, -0.008, 0.132, 1.155, -0.617],
[2.086, -0.014, -0.270, 2.890, 1.155, -0.617],
[2.523, 0.179, -0.270, 2.890, -0.440, -0.617],
[0.597, 0.179, -0.270, 2.890, -0.440, -0.617],
[-2.417, 0.179, 0.434, 2.887, -0.665, -0.617],
[-2.465, 0.794, -0.459, 1.342, -0.665, -0.617],
[-1.087, -0.189, -0.462, 0.324, -0.665, -0.617],
[-0.951, -0.100, -0.462, 0.130, -0.526, -0.617],
[-0.966, 1.188, 0.215, 0.130, 0.008, -0.617]]
fitnessFunctions = FitnessFunctions()
total_accuracy, accuracies = fitnessFunctions.evaluate_position_accuracy(outputs, trajectory_points, True)
print("accuracy____" + str(total_accuracy))
print(accuracies)
fitnesses={"total":[],"rotation_A":[],"energy_E":[],"operation_T":[],"accuracy":[]}
data={}
verbose=True
data["outputs"]=outputs
total_rotation, data["rotation_A"] = fitnessFunctions.evaluate_rotations(outputs, verbose)
total_energy, data["energy_E"] = fitnessFunctions.evaluate_energy(outputs, verbose)
total_operation_time, data["operation_T"] = fitnessFunctions.evaluate_operation_time(outputs, verbose)
total_accuracy, data["accuracy"] = fitnessFunctions.evaluate_position_accuracy(outputs, trajectory_points, verbose)
data["total_rotation_A"]=total_rotation
data["total_energy_E"]=total_energy
data["total_operation_time"]=total_operation_time
data["total_accuracy"]=total_accuracy
fitness = -(total_accuracy)#ACCURACY OPTIMAL
#fitness = -(total_accuracy+20/200*total_operation_time)#TIME OPTIMAL
#fitness = -(total_accuracy+20*total_energy)#ENERGY OPTIMAL
#fitness = -(total_accuracy+20*total_rotation)#MINIMUM ROTATION
#fitness = -(total_accuracy+5*total_energy+10*total_operation_time+5*total_rotation)#COMBINED CONTROL
fitnesses["total"].append(-fitness)
fitnesses["rotation_A"].append(total_rotation)
fitnesses["energy_E"].append(total_energy)
fitnesses["operation_T"].append(total_operation_time)
fitnesses["accuracy"].append(total_accuracy)
if verbose:
data["fitness"]=total_accuracy
df = pd.DataFrame(dict([ (k,pd.Series(v)) for k,v in data.items() ]))
df.to_csv('tableResults_TEST_PAPER.csv', index=False)
|
py | 1a3b0dabf4092e81d484b65f61f08c814ccce747 | import sys
import os
import runpy
path = os.path.dirname(sys.modules[__name__].__file__)
path = os.path.join(path, '..')
sys.path.insert(0, path)
runpy.run_module('sstpd', run_name="__main__", alter_sys=True)
|
py | 1a3b0e6d4cb761ecf892388f2fd52d7723f60d77 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""Pivot helper functions ."""
from collections import abc
from functools import wraps
from typing import Any, Callable, Dict, Optional, Union
import attr
import pandas as pd
from .._version import VERSION
from . import entities
__version__ = VERSION
__author__ = "Ian Hellen"
_DF_SRC_COL_PARAM_NAMES = [
"column",
"input_column",
"input_col",
"src_column",
"src_col",
]
@attr.s(auto_attribs=True)
class PivotRegistration:
"""
Pivot registration for function.
Notes
-----
src_module : str
The src_module to import
src_class : str, optional
class to import and instantiate that contains the function/method
(not needed if the target function is a pure Python function)
src_func_name: Callable
The function to wrap.
func_new_name: str, optional
Rename the function to this, defaults to `src_func_name`
input_type : str
The input data type that the function is expecting.
One of 'dataframe', 'iterable', 'value'
can_iterate: bool, optional
True if the function supports being called multiple times
(for iterable input). Default is True
entity_map: Dict[str, str]
dict of entities supported (keys) and attribute to use from
entity as input to the function
func_df_param_name: str
        The name of the parameter through which `func` receives the input value,
        e.g. func(ip=my_address) => 'ip' == func_df_param_name.
In the case of a DataFrame, this is usually 'data'
func_df_col_param_name: str
The name that the target function uses to identify the
column to use for input in the input DataFrame.
func_out_column_name: str, optional
The name of the column in the output DF to use as a key to join
to the input. If None, use `func_df_col_param_name`
func_static_params: Optional[Dict[str, Any]]
static parameters (kwargs) that are always passed
to the target function
func_input_value_arg: Optional[str]
The name of kwarg passed to the function that contain
the input value. If function supports DF input, `func_df_col_param_name`
will be used and this is not needed.
src_config_path : Optional[str]
The source path that the configuration was read from, default None.
src_config_entry : Optional[str]
The entry name in the configuration file, default None.
entity_container_name : Optional[str]
The name of the container in the entity that will hold
this pivot function.
return_raw_output : bool
Return raw output from the wrapped function, do not
try to format into a DataFrame. Default is False.
"""
input_type: str
entity_map: Dict[str, str]
func_df_param_name: Optional[str] = None
func_out_column_name: Optional[str] = None
func_df_col_param_name: Optional[str] = None
func_new_name: Optional[str] = None
src_module: Optional[str] = None
src_class: Optional[str] = None
src_func_name: Optional[str] = None
can_iterate: bool = True
func_static_params: Optional[Dict[str, Any]] = None
func_input_value_arg: Optional[str] = None
src_config_path: Optional[str] = None
src_config_entry: Optional[str] = None
entity_container_name: Optional[str] = None
return_raw_output: bool = False
def attr_for_entity(self, entity: Union[entities.Entity, str]) -> Optional[str]:
"""
Return the attribute to use for the specified entity.
Parameters
----------
entity : Union[entities.Entity, str]
Entity instance or name
Returns
-------
Optional[str]
Attribute name to use.
"""
if isinstance(entity, entities.Entity):
ent_name = entity.__class__.__name__
else:
ent_name = entity
return self.entity_map.get(ent_name)
def create_pivot_func(
target_func: Callable[[Any], Any],
pivot_reg: PivotRegistration,
) -> Callable[..., pd.DataFrame]:
"""
Create function wrapper for pivot function.
Parameters
----------
target_func: Callable
The target function to wrap.
pivot_reg : PivotRegistration
The pivot function registration object.
Returns
-------
Callable[[Any], pd.DataFrame]
The original `target_func` wrapped in pre-processing
and post-processing code.
"""
@wraps(target_func)
def pivot_lookup(*args, **kwargs) -> pd.DataFrame:
"""
Lookup Pivot function from Entity or parameter values.
Parameters
----------
data: Union[str, List[str], pd.DataFrame]
Not used if querying the entity value itself
Returns
-------
pd.DataFrame
DataFrame of Pivot function results.
"""
# remove and save the join kw, if specified (so it doesn't interfere
# with other operations and doesn't get sent to the function)
join_type = kwargs.pop("join", None)
input_value = _get_input_value(*args, pivot_reg=pivot_reg, parent_kwargs=kwargs)
_check_valid_settings_for_input(input_value, pivot_reg)
# If the input_value is not a DF convert it into one and return the DF,
# the column with the input value(s) plus the param dict that we're going
# to send to the function. This is going to look like:
# {"data": input_df, "src_column": input_column}
input_df, input_column, param_dict = _create_input_df(
input_value, pivot_reg, parent_kwargs=kwargs
)
# Add any static parameters for the function to our params dict
param_dict.update(pivot_reg.func_static_params or {})
# Call the target function and collect the results
if pivot_reg.input_type == "value":
if not pivot_reg.can_iterate and len(input_df) > 1:
raise TypeError(
"The function does not support multiple input values.",
"Try again with a single row/value as input.",
"E.g. func(data=df.iloc[N], column=...)",
)
result_df = _iterate_func(
target_func, input_df, input_column, pivot_reg, **kwargs
)
else:
result_df = target_func(**param_dict, **kwargs) # type: ignore
merge_key = pivot_reg.func_out_column_name or input_column
# If requested to join to input
# and this function is returning a DataFrame
if join_type and not pivot_reg.return_raw_output:
return input_df.merge(
result_df,
left_on=input_column,
right_on=merge_key,
how=join_type,
)
return result_df
return pivot_lookup
def _get_entity_attr_or_self(obj, attrib):
"""Return entity attribute or obj if not an entity."""
if isinstance(obj, entities.Entity):
return getattr(obj, attrib)
return obj
def _get_input_value(
*args, pivot_reg: PivotRegistration, parent_kwargs: Dict[str, Any]
) -> Any:
"""Extract input value from args or kwargs."""
if args:
input_value = args[0]
else:
# Search possible input arg names
poss_args = [
arg
for arg in [
pivot_reg.func_df_param_name,
pivot_reg.func_input_value_arg,
"value",
"data",
"input",
]
if arg
]
for arg_name in poss_args:
input_value = parent_kwargs.pop(arg_name, None)
if input_value is not None:
break
else:
raise AttributeError(
"Required keyword argument not found.",
f"One of {', '.join(poss_args)} required.",
)
if isinstance(input_value, entities.Entity):
src_entity_attrib = pivot_reg.attr_for_entity(input_value)
input_value = _get_entity_attr_or_self(input_value, src_entity_attrib)
return input_value
def _check_valid_settings_for_input(input_value: Any, pivot_reg: PivotRegistration):
"""Check input against settings in `pivot_reg`."""
# Must have one of these specified
if not (pivot_reg.func_df_col_param_name or pivot_reg.func_input_value_arg):
raise ValueError(
"A value for one of 'func_df_col_param_name' ",
"or 'func_input_value_arg' must be given",
)
# If the function accepts only value type and cannot iterate. Make sure
# that the input_value is a simple value
if pivot_reg.input_type == "value":
if not pivot_reg.func_input_value_arg:
raise ValueError("No value for pivot func input argument was given")
if not pivot_reg.can_iterate and (
isinstance(input_value, pd.DataFrame)
or (
# pylint: disable=isinstance-second-argument-not-valid-type
                isinstance(input_value, abc.Iterable)
and not isinstance(input_value, str)
# pylint: enable=isinstance-second-argument-not-valid-type
)
):
raise ValueError(
f"This function does not accept inputs of {type(input_value)}"
)
def _arg_to_dframe(arg_val, col_name: str = "param_value"):
"""
Convert a scalar or Iterable value to a DataFrame.
Parameters
----------
arg_val: Any
The value to be converted
col_name: Optional[str]
The name to assign to the DataFrame column
Returns
-------
pd.DataFrame
The resulting DataFrame
Notes
-----
If `arg_val` is already a DataFrame it is returned as is.
"""
if isinstance(arg_val, pd.DataFrame):
return arg_val
if isinstance(arg_val, str) or not isinstance(arg_val, abc.Iterable):
return pd.DataFrame([arg_val], columns=[col_name])
return pd.DataFrame(arg_val, columns=[col_name])
def _create_input_df(input_value, pivot_reg, parent_kwargs):
"""Create input_df and params from input."""
# If input_value type is not already a dataframe, convert it.
# If the DF column param is specified, use that or fall back
# to using the function input value arg.
input_column = pivot_reg.func_df_col_param_name or pivot_reg.func_input_value_arg
# If input_value is already a DF, this call just returns the original DF
input_df = _arg_to_dframe(input_value, input_column) # type: ignore
if isinstance(input_value, pd.DataFrame):
# If the original input_value is a DataFrame
# try to find the column name specification in kwargs
for col_param in (
pivot_reg.func_df_col_param_name,
pivot_reg.func_input_value_arg,
*_DF_SRC_COL_PARAM_NAMES,
):
if col_param in parent_kwargs and parent_kwargs[col_param] in input_df:
input_column = parent_kwargs.pop(col_param)
break
else:
raise KeyError(
f"'{input_column}' is not in the input dataframe",
"Please specify the column when calling the function."
"You can use one of the parameter names for this:",
_DF_SRC_COL_PARAM_NAMES,
)
# we want to get rid of data=xyz parameters from kwargs, since we're adding them
# below
parent_kwargs.pop("data", None)
parent_kwargs.pop(pivot_reg.func_df_param_name, None)
if input_column not in input_df:
raise KeyError(f"'{input_column}' is not in the input dataframe")
if input_column:
param_dict = {
pivot_reg.func_df_param_name: input_df,
pivot_reg.func_df_col_param_name: input_column,
}
else:
# If no column was specified, the user will have to specify
# this in the call to the method - we just add the DF parameter
param_dict = {pivot_reg.func_df_param_name: input_df}
return input_df, input_column, param_dict
def _iterate_func(target_func, input_df, input_column, pivot_reg, **kwargs):
"""Call `target_func` function with values of each row in `input_df`."""
results = []
# Add any static parameters to all_rows_kwargs
all_rows_kwargs = kwargs.copy()
all_rows_kwargs.update((pivot_reg.func_static_params or {}))
res_key_col_name = pivot_reg.func_out_column_name or pivot_reg.func_input_value_arg
for row in input_df[[input_column]].itertuples(index=False):
# Get rid of any conflicting arguments from kwargs
func_kwargs = all_rows_kwargs.copy()
func_kwargs.pop(pivot_reg.func_input_value_arg, None)
# Create a param dictionary with the value parameter for this row
param_dict = {pivot_reg.func_input_value_arg: row[0]}
# run the function
        result = target_func(**param_dict, **func_kwargs)
# Process the output, if it is a DataFrame
if not pivot_reg.return_raw_output and not isinstance(result, pd.DataFrame):
col_value = next(iter(row._asdict().values()))
if isinstance(result, dict):
# if result is a dict - make that into a row.
result = pd.DataFrame(pd.Series(result)).T
result[res_key_col_name] = col_value
else:
# just make the result into a string and use that as a single col
result = pd.DataFrame(
[[col_value, str(result)]], columns=[res_key_col_name, "result"]
)
results.append(result)
if pivot_reg.return_raw_output:
if len(results) == 1:
return results[0]
return results
return pd.concat(results, ignore_index=True)
# _PARENT_SELF = "parent_self"
# def query_cont_member_wrap(func: Callable[[Any], Any]) -> Callable[[Any], Any]:
# """
# Wrap a func to work as instance method in a QueryContainer.
# Parameters
# ----------
# func : Callable[[Any], Any]
# Function to wrap as method
# Returns
# -------
# Callable[[Any], Any]
# Wrapped function
# Notes
# -----
# This is designed to be used inside a `QueryContainer`. The wrapped
# function checks to see if its arg[0] is a QueryContainer - meaning
# it has been called as an instance function of that class.
# If so, and the parent class has a _parent_self attribute, it will
# replace the original arg[0] (the self of QueryContainer) with
# the self of the containing class (_parent_self).
# It relies containing class setting `_parent_self` as an attribute
# in any QueryContainer attributes that it has. The msticpy Entity
# class does this.
# If these conditions don't apply it simply passed through the call
# to the original function.
# See Also
# --------
# QueryContainer
# Entity
# """
# @wraps(func)
# def _wrapped_member(*args, **kwargs):
# if (
# args
# and args[0].__class__.__name__ == "QueryContainer"
# and hasattr(args[0], _PARENT_SELF)
# ):
# parent_self = getattr(args[0], _PARENT_SELF)
# return func(parent_self, *args[1:], **kwargs)
# return func(*args, **kwargs)
# return _wrapped_member
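
# ---------------------------------------------------------------------------
# Illustrative usage (a sketch, not part of the module). The wrapped function,
# the entity name "IpAddress" and its "Address" attribute are assumptions made
# for this example only; it assumes the module is imported within its package.
# ---------------------------------------------------------------------------
if __name__ == "__main__":

    def _demo_lookup(ip: str) -> dict:
        """Toy value function standing in for a real enrichment lookup."""
        return {"ip": ip, "status": "ok"}

    demo_reg = PivotRegistration(
        input_type="value",
        entity_map={"IpAddress": "Address"},
        func_input_value_arg="ip",
        func_new_name="demo_lookup_ip",
    )
    demo_pivot = create_pivot_func(_demo_lookup, demo_reg)
    # Accepts a scalar, an iterable or a DataFrame; results come back as a DataFrame.
    print(demo_pivot(["10.0.0.1", "10.0.0.2"]))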
|
py | 1a3b0e72581b1472690738c9095c43f384fd97fc | from scipy.spatial import distance as dist
from imutils import face_utils
import threading
import cv2
import imutils
EYE_AR_THRESH = 0.2
EYE_AR_CONSEC_FRAMES = 48
class FatigueBackgroundWorker:
def __init__(self, vs, predictor, detector):
self.thread = threading.Thread(target=self.run, args=())
self.thread.daemon = True
self.vs = vs
self.predictor = predictor
self.detector = detector
self.drowsinessDetected = False
def start(self):
self.thread.start()
def get_result(self):
return self.drowsinessDetected
def calculate_landmarks(self):
# Calculate face landmarks
(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
return lStart, lEnd, rStart, rEnd
def eye_aspect_ratio(self, eye):
# Calculate euclidean distance between the two sets of vertical eye landmarks (x y coordinates)
A = dist.euclidean(eye[1], eye[5])
B = dist.euclidean(eye[2], eye[4])
# Calculate euclidean distance between horizontal eye landmarks
C = dist.euclidean(eye[0], eye[3])
# Calculate and return eye aspect ratio (ear)
return (A + B) / (2.0 * C)
def calculate_ear(self, shape, lStart, lEnd, rStart, rEnd):
# Detect eye positions for ear calculation
leftEye = shape[lStart:lEnd]
rightEye = shape[rStart:rEnd]
leftEAR = self.eye_aspect_ratio(leftEye)
rightEAR = self.eye_aspect_ratio(rightEye)
# Calculate average EAR for both eyes
return (leftEAR + rightEAR) / 2
def run(self):
(lStart, lEnd, rStart, rEnd) = self.calculate_landmarks()
counter = 0
while True:
frame = self.vs.read()
frame = imutils.resize(frame, width=450)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = self.detector(gray, 0)
for face in faces:
shape = self.predictor(gray, face)
shape = face_utils.shape_to_np(shape)
ear = self.calculate_ear(shape, lStart, lEnd, rStart, rEnd)
# Check if EAR is lower than treshold value
if ear < EYE_AR_THRESH:
counter += 1
# If eyes were closed for a sufficient number of frames, drowsiness is detected
if counter >= EYE_AR_CONSEC_FRAMES:
self.drowsinessDetected = True
else:
self.drowsinessDetected = False
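
# ---------------------------------------------------------------------------
# Illustrative wiring (a sketch, with assumptions): VideoStream comes from
# imutils.video, and the landmark model path is a placeholder for any dlib
# 68-point shape predictor file.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import time
    import dlib
    from imutils.video import VideoStream

    vs = VideoStream(src=0).start()
    time.sleep(1.0)  # give the camera time to warm up
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

    worker = FatigueBackgroundWorker(vs, predictor, detector)
    worker.start()
    while True:
        if worker.get_result():
            print("Drowsiness detected")
        time.sleep(1)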
|
py | 1a3b0ef74646576d718a57cd3390b5679f19c434 | """Some commonly used functions, like helpers"""
import lvgl as lv
import qrcode
import math
from micropython import const
import gc
from .components import QRCode, styles
PADDING = const(20)
BTN_HEIGHT = const(70)
HOR_RES = const(480)
VER_RES = const(800)
QR_PADDING = const(40)
def init_styles(dark=True):
if dark:
# Set theme
th = lv.theme_night_init(210, lv.font_roboto_22)
# adjusting theme
# background color
cbg = lv.color_hex(0x192432)
# ctxt = lv.color_hex(0x7f8fa4)
ctxt = lv.color_hex(0xffffff)
cbtnrel = lv.color_hex(0x506072)
cbtnpr = lv.color_hex(0x405062)
chl = lv.color_hex(0x313E50)
else:
# Set theme to light
# TODO: work in progress...
th = lv.theme_material_init(210, lv.font_roboto_22)
# adjusting theme
# background color
cbg = lv.color_hex(0xeeeeee)
# ctxt = lv.color_hex(0x7f8fa4)
ctxt = lv.color_hex(0)
cbtnrel = lv.color_hex(0x506072)
cbtnpr = lv.color_hex(0x405062)
chl = lv.color_hex(0x313E50)
th.style.label.sec.text.color = cbtnrel
th.style.scr.body.main_color = cbg
th.style.scr.body.grad_color = cbg
# text color
th.style.scr.text.color = ctxt
# buttons
# btn released
th.style.btn.rel.body.main_color = cbtnrel
th.style.btn.rel.body.grad_color = cbtnrel
th.style.btn.rel.body.shadow.width = 0
th.style.btn.rel.body.border.width = 0
th.style.btn.rel.body.radius = 10
# btn pressed
lv.style_copy(th.style.btn.pr, th.style.btn.rel)
th.style.btn.pr.body.main_color = cbtnpr
th.style.btn.pr.body.grad_color = cbtnpr
# button map released
th.style.btnm.btn.rel.body.main_color = cbg
th.style.btnm.btn.rel.body.grad_color = cbg
th.style.btnm.btn.rel.body.radius = 0
th.style.btnm.btn.rel.body.border.width = 0
th.style.btnm.btn.rel.body.shadow.width = 0
th.style.btnm.btn.rel.text.color = ctxt
# button map pressed
lv.style_copy(th.style.btnm.btn.pr, th.style.btnm.btn.rel)
th.style.btnm.btn.pr.body.main_color = chl
th.style.btnm.btn.pr.body.grad_color = chl
# button map inactive
lv.style_copy(th.style.btnm.btn.ina, th.style.btnm.btn.rel)
th.style.btnm.btn.ina.text.opa = 80
# button map background
th.style.btnm.bg.body.opa = 0
th.style.btnm.bg.body.border.width = 0
th.style.btnm.bg.body.shadow.width = 0
# textarea
th.style.ta.oneline.body.opa = 0
th.style.ta.oneline.body.border.width = 0
th.style.ta.oneline.text.font = lv.font_roboto_28
th.style.ta.oneline.text.color = ctxt
# slider
th.style.slider.knob.body.main_color = cbtnrel
th.style.slider.knob.body.grad_color = cbtnrel
th.style.slider.knob.body.radius = 5
th.style.slider.knob.body.border.width = 0
# page
th.style.page.bg.body.opa = 0
th.style.page.scrl.body.opa = 0
th.style.page.bg.body.border.width = 0
th.style.page.bg.body.padding.left = 0
th.style.page.bg.body.padding.right = 0
th.style.page.bg.body.padding.top = 0
th.style.page.bg.body.padding.bottom = 0
th.style.page.scrl.body.border.width = 0
th.style.page.scrl.body.padding.left = 0
th.style.page.scrl.body.padding.right = 0
th.style.page.scrl.body.padding.top = 0
th.style.page.scrl.body.padding.bottom = 0
lv.theme_set_current(th)
styles["theme"] = th
# Title style - just a default style with larger font
styles["title"] = lv.style_t()
lv.style_copy(styles["title"], th.style.label.prim)
styles["title"].text.font = lv.font_roboto_28
styles["title"].text.color = ctxt
styles["hint"] = lv.style_t()
lv.style_copy(styles["hint"], th.style.label.sec)
styles["hint"].text.font = lv.font_roboto_16
def add_label(text, y=PADDING, scr=None, style=None, width=None):
    """Helper function that creates a centered label with an optional style"""
if width is None:
width = HOR_RES-2*PADDING
if scr is None:
scr = lv.scr_act()
lbl = lv.label(scr)
lbl.set_text(text)
if style in styles:
lbl.set_style(0, styles[style])
lbl.set_long_mode(lv.label.LONG.BREAK)
lbl.set_width(width)
lbl.set_x((HOR_RES-width)//2)
lbl.set_align(lv.label.ALIGN.CENTER)
lbl.set_y(y)
return lbl
def add_button(text=None, callback=None, scr=None, y=700):
"""Helper function that creates a button with a text label"""
if scr is None:
scr = lv.scr_act()
btn = lv.btn(scr)
btn.set_width(HOR_RES-2*PADDING)
btn.set_height(BTN_HEIGHT)
if text is not None:
lbl = lv.label(btn)
lbl.set_text(text)
lbl.set_align(lv.label.ALIGN.CENTER)
btn.align(scr, lv.ALIGN.IN_TOP_MID, 0, 0)
btn.set_y(y)
if callback is not None:
btn.set_event_cb(callback)
return btn
def add_button_pair(text1, callback1, text2, callback2, scr=None, y=700):
    """Helper function that creates a pair of buttons aligned side by side"""
btn1 = add_button(text1, callback1, scr=scr, y=y)
btn2 = add_button(text2, callback2, scr=scr, y=y)
align_button_pair(btn1, btn2)
return btn1, btn2
def align_button_pair(btn1, btn2):
"""Aligns two buttons in a row"""
w = (HOR_RES-3*PADDING)//2
btn1.set_width(w)
btn2.set_width(w)
btn2.set_x(HOR_RES//2+PADDING//2)
def add_qrcode(text, y=QR_PADDING, scr=None, style=None, width=None):
    """Helper function that creates a QR code widget"""
if scr is None:
scr = lv.scr_act()
if width is None:
width = 350
qr = QRCode(scr)
qr.set_text(text)
qr.set_size(width)
qr.set_text(text)
qr.align(scr, lv.ALIGN.IN_TOP_MID, 0, y)
return qr
def separate(addr, letters=6, separator=" "):
extra = ""
if len(addr) % letters > 0:
extra = " "*(letters-(len(addr) % letters))
return separator.join([
addr[i:i+letters]
for i in range(0, len(addr), letters)
])+extra
def format_addr(addr, letters=6, words=3):
return separate(
separate(
addr, letters=letters, separator=" "
),
letters=(words*(letters+1)), separator="\n"
)
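
# ---------------------------------------------------------------------------
# Illustrative usage (a sketch): assumes the lvgl display and input drivers are
# already initialised elsewhere; the address and button callbacks below are
# placeholders.
# ---------------------------------------------------------------------------
def demo_receive_screen(addr="bc1qexampleaddress000000000000000000"):
    init_styles(dark=True)
    add_label("Receive address", style="title")
    add_qrcode(addr, y=QR_PADDING + 60)
    add_label(format_addr(addr), y=QR_PADDING + 60 + 350 + PADDING, style="hint")
    add_button_pair("Cancel", lambda obj, event: None,
                    "Confirm", lambda obj, event: None)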
|
py | 1a3b1023f814c75cd2e0c6dbdd9626f37201be83 | from functools import lru_cache, singledispatch
from typing import Any, Callable, List, Tuple, Union
import attr
@attr.s
class _DispatchNotFound:
"""A dummy object to help signify a dispatch not found."""
pass
class MultiStrategyDispatch:
"""
MultiStrategyDispatch uses a combination of exact-match dispatch,
singledispatch, and FunctionDispatch.
"""
__slots__ = (
"_direct_dispatch",
"_function_dispatch",
"_single_dispatch",
"_generators",
"dispatch",
)
def __init__(self, fallback_func):
self._direct_dispatch = {}
self._function_dispatch = FunctionDispatch()
self._function_dispatch.register(lambda _: True, fallback_func)
self._single_dispatch = singledispatch(_DispatchNotFound)
self.dispatch = lru_cache(maxsize=None)(self._dispatch)
def _dispatch(self, cl):
try:
dispatch = self._single_dispatch.dispatch(cl)
if dispatch is not _DispatchNotFound:
return dispatch
except Exception:
pass
direct_dispatch = self._direct_dispatch.get(cl)
if direct_dispatch is not None:
return direct_dispatch
return self._function_dispatch.dispatch(cl)
def register_cls_list(self, cls_and_handler, direct: bool = False):
"""register a class to direct or singledispatch"""
for cls, handler in cls_and_handler:
if direct:
self._direct_dispatch[cls] = handler
else:
self._single_dispatch.register(cls, handler)
self.clear_direct()
self.dispatch.cache_clear()
def register_func_list(
self,
func_and_handler: List[
Union[
Tuple[Callable[[Any], bool], Any],
Tuple[Callable[[Any], bool], Any, bool],
]
],
):
"""register a function to determine if the handle
should be used for the type
"""
for tup in func_and_handler:
if len(tup) == 2:
func, handler = tup
self._function_dispatch.register(func, handler)
else:
func, handler, is_gen = tup
self._function_dispatch.register(
func, handler, is_generator=is_gen
)
self.clear_direct()
self.dispatch.cache_clear()
def clear_direct(self):
"""Clear the direct dispatch."""
self._direct_dispatch.clear()
class FunctionDispatch:
"""
FunctionDispatch is similar to functools.singledispatch, but
instead dispatches based on functions that take the type of the
first argument in the method, and return True or False.
objects that help determine dispatch should be instantiated objects.
"""
__slots__ = ("_handler_pairs",)
def __init__(self):
self._handler_pairs = []
def register(
self, can_handle: Callable[[Any], bool], func, is_generator=False
):
self._handler_pairs.insert(0, (can_handle, func, is_generator))
def dispatch(self, typ):
"""
        Return the appropriate handler for the type passed.
"""
for can_handle, handler, is_generator in self._handler_pairs:
# can handle could raise an exception here
# such as issubclass being called on an instance.
# it's easier to just ignore that case.
try:
ch = can_handle(typ)
except Exception:
continue
if ch:
if is_generator:
return handler(typ)
else:
return handler
raise KeyError("unable to find handler for {0}".format(typ))
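
# ---------------------------------------------------------------------------
# Illustrative usage (a sketch; the handlers below are made up). Exact class
# registrations are consulted before predicate-based ones, and the fallback
# handles everything else.
# ---------------------------------------------------------------------------
if __name__ == "__main__":

    def _fallback(_cl):
        return "fallback"

    def _handle_int(_cl):
        return "int handler"

    def _handle_attrs(_cl):
        return "attrs handler"

    dispatcher = MultiStrategyDispatch(_fallback)
    dispatcher.register_cls_list([(int, _handle_int)])
    dispatcher.register_func_list([(attr.has, _handle_attrs)])

    print(dispatcher.dispatch(int)(int))  # -> "int handler"
    print(dispatcher.dispatch(str)(str))  # -> "fallback"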
|
py | 1a3b10901d71eab92b159e40db1c18f76ea0aea1 | #!/usr/bin/env python3
# Quantopian, Inc. licenses this file to you under the Apache License, Version
# 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# Try to identify a running sshd process in two ways: (1) look for a process
# with the name "sshd" or "in.sshd"; (2) look for a process listening on port
# 22. If neither of those is found, then assume that sshd is stopped.
#
# Separately, check the sshd configuration for password-based logins by
# invoking sshd with the "-T" argument to get its effective configuration. If
# we were able to identify a running sshd process as described above, then use
# that process to determine the path to the sshd binary and/or the non-default
# configuration file name. Otherwise, just do "sshd -T", hoping that it'll be
# in our search path.
import json
import psutil
import re
import subprocess
sshd_name_re = re.compile(r'\bsshd\b|\bin\.sshd\b')
results = {}
try:
sshd_process = None
for p in psutil.process_iter():
try:
if sshd_name_re.search(p.exe()) or \
any(c for c in p.connections('tcp')
if c.laddr[1] == 22 and not len(c.raddr)):
sshd_process = p.as_dict(attrs=('exe', 'cmdline'))
break
except (FileNotFoundError, psutil.NoSuchProcess):
continue
else:
raise StopIteration()
except StopIteration:
sshd_process = None
results['status'] = 'stopped'
sshd_config_command = ['sshd', '-T']
else:
results['status'] = 'running'
sshd_config_command = [sshd_process['exe'], '-T']
sshd_cmdline = sshd_process['cmdline']
try:
sshd_config_file = sshd_cmdline[sshd_cmdline.index('-f') + 1]
sshd_config_command.extend(['-f', sshd_config_file])
except:
pass
try:
sshd_config = subprocess.check_output(
sshd_config_command, stderr=open('/dev/null', 'w')).decode('utf8')
except FileNotFoundError:
if not sshd_process:
results['status'] = 'missing'
sshd_config = ''
except:
sshd_config = ''
results['config'] = {}
if sshd_config:
for config in sshd_config.strip().split('\n'):
key, value = config.split(' ', 1)
results['config'][key] = value
print(json.dumps(results))
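
# Example output shape (values here are illustrative only):
#   {"status": "running",
#    "config": {"port": "22", "passwordauthentication": "no", ...}}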
|
py | 1a3b115a613f8fc2347d11daec3499024231ae60 | #!/usr/bin/env python3
"""Mininet tests for FAUCET."""
# pylint: disable=too-many-lines
# pylint: disable=missing-docstring
# pylint: disable=too-many-arguments
# pylint: disable=unbalanced-tuple-unpacking
import binascii
import collections
import copy
import itertools
import ipaddress
import json
import os
import random
import re
import shutil
import socket
import threading
import time
import unittest
from http.server import SimpleHTTPRequestHandler
from http.server import HTTPServer
import scapy.all
import yaml # pytype: disable=pyi-error
from mininet.log import error
from mininet.util import pmonitor
from clib import mininet_test_base
from clib import mininet_test_util
from clib import mininet_test_topo
from clib.mininet_test_base import PEER_BGP_AS, IPV4_ETH, IPV6_ETH
MIN_MBPS = 100
CONFIG_BOILER_UNTAGGED = """
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
CONFIG_TAGGED_BOILER = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
%(port_2)d:
tagged_vlans: [100]
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
tagged_vlans: [100]
"""
class QuietHTTPServer(HTTPServer):
allow_reuse_address = True
timeout = None
@staticmethod
def handle_error(_request, _client_address):
return
class PostHandler(SimpleHTTPRequestHandler):
@staticmethod
def log_message(_format, *_args):
return
def _log_post(self):
content_len = int(self.headers.get('content-length', 0))
content = self.rfile.read(content_len).decode().strip()
if content and hasattr(self.server, 'influx_log'):
with open(self.server.influx_log, 'a') as influx_log:
influx_log.write(content + '\n')
class InfluxPostHandler(PostHandler):
def do_POST(self): # pylint: disable=invalid-name
self._log_post()
return self.send_response(204)
class SlowInfluxPostHandler(PostHandler):
def do_POST(self): # pylint: disable=invalid-name
self._log_post()
time.sleep(self.server.timeout * 3)
return self.send_response(500)
class FaucetTest(mininet_test_base.FaucetTestBase):
pass
class FaucetUntaggedTest(FaucetTest):
"""Basic untagged VLAN test."""
HOST_NAMESPACE = {}
N_UNTAGGED = 4
N_TAGGED = 0
LINKS_PER_HOST = 1
EVENT_SOCK_HEARTBEAT = '5'
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
"""
# pylint: disable=invalid-name
CONFIG = CONFIG_BOILER_UNTAGGED
def setUp(self): # pylint: disable=invalid-name
super(FaucetUntaggedTest, self).setUp()
self.topo = self.topo_class(
self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
n_tagged=self.N_TAGGED, n_untagged=self.N_UNTAGGED,
links_per_host=self.LINKS_PER_HOST, hw_dpid=self.hw_dpid,
host_namespace=self.HOST_NAMESPACE)
self.start_net()
def verify_events_log(self, event_log, timeout=10):
required_events = {'CONFIG_CHANGE', 'PORT_CHANGE', 'L2_LEARN', 'PORTS_STATUS', 'EVENT_SOCK_HEARTBEAT'}
for _ in range(timeout):
prom_event_id = self.scrape_prometheus_var('faucet_event_id', dpid=False)
event_id = None
with open(event_log, 'r') as event_log_file:
for event_log_line in event_log_file.readlines():
event = json.loads(event_log_line.strip())
event_id = event['event_id']
required_events -= set(event.keys())
if prom_event_id == event_id:
return
time.sleep(1)
self.assertEqual(prom_event_id, event_id)
self.assertFalse(required_events)
def test_untagged(self):
"""All hosts on the same untagged VLAN should have connectivity."""
self._enable_event_log()
self.ping_all_when_learned()
self.flap_all_switch_ports()
self.verify_traveling_dhcp_mac()
self.gauge_smoke_test()
self.prometheus_smoke_test()
self.assertGreater(os.path.getsize(self.event_log), 0)
self.verify_events_log(self.event_log)
class Faucet8021XBaseTest(FaucetTest):
HOST_NAMESPACE = {3: False}
N_UNTAGGED = 4
N_TAGGED = 0
LINKS_PER_HOST = 1
RADIUS_PORT = None
DOT1X_EXPECTED_EVENTS = []
SESSION_TIMEOUT = 3600
LOG_LEVEL = 'DEBUG'
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
"""
CONFIG = """
dot1x:
nfv_intf: NFV_INTF
nfv_sw_port: %(port_4)d
radius_ip: 127.0.0.1
radius_port: RADIUS_PORT
radius_secret: SECRET
interfaces:
%(port_1)d:
native_vlan: 100
# 802.1x client.
dot1x: True
%(port_2)d:
native_vlan: 100
# 802.1X client.
dot1x: True
%(port_3)d:
native_vlan: 100
# ping host.
%(port_4)d:
output_only: True
# "NFV host - interface used by controller."
"""
wpasupplicant_conf_1 = """
ap_scan=0
network={
key_mgmt=IEEE8021X
eap=MD5
identity="user"
password="microphone"
}
"""
wpasupplicant_conf_2 = """
ap_scan=0
network={
key_mgmt=IEEE8021X
eap=MD5
identity="admin"
password="megaphone"
}
"""
freeradius_user_conf = """user Cleartext-Password := "microphone"
Session-timeout = {0}
admin Cleartext-Password := "megaphone"
Session-timeout = {0}
vlanuser1001 Cleartext-Password := "password"
Tunnel-Type = "VLAN",
Tunnel-Medium-Type = "IEEE-802",
Tunnel-Private-Group-id = "radiusassignedvlan1"
vlanuser2222 Cleartext-Password := "milliphone"
Tunnel-Type = "VLAN",
Tunnel-Medium-Type = "IEEE-802",
Tunnel-Private-Group-id = "radiusassignedvlan2"
filter_id_user_accept Cleartext-Password := "accept_pass"
Filter-Id = "accept_acl"
filter_id_user_deny Cleartext-Password := "deny_pass"
Filter-Id = "deny_acl"
"""
eapol1_host = None
eapol2_host = None
ping_host = None
nfv_host = None
nfv_intf = None
nfv_portno = None
@staticmethod
def _priv_mac(host_id):
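        # e.g. host_id 3 -> '00:00:00:00:00:03' (per-port "private" MAC used to
        # plumb EAPOL frames between the supplicant port and the NFV port).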
two_byte_port_num = '%04x' % host_id
two_byte_port_num_formatted = ':'.join((two_byte_port_num[:2], two_byte_port_num[2:]))
return '00:00:00:00:%s' % two_byte_port_num_formatted
def _init_faucet_config(self):
self.eapol1_host, self.eapol2_host, self.ping_host, self.nfv_host = self.hosts_name_ordered()
switch = self.first_switch()
last_host_switch_link = switch.connectionsTo(self.nfv_host)[0]
nfv_intf = [
intf for intf in last_host_switch_link if intf in switch.intfList()][0]
self.nfv_intf = str(nfv_intf)
nfv_intf = self.nfv_host.intf()
self.RADIUS_PORT = mininet_test_util.find_free_udp_port(self.ports_sock, self._test_name())
self.CONFIG = self.CONFIG.replace('NFV_INTF', str(nfv_intf))
self.CONFIG = self.CONFIG.replace('RADIUS_PORT', str(self.RADIUS_PORT))
super(Faucet8021XBaseTest, self)._init_faucet_config()
def setUp(self):
super(Faucet8021XBaseTest, self).setUp()
self.topo = self.topo_class(
self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
n_tagged=self.N_TAGGED, n_untagged=self.N_UNTAGGED,
links_per_host=self.LINKS_PER_HOST, hw_dpid=self.hw_dpid,
host_namespace=self.HOST_NAMESPACE)
self.start_net()
self.nfv_portno = self.port_map['port_4']
self.host_drop_all_ips(self.nfv_host)
self.nfv_pids = []
tcpdump_args = '-e -n -U'
self.eapol1_host.cmd(
mininet_test_util.timeout_cmd(
'tcpdump -w %s/%s-start.pcap %s ether proto 0x888e &' % (
self.tmpdir, self.eapol1_host.name, tcpdump_args), 300))
self.nfv_host.cmd(
mininet_test_util.timeout_cmd(
'tcpdump -i %s-eth0 -w %s/eap-lo.pcap %s ether proto 0x888e &' % (
self.nfv_host.name, self.tmpdir, tcpdump_args), 300))
self.nfv_pids.append(int(self.nfv_host.lastPid))
self.nfv_host.cmd(
mininet_test_util.timeout_cmd(
'tcpdump -i lo -w %s/radius.pcap %s udp port %d &' % (
self.tmpdir, tcpdump_args, self.RADIUS_PORT), 300))
self.nfv_pids.append(int(self.nfv_host.lastPid))
self.radius_log_path = self.start_freeradius()
self.nfv_pids.append(int(self.nfv_host.lastPid))
self._enable_event_log(300)
def tearDown(self, ignore_oferrors=False):
for pid in self.nfv_pids:
self.nfv_host.cmd('kill %u' % pid)
super(Faucet8021XBaseTest, self).tearDown(ignore_oferrors=ignore_oferrors)
def post_test_checks(self):
self.assertGreater(os.path.getsize(self.event_log), 0)
self.verify_dot1x_events_log()
def verify_dot1x_events_log(self):
def replace_mac(host_no):
replacement_macs = {
'HOST1_MAC': self.eapol1_host.MAC(),
'HOST2_MAC': self.eapol2_host.MAC(),
'HOST3_MAC': self.ping_host.MAC(),
'HOST4_MAC': self.nfv_host.MAC(),
}
return replacement_macs.get(host_no, None)
def insert_dynamic_values(dot1x_expected_events):
for dot1x_event in dot1x_expected_events:
top_level_key = list(dot1x_event.keys())[0]
dot1x_params = {'dp_id': int(self.dpid)}
for key, val in dot1x_event[top_level_key].items():
if key == 'port':
dot1x_params[key] = self.port_map[val]
elif key == 'eth_src':
dot1x_params[key] = replace_mac(val)
dot1x_event[top_level_key].update(dot1x_params)
if not self.DOT1X_EXPECTED_EVENTS:
return
dot1x_expected_events = copy.deepcopy(self.DOT1X_EXPECTED_EVENTS)
insert_dynamic_values(dot1x_expected_events)
with open(self.event_log, 'r') as event_file:
events_that_happened = []
for event_log_line in event_file.readlines():
if 'DOT1X' not in event_log_line:
continue
event = json.loads(event_log_line.strip())
events_that_happened.append(event['DOT1X'])
for expected_event in dot1x_expected_events:
self.assertTrue(expected_event in events_that_happened,
msg='expected event: {} not in events_that_happened {}'.format(
expected_event, events_that_happened))
def try_8021x(self, host, port_num, conf, and_logoff=False, terminate_wpasupplicant=False,
wpasup_timeout=180, tcpdump_timeout=15, tcpdump_packets=10,
expect_success=True):
if expect_success:
self.wait_8021x_flows(port_num)
port_labels = self.port_labels(port_num)
success_total = self.scrape_prometheus_var(
'port_dot1x_success_total', labels=port_labels, default=0)
failure_total = self.scrape_prometheus_var(
'port_dot1x_failure_total', labels=port_labels, default=0)
logoff_total = self.scrape_prometheus_var(
'port_dot1x_logoff_total', labels=port_labels, default=0)
dp_success_total = self.scrape_prometheus_var(
'dp_dot1x_success_total', default=0)
dp_failure_total = self.scrape_prometheus_var(
'dp_dot1x_failure_total', default=0)
dp_logoff_total = self.scrape_prometheus_var(
'dp_dot1x_logoff_total', default=0)
tcpdump_filter = 'ether proto 0x888e'
tcpdump_txt = self.tcpdump_helper(
host, tcpdump_filter, [
lambda: self.wpa_supplicant_callback(
host, port_num, conf, and_logoff,
timeout=wpasup_timeout,
terminate_wpasupplicant=terminate_wpasupplicant)],
timeout=tcpdump_timeout, vflags='-vvv', packets=tcpdump_packets)
if expect_success:
self.wait_for_eap_success(host, self.get_wpa_ctrl_path(host))
if not and_logoff:
self.wait_8021x_success_flows(host, port_num)
success = 'Success' in tcpdump_txt
new_success_total = self.scrape_prometheus_var(
'port_dot1x_success_total', labels=port_labels, default=0)
new_failure_total = self.scrape_prometheus_var(
'port_dot1x_failure_total', labels=port_labels, default=0)
new_logoff_total = self.scrape_prometheus_var(
'port_dot1x_logoff_total', labels=port_labels, default=0)
new_dp_success_total = self.scrape_prometheus_var(
'dp_dot1x_success_total', default=0)
new_dp_failure_total = self.scrape_prometheus_var(
'dp_dot1x_failure_total', default=0)
new_dp_logoff_total = self.scrape_prometheus_var(
'dp_dot1x_logoff_total', default=0)
if expect_success != success:
return False
if expect_success and success:
self.assertGreater(new_success_total, success_total)
self.assertGreater(new_dp_success_total, dp_success_total)
self.assertEqual(failure_total, new_failure_total)
self.assertEqual(dp_failure_total, new_dp_failure_total)
logoff = 'logoff' in tcpdump_txt
if logoff != and_logoff:
return False
if and_logoff:
self.assertGreater(new_logoff_total, logoff_total)
return True
self.assertEqual(logoff_total, new_logoff_total)
self.assertEqual(dp_logoff_total, new_dp_logoff_total)
self.assertEqual(dp_success_total, new_dp_success_total)
self.assertGreaterEqual(new_failure_total, failure_total)
self.assertGreaterEqual(new_dp_failure_total, dp_failure_total)
return False
def retry_8021x(self, host, port_num, conf, and_logoff=False, retries=2, expect_success=True):
for _ in range(retries):
if self.try_8021x(host, port_num, conf, and_logoff, expect_success=expect_success):
return True
time.sleep(1)
return False
def wait_8021x_flows(self, port_no):
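        # Two table-0 flows are expected per 802.1X-enabled port: EAPOL frames from
        # the supplicant port are rewritten to the port's private MAC and sent to the
        # NFV port, while replies from the NFV port get their source set to the PAE
        # address (01:80:c2:00:00:03) and are output back to the supplicant port.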
port_actions = [
'SET_FIELD: {eth_dst:%s}' % self._priv_mac(port_no), 'OUTPUT:%u' % self.nfv_portno]
from_nfv_actions = [
'SET_FIELD: {eth_src:01:80:c2:00:00:03}', 'OUTPUT:%d' % port_no]
from_nfv_match = {
'in_port': self.nfv_portno, 'dl_src': self._priv_mac(port_no), 'dl_type': 0x888e}
self.wait_until_matching_flow(None, table_id=0, actions=port_actions)
self.wait_until_matching_flow(from_nfv_match, table_id=0, actions=from_nfv_actions)
def wait_8021x_success_flows(self, host, port_no):
from_host_actions = [
'GOTO_TABLE:1']
from_host_match = {
'in_port': port_no, 'dl_src': host.MAC()}
self.wait_until_matching_flow(from_host_match, table_id=0, actions=from_host_actions)
def verify_host_success(self, eapol_host, port_no, wpasupplicant_conf, and_logoff):
self.one_ipv4_ping(
eapol_host, self.ping_host.IP(), require_host_learned=False, expected_result=False)
self.assertTrue(
self.try_8021x(
eapol_host, port_no, wpasupplicant_conf, and_logoff=and_logoff))
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(), require_host_learned=False, expected_result=True)
def wpa_supplicant_callback(self, host, port_num, conf, and_logoff, timeout=10, terminate_wpasupplicant=False):
wpa_ctrl_path = self.get_wpa_ctrl_path(host)
if os.path.exists(wpa_ctrl_path):
self.terminate_wpasupplicant(host)
for pid in host.cmd('lsof -t %s' % wpa_ctrl_path).splitlines():
try:
os.kill(int(pid), 15)
except (ValueError, ProcessLookupError):
pass
try:
shutil.rmtree(wpa_ctrl_path)
except FileNotFoundError:
pass
log_prefix = host.name + '_'
self.start_wpasupplicant(
host, conf, timeout=timeout,
wpa_ctrl_socket_path=wpa_ctrl_path, log_prefix=log_prefix)
if and_logoff:
self.wait_for_eap_success(host, wpa_ctrl_path)
self.wait_until_matching_flow(
{'eth_src': host.MAC(), 'in_port': port_num}, table_id=0)
self.one_ipv4_ping(
host, self.ping_host.IP(), require_host_learned=False)
host.cmd('wpa_cli -p %s logoff' % wpa_ctrl_path)
self.wait_until_no_matching_flow(
{'eth_src': host.MAC(), 'in_port': port_num}, table_id=0)
self.one_ipv4_ping(
host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
if terminate_wpasupplicant:
self.terminate_wpasupplicant(host)
def terminate_wpasupplicant(self, host):
wpa_ctrl_path = self.get_wpa_ctrl_path(host)
host.cmd('wpa_cli -p %s terminate' % wpa_ctrl_path)
def get_wpa_ctrl_path(self, host):
wpa_ctrl_path = os.path.join(
self.tmpdir, '%s/%s-wpasupplicant' % (self.tmpdir, host.name))
return wpa_ctrl_path
@staticmethod
def get_wpa_status(host, wpa_ctrl_path):
status = host.cmdPrint('wpa_cli -p %s status' % wpa_ctrl_path)
for line in status.splitlines():
if line.startswith('EAP state'):
return line.split('=')[1].strip()
return None
def wait_for_eap_success(self, host, wpa_ctrl_path, timeout=5):
for _ in range(timeout):
eap_state = self.get_wpa_status(host, wpa_ctrl_path)
if eap_state == 'SUCCESS':
return
time.sleep(1)
self.fail('did not get EAP success: %s' % eap_state)
def wait_for_radius(self, radius_log_path):
self.wait_until_matching_lines_from_file(
r'.*Ready to process requests', radius_log_path)
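# start_freeradius: copy the system FreeRADIUS configuration into the test tmpdir, rewrite its
# listen sections to use the test RADIUS ports, write the test users and client secret, then
# launch freeradius and wait until it is ready to process requests.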
def start_freeradius(self):
radius_log_path = '%s/radius.log' % self.tmpdir
listen_match = r'(listen {[^}]*(limit {[^}]*})[^}]*})|(listen {[^}]*})'
listen_config = """listen {
type = auth
ipaddr = *
port = %s
}
listen {
type = acct
ipaddr = *
port = %d
}""" % (self.RADIUS_PORT, self.RADIUS_PORT + 1)
if os.path.isfile('/etc/freeradius/users'):
# Assume we are dealing with freeradius 2 configuration
shutil.copytree('/etc/freeradius/', '%s/freeradius' % self.tmpdir)
users_path = '%s/freeradius/users' % self.tmpdir
with open('%s/freeradius/radiusd.conf' % self.tmpdir, 'r+') as default_site:
default_config = default_site.read()
default_config = re.sub(listen_match, '', default_config)
default_site.seek(0)
default_site.write(default_config)
default_site.write(listen_config)
default_site.truncate()
else:
# Assume we are dealing with freeradius >=3 configuration
freerad_version = os.popen(
r'freeradius -v | egrep -o -m 1 "Version ([0-9]\.[0-9])"').read().rstrip()
freerad_major_version = freerad_version.split(' ')[1]
shutil.copytree('/etc/freeradius/%s/' % freerad_major_version,
'%s/freeradius' % self.tmpdir)
users_path = '%s/freeradius/mods-config/files/authorize' % self.tmpdir
with open('%s/freeradius/sites-enabled/default' % self.tmpdir, 'r+') as default_site:
default_config = default_site.read()
default_config = re.sub(
listen_match, '', default_config)
default_config = re.sub(
r'server default {', 'server default {\n'+listen_config, default_config)
default_site.seek(0)
default_site.write(default_config)
default_site.truncate()
with open(users_path, 'w') as users_file:
users_file.write(self.freeradius_user_conf.format(self.SESSION_TIMEOUT))
with open('%s/freeradius/clients.conf' % self.tmpdir, 'w') as clients:
clients.write("""client localhost {
ipaddr = 127.0.0.1
secret = SECRET
}""")
with open('%s/freeradius/sites-enabled/inner-tunnel' % self.tmpdir, 'r+') as innertunnel_site:
tunnel_config = innertunnel_site.read()
listen_config = """listen {
ipaddr = 127.0.0.1
port = %d
type = auth
}""" % (self.RADIUS_PORT + 2)
tunnel_config = re.sub(listen_match, listen_config, tunnel_config)
innertunnel_site.seek(0)
innertunnel_site.write(tunnel_config)
innertunnel_site.truncate()
os.system('chmod o+rx %s' % self.root_tmpdir)
os.system('chown -R root:freerad %s/freeradius/' % self.tmpdir)
self.nfv_host.cmd(
mininet_test_util.timeout_cmd(
'freeradius -X -l %s -d %s/freeradius &' % (radius_log_path, self.tmpdir),
300))
self.wait_for_radius(radius_log_path)
return radius_log_path
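# Basic 802.1x success case: both supplicant hosts should authenticate; the second host also logs off.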
class Faucet8021XSuccessTest(Faucet8021XBaseTest):
DOT1X_EXPECTED_EVENTS = [
{'ENABLED': {}},
{'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_2', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_4', 'port_type': 'nfv'}},
{'AUTHENTICATION': {'port': 'port_1', 'eth_src': 'HOST1_MAC', 'status': 'success'}},
{'AUTHENTICATION': {'port': 'port_2', 'eth_src': 'HOST2_MAC', 'status': 'success'}},
{'AUTHENTICATION': {'port': 'port_2', 'eth_src': 'HOST2_MAC', 'status': 'logoff'}}]
SESSION_TIMEOUT = 3600
def test_untagged(self):
self.verify_host_success(
self.eapol1_host, self.port_map['port_1'], self.wpasupplicant_conf_1, False)
self.verify_host_success(
self.eapol2_host, self.port_map['port_2'], self.wpasupplicant_conf_1, True)
self.post_test_checks()
class Faucet8021XFailureTest(Faucet8021XBaseTest):
"""Failure due to incorrect identity/password"""
wpasupplicant_conf_1 = """
ap_scan=0
network={
key_mgmt=IEEE8021X
eap=MD5
identity="user"
password="wrongpassword"
}
"""
DOT1X_EXPECTED_EVENTS = [
{'ENABLED': {}},
{'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_2', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_4', 'port_type': 'nfv'}},
{'AUTHENTICATION': {'port': 'port_1', 'eth_src': 'HOST1_MAC', 'status': 'failure'}}]
def test_untagged(self):
self.assertFalse(
self.try_8021x(
self.eapol1_host, self.port_map['port_1'], self.wpasupplicant_conf_1, and_logoff=False, expect_success=False))
self.post_test_checks()
class Faucet8021XPortStatusTest(Faucet8021XBaseTest):
DOT1X_EXPECTED_EVENTS = [
{'ENABLED': {}},
{'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_2', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_4', 'port_type': 'nfv'}},
{'PORT_DOWN': {'port': 'port_1', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_4', 'port_type': 'nfv'}},
{'PORT_DOWN': {'port': 'port_1', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_4', 'port_type': 'nfv'}},
{'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}},
{'AUTHENTICATION': {'port': 'port_1', 'eth_src': 'HOST1_MAC', 'status': 'success'}},
{'PORT_DOWN': {'port': 'port_1', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}}]
def test_untagged(self):
port_no1 = self.port_map['port_1']
port_no2 = self.port_map['port_2']
port_no4 = self.port_map['port_4']
self.wait_8021x_flows(port_no1)
self.set_port_down(port_no1)
# self.wait_until_no_matching_flow(None, table_id=0, actions=actions)
self.set_port_up(port_no1)
self.wait_8021x_flows(port_no1)
self.set_port_down(port_no4)
# self.wait_until_no_matching_flow(match, table_id=0, actions=actions)
self.set_port_up(port_no4)
self.wait_8021x_flows(port_no1)
# check that only the rules for port 2 are installed after the NFV port comes up
self.set_port_down(port_no1)
self.flap_port(port_no4)
self.wait_8021x_flows(port_no2)
# port_no1 is still down, so no 802.1x flows for it are expected
self.set_port_up(port_no1)
self.wait_8021x_flows(port_no1)
# When the port goes down and then comes back up, the host should no longer be authenticated.
self.assertTrue(self.retry_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=False))
self.one_ipv4_ping(self.eapol1_host, self.ping_host.IP(), require_host_learned=False)
# terminate the supplicant so it does not automatically reauthenticate when the port comes back up.
self.terminate_wpasupplicant(self.eapol1_host)
self.flap_port(port_no1)
self.wait_8021x_flows(port_no1)
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.post_test_checks()
class Faucet8021XPortFlapTest(Faucet8021XBaseTest):
def test_untagged(self):
port_no1 = self.port_map['port_1']
for _ in range(2):
self.set_port_up(port_no1)
self.assertTrue(self.retry_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=True))
self.set_port_down(port_no1)
self.assertFalse(self.try_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=False, expect_success=False))
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
wpa_status = self.get_wpa_status(
self.eapol1_host, self.get_wpa_ctrl_path(self.eapol1_host))
self.assertNotEqual('SUCCESS', wpa_status)
# Kill the supplicant so it cannot reply to the identity request sent when the port comes up.
self.terminate_wpasupplicant(self.eapol1_host)
self.post_test_checks()
class Faucet8021XIdentityOnPortUpTest(Faucet8021XBaseTest):
def test_untagged(self):
port_no1 = self.port_map['port_1']
# start wpa_supplicant and log on, then bring the port back up so an identity request is sent; expect a second success.
self.set_port_up(port_no1)
self.assertTrue(self.try_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=False,
tcpdump_timeout=180, tcpdump_packets=6))
self.set_port_down(port_no1)
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
def port_up(port):
self.set_port_up(port)
self.wait_8021x_flows(port)
tcpdump_filter = 'ether proto 0x888e'
tcpdump_txt = self.tcpdump_helper(
self.eapol1_host, tcpdump_filter, [
lambda: port_up(port_no1)],
timeout=80, vflags='-vvv', packets=10)
for req_str in (
'len 5, Request (1)', # assume that this is the identity request
'Identity: user', # supplicant replies with username
'Success', # supplicant success
):
self.assertTrue(req_str in tcpdump_txt)
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=True, retries=10)
self.post_test_checks()
class Faucet8021XPeriodicReauthTest(Faucet8021XBaseTest):
SESSION_TIMEOUT = 15
def test_untagged(self):
port_no1 = self.port_map['port_1']
port_labels1 = self.port_labels(port_no1)
self.set_port_up(port_no1)
self.assertTrue(self.try_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=False))
last_total = self.scrape_prometheus_var(
'port_dot1x_success_total', labels=port_labels1, default=0)
for _ in range(4):
for _ in range(self.SESSION_TIMEOUT * 2):
total = self.scrape_prometheus_var(
'port_dot1x_success_total', labels=port_labels1, default=0)
if total > last_total:
break
time.sleep(1)
self.assertGreater(total, last_total, msg='failed to successfully re-auth')
last_total = total
self.post_test_checks()
class Faucet8021XConfigReloadTest(Faucet8021XBaseTest):
def test_untagged(self):
port_no1 = self.port_map['port_1']
port_no2 = self.port_map['port_2']
self.wait_8021x_flows(port_no1)
self.wait_8021x_flows(port_no2)
conf = self._get_faucet_conf()
conf['dps'][self.DP_NAME]['interfaces'][port_no1]['dot1x'] = False
self.reload_conf(
conf, self.faucet_config_path,
restart=True, cold_start=False, change_expected=True)
self.wait_8021x_flows(port_no2)
self.post_test_checks()
class Faucet8021XCustomACLLoginTest(Faucet8021XBaseTest):
"""Ensure that 8021X Port ACLs Work before and after Login"""
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
auth_acl:
- rule:
dl_type: 0x800 # Allow ICMP / IPv4
ip_proto: 1
actions:
allow: True
- rule:
dl_type: 0x0806 # ARP Packets
actions:
allow: True
noauth_acl:
- rule:
dl_type: 0x800 # Deny ICMP / IPv4
ip_proto: 1
actions:
allow: False
- rule:
dl_type: 0x0806 # ARP Packets
actions:
allow: True
"""
CONFIG = """
dot1x:
nfv_intf: NFV_INTF
nfv_sw_port: %(port_4)d
radius_ip: 127.0.0.1
radius_port: RADIUS_PORT
radius_secret: SECRET
auth_acl: auth_acl
noauth_acl: noauth_acl
interfaces:
%(port_1)d:
name: b1
description: "b1"
native_vlan: 100
# 802.1x client.
dot1x: True
dot1x_acl: True
%(port_2)d:
name: b2
description: "b2"
native_vlan: 100
# 802.1X client.
dot1x: True
dot1x_acl: True
%(port_3)d:
name: b3
description: "b3"
native_vlan: 100
# ping host.
%(port_4)d:
name: b4
description: "b4"
output_only: True
# "NFV host - interface used by controller."
"""
def test_untagged(self):
self.verify_host_success(
self.eapol1_host, self.port_map['port_1'], self.wpasupplicant_conf_1, False)
self.post_test_checks()
class Faucet8021XCustomACLLogoutTest(Faucet8021XCustomACLLoginTest):
"""Ensure that 8021X Port ACLs Work before and after Logout"""
def test_untagged(self):
self.one_ipv4_ping(self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.assertTrue(self.try_8021x(
self.eapol1_host, self.port_map['port_1'], self.wpasupplicant_conf_1, and_logoff=True))
self.one_ipv4_ping(self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.post_test_checks()
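# MAC Authentication Bypass (MAB): with dot1x_mab enabled, the port authenticates a host by its
# MAC address (added below as a cleartext 'password' in the FreeRADIUS users file) rather than via
# an EAP exchange; here DHCP traffic from the host triggers the bypass.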
class Faucet8021XMABTest(Faucet8021XSuccessTest):
"""Ensure that 802.1x Port Supports Mac Auth Bypass."""
DOT1X_EXPECTED_EVENTS = [{'ENABLED': {}},
{'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_2', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_4', 'port_type': 'nfv'}},
{'AUTHENTICATION': {'port': 'port_1', 'eth_src': 'HOST1_MAC',
'status': 'success'}},
]
CONFIG = """
dot1x:
nfv_intf: NFV_INTF
nfv_sw_port: %(port_4)d
radius_ip: 127.0.0.1
radius_port: RADIUS_PORT
radius_secret: SECRET
interfaces:
%(port_1)d:
native_vlan: 100
# 802.1x client.
dot1x: True
dot1x_mab: True
%(port_2)d:
native_vlan: 100
# 802.1X client.
dot1x: True
%(port_3)d:
native_vlan: 100
# ping host.
%(port_4)d:
output_only: True
# "NFV host - interface used by controller."
"""
def start_freeradius(self):
# Add the host mac address to the FreeRADIUS config
self.freeradius_user_conf += '\n{0} Cleartext-Password := "{0}"'.format(
str(self.eapol1_host.MAC()).replace(':', '')
)
return super(Faucet8021XMABTest, self).start_freeradius()
@staticmethod
def dhclient_callback(host, timeout):
dhclient_cmd = 'dhclient -d -1 %s' % host.defaultIntf()
return host.cmd(mininet_test_util.timeout_cmd(dhclient_cmd, timeout), verbose=True)
def test_untagged(self):
port_no1 = self.port_map['port_1']
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.dhclient_callback(self.eapol1_host, 10)
self.wait_until_matching_lines_from_file(r'.*AAA_SUCCESS.*', self.env['faucet']['FAUCET_LOG'])
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=True)
self.assertEqual(
1,
self.scrape_prometheus_var('port_dot1x_success_total', labels=self.port_labels(port_no1), default=0))
self.post_test_checks()
class Faucet8021XDynACLLoginTest(Faucet8021XCustomACLLoginTest):
"""Ensure that 8021X Port ACLs Work before and after Logout"""
DOT1X_EXPECTED_EVENTS = [
{'ENABLED': {}},
{'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_2', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_4', 'port_type': 'nfv'}},
{'AUTHENTICATION': {'port': 'port_1', 'eth_src': 'HOST1_MAC', 'status': 'success'}},
{'AUTHENTICATION': {'port': 'port_2', 'eth_src': 'HOST2_MAC', 'status': 'success'}},
]
wpasupplicant_conf_1 = """
ap_scan=0
network={
key_mgmt=IEEE8021X
eap=MD5
identity="filter_id_user_accept"
password="accept_pass"
}
"""
wpasupplicant_conf_2 = """
ap_scan=0
network={
key_mgmt=IEEE8021X
eap=MD5
identity="filter_id_user_deny"
password="deny_pass"
}
"""
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
accept_acl:
dot1x_assigned: True
rules:
- rule:
dl_type: 0x800 # Allow ICMP / IPv4
ip_proto: 1
actions:
allow: True
- rule:
dl_type: 0x0806 # ARP Packets
actions:
allow: True
deny_acl:
dot1x_assigned: True
rules:
- rule:
dl_type: 0x800 # Deny ICMP / IPv4
ip_proto: 1
actions:
allow: False
- rule:
dl_type: 0x0806 # ARP Packets
actions:
allow: True
"""
CONFIG = """
dot1x:
nfv_intf: NFV_INTF
nfv_sw_port: %(port_4)d
radius_ip: 127.0.0.1
radius_port: RADIUS_PORT
radius_secret: SECRET
interfaces:
%(port_1)d:
name: b1
description: "b1"
native_vlan: 100
# 802.1x client.
dot1x: True
dot1x_dyn_acl: True
%(port_2)d:
name: b2
description: "b2"
native_vlan: 100
# 802.1X client.
dot1x: True
dot1x_dyn_acl: True
%(port_3)d:
name: b3
description: "b3"
native_vlan: 100
# ping host.
%(port_4)d:
name: b4
description: "b4"
output_only: True
# "NFV host - interface used by controller."
"""
def test_untagged(self):
port_no1 = self.port_map['port_1']
port_no2 = self.port_map['port_2']
self.one_ipv4_ping(self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.one_ipv4_ping(self.eapol2_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.assertTrue(self.try_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=False))
self.assertTrue(self.try_8021x(
self.eapol2_host, port_no2, self.wpasupplicant_conf_2, and_logoff=False))
self.one_ipv4_ping(self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=True)
self.one_ipv4_ping(self.eapol2_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.post_test_checks()
class Faucet8021XDynACLLogoutTest(Faucet8021XDynACLLoginTest):
DOT1X_EXPECTED_EVENTS = [
{'ENABLED': {}},
{'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_4', 'port_type': 'nfv'}},
{'AUTHENTICATION': {'port': 'port_1', 'eth_src': 'HOST1_MAC', 'status': 'success'}},
{'AUTHENTICATION': {'port': 'port_1', 'eth_src': 'HOST1_MAC', 'status': 'logoff'}}
]
def test_untagged(self):
port_no1 = self.port_map['port_1']
self.one_ipv4_ping(self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.assertTrue(self.try_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=True))
self.one_ipv4_ping(self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.post_test_checks()
class Faucet8021XVLANTest(Faucet8021XSuccessTest):
"""Test that two hosts are put into vlans.
Same VLAN, Logoff, diff VLANs, port flap."""
CONFIG_GLOBAL = """vlans:
100:
vid: 100
description: "untagged"
radiusassignedvlan1:
vid: %u
description: "untagged"
dot1x_assigned: True
radiusassignedvlan2:
vid: %u
description: "untagged"
dot1x_assigned: True
""" % (mininet_test_base.MAX_TEST_VID - 1,
mininet_test_base.MAX_TEST_VID)
CONFIG = """
dot1x:
nfv_intf: NFV_INTF
nfv_sw_port: %(port_4)d
radius_ip: 127.0.0.1
radius_port: RADIUS_PORT
radius_secret: SECRET
interfaces:
%(port_1)d:
native_vlan: 100
# 802.1x client.
dot1x: True
%(port_2)d:
native_vlan: 100
# 802.1X client.
dot1x: True
%(port_3)d:
native_vlan: radiusassignedvlan1
# ping host.
%(port_4)d:
output_only: True
# "NFV host - interface used by controller."
"""
RADIUS_PORT = 1940
DOT1X_EXPECTED_EVENTS = []
wpasupplicant_conf_1 = """
ap_scan=0
network={
key_mgmt=IEEE8021X
eap=MD5
identity="vlanuser1001"
password="password"
}
"""
wpasupplicant_conf_2 = """
ap_scan=0
network={
key_mgmt=IEEE8021X
eap=MD5
identity="vlanuser2222"
password="milliphone"
}
"""
def test_untagged(self):
vid = 100 ^ mininet_test_base.OFPVID_PRESENT
radius_vid1 = (mininet_test_base.MAX_TEST_VID - 1) ^ mininet_test_base.OFPVID_PRESENT
radius_vid2 = mininet_test_base.MAX_TEST_VID ^ mininet_test_base.OFPVID_PRESENT
port_no1 = self.port_map['port_1']
port_no2 = self.port_map['port_2']
port_no3 = self.port_map['port_3']
self.assertTrue(self.try_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=False))
self.wait_until_matching_flow(
{'in_port': port_no1},
table_id=self._VLAN_TABLE,
actions=['SET_FIELD: {vlan_vid:%u}' % radius_vid1])
self.wait_until_matching_flow(
{'vlan_vid': radius_vid1},
table_id=self._FLOOD_TABLE,
actions=['POP_VLAN', 'OUTPUT:%s' % port_no1, 'OUTPUT:%s' % port_no3])
self.wait_until_matching_flow(
{'vlan_vid': vid},
table_id=self._FLOOD_TABLE,
actions=['POP_VLAN', 'OUTPUT:%s' % port_no2])
self.wait_until_no_matching_flow(
{'vlan_vid': radius_vid2},
table_id=self._FLOOD_TABLE,
actions=['POP_VLAN', 'OUTPUT:%s' % port_no1, 'OUTPUT:%s' % port_no2])
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=True)
self.assertTrue(self.try_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=True))
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
# check ports are back in the right vlans.
self.wait_until_no_matching_flow(
{'in_port': port_no1},
table_id=self._VLAN_TABLE,
actions=['SET_FIELD: {vlan_vid:%u}' % radius_vid1])
self.wait_until_matching_flow(
{'in_port': port_no1},
table_id=self._VLAN_TABLE,
actions=['SET_FIELD: {vlan_vid:%u}' % vid])
# check flood ports are in the right vlans
self.wait_until_no_matching_flow(
{'vlan_vid': radius_vid1},
table_id=self._FLOOD_TABLE,
actions=['POP_VLAN', 'OUTPUT:%s' % port_no1, 'OUTPUT:%s' % port_no3])
self.wait_until_matching_flow(
{'vlan_vid': vid},
table_id=self._FLOOD_TABLE,
actions=['POP_VLAN', 'OUTPUT:%s' % port_no1, 'OUTPUT:%s' % port_no2])
# check that two 802.1x hosts on the same dynamic VLAN can reach each other.
self.assertTrue(self.try_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=False))
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=True)
self.one_ipv4_ping(
self.eapol1_host, self.eapol2_host.IP(),
require_host_learned=False, expected_result=False)
self.assertTrue(self.try_8021x(
self.eapol2_host, port_no2, self.wpasupplicant_conf_1, and_logoff=False))
self.one_ipv4_ping(
self.eapol2_host, self.ping_host.IP(),
require_host_learned=False, expected_result=True)
self.one_ipv4_ping(
self.eapol2_host, self.eapol1_host.IP(),
require_host_learned=False, expected_result=True)
# check that two 802.1x hosts on different dynamic VLANs cannot reach each other.
self.assertTrue(self.try_8021x(
self.eapol2_host, port_no2, self.wpasupplicant_conf_2, and_logoff=False))
self.one_ipv4_ping(
self.eapol2_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.one_ipv4_ping(
self.eapol2_host, self.eapol1_host.IP(),
require_host_learned=False, expected_result=False)
# move host1 to new VLAN
self.assertTrue(self.try_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_2, and_logoff=False))
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.one_ipv4_ping(
self.eapol1_host, self.eapol2_host.IP(),
require_host_learned=False, expected_result=True)
self.wait_until_no_matching_flow(
{'eth_src': self.eapol1_host.MAC(),
'vlan_vid': vid},
table_id=self._ETH_SRC_TABLE)
self.wait_until_no_matching_flow(
{'eth_src': self.eapol1_host.MAC(),
'vlan_vid': radius_vid1},
table_id=self._ETH_SRC_TABLE)
self.wait_until_matching_flow(
{'eth_src': self.eapol1_host.MAC(),
'vlan_vid': radius_vid2},
table_id=self._ETH_SRC_TABLE)
self.wait_until_no_matching_flow(
{'eth_dst': self.eapol1_host.MAC(),
'vlan_vid': vid},
table_id=self._ETH_DST_TABLE)
self.wait_until_no_matching_flow(
{'eth_dst': self.eapol1_host.MAC(),
'vlan_vid': radius_vid1},
table_id=self._ETH_DST_TABLE)
self.wait_until_matching_flow(
{'eth_dst': self.eapol1_host.MAC(),
'vlan_vid': radius_vid2},
table_id=self._ETH_DST_TABLE)
# test port down/up: this removes the dynamic VLAN assignment and the learned host cache.
self.flap_port(port_no2)
self.wait_until_no_matching_flow(
{'eth_src': self.eapol2_host.MAC()},
table_id=self._ETH_SRC_TABLE)
self.wait_until_no_matching_flow(
{'eth_dst': self.eapol2_host.MAC(),
'vlan_vid': radius_vid1},
table_id=self._ETH_DST_TABLE,
actions=['POP_VLAN', 'OUTPUT:%s' % port_no2])
# check ports are back in the right vlans.
self.wait_until_no_matching_flow(
{'in_port': port_no2},
table_id=self._VLAN_TABLE,
actions=['SET_FIELD: {vlan_vid:%u}' % radius_vid2])
self.wait_until_matching_flow(
{'in_port': port_no2},
table_id=self._VLAN_TABLE,
actions=['SET_FIELD: {vlan_vid:%u}' % vid])
# check flood ports are in the right vlans
self.wait_until_no_matching_flow(
{'vlan_vid': radius_vid2},
table_id=self._FLOOD_TABLE,
actions=['POP_VLAN', 'OUTPUT:%s' % port_no1, 'OUTPUT:%s' % port_no2])
self.wait_until_matching_flow(
{'vlan_vid': vid},
table_id=self._FLOOD_TABLE,
actions=['POP_VLAN', 'OUTPUT:%s' % port_no2])
self.post_test_checks()
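# FaucetUntaggedRandomVidTest repeatedly changes the native VLAN's vid to a random value (forcing a
# cold start each time) and verifies that all hosts can still ping each other.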
class FaucetUntaggedRandomVidTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
randvlan:
vid: 100
description: "untagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: randvlan
%(port_2)d:
native_vlan: randvlan
%(port_3)d:
native_vlan: randvlan
%(port_4)d:
native_vlan: randvlan
"""
def test_untagged(self):
last_vid = None
for _ in range(5):
vid = random.randint(2, mininet_test_base.MAX_TEST_VID)
if vid == last_vid:
continue
self.change_vlan_config(
'randvlan', 'vid', vid, cold_start=True, hup=True)
self.ping_all_when_learned()
last_vid = vid
class FaucetUntaggedNoCombinatorialFlood(FaucetUntaggedTest):
CONFIG = """
combinatorial_port_flood: False
""" + CONFIG_BOILER_UNTAGGED
class FaucetUntaggedControllerNfvTest(FaucetUntaggedTest):
# Name of switch interface connected to last host, accessible to controller.
last_host_switch_intf = None
def _init_faucet_config(self):
last_host = self.hosts_name_ordered()[-1]
switch = self.first_switch()
last_host_switch_link = switch.connectionsTo(last_host)[0]
self.last_host_switch_intf = [intf for intf in last_host_switch_link if intf in switch.intfList()][0]
# Now that interface is known, FAUCET config can be written to include it.
super(FaucetUntaggedControllerNfvTest, self)._init_faucet_config()
def test_untagged(self):
super(FaucetUntaggedControllerNfvTest, self).test_untagged()
# Confirm controller can see switch interface with traffic.
ifconfig_output = self.net.controllers[0].cmd('ifconfig %s' % self.last_host_switch_intf)
self.assertTrue(
re.search('(R|T)X packets[: ][1-9]', ifconfig_output),
msg=ifconfig_output)
class FaucetUntaggedBroadcastTest(FaucetUntaggedTest):
def test_untagged(self):
super(FaucetUntaggedBroadcastTest, self).test_untagged()
self.verify_broadcast()
self.verify_no_bcast_to_self()
self.verify_unicast_not_looped()
class FaucetUntaggedNSLoopTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
acls:
nsonly:
- rule:
dl_type: %u
ip_proto: 58
icmpv6_type: 135
actions:
allow: 1
- rule:
actions:
allow: 0
vlans:
100:
description: "untagged"
""" % IPV6_ETH
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: nsonly
%(port_2)d:
native_vlan: 100
acl_in: nsonly
%(port_3)d:
native_vlan: 100
acl_in: nsonly
%(port_4)d:
native_vlan: 100
acl_in: nsonly
"""
def test_untagged(self):
self.verify_no_bcast_to_self()
class FaucetUntaggedNoCombinatorialBroadcastTest(FaucetUntaggedBroadcastTest):
CONFIG = """
combinatorial_port_flood: False
""" + CONFIG_BOILER_UNTAGGED
class FaucetExperimentalAPITest(FaucetUntaggedTest):
"""Test the experimental Faucet API."""
CONTROLLER_CLASS = mininet_test_topo.FaucetExperimentalAPI
results_file = None
def _set_static_vars(self):
super(FaucetExperimentalAPITest, self)._set_static_vars()
self._set_var_path('faucet', 'API_TEST_RESULT', 'result.txt')
self.results_file = self.env['faucet']['API_TEST_RESULT']
def test_untagged(self):
self.wait_until_matching_lines_from_file(r'.*pass.*', self.results_file)
class FaucetUntaggedLogRotateTest(FaucetUntaggedTest):
def test_untagged(self):
faucet_log = self.env['faucet']['FAUCET_LOG']
self.assertTrue(os.path.exists(faucet_log))
os.rename(faucet_log, faucet_log + '.old')
self.assertTrue(os.path.exists(faucet_log + '.old'))
self.flap_all_switch_ports()
self.assertTrue(os.path.exists(faucet_log))
class FaucetUntaggedLLDPTest(FaucetUntaggedTest):
CONFIG = """
lldp_beacon:
send_interval: 5
max_per_interval: 5
interfaces:
%(port_1)d:
native_vlan: 100
lldp_beacon:
enable: True
system_name: "faucet"
port_descr: "first_port"
org_tlvs:
- {oui: 0x12bb, subtype: 2, info: "01406500"}
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
@staticmethod
def wireshark_payload_format(payload_str):
formatted_payload_str = ''
groupsize = 4
for payload_offset in range(len(payload_str) // groupsize):
char_count = payload_offset * 2
if char_count % 0x10 == 0:
formatted_payload_str += '0x%4.4x: ' % char_count
payload_fragment = payload_str[payload_offset * groupsize:][:groupsize]
formatted_payload_str += ' ' + payload_fragment
return formatted_payload_str
def test_untagged(self):
first_host = self.hosts_name_ordered()[0]
tcpdump_filter = 'ether proto 0x88cc'
timeout = 5 * 3
tcpdump_txt = self.tcpdump_helper(
first_host, tcpdump_filter, [
lambda: first_host.cmd('sleep %u' % timeout)],
timeout=timeout, vflags='-vv', packets=1)
oui_prefix = ''.join(self.FAUCET_MAC.split(':')[:3])
faucet_lldp_dp_id_attr = '%2.2x' % 1
expected_lldp_dp_id = ''.join((
oui_prefix,
faucet_lldp_dp_id_attr,
binascii.hexlify(str(self.dpid).encode('UTF-8')).decode()))
for lldp_required in (
r'%s > 01:80:c2:00:00:0e, ethertype LLDP' % self.FAUCET_MAC,
r'Application type \[voice\] \(0x01\), Flags \[Tagged\]Vlan id 50',
r'System Name TLV \(5\), length 6: faucet',
r'Port Description TLV \(4\), length 10: first_port',
self.wireshark_payload_format(expected_lldp_dp_id)):
self.assertTrue(
re.search(lldp_required, tcpdump_txt),
msg='%s: %s' % (lldp_required, tcpdump_txt))
class FaucetUntaggedLLDPDefaultFallbackTest(FaucetUntaggedTest):
CONFIG = """
lldp_beacon:
send_interval: 5
max_per_interval: 5
interfaces:
%(port_1)d:
native_vlan: 100
lldp_beacon:
enable: True
org_tlvs:
- {oui: 0x12bb, subtype: 2, info: "01406500"}
"""
def test_untagged(self):
first_host = self.hosts_name_ordered()[0]
tcpdump_filter = 'ether proto 0x88cc'
timeout = 5 * 3
tcpdump_txt = self.tcpdump_helper(
first_host, tcpdump_filter, [
lambda: first_host.cmd('sleep %u' % timeout)],
timeout=timeout, vflags='-vv', packets=1)
for lldp_required in (
r'%s > 01:80:c2:00:00:0e, ethertype LLDP' % self.FAUCET_MAC,
r'Application type \[voice\] \(0x01\), Flags \[Tagged\]Vlan id 50',
r'System Name TLV \(5\), length 8: faucet-1',
r'Port Description TLV \(4\), length [1-9]: b%u' % self.port_map['port_1']):
self.assertTrue(
re.search(lldp_required, tcpdump_txt),
msg='%s: %s' % (lldp_required, tcpdump_txt))
class FaucetUntaggedMeterParseTest(FaucetUntaggedTest):
REQUIRES_METERS = True
OVS_TYPE = 'user'
CONFIG_GLOBAL = """
meters:
lossymeter:
meter_id: 1
entry:
flags: "KBPS"
bands:
[
{
type: "DROP",
rate: 100
}
]
acls:
lossyacl:
- rule:
actions:
meter: lossymeter
allow: 1
vlans:
100:
description: "untagged"
"""
def get_gauge_watcher_config(self):
return """
port_stats:
dps: ['%s']
type: 'port_stats'
interval: 5
db: 'stats_file'
port_state:
dps: ['%s']
type: 'port_state'
interval: 5
db: 'state_file'
meter_stats:
dps: ['%s']
type: 'meter_stats'
interval: 5
db: 'meter_file'
meter_stats_prom:
dps: ['%s']
type: 'meter_stats'
db: 'prometheus'
interval: 5
""" % (self.DP_NAME, self.DP_NAME, self.DP_NAME, self.DP_NAME)
GAUGE_CONFIG_DBS = """
prometheus:
type: 'prometheus'
prometheus_addr: '::1'
prometheus_port: %(gauge_prom_port)d
"""
config_ports = {'gauge_prom_port': None}
def get_gauge_config(self, faucet_config_file,
monitor_stats_file,
monitor_state_file,
monitor_meter_stats_file):
"""Build Gauge config."""
return """
faucet_configs:
- %s
watchers:
%s
dbs:
stats_file:
type: 'text'
file: %s
state_file:
type: 'text'
file: %s
meter_file:
type: 'text'
file: %s
%s
""" % (faucet_config_file, self.get_gauge_watcher_config(),
monitor_stats_file, monitor_state_file, monitor_meter_stats_file,
self.GAUGE_CONFIG_DBS)
def _init_gauge_config(self):
gauge_config = self.get_gauge_config(
self.faucet_config_path,
self.monitor_stats_file,
self.monitor_state_file,
self.monitor_meter_stats_file)
if self.config_ports:
gauge_config = gauge_config % self.config_ports
self._write_yaml_conf(self.gauge_config_path, yaml.safe_load(gauge_config))
def test_untagged(self):
"""All hosts on the same untagged VLAN should have connectivity."""
# TODO: userspace DP port status not reliable.
self.ping_all_when_learned()
class FaucetUntaggedApplyMeterTest(FaucetUntaggedMeterParseTest):
CONFIG = """
interfaces:
%(port_1)d:
acl_in: lossyacl
native_vlan: 100
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
super(FaucetUntaggedApplyMeterTest, self).test_untagged()
first_host, second_host = self.hosts_name_ordered()[:2]
error('metered ping flood: %s' % first_host.cmd(
'ping -c 1000 -f %s' % second_host.IP()))
# Require meter band bytes to match.
self.wait_until_matching_lines_from_file(
r'.+faucet-1-1-byte-band-count.+[1-9].+',
self.monitor_meter_stats_file)
meter_labels = {
'dp_id': self.dpid,
'dp_name': self.DP_NAME,
'meter_id': 1
}
byte_band_count = self.scrape_prometheus_var(
'of_meter_byte_band_count', labels=meter_labels, controller='gauge')
self.assertTrue(byte_band_count)
class FaucetUntaggedHairpinTest(FaucetUntaggedTest):
NETNS = True
CONFIG = """
interfaces:
%(port_1)d:
hairpin: True
native_vlan: 100
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
# Create macvlan interfaces, with one in a separate namespace,
# to force traffic between them to be hairpinned via FAUCET.
first_host, second_host = self.hosts_name_ordered()[:2]
macvlan1_intf = 'macvlan1'
macvlan1_ipv4 = '10.0.0.100'
macvlan2_intf = 'macvlan2'
macvlan2_ipv4 = '10.0.0.101'
self.add_macvlan(first_host, macvlan1_intf, ipa=macvlan1_ipv4, mode='vepa')
self.add_macvlan(first_host, macvlan2_intf, mode='vepa')
macvlan2_mac = self.get_host_intf_mac(first_host, macvlan2_intf)
netns = self.hostns(first_host)
setup_cmds = []
setup_cmds.extend(
['ip link set %s netns %s' % (macvlan2_intf, netns)])
for exec_cmd in (
('ip address add %s/24 brd + dev %s' % (
macvlan2_ipv4, macvlan2_intf),
'ip link set %s up' % macvlan2_intf)):
setup_cmds.append('ip netns exec %s %s' % (netns, exec_cmd))
self.quiet_commands(first_host, setup_cmds)
self.one_ipv4_ping(first_host, macvlan2_ipv4, intf=macvlan1_ipv4)
self.one_ipv4_ping(first_host, second_host.IP())
# Verify OUTPUT:IN_PORT flood rules are exercised.
self.wait_nonzero_packet_count_flow(
{'in_port': self.port_map['port_1'],
'dl_dst': 'ff:ff:ff:ff:ff:ff'},
table_id=self._FLOOD_TABLE, actions=['OUTPUT:IN_PORT'])
self.wait_nonzero_packet_count_flow(
{'in_port': self.port_map['port_1'],
'dl_dst': macvlan2_mac},
table_id=self._ETH_DST_HAIRPIN_TABLE, actions=['OUTPUT:IN_PORT'])
class FaucetUntaggedGroupHairpinTest(FaucetUntaggedHairpinTest):
CONFIG = """
group_table: True
interfaces:
%(port_1)d:
hairpin: True
native_vlan: 100
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
class FaucetUntaggedTcpIPv4IperfTest(FaucetUntaggedTest):
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[:2]
first_host_ip = ipaddress.ip_address(first_host.IP())
second_host_ip = ipaddress.ip_address(second_host.IP())
for _ in range(3):
self.ping_all_when_learned()
self.verify_iperf_min(
((first_host, self.port_map['port_1']),
(second_host, self.port_map['port_2'])),
MIN_MBPS, first_host_ip, second_host_ip,
sync_counters_func=lambda: self.one_ipv4_ping(first_host, second_host_ip))
self.flap_all_switch_ports()
class FaucetUntaggedTcpIPv6IperfTest(FaucetUntaggedTest):
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[:2]
first_host_ip = ipaddress.ip_interface('fc00::1:1/112')
second_host_ip = ipaddress.ip_interface('fc00::1:2/112')
self.add_host_ipv6_address(first_host, first_host_ip)
self.add_host_ipv6_address(second_host, second_host_ip)
for _ in range(3):
self.ping_all_when_learned()
self.verify_iperf_min(
((first_host, self.port_map['port_1']),
(second_host, self.port_map['port_2'])),
MIN_MBPS, first_host_ip.ip, second_host_ip.ip,
sync_counters_func=lambda: self.one_ipv6_ping(first_host, second_host_ip.ip))
self.flap_all_switch_ports()
class FaucetSanityTest(FaucetUntaggedTest):
"""Sanity test - make sure test environment is correct before running all tess."""
def verify_dp_port_healthy(self, dp_port, retries=5, min_mbps=MIN_MBPS):
for _ in range(retries):
port_desc = self.get_port_desc_from_dpid(self.dpid, dp_port)
port_name = port_desc['name']
port_state = port_desc['state']
port_config = port_desc['config']
port_speed_mbps = (port_desc['curr_speed'] * 1e3) / 1e6
error('DP %u is %s, at %u mbps\n' % (dp_port, port_name, port_speed_mbps))
if port_speed_mbps < min_mbps:
error('port speed %u below minimum %u mbps\n' % (
port_speed_mbps, min_mbps))
elif port_config != 0:
error('port config %u must be 0 (all clear)' % port_config)
elif port_state not in (0, 4):
error('state %u must be 0 (all flags clear or live)\n' % (
port_state))
else:
return
time.sleep(1)
self.fail('DP port %u not healthy (%s)' % (dp_port, port_desc))
def test_portmap(self):
prom_desc = self.scrape_prometheus(
controller='faucet', var='of_dp_desc_stats')
self.assertIsNotNone(prom_desc, msg='Cannot scrape of_dp_desc_stats')
error('DP: %s\n' % prom_desc[0])
error('port_map: %s\n' % self.port_map)
for i, host in enumerate(self.hosts_name_ordered(), start=1):
in_port = 'port_%u' % i
dp_port = self.port_map[in_port]
if dp_port in self.switch_map:
error('verifying cabling for %s: host %s -> dp %u\n' % (
in_port, self.switch_map[dp_port], dp_port))
else:
error('verifying host %s -> dp %s\n' % (
in_port, dp_port))
self.verify_dp_port_healthy(dp_port)
self.require_host_learned(host, in_port=dp_port)
learned = self.prom_macs_learned()
self.assertEqual(
len(self.hosts_name_ordered()), len(learned),
msg='test requires exactly %u hosts learned (got %s)' % (
len(self.hosts_name_ordered()), learned))
def test_listening(self):
msg_template = (
'Processes listening on test interfaces, or on all interfaces, may interfere with tests. '
'Please deconfigure them (e.g. configure interface as "unmanaged"):\n\n%s')
controller = self._get_controller()
ss_out = controller.cmd('ss -lnep').splitlines()
listening_all_re = re.compile(r'^.+\s+(\*:\d+|:::\d+)\s+(:+\*|\*:\*).+$')
listening_all = [line for line in ss_out if listening_all_re.match(line)]
for test_intf in list(self.switch_map.values()):
int_re = re.compile(r'^.+\b%s\b.+$' % test_intf)
listening_int = [line for line in ss_out if int_re.match(line)]
self.assertFalse(
len(listening_int),
msg=(msg_template % '\n'.join(listening_int)))
if listening_all:
print('Warning: %s' % (msg_template % '\n'.join(listening_all)))
def test_silence(self):
# Make all test hosts silent and ensure we hear no other packets.
for host in self.hosts_name_ordered():
self.host_drop_all_ips(host)
host.cmd('echo 1 > /proc/sys/net/ipv6/conf/%s/disable_ipv6' % host.defaultIntf())
for host in self.hosts_name_ordered():
tcpdump_filter = ''
tcpdump_txt = self.tcpdump_helper(
host, tcpdump_filter, [], timeout=10, vflags='-vv', packets=1)
self.tcpdump_rx_packets(tcpdump_txt, 0)
self.assertTrue(
self.tcpdump_rx_packets(tcpdump_txt, 0),
msg='got unexpected packet from test switch: %s' % tcpdump_txt)
class FaucetUntaggedPrometheusGaugeTest(FaucetUntaggedTest):
"""Testing Gauge Prometheus"""
GAUGE_CONFIG_DBS = """
prometheus:
type: 'prometheus'
prometheus_addr: '::1'
prometheus_port: %(gauge_prom_port)d
"""
config_ports = {'gauge_prom_port': None}
def get_gauge_watcher_config(self):
return """
port_stats:
dps: ['%s']
type: 'port_stats'
interval: 5
db: 'prometheus'
port_state:
dps: ['%s']
type: 'port_state'
interval: 5
db: 'prometheus'
flow_table:
dps: ['%s']
type: 'flow_table'
interval: 5
db: 'prometheus'
""" % (self.DP_NAME, self.DP_NAME, self.DP_NAME)
def _start_gauge_check(self):
if not self.gauge_controller.listen_port(self.config_ports['gauge_prom_port']):
return 'gauge not listening on prometheus port'
return None
def test_untagged(self):
self.wait_dp_status(1, controller='gauge')
self.assertIsNotNone(self.scrape_prometheus_var(
'faucet_pbr_version', any_labels=True, controller='gauge', retries=3))
conf = self._get_faucet_conf()
cookie = conf['dps'][self.DP_NAME]['cookie']
if not self.wait_ports_updating(self.port_map.keys(), self.PORT_VARS):
self.fail(msg='Gauge Prometheus port counters not increasing')
for _ in range(self.DB_TIMEOUT * 3):
updated_counters = True
for host in self.hosts_name_ordered():
host_labels = {
'dp_id': self.dpid,
'dp_name': self.DP_NAME,
'cookie': cookie,
'eth_dst': host.MAC(),
'inst_count': str(1),
'table_id': str(self._ETH_DST_TABLE),
'vlan': str(100),
'vlan_vid': str(4196)
}
packet_count = self.scrape_prometheus_var(
'flow_packet_count_eth_dst', labels=host_labels, controller='gauge')
byte_count = self.scrape_prometheus_var(
'flow_byte_count_eth_dst', labels=host_labels, controller='gauge')
if packet_count is None or packet_count == 0:
updated_counters = False
if byte_count is None or byte_count == 0:
updated_counters = False
if updated_counters:
return
time.sleep(1)
self.fail(msg='Gauge Prometheus flow counters not increasing')
class FaucetUntaggedInfluxTest(FaucetUntaggedTest):
"""Basic untagged VLAN test with Influx."""
GAUGE_CONFIG_DBS = """
influx:
type: 'influx'
influx_db: 'faucet'
influx_host: '127.0.0.1'
influx_port: %(gauge_influx_port)d
influx_user: 'faucet'
influx_pwd: ''
influx_retries: 1
""" + """
influx_timeout: %u
""" % FaucetUntaggedTest.DB_TIMEOUT
config_ports = {'gauge_influx_port': None}
influx_log = None
server_thread = None
server = None
def get_gauge_watcher_config(self):
return """
port_stats:
dps: ['%s']
type: 'port_stats'
interval: 2
db: 'influx'
port_state:
dps: ['%s']
type: 'port_state'
interval: 2
db: 'influx'
flow_table:
dps: ['%s']
type: 'flow_table'
interval: 2
db: 'influx'
""" % (self.DP_NAME, self.DP_NAME, self.DP_NAME)
def setup_influx(self):
self.influx_log = os.path.join(self.tmpdir, 'influx.log')
if self.server:
self.server.influx_log = self.influx_log
self.server.timeout = self.DB_TIMEOUT
def setUp(self): # pylint: disable=invalid-name
self.handler = InfluxPostHandler
super(FaucetUntaggedInfluxTest, self).setUp()
self.setup_influx()
def tearDown(self, ignore_oferrors=False): # pylint: disable=invalid-name
if self.server:
self.server.shutdown()
self.server.socket.close()
super(FaucetUntaggedInfluxTest, self).tearDown(ignore_oferrors=ignore_oferrors)
def _wait_error_shipping(self, timeout=None):
if timeout is None:
timeout = self.DB_TIMEOUT * 3 * 2
gauge_log_name = self.env['gauge']['GAUGE_LOG']
self.wait_until_matching_lines_from_file(
r'.+error shipping.+', gauge_log_name, timeout=timeout)
def _verify_influx_log(self, retries=3):
self.assertTrue(os.path.exists(self.influx_log))
expected_vars = {
'dropped_in', 'dropped_out', 'bytes_out', 'flow_packet_count',
'errors_in', 'bytes_in', 'flow_byte_count', 'port_state_reason',
'packets_in', 'packets_out'}
observed_vars = set()
for _ in range(retries):
with open(self.influx_log) as influx_log:
influx_log_lines = influx_log.readlines()
for point_line in influx_log_lines:
point_fields = point_line.strip().split()
self.assertEqual(3, len(point_fields), msg=point_fields)
ts_name, value_field, _ = point_fields
value = float(value_field.split('=')[1])
ts_name_fields = ts_name.split(',')
self.assertGreater(len(ts_name_fields), 1)
observed_vars.add(ts_name_fields[0])
label_values = {}
for label_value in ts_name_fields[1:]:
label, value = label_value.split('=')
label_values[label] = value
if ts_name.startswith('flow'):
self.assertTrue('inst_count' in label_values, msg=point_line)
if 'vlan_vid' in label_values:
self.assertEqual(
int(label_values['vlan']), int(value) ^ 0x1000)
if expected_vars == observed_vars:
break
time.sleep(1)
self.assertEqual(expected_vars, observed_vars)
self.verify_no_exception(self.env['gauge']['GAUGE_EXCEPTION_LOG'])
def _wait_influx_log(self):
for _ in range(self.DB_TIMEOUT * 3):
if os.path.exists(self.influx_log):
return
time.sleep(1)
def _start_gauge_check(self):
influx_port = self.config_ports['gauge_influx_port']
try:
self.server = QuietHTTPServer(
(mininet_test_util.LOCALHOST, influx_port),
self.handler) # pytype: disable=attribute-error
self.server.timeout = self.DB_TIMEOUT
self.server_thread = threading.Thread(
target=self.server.serve_forever)
self.server_thread.daemon = True
self.server_thread.start()
return None
except socket.error as err:
return 'cannot start Influx test server: %s' % err
def test_untagged(self):
self.ping_all_when_learned()
self.hup_gauge()
self.flap_all_switch_ports()
self._wait_influx_log()
self._verify_influx_log()
class FaucetUntaggedMultiDBWatcherTest(
FaucetUntaggedInfluxTest, FaucetUntaggedPrometheusGaugeTest):
GAUGE_CONFIG_DBS = """
prometheus:
type: 'prometheus'
prometheus_addr: '::1'
prometheus_port: %(gauge_prom_port)d
influx:
type: 'influx'
influx_db: 'faucet'
influx_host: '127.0.0.1'
influx_port: %(gauge_influx_port)d
influx_user: 'faucet'
influx_pwd: ''
influx_retries: 1
""" + """
influx_timeout: %u
""" % FaucetUntaggedTest.DB_TIMEOUT
config_ports = {
'gauge_prom_port': None,
'gauge_influx_port': None}
def get_gauge_watcher_config(self):
return """
port_stats:
dps: ['%s']
type: 'port_stats'
interval: 5
dbs: ['prometheus', 'influx']
port_state:
dps: ['%s']
type: 'port_state'
interval: 5
dbs: ['prometheus', 'influx']
flow_table:
dps: ['%s']
type: 'flow_table'
interval: 5
dbs: ['prometheus', 'influx']
""" % (self.DP_NAME, self.DP_NAME, self.DP_NAME)
@staticmethod
def test_tagged():
return
def test_untagged(self):
self.wait_dp_status(1, controller='gauge')
self.assertTrue(self.wait_ports_updating(self.port_map.keys(), self.PORT_VARS))
self.ping_all_when_learned()
self.hup_gauge()
self.flap_all_switch_ports()
self._wait_influx_log()
self._verify_influx_log()
class FaucetUntaggedInfluxDownTest(FaucetUntaggedInfluxTest):
def _start_gauge_check(self):
return None
def test_untagged(self):
self.ping_all_when_learned()
self._wait_error_shipping()
self.verify_no_exception(self.env['gauge']['GAUGE_EXCEPTION_LOG'])
class FaucetUntaggedInfluxUnreachableTest(FaucetUntaggedInfluxTest):
GAUGE_CONFIG_DBS = """
influx:
type: 'influx'
influx_db: 'faucet'
influx_host: '127.0.0.2'
influx_port: %(gauge_influx_port)d
influx_user: 'faucet'
influx_pwd: ''
influx_timeout: 2
"""
def _start_gauge_check(self):
return None
def test_untagged(self):
self.gauge_controller.cmd(
'route add 127.0.0.2 gw 127.0.0.1 lo')
self.ping_all_when_learned()
self._wait_error_shipping()
self.verify_no_exception(self.env['gauge']['GAUGE_EXCEPTION_LOG'])
class FaucetSingleUntaggedInfluxTooSlowTest(FaucetUntaggedInfluxTest):
def setUp(self): # pylint: disable=invalid-name
self.handler = SlowInfluxPostHandler
super().setUp()
self.setup_influx()
def test_untagged(self):
self.ping_all_when_learned()
self._wait_influx_log()
self.assertTrue(os.path.exists(self.influx_log))
self._wait_error_shipping()
self.verify_no_exception(self.env['gauge']['GAUGE_EXCEPTION_LOG'])
class FaucetNailedForwardingTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
dl_dst: "0e:00:00:00:02:02"
actions:
output:
port: %(port_2)d
- rule:
dl_type: 0x806
dl_dst: "ff:ff:ff:ff:ff:ff"
arp_tpa: "10.0.0.2"
actions:
output:
port: %(port_2)d
- rule:
actions:
allow: 0
2:
- rule:
dl_dst: "0e:00:00:00:01:01"
actions:
output:
port: %(port_1)d
- rule:
dl_type: 0x806
dl_dst: "ff:ff:ff:ff:ff:ff"
arp_tpa: "10.0.0.1"
actions:
output:
port: %(port_1)d
- rule:
actions:
allow: 0
3:
- rule:
actions:
allow: 0
4:
- rule:
actions:
allow: 0
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
acl_in: 2
%(port_3)d:
native_vlan: 100
acl_in: 3
%(port_4)d:
native_vlan: 100
acl_in: 4
"""
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
first_host.setMAC('0e:00:00:00:01:01')
second_host.setMAC('0e:00:00:00:02:02')
self.one_ipv4_ping(
first_host, second_host.IP(), require_host_learned=False)
self.one_ipv4_ping(
second_host, first_host.IP(), require_host_learned=False)
class FaucetNailedFailoverForwardingTest(FaucetNailedForwardingTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
dl_dst: "0e:00:00:00:02:02"
actions:
output:
failover:
group_id: 1001
ports: [%(port_2)d, %(port_3)d]
- rule:
dl_type: 0x806
dl_dst: "ff:ff:ff:ff:ff:ff"
arp_tpa: "10.0.0.2"
actions:
output:
failover:
group_id: 1002
ports: [%(port_2)d, %(port_3)d]
- rule:
actions:
allow: 0
2:
- rule:
dl_dst: "0e:00:00:00:01:01"
actions:
output:
port: %(port_1)d
- rule:
dl_type: 0x806
dl_dst: "ff:ff:ff:ff:ff:ff"
arp_tpa: "10.0.0.1"
actions:
output:
port: %(port_1)d
- rule:
actions:
allow: 0
3:
- rule:
dl_dst: "0e:00:00:00:01:01"
actions:
output:
port: %(port_1)d
- rule:
dl_type: 0x806
dl_dst: "ff:ff:ff:ff:ff:ff"
arp_tpa: "10.0.0.1"
actions:
output:
port: %(port_1)d
- rule:
actions:
allow: 0
4:
- rule:
actions:
allow: 0
"""
def test_untagged(self):
first_host, second_host, third_host = self.hosts_name_ordered()[0:3]
first_host.setMAC('0e:00:00:00:01:01')
second_host.setMAC('0e:00:00:00:02:02')
third_host.setMAC('0e:00:00:00:02:02')
third_host.setIP(second_host.IP())
self.one_ipv4_ping(
first_host, second_host.IP(), require_host_learned=False)
self.one_ipv4_ping(
second_host, first_host.IP(), require_host_learned=False)
self.set_port_down(self.port_map['port_2'])
self.one_ipv4_ping(
first_host, third_host.IP(), require_host_learned=False)
self.one_ipv4_ping(
third_host, first_host.IP(), require_host_learned=False)
class FaucetUntaggedLLDPBlockedTest(FaucetUntaggedTest):
def test_untagged(self):
self.ping_all_when_learned()
self.verify_lldp_blocked()
# Verify the flood block for bridge-reserved multicast (which includes 802.1x PAE) was triggered.
self.wait_nonzero_packet_count_flow(
{'dl_dst': '01:80:c2:00:00:00/ff:ff:ff:ff:ff:f0'},
table_id=self._FLOOD_TABLE)
class FaucetUntaggedCDPTest(FaucetUntaggedTest):
def test_untagged(self):
self.ping_all_when_learned()
self.verify_cdp_blocked()
class FaucetTaggedAndUntaggedSameVlanTest(FaucetTest):
"""Test mixture of tagged and untagged hosts on the same VLAN."""
N_TAGGED = 1
N_UNTAGGED = 3
LINKS_PER_HOST = 1
CONFIG_GLOBAL = """
vlans:
100:
description: "mixed"
"""
CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def setUp(self): # pylint: disable=invalid-name
super(FaucetTaggedAndUntaggedSameVlanTest, self).setUp()
self.topo = self.topo_class(
self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
n_tagged=1, n_untagged=3, links_per_host=self.LINKS_PER_HOST,
hw_dpid=self.hw_dpid)
self.start_net()
def test_untagged(self):
"""Test connectivity including after port flapping."""
self.ping_all_when_learned()
self.flap_all_switch_ports()
self.ping_all_when_learned()
self.verify_broadcast()
self.verify_no_bcast_to_self()
class FaucetTaggedAndUntaggedSameVlanEgressTest(FaucetTaggedAndUntaggedSameVlanTest):
REQUIRES_METADATA = True
CONFIG = """
egress_pipeline: True
interfaces:
%(port_1)d:
tagged_vlans: [100]
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
class FaucetTaggedAndUntaggedSameVlanGroupTest(FaucetTaggedAndUntaggedSameVlanTest):
CONFIG = """
group_table: True
interfaces:
%(port_1)d:
tagged_vlans: [100]
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
class FaucetUntaggedMaxHostsTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
max_hosts: 2
"""
CONFIG = CONFIG_BOILER_UNTAGGED
def test_untagged(self):
self.pingAll()
learned_hosts = [
host for host in self.hosts_name_ordered() if self.host_learned(host)]
self.assertEqual(2, len(learned_hosts))
self.assertEqual(2, self.scrape_prometheus_var(
'vlan_hosts_learned', {'vlan': '100'}))
self.assertGreater(
self.scrape_prometheus_var(
'vlan_learn_bans', {'vlan': '100'}), 0)
class FaucetMaxHostsPortTest(FaucetUntaggedTest):
MAX_HOSTS = 3
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: 100
max_hosts: 3
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[:2]
self.ping_all_when_learned()
for i in range(10, 10+(self.MAX_HOSTS*2)):
mac_intf = 'mac%u' % i
mac_ipv4 = '10.0.0.%u' % i
self.add_macvlan(second_host, mac_intf, ipa=mac_ipv4)
ping_cmd = mininet_test_util.timeout_cmd(
'fping %s -c1 -t1 -I%s %s > /dev/null 2> /dev/null' % (
self.FPING_ARGS_SHORT, mac_intf, first_host.IP()),
2)
second_host.cmd(ping_cmd)
flows = self.get_matching_flows_on_dpid(
self.dpid,
{'dl_vlan': '100', 'in_port': int(self.port_map['port_2'])},
table_id=self._ETH_SRC_TABLE)
self.assertEqual(self.MAX_HOSTS, len(flows))
port_labels = self.port_labels(self.port_map['port_2'])
self.assertGreater(
self.scrape_prometheus_var(
'port_learn_bans', port_labels), 0)
learned_macs = [
mac for _, mac in self.scrape_prometheus_var(
'learned_macs', dict(port_labels, vlan=100),
multiple=True) if mac]
self.assertEqual(self.MAX_HOSTS, len(learned_macs))
class FaucetSingleHostsTimeoutPrometheusTest(FaucetUntaggedTest):
"""Test that hosts learned and reported in Prometheus, time out."""
TIMEOUT = 15
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
"""
CONFIG = """
timeout: 25
arp_neighbor_timeout: 12
nd_neighbor_timeout: 12
ignore_learn_ins: 0
learn_jitter: 0
cache_update_guard_time: 1
""" + CONFIG_BOILER_UNTAGGED
def hosts_learned(self, hosts):
"""Check that hosts are learned by FAUCET on the expected ports."""
macs_learned = []
for mac, port in hosts.items():
if self.prom_mac_learned(mac, port=port):
self.mac_learned(mac, in_port=port)
macs_learned.append(mac)
return macs_learned
def verify_hosts_learned(self, first_host, second_host, mac_ips, hosts):
mac_ipv4s = [mac_ipv4 for mac_ipv4, _ in mac_ips]
fping_cmd = mininet_test_util.timeout_cmd(
'fping %s -c%u %s' % (
self.FPING_ARGS_SHORT, int(self.TIMEOUT / 3), ' '.join(mac_ipv4s)),
self.TIMEOUT / 2)
for _ in range(3):
fping_out = first_host.cmd(fping_cmd)
self.assertTrue(fping_out, msg='fping did not complete: %s' % fping_cmd)
macs_learned = self.hosts_learned(hosts)
if len(macs_learned) == len(hosts):
return
time.sleep(1)
first_host_diag = first_host.cmd('ifconfig -a ; arp -an')
second_host_diag = second_host.cmd('ifconfig -a ; arp -an')
self.fail('%s cannot be learned (%s != %s)\nfirst host %s\nsecond host %s\n' % (
mac_ips, macs_learned, fping_out, first_host_diag, second_host_diag))
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[:2]
all_learned_mac_ports = {}
# learn batches of hosts, then down them
for base in (10, 20, 30):
def add_macvlans(base, count):
mac_intfs = []
mac_ips = []
learned_mac_ports = {}
for i in range(base, base + count):
mac_intf = 'mac%u' % i
mac_intfs.append(mac_intf)
mac_ipv4 = '10.0.0.%u' % i
self.add_macvlan(second_host, mac_intf, ipa=mac_ipv4)
macvlan_mac = self.get_mac_of_intf(mac_intf, second_host)
learned_mac_ports[macvlan_mac] = self.port_map['port_2']
mac_ips.append((mac_ipv4, macvlan_mac))
return (mac_intfs, mac_ips, learned_mac_ports)
def down_macvlans(macvlans):
for macvlan in macvlans:
second_host.cmd('ip link set dev %s down' % macvlan)
def learn_then_down_hosts(base, count):
mac_intfs, mac_ips, learned_mac_ports = add_macvlans(base, count)
self.verify_hosts_learned(first_host, second_host, mac_ips, learned_mac_ports)
down_macvlans(mac_intfs)
return learned_mac_ports
learned_mac_ports = learn_then_down_hosts(base, 5)
all_learned_mac_ports.update(learned_mac_ports)
# make sure at least one host is still learned
learned_macs = self.hosts_learned(all_learned_mac_ports)
self.assertTrue(learned_macs)
before_expiry_learned_macs = learned_macs
# make sure they all eventually expire
for _ in range(self.TIMEOUT * 3):
learned_macs = self.hosts_learned(all_learned_mac_ports)
self.verify_learn_counters(
100, list(range(1, len(self.hosts_name_ordered()) + 1)))
if not learned_macs:
break
time.sleep(1)
self.assertFalse(learned_macs, msg='MACs did not expire: %s' % learned_macs)
self.assertTrue(before_expiry_learned_macs)
for mac in before_expiry_learned_macs:
self.wait_until_no_matching_flow({'eth_dst': mac}, table_id=self._ETH_DST_TABLE)
class FaucetSingleHostsNoIdleTimeoutPrometheusTest(FaucetSingleHostsTimeoutPrometheusTest):
"""Test broken reset idle timer on flow refresh workaround."""
CONFIG = """
timeout: 15
arp_neighbor_timeout: 4
nd_neighbor_timeout: 4
ignore_learn_ins: 0
learn_jitter: 0
cache_update_guard_time: 1
idle_dst: False
""" + CONFIG_BOILER_UNTAGGED
class FaucetSingleL3LearnMACsOnPortTest(FaucetUntaggedTest):
# TODO: currently set to accommodate the least capable hardware
def _max_hosts(): # pylint: disable=no-method-argument,no-self-use
return 512
MAX_HOSTS = _max_hosts()
TEST_IPV4_NET = '10.0.0.0'
TEST_IPV4_PREFIX = 16 # must hold more than MAX_HOSTS + 4
LEARN_IPV4 = '10.0.254.254'
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
max_hosts: %u
faucet_vips: ["10.0.254.254/16"]
""" % (_max_hosts() + 4)
CONFIG = ("""
ignore_learn_ins: 0
metrics_rate_limit_sec: 3
table_sizes:
eth_src: %u
eth_dst: %u
ipv4_fib: %u
""" % (_max_hosts() + 64, _max_hosts() + 64, _max_hosts() + 64) +
"""
interfaces:
%(port_1)d:
native_vlan: 100
max_hosts: 4096
%(port_2)d:
native_vlan: 100
max_hosts: 4096
%(port_3)d:
native_vlan: 100
max_hosts: 4096
%(port_4)d:
native_vlan: 100
max_hosts: 4096
""")
def test_untagged(self):
test_net = ipaddress.IPv4Network(
'%s/%s' % (self.TEST_IPV4_NET, self.TEST_IPV4_PREFIX))
learn_ip = ipaddress.IPv4Address(self.LEARN_IPV4)
self.verify_learning(test_net, learn_ip, 64, self.MAX_HOSTS)
class FaucetSingleL2LearnMACsOnPortTest(FaucetUntaggedTest):
# TODO: currently set to accommodate the least capable hardware
def _max_hosts(): # pylint: disable=no-method-argument,no-self-use
return 1024
MAX_HOSTS = _max_hosts()
TEST_IPV4_NET = '10.0.0.0'
TEST_IPV4_PREFIX = 16 # must hold more than MAX_HOSTS + 4
LEARN_IPV4 = '10.0.0.1'
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
max_hosts: %u
""" % (_max_hosts() + 4)
CONFIG = ("""
ignore_learn_ins: 0
metrics_rate_limit_sec: 3
table_sizes:
eth_src: %u
eth_dst: %u
""" % (_max_hosts() + 64, _max_hosts() + 64) +
"""
interfaces:
%(port_1)d:
native_vlan: 100
max_hosts: 4096
%(port_2)d:
native_vlan: 100
max_hosts: 4096
%(port_3)d:
native_vlan: 100
max_hosts: 4096
%(port_4)d:
native_vlan: 100
max_hosts: 4096
""")
def test_untagged(self):
test_net = ipaddress.IPv4Network(
'%s/%s' % (self.TEST_IPV4_NET, self.TEST_IPV4_PREFIX))
learn_ip = ipaddress.IPv4Address(self.LEARN_IPV4)
self.verify_learning(test_net, learn_ip, 64, self.MAX_HOSTS)
class FaucetUntaggedHUPTest(FaucetUntaggedTest):
"""Test handling HUP signal without config change."""
def _configure_count_with_retry(self, expected_count):
for _ in range(3):
configure_count = self.get_configure_count()
if configure_count == expected_count:
return
time.sleep(1)
self.fail('configure count %u != expected %u' % (
configure_count, expected_count))
def test_untagged(self):
"""Test that FAUCET receives HUP signal and keeps switching."""
init_config_count = self.get_configure_count()
reload_type_vars = (
'faucet_config_reload_cold',
'faucet_config_reload_warm')
reload_vals = {}
for var in reload_type_vars:
reload_vals[var] = self.scrape_prometheus_var(
var, dpid=True, default=None)
for i in range(init_config_count, init_config_count+3):
self._configure_count_with_retry(i)
with open(self.faucet_config_path, 'a') as config_file:
config_file.write('\n')
self.verify_faucet_reconf(change_expected=False)
self._configure_count_with_retry(i+1)
self.assertEqual(
self.scrape_prometheus_var(
'of_dp_disconnections_total', dpid=True, default=None),
0)
self.assertEqual(
self.scrape_prometheus_var(
'of_dp_connections_total', dpid=True, default=None),
1)
self.wait_until_controller_flow()
self.ping_all_when_learned()
for var in reload_type_vars:
self.assertEqual(
reload_vals[var],
self.scrape_prometheus_var(var, dpid=True, default=None))
class FaucetIPv4TupleTest(FaucetTest):
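    """Test pushing increasing numbers of exact-match IPv4 TCP tuple ACL rules."""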
MAX_RULES = 1024
ETH_TYPE = IPV4_ETH
NET_BASE = ipaddress.IPv4Network('10.0.0.0/16')
N_UNTAGGED = 4
N_TAGGED = 0
LINKS_PER_HOST = 1
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
"""
CONFIG = """
table_sizes:
port_acl: 1100
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
"""
START_ACL_CONFIG = """
acls:
1:
exact_match: True
rules:
- rule:
actions: {allow: 1}
eth_type: 2048
ip_proto: 6
ipv4_dst: 127.0.0.1
ipv4_src: 127.0.0.1
tcp_dst: 65535
tcp_src: 65535
"""
def setUp(self): # pylint: disable=invalid-name
super(FaucetIPv4TupleTest, self).setUp()
self.acl_config_file = os.path.join(self.tmpdir, 'acl.txt')
self.CONFIG = '\n'.join(
(self.CONFIG, 'include:\n - %s' % self.acl_config_file))
        with open(self.acl_config_file, 'w') as acl_config_file:
            acl_config_file.write(self.START_ACL_CONFIG)
self.topo = self.topo_class(
self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
n_tagged=self.N_TAGGED, n_untagged=self.N_UNTAGGED,
links_per_host=self.LINKS_PER_HOST, hw_dpid=self.hw_dpid)
self.start_net()
def _push_tuples(self, eth_type, host_ips):
max_rules = len(host_ips)
rules = 1
while rules <= max_rules:
rules_yaml = []
for rule in range(rules):
host_ip = host_ips[rule]
port = (rule + 1) % 2**16
ip_match = str(host_ip)
rule_yaml = {
'eth_type': eth_type,
'ip_proto': 6,
'tcp_src': port,
'tcp_dst': port,
'ipv%u_src' % host_ip.version: ip_match,
'ipv%u_dst' % host_ip.version: ip_match,
'actions': {'allow': 1},
}
rules_yaml.append({'rule': rule_yaml})
yaml_acl_conf = {'acls': {1: {'exact_match': True, 'rules': rules_yaml}}}
tuple_txt = '%u IPv%u tuples\n' % (len(rules_yaml), host_ip.version)
error('pushing %s' % tuple_txt)
self.reload_conf(
yaml_acl_conf, self.acl_config_file, # pytype: disable=attribute-error
restart=True, cold_start=False)
error('pushed %s' % tuple_txt)
self.wait_until_matching_flow(
{'tp_src': port, 'ip_proto': 6, 'dl_type': eth_type}, table_id=0)
rules *= 2
def test_tuples(self):
host_ips = [host_ip for host_ip in itertools.islice(
self.NET_BASE.hosts(), self.MAX_RULES)]
self._push_tuples(self.ETH_TYPE, host_ips)
class FaucetIPv6TupleTest(FaucetIPv4TupleTest):
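    """Test pushing increasing numbers of exact-match IPv6 TCP tuple ACL rules."""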
MAX_RULES = 1024
ETH_TYPE = IPV6_ETH
NET_BASE = ipaddress.IPv6Network('fc00::00/64')
START_ACL_CONFIG = """
acls:
1:
exact_match: True
rules:
- rule:
actions: {allow: 1}
eth_type: 34525
ip_proto: 6
ipv6_dst: ::1
ipv6_src: ::1
tcp_dst: 65535
tcp_src: 65535
"""
class FaucetConfigReloadTestBase(FaucetTest):
"""Test handling HUP signal with config change."""
N_UNTAGGED = 4
N_TAGGED = 0
LINKS_PER_HOST = 1
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
200:
description: "untagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: allow
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
tagged_vlans: [200]
"""
ACL = """
acls:
1:
- rule:
description: "rule 1"
cookie: COOKIE
dl_type: 0x800
ip_proto: 6
tcp_dst: 5001
actions:
allow: 0
- rule:
cookie: COOKIE
dl_type: 0x800
ip_proto: 6
tcp_dst: 5002
actions:
allow: 1
- rule:
cookie: COOKIE
actions:
allow: 1
2:
- rule:
cookie: COOKIE
dl_type: 0x800
ip_proto: 6
tcp_dst: 5001
actions:
allow: 1
- rule:
cookie: COOKIE
dl_type: 0x800
ip_proto: 6
tcp_dst: 5002
actions:
allow: 0
- rule:
cookie: COOKIE
actions:
allow: 1
3:
- rule:
cookie: COOKIE
dl_type: 0x800
ip_proto: 6
tcp_dst: 5003
actions:
allow: 0
4:
- rule:
cookie: COOKIE
dl_type: 0x800
ip_proto: 6
tcp_dst: 5002
actions:
allow: 1
- rule:
cookie: COOKIE
dl_type: 0x800
ip_proto: 6
tcp_dst: 5001
actions:
allow: 0
deny:
- rule:
cookie: COOKIE
dl_type: 0x800
ip_proto: 6
tcp_dst: 65535
actions:
allow: 0
- rule:
cookie: COOKIE
actions:
allow: 0
allow:
- rule:
cookie: COOKIE
dl_type: 0x800
ip_proto: 6
tcp_dst: 65535
actions:
allow: 1
- rule:
cookie: COOKIE
actions:
allow: 1
"""
ACL_COOKIE = None
def setUp(self): # pylint: disable=invalid-name
super(FaucetConfigReloadTestBase, self).setUp()
self.ACL_COOKIE = random.randint(1, 2**16-1)
self.ACL = self.ACL.replace('COOKIE', str(self.ACL_COOKIE))
self.acl_config_file = '%s/acl.yaml' % self.tmpdir
with open(self.acl_config_file, 'w') as config_file:
config_file.write(self.ACL)
self.CONFIG = '\n'.join(
(self.CONFIG, 'include:\n - %s' % self.acl_config_file))
self.topo = self.topo_class(
self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
n_tagged=self.N_TAGGED, n_untagged=self.N_UNTAGGED,
links_per_host=self.LINKS_PER_HOST, hw_dpid=self.hw_dpid)
self.start_net()
class FaucetDelPortTest(FaucetConfigReloadTestBase):
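    """Test that learned host flows are removed when their port is removed from the config."""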
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
200:
description: "untagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: allow
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 200
"""
def test_port_down_flow_gone(self):
last_host = self.hosts_name_ordered()[-1]
self.require_host_learned(last_host)
second_host_dst_match = {'eth_dst': last_host.MAC()}
self.wait_until_matching_flow(
second_host_dst_match, table_id=self._ETH_DST_TABLE)
self.change_port_config(
self.port_map['port_4'], None, None,
restart=True, cold_start=True)
self.wait_until_no_matching_flow(
second_host_dst_match, table_id=self._ETH_DST_TABLE)
class FaucetConfigReloadTest(FaucetConfigReloadTestBase):
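    """Test reloading various config changes (new DPs, invalid config, VLAN, ACL and permanent_learn changes)."""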
def test_add_unknown_dp(self):
conf = self._get_faucet_conf()
conf['dps']['unknown'] = {
'dp_id': int(self.rand_dpid()),
'hardware': 'Open vSwitch',
}
self.reload_conf(
conf, self.faucet_config_path,
restart=True, cold_start=False, change_expected=False)
def test_tabs_are_bad(self):
self.ping_all_when_learned()
self.assertEqual(0, self.scrape_prometheus_var('faucet_config_load_error', dpid=False))
orig_conf = self._get_faucet_conf()
self.force_faucet_reload(
'\t'.join(('tabs', 'are', 'bad')))
self.assertEqual(1, self.scrape_prometheus_var('faucet_config_load_error', dpid=False))
self.ping_all_when_learned()
self.reload_conf(
orig_conf, self.faucet_config_path,
restart=True, cold_start=False, change_expected=False)
self.assertEqual(0, self.scrape_prometheus_var('faucet_config_load_error', dpid=False))
def test_port_change_vlan(self):
first_host, second_host = self.hosts_name_ordered()[:2]
third_host, fourth_host = self.hosts_name_ordered()[2:]
self.ping_all_when_learned()
self.change_port_config(
self.port_map['port_1'], 'native_vlan', 200,
restart=False, cold_start=False)
self.change_port_config(
self.port_map['port_2'], 'native_vlan', 200,
restart=True, cold_start=True)
for port_name in ('port_1', 'port_2'):
self.wait_until_matching_flow(
{'in_port': int(self.port_map[port_name])},
table_id=self._VLAN_TABLE,
actions=['SET_FIELD: {vlan_vid:4296}'])
self.one_ipv4_ping(first_host, second_host.IP(), require_host_learned=False)
# hosts 1 and 2 now in VLAN 200, so they shouldn't see floods for 3 and 4.
self.verify_vlan_flood_limited(
third_host, fourth_host, first_host)
def test_port_change_acl(self):
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
orig_conf = self._get_faucet_conf()
self.change_port_config(
self.port_map['port_1'], 'acl_in', 1,
cold_start=False)
self.wait_until_matching_flow(
{'in_port': int(self.port_map['port_1']),
'eth_type': IPV4_ETH, 'tcp_dst': 5001, 'ip_proto': 6},
table_id=self._PORT_ACL_TABLE, cookie=self.ACL_COOKIE)
self.verify_tp_dst_blocked(5001, first_host, second_host)
self.verify_tp_dst_notblocked(5002, first_host, second_host)
self.reload_conf(
orig_conf, self.faucet_config_path,
restart=True, cold_start=False, host_cache=100)
self.verify_tp_dst_notblocked(
5001, first_host, second_host, table_id=None)
self.verify_tp_dst_notblocked(
5002, first_host, second_host, table_id=None)
def test_port_change_perm_learn(self):
first_host, second_host, third_host = self.hosts_name_ordered()[0:3]
self.change_port_config(
self.port_map['port_1'], 'permanent_learn', True,
restart=True, cold_start=False)
self.ping_all_when_learned(hard_timeout=0)
original_third_host_mac = third_host.MAC()
third_host.setMAC(first_host.MAC())
self.assertEqual(100.0, self.ping((second_host, third_host)))
self.retry_net_ping(hosts=(first_host, second_host))
third_host.setMAC(original_third_host_mac)
self.ping_all_when_learned(hard_timeout=0)
self.change_port_config(
self.port_map['port_1'], 'acl_in', 1,
restart=True, cold_start=False)
self.wait_until_matching_flow(
{'in_port': int(self.port_map['port_1']),
'eth_type': IPV4_ETH, 'tcp_dst': 5001, 'ip_proto': 6},
table_id=self._PORT_ACL_TABLE)
self.verify_tp_dst_blocked(5001, first_host, second_host)
self.verify_tp_dst_notblocked(5002, first_host, second_host)
class FaucetDeleteConfigReloadTest(FaucetConfigReloadTestBase):
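    """Test that removing interfaces from the config triggers a cold start."""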
def test_delete_interface(self):
# With all ports changed, we should cold start.
conf = self._get_faucet_conf()
del conf['dps'][self.DP_NAME]['interfaces']
conf['dps'][self.DP_NAME]['interfaces'] = {
int(self.port_map['port_1']): {
'native_vlan': '100',
'tagged_vlans': ['200'],
}
}
self.reload_conf(
conf, self.faucet_config_path,
restart=True, cold_start=True, change_expected=True)
class FaucetRouterConfigReloadTest(FaucetConfigReloadTestBase):
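    """Test that adding an inter-VLAN router to the config triggers a cold start."""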
def test_router_config_reload(self):
conf = self._get_faucet_conf()
conf['routers'] = {
'router-1': {
'vlans': ['100', '200'],
}
}
self.reload_conf(
conf, self.faucet_config_path,
restart=True, cold_start=True, change_expected=True)
class FaucetConfigReloadAclTest(FaucetConfigReloadTestBase):
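    """Test reloading port ACL configuration changes."""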
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acls_in: [allow]
%(port_2)d:
native_vlan: 100
acl_in: allow
%(port_3)d:
native_vlan: 100
acl_in: deny
%(port_4)d:
native_vlan: 100
acl_in: deny
"""
def _verify_hosts_learned(self, hosts):
self.pingAll()
for host in hosts:
self.require_host_learned(host)
self.assertEqual(len(hosts), self.scrape_prometheus_var(
'vlan_hosts_learned', {'vlan': '100'}))
def test_port_acls(self):
hup = not self.STAT_RELOAD
first_host, second_host, third_host = self.hosts_name_ordered()[:3]
self._verify_hosts_learned((first_host, second_host))
self.change_port_config(
self.port_map['port_3'], 'acl_in', 'allow',
restart=True, cold_start=False, hup=hup)
self.change_port_config(
self.port_map['port_1'], 'acls_in', [3, 4, 'allow'],
restart=True, cold_start=False, hup=hup)
self.coldstart_conf(hup=hup)
self._verify_hosts_learned((first_host, second_host, third_host))
self.verify_tp_dst_blocked(5001, first_host, second_host)
self.verify_tp_dst_notblocked(5002, first_host, second_host)
self.verify_tp_dst_blocked(5003, first_host, second_host)
class FaucetConfigStatReloadAclTest(FaucetConfigReloadAclTest):
# Use the stat-based reload method.
STAT_RELOAD = '1'
class FaucetUntaggedBGPDualstackDefaultRouteTest(FaucetUntaggedTest):
"""Test IPv4 routing and import default route from BGP."""
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["10.0.0.254/24", "fc00::1:254/112"]
routers:
router1:
bgp:
as: 1
connect_mode: "passive"
port: %(bgp_port)d
routerid: "1.1.1.1"
server_addresses: ["127.0.0.1", "::1"]
neighbor_addresses: ["127.0.0.1", "::1"]
vlan: 100
""" + """
neighbor_as: %u
""" % PEER_BGP_AS
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
exabgp_peer_conf = """
static {
route 0.0.0.0/0 next-hop 10.0.0.1 local-preference 100;
}
"""
exabgp_log = None
exabgp_err = None
config_ports = {'bgp_port': None}
def pre_start_net(self):
exabgp_conf = self.get_exabgp_conf(
mininet_test_util.LOCALHOST, self.exabgp_peer_conf)
self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)
def test_untagged(self):
"""Test IPv4 routing, and BGP routes received."""
first_host, second_host = self.hosts_name_ordered()[:2]
first_host_alias_ip = ipaddress.ip_interface('10.99.99.99/24')
first_host_alias_host_ip = ipaddress.ip_interface(
ipaddress.ip_network(first_host_alias_ip.ip))
self.host_ipv4_alias(first_host, first_host_alias_ip)
self.wait_bgp_up(
mininet_test_util.LOCALHOST, 100, self.exabgp_log, self.exabgp_err)
self.assertGreater(
self.scrape_prometheus_var(
'bgp_neighbor_routes', {'ipv': '4', 'vlan': '100'}),
0)
self.wait_exabgp_sent_updates(self.exabgp_log)
self.add_host_route(
second_host, first_host_alias_host_ip, self.FAUCET_VIPV4.ip)
for _ in range(2):
self.one_ipv4_ping(second_host, first_host_alias_ip.ip)
self.one_ipv4_controller_ping(first_host)
self.coldstart_conf()
class FaucetUntaggedBGPIPv4DefaultRouteTest(FaucetUntaggedTest):
"""Test IPv4 routing and import default route from BGP."""
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["10.0.0.254/24"]
routers:
router1:
bgp:
as: 1
connect_mode: "passive"
port: %(bgp_port)d
routerid: "1.1.1.1"
server_addresses: ["127.0.0.1"]
neighbor_addresses: ["127.0.0.1"]
vlan: 100
""" + """
neighbor_as: %u
""" % PEER_BGP_AS
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
exabgp_peer_conf = """
static {
route 0.0.0.0/0 next-hop 10.0.0.1 local-preference 100;
}
"""
exabgp_log = None
exabgp_err = None
config_ports = {'bgp_port': None}
def pre_start_net(self):
exabgp_conf = self.get_exabgp_conf(
mininet_test_util.LOCALHOST, self.exabgp_peer_conf)
self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)
def test_untagged(self):
"""Test IPv4 routing, and BGP routes received."""
first_host, second_host = self.hosts_name_ordered()[:2]
first_host_alias_ip = ipaddress.ip_interface('10.99.99.99/24')
first_host_alias_host_ip = ipaddress.ip_interface(
ipaddress.ip_network(first_host_alias_ip.ip))
self.host_ipv4_alias(first_host, first_host_alias_ip)
self.wait_bgp_up(
mininet_test_util.LOCALHOST, 100, self.exabgp_log, self.exabgp_err)
self.assertGreater(
self.scrape_prometheus_var(
'bgp_neighbor_routes', {'ipv': '4', 'vlan': '100'}),
0)
self.wait_exabgp_sent_updates(self.exabgp_log)
self.add_host_route(
second_host, first_host_alias_host_ip, self.FAUCET_VIPV4.ip)
self.one_ipv4_ping(second_host, first_host_alias_ip.ip)
self.one_ipv4_controller_ping(first_host)
self.coldstart_conf()
class FaucetUntaggedBGPIPv4RouteTest(FaucetUntaggedTest):
"""Test IPv4 routing and import from BGP."""
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["10.0.0.254/24"]
routes:
- route:
ip_dst: 10.99.99.0/24
ip_gw: 10.0.0.1
routers:
router1:
bgp:
as: 1
connect_mode: "passive"
port: %(bgp_port)d
routerid: "1.1.1.1"
server_addresses: ["127.0.0.1"]
neighbor_addresses: ["127.0.0.1"]
vlan: 100
""" + """
neighbor_as: %u
""" % PEER_BGP_AS
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
exabgp_peer_conf = """
static {
route 10.0.1.0/24 next-hop 10.0.0.1 local-preference 100;
route 10.0.2.0/24 next-hop 10.0.0.2 local-preference 100;
route 10.0.3.0/24 next-hop 10.0.0.2 local-preference 100;
route 10.0.4.0/24 next-hop 10.0.0.254;
route 10.0.5.0/24 next-hop 10.10.0.1;
}
"""
exabgp_log = None
exabgp_err = None
config_ports = {'bgp_port': None}
def pre_start_net(self):
exabgp_conf = self.get_exabgp_conf(
mininet_test_util.LOCALHOST, self.exabgp_peer_conf)
self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)
def test_untagged(self):
"""Test IPv4 routing, and BGP routes received."""
first_host, second_host = self.hosts_name_ordered()[:2]
# wait until 10.0.0.1 has been resolved
self.wait_for_route_as_flow(
first_host.MAC(), ipaddress.IPv4Network('10.99.99.0/24'))
self.wait_bgp_up(
mininet_test_util.LOCALHOST, 100, self.exabgp_log, self.exabgp_err)
self.assertGreater(
self.scrape_prometheus_var(
'bgp_neighbor_routes', {'ipv': '4', 'vlan': '100'}),
0)
self.wait_exabgp_sent_updates(self.exabgp_log)
self.verify_invalid_bgp_route(r'.+10.0.4.0\/24.+cannot be us$')
self.verify_invalid_bgp_route(r'.+10.0.5.0\/24.+because nexthop not in VLAN.+')
self.wait_for_route_as_flow(
second_host.MAC(), ipaddress.IPv4Network('10.0.3.0/24'))
self.verify_ipv4_routing_mesh()
self.flap_all_switch_ports()
self.verify_ipv4_routing_mesh()
for host in first_host, second_host:
self.one_ipv4_controller_ping(host)
self.verify_traveling_dhcp_mac()
class FaucetUntaggedIPv4RouteTest(FaucetUntaggedTest):
"""Test IPv4 routing and export to BGP."""
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["10.0.0.254/24"]
routes:
- route:
ip_dst: "10.0.1.0/24"
ip_gw: "10.0.0.1"
- route:
ip_dst: "10.0.2.0/24"
ip_gw: "10.0.0.2"
- route:
ip_dst: "10.0.3.0/24"
ip_gw: "10.0.0.2"
routers:
router1:
bgp:
as: 1
connect_mode: "passive"
port: %(bgp_port)d
routerid: "1.1.1.1"
server_addresses: ["127.0.0.1"]
neighbor_addresses: ["127.0.0.1"]
vlan: 100
""" + """
neighbor_as: %u
""" % PEER_BGP_AS
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
exabgp_log = None
exabgp_err = None
config_ports = {'bgp_port': None}
def pre_start_net(self):
exabgp_conf = self.get_exabgp_conf(mininet_test_util.LOCALHOST)
self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)
def test_untagged(self):
"""Test IPv4 routing, and BGP routes sent."""
self.verify_ipv4_routing_mesh()
self.flap_all_switch_ports()
self.verify_ipv4_routing_mesh()
self.wait_bgp_up(
mininet_test_util.LOCALHOST, 100, self.exabgp_log, self.exabgp_err)
self.assertGreater(
self.scrape_prometheus_var(
'bgp_neighbor_routes', {'ipv': '4', 'vlan': '100'}),
0)
# exabgp should have received our BGP updates
updates = self.exabgp_updates(self.exabgp_log)
for route_string in (
'10.0.0.0/24 next-hop 10.0.0.254',
'10.0.1.0/24 next-hop 10.0.0.1',
'10.0.2.0/24 next-hop 10.0.0.2',
                '10.0.3.0/24 next-hop 10.0.0.2'):
self.assertTrue(re.search(route_string, updates), msg=updates)
        # test that the nexthop host route expires when the port goes down
first_host = self.hosts_name_ordered()[0]
match, table = self.match_table(ipaddress.IPv4Network('10.0.0.1/32'))
ofmsg = None
for _ in range(5):
self.one_ipv4_controller_ping(first_host)
ofmsg = self.get_matching_flow(match, table_id=table)
if ofmsg:
break
time.sleep(1)
self.assertTrue(ofmsg, msg=match)
self.set_port_down(self.port_map['port_1'])
for _ in range(5):
if not self.get_matching_flow(match, table_id=table):
return
time.sleep(1)
self.fail('host route %s still present' % match)
class FaucetUntaggedRestBcastIPv4RouteTest(FaucetUntaggedIPv4RouteTest):
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
interfaces:
%(port_1)d:
native_vlan: 100
restricted_bcast_arpnd: true
%(port_2)d:
native_vlan: 100
restricted_bcast_arpnd: true
%(port_3)d:
native_vlan: 100
restricted_bcast_arpnd: true
%(port_4)d:
native_vlan: 100
restricted_bcast_arpnd: true
"""
class FaucetUntaggedVLanUnicastFloodTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: True
"""
CONFIG = CONFIG_BOILER_UNTAGGED
def test_untagged(self):
self.ping_all_when_learned()
self.assertTrue(self.bogus_mac_flooded_to_port1())
class FaucetUntaggedNoVLanUnicastFloodTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
"""
CONFIG = CONFIG_BOILER_UNTAGGED
def test_untagged(self):
self.assertFalse(self.bogus_mac_flooded_to_port1())
class FaucetUntaggedPortUnicastFloodTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
unicast_flood: True
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
# VLAN level config to disable flooding takes precedence,
# cannot enable port-only flooding.
self.assertFalse(self.bogus_mac_flooded_to_port1())
class FaucetUntaggedNoPortUnicastFloodTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: True
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
unicast_flood: False
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
self.assertFalse(self.bogus_mac_flooded_to_port1())
class FaucetUntaggedHostMoveTest(FaucetUntaggedTest):
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
self.retry_net_ping(hosts=(first_host, second_host))
self.swap_host_macs(first_host, second_host)
self.ping((first_host, second_host))
for host, in_port in (
(first_host, self.port_map['port_1']),
(second_host, self.port_map['port_2'])):
self.require_host_learned(host, in_port=in_port)
self.retry_net_ping(hosts=(first_host, second_host))
class FaucetUntaggedHostPermanentLearnTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
permanent_learn: True
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
self.ping_all_when_learned(hard_timeout=0)
first_host, second_host, third_host = self.hosts_name_ordered()[:3]
self.assertTrue(self.prom_mac_learned(first_host.MAC(), port=self.port_map['port_1']))
# 3rd host impersonates 1st but 1st host still OK
original_third_host_mac = third_host.MAC()
third_host.setMAC(first_host.MAC())
self.assertEqual(100.0, self.ping((second_host, third_host)))
self.assertTrue(self.prom_mac_learned(first_host.MAC(), port=self.port_map['port_1']))
self.assertFalse(self.prom_mac_learned(first_host.MAC(), port=self.port_map['port_3']))
self.retry_net_ping(hosts=(first_host, second_host))
# 3rd host stops impersonating, now everything fine again.
third_host.setMAC(original_third_host_mac)
self.ping_all_when_learned(hard_timeout=0)
class FaucetCoprocessorTest(FaucetUntaggedTest):
N_UNTAGGED = 3
N_TAGGED = 1
CONFIG = """
interfaces:
%(port_1)d:
coprocessor: {strategy: vlan_vid}
mirror: %(port_4)d
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
# Inject packet into pipeline using coprocessor.
coprocessor_host, first_host, second_host, _ = self.hosts_name_ordered()
self.one_ipv4_ping(first_host, second_host.IP())
tcpdump_filter = ' and '.join((
'ether dst %s' % first_host.MAC(),
'ether src %s' % coprocessor_host.MAC(),
'icmp'))
cmds = [
lambda: coprocessor_host.cmd(
'arp -s %s %s' % (first_host.IP(), first_host.MAC())),
lambda: coprocessor_host.cmd(
'fping %s -c3 %s' % (self.FPING_ARGS_SHORT, first_host.IP())),
]
tcpdump_txt = self.tcpdump_helper(
first_host, tcpdump_filter, cmds, timeout=5, vflags='-vv', packets=1)
self.assertFalse(self.tcpdump_rx_packets(tcpdump_txt, packets=0))
class FaucetUntaggedLoopTest(FaucetTest):
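    """Test that loop_protect bans learning on ports when a loop is created."""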
NUM_DPS = 1
N_TAGGED = 0
N_UNTAGGED = 2
LINKS_PER_HOST = 2
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
loop_protect: True
%(port_4)d:
native_vlan: 100
loop_protect: True
"""
def setUp(self): # pylint: disable=invalid-name
super(FaucetUntaggedLoopTest, self).setUp()
self.topo = self.topo_class(
self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
n_tagged=self.N_TAGGED, n_untagged=self.N_UNTAGGED,
links_per_host=self.LINKS_PER_HOST, hw_dpid=self.hw_dpid)
self.start_net()
def total_port_bans(self):
total_bans = 0
for i in range(self.LINKS_PER_HOST * self.N_UNTAGGED):
port_labels = self.port_labels(self.port_map['port_%u' % (i + 1)])
total_bans += self.scrape_prometheus_var(
'port_learn_bans', port_labels, dpid=True, default=0)
return total_bans
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()
# Normal learning works
self.one_ipv4_ping(first_host, second_host.IP())
start_bans = self.total_port_bans()
# Create a loop between interfaces on second host - a veth pair,
# with two bridges, each connecting one leg of the pair to a host
# interface.
self.quiet_commands(second_host, (
'ip link add name veth-loop1 type veth peer name veth-loop2',
'ip link set veth-loop1 up',
'ip link set veth-loop2 up',
# TODO: tune for loop mitigation performance.
'tc qdisc add dev veth-loop1 root tbf rate 1000kbps latency 10ms burst 1000',
'tc qdisc add dev veth-loop2 root tbf rate 1000kbps latency 10ms burst 1000',
            # Connect one leg of the veth pair to the second host's first interface.
'brctl addbr br-loop1',
'brctl setfd br-loop1 0',
'ip link set br-loop1 up',
'brctl addif br-loop1 veth-loop1',
'brctl addif br-loop1 %s-eth0' % second_host.name,
            # Connect the other leg of the veth pair to the second host's second interface.
'brctl addbr br-loop2',
'brctl setfd br-loop2 0',
'ip link set br-loop2 up',
'brctl addif br-loop2 veth-loop2',
'brctl addif br-loop2 %s-eth1' % second_host.name))
# Flood some traffic into the loop
for _ in range(3):
first_host.cmd('fping %s -c3 10.0.0.254' % self.FPING_ARGS_SHORT)
end_bans = self.total_port_bans()
if end_bans > start_bans:
return
time.sleep(1)
self.assertGreater(end_bans, start_bans)
# Break the loop, and learning should work again
self.quiet_commands(second_host, (
'ip link set veth-loop1 down',
'ip link set veth-loop2 down',))
self.one_ipv4_ping(first_host, second_host.IP())
class FaucetUntaggedIPv4LACPTest(FaucetTest):
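    """Test LACP between FAUCET and a Linux 802.3ad bonded host."""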
NUM_DPS = 1
N_TAGGED = 0
N_UNTAGGED = 2
LINKS_PER_HOST = 2
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["10.0.0.254/24"]
"""
CONFIG = """
max_resolve_backoff_time: 1
lacp_timeout: 3
interfaces:
%(port_1)d:
native_vlan: 100
lacp: 1
%(port_2)d:
native_vlan: 100
lacp: 1
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def setUp(self): # pylint: disable=invalid-name
super(FaucetUntaggedIPv4LACPTest, self).setUp()
self.topo = self.topo_class(
self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
n_tagged=self.N_TAGGED, n_untagged=self.N_UNTAGGED,
links_per_host=self.LINKS_PER_HOST, hw_dpid=self.hw_dpid)
self.start_net()
def test_untagged(self):
first_host = self.hosts_name_ordered()[0]
bond = 'bond0'
# Linux driver should have this state (0x3f/63)
#
# Actor State: 0x3f, LACP Activity, LACP Timeout, Aggregation, Synchronization, Collecting, Distributing
# .... ...1 = LACP Activity: Active
# .... ..1. = LACP Timeout: Short Timeout
# .... .1.. = Aggregation: Aggregatable
# .... 1... = Synchronization: In Sync
# ...1 .... = Collecting: Enabled
# ..1. .... = Distributing: Enabled
# .0.. .... = Defaulted: No
# 0... .... = Expired: No
# [Actor State Flags: **DCSGSA]
# FAUCET should have this state (0x3e/62)
# Actor State: 0x3e, LACP Timeout, Aggregation, Synchronization, Collecting, Distributing
# .... ...0 = LACP Activity: Passive
# .... ..1. = LACP Timeout: Short Timeout
# .... .1.. = Aggregation: Aggregatable
# .... 1... = Synchronization: In Sync
# ...1 .... = Collecting: Enabled
# ..1. .... = Distributing: Enabled
# .0.. .... = Defaulted: No
# 0... .... = Expired: No
# [Actor State Flags: **DCSGS*]
lag_ports = (1, 2)
synced_state_txt = r"""
Slave Interface: \S+-eth0
MII Status: up
Speed: \d+ Mbps
Duplex: full
Link Failure Count: \d+
Permanent HW addr: \S+
Slave queue ID: 0
Aggregator ID: \d+
Actor Churn State: monitoring
Partner Churn State: monitoring
Actor Churned Count: 0
Partner Churned Count: 0
details actor lacp pdu:
system priority: 65535
system mac address: 0e:00:00:00:00:99
port key: \d+
port priority: 255
port number: \d+
port state: 63
details partner lacp pdu:
system priority: 65535
system mac address: 0e:00:00:00:00:01
oper key: 1
port priority: 255
port number: %d
port state: 62
Slave Interface: \S+-eth1
MII Status: up
Speed: \d+ Mbps
Duplex: full
Link Failure Count: \d+
Permanent HW addr: \S+
Slave queue ID: 0
Aggregator ID: \d+
Actor Churn State: monitoring
Partner Churn State: monitoring
Actor Churned Count: 0
Partner Churned Count: 0
details actor lacp pdu:
system priority: 65535
system mac address: 0e:00:00:00:00:99
port key: \d+
port priority: 255
port number: \d+
port state: 63
details partner lacp pdu:
system priority: 65535
system mac address: 0e:00:00:00:00:01
oper key: 1
port priority: 255
port number: %d
port state: 62
""".strip() % tuple([self.port_map['port_%u' % i] for i in lag_ports])
lacp_timeout = 5
def prom_lacp_up_ports():
lacp_up_ports = 0
for lacp_port in lag_ports:
port_labels = self.port_labels(self.port_map['port_%u' % lacp_port])
lacp_state = self.scrape_prometheus_var('port_lacp_state', port_labels, default=0)
lacp_up_ports += 1 if lacp_state == 3 else 0
return lacp_up_ports
def require_lag_up_ports(expected_up_ports):
for _ in range(lacp_timeout*10):
if prom_lacp_up_ports() == expected_up_ports:
break
time.sleep(1)
self.assertEqual(prom_lacp_up_ports(), expected_up_ports)
def require_linux_bond_up():
for _retries in range(lacp_timeout*2):
result = first_host.cmd('cat /proc/net/bonding/%s|sed "s/[ \t]*$//g"' % bond)
result = '\n'.join([line.rstrip() for line in result.splitlines()])
with open(os.path.join(self.tmpdir, 'bonding-state.txt'), 'w') as state_file:
state_file.write(result)
if re.search(synced_state_txt, result):
break
time.sleep(1)
self.assertTrue(
re.search(synced_state_txt, result),
msg='LACP did not synchronize: %s\n\nexpected:\n\n%s' % (
result, synced_state_txt))
# Start with ports down.
for port in lag_ports:
self.set_port_down(self.port_map['port_%u' % port])
require_lag_up_ports(0)
orig_ip = first_host.IP()
switch = self.first_switch()
bond_members = [pair[0].name for pair in first_host.connectionsTo(switch)]
# Deconfigure bond members
for bond_member in bond_members:
self.quiet_commands(first_host, (
'ip link set %s down' % bond_member,
'ip address flush dev %s' % bond_member))
# Configure bond interface
self.quiet_commands(first_host, (
('ip link add %s address 0e:00:00:00:00:99 '
'type bond mode 802.3ad lacp_rate fast miimon 100') % bond,
'ip add add %s/24 dev %s' % (orig_ip, bond),
'ip link set %s up' % bond))
# Add bond members
for bond_member in bond_members:
self.quiet_commands(first_host, (
'ip link set dev %s master %s' % (bond_member, bond),))
for _flaps in range(2):
# All ports down.
for port in lag_ports:
self.set_port_down(self.port_map['port_%u' % port])
require_lag_up_ports(0)
# Pick a random port to come up.
up_port = random.choice(lag_ports)
self.set_port_up(self.port_map['port_%u' % up_port])
require_lag_up_ports(1)
# We have connectivity with only one port.
self.one_ipv4_ping(
first_host, self.FAUCET_VIPV4.ip, require_host_learned=False, intf=bond)
for port in lag_ports:
self.set_port_up(self.port_map['port_%u' % port])
# We have connectivity with two ports.
require_lag_up_ports(2)
require_linux_bond_up()
self.one_ipv4_ping(
first_host, self.FAUCET_VIPV4.ip, require_host_learned=False, intf=bond)
# We have connectivity if that random port goes down.
self.set_port_down(self.port_map['port_%u' % up_port])
require_lag_up_ports(1)
self.one_ipv4_ping(
first_host, self.FAUCET_VIPV4.ip, require_host_learned=False, intf=bond)
for port in lag_ports:
self.set_port_up(self.port_map['port_%u' % port])
class FaucetUntaggedIPv4LACPMismatchTest(FaucetUntaggedIPv4LACPTest):
"""Ensure remote LACP system ID mismatch is logged."""
def test_untagged(self):
first_host = self.hosts_name_ordered()[0]
orig_ip = first_host.IP()
switch = self.first_switch()
bond_members = [pair[0].name for pair in first_host.connectionsTo(switch)]
for i, bond_member in enumerate(bond_members):
bond = 'bond%u' % i
self.quiet_commands(first_host, (
'ip link set %s down' % bond_member,
'ip address flush dev %s' % bond_member,
('ip link add %s address 0e:00:00:00:00:%2.2x '
'type bond mode 802.3ad lacp_rate fast miimon 100') % (bond, i*2+i),
'ip add add %s/24 dev %s' % (orig_ip, bond),
'ip link set %s up' % bond,
'ip link set dev %s master %s' % (bond_member, bond)))
log_file = os.path.join(self.tmpdir, 'faucet.log')
self.wait_until_matching_lines_from_file(r'.+actor system mismatch.+', log_file)
class FaucetUntaggedIPv4ControlPlaneFuzzTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["10.0.0.254/24"]
"""
CONFIG = """
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
def test_ping_fragment_controller(self):
first_host = self.hosts_name_ordered()[0]
first_host.cmd('ping -s 1476 -c 3 %s' % self.FAUCET_VIPV4.ip)
self.one_ipv4_controller_ping(first_host)
def test_fuzz_controller(self):
first_host = self.hosts_name_ordered()[0]
self.one_ipv4_controller_ping(first_host)
packets = 1000
fuzz_template = 'python3 -c \"from scapy.all import * ; scapy.all.send(%s, count=%u)\"'
for fuzz_cmd in (
fuzz_template % ('IP(dst=\'%s\')/fuzz(%s(type=0))' % (self.FAUCET_VIPV4.ip, 'ICMP'), packets),
fuzz_template % ('IP(dst=\'%s\')/fuzz(%s(type=8))' % (self.FAUCET_VIPV4.ip, 'ICMP'), packets),
fuzz_template % ('fuzz(%s(pdst=\'%s\'))' % ('ARP', self.FAUCET_VIPV4.ip), packets)):
fuzz_out = first_host.cmd(mininet_test_util.timeout_cmd(fuzz_cmd, 180))
self.assertTrue(
re.search('Sent %u packets' % packets, fuzz_out), msg='%s: %s' % (
fuzz_cmd, fuzz_out))
self.one_ipv4_controller_ping(first_host)
def test_flap_ping_controller(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
for _ in range(5):
self.one_ipv4_ping(first_host, second_host.IP())
for host in first_host, second_host:
self.one_ipv4_controller_ping(host)
self.flap_all_switch_ports()
class FaucetUntaggedIPv4ControlPlaneTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["10.0.0.254/24"]
"""
CONFIG = """
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
def test_fping_controller(self):
first_host = self.hosts_name_ordered()[0]
self.one_ipv4_controller_ping(first_host)
# Try 64 byte icmp packets
self.verify_controller_fping(first_host, self.FAUCET_VIPV4)
# Try 128 byte icmp packets
self.verify_controller_fping(first_host, self.FAUCET_VIPV4, size=128)
class FaucetUntaggedIPv6RATest(FaucetUntaggedTest):
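    """Test IPv6 router advertisements and router solicitation replies from FAUCET."""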
FAUCET_MAC = "0e:00:00:00:00:99"
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["fe80::1:254/64", "fc00::1:254/112", "fc00::2:254/112", "10.0.0.254/24"]
faucet_mac: "%s"
""" % FAUCET_MAC
CONFIG = """
advertise_interval: 5
""" + CONFIG_BOILER_UNTAGGED
def test_ndisc6(self):
first_host = self.hosts_name_ordered()[0]
for vip in ('fe80::1:254', 'fc00::1:254', 'fc00::2:254'):
self.assertEqual(
self.FAUCET_MAC.upper(),
first_host.cmd('ndisc6 -q %s %s' % (vip, first_host.defaultIntf())).strip())
def test_rdisc6(self):
first_host = self.hosts_name_ordered()[0]
rdisc6_results = sorted(list(set(first_host.cmd(
'rdisc6 -q %s' % first_host.defaultIntf()).splitlines())))
self.assertEqual(
['fc00::1:0/112', 'fc00::2:0/112'],
rdisc6_results)
def test_ra_advertise(self):
first_host = self.hosts_name_ordered()[0]
tcpdump_filter = ' and '.join((
'ether dst 33:33:00:00:00:01',
'ether src %s' % self.FAUCET_MAC,
'icmp6',
'ip6[40] == 134',
'ip6 host fe80::1:254'))
tcpdump_txt = self.tcpdump_helper(
first_host, tcpdump_filter, [], timeout=30, vflags='-vv', packets=1)
for ra_required in (
r'ethertype IPv6 \(0x86dd\), length 142',
r'fe80::1:254 > ff02::1:.+ICMP6, router advertisement',
r'fc00::1:0/112, Flags \[onlink, auto\]',
r'fc00::2:0/112, Flags \[onlink, auto\]',
r'source link-address option \(1\), length 8 \(1\): %s' % self.FAUCET_MAC):
self.assertTrue(
re.search(ra_required, tcpdump_txt),
msg='%s: %s' % (ra_required, tcpdump_txt))
def test_rs_reply(self):
first_host = self.hosts_name_ordered()[0]
tcpdump_filter = ' and '.join((
'ether src %s' % self.FAUCET_MAC,
'ether dst %s' % first_host.MAC(),
'icmp6',
'ip6[40] == 134',
'ip6 host fe80::1:254'))
tcpdump_txt = self.tcpdump_helper(
first_host, tcpdump_filter, [
lambda: first_host.cmd(
'rdisc6 -1 %s' % first_host.defaultIntf())],
timeout=30, vflags='-vv', packets=1)
for ra_required in (
r'fe80::1:254 > fe80::.+ICMP6, router advertisement',
r'fc00::1:0/112, Flags \[onlink, auto\]',
r'fc00::2:0/112, Flags \[onlink, auto\]',
r'source link-address option \(1\), length 8 \(1\): %s' % self.FAUCET_MAC):
self.assertTrue(
re.search(ra_required, tcpdump_txt),
msg='%s: %s (%s)' % (ra_required, tcpdump_txt, tcpdump_filter))
class FaucetUntaggedIPv6ControlPlaneFuzzTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["fc00::1:254/112"]
"""
CONFIG = """
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
def test_flap_ping_controller(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
self.add_host_ipv6_address(first_host, 'fc00::1:1/112')
self.add_host_ipv6_address(second_host, 'fc00::1:2/112')
for _ in range(5):
self.one_ipv6_ping(first_host, 'fc00::1:2')
for host in first_host, second_host:
self.one_ipv6_controller_ping(host)
self.flap_all_switch_ports()
def test_fuzz_controller(self):
first_host = self.hosts_name_ordered()[0]
self.add_host_ipv6_address(first_host, 'fc00::1:1/112')
self.one_ipv6_controller_ping(first_host)
fuzz_success = False
packets = 1000
count = 0
abort = False
def note(*args):
error('%s:' % self._test_name(), *args + tuple('\n'))
        # Some of these tests have been slowing down and timing out,
        # so this code is intended to allow some debugging and analysis.
for fuzz_class in dir(scapy.all):
if fuzz_class.startswith('ICMPv6'):
fuzz_cmd = ("from scapy.all import * ;"
"scapy.all.send(IPv6(dst='%s')/fuzz(%s()),count=%u)" %
(self.FAUCET_VIPV6.ip, fuzz_class, packets))
out, start, too_long = '', time.time(), 30 # seconds
popen = first_host.popen('python3', '-c', fuzz_cmd)
for _, line in pmonitor({first_host: popen}):
out += line
if time.time() - start > too_long:
note('stopping', fuzz_class, 'after >', too_long, 'seconds')
note('output was:', out)
popen.terminate()
abort = True
break
popen.wait()
if 'Sent %u packets' % packets in out:
count += packets
elapsed = time.time() - start
note('sent', packets, fuzz_class, 'packets in %.2fs' % elapsed)
fuzz_success = True
if abort:
break
note('successfully sent', count, 'packets')
self.assertTrue(fuzz_success)
note('pinging', first_host)
self.one_ipv6_controller_ping(first_host)
note('test_fuzz_controller() complete')
class FaucetUntaggedIPv6ControlPlaneTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["fc00::1:254/112"]
"""
CONFIG = """
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
def test_fping_controller(self):
first_host = self.hosts_name_ordered()[0]
self.add_host_ipv6_address(first_host, 'fc00::1:1/112')
self.one_ipv6_controller_ping(first_host)
# Try 64 byte icmp6 packets
self.verify_controller_fping(first_host, self.FAUCET_VIPV6)
# Try 128 byte icmp6 packets
self.verify_controller_fping(first_host, self.FAUCET_VIPV6, size=128)
class FaucetTaggedAndUntaggedDiffVlanTest(FaucetTest):
N_TAGGED = 2
N_UNTAGGED = 4
LINKS_PER_HOST = 1
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
101:
description: "untagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
%(port_2)d:
tagged_vlans: [100]
%(port_3)d:
native_vlan: 101
%(port_4)d:
native_vlan: 101
"""
def setUp(self): # pylint: disable=invalid-name
super(FaucetTaggedAndUntaggedDiffVlanTest, self).setUp()
self.topo = self.topo_class(
self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
n_tagged=2, n_untagged=2, links_per_host=self.LINKS_PER_HOST,
hw_dpid=self.hw_dpid)
self.start_net()
def test_separate_untagged_tagged(self):
tagged_host_pair = self.hosts_name_ordered()[:2]
untagged_host_pair = self.hosts_name_ordered()[2:]
self.verify_vlan_flood_limited(
tagged_host_pair[0], tagged_host_pair[1], untagged_host_pair[0])
self.verify_vlan_flood_limited(
untagged_host_pair[0], untagged_host_pair[1], tagged_host_pair[0])
# hosts within VLANs can ping each other
self.retry_net_ping(hosts=tagged_host_pair)
self.retry_net_ping(hosts=untagged_host_pair)
# hosts cannot ping hosts in other VLANs
self.assertEqual(
100, self.ping([tagged_host_pair[0], untagged_host_pair[0]]))
class FaucetUntaggedACLTest(FaucetUntaggedTest):
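    """Test a port ACL that blocks TCP port 5001 and allows port 5002."""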
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5002
actions:
allow: 1
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5001
actions:
allow: 0
- rule:
actions:
allow: 1
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_port5001_blocked(self):
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_blocked(5001, first_host, second_host)
def test_port5002_notblocked(self):
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_notblocked(5002, first_host, second_host)
class FaucetUntaggedEgressACLTest(FaucetUntaggedTest):
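    """Test a VLAN egress ACL (acl_out) that blocks TCP port 5001 and allows port 5002."""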
REQUIRES_METADATA = True
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acl_out: 1
acls:
1:
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5002
actions:
allow: 1
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5001
actions:
allow: 0
- rule:
actions:
allow: 1
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_port5001_blocked(self):
egress_acl_table = self.scrape_prometheus_var(
'faucet_config_table_names',
labels={'table_name': 'egress_acl'}
)
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_blocked(
5001, first_host, second_host, table_id=egress_acl_table)
self.ping_all_when_learned()
self.verify_tp_dst_blocked(
5001, first_host, second_host, table_id=egress_acl_table)
def test_port5002_notblocked(self):
egress_acl_table = self.scrape_prometheus_var(
'faucet_config_table_names',
labels={'table_name': 'egress_acl'}
)
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_notblocked(
5002, first_host, second_host, table_id=egress_acl_table)
class FaucetUntaggedDPACLTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5002
actions:
allow: 1
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5001
actions:
allow: 0
- rule:
actions:
allow: 1
"""
CONFIG = """
dp_acls: [1]
""" + CONFIG_BOILER_UNTAGGED
def test_port5001_blocked(self):
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_blocked(5001, first_host, second_host)
def test_port5002_notblocked(self):
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_notblocked(5002, first_host, second_host)
class FaucetUntaggedNoReconfACLTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5001
actions:
allow: 0
- rule:
actions:
allow: 1
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
opstatus_reconf: False
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
matches = {
'in_port': int(self.port_map['port_1']),
'tcp_dst': 5001,
'eth_type': IPV4_ETH,
'ip_proto': 6}
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_blocked(5001, first_host, second_host)
self.wait_until_matching_flow(
matches, table_id=self._PORT_ACL_TABLE, actions=[])
self.set_port_down(self.port_map['port_1'])
self.wait_until_matching_flow(
matches, table_id=self._PORT_ACL_TABLE, actions=[])
self.set_port_up(self.port_map['port_1'])
self.ping_all_when_learned()
self.verify_tp_dst_blocked(5001, first_host, second_host)
self.wait_until_matching_flow(
matches, table_id=self._PORT_ACL_TABLE, actions=[])
class FaucetUntaggedACLTcpMaskTest(FaucetUntaggedACLTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5002
actions:
allow: 1
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5001
actions:
allow: 0
- rule:
dl_type: 0x800
ip_proto: 6
# Match packets > 1023
tcp_dst: 1024/1024
actions:
allow: 0
- rule:
actions:
allow: 1
"""
def test_port_gt1023_blocked(self):
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_blocked(1024, first_host, second_host, mask=1024)
self.verify_tp_dst_notblocked(1023, first_host, second_host, table_id=None)
class FaucetUntaggedVLANACLTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
acls:
1:
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5001
actions:
allow: 0
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5002
actions:
allow: 1
- rule:
actions:
allow: 1
vlans:
100:
description: "untagged"
acl_in: 1
"""
CONFIG = CONFIG_BOILER_UNTAGGED
def test_port5001_blocked(self):
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_blocked(
5001, first_host, second_host, table_id=self._VLAN_ACL_TABLE)
def test_port5002_notblocked(self):
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_notblocked(
5002, first_host, second_host, table_id=self._VLAN_ACL_TABLE)
class FaucetUntaggedOutputOnlyTest(FaucetUntaggedTest):
CONFIG = """
interfaces:
%(port_1)d:
output_only: True
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
self.wait_until_matching_flow(
{'in_port': int(self.port_map['port_1'])},
table_id=self._VLAN_TABLE,
actions=[])
first_host, second_host, third_host = self.hosts_name_ordered()[:3]
self.assertEqual(100.0, self.ping((first_host, second_host)))
self.assertEqual(0, self.ping((third_host, second_host)))
class FaucetUntaggedACLMirrorTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
acls:
1:
- rule:
actions:
allow: 1
mirror: %(port_3)d
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
acl_in: 1
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host, mirror_host = self.hosts_name_ordered()[0:3]
self.verify_ping_mirrored(first_host, second_host, mirror_host)
def test_eapol_mirrored(self):
first_host, second_host, mirror_host = self.hosts_name_ordered()[0:3]
self.verify_eapol_mirrored(first_host, second_host, mirror_host)
class FaucetUntaggedACLOutputMirrorTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
acls:
1:
- rule:
actions:
allow: 1
output:
ports: [%(port_3)d]
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
acl_in: 1
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host, mirror_host = self.hosts_name_ordered()[0:3]
self.verify_ping_mirrored(first_host, second_host, mirror_host)
class FaucetUntaggedACLMirrorDefaultAllowTest(FaucetUntaggedACLMirrorTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
acls:
1:
- rule:
actions:
mirror: %(port_3)d
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
acl_in: 1
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
class FaucetMultiOutputTest(FaucetUntaggedTest):
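    """Test an ACL output action that sends packets to multiple ports across VLANs."""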
CONFIG_GLOBAL = """
vlans:
100:
200:
acls:
multi_out:
- rule:
actions:
output:
ports: [%(port_2)d, %(port_3)d]
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: multi_out
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 200
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host, third_host, fourth_host = self.hosts_name_ordered()[0:4]
tcpdump_filter = ('icmp')
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))])
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
tcpdump_txt = self.tcpdump_helper(
third_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (third_host.IP(), '01:02:03:04:05:06')),
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, third_host.IP())))])
self.assertTrue(re.search(
'%s: ICMP echo request' % third_host.IP(), tcpdump_txt))
tcpdump_txt = self.tcpdump_helper(
fourth_host, tcpdump_filter, [
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, fourth_host.IP())))])
self.assertFalse(re.search(
'%s: ICMP echo request' % fourth_host.IP(), tcpdump_txt))
class FaucetUntaggedOutputTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
acls:
1:
- rule:
dl_dst: "01:02:03:04:05:06"
actions:
output:
vlan_vid: 123
set_fields:
- eth_dst: "06:06:06:06:06:06"
port: %(port_2)d
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
        # we expect to see the rewritten destination address and VLAN
tcpdump_filter = ('icmp and ether dst 06:06:06:06:06:06')
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))])
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
self.assertTrue(re.search(
'vlan 123', tcpdump_txt))
class FaucetUntaggedMultiVlansOutputTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
acls:
1:
- rule:
dl_dst: "01:02:03:04:05:06"
actions:
output:
set_fields:
- eth_dst: "06:06:06:06:06:06"
vlan_vids: [123, 456]
port: %(port_2)d
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
        # we expect to see the rewritten destination address and both VLANs
tcpdump_filter = 'vlan'
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))])
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
self.assertTrue(re.search(
'vlan 456.+vlan 123', tcpdump_txt))
class FaucetUntaggedMultiConfVlansOutputTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
acls:
1:
- rule:
dl_dst: "01:02:03:04:05:06"
actions:
output:
set_fields:
- eth_dst: "06:06:06:06:06:06"
vlan_vids: [{vid: 123, eth_type: 0x88a8}, 456]
port: %(port_2)d
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
        # we expect to see the rewritten destination address and QinQ VLAN tags
tcpdump_filter = 'ether proto 0x88a8'
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))],
packets=1)
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt), msg=tcpdump_txt)
self.assertTrue(re.search(
'vlan 456.+ethertype 802.1Q-QinQ, vlan 123', tcpdump_txt), msg=tcpdump_txt)
class FaucetUntaggedMirrorTest(FaucetUntaggedTest):
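    """Test mirroring of port 1 traffic to port 3, including broadcast traffic."""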
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: 100
%(port_3)d:
# port 3 will mirror port 1
mirror: %(port_1)d
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host, mirror_host = self.hosts_name_ordered()[0:3]
self.flap_all_switch_ports()
self.verify_ping_mirrored(first_host, second_host, mirror_host)
self.verify_bcast_ping_mirrored(first_host, second_host, mirror_host)
first_host_ip = ipaddress.ip_address(first_host.IP())
second_host_ip = ipaddress.ip_address(second_host.IP())
self.verify_iperf_min(
((first_host, self.port_map['port_1']),
(second_host, self.port_map['port_2'])),
MIN_MBPS, first_host_ip, second_host_ip,
sync_counters_func=lambda: self.one_ipv4_ping(first_host, second_host_ip))
class FaucetUntaggedMultiMirrorTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: 100
%(port_3)d:
output_only: True
%(port_4)d:
output_only: True
"""
def test_untagged(self):
first_host, second_host, mirror_host = self.hosts_name_ordered()[:3]
ping_pairs = (
(first_host, second_host),
(second_host, first_host))
self.flap_all_switch_ports()
self.change_port_config(
self.port_map['port_3'], 'mirror',
[self.port_map['port_1'], self.port_map['port_2']],
restart=True, cold_start=False, hup=True)
self.verify_ping_mirrored_multi(
ping_pairs, mirror_host, both_mirrored=True)
class FaucetUntaggedMultiMirrorSepTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: 100
%(port_3)d:
mirror: %(port_1)d
%(port_4)d:
mirror: %(port_1)d
"""
def test_untagged(self):
self.flap_all_switch_ports()
        # Make sure both mirror ports receive traffic mirrored from port 1
first_host, second_host = self.hosts_name_ordered()[0:2]
mirror_host = self.hosts_name_ordered()[2]
self.verify_ping_mirrored(first_host, second_host, mirror_host)
mirror_host = self.hosts_name_ordered()[3]
self.verify_ping_mirrored(first_host, second_host, mirror_host)
class FaucetTaggedTest(FaucetTest):
N_UNTAGGED = 0
N_TAGGED = 4
LINKS_PER_HOST = 1
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
"""
CONFIG = CONFIG_TAGGED_BOILER
def setUp(self): # pylint: disable=invalid-name
super(FaucetTaggedTest, self).setUp()
self.topo = self.topo_class(
self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
n_tagged=4, links_per_host=self.LINKS_PER_HOST,
hw_dpid=self.hw_dpid)
self.start_net()
def test_tagged(self):
self.ping_all_when_learned()
class FaucetTaggedMirrorTest(FaucetTaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
%(port_2)d:
tagged_vlans: [100]
%(port_3)d:
# port 3 will mirror port 1
mirror: %(port_1)d
%(port_4)d:
tagged_vlans: [100]
"""
def test_tagged(self):
first_host, second_host, mirror_host = self.hosts_name_ordered()[0:3]
self.flap_all_switch_ports()
self.verify_ping_mirrored(first_host, second_host, mirror_host)
self.verify_bcast_ping_mirrored(first_host, second_host, mirror_host)
first_host_ip = ipaddress.ip_address(first_host.IP())
second_host_ip = ipaddress.ip_address(second_host.IP())
self.verify_iperf_min(
((first_host, self.port_map['port_1']),
(second_host, self.port_map['port_2'])),
MIN_MBPS, first_host_ip, second_host_ip,
sync_counters_func=lambda: self.one_ipv4_ping(first_host, second_host_ip))
class FaucetTaggedVLANPCPTest(FaucetTaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
acls:
1:
- rule:
vlan_vid: 100
vlan_pcp: 1
actions:
output:
set_fields:
- vlan_pcp: 2
allow: 1
- rule:
actions:
allow: 1
"""
CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
acl_in: 1
%(port_2)d:
tagged_vlans: [100]
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
tagged_vlans: [100]
"""
def test_tagged(self):
first_host, second_host = self.hosts_name_ordered()[:2]
self.quiet_commands(
first_host,
['ip link set %s type vlan egress %u:1' % (
first_host.defaultIntf(), i) for i in range(0, 8)])
self.one_ipv4_ping(first_host, second_host.IP())
self.wait_nonzero_packet_count_flow(
{'vlan_vid': 100, 'vlan_pcp': 1}, table_id=self._PORT_ACL_TABLE)
tcpdump_filter = 'ether dst %s' % second_host.MAC()
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(
'ping -c3 %s' % second_host.IP())], root_intf=True, packets=1)
self.assertTrue(re.search('vlan 100, p 2,', tcpdump_txt))
class FaucetTaggedGlobalIPv4RouteTest(FaucetTaggedTest):
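    """Test IPv4 routing between many VLANs joined by a global router (global_vlan)."""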
def _vids(): # pylint: disable=no-method-argument,no-self-use
return list(range(100, 148))
def global_vid(): # pylint: disable=no-method-argument,no-self-use
return 2047
IPV = 4
NETPREFIX = 24
ETH_TYPE = IPV4_ETH
NETNS = True
VIDS = _vids()
GLOBAL_VID = global_vid()
STR_VIDS = [str(i) for i in _vids()]
NEW_VIDS = VIDS[1:]
@staticmethod
def netbase(vid, host):
return ipaddress.ip_interface('192.168.%u.%u' % (vid, host))
def fping(self, macvlan_int, ipg):
return 'fping %s -c1 -t1 -I%s %s > /dev/null 2> /dev/null' % (
self.FPING_ARGS_SHORT, macvlan_int, ipg)
def fib_table(self):
return self._IPV4_FIB_TABLE
def macvlan_ping(self, host, ipa, macvlan_int):
return self.one_ipv4_ping(host, ipa, intf=macvlan_int)
def run_ip(self, args):
return 'ip -%u %s' % (self.IPV, args)
CONFIG_GLOBAL = """
routers:
global:
vlans: [%s]
vlans:
%s
""" % (
','.join(STR_VIDS),
'\n'.join(['\n'.join(
(' %u:',
' description: "tagged"',
' faucet_vips: ["192.168.%u.254/24"]')) % (i, i) for i in VIDS]))
CONFIG = """
global_vlan: %u
proactive_learn_v4: True
max_wildcard_table_size: 1024
table_sizes:
vlan: %u
vip: %u
flood: %u
interfaces:
%s:
mirror: %s
%s:
native_vlan: 99
tagged_vlans: [%s]
hairpin_unicast: True
%s:
native_vlan: 99
tagged_vlans: [%s]
hairpin_unicast: True
""" % (global_vid(),
len(STR_VIDS) * 3, # VLAN
len(STR_VIDS) * 2, # VIP
len(STR_VIDS) * 12, # Flood
'%(port_3)d', '%(port_1)d', '%(port_1)d',
','.join(STR_VIDS), '%(port_2)d', ','.join(STR_VIDS))
def configure_mesh(self, first_host, second_host):
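        """Set up per-VLAN macvlan interfaces, addresses, routes and neighbors on both hosts."""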
hosts = (first_host, second_host)
required_ipds = set()
ipd_to_macvlan = {}
for i, host in enumerate(hosts, start=1):
setup_commands = []
for vid in self.NEW_VIDS:
vlan_int = '%s.%u' % (host.intf_root_name, vid)
macvlan_int = 'macvlan%u' % vid
ipa = self.netbase(vid, i)
ipg = self.netbase(vid, 254)
ipd = self.netbase(vid, 253)
required_ipds.add(str(ipd.ip))
ipd_to_macvlan[str(ipd.ip)] = (macvlan_int, host)
setup_commands.extend([
self.run_ip('link add link %s name %s type vlan id %u' % (
host.intf_root_name, vlan_int, vid)),
self.run_ip('link set dev %s up' % vlan_int),
self.run_ip('link add %s link %s type macvlan mode vepa' % (macvlan_int, vlan_int)),
self.run_ip('link set dev %s up' % macvlan_int),
self.run_ip('address add %s/%u dev %s' % (ipa.ip, self.NETPREFIX, macvlan_int)),
self.run_ip('route add default via %s table %u' % (ipg.ip, vid)),
self.run_ip('rule add from %s table %u priority 100' % (ipa, vid)),
# stimulate learning attempts for down host.
self.run_ip('neigh add %s lladdr %s dev %s' % (ipd.ip, self.FAUCET_MAC, macvlan_int))])
                # each host routes via FAUCET to the other host in the same
                # connected subnet, so that routing is exercised.
for j, _ in enumerate(hosts, start=1):
if j != i:
other_ip = self.netbase(vid, j)
setup_commands.append(
self.run_ip('route add %s via %s table %u' % (other_ip, ipg.ip, vid)))
for ipa in (ipg.ip, ipd.ip):
setup_commands.append(self.fping(macvlan_int, ipa))
self.quiet_commands(host, setup_commands)
return required_ipds, ipd_to_macvlan
def verify_drop_rules(self, required_ipds, ipd_to_macvlan):
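        """Verify FIB flows with no actions (drops) are installed for unresolvable neighbors."""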
for _ in range(10):
if not required_ipds:
break
drop_rules = self.get_matching_flows_on_dpid(
self.dpid, {'dl_type': self.ETH_TYPE, 'dl_vlan': str(self.GLOBAL_VID)},
table_id=self.fib_table(), actions=[])
if drop_rules:
for drop_rule in drop_rules:
match = drop_rule['match']
del match['dl_type']
del match['dl_vlan']
self.assertEqual(1, len(match))
ipd = list(match.values())[0].split('/')[0]
if ipd in required_ipds:
required_ipds.remove(ipd)
for ipd in required_ipds:
macvlan_int, host = ipd_to_macvlan[ipd]
host.cmd(self.fping(macvlan_int, ipd))
time.sleep(1)
self.assertFalse(required_ipds, msg='no drop rules for %s' % required_ipds)
def verify_routing_performance(self, first_host, second_host):
for first_host_ip, second_host_ip in (
(self.netbase(self.NEW_VIDS[0], 1), self.netbase(self.NEW_VIDS[0], 2)),
(self.netbase(self.NEW_VIDS[0], 1), self.netbase(self.NEW_VIDS[-1], 2)),
(self.netbase(self.NEW_VIDS[-1], 1), self.netbase(self.NEW_VIDS[0], 2))):
self.verify_iperf_min(
((first_host, self.port_map['port_1']),
(second_host, self.port_map['port_2'])),
MIN_MBPS, first_host_ip.ip, second_host_ip.ip,
sync_counters_func=lambda: self.scapy_bcast(first_host))
def verify_l3_mesh(self, first_host, second_host):
for vid in self.NEW_VIDS:
macvlan_int = 'macvlan%u' % vid
first_host_ip = self.netbase(vid, 1)
second_host_ip = self.netbase(vid, 2)
self.macvlan_ping(first_host, second_host_ip.ip, macvlan_int)
self.macvlan_ping(second_host, first_host_ip.ip, macvlan_int)
def verify_l3_hairpin(self, first_host):
macvlan1_int = 'macvlan%u' % self.NEW_VIDS[0]
macvlan2_int = 'macvlan%u' % self.NEW_VIDS[1]
macvlan2_ip = self.netbase(self.NEW_VIDS[1], 1)
macvlan1_gw = self.netbase(self.NEW_VIDS[0], 254)
macvlan2_gw = self.netbase(self.NEW_VIDS[1], 254)
netns = self.hostns(first_host)
setup_cmds = []
setup_cmds.extend(
[self.run_ip('link set %s netns %s' % (macvlan2_int, netns))])
for exec_cmd in (
(self.run_ip('address add %s/%u dev %s' % (macvlan2_ip.ip, self.NETPREFIX, macvlan2_int)),
self.run_ip('link set %s up' % macvlan2_int),
self.run_ip('route add default via %s' % macvlan2_gw.ip))):
setup_cmds.append('ip netns exec %s %s' % (netns, exec_cmd))
setup_cmds.append(
self.run_ip('route add %s via %s' % (macvlan2_ip, macvlan1_gw.ip)))
self.quiet_commands(first_host, setup_cmds)
self.macvlan_ping(first_host, macvlan2_ip.ip, macvlan1_int)
def test_tagged(self):
first_host, second_host, mirror_host = self.hosts_name_ordered()[:3]
required_ipds, ipd_to_macvlan = self.configure_mesh(first_host, second_host)
self.verify_drop_rules(required_ipds, ipd_to_macvlan)
self.verify_routing_performance(first_host, second_host)
self.verify_l3_mesh(first_host, second_host)
self.verify_l3_hairpin(first_host)
self.verify_ping_mirrored(first_host, second_host, mirror_host)
self.verify_bcast_ping_mirrored(first_host, second_host, mirror_host)
class FaucetTaggedGlobalIPv6RouteTest(FaucetTaggedGlobalIPv4RouteTest):
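    """IPv6 version of the global routing VLAN test."""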
IPV = 6
NETPREFIX = 112
ETH_TYPE = IPV6_ETH
def _vids(): # pylint: disable=no-method-argument,no-self-use
return list(range(100, 103))
def global_vid(): # pylint: disable=no-method-argument,no-self-use
return 2047
VIDS = _vids()
GLOBAL_VID = global_vid()
STR_VIDS = [str(i) for i in _vids()]
NEW_VIDS = VIDS[1:]
def netbase(self, vid, host):
return ipaddress.ip_interface('fc00::%u:%u' % (vid, host))
def fib_table(self):
return self._IPV6_FIB_TABLE
def fping(self, macvlan_int, ipg):
return 'fping6 %s -c1 -t1 -I%s %s > /dev/null 2> /dev/null' % (
self.FPING_ARGS_SHORT, macvlan_int, ipg)
def macvlan_ping(self, host, ipa, macvlan_int):
return self.one_ipv6_ping(host, ipa, intf=macvlan_int)
def run_ip(self, args):
return 'ip -%u %s' % (self.IPV, args)
CONFIG_GLOBAL = """
routers:
global:
vlans: [%s]
vlans:
%s
""" % (
','.join(STR_VIDS),
'\n'.join(['\n'.join(
(' %u:',
' description: "tagged"',
' faucet_vips: ["fc00::%u:254/112"]')) % (i, i) for i in VIDS]))
CONFIG = """
global_vlan: %u
proactive_learn_v6: True
max_wildcard_table_size: 512
table_sizes:
vlan: 256
vip: 128
flood: 384
interfaces:
%s:
mirror: %s
%s:
native_vlan: 99
tagged_vlans: [%s]
hairpin_unicast: True
%s:
native_vlan: 99
tagged_vlans: [%s]
hairpin_unicast: True
""" % (global_vid(), '%(port_3)d', '%(port_1)d', '%(port_1)d',
','.join(STR_VIDS), '%(port_2)d', ','.join(STR_VIDS))
class FaucetTaggedScaleTest(FaucetTaggedTest):
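    """Test host learning across many tagged VLANs on the same ports."""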
def _vids(): # pylint: disable=no-method-argument,no-self-use
return list(range(100, 148))
VIDS = _vids()
STR_VIDS = [str(i) for i in _vids()]
NEW_VIDS = VIDS[1:]
CONFIG_GLOBAL = """
vlans:
""" + '\n'.join(['\n'.join(
(' %u:',
' description: "tagged"')) % i for i in VIDS])
CONFIG = """
interfaces:
%s:
tagged_vlans: [%s]
%s:
tagged_vlans: [%s]
%s:
tagged_vlans: [%s]
%s:
tagged_vlans: [%s]
""" % ('%(port_1)d', ','.join(STR_VIDS),
'%(port_2)d', ','.join(STR_VIDS),
'%(port_3)d', ','.join(STR_VIDS),
'%(port_4)d', ','.join(STR_VIDS))
def test_tagged(self):
self.ping_all_when_learned()
for host in self.hosts_name_ordered():
setup_commands = []
for vid in self.NEW_VIDS:
vlan_int = '%s.%u' % (host.intf_root_name, vid)
setup_commands.extend([
'ip link add link %s name %s type vlan id %u' % (
host.intf_root_name, vlan_int, vid),
'ip link set dev %s up' % vlan_int])
self.quiet_commands(host, setup_commands)
for host in self.hosts_name_ordered():
rdisc6_commands = []
for vid in self.NEW_VIDS:
vlan_int = '%s.%u' % (host.intf_root_name, vid)
rdisc6_commands.append(
'rdisc6 -r2 -w1 -q %s 2> /dev/null' % vlan_int)
self.quiet_commands(host, rdisc6_commands)
for vlan in self.NEW_VIDS:
            vlan_int = '%s.%u' % (host.intf_root_name, vlan)
for _ in range(3):
for host in self.hosts_name_ordered():
self.quiet_commands(
host,
['rdisc6 -r2 -w1 -q %s 2> /dev/null' % vlan_int])
vlan_hosts_learned = self.scrape_prometheus_var(
'vlan_hosts_learned', {'vlan': str(vlan)})
if vlan_hosts_learned == len(self.hosts_name_ordered()):
break
time.sleep(1)
self.assertGreater(
vlan_hosts_learned, 1,
msg='not all VLAN %u hosts learned (%u)' % (vlan, vlan_hosts_learned))
class FaucetTaggedBroadcastTest(FaucetTaggedTest):
def test_tagged(self):
super(FaucetTaggedBroadcastTest, self).test_tagged()
self.verify_broadcast()
self.verify_no_bcast_to_self()
class FaucetTaggedExtLoopProtectTest(FaucetTaggedTest):
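    """Test that loop_protect_external ports cannot flood to each other."""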
CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
loop_protect_external: True
%(port_2)d:
tagged_vlans: [100]
loop_protect_external: True
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
tagged_vlans: [100]
"""
def test_tagged(self):
ext_port1, ext_port2, int_port1, int_port2 = self.hosts_name_ordered()
self.verify_broadcast((ext_port1, ext_port2), False)
self.verify_broadcast((int_port1, int_port2), True)
self.verify_unicast((int_port1, int_port2), True)
class FaucetTaggedWithUntaggedTest(FaucetTaggedTest):
N_UNTAGGED = 0
N_TAGGED = 4
LINKS_PER_HOST = 1
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
200:
description: "untagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 200
tagged_vlans: [100]
%(port_2)d:
native_vlan: 200
tagged_vlans: [100]
%(port_3)d:
native_vlan: 200
tagged_vlans: [100]
%(port_4)d:
native_vlan: 200
tagged_vlans: [100]
"""
def test_tagged(self):
self.ping_all_when_learned()
native_ips = [
ipaddress.ip_interface('10.99.99.%u/24' % (i + 1)) for i in range(len(self.hosts_name_ordered()))]
for native_ip, host in zip(native_ips, self.hosts_name_ordered()):
self.host_ipv4_alias(host, native_ip, intf=host.intf_root_name)
for own_native_ip, host in zip(native_ips, self.hosts_name_ordered()):
for native_ip in native_ips:
if native_ip != own_native_ip:
self.one_ipv4_ping(host, native_ip.ip, intf=host.intf_root_name)
class FaucetTaggedSwapVidMirrorTest(FaucetTaggedTest):
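    """Test an ACL that mirrors to port 3 and outputs with the VLAN VID swapped to 101."""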
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
101:
description: "tagged"
acls:
1:
- rule:
vlan_vid: 100
actions:
mirror: %(port_3)d
force_port_vlan: 1
output:
swap_vid: 101
allow: 1
"""
CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
acl_in: 1
%(port_2)d:
tagged_vlans: [101]
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
tagged_vlans: [100]
"""
def test_tagged(self):
first_host, second_host, third_host = self.hosts_name_ordered()[:3]
def test_acl(tcpdump_host, tcpdump_filter):
tcpdump_txt = self.tcpdump_helper(
tcpdump_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))],
root_intf=True)
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
self.assertTrue(re.search(
tcpdump_filter, tcpdump_txt))
# Saw swapped VID on second host
test_acl(second_host, 'vlan 101')
# Saw original VID on mirror host
test_acl(third_host, 'vlan 100')
class FaucetTaggedSwapVidOutputTest(FaucetTaggedTest):
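    """Test an ACL output action that swaps the VLAN VID and outputs to a specific port."""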
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
unicast_flood: False
101:
description: "tagged"
unicast_flood: False
acls:
1:
- rule:
vlan_vid: 100
actions:
output:
swap_vid: 101
port: %(port_2)d
"""
CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
acl_in: 1
%(port_2)d:
tagged_vlans: [101]
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
tagged_vlans: [100]
"""
def test_tagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
        # we expect to see the swapped VLAN VID
tcpdump_filter = 'vlan 101'
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))],
root_intf=True)
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
self.assertTrue(re.search(
'vlan 101', tcpdump_txt))
class FaucetTaggedPopVlansOutputTest(FaucetTaggedTest):
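    """Test an ACL output action that rewrites the destination MAC and pops the VLAN tag."""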
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
unicast_flood: False
acls:
1:
- rule:
vlan_vid: 100
dl_dst: "01:02:03:04:05:06"
actions:
output:
set_fields:
- eth_dst: "06:06:06:06:06:06"
pop_vlans: 1
port: %(port_2)d
"""
CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
acl_in: 1
%(port_2)d:
tagged_vlans: [100]
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
tagged_vlans: [100]
"""
def test_tagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
tcpdump_filter = 'not vlan and icmp and ether dst 06:06:06:06:06:06'
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
lambda: first_host.cmd(
' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))],
packets=10, root_intf=True)
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
class FaucetTaggedIPv4ControlPlaneTest(FaucetTaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
faucet_vips: ["10.0.0.254/24"]
"""
CONFIG = """
max_resolve_backoff_time: 1
""" + CONFIG_TAGGED_BOILER
def test_ping_controller(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
self.one_ipv4_ping(first_host, second_host.IP())
for host in first_host, second_host:
self.one_ipv4_controller_ping(host)
class FaucetTaggedIPv6ControlPlaneTest(FaucetTaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
faucet_vips: ["fc00::1:254/112"]
"""
CONFIG = """
max_resolve_backoff_time: 1
""" + CONFIG_TAGGED_BOILER
def test_ping_controller(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
self.add_host_ipv6_address(first_host, 'fc00::1:1/112')
self.add_host_ipv6_address(second_host, 'fc00::1:2/112')
self.one_ipv6_ping(first_host, 'fc00::1:2')
for host in first_host, second_host:
self.one_ipv6_controller_ping(host)
class FaucetTaggedICMPv6ACLTest(FaucetTaggedTest):
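    """Test an ACL matching ICMPv6 neighbor solicitation for a specific target address."""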
CONFIG_GLOBAL = """
acls:
1:
- rule:
dl_type: %u
vlan_vid: 100
ip_proto: 58
icmpv6_type: 135
ipv6_nd_target: "fc00::1:2"
actions:
output:
port: %s
- rule:
actions:
allow: 1
vlans:
100:
description: "tagged"
faucet_vips: ["fc00::1:254/112"]
""" % (IPV6_ETH, '%(port_2)d')
CONFIG = """
max_resolve_backoff_time: 1
interfaces:
%(port_1)d:
tagged_vlans: [100]
acl_in: 1
%(port_2)d:
tagged_vlans: [100]
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
tagged_vlans: [100]
"""
def test_icmpv6_acl_match(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
self.add_host_ipv6_address(first_host, 'fc00::1:1/112')
self.add_host_ipv6_address(second_host, 'fc00::1:2/112')
self.one_ipv6_ping(first_host, 'fc00::1:2')
self.wait_nonzero_packet_count_flow(
{'dl_type': IPV6_ETH, 'ip_proto': 58, 'icmpv6_type': 135,
'ipv6_nd_target': 'fc00::1:2'}, table_id=self._PORT_ACL_TABLE)
class FaucetTaggedIPv4RouteTest(FaucetTaggedTest):
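    """Test IPv4 static routes on a tagged VLAN across MAC swaps and cold starts."""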
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
faucet_vips: ["10.0.0.254/24"]
routes:
- route:
ip_dst: "10.0.1.0/24"
ip_gw: "10.0.0.1"
- route:
ip_dst: "10.0.2.0/24"
ip_gw: "10.0.0.2"
- route:
ip_dst: "10.0.3.0/24"
ip_gw: "10.0.0.2"
200:
description: "not used"
300:
description: "not used"
"""
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
interfaces:
%(port_1)d:
tagged_vlans: [100]
%(port_2)d:
tagged_vlans: [100]
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
native_vlan: 200
"""
def test_tagged(self):
self._enable_event_log()
host_pair = self.hosts_name_ordered()[:2]
first_host, second_host = host_pair
first_host_routed_ip = ipaddress.ip_interface('10.0.1.1/24')
second_host_routed_ip = ipaddress.ip_interface('10.0.2.1/24')
for _coldstart in range(2):
for _swaps in range(3):
self.verify_ipv4_routing(
first_host, first_host_routed_ip,
second_host, second_host_routed_ip)
self.swap_host_macs(first_host, second_host)
self.coldstart_conf()
        # changing a VLAN/port not involved in routing should be a warm start.
for vid in (300, 200):
self.change_port_config(
self.port_map['port_4'], 'native_vlan', vid,
restart=True, cold_start=False)
self.wait_until_matching_lines_from_file(
r'.+L3_LEARN.+10.0.0.[12].+', self.event_log)
class FaucetTaggedTargetedResolutionIPv4RouteTest(FaucetTaggedIPv4RouteTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
faucet_vips: ["10.0.0.254/24"]
targeted_gw_resolution: True
routes:
- route:
ip_dst: "10.0.1.0/24"
ip_gw: "10.0.0.1"
- route:
ip_dst: "10.0.2.0/24"
ip_gw: "10.0.0.2"
- route:
ip_dst: "10.0.3.0/24"
ip_gw: "10.0.0.2"
"""
class FaucetTaggedProactiveNeighborIPv4RouteTest(FaucetTaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
faucet_vips: ["10.0.0.254/24"]
"""
CONFIG = """
nd_neighbor_timeout: 2
max_resolve_backoff_time: 1
proactive_learn_v4: True
""" + CONFIG_TAGGED_BOILER
def test_tagged(self):
host_pair = self.hosts_name_ordered()[:2]
first_host, second_host = host_pair
first_host_alias_ip = ipaddress.ip_interface('10.0.0.99/24')
first_host_alias_host_ip = ipaddress.ip_interface(
ipaddress.ip_network(first_host_alias_ip.ip))
self.host_ipv4_alias(first_host, first_host_alias_ip)
self.add_host_route(second_host, first_host_alias_host_ip, self.FAUCET_VIPV4.ip)
self.one_ipv4_ping(second_host, first_host_alias_ip.ip)
self.assertGreater(
self.scrape_prometheus_var(
'vlan_neighbors', {'ipv': '4', 'vlan': '100'}),
1)
class FaucetTaggedProactiveNeighborIPv6RouteTest(FaucetTaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
faucet_vips: ["fc00::1:3/64"]
"""
CONFIG = """
nd_neighbor_timeout: 2
max_resolve_backoff_time: 1
proactive_learn_v6: True
""" + CONFIG_TAGGED_BOILER
def test_tagged(self):
host_pair = self.hosts_name_ordered()[:2]
first_host, second_host = host_pair
first_host_alias_ip = ipaddress.ip_interface('fc00::1:99/64')
faucet_vip_ip = ipaddress.ip_interface('fc00::1:3/126')
first_host_alias_host_ip = ipaddress.ip_interface(
ipaddress.ip_network(first_host_alias_ip.ip))
self.add_host_ipv6_address(first_host, ipaddress.ip_interface('fc00::1:1/64'))
# We use a narrower mask to force second_host to use the /128 route,
# since otherwise it would realize :99 is directly connected via ND and send direct.
self.add_host_ipv6_address(second_host, ipaddress.ip_interface('fc00::1:2/126'))
self.add_host_ipv6_address(first_host, first_host_alias_ip)
self.add_host_route(second_host, first_host_alias_host_ip, faucet_vip_ip.ip)
self.one_ipv6_ping(second_host, first_host_alias_ip.ip)
self.assertGreater(
self.scrape_prometheus_var(
'vlan_neighbors', {'ipv': '6', 'vlan': '100'}),
1)
class FaucetUntaggedIPv4GlobalInterVLANRouteTest(FaucetUntaggedTest):
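    """Test IPv4 routing between VLANs joined by a global router, with routes learned over BGP."""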
FAUCET_MAC2 = '0e:00:00:00:00:02'
CONFIG_GLOBAL = """
vlans:
100:
faucet_vips: ["10.100.0.254/24"]
200:
faucet_vips: ["10.200.0.254/24"]
faucet_mac: "%s"
""" % FAUCET_MAC2 + """
routers:
global:
vlans: [100, 200]
bgp:
as: 1
connect_mode: "passive"
port: %(bgp_port)d
routerid: "1.1.1.1"
server_addresses: ["127.0.0.1", "::1"]
neighbor_addresses: ["127.0.0.1", "::1"]
vlan: 100
""" + """
neighbor_as: %u
""" % PEER_BGP_AS
CONFIG = """
global_vlan: 300
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
proactive_learn_v4: True
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: 200
%(port_3)d:
native_vlan: 200
%(port_4)d:
native_vlan: 200
"""
exabgp_peer_conf = """
static {
route 10.99.99.0/24 next-hop 10.200.0.1 local-preference 100;
route 10.0.5.0/24 next-hop 127.0.0.1;
}
"""
exabgp_log = None
exabgp_err = None
config_ports = {'bgp_port': None}
def pre_start_net(self):
exabgp_conf = self.get_exabgp_conf(
mininet_test_util.LOCALHOST, self.exabgp_peer_conf)
self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)
def test_untagged(self):
first_host_ip = ipaddress.ip_interface('10.100.0.1/24')
first_faucet_vip = ipaddress.ip_interface('10.100.0.254/24')
second_host_ip = ipaddress.ip_interface('10.200.0.1/24')
second_faucet_vip = ipaddress.ip_interface('10.200.0.254/24')
first_host, second_host = self.hosts_name_ordered()[:2]
first_host.setIP(str(first_host_ip.ip), prefixLen=24)
second_host.setIP(str(second_host_ip.ip), prefixLen=24)
self.add_host_route(first_host, second_host_ip, first_faucet_vip.ip)
self.add_host_route(second_host, first_host_ip, second_faucet_vip.ip)
self.one_ipv4_ping(first_host, second_host_ip.ip)
self.one_ipv4_ping(second_host, first_host_ip.ip)
self.assertEqual(
self._ip_neigh(first_host, first_faucet_vip.ip, 4), self.FAUCET_MAC)
self.assertEqual(
self._ip_neigh(second_host, second_faucet_vip.ip, 4), self.FAUCET_MAC2)
self.wait_for_route_as_flow(
second_host.MAC(), ipaddress.IPv4Network('10.99.99.0/24'), vlan_vid=300)
self.verify_invalid_bgp_route(r'.+10.0.5.0\/24.+because nexthop not in VLAN.+')
class FaucetUntaggedIPv4InterVLANRouteTest(FaucetUntaggedTest):
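    """Test IPv4 inter-VLAN routing, including changing the VID of a routed VLAN."""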
FAUCET_MAC2 = '0e:00:00:00:00:02'
CONFIG_GLOBAL = """
vlans:
100:
faucet_vips: ["10.100.0.254/24", "169.254.1.1/24"]
vlanb:
vid: 200
faucet_vips: ["10.200.0.254/24", "169.254.2.1/24"]
faucet_mac: "%s"
routers:
router-1:
vlans: [100, vlanb]
""" % FAUCET_MAC2
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
proactive_learn_v4: True
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: vlanb
%(port_3)d:
native_vlan: vlanb
%(port_4)d:
native_vlan: vlanb
"""
def test_untagged(self):
first_host_ip = ipaddress.ip_interface('10.100.0.1/24')
first_faucet_vip = ipaddress.ip_interface('10.100.0.254/24')
second_host_ip = ipaddress.ip_interface('10.200.0.1/24')
second_faucet_vip = ipaddress.ip_interface('10.200.0.254/24')
first_host, second_host = self.hosts_name_ordered()[:2]
first_host.setIP(str(first_host_ip.ip), prefixLen=24)
second_host.setIP(str(second_host_ip.ip), prefixLen=24)
self.add_host_route(first_host, second_host_ip, first_faucet_vip.ip)
self.add_host_route(second_host, first_host_ip, second_faucet_vip.ip)
for vlanb_vid in (300, 200):
self.one_ipv4_ping(first_host, second_host_ip.ip)
self.one_ipv4_ping(second_host, first_host_ip.ip)
self.assertEqual(
self._ip_neigh(first_host, first_faucet_vip.ip, 4), self.FAUCET_MAC)
self.assertEqual(
self._ip_neigh(second_host, second_faucet_vip.ip, 4), self.FAUCET_MAC2)
self.change_vlan_config(
'vlanb', 'vid', vlanb_vid, restart=True, cold_start=True)
class FaucetUntaggedPortSwapIPv4InterVLANRouteTest(FaucetUntaggedTest):
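    """Test inter-VLAN routing still works after a routed VLAN's port is deleted and added on another port."""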
FAUCET_MAC2 = '0e:00:00:00:00:02'
CONFIG_GLOBAL = """
vlans:
vlana:
vid: 100
faucet_vips: ["10.100.0.254/24", "169.254.1.1/24"]
vlanb:
vid: 200
faucet_vips: ["10.200.0.254/24", "169.254.2.1/24"]
faucet_mac: "%s"
routers:
router-1:
vlans: [vlana, vlanb]
""" % FAUCET_MAC2
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
proactive_learn_v4: True
interfaces:
%(port_1)d:
native_vlan: vlana
%(port_2)d:
native_vlan: vlanb
"""
def test_untagged(self):
first_host_ip = ipaddress.ip_interface('10.100.0.1/24')
first_faucet_vip = ipaddress.ip_interface('10.100.0.254/24')
second_host_ip = ipaddress.ip_interface('10.200.0.1/24')
second_faucet_vip = ipaddress.ip_interface('10.200.0.254/24')
first_host, second_host, third_host = self.hosts_name_ordered()[:3]
first_host.setIP(str(first_host_ip.ip), prefixLen=24)
second_host.setIP(str(second_host_ip.ip), prefixLen=24)
self.add_host_route(first_host, second_host_ip, first_faucet_vip.ip)
self.add_host_route(second_host, first_host_ip, second_faucet_vip.ip)
self.one_ipv4_ping(first_host, second_host_ip.ip)
self.one_ipv4_ping(second_host, first_host_ip.ip)
self.assertEqual(
self._ip_neigh(first_host, first_faucet_vip.ip, 4), self.FAUCET_MAC)
self.assertEqual(
self._ip_neigh(second_host, second_faucet_vip.ip, 4), self.FAUCET_MAC2)
# Delete port 2
self.change_port_config(
self.port_map['port_2'], None, None,
restart=False, cold_start=False)
# Add port 3
self.add_port_config(
self.port_map['port_3'], {'native_vlan': 'vlanb'},
restart=True, cold_start=True)
third_host.setIP(str(second_host_ip.ip), prefixLen=24)
self.add_host_route(third_host, first_host_ip, second_faucet_vip.ip)
self.one_ipv4_ping(first_host, second_host_ip.ip)
self.one_ipv4_ping(third_host, first_host_ip.ip)
self.assertEqual(
self._ip_neigh(third_host, second_faucet_vip.ip, 4), self.FAUCET_MAC2)
class FaucetUntaggedExpireIPv4InterVLANRouteTest(FaucetUntaggedTest):
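    """Test that a route to a dead host expires and recovers once the host comes back up."""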
FAUCET_MAC2 = '0e:00:00:00:00:02'
CONFIG_GLOBAL = """
vlans:
100:
faucet_vips: ["10.100.0.254/24"]
vlanb:
vid: 200
faucet_vips: ["10.200.0.254/24"]
faucet_mac: "%s"
routers:
router-1:
vlans: [100, vlanb]
""" % FAUCET_MAC2
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
max_host_fib_retry_count: 2
proactive_learn_v4: True
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: vlanb
%(port_3)d:
native_vlan: vlanb
%(port_4)d:
native_vlan: vlanb
"""
def test_untagged(self):
first_host_ip = ipaddress.ip_interface('10.100.0.1/24')
first_faucet_vip = ipaddress.ip_interface('10.100.0.254/24')
second_host_ip = ipaddress.ip_interface('10.200.0.1/24')
second_faucet_vip = ipaddress.ip_interface('10.200.0.254/24')
first_host, second_host = self.hosts_name_ordered()[:2]
first_host.setIP(str(first_host_ip.ip), prefixLen=24)
second_host.setIP(str(second_host_ip.ip), prefixLen=24)
self.add_host_route(first_host, second_host_ip, first_faucet_vip.ip)
self.add_host_route(second_host, first_host_ip, second_faucet_vip.ip)
self.one_ipv4_ping(first_host, second_host_ip.ip)
self.one_ipv4_ping(second_host, first_host_ip.ip)
second_host.cmd('ifconfig %s down' % second_host.defaultIntf().name)
log_file = os.path.join(self.tmpdir, 'faucet.log')
expired_re = r'.+expiring dead route %s.+' % second_host_ip.ip
self.wait_until_matching_lines_from_file(expired_re, log_file)
second_host.cmd('ifconfig %s up' % second_host.defaultIntf().name)
self.add_host_route(second_host, first_host_ip, second_faucet_vip.ip)
self.one_ipv4_ping(second_host, first_host_ip.ip)
self.one_ipv4_ping(first_host, second_host_ip.ip)
class FaucetUntaggedIPv6InterVLANRouteTest(FaucetUntaggedTest):
FAUCET_MAC2 = '0e:00:00:00:00:02'
CONFIG_GLOBAL = """
vlans:
100:
faucet_vips: ["fc00::1:254/112", "fe80::1:254/112"]
vlanb:
vid: 200
faucet_vips: ["fc01::1:254/112", "fe80::2:254/112"]
faucet_mac: "%s"
routers:
router-1:
vlans: [100, vlanb]
""" % FAUCET_MAC2
CONFIG = """
nd_neighbor_timeout: 2
max_resolve_backoff_time: 1
proactive_learn_v6: True
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: vlanb
%(port_3)d:
native_vlan: vlanb
%(port_4)d:
native_vlan: vlanb
"""
def test_untagged(self):
host_pair = self.hosts_name_ordered()[:2]
first_host, second_host = host_pair
first_host_net = ipaddress.ip_interface('fc00::1:1/64')
second_host_net = ipaddress.ip_interface('fc01::1:1/64')
self.add_host_ipv6_address(first_host, first_host_net)
self.add_host_ipv6_address(second_host, second_host_net)
self.add_host_route(
first_host, second_host_net, self.FAUCET_VIPV6.ip)
self.add_host_route(
second_host, first_host_net, self.FAUCET_VIPV6_2.ip)
self.one_ipv6_ping(first_host, second_host_net.ip)
self.one_ipv6_ping(second_host, first_host_net.ip)
class FaucetUntaggedIPv4PolicyRouteTest(FaucetUntaggedTest):
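    """Test policy-based routing: an ACL swaps the VID to select which VLAN routes a prefix."""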
CONFIG_GLOBAL = """
vlans:
100:
description: "100"
faucet_vips: ["10.0.0.254/24"]
acl_in: pbr
200:
description: "200"
faucet_vips: ["10.20.0.254/24"]
routes:
- route:
ip_dst: "10.99.0.0/24"
ip_gw: "10.20.0.2"
300:
description: "300"
faucet_vips: ["10.30.0.254/24"]
routes:
- route:
ip_dst: "10.99.0.0/24"
ip_gw: "10.30.0.3"
acls:
pbr:
- rule:
vlan_vid: 100
dl_type: 0x800
nw_dst: "10.99.0.2"
actions:
allow: 1
output:
swap_vid: 300
- rule:
vlan_vid: 100
dl_type: 0x800
nw_dst: "10.99.0.0/24"
actions:
allow: 1
output:
swap_vid: 200
- rule:
actions:
allow: 1
routers:
router-100-200:
vlans: [100, 200]
router-100-300:
vlans: [100, 300]
"""
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: 200
%(port_3)d:
native_vlan: 300
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
# 10.99.0.1 is on b2, and 10.99.0.2 is on b3
        # we want to route 10.99.0.0/24 to b2, but we want
        # to PBR 10.99.0.2/32 to b3.
first_host_ip = ipaddress.ip_interface('10.0.0.1/24')
first_faucet_vip = ipaddress.ip_interface('10.0.0.254/24')
second_host_ip = ipaddress.ip_interface('10.20.0.2/24')
second_faucet_vip = ipaddress.ip_interface('10.20.0.254/24')
third_host_ip = ipaddress.ip_interface('10.30.0.3/24')
third_faucet_vip = ipaddress.ip_interface('10.30.0.254/24')
first_host, second_host, third_host = self.hosts_name_ordered()[:3]
remote_ip = ipaddress.ip_interface('10.99.0.1/24')
remote_ip2 = ipaddress.ip_interface('10.99.0.2/24')
second_host.setIP(str(second_host_ip.ip), prefixLen=24)
third_host.setIP(str(third_host_ip.ip), prefixLen=24)
self.host_ipv4_alias(second_host, remote_ip)
self.host_ipv4_alias(third_host, remote_ip2)
self.add_host_route(first_host, remote_ip, first_faucet_vip.ip)
self.add_host_route(second_host, first_host_ip, second_faucet_vip.ip)
self.add_host_route(third_host, first_host_ip, third_faucet_vip.ip)
# ensure all nexthops resolved.
self.one_ipv4_ping(first_host, first_faucet_vip.ip)
self.one_ipv4_ping(second_host, second_faucet_vip.ip)
self.one_ipv4_ping(third_host, third_faucet_vip.ip)
self.wait_for_route_as_flow(
second_host.MAC(), ipaddress.IPv4Network('10.99.0.0/24'), vlan_vid=200)
self.wait_for_route_as_flow(
third_host.MAC(), ipaddress.IPv4Network('10.99.0.0/24'), vlan_vid=300)
# verify b1 can reach 10.99.0.1 and .2 on b2 and b3 respectively.
self.one_ipv4_ping(first_host, remote_ip.ip)
self.one_ipv4_ping(first_host, remote_ip2.ip)
class FaucetUntaggedMixedIPv4RouteTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["172.16.0.254/24", "10.0.0.254/24"]
"""
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
def test_untagged(self):
host_pair = self.hosts_name_ordered()[:2]
first_host, second_host = host_pair
first_host_net = ipaddress.ip_interface('10.0.0.1/24')
second_host_net = ipaddress.ip_interface('172.16.0.1/24')
second_host.setIP(str(second_host_net.ip), prefixLen=24)
self.one_ipv4_ping(first_host, self.FAUCET_VIPV4.ip)
self.one_ipv4_ping(second_host, self.FAUCET_VIPV4_2.ip)
self.add_host_route(
first_host, second_host_net, self.FAUCET_VIPV4.ip)
self.add_host_route(
second_host, first_host_net, self.FAUCET_VIPV4_2.ip)
self.one_ipv4_ping(first_host, second_host_net.ip)
self.one_ipv4_ping(second_host, first_host_net.ip)
class FaucetUntaggedMixedIPv6RouteTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["fc00::1:254/112", "fc01::1:254/112"]
"""
CONFIG = """
nd_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
def test_untagged(self):
host_pair = self.hosts_name_ordered()[:2]
first_host, second_host = host_pair
first_host_net = ipaddress.ip_interface('fc00::1:1/64')
second_host_net = ipaddress.ip_interface('fc01::1:1/64')
self.add_host_ipv6_address(first_host, first_host_net)
self.one_ipv6_ping(first_host, self.FAUCET_VIPV6.ip)
self.add_host_ipv6_address(second_host, second_host_net)
self.one_ipv6_ping(second_host, self.FAUCET_VIPV6_2.ip)
self.add_host_route(
first_host, second_host_net, self.FAUCET_VIPV6.ip)
self.add_host_route(
second_host, first_host_net, self.FAUCET_VIPV6_2.ip)
self.one_ipv6_ping(first_host, second_host_net.ip)
self.one_ipv6_ping(second_host, first_host_net.ip)
class FaucetUntaggedBGPIPv6DefaultRouteTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["fc00::1:254/112"]
routers:
router1:
bgp:
as: 1
connect_mode: "passive"
port: %(bgp_port)d
routerid: "1.1.1.1"
server_addresses: ["::1"]
neighbor_addresses: ["::1"]
vlan: 100
""" + """
neighbor_as: %u
""" % PEER_BGP_AS
CONFIG = """
nd_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
exabgp_peer_conf = """
static {
route ::/0 next-hop fc00::1:1 local-preference 100;
}
"""
exabgp_log = None
exabgp_err = None
config_ports = {'bgp_port': None}
def pre_start_net(self):
exabgp_conf = self.get_exabgp_conf('::1', self.exabgp_peer_conf)
self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[:2]
self.add_host_ipv6_address(first_host, 'fc00::1:1/112')
self.add_host_ipv6_address(second_host, 'fc00::1:2/112')
first_host_alias_ip = ipaddress.ip_interface('fc00::50:1/112')
first_host_alias_host_ip = ipaddress.ip_interface(
ipaddress.ip_network(first_host_alias_ip.ip))
self.add_host_ipv6_address(first_host, first_host_alias_ip)
self.wait_bgp_up('::1', 100, self.exabgp_log, self.exabgp_err)
self.assertGreater(
self.scrape_prometheus_var(
'bgp_neighbor_routes', {'ipv': '6', 'vlan': '100'}),
0)
self.wait_exabgp_sent_updates(self.exabgp_log)
self.add_host_route(
second_host, first_host_alias_host_ip, self.FAUCET_VIPV6.ip)
self.one_ipv6_ping(second_host, first_host_alias_ip.ip)
self.one_ipv6_controller_ping(first_host)
self.coldstart_conf()
class FaucetUntaggedBGPIPv6RouteTest(FaucetUntaggedTest):
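    """Test IPv6 routes learned from an exabgp peer, including rejection of a route via our own VIP."""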
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["fc00::1:254/112"]
routers:
router1:
bgp:
as: 1
connect_mode: "passive"
port: %(bgp_port)d
routerid: "1.1.1.1"
server_addresses: ["::1"]
neighbor_addresses: ["::1"]
vlan: 100
""" + """
neighbor_as: %u
""" % PEER_BGP_AS
CONFIG = """
nd_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
exabgp_peer_conf = """
static {
route fc00::10:0/112 next-hop fc00::1:1 local-preference 100;
route fc00::20:0/112 next-hop fc00::1:2 local-preference 100;
route fc00::30:0/112 next-hop fc00::1:2 local-preference 100;
route fc00::40:0/112 next-hop fc00::1:254;
route fc00::50:0/112 next-hop fc00::2:2;
}
"""
exabgp_log = None
exabgp_err = None
config_ports = {'bgp_port': None}
def pre_start_net(self):
exabgp_conf = self.get_exabgp_conf('::1', self.exabgp_peer_conf)
self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[:2]
self.wait_bgp_up('::1', 100, self.exabgp_log, self.exabgp_err)
self.assertGreater(
self.scrape_prometheus_var(
'bgp_neighbor_routes', {'ipv': '6', 'vlan': '100'}),
0)
self.wait_exabgp_sent_updates(self.exabgp_log)
self.verify_invalid_bgp_route(r'.+fc00::40:0\/112.+cannot be us$')
self.verify_ipv6_routing_mesh()
self.flap_all_switch_ports()
self.verify_ipv6_routing_mesh()
for host in first_host, second_host:
self.one_ipv6_controller_ping(host)
self.verify_traveling_dhcp_mac()
class FaucetUntaggedSameVlanIPv6RouteTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["fc00::10:1/112", "fc00::20:1/112"]
routes:
- route:
ip_dst: "fc00::10:0/112"
ip_gw: "fc00::10:2"
- route:
ip_dst: "fc00::20:0/112"
ip_gw: "fc00::20:2"
"""
CONFIG = """
nd_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[:2]
first_host_ip = ipaddress.ip_interface('fc00::10:2/112')
first_host_ctrl_ip = ipaddress.ip_address('fc00::10:1')
second_host_ip = ipaddress.ip_interface('fc00::20:2/112')
second_host_ctrl_ip = ipaddress.ip_address('fc00::20:1')
self.add_host_ipv6_address(first_host, first_host_ip)
self.add_host_ipv6_address(second_host, second_host_ip)
self.add_host_route(
first_host, second_host_ip, first_host_ctrl_ip)
self.add_host_route(
second_host, first_host_ip, second_host_ctrl_ip)
self.wait_for_route_as_flow(
first_host.MAC(), first_host_ip.network)
self.wait_for_route_as_flow(
second_host.MAC(), second_host_ip.network)
self.one_ipv6_ping(first_host, second_host_ip.ip)
self.one_ipv6_ping(first_host, second_host_ctrl_ip)
self.one_ipv6_ping(second_host, first_host_ip.ip)
self.one_ipv6_ping(second_host, first_host_ctrl_ip)
class FaucetUntaggedIPv6RouteTest(FaucetUntaggedTest):
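    """Test static IPv6 routes and their advertisement to a BGP (exabgp) peer."""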
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["fc00::1:254/112"]
routes:
- route:
ip_dst: "fc00::10:0/112"
ip_gw: "fc00::1:1"
- route:
ip_dst: "fc00::20:0/112"
ip_gw: "fc00::1:2"
- route:
ip_dst: "fc00::30:0/112"
ip_gw: "fc00::1:2"
routers:
router1:
bgp:
as: 1
connect_mode: "passive"
port: %(bgp_port)d
routerid: "1.1.1.1"
server_addresses: ["::1"]
neighbor_addresses: ["::1"]
vlan: 100
""" + """
neighbor_as: %u
""" % PEER_BGP_AS
CONFIG = """
nd_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
exabgp_log = None
exabgp_err = None
config_ports = {'bgp_port': None}
def pre_start_net(self):
exabgp_conf = self.get_exabgp_conf('::1')
self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)
def test_untagged(self):
self.verify_ipv6_routing_mesh()
second_host = self.hosts_name_ordered()[1]
self.flap_all_switch_ports()
self.wait_for_route_as_flow(
second_host.MAC(), ipaddress.IPv6Network('fc00::30:0/112'))
self.verify_ipv6_routing_mesh()
self.wait_bgp_up('::1', 100, self.exabgp_log, self.exabgp_err)
self.assertGreater(
self.scrape_prometheus_var(
'bgp_neighbor_routes', {'ipv': '6', 'vlan': '100'}),
0)
updates = self.exabgp_updates(self.exabgp_log)
for route_string in (
'fc00::1:0/112 next-hop fc00::1:254',
'fc00::10:0/112 next-hop fc00::1:1',
'fc00::20:0/112 next-hop fc00::1:2',
'fc00::30:0/112 next-hop fc00::1:2'):
self.assertTrue(re.search(route_string, updates), msg=updates)
class FaucetUntaggedRestBcastIPv6RouteTest(FaucetUntaggedIPv6RouteTest):
CONFIG = """
nd_neighbor_timeout: 2
max_resolve_backoff_time: 1
interfaces:
%(port_1)d:
native_vlan: 100
restricted_bcast_arpnd: true
%(port_2)d:
native_vlan: 100
restricted_bcast_arpnd: true
%(port_3)d:
native_vlan: 100
restricted_bcast_arpnd: true
%(port_4)d:
native_vlan: 100
restricted_bcast_arpnd: true
"""
class FaucetTaggedIPv6RouteTest(FaucetTaggedTest):
"""Test basic IPv6 routing without BGP."""
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
faucet_vips: ["fc00::1:254/112"]
routes:
- route:
ip_dst: "fc00::10:0/112"
ip_gw: "fc00::1:1"
- route:
ip_dst: "fc00::20:0/112"
ip_gw: "fc00::1:2"
"""
CONFIG = """
nd_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_TAGGED_BOILER
def test_tagged(self):
"""Test IPv6 routing works."""
host_pair = self.hosts_name_ordered()[:2]
first_host, second_host = host_pair
first_host_ip = ipaddress.ip_interface('fc00::1:1/112')
second_host_ip = ipaddress.ip_interface('fc00::1:2/112')
first_host_routed_ip = ipaddress.ip_interface('fc00::10:1/112')
second_host_routed_ip = ipaddress.ip_interface('fc00::20:1/112')
for _coldstart in range(2):
for _swaps in range(5):
self.verify_ipv6_routing_pair(
first_host, first_host_ip, first_host_routed_ip,
second_host, second_host_ip, second_host_routed_ip)
self.swap_host_macs(first_host, second_host)
self.coldstart_conf()
class FaucetStringOfDPTest(FaucetTest):
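    """Base class for tests over multiple datapaths connected in a string or stack topology."""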
MAX_HOSTS = 4
NUM_HOSTS = 4
LINKS_PER_HOST = 1
VID = 100
CONFIG = None
GROUP_TABLE = False
dpids = None
topo = None
def non_host_links(self, dpid):
return self.topo.dpid_peer_links(dpid)
@staticmethod
def get_config_header(_config_global, _debug_log, _dpid, _hardware):
"""Don't generate standard config file header."""
return ''
@staticmethod
def acls():
return {}
@staticmethod
def acl_in_dp():
return {}
def build_net(self, stack=False, n_dps=1,
n_tagged=0, tagged_vid=100,
untagged_hosts=None,
include=None, include_optional=None,
switch_to_switch_links=1, hw_dpid=None,
stack_ring=False, lacp=False, use_external=False,
router=None, dp_options=None):
"""Set up Mininet and Faucet for the given topology."""
if include is None:
include = []
if include_optional is None:
include_optional = []
self.dpids = [str(self.rand_dpid()) for _ in range(n_dps)]
self.dpids[0] = self.dpid
self.topo = mininet_test_topo.FaucetStringOfDPSwitchTopo(
self.OVS_TYPE,
self.ports_sock,
dpids=self.dpids,
n_tagged=n_tagged,
tagged_vid=tagged_vid,
untagged_hosts=untagged_hosts,
links_per_host=self.LINKS_PER_HOST,
switch_to_switch_links=switch_to_switch_links,
test_name=self._test_name(),
hw_dpid=hw_dpid, switch_map=self.switch_map,
stack_ring=stack_ring,
port_order=self.port_order
)
self.port_maps = {dpid: self.create_port_map(dpid) for dpid in self.dpids}
self.port_map = self.port_maps[self.dpid]
self.CONFIG = self.get_config(
self.dpids,
hw_dpid,
stack,
self.hardware,
self.debug_log_path,
n_tagged,
tagged_vid,
untagged_hosts,
include,
include_optional,
self.acls(),
self.acl_in_dp(),
lacp,
use_external,
router,
dp_options
)
def get_config(self, dpids=None, hw_dpid=None, stack=False, hardware=None, ofchannel_log=None,
n_tagged=0, tagged_vid=0, untagged_hosts=None,
include=None, include_optional=None, acls=None, acl_in_dp=None,
lacp=False, use_external=False, router=None, dp_options=None):
"""Build a complete Faucet configuration for each datapath, using the given topology."""
if dpids is None:
dpids = []
if include is None:
include = []
if include_optional is None:
include_optional = []
if acls is None:
acls = {}
if acl_in_dp is None:
acl_in_dp = {}
dpid_names = {}
dpname_to_dpkey = {}
def dp_name(i):
return 'faucet-%i' % (i + 1)
def add_vlans(n_tagged, tagged_vid, untagged_hosts, router):
vlans_config = {}
if untagged_hosts:
for vid in untagged_hosts.keys():
vlans_config[vid] = {
'description': 'untagged',
}
if ((n_tagged and not untagged_hosts) or
(n_tagged and untagged_hosts and tagged_vid not in untagged_hosts)):
vlans_config[tagged_vid] = {
'description': 'tagged',
}
if router:
for vid in router.keys():
if vid in vlans_config:
if 'faucet_mac' in router[vid]:
vlans_config[vid]['faucet_mac'] = router[vid]['faucet_mac']
if 'faucet_vips' in router[vid]:
vlans_config[vid]['faucet_vips'] = router[vid]['faucet_vips']
return vlans_config
def add_router(router):
router_config = {}
if router:
router_config['router-1'] = {
'vlans': list(router.keys()),
}
return router_config
def add_acl_to_port(name, port, interfaces_config):
if name in acl_in_dp and port in acl_in_dp[name]:
interfaces_config[port]['acl_in'] = acl_in_dp[name][port]
def add_dp_to_dp_ports(name, dpid, dp_config, interfaces_config, stack,
n_tagged, tagged_vid, untagged_hosts):
for link in self.topo.dpid_peer_links(dpid):
port, peer_dpid, peer_port = link.port, link.peer_dpid, link.peer_port
interfaces_config[port] = {}
if stack:
# make this a stacking link.
interfaces_config[port].update(
{
'stack': {
'dp': dpid_names[peer_dpid],
'port': peer_port}
})
else:
# not a stack - make this a trunk.
tagged_vlans = []
if n_tagged:
tagged_vlans.append(tagged_vid)
if untagged_hosts:
for vid in untagged_hosts.keys():
if vid not in tagged_vlans:
tagged_vlans.append(vid)
if tagged_vlans:
interfaces_config[port]['tagged_vlans'] = tagged_vlans
if lacp:
interfaces_config[port].update(
{'lacp': 1, 'lacp_active': True})
add_acl_to_port(name, port, interfaces_config)
# TODO: make per test configurable
dp_config['lacp_timeout'] = 10
# TODO: make the stacking root configurable
first_dp = dpid == self.dpid
if stack and first_dp:
dp_config['stack'] = {
'priority': 1
}
def add_dp(name, dpid, hw_dpid, i, stack,
n_tagged, tagged_vid, untagged_hosts,
use_external, dp_options):
dp_config = {
'dp_id': int(dpid),
'hardware': hardware if dpid == hw_dpid else 'Open vSwitch',
'ofchannel_log': ofchannel_log + str(i) if ofchannel_log else None,
'interfaces': {},
'group_table': self.GROUP_TABLE,
}
interfaces_config = {}
index = 1
for n_port in range(n_tagged):
port = self.port_maps[dpid]['port_%d' % index]
interfaces_config[port] = {
'tagged_vlans': [tagged_vid],
'loop_protect_external': (use_external and n_port != n_tagged - 1),
}
add_acl_to_port(name, port, interfaces_config)
index += 1
if untagged_hosts:
n_port = 0
for vid, num_hosts in untagged_hosts.items():
for _ in range(num_hosts):
port = self.port_maps[dpid]['port_%d' % index]
interfaces_config[port] = {
'native_vlan': vid,
'loop_protect_external': (use_external and n_port != num_hosts - 1),
}
add_acl_to_port(name, port, interfaces_config)
index += 1
n_port += 1
add_dp_to_dp_ports(name, dpid, dp_config, interfaces_config, stack,
n_tagged, tagged_vid, untagged_hosts)
for portno, config in list(interfaces_config.items()):
stack = config.get('stack', None)
if stack and 'stack' in interfaces_config[portno]:
peer_portno = stack['port']
interfaces_config[portno]['stack'].update({
'port': 'b%u' % peer_portno})
dp_config['interfaces'] = interfaces_config
if dp_options:
for key, value in dp_options.items():
dp_config[key] = value
return dp_config
# Create config
config = {'version': 2}
if include:
config['include'] = list(include)
if include_optional:
config['include-optional'] = list(include_optional)
config['vlans'] = add_vlans(n_tagged, tagged_vid, untagged_hosts, router)
if router:
config['routers'] = add_router(router)
config['acls'] = acls.copy()
config['dps'] = {}
for i, dpid in enumerate(dpids):
dpid_names[dpid] = name = dp_name(i)
dpname_to_dpkey[name] = dpid
self.set_dpid_names(dpid_names)
for i, dpid in enumerate(dpids):
name = dpid_names[dpid]
config['dps'][name] = add_dp(
name, dpid, hw_dpid, i, stack,
n_tagged, tagged_vid, untagged_hosts,
use_external, dp_options)
config_text = yaml.dump(config, default_flow_style=False)
return config_text
def verify_no_cable_errors(self):
i = 0
for dpid in self.dpids:
i += 1
labels = {'dp_id': '0x%x' % int(dpid), 'dp_name': 'faucet-%u' % i}
self.assertEqual(
0, self.scrape_prometheus_var(
var='stack_cabling_errors_total', labels=labels, default=None))
self.assertGreater(
self.scrape_prometheus_var(
var='stack_probes_received_total', labels=labels), 0)
def verify_stack_hosts(self, verify_bridge_local_rule=True, retries=3):
lldp_cap_files = []
for host in self.hosts_name_ordered():
lldp_cap_file = os.path.join(self.tmpdir, '%s-lldp.cap' % host)
lldp_cap_files.append(lldp_cap_file)
host.cmd(mininet_test_util.timeout_cmd(
'tcpdump -U -n -c 1 -i %s -w %s ether proto 0x88CC and not ether src %s &' % (
host.defaultIntf(), host.MAC(), lldp_cap_file), 60))
# should not flood LLDP from hosts
self.verify_lldp_blocked(self.hosts_name_ordered())
# hosts should see no LLDP probes
self.verify_empty_caps(lldp_cap_files)
if verify_bridge_local_rule:
# Verify 802.1x flood block triggered.
for dpid in self.dpids:
self.wait_nonzero_packet_count_flow(
{'dl_dst': '01:80:c2:00:00:00/ff:ff:ff:ff:ff:f0'},
dpid=dpid, table_id=self._FLOOD_TABLE, ofa_match=False)
self.retry_net_ping(retries=retries)
def stack_port_status(self, dpid, dp_name, port_no):
labels = self.port_labels(port_no)
labels.update({'dp_id': '0x%x' % int(dpid), 'dp_name': dp_name})
return self.scrape_prometheus_var(
'port_stack_state', labels=labels,
default=None, dpid=False)
def wait_for_stack_port_status(self, dpid, dp_name, port_no, status, timeout=25):
labels = self.port_labels(port_no)
labels.update({'dp_id': '0x%x' % int(dpid), 'dp_name': dp_name})
if not self.wait_for_prometheus_var(
'port_stack_state', status, labels=labels,
default=None, dpid=False, timeout=timeout):
self.fail('did not get expected dpid %x port %u port_stack_state %u' % (
int(dpid), port_no, status))
def one_stack_port_down(self, dpid, dp_name, port):
self.set_port_down(port, dpid, wait=False)
self.wait_for_stack_port_status(dpid, dp_name, port, 4)
def one_stack_port_up(self, dpid, dp_name, port):
self.set_port_up(port, dpid, wait=False)
self.wait_for_stack_port_status(dpid, dp_name, port, 3)
def verify_stack_up(self, prop=1.0, timeout=25):
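        """Wait up to timeout seconds for at least the given proportion of stack links to be up."""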
for _ in range(timeout):
links = 0
links_up = 0
for i, dpid in enumerate(self.dpids, start=1):
dp_name = 'faucet-%u' % i
for link in self.non_host_links(dpid):
status = self.stack_port_status(dpid, dp_name, link.port)
links += 1
if status == 3: # up
links_up += 1
prop_up = links_up / links
if prop_up >= prop:
return
time.sleep(1)
self.fail('not enough links up: %f / %f' % (links_up, links))
def verify_one_stack_down(self, stack_offset_port, coldstart=False):
self.retry_net_ping()
stack_port = self.non_host_links(self.dpid)[stack_offset_port].port
remote_stack_port = self.non_host_links(self.dpid)[stack_offset_port].peer_port
self.set_port_down(stack_port, wait=False)
# self.dpids[1] is the intermediate switch.
self.set_port_down(remote_stack_port, self.dpids[1], wait=False)
# test case where one link is down when coldstarted.
if coldstart:
self.coldstart_conf()
self.verify_stack_up(prop=0.75)
self.verify_stack_hosts(verify_bridge_local_rule=False)
# Broadcast works, and first switch doesn't see broadcast packet ins from stack.
packet_in_before_broadcast = self.scrape_prometheus_var('of_vlan_packet_ins')
self.verify_broadcast()
packet_in_after_broadcast = self.scrape_prometheus_var('of_vlan_packet_ins')
self.assertEqual(
packet_in_before_broadcast,
packet_in_after_broadcast)
# TODO: re-enable.
# self.verify_no_cable_errors()
def verify_no_arp_storm(self, ping_host, tcpdump_host):
num_arp_expected = self.topo.switch_to_switch_links * 2
tcpdump_filter = 'arp and ether src %s' % ping_host.MAC()
tcpdump_txt = self.tcpdump_helper(
tcpdump_host, tcpdump_filter, [
lambda: ping_host.cmd('arp -d %s' % tcpdump_host.IP()),
lambda: ping_host.cmd(' '.join((self.FPINGS_ARGS_ONE, tcpdump_host.IP())))],
packets=(num_arp_expected+1))
num_arp_received = len(re.findall(
'who-has %s tell %s' % (tcpdump_host.IP(), ping_host.IP()), tcpdump_txt))
self.assertTrue(num_arp_received)
self.assertLessEqual(num_arp_received, num_arp_expected)
def verify_stack_has_no_loop(self):
for ping_host, tcpdump_host in (
(self.hosts_name_ordered()[0], self.hosts_name_ordered()[-1]),
(self.hosts_name_ordered()[-1], self.hosts_name_ordered()[0])):
self.verify_no_arp_storm(ping_host, tcpdump_host)
def verify_all_stack_hosts(self):
for _ in range(2):
self.verify_stack_up()
self.verify_no_cable_errors()
self.verify_stack_hosts()
self.verify_traveling_dhcp_mac()
self.verify_unicast_not_looped()
self.verify_no_bcast_to_self()
self.verify_stack_has_no_loop()
self.flap_all_switch_ports()
def verify_tunnel_established(self, src_host, dst_host, other_host, packets=3):
"""Verify ICMP packets tunnelled from src to dst."""
icmp_match = {'eth_type': IPV4_ETH, 'ip_proto': 1}
self.wait_until_matching_flow(icmp_match, table_id=self._PORT_ACL_TABLE, ofa_match=False)
tcpdump_text = self.tcpdump_helper(
dst_host, 'icmp[icmptype] == 8', [
# need to set static ARP as only ICMP is tunnelled.
lambda: src_host.cmd('arp -s %s %s' % (other_host.IP(), other_host.MAC())),
lambda: src_host.cmd('ping -c%u -t1 %s' % (packets, other_host.IP()))
],
packets=1, timeout=(packets + 1),
)
self.wait_nonzero_packet_count_flow(
icmp_match, table_id=self._PORT_ACL_TABLE, ofa_match=False)
self.assertTrue(re.search(
'%s: ICMP echo request' % other_host.IP(), tcpdump_text
), 'Tunnel was not established')
def verify_one_broadcast(self, from_host, to_hosts):
self.assertGreater(len(to_hosts), 1, 'Testing only one ext host is not useful')
received_broadcasts = []
for to_host in to_hosts:
if self.verify_broadcast(hosts=(from_host, to_host), broadcast_expected=None):
received_broadcasts.append(to_host)
received_names = {host.name: host for host in received_broadcasts}
self.assertEqual(len(received_broadcasts), 1,
                         'Did not receive exactly one broadcast from %s: %s' %
(from_host.name, received_names))
def map_int_ext_hosts(self):
conf = self._get_faucet_conf()
host_name_map = {host.name: host for host in self.hosts_name_ordered()}
int_hosts = set()
ext_hosts = set()
dp_hosts = {}
for dp_name, dp_conf in conf['dps'].items():
dpid = int(dp_conf['dp_id'])
dp_int_hosts = set()
dp_ext_hosts = set()
for port, p_conf in dp_conf['interfaces'].items():
if 'stack' not in p_conf:
host = host_name_map[self.net.topo.dpid_port_host[dpid][port]]
if p_conf.get('loop_protect_external', False):
dp_ext_hosts.add(host)
else:
dp_int_hosts.add(host)
dp_hosts[dp_name] = (dp_int_hosts, dp_ext_hosts)
int_hosts.update(dp_int_hosts)
ext_hosts.update(dp_ext_hosts)
return int_hosts, ext_hosts, dp_hosts
def verify_protected_connectivity(self):
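        """Verify internal hosts have full connectivity while external hosts are isolated from each other."""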
self.verify_stack_up()
int_hosts, ext_hosts, dp_hosts = self.map_int_ext_hosts()
for int_host in int_hosts:
# All internal hosts can reach other internal hosts.
for other_int_host in int_hosts - {int_host}:
self.verify_broadcast(hosts=(int_host, other_int_host), broadcast_expected=True)
self.one_ipv4_ping(int_host, other_int_host.IP())
# All internal hosts can reach exactly one external host.
self.verify_one_broadcast(int_host, ext_hosts)
for ext_host in ext_hosts:
# All external hosts cannot flood to each other.
for other_ext_host in ext_hosts - {ext_host}:
self.verify_broadcast(hosts=(ext_host, other_ext_host), broadcast_expected=False)
# All external hosts can reach internal hosts.
for int_host in int_hosts:
self.verify_broadcast(hosts=(ext_host, int_host), broadcast_expected=True)
self.one_ipv4_ping(ext_host, int_host.IP())
def set_externals_state(self, dp_name, externals_up):
"""Set the port up/down state of all external ports on a switch"""
dp_conf = self._get_faucet_conf()['dps'][dp_name]
for port_num, port_conf in dp_conf['interfaces'].items():
if port_conf.get('loop_protect_external'):
if externals_up:
self.set_port_up(port_num, dp_conf.get('dp_id'))
else:
self.set_port_down(port_num, dp_conf.get('dp_id'))
def validate_with_externals_down(self, dp_name):
"""Check situation when all externals on a given dp are down"""
self.set_externals_state(dp_name, False)
self.verify_protected_connectivity()
self.set_externals_state(dp_name, True)
def validate_with_externals_down_fails(self, dp_name):
"""Faucet code is not currently correct, so expect to fail."""
# TODO: Fix faucet so the test inversion is no longer required.
asserted = False
try:
self.validate_with_externals_down(dp_name)
except AssertionError:
asserted = True
self.assertTrue(asserted, 'Did not fail as expected for %s' % dp_name)
class FaucetSingleUntaggedIPV4RoutingWithStackingTest(FaucetStringOfDPTest):
"""IPV4 intervlan routing with stacking test"""
IPV = 4
NETPREFIX = 24
ETH_TYPE = IPV4_ETH
SWITCH_TO_SWITCH_LINKS = 1
NUM_DPS = 4
V100 = 100
V200 = 200
V300 = 300
V100_NUM_HOSTS = 1
V200_NUM_HOSTS = 1
V300_NUM_HOSTS = 0
FAUCET_MAC2 = '0e:00:00:00:00:02'
@staticmethod
def get_dp_options():
return {
'drop_spoofed_faucet_mac': False,
'arp_neighbor_timeout': 2,
'max_resolve_backoff_time': 2,
'proactive_learn_v4': True
}
def setUp(self):
pass
def set_up(self):
super(FaucetSingleUntaggedIPV4RoutingWithStackingTest, self).setUp()
router_info = {
self.V100: {
'faucet_mac': self.FAUCET_MAC,
'faucet_vips': [self.get_faucet_vip(1)],
'targeted_gw_resolution': False,
},
self.V200: {
'faucet_mac': self.FAUCET_MAC2,
'faucet_vips': [self.get_faucet_vip(2)],
'targeted_gw_resolution': False,
}
}
untagged_hosts = {self.V100: self.V100_NUM_HOSTS,
self.V200: self.V200_NUM_HOSTS,
self.V300: self.V300_NUM_HOSTS}
self.build_net(
stack=True,
n_dps=self.NUM_DPS,
untagged_hosts=untagged_hosts,
switch_to_switch_links=self.SWITCH_TO_SWITCH_LINKS,
hw_dpid=self.hw_dpid,
router=router_info,
dp_options=self.get_dp_options()
)
self.start_net()
@staticmethod
def get_faucet_mac(vindex):
"""Get the faucet MAC"""
return '0e:00:00:00:00:0%u' % vindex
def get_faucet_vip(self, vindex):
"""Get the IPV4 faucet vip"""
return '10.%u00.0.254/%u' % (vindex, self.NETPREFIX)
def get_ip(self, host_n, vindex):
"""Get the IPV4 host ip"""
return '10.%u00.0.%u/%u' % (vindex, host_n, self.NETPREFIX)
def host_ping(self, src_host, dst_ip):
"""ping host"""
self.one_ipv4_ping(src_host, dst_ip, require_host_learned=False, retries=5)
def set_host_ip(self, host, host_ip):
"""Set the host ip"""
host.setIP(str(host_ip.ip), prefixLen=self.NETPREFIX)
def verify_intervlan_routing(self):
"""Setup host routes and verify intervlan routing is possible"""
num_hosts = self.V100_NUM_HOSTS + self.V200_NUM_HOSTS + self.V300_NUM_HOSTS
first_faucet_vip = ipaddress.ip_interface(self.get_faucet_vip(1))
second_faucet_vip = ipaddress.ip_interface(self.get_faucet_vip(2))
v100_hosts = [(self.hosts_name_ordered()[i], ipaddress.ip_interface(
self.get_ip(i+1, 1))) for i in range(len(self.hosts_name_ordered())) if (i % num_hosts) == 0]
v200_hosts = [(self.hosts_name_ordered()[i], ipaddress.ip_interface(
self.get_ip(i+1, 2))) for i in range(len(self.hosts_name_ordered())) if (i % num_hosts) == 1]
for host_tuple in v100_hosts:
host, host_ip = host_tuple
self.set_host_ip(host, host_ip)
for host_tuple in v200_hosts:
host, host_ip = host_tuple
self.set_host_ip(host, host_ip)
for v100_host_tuple in v100_hosts:
v100_host, v100_host_ip = v100_host_tuple
for v200_host_tuple in v200_hosts:
v200_host, v200_host_ip = v200_host_tuple
self.add_host_route(v100_host, v200_host_ip, first_faucet_vip.ip)
self.add_host_route(v200_host, v100_host_ip, second_faucet_vip.ip)
# TODO: multi DP route resolver needs to flood out stack ports
self.host_ping(v100_host, first_faucet_vip.ip)
self.host_ping(v200_host, second_faucet_vip.ip)
self.host_ping(v100_host, v200_host_ip.ip)
self.host_ping(v200_host, v100_host_ip.ip)
self.assertEqual(
self._ip_neigh(v100_host, first_faucet_vip.ip, self.IPV), self.FAUCET_MAC)
self.assertEqual(
self._ip_neigh(v200_host, second_faucet_vip.ip, self.IPV), self.FAUCET_MAC2)
for src_host_tuple in v100_hosts:
src_host, _ = src_host_tuple
for dst_host_tuple in v100_hosts:
_, dst_ip = dst_host_tuple
if src_host_tuple == dst_host_tuple:
continue
self.host_ping(src_host, dst_ip.ip)
for src_host_tuple in v200_hosts:
src_host, _ = src_host_tuple
for dst_host_tuple in v200_hosts:
_, dst_ip = dst_host_tuple
if src_host_tuple == dst_host_tuple:
continue
self.host_ping(src_host, dst_ip.ip)
def test_intervlan_routing_2stack(self):
"""Verify intervlan routing works with 2 DPs in a stack"""
self.NUM_DPS = 2
self.set_up()
self.verify_stack_up()
self.verify_intervlan_routing()
def test_intervlan_routing_3stack(self):
"""Verify intervlan routing works with 3 DPs in a stack"""
self.NUM_DPS = 3
self.set_up()
self.verify_stack_up()
self.verify_intervlan_routing()
def test_intervlan_routing_4stack(self):
"""Verify intervlan routing works with 4 DPs in a stack"""
self.NUM_DPS = 4
self.set_up()
self.verify_stack_up()
self.verify_intervlan_routing()
def test_path_no_vlans(self):
"""Test when a DP in the path of a intervlan route contains no routed VLANs"""
self.NUM_DPS = 3
self.set_up()
first_faucet_vip = ipaddress.ip_interface(self.get_faucet_vip(1))
second_faucet_vip = ipaddress.ip_interface(self.get_faucet_vip(2))
v100_host = self.hosts_name_ordered()[0]
v100_host_ip = ipaddress.ip_interface(self.get_ip(1, 1))
v200_host = self.hosts_name_ordered()[5]
v200_host_ip = ipaddress.ip_interface(self.get_ip(2, 2))
# Remove all hosts on the middle DP by changing them to hosts on VLAN300;
# the middle DP now contains no hosts with VLAN 100 or VLAN 200
conf = self._get_faucet_conf()
interface_config = conf['dps']['faucet-2']['interfaces']
for port_key, port_dict in interface_config.items():
if 'stack' in port_dict:
continue
conf['dps']['faucet-2']['interfaces'][port_key]['native_vlan'] = self.V300
self.reload_conf(
conf, self.faucet_config_path,
restart=True, cold_start=False, change_expected=True)
self.verify_stack_up()
self.set_host_ip(v100_host, v100_host_ip)
self.set_host_ip(v200_host, v200_host_ip)
self.add_host_route(v100_host, v200_host_ip, first_faucet_vip.ip)
self.add_host_route(v200_host, v100_host_ip, second_faucet_vip.ip)
# TODO: multi DP route resolver needs to flood out stack ports
self.host_ping(v100_host, first_faucet_vip.ip)
self.host_ping(v200_host, second_faucet_vip.ip)
self.host_ping(v100_host, v200_host_ip.ip)
self.host_ping(v200_host, v100_host_ip.ip)
self.assertEqual(
self._ip_neigh(v100_host, first_faucet_vip.ip, self.IPV), self.FAUCET_MAC)
self.assertEqual(
self._ip_neigh(v200_host, second_faucet_vip.ip, self.IPV), self.FAUCET_MAC2)
def test_dp_one_vlan_from_router(self):
"""Test when a DP has only one of the routed VLANs"""
self.NUM_DPS = 2
self.set_up()
first_faucet_vip = ipaddress.ip_interface(self.get_faucet_vip(1))
second_faucet_vip = ipaddress.ip_interface(self.get_faucet_vip(2))
v100_host = self.hosts_name_ordered()[0]
v100_host_ip = ipaddress.ip_interface(self.get_ip(1, 1))
v200_host = self.hosts_name_ordered()[3]
v200_host_ip = ipaddress.ip_interface(self.get_ip(2, 2))
# Remove the host on VLAN100 by changing it to a host on VLAN300; there is now
# only one host on the DP on a VLAN that is being routed (200)
conf = self._get_faucet_conf()
interface_config = conf['dps']['faucet-2']['interfaces']
for port_key, port_dict in interface_config.items():
if 'stack' in port_dict:
continue
if port_dict['native_vlan'] == self.V100:
conf['dps']['faucet-2']['interfaces'][port_key]['native_vlan'] = self.V300
self.reload_conf(
conf, self.faucet_config_path,
restart=True, cold_start=False, change_expected=True)
self.verify_stack_up()
self.set_host_ip(v100_host, v100_host_ip)
self.set_host_ip(v200_host, v200_host_ip)
self.add_host_route(v100_host, v200_host_ip, first_faucet_vip.ip)
self.add_host_route(v200_host, v100_host_ip, second_faucet_vip.ip)
# TODO: multi DP route resolver needs to flood out stack ports
self.host_ping(v100_host, first_faucet_vip.ip)
self.host_ping(v200_host, second_faucet_vip.ip)
self.host_ping(v100_host, v200_host_ip.ip)
self.host_ping(v200_host, v100_host_ip.ip)
self.assertEqual(
self._ip_neigh(v100_host, first_faucet_vip.ip, self.IPV), self.FAUCET_MAC)
self.assertEqual(
self._ip_neigh(v200_host, second_faucet_vip.ip, self.IPV), self.FAUCET_MAC2)
class FaucetSingleUntaggedIPV6RoutingWithStackingTest(FaucetSingleUntaggedIPV4RoutingWithStackingTest):
"""IPV6 intervlan routing with stacking tests"""
IPV = 6
NETPREFIX = 64
ETH_TYPE = IPV6_ETH
def get_dp_options(self):
return {
'drop_spoofed_faucet_mac': False,
'nd_neighbor_timeout': 2,
'max_resolve_backoff_time': 1,
'proactive_learn_v6': True
}
def host_ping(self, src_host, dst_ip):
self.one_ipv6_ping(src_host, dst_ip, require_host_learned=False)
def set_host_ip(self, host, host_ip):
self.add_host_ipv6_address(host, host_ip)
def get_faucet_vip(self, vindex):
"""Get the IPV6 faucet vip"""
return 'fc0%u::1:254/112' % vindex
def get_ip(self, host_n, vindex):
"""Get the IPV6 host ip"""
return 'fc0%u::1:%u/64' % (vindex, host_n)
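# A worked example of the address helpers defined by the two stacking test
# classes above; the concrete values follow directly from their format strings
# (NETPREFIX=24 for IPv4) and are shown for illustration only.
def _example_intervlan_addressing():
    # IPv4: VLAN index 1 maps to network 10.100.0.0/24
    assert '10.%u00.0.254/%u' % (1, 24) == '10.100.0.254/24'  # faucet VIP
    assert '10.%u00.0.%u/%u' % (1, 2, 24) == '10.100.0.2/24'  # host 2 on VLAN 100
    # IPv6: VLAN index 2 maps to network fc02::/64
    assert 'fc0%u::1:254/112' % 2 == 'fc02::1:254/112'  # faucet VIP
    assert 'fc0%u::1:%u/64' % (2, 3) == 'fc02::1:3/64'  # host 3 on VLAN 200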
class FaucetStringOfDPUntaggedTest(FaucetStringOfDPTest):
NUM_DPS = 3
def setUp(self): # pylint: disable=invalid-name
super(FaucetStringOfDPUntaggedTest, self).setUp()
self.build_net(
n_dps=self.NUM_DPS, untagged_hosts={self.VID: self.NUM_HOSTS})
self.start_net()
def test_untagged(self):
"""All untagged hosts in multi switch topology can reach one another."""
self.verify_stack_hosts()
self.verify_traveling_dhcp_mac()
class FaucetStringOfDPTaggedTest(FaucetStringOfDPTest):
NUM_DPS = 3
def setUp(self): # pylint: disable=invalid-name
super(FaucetStringOfDPTaggedTest, self).setUp()
self.build_net(
n_dps=self.NUM_DPS, n_tagged=self.NUM_HOSTS, tagged_vid=self.VID)
self.start_net()
def test_tagged(self):
"""All tagged hosts in multi switch topology can reach one another."""
self.verify_stack_hosts(verify_bridge_local_rule=False)
self.verify_traveling_dhcp_mac()
class FaucetSingleStackStringOfDPTagged0Test(FaucetStringOfDPTest):
"""Test topology of stacked datapaths with tagged hosts."""
NUM_DPS = 3
def setUp(self): # pylint: disable=invalid-name
super(FaucetSingleStackStringOfDPTagged0Test, self).setUp()
self.build_net(
stack=True,
n_dps=self.NUM_DPS,
n_tagged=self.NUM_HOSTS,
tagged_vid=self.VID,
switch_to_switch_links=2)
self.start_net()
def test_tagged(self):
"""All tagged hosts in stack topology can reach each other."""
self.verify_stack_up()
for coldstart in (False, True):
self.verify_one_stack_down(0, coldstart)
class FaucetSingleStackStringOfDPTagged1Test(FaucetStringOfDPTest):
"""Test topology of stacked datapaths with tagged hosts."""
NUM_DPS = 3
def setUp(self): # pylint: disable=invalid-name
super(FaucetSingleStackStringOfDPTagged1Test, self).setUp()
self.build_net(
stack=True,
n_dps=self.NUM_DPS,
n_tagged=self.NUM_HOSTS,
tagged_vid=self.VID,
switch_to_switch_links=2)
self.start_net()
def test_tagged(self):
self.verify_stack_up()
for coldstart in (False, True):
self.verify_one_stack_down(1, coldstart)
class FaucetStringOfDPLACPUntaggedTest(FaucetStringOfDPTest):
"""Test topology of LACP-connected datapaths with untagged hosts."""
NUM_DPS = 2
NUM_HOSTS = 2
match_bcast = {'dl_vlan': '100', 'dl_dst': 'ff:ff:ff:ff:ff:ff'}
action_str = 'OUTPUT:%u'
def setUp(self): # pylint: disable=invalid-name
super(FaucetStringOfDPLACPUntaggedTest, self).setUp()
self.build_net(
stack=False,
n_dps=self.NUM_DPS,
untagged_hosts={self.VID: self.NUM_HOSTS},
switch_to_switch_links=2,
hw_dpid=self.hw_dpid,
lacp=True)
self.start_net()
def lacp_ports(self):
first_link, second_link = sorted(self.non_host_links(self.dpid))
first_lacp_port, second_lacp_port = first_link.port, second_link.port
remote_first_lacp_port, remote_second_lacp_port = first_link.peer_port, second_link.peer_port
return (first_lacp_port, second_lacp_port,
remote_first_lacp_port, remote_second_lacp_port)
def wait_for_lacp_state(self, port_no, wanted_state, dpid, dp_name, timeout=30):
labels = self.port_labels(port_no)
labels.update({'dp_id': '0x%x' % int(dpid), 'dp_name': dp_name})
if not self.wait_for_prometheus_var(
'port_lacp_state', wanted_state,
labels=labels, dpid=False, timeout=timeout):
self.fail('wanted LACP state for %s to be %u' % (labels, wanted_state))
def wait_for_lacp_port_init(self, port_no, dpid, dp_name):
self.wait_for_lacp_state(port_no, 1, dpid, dp_name)
def wait_for_lacp_port_up(self, port_no, dpid, dp_name):
self.wait_for_lacp_state(port_no, 3, dpid, dp_name)
def wait_for_lacp_port_noact(self, port_no, dpid, dp_name):
self.wait_for_lacp_state(port_no, 5, dpid, dp_name)
# We sort non_host_links by port because FAUCET sorts its ports
# and only floods out of the first active LACP port in that list
def wait_for_all_lacp_up(self):
(first_lacp_port, second_lacp_port, remote_first_lacp_port, _) = self.lacp_ports()
self.wait_for_lacp_port_up(first_lacp_port, self.dpid, self.DP_NAME)
self.wait_for_lacp_port_up(second_lacp_port, self.dpid, self.DP_NAME)
self.wait_until_matching_flow(
self.match_bcast, self._FLOOD_TABLE, actions=[self.action_str % first_lacp_port])
self.wait_until_matching_flow(
self.match_bcast, self._FLOOD_TABLE, actions=[self.action_str % remote_first_lacp_port],
dpid=self.dpids[1])
def test_lacp_port_down(self):
"""LACP works with any member down."""
(first_lacp_port, second_lacp_port,
remote_first_lacp_port, remote_second_lacp_port) = self.lacp_ports()
local_ports = {first_lacp_port, second_lacp_port}
remote_ports = {remote_first_lacp_port, remote_second_lacp_port}
self.wait_for_all_lacp_up()
self.retry_net_ping()
for local_lacp_port, remote_lacp_port in (
(first_lacp_port, remote_first_lacp_port),
(second_lacp_port, remote_second_lacp_port)):
other_local_lacp_port = list(local_ports - {local_lacp_port})[0]
other_remote_lacp_port = list(remote_ports - {remote_lacp_port})[0]
self.set_port_down(local_lacp_port, wait=False)
self.wait_for_lacp_port_init(
local_lacp_port, self.dpid, self.DP_NAME)
self.wait_for_lacp_port_init(
remote_lacp_port, self.dpids[1], 'faucet-2')
self.wait_until_matching_flow(
self.match_bcast, self._FLOOD_TABLE, actions=[
self.action_str % other_local_lacp_port])
self.wait_until_matching_flow(
self.match_bcast, self._FLOOD_TABLE, actions=[
self.action_str % other_remote_lacp_port],
dpid=self.dpids[1])
self.retry_net_ping()
self.set_port_up(local_lacp_port)
self.wait_for_all_lacp_up()
def test_untagged(self):
"""All untagged hosts in stack topology can reach each other, LAG_CHANGE event emitted."""
self._enable_event_log()
for _ in range(3):
self.wait_for_all_lacp_up()
self.verify_stack_hosts()
self.flap_all_switch_ports()
# Check for presence of LAG_CHANGE event in event socket log
self.wait_until_matching_lines_from_file(r'.+LAG_CHANGE.+', self.event_log)
def test_dyn_fail(self):
"""Test lacp fail on reload with dynamic lacp status."""
conf = self._get_faucet_conf()
(src_port, dst_port, fail_port, _) = self.lacp_ports()
self.wait_for_lacp_port_up(src_port, self.dpids[0], 'faucet-1')
self.wait_for_lacp_port_up(dst_port, self.dpids[0], 'faucet-1')
interfaces_conf = conf['dps']['faucet-2']['interfaces']
interfaces_conf[fail_port]['lacp'] = 0
interfaces_conf[fail_port]['lacp_active'] = False
self.reload_conf(conf, self.faucet_config_path, restart=True,
cold_start=False, change_expected=False)
self.wait_for_lacp_port_init(src_port, self.dpids[0], 'faucet-1')
self.wait_for_lacp_port_up(dst_port, self.dpids[0], 'faucet-1')
def test_passthrough(self):
"""Test lacp passthrough on port fail."""
conf = self._get_faucet_conf()
(src_port, dst_port, fail_port, end_port) = self.lacp_ports()
interfaces_conf = conf['dps']['faucet-1']['interfaces']
interfaces_conf[dst_port]['lacp_passthrough'] = [src_port]
interfaces_conf[dst_port]['loop_protect_external'] = True
interfaces_conf[dst_port]['lacp'] = 2
interfaces_conf[src_port]['loop_protect_external'] = True
interfaces_conf = conf['dps']['faucet-2']['interfaces']
interfaces_conf[fail_port]['loop_protect_external'] = True
interfaces_conf[end_port]['loop_protect_external'] = True
interfaces_conf[end_port]['lacp'] = 2
self.reload_conf(conf, self.faucet_config_path, restart=True,
cold_start=False, change_expected=False)
self.wait_for_all_lacp_up()
self.verify_stack_hosts()
interfaces_conf[fail_port]['lacp'] = 0
interfaces_conf[fail_port]['lacp_active'] = False
self.reload_conf(conf, self.faucet_config_path, restart=True,
cold_start=False, change_expected=False)
self.wait_for_lacp_port_init(src_port, self.dpids[0], 'faucet-1')
self.wait_for_lacp_port_up(dst_port, self.dpids[0], 'faucet-1')
self.wait_for_lacp_port_init(end_port, self.dpids[1], 'faucet-2')
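# A sketch of the per-interface FAUCET options exercised by the LACP tests
# above. The key names come from the configuration the tests manipulate; the
# port numbers, LAG id and values are illustrative assumptions only.
_EXAMPLE_LACP_INTERFACE_CONF = {
    1: {'native_vlan': 100, 'lacp': 1, 'lacp_active': True},
    2: {'native_vlan': 100, 'lacp': 1, 'lacp_passthrough': [1],
        'loop_protect_external': True},
}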
class FaucetStackStringOfDPUntaggedTest(FaucetStringOfDPTest):
"""Test topology of stacked datapaths with untagged hosts."""
NUM_DPS = 2
NUM_HOSTS = 2
def setUp(self): # pylint: disable=invalid-name
super(FaucetStackStringOfDPUntaggedTest, self).setUp()
self.build_net(
stack=True,
n_dps=self.NUM_DPS,
untagged_hosts={self.VID: self.NUM_HOSTS},
switch_to_switch_links=2,
hw_dpid=self.hw_dpid)
self.start_net()
def test_untagged(self):
"""All untagged hosts in stack topology can reach each other."""
self.verify_stack_hosts()
class FaucetSingleStackStringOfDPExtLoopProtUntaggedTest(FaucetStringOfDPTest):
"""Test topology of stacked datapaths with untagged hosts."""
NUM_DPS = 2
NUM_HOSTS = 3
def setUp(self): # pylint: disable=invalid-name
super(FaucetSingleStackStringOfDPExtLoopProtUntaggedTest, self).setUp()
self.build_net(
stack=True,
n_dps=self.NUM_DPS,
untagged_hosts={self.VID: self.NUM_HOSTS},
switch_to_switch_links=2,
hw_dpid=self.hw_dpid,
use_external=True)
self.start_net()
def test_untagged(self):
"""Host can reach each other, unless both marked loop_protect_external"""
for host in self.hosts_name_ordered():
self.require_host_learned(host)
# Part 1: Make sure things are connected properly.
self.verify_protected_connectivity() # Before reload
# Part 2: Test the code on pipeline reconfiguration path.
conf = self._get_faucet_conf()
loop_interface = None
for interface, interface_conf in conf['dps']['faucet-2']['interfaces'].items():
if 'stack' in interface_conf:
continue
if not interface_conf.get('loop_protect_external', False):
loop_interface = interface
break
self._mark_external(loop_interface, True)
self._mark_external(loop_interface, False)
# Part 3: Make sure things are the same after reload.
self.verify_protected_connectivity() # After reload
def _mark_external(self, loop_interface, protect_external):
conf = self._get_faucet_conf()
conf['dps']['faucet-2']['interfaces'][loop_interface]['loop_protect_external'] = protect_external
self.reload_conf(
conf, self.faucet_config_path,
restart=True, cold_start=False, change_expected=True)
def test_missing_ext(self):
"""Test stacked dp with all external ports down on a switch"""
self.validate_with_externals_down_fails('faucet-1')
self.validate_with_externals_down_fails('faucet-2')
class FaucetSingleStackStringOf3DPExtLoopProtUntaggedTest(FaucetStringOfDPTest):
"""Test topology of stacked datapaths with untagged hosts."""
NUM_DPS = 3
NUM_HOSTS = 3
def setUp(self): # pylint: disable=invalid-name
super(FaucetSingleStackStringOf3DPExtLoopProtUntaggedTest, self).setUp()
self.build_net(
stack=True,
n_dps=self.NUM_DPS,
untagged_hosts={self.VID: self.NUM_HOSTS},
switch_to_switch_links=2,
hw_dpid=self.hw_dpid,
use_external=True)
self.start_net()
def test_untagged(self):
self.verify_stack_up()
int_hosts, ext_hosts, dp_hosts = self.map_int_ext_hosts()
_, root_ext_hosts = dp_hosts[self.DP_NAME]
for int_host in int_hosts:
# All internal hosts can reach other internal hosts.
for other_int_host in int_hosts - {int_host}:
self.verify_broadcast(
hosts=(int_host, other_int_host), broadcast_expected=True)
self.verify_unicast(
hosts=(int_host, other_int_host), unicast_expected=True)
# All internal hosts should reach exactly one external host.
self.verify_one_broadcast(int_host, ext_hosts)
for ext_host in ext_hosts:
# No external host can flood to another external host
for other_ext_host in ext_hosts - {ext_host}:
self.verify_broadcast(
hosts=(ext_host, other_ext_host), broadcast_expected=False)
remote_ext_hosts = ext_hosts - set(root_ext_hosts)
# Broadcasts from internal hosts should never reach an external host that is not on the root.
for local_int_hosts, _ in dp_hosts.values():
for local_int_host in local_int_hosts:
for remote_ext_host in remote_ext_hosts:
self.verify_broadcast(
hosts=(local_int_host, remote_ext_host), broadcast_expected=False)
class FaucetGroupStackStringOfDPUntaggedTest(FaucetStackStringOfDPUntaggedTest):
"""Test topology of stacked datapaths with untagged hosts."""
GROUP_TABLE = True
class FaucetStackRingOfDPTest(FaucetStringOfDPTest):
NUM_DPS = 3
SOFTWARE_ONLY = True
def setUp(self): # pylint: disable=invalid-name
super(FaucetStackRingOfDPTest, self).setUp()
self.build_net(
stack=True,
n_dps=self.NUM_DPS,
untagged_hosts={self.VID: self.NUM_HOSTS},
switch_to_switch_links=1,
stack_ring=True)
self.start_net()
def test_untagged(self):
"""Stack loop prevention works and hosts can ping each other, STACK_TOPO_CHANGE event emitted."""
self._enable_event_log()
self.verify_stack_up()
self.verify_stack_has_no_loop()
self.retry_net_ping()
self.verify_traveling_dhcp_mac()
# Move through each DP breaking either side of the ring
for dpid_i in range(self.NUM_DPS):
dpid = self.dpids[dpid_i]
dp_name = 'faucet-%u' % (dpid_i + 1)
for link in self.non_host_links(dpid):
port = link.port
self.one_stack_port_down(dpid, dp_name, port)
self.retry_net_ping()
self.one_stack_port_up(dpid, dp_name, port)
# Check for presence of STACK_TOPO_CHANGE event in event socket log
self.wait_until_matching_lines_from_file(r'.+STACK_TOPO_CHANGE.+', self.event_log)
class FaucetSingleStack4RingOfDPTest(FaucetStackRingOfDPTest):
NUM_DPS = 4
class FaucetSingleStackAclControlTest(FaucetStringOfDPTest):
"""Test ACL control of stacked datapaths with untagged hosts."""
NUM_DPS = 3
NUM_HOSTS = 3
def acls(self):
map1, map2, map3 = [self.port_maps[dpid] for dpid in self.dpids]
return {
1: [
{'rule': {
'dl_type': IPV4_ETH,
'nw_dst': '10.0.0.2',
'actions': {
'output': {
'port': map1['port_2']
}
},
}},
{'rule': {
'dl_type': IPV4_ETH,
'dl_dst': 'ff:ff:ff:ff:ff:ff',
'actions': {
'output': {
'ports': [
map1['port_2'],
map1['port_4']]
}
},
}},
{'rule': {
'dl_type': IPV4_ETH,
'actions': {
'output': {
'port': map1['port_4']
}
},
}},
{'rule': {
'actions': {
'allow': 1,
},
}},
],
2: [
{'rule': {
'dl_type': IPV4_ETH,
'actions': {
'output': {
'port': map2['port_5']
}
},
}},
{'rule': {
'actions': {
'allow': 1,
},
}},
],
3: [
{'rule': {
'dl_type': IPV4_ETH,
'nw_dst': '10.0.0.7',
'actions': {
'output': {
'port': map3['port_1']
}
},
}},
{'rule': {
'dl_type': IPV4_ETH,
'dl_dst': 'ff:ff:ff:ff:ff:ff',
'actions': {
'output': {
'ports': [map3['port_1']]
}
},
}},
{'rule': {
'dl_type': IPV4_ETH,
'actions': {
'allow': 0,
},
}},
{'rule': {
'actions': {
'allow': 1,
},
}},
],
}
# DP-to-acl_in port mapping.
def acl_in_dp(self):
map1, map2, map3 = [self.port_maps[dpid] for dpid in self.dpids]
return {
'faucet-1': {
# Port 1, acl_in = 1
map1['port_1']: 1,
},
'faucet-2': {
# Port 4, acl_in = 2
map2['port_4']: 2,
},
'faucet-3': {
# Port 4, acl_in = 3
map3['port_4']: 3,
},
}
def setUp(self): # pylint: disable=invalid-name
super(FaucetSingleStackAclControlTest, self).setUp()
self.build_net(
stack=True,
n_dps=self.NUM_DPS,
untagged_hosts={self.VID: self.NUM_HOSTS},
)
self.start_net()
def test_unicast(self):
"""Hosts in stack topology can appropriately reach each other over unicast."""
hosts = self.hosts_name_ordered()
self.verify_stack_up()
self.verify_tp_dst_notblocked(5000, hosts[0], hosts[1], table_id=None)
self.verify_tp_dst_blocked(5000, hosts[0], hosts[3], table_id=None)
self.verify_tp_dst_notblocked(5000, hosts[0], hosts[6], table_id=None)
self.verify_tp_dst_blocked(5000, hosts[0], hosts[7], table_id=None)
self.verify_no_cable_errors()
def test_broadcast(self):
"""Hosts in stack topology can appropriately reach each other over broadcast."""
hosts = self.hosts_name_ordered()
self.verify_stack_up()
self.verify_bcast_dst_notblocked(5000, hosts[0], hosts[1])
self.verify_bcast_dst_blocked(5000, hosts[0], hosts[3])
self.verify_bcast_dst_notblocked(5000, hosts[0], hosts[6])
self.verify_bcast_dst_blocked(5000, hosts[0], hosts[7])
self.verify_no_cable_errors()
class FaucetStringOfDPACLOverrideTest(FaucetStringOfDPTest):
NUM_DPS = 1
NUM_HOSTS = 2
# ACL rules which will get overridden.
def acls(self):
return {
1: [
{'rule': {
'dl_type': IPV4_ETH,
'ip_proto': 6,
'tcp_dst': 5001,
'actions': {
'allow': 1,
},
}},
{'rule': {
'dl_type': IPV4_ETH,
'ip_proto': 6,
'tcp_dst': 5002,
'actions': {
'allow': 0,
},
}},
{'rule': {
'actions': {
'allow': 1,
},
}},
],
}
# ACL rules which get put into an include-optional
# file, then reloaded into FAUCET.
@staticmethod
def acls_override():
return {
1: [
{'rule': {
'dl_type': IPV4_ETH,
'ip_proto': 6,
'tcp_dst': 5001,
'actions': {
'allow': 0,
},
}},
{'rule': {
'dl_type': IPV4_ETH,
'ip_proto': 6,
'tcp_dst': 5002,
'actions': {
'allow': 1,
},
}},
{'rule': {
'actions': {
'allow': 1,
},
}},
],
}
# DP-to-acl_in port mapping.
def acl_in_dp(self):
port_1 = self.port_map['port_1']
return {
'faucet-1': {
# First port, acl_in = 1
port_1: 1,
},
}
def setUp(self): # pylint: disable=invalid-name
super(FaucetStringOfDPACLOverrideTest, self).setUp()
self.acls_config = os.path.join(self.tmpdir, 'acls.yaml')
missing_config = os.path.join(self.tmpdir, 'missing_config.yaml')
self.build_net(
n_dps=self.NUM_DPS,
untagged_hosts={self.VID: self.NUM_HOSTS},
include_optional=[self.acls_config, missing_config],
)
self.start_net()
def test_port5001_blocked(self):
"""Test that TCP port 5001 is blocked."""
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_notblocked(5001, first_host, second_host)
with open(self.acls_config, 'w') as config_file:
config_file.write(self.get_config(acls=self.acls_override()))
self.verify_faucet_reconf(cold_start=False, change_expected=True)
self.verify_tp_dst_blocked(5001, first_host, second_host)
self.verify_no_cable_errors()
def test_port5002_notblocked(self):
"""Test that TCP port 5002 is not blocked."""
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_blocked(5002, first_host, second_host)
with open(self.acls_config, 'w') as config_file:
config_file.write(self.get_config(acls=self.acls_override()))
self.verify_faucet_reconf(cold_start=False, change_expected=True)
self.verify_tp_dst_notblocked(5002, first_host, second_host)
self.verify_no_cable_errors()
class FaucetTunnelSameDpTest(FaucetStringOfDPTest):
NUM_DPS = 2
NUM_HOSTS = 2
SWITCH_TO_SWITCH_LINKS = 2
VID = 100
def acls(self):
return {
1: [
{'rule': {
'dl_type': IPV4_ETH,
'ip_proto': 1,
'actions': {
'allow': 0,
'output': {
'tunnel': {
'type': 'vlan',
'tunnel_id': 200,
'dp': 'faucet-1',
'port': 'b%(port_2)d'}
}
}
}}
]
}
# DP-to-acl_in port mapping.
def acl_in_dp(self):
port_1 = self.port_map['port_1']
return {
'faucet-1': {
# First port 1, acl_in = 1
port_1: 1,
}
}
def setUp(self): # pylint: disable=invalid-name
super(FaucetTunnelSameDpTest, self).setUp()
self.build_net(
stack=True,
n_dps=self.NUM_DPS,
untagged_hosts={self.VID: self.NUM_HOSTS},
switch_to_switch_links=self.SWITCH_TO_SWITCH_LINKS,
hw_dpid=self.hw_dpid,
)
self.start_net()
def test_tunnel_established(self):
"""Test a tunnel path can be created."""
self.verify_stack_up()
src_host, dst_host, other_host = self.hosts_name_ordered()[:3]
self.verify_tunnel_established(src_host, dst_host, other_host)
class FaucetTunnelTest(FaucetStringOfDPTest):
NUM_DPS = 2
NUM_HOSTS = 2
SWITCH_TO_SWITCH_LINKS = 2
VID = 100
def acls(self):
dpid2 = self.dpids[1]
port2_1 = self.port_maps[dpid2]['port_1']
return {
1: [
{'rule': {
'dl_type': IPV4_ETH,
'ip_proto': 1,
'actions': {
'allow': 0,
'output': {
'tunnel': {
'type': 'vlan',
'tunnel_id': 200,
'dp': 'faucet-2',
'port': port2_1}
}
}
}}
]
}
# DP-to-acl_in port mapping.
def acl_in_dp(self):
port_1 = self.port_map['port_1']
return {
'faucet-1': {
# First port 1, acl_in = 1
port_1: 1,
}
}
def setUp(self): # pylint: disable=invalid-name
super(FaucetTunnelTest, self).setUp()
self.build_net(
stack=True,
n_dps=self.NUM_DPS,
untagged_hosts={self.VID: self.NUM_HOSTS},
switch_to_switch_links=self.SWITCH_TO_SWITCH_LINKS,
hw_dpid=self.hw_dpid,
)
self.start_net()
def test_tunnel_established(self):
"""Test a tunnel path can be created."""
self.verify_stack_up()
src_host, other_host, dst_host = self.hosts_name_ordered()[:3]
self.verify_tunnel_established(src_host, dst_host, other_host)
def test_tunnel_path_rerouted(self):
"""Test a tunnel path is rerouted when a stack is down."""
self.verify_stack_up()
first_stack_port = self.non_host_links(self.dpid)[0].port
self.one_stack_port_down(self.dpid, self.DP_NAME, first_stack_port)
src_host, other_host, dst_host = self.hosts_name_ordered()[:3]
self.verify_tunnel_established(src_host, dst_host, other_host, packets=10)
self.set_port_up(first_stack_port, self.dpid)
class FaucetGroupTableTest(FaucetUntaggedTest):
CONFIG = """
group_table: True
""" + CONFIG_BOILER_UNTAGGED
def test_group_exist(self):
self.assertEqual(
100,
self.get_group_id_for_matching_flow(
{'dl_vlan': '100', 'dl_dst': 'ff:ff:ff:ff:ff:ff'},
table_id=self._FLOOD_TABLE))
class FaucetTaggedGroupTableTest(FaucetTaggedTest):
CONFIG = """
group_table: True
""" + CONFIG_TAGGED_BOILER
def test_group_exist(self):
self.assertEqual(
100,
self.get_group_id_for_matching_flow(
{'dl_vlan': '100', 'dl_dst': 'ff:ff:ff:ff:ff:ff'},
table_id=self._FLOOD_TABLE))
class FaucetEthSrcMaskTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
eth_src: 0e:0d:00:00:00:00/ff:ff:00:00:00:00
actions:
allow: 1
- rule:
actions:
allow: 0
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
first_host.setMAC('0e:0d:00:00:00:99')
self.retry_net_ping(hosts=(first_host, second_host))
self.wait_nonzero_packet_count_flow(
{'dl_src': '0e:0d:00:00:00:00/ff:ff:00:00:00:00'},
table_id=self._PORT_ACL_TABLE)
class FaucetDestRewriteTest(FaucetUntaggedTest):
def override_mac(): # pylint: disable=no-method-argument,no-self-use
return '0e:00:00:00:00:02'
OVERRIDE_MAC = override_mac()
def rewrite_mac(): # pylint: disable=no-method-argument,no-self-use
return '0e:00:00:00:00:03'
REWRITE_MAC = rewrite_mac()
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
dl_dst: "%s"
actions:
allow: 1
output:
set_fields:
- eth_dst: "%s"
- rule:
actions:
allow: 1
""" % (override_mac(), rewrite_mac())
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
# We expect to see the rewritten MAC address.
tcpdump_filter = ('icmp and ether dst %s' % self.REWRITE_MAC)
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (second_host.IP(), self.OVERRIDE_MAC)),
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))],
timeout=5, packets=1)
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
def verify_dest_rewrite(self, source_host, overridden_host, rewrite_host, tcpdump_host):
overridden_host.setMAC(self.OVERRIDE_MAC)
rewrite_host.setMAC(self.REWRITE_MAC)
rewrite_host.cmd('arp -s %s %s' % (overridden_host.IP(), overridden_host.MAC()))
rewrite_host.cmd(' '.join((self.FPINGS_ARGS_ONE, overridden_host.IP())))
self.wait_until_matching_flow(
{'dl_dst': self.REWRITE_MAC},
table_id=self._ETH_DST_TABLE,
actions=['OUTPUT:%u' % self.port_map['port_3']])
tcpdump_filter = ('icmp and ether src %s and ether dst %s' % (
source_host.MAC(), rewrite_host.MAC()))
tcpdump_txt = self.tcpdump_helper(
tcpdump_host, tcpdump_filter, [
lambda: source_host.cmd(
'arp -s %s %s' % (rewrite_host.IP(), overridden_host.MAC())),
# this will fail if no reply
lambda: self.one_ipv4_ping(
source_host, rewrite_host.IP(), require_host_learned=False)],
timeout=3, packets=1)
# A ping from h1 to h2's MAC should appear at the third host, and not the
# second host, as the ACL should rewrite the destination MAC.
self.assertFalse(re.search(
'%s: ICMP echo request' % rewrite_host.IP(), tcpdump_txt))
def test_switching(self):
"""Tests that a acl can rewrite the destination mac address,
and the packet will only go out the port of the new mac.
(Continues through faucet pipeline)
"""
source_host, overridden_host, rewrite_host = self.hosts_name_ordered()[0:3]
self.verify_dest_rewrite(
source_host, overridden_host, rewrite_host, overridden_host)
@unittest.skip('use_idle_timeout unreliable')
class FaucetWithUseIdleTimeoutTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
"""
CONFIG = """
timeout: 1
use_idle_timeout: True
""" + CONFIG_BOILER_UNTAGGED
def wait_for_host_removed(self, host, in_port, timeout=5):
for _ in range(timeout):
if not self.host_learned(host, in_port=in_port, timeout=1):
return
self.fail('host %s still learned' % host)
def wait_for_flowremoved_msg(self, src_mac=None, dst_mac=None, timeout=30):
pattern = "OFPFlowRemoved"
mac = None
if src_mac:
pattern = "OFPFlowRemoved(.*)'eth_src': '%s'" % src_mac
mac = src_mac
if dst_mac:
pattern = "OFPFlowRemoved(.*)'eth_dst': '%s'" % dst_mac
mac = dst_mac
for _ in range(timeout):
for _, debug_log_name in self._get_ofchannel_logs():
with open(debug_log_name) as debug_log:
debug = debug_log.read()
if re.search(pattern, debug):
return
time.sleep(1)
self.fail('Not received OFPFlowRemoved for host %s' % mac)
def wait_for_host_log_msg(self, host_mac, msg):
log_file = self.env['faucet']['FAUCET_LOG']
host_log_re = r'.*%s %s.*' % (msg, host_mac)
self.wait_until_matching_lines_from_file(host_log_re, log_file)
def test_untagged(self):
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[:2]
self.swap_host_macs(first_host, second_host)
for host, port in (
(first_host, self.port_map['port_1']),
(second_host, self.port_map['port_2'])):
self.wait_for_flowremoved_msg(src_mac=host.MAC())
self.require_host_learned(host, in_port=int(port))
@unittest.skip('use_idle_timeout unreliable')
class FaucetWithUseIdleTimeoutRuleExpiredTest(FaucetWithUseIdleTimeoutTest):
def test_untagged(self):
"""Host that is actively sending should have its dst rule renewed as the
rule expires. Host that is not sending expires as usual.
"""
self.ping_all_when_learned()
first_host, second_host, third_host, fourth_host = self.hosts_name_ordered()
self.host_ipv4_alias(first_host, ipaddress.ip_interface('10.99.99.1/24'))
first_host.cmd('arp -s %s %s' % (second_host.IP(), second_host.MAC()))
first_host.cmd('timeout 120s ping -I 10.99.99.1 %s &' % second_host.IP())
for host in (second_host, third_host, fourth_host):
self.host_drop_all_ips(host)
self.wait_for_host_log_msg(first_host.MAC(), 'refreshing host')
self.assertTrue(self.host_learned(
first_host, in_port=int(self.port_map['port_1'])))
for host, port in (
(second_host, self.port_map['port_2']),
(third_host, self.port_map['port_3']),
(fourth_host, self.port_map['port_4'])):
self.wait_for_flowremoved_msg(src_mac=host.MAC())
self.wait_for_host_log_msg(host.MAC(), 'expiring host')
self.wait_for_host_removed(host, in_port=int(port))
class FaucetDisconnectTest(FaucetUntaggedTest):
"""Test that switch works properly after repeated disconnections
caused by DPID mismatch"""
def update_config(self, dpid):
"""Update config with good/bad DPID"""
conf = self._get_faucet_conf()
conf['dps'][self.DP_NAME]['dp_id'] = int(dpid)
self.reload_conf(
conf, self.faucet_config_path,
restart=True, cold_start=False, change_expected=False)
def test_untagged(self):
"""Run untagged test after disconnects and config update"""
# We update the config with a bad DPID and then wait for
# 'unknown datapath' messages, indicating switch connections that
# FAUCET has rejected. The switch should see them as
# 'connection reset by peer'.
mask = int(16*'f', 16)
bad_dpid = (int(self.dpid) + 0xdeadbeef) & mask
faucet_log = self.env['faucet']['FAUCET_LOG']
self.update_config(dpid=bad_dpid)
self.wait_until_matching_lines_from_file(
r'.*ERROR.*unknown datapath', faucet_log, timeout=60, count=4)
self.update_config(dpid=self.dpid)
super().test_untagged()
class FaucetBadFlowModTest(FaucetUntaggedTest):
"""Test that switch and FAUCET still work after we send some bad flow_mods"""
def base_flow_mod(self):
"""Return a base flow mod that we mess with"""
return {'dpid': self.dpid,
'cookie': 0,
'cookie_mask': 0,
'table_id': 0,
'idle_timeout': 29,
'hard_timeout': 91,
'flags': 1,
'priority': 1,
'match': {'in_port': 1},
'actions': [{
'type': 'OUTPUT',
'port': 2}]}
# For now, the flow_mods are reasonably well-formed but with
# parameters that are incorrect for the switch and for FAUCET
def bad_dpid(self):
"""Return a random, bad dpid parameter"""
mask = int(16*'f', 16)
dpid = (int(self.dpid) + random.randint(0, 1 << 63)) & mask
return {'dpid': dpid}
@staticmethod
def bad_table():
"""Return a bad table ID parameter"""
# This should be higher than FAUCET's max table ID
bad_table_start = 32
return {'table_id': random.randint(bad_table_start, 100)}
def bad_port(self):
"""Return a (hopefully very) bad port number"""
max_port = max(self.port_map.values())
offset = random.randint(0x1000, 0xE0000000)
mask = 0xEFFFFFFF
return (max_port + offset) & mask
def bad_match(self):
"""Return a bad match field"""
matches = (
# Bad input port
{'in_port': self.bad_port()},
# IPv4 (broadcast) src with bad ('reserved') ethertype
{'nw_src': '255.255.255.255', 'dl_type': 0xFFFF},
# IPv4 with IPv6 ethertype:
{'nw_src': '1.2.3.4', 'dl_type': 0x86DD},
# IPv4 address as IPv6 dst
{'ipv6_dst': '1.2.3.4', 'dl_type': 0x86DD},
# IPv6 dst with Bad/reserved ip_proto
{'ipv6_dst': '2001::aaaa:bbbb:cccc:1111', 'ip_proto': 255},
# Destination port but no transport protocol
{'tp_dst': 80},
# ARP opcode on non-ARP packet
{'arp_op': 0x3, 'dl_type': 0x1234})
match = random.sample(matches, 1)[0]
return {'match': match}
def bad_actions(self, count=1):
"""Return a questionable actions parameter"""
actions = (
{'type': 'OUTPUT', 'port': self.bad_port()},
{'type': 'PUSH_MPLS', 'ethertype': 0x8BAD},
{'type': 'SET_QUEUE', 'queue_id':
random.randint(0x8000, 0xFFFFFFFF)})
return {'actions': random.sample(actions, count)}
# Possible options for bad parameters
bad_options = ('dpid', 'table', 'match', 'actions')
def bad_flow_mod(self):
"""Return a flow mod with some bad parameters"""
flow_mod = self.base_flow_mod()
# Add two or more bad options
options = random.sample(self.bad_options,
random.randint(2, len(self.bad_options)))
for option in options:
param = getattr(self, 'bad_%s' % option)()
flow_mod.update(param)
return flow_mod
def send_flow_mod(self, flow_mod, timeout=5):
"""Send flow_mod to switch via ofctl"""
int_dpid = mininet_test_util.str_int_dpid(self.dpid)
return self._ofctl_post(int_dpid, 'stats/flowentry/modify',
timeout=timeout, params=flow_mod)
def tearDown(self, ignore_oferrors=True):
"""Ignore OF errors on teardown"""
oferrors = super().tearDown(ignore_oferrors)
oferrors = re.findall(r'type: (\w+)', oferrors)
counter = collections.Counter(oferrors)
error('Ignored OF error count: %s\n' % dict(counter))
# TODO: ensure at least one error is always generated.
# pylint: disable=arguments-differ
def test_untagged(self, count=10):
"""Send a bunch of bad flow mods, then verify connectivity"""
for _ in range(count):
flow_mod = self.bad_flow_mod()
error('sending bad flow_mod', flow_mod, '\n')
self.send_flow_mod(flow_mod)
self.ping_all_when_learned()
class FaucetUntaggedMorePortsBase(FaucetUntaggedTest):
"""Base class for untagged test with more ports"""
# pylint: disable=invalid-name
N_UNTAGGED = 16 # Maximum number of ports to test
EVENT_LOGGER_TIMEOUT = 180 # Timeout for event logger process
# Config lines for additional ports
CONFIG_EXTRA_PORT = """
{port}:
native_vlan: 100""" + "\n"
def _init_faucet_config(self): # pylint: disable=invalid-name
"""Extend config with more ports if needed"""
self.assertTrue(self.CONFIG.endswith(CONFIG_BOILER_UNTAGGED))
# We know how to extend the config for more ports
base_port_count = len(re.findall('port', CONFIG_BOILER_UNTAGGED))
ports = self.topo.dpid_ports(self.dpid)
for port in ports[base_port_count:]:
self.CONFIG += self.CONFIG_EXTRA_PORT.format(port=port)
super()._init_faucet_config()
def setUp(self):
"""Make sure N_UNTAGGED doesn't exceed hw port count"""
if self.config and self.config.get('hw_switch', False):
self.N_UNTAGGED = min(len(self.config['dp_ports']),
self.N_UNTAGGED)
error('(%d ports) ' % self.N_UNTAGGED)
super().setUp()
class FaucetSingleUntagged32PortTest(FaucetUntaggedMorePortsBase):
"""Untagged test with up to 32 ports"""
# pylint: disable=invalid-name
N_UNTAGGED = 32 # Maximum number of ports to test
@unittest.skip('slow and potentially unreliable on travis')
class FaucetSingleUntagged48PortTest(FaucetUntaggedMorePortsBase):
"""Untagged test with up to 48 ports"""
# pylint: disable=invalid-name
N_UNTAGGED = 48 # Maximum number of ports to test
EVENT_LOGGER_TIMEOUT = 360 # Timeout for event logger process
|
py | 1a3b1234b3f09f1e8a4f8990e8de37af88759465 | #-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
from .constants import (
DEFAULT_HTTP_TIMEOUT,
MANAGEMENT_HOST,
)
from .models import (
AffinityGroups,
AffinityGroup,
AvailabilityResponse,
Certificate,
Certificates,
DataVirtualHardDisk,
Deployment,
Disk,
Disks,
Locations,
HostedService,
HostedServices,
Images,
OperatingSystems,
OperatingSystemFamilies,
OSImage,
OSImageDetails,
PersistentVMRole,
ResourceExtensions,
ReservedIP,
ReservedIPs,
ReplicationProgress,
ReplicationProgressElement,
RoleSize,
RoleSizes,
StorageService,
StorageServices,
Subscription,
Subscriptions,
SubscriptionCertificate,
SubscriptionCertificates,
SubscriptionOperationCollection,
VirtualNetworkSites,
VMImages,
)
from ._common_conversion import (
_str,
)
from ._common_error import (
_validate_not_none,
)
from .servicemanagementclient import (
_ServiceManagementClient,
)
from ._serialization import (
_XmlSerializer,
)
class ServiceManagementService(_ServiceManagementClient):
def __init__(self, subscription_id=None, cert_file=None,
host=MANAGEMENT_HOST, request_session=None,
timeout=DEFAULT_HTTP_TIMEOUT):
'''
Initializes the management service.
subscription_id:
Subscription to manage.
cert_file:
Path to .pem certificate file (httplib), or location of the
certificate in your Personal certificate store (winhttp) in the
CURRENT_USER\my\CertificateName format.
If a request_session is specified, then this is unused.
host:
Live ServiceClient URL. Defaults to Azure public cloud.
request_session:
Session object to use for http requests. If this is specified, it
replaces the default use of httplib or winhttp. Also, the cert_file
parameter is unused when a session is passed in.
The session object handles authentication, and as such can support
multiple types of authentication: .pem certificate, oauth.
For example, you can pass in a Session instance from the requests
library. To use .pem certificate authentication with requests
library, set the path to the .pem file on the session.cert
attribute.
timeout:
Optional. Timeout for the http request, in seconds.
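Example (illustrative only; the subscription id and certificate path
are placeholder values):
    sms = ServiceManagementService(
        '<subscription-id>', cert_file='/path/to/management_cert.pem')
    result = sms.list_hosted_services()
    for hosted_service in result:
        print(hosted_service.service_name)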
'''
super(ServiceManagementService, self).__init__(
subscription_id, cert_file, host, request_session, timeout)
#--Operations for subscriptions --------------------------------------
def list_role_sizes(self):
'''
Lists the role sizes that are available under the specified
subscription.
'''
return self._perform_get(self._get_role_sizes_path(),
RoleSizes)
def list_subscriptions(self):
'''
Returns a list of subscriptions that you can access.
You must make sure that the request that is made to the management
service is secure using an Active Directory access token.
'''
return self._perform_get(self._get_subscriptions_path(),
Subscriptions)
#--Operations for storage accounts -----------------------------------
def list_storage_accounts(self):
'''
Lists the storage accounts available under the current subscription.
'''
return self._perform_get(self._get_storage_service_path(),
StorageServices)
def get_storage_account_properties(self, service_name):
'''
Returns system properties for the specified storage account.
service_name:
Name of the storage service account.
'''
_validate_not_none('service_name', service_name)
return self._perform_get(self._get_storage_service_path(service_name),
StorageService)
def get_storage_account_keys(self, service_name):
'''
Returns the primary and secondary access keys for the specified
storage account.
service_name:
Name of the storage service account.
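Example (illustrative; the attribute names on the returned object are
assumptions based on the StorageService model):
    keys = sms.get_storage_account_keys('mystorageaccount')
    primary = keys.storage_service_keys.primary
    secondary = keys.storage_service_keys.secondary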
'''
_validate_not_none('service_name', service_name)
return self._perform_get(
self._get_storage_service_path(service_name) + '/keys',
StorageService)
def regenerate_storage_account_keys(self, service_name, key_type):
'''
Regenerates the primary or secondary access key for the specified
storage account.
service_name:
Name of the storage service account.
key_type:
Specifies which key to regenerate. Valid values are:
Primary, Secondary
'''
_validate_not_none('service_name', service_name)
_validate_not_none('key_type', key_type)
return self._perform_post(
self._get_storage_service_path(
service_name) + '/keys?action=regenerate',
_XmlSerializer.regenerate_keys_to_xml(
key_type),
StorageService)
def create_storage_account(self, service_name, description, label,
affinity_group=None, location=None,
geo_replication_enabled=None,
extended_properties=None,
account_type='Standard_GRS'):
'''
Creates a new storage account in Windows Azure.
service_name:
A name for the storage account that is unique within Windows Azure.
Storage account names must be between 3 and 24 characters in length
and use numbers and lower-case letters only.
description:
A description for the storage account. The description may be up
to 1024 characters in length.
label:
A name for the storage account. The name may be up to 100
characters in length. The name can be used to identify the storage
account for your tracking purposes.
affinity_group:
The name of an existing affinity group in the specified
subscription. You can specify either a location or affinity_group,
but not both.
location:
The location where the storage account is created. You can specify
either a location or affinity_group, but not both.
geo_replication_enabled:
Deprecated. Replaced by the account_type parameter.
extended_properties:
Dictionary containing name/value pairs of storage account
properties. You can have a maximum of 50 extended property
name/value pairs. The maximum length of the Name element is 64
characters, only alphanumeric characters and underscores are valid
in the Name, and the name must start with a letter. The value has
a maximum length of 255 characters.
account_type:
Specifies whether the account supports locally-redundant storage,
geo-redundant storage, zone-redundant storage, or read access
geo-redundant storage.
Possible values are:
Standard_LRS, Standard_ZRS, Standard_GRS, Standard_RAGRS
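Example (illustrative; the account name, label, location and account
type are placeholder values):
    result = sms.create_storage_account(
        'mystorageaccount', 'example account', 'mystorageaccount',
        location='West US', account_type='Standard_LRS')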
'''
_validate_not_none('service_name', service_name)
_validate_not_none('description', description)
_validate_not_none('label', label)
if affinity_group is None and location is None:
raise ValueError(
'location or affinity_group must be specified')
if affinity_group is not None and location is not None:
raise ValueError(
'Only one of location or affinity_group needs to be specified')
if geo_replication_enabled == False:
account_type = 'Standard_LRS'
return self._perform_post(
self._get_storage_service_path(),
_XmlSerializer.create_storage_service_input_to_xml(
service_name,
description,
label,
affinity_group,
location,
account_type,
extended_properties),
as_async=True)
def update_storage_account(self, service_name, description=None,
label=None, geo_replication_enabled=None,
extended_properties=None,
account_type='Standard_GRS'):
'''
Updates the label, the description, and enables or disables the
geo-replication status for a storage account in Windows Azure.
service_name:
Name of the storage service account.
description:
A description for the storage account. The description may be up
to 1024 characters in length.
label:
A name for the storage account. The name may be up to 100
characters in length. The name can be used to identify the storage
account for your tracking purposes.
geo_replication_enabled:
Deprecated. Replaced by the account_type parameter.
extended_properties:
Dictionary containing name/value pairs of storage account
properties. You can have a maximum of 50 extended property
name/value pairs. The maximum length of the Name element is 64
characters, only alphanumeric characters and underscores are valid
in the Name, and the name must start with a letter. The value has
a maximum length of 255 characters.
account_type:
Specifies whether the account supports locally-redundant storage,
geo-redundant storage, zone-redundant storage, or read access
geo-redundant storage.
Possible values are:
Standard_LRS, Standard_ZRS, Standard_GRS, Standard_RAGRS
'''
_validate_not_none('service_name', service_name)
if geo_replication_enabled == False:
account_type = 'Standard_LRS'
return self._perform_put(
self._get_storage_service_path(service_name),
_XmlSerializer.update_storage_service_input_to_xml(
description,
label,
account_type,
extended_properties))
def delete_storage_account(self, service_name):
'''
Deletes the specified storage account from Windows Azure.
service_name:
Name of the storage service account.
'''
_validate_not_none('service_name', service_name)
return self._perform_delete(
self._get_storage_service_path(service_name),
as_async=True)
def check_storage_account_name_availability(self, service_name):
'''
Checks to see if the specified storage account name is available, or
if it has already been taken.
service_name:
Name of the storage service account.
'''
_validate_not_none('service_name', service_name)
return self._perform_get(
self._get_storage_service_path() +
'/operations/isavailable/' +
_str(service_name) + '',
AvailabilityResponse)
#--Operations for hosted services ------------------------------------
def list_hosted_services(self):
'''
Lists the hosted services available under the current subscription.
Note that you will receive a list of HostedService instances, without
all details inside. For instance, deployments will be None. If you
want deployments information for a specific host service, you have to
call get_hosted_service_properties with embed_detail=True.
'''
return self._perform_get(self._get_hosted_service_path(),
HostedServices)
def get_hosted_service_properties(self, service_name, embed_detail=False):
'''
Retrieves system properties for the specified hosted service. These
properties include the service name and service type; the name of the
affinity group to which the service belongs, or its location if it is
not part of an affinity group; and optionally, information on the
service's deployments.
service_name:
Name of the hosted service.
embed_detail:
When True, the management service returns properties for all
deployments of the service, as well as for the service itself.
'''
_validate_not_none('service_name', service_name)
_validate_not_none('embed_detail', embed_detail)
return self._perform_get(
self._get_hosted_service_path(service_name) +
'?embed-detail=' +
_str(embed_detail).lower(),
HostedService)
def create_hosted_service(self, service_name, label, description=None,
location=None, affinity_group=None,
extended_properties=None):
'''
Creates a new hosted service in Windows Azure.
service_name:
A name for the hosted service that is unique within Windows Azure.
This name is the DNS prefix name and can be used to access the
hosted service.
label:
A name for the hosted service. The name can be up to 100 characters
in length. The name can be used to identify the hosted service for
your tracking purposes.
description:
A description for the hosted service. The description can be up to
1024 characters in length.
location:
The location where the hosted service will be created. You can
specify either a location or affinity_group, but not both.
affinity_group:
The name of an existing affinity group associated with this
subscription. This name is a GUID and can be retrieved by examining
the name element of the response body returned by
list_affinity_groups. You can specify either a location or
affinity_group, but not both.
extended_properties:
Dictionary containing name/value pairs of hosted service
properties. You can have a maximum of 50 extended property
name/value pairs. The maximum length of the Name element is 64
characters, only alphanumeric characters and underscores are valid
in the Name, and the name must start with a letter. The value has
a maximum length of 255 characters.
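Example (illustrative; the service name, label and location are
placeholder values):
    result = sms.create_hosted_service(
        'myhostedservice', label='myhostedservice', location='West US')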
'''
_validate_not_none('service_name', service_name)
_validate_not_none('label', label)
if affinity_group is None and location is None:
raise ValueError(
'location or affinity_group must be specified')
if affinity_group is not None and location is not None:
raise ValueError(
'Only one of location or affinity_group needs to be specified')
return self._perform_post(self._get_hosted_service_path(),
_XmlSerializer.create_hosted_service_to_xml(
service_name,
label,
description,
location,
affinity_group,
extended_properties),
as_async=True)
def update_hosted_service(self, service_name, label=None, description=None,
extended_properties=None):
'''
Updates the label and/or the description for a hosted service in
Windows Azure.
service_name:
Name of the hosted service.
label:
A name for the hosted service. The name may be up to 100 characters
in length. You must specify a value for either Label or
Description, or for both. It is recommended that the label be
unique within the subscription. The name can be used
to identify the hosted service for your tracking purposes.
description:
A description for the hosted service. The description may be up to
1024 characters in length. You must specify a value for either
Label or Description, or for both.
extended_properties:
Dictionary containing name/value pairs of hosted service
properties. You can have a maximum of 50 extended property
name/value pairs. The maximum length of the Name element is 64
characters, only alphanumeric characters and underscores are valid
in the Name, and the name must start with a letter. The value has
a maximum length of 255 characters.
'''
_validate_not_none('service_name', service_name)
return self._perform_put(self._get_hosted_service_path(service_name),
_XmlSerializer.update_hosted_service_to_xml(
label,
description,
extended_properties))
def delete_hosted_service(self, service_name, complete=False):
'''
Deletes the specified hosted service from Windows Azure.
service_name:
Name of the hosted service.
complete:
True if all OS/data disks and the source blobs for the disks should
also be deleted from storage.
'''
_validate_not_none('service_name', service_name)
path = self._get_hosted_service_path(service_name)
if complete == True:
path = path + '?comp=media'
return self._perform_delete(path, as_async=True)
def get_deployment_by_slot(self, service_name, deployment_slot):
'''
Returns configuration information, status, and system properties for
a deployment.
service_name:
Name of the hosted service.
deployment_slot:
The environment to which the hosted service is deployed. Valid
values are: staging, production
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_slot', deployment_slot)
return self._perform_get(
self._get_deployment_path_using_slot(
service_name, deployment_slot),
Deployment)
def get_deployment_by_name(self, service_name, deployment_name):
'''
Returns configuration information, status, and system properties for a
deployment.
service_name:
Name of the hosted service.
deployment_name:
The name of the deployment.
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_name', deployment_name)
return self._perform_get(
self._get_deployment_path_using_name(
service_name, deployment_name),
Deployment)
def create_deployment(self, service_name, deployment_slot, name,
package_url, label, configuration,
start_deployment=False,
treat_warnings_as_error=False,
extended_properties=None):
'''
Uploads a new service package and creates a new deployment on staging
or production.
service_name:
Name of the hosted service.
deployment_slot:
The environment to which the hosted service is deployed. Valid
values are: staging, production
name:
The name for the deployment. The deployment name must be unique
among other deployments for the hosted service.
package_url:
A URL that refers to the location of the service package in the
Blob service. The service package can be located either in a
storage account beneath the same subscription or a Shared Access
Signature (SAS) URI from any storage account.
label:
A name for the hosted service. The name can be up to 100 characters
in length. It is recommended that the label be unique within the
subscription. The name can be used to identify the hosted service
for your tracking purposes.
configuration:
The base-64 encoded service configuration file for the deployment.
start_deployment:
Indicates whether to start the deployment immediately after it is
created. If false, the service model is still deployed to the
virtual machines but the code is not run immediately. Instead, the
service is Suspended until you call Update Deployment Status and
set the status to Running, at which time the service will be
started. A deployed service still incurs charges, even if it is
suspended.
treat_warnings_as_error:
Indicates whether to treat package validation warnings as errors.
If set to true, the Create Deployment operation fails if there
are validation warnings on the service package.
extended_properties:
Dictionary containing name/value pairs of deployment
properties. You can have a maximum of 50 extended property
name/value pairs. The maximum length of the Name element is 64
characters, only alphanumeric characters and underscores are valid
in the Name, and the name must start with a letter. The value has
a maximum length of 255 characters.
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_slot', deployment_slot)
_validate_not_none('name', name)
_validate_not_none('package_url', package_url)
_validate_not_none('label', label)
_validate_not_none('configuration', configuration)
return self._perform_post(
self._get_deployment_path_using_slot(
service_name, deployment_slot),
_XmlSerializer.create_deployment_to_xml(
name,
package_url,
label,
configuration,
start_deployment,
treat_warnings_as_error,
extended_properties),
as_async=True)
def delete_deployment(self, service_name, deployment_name, delete_vhd=False):
'''
Deletes the specified deployment.
service_name:
Name of the hosted service.
deployment_name:
The name of the deployment.
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_name', deployment_name)
path= self._get_deployment_path_using_name(service_name, deployment_name)
if delete_vhd:
path += '?comp=media'
return self._perform_delete(
path,
as_async=True)
def swap_deployment(self, service_name, production, source_deployment):
'''
Initiates a virtual IP swap between the staging and production
deployment environments for a service. If the service is currently
running in the staging environment, it will be swapped to the
production environment. If it is running in the production
environment, it will be swapped to staging.
service_name:
Name of the hosted service.
production:
The name of the production deployment.
source_deployment:
The name of the source deployment.
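        Example (sketch; sms is an authenticated ServiceManagementService
        instance and the deployment names are placeholders):
            result = sms.swap_deployment(
                'myservice', 'productiondeployment', 'stagingdeployment')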
'''
_validate_not_none('service_name', service_name)
_validate_not_none('production', production)
_validate_not_none('source_deployment', source_deployment)
return self._perform_post(self._get_hosted_service_path(service_name),
_XmlSerializer.swap_deployment_to_xml(
production, source_deployment),
as_async=True)
def change_deployment_configuration(self, service_name, deployment_name,
configuration,
treat_warnings_as_error=False,
mode='Auto', extended_properties=None):
'''
Initiates a change to the deployment configuration.
service_name:
Name of the hosted service.
deployment_name:
The name of the deployment.
configuration:
The base-64 encoded service configuration file for the deployment.
treat_warnings_as_error:
Indicates whether to treat package validation warnings as errors.
            If set to true, the Create Deployment operation fails if there
are validation warnings on the service package.
mode:
If set to Manual, WalkUpgradeDomain must be called to apply the
update. If set to Auto, the Windows Azure platform will
            automatically apply the update to each upgrade domain for the
service. Possible values are: Auto, Manual
extended_properties:
Dictionary containing name/value pairs of storage account
properties. You can have a maximum of 50 extended property
name/value pairs. The maximum length of the Name element is 64
characters, only alphanumeric characters and underscores are valid
in the Name, and the name must start with a letter. The value has
a maximum length of 255 characters.
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_name', deployment_name)
_validate_not_none('configuration', configuration)
return self._perform_post(
self._get_deployment_path_using_name(
service_name, deployment_name) + '/?comp=config',
_XmlSerializer.change_deployment_to_xml(
configuration,
treat_warnings_as_error,
mode,
extended_properties),
as_async=True)
def update_deployment_status(self, service_name, deployment_name, status):
'''
Initiates a change in deployment status.
service_name:
Name of the hosted service.
deployment_name:
The name of the deployment.
status:
The change to initiate to the deployment status. Possible values
include:
Running, Suspended
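        Example (sketch; service and deployment names are placeholders):
            result = sms.update_deployment_status(
                'myservice', 'mydeployment', 'Running')
            operation = sms.get_operation_status(result.request_id)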
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_name', deployment_name)
_validate_not_none('status', status)
return self._perform_post(
self._get_deployment_path_using_name(
service_name, deployment_name) + '/?comp=status',
_XmlSerializer.update_deployment_status_to_xml(
status),
as_async=True)
def upgrade_deployment(self, service_name, deployment_name, mode,
package_url, configuration, label, force,
role_to_upgrade=None, extended_properties=None):
'''
Initiates an upgrade.
service_name:
Name of the hosted service.
deployment_name:
The name of the deployment.
mode:
If set to Manual, WalkUpgradeDomain must be called to apply the
update. If set to Auto, the Windows Azure platform will
            automatically apply the update to each upgrade domain for the
service. Possible values are: Auto, Manual
package_url:
A URL that refers to the location of the service package in the
Blob service. The service package can be located either in a
storage account beneath the same subscription or a Shared Access
Signature (SAS) URI from any storage account.
configuration:
The base-64 encoded service configuration file for the deployment.
label:
A name for the hosted service. The name can be up to 100 characters
in length. It is recommended that the label be unique within the
subscription. The name can be used to identify the hosted service
for your tracking purposes.
force:
Specifies whether the rollback should proceed even when it will
cause local data to be lost from some role instances. True if the
rollback should proceed; otherwise false if the rollback should
fail.
role_to_upgrade:
The name of the specific role to upgrade.
extended_properties:
Dictionary containing name/value pairs of storage account
properties. You can have a maximum of 50 extended property
name/value pairs. The maximum length of the Name element is 64
characters, only alphanumeric characters and underscores are valid
in the Name, and the name must start with a letter. The value has
a maximum length of 255 characters.
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_name', deployment_name)
_validate_not_none('mode', mode)
_validate_not_none('package_url', package_url)
_validate_not_none('configuration', configuration)
_validate_not_none('label', label)
_validate_not_none('force', force)
return self._perform_post(
self._get_deployment_path_using_name(
service_name, deployment_name) + '/?comp=upgrade',
_XmlSerializer.upgrade_deployment_to_xml(
mode,
package_url,
configuration,
label,
role_to_upgrade,
force,
extended_properties),
as_async=True)
def walk_upgrade_domain(self, service_name, deployment_name,
upgrade_domain):
'''
Specifies the next upgrade domain to be walked during manual in-place
upgrade or configuration change.
service_name:
Name of the hosted service.
deployment_name:
The name of the deployment.
upgrade_domain:
An integer value that identifies the upgrade domain to walk.
Upgrade domains are identified with a zero-based index: the first
upgrade domain has an ID of 0, the second has an ID of 1, and so on.
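        Example (sketch; assumes a prior upgrade_deployment call with
        mode='Manual' and a deployment with two upgrade domains; all
        names are placeholders):
            for domain in range(2):
                result = sms.walk_upgrade_domain(
                    'myservice', 'mydeployment', domain)
                # poll result.request_id to completion before the next domain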
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_name', deployment_name)
_validate_not_none('upgrade_domain', upgrade_domain)
return self._perform_post(
self._get_deployment_path_using_name(
service_name, deployment_name) + '/?comp=walkupgradedomain',
_XmlSerializer.walk_upgrade_domain_to_xml(
upgrade_domain),
as_async=True)
def rollback_update_or_upgrade(self, service_name, deployment_name, mode,
force):
'''
Cancels an in progress configuration change (update) or upgrade and
returns the deployment to its state before the upgrade or
configuration change was started.
service_name:
Name of the hosted service.
deployment_name:
The name of the deployment.
mode:
Specifies whether the rollback should proceed automatically.
auto - The rollback proceeds without further user input.
manual - You must call the Walk Upgrade Domain operation to
apply the rollback to each upgrade domain.
force:
Specifies whether the rollback should proceed even when it will
cause local data to be lost from some role instances. True if the
rollback should proceed; otherwise false if the rollback should
fail.
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_name', deployment_name)
_validate_not_none('mode', mode)
_validate_not_none('force', force)
return self._perform_post(
self._get_deployment_path_using_name(
service_name, deployment_name) + '/?comp=rollback',
_XmlSerializer.rollback_upgrade_to_xml(
mode, force),
as_async=True)
def reboot_role_instance(self, service_name, deployment_name,
role_instance_name):
'''
Requests a reboot of a role instance that is running in a deployment.
service_name:
Name of the hosted service.
deployment_name:
The name of the deployment.
role_instance_name:
The name of the role instance.
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_name', deployment_name)
_validate_not_none('role_instance_name', role_instance_name)
return self._perform_post(
self._get_deployment_path_using_name(
service_name, deployment_name) + \
'/roleinstances/' + _str(role_instance_name) + \
'?comp=reboot',
'',
as_async=True)
def reimage_role_instance(self, service_name, deployment_name,
role_instance_name):
'''
Requests a reimage of a role instance that is running in a deployment.
service_name:
Name of the hosted service.
deployment_name:
The name of the deployment.
role_instance_name:
The name of the role instance.
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_name', deployment_name)
_validate_not_none('role_instance_name', role_instance_name)
return self._perform_post(
self._get_deployment_path_using_name(
service_name, deployment_name) + \
'/roleinstances/' + _str(role_instance_name) + \
'?comp=reimage',
'',
as_async=True)
def rebuild_role_instance(self, service_name, deployment_name,
role_instance_name):
'''
Reinstalls the operating system on instances of web roles or worker
roles and initializes the storage resources that are used by them. If
you do not want to initialize storage resources, you can use
reimage_role_instance.
service_name:
Name of the hosted service.
deployment_name:
The name of the deployment.
role_instance_name:
The name of the role instance.
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_name', deployment_name)
_validate_not_none('role_instance_name', role_instance_name)
return self._perform_post(
self._get_deployment_path_using_name(
service_name, deployment_name) + \
'/roleinstances/' + _str(role_instance_name) + \
'?comp=rebuild&resources=allLocalDrives',
'',
as_async=True)
def delete_role_instances(self, service_name, deployment_name,
role_instance_names):
'''
        Deletes the specified role instances from the deployment.
service_name:
Name of the hosted service.
deployment_name:
The name of the deployment.
role_instance_names:
List of role instance names.
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_name', deployment_name)
_validate_not_none('role_instance_names', role_instance_names)
return self._perform_post(
self._get_deployment_path_using_name(
service_name, deployment_name) + '/roleinstances/?comp=delete',
_XmlSerializer.role_instances_to_xml(role_instance_names),
as_async=True)
def check_hosted_service_name_availability(self, service_name):
'''
Checks to see if the specified hosted service name is available, or if
it has already been taken.
service_name:
Name of the hosted service.
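        Example (sketch; the availability flag is assumed to be exposed as
        the result attribute of the returned AvailabilityResponse):
            response = sms.check_hosted_service_name_availability('myname')
            if not response.result:
                raise ValueError('hosted service name is already taken')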
'''
_validate_not_none('service_name', service_name)
return self._perform_get(
'/' + self.subscription_id +
'/services/hostedservices/operations/isavailable/' +
_str(service_name) + '',
AvailabilityResponse)
#--Operations for service certificates -------------------------------
def list_service_certificates(self, service_name):
'''
Lists all of the service certificates associated with the specified
hosted service.
service_name:
Name of the hosted service.
'''
_validate_not_none('service_name', service_name)
return self._perform_get(
'/' + self.subscription_id + '/services/hostedservices/' +
_str(service_name) + '/certificates',
Certificates)
def get_service_certificate(self, service_name, thumbalgorithm, thumbprint):
'''
Returns the public data for the specified X.509 certificate associated
with a hosted service.
service_name:
Name of the hosted service.
thumbalgorithm:
The algorithm for the certificate's thumbprint.
thumbprint:
The hexadecimal representation of the thumbprint.
'''
_validate_not_none('service_name', service_name)
_validate_not_none('thumbalgorithm', thumbalgorithm)
_validate_not_none('thumbprint', thumbprint)
return self._perform_get(
'/' + self.subscription_id + '/services/hostedservices/' +
_str(service_name) + '/certificates/' +
_str(thumbalgorithm) + '-' + _str(thumbprint) + '',
Certificate)
def add_service_certificate(self, service_name, data, certificate_format,
password=None):
'''
Adds a certificate to a hosted service.
service_name:
Name of the hosted service.
data:
The base-64 encoded form of the pfx/cer file.
certificate_format:
The service certificate format.
password:
            The certificate password. Defaults to None when using the cer
            format.
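        Example (sketch; file name and password are placeholders):
            import base64
            with open('mycert.pfx', 'rb') as f:
                cert_data = base64.b64encode(f.read()).decode('ascii')
            result = sms.add_service_certificate(
                'myservice', cert_data, 'pfx', 'certificatepassword')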
'''
_validate_not_none('service_name', service_name)
_validate_not_none('data', data)
_validate_not_none('certificate_format', certificate_format)
_validate_not_none('password', password)
return self._perform_post(
'/' + self.subscription_id + '/services/hostedservices/' +
_str(service_name) + '/certificates',
_XmlSerializer.certificate_file_to_xml(
data, certificate_format, password),
as_async=True)
def delete_service_certificate(self, service_name, thumbalgorithm,
thumbprint):
'''
Deletes a service certificate from the certificate store of a hosted
service.
service_name:
Name of the hosted service.
thumbalgorithm:
The algorithm for the certificate's thumbprint.
thumbprint:
The hexadecimal representation of the thumbprint.
'''
_validate_not_none('service_name', service_name)
_validate_not_none('thumbalgorithm', thumbalgorithm)
_validate_not_none('thumbprint', thumbprint)
return self._perform_delete(
'/' + self.subscription_id + '/services/hostedservices/' +
_str(service_name) + '/certificates/' +
_str(thumbalgorithm) + '-' + _str(thumbprint),
as_async=True)
#--Operations for management certificates ----------------------------
def list_management_certificates(self):
'''
The List Management Certificates operation lists and returns basic
information about all of the management certificates associated with
the specified subscription. Management certificates, which are also
known as subscription certificates, authenticate clients attempting to
connect to resources associated with your Windows Azure subscription.
'''
return self._perform_get('/' + self.subscription_id + '/certificates',
SubscriptionCertificates)
def get_management_certificate(self, thumbprint):
'''
The Get Management Certificate operation retrieves information about
the management certificate with the specified thumbprint. Management
certificates, which are also known as subscription certificates,
authenticate clients attempting to connect to resources associated
with your Windows Azure subscription.
thumbprint:
The thumbprint value of the certificate.
'''
_validate_not_none('thumbprint', thumbprint)
return self._perform_get(
'/' + self.subscription_id + '/certificates/' + _str(thumbprint),
SubscriptionCertificate)
def add_management_certificate(self, public_key, thumbprint, data):
'''
The Add Management Certificate operation adds a certificate to the
list of management certificates. Management certificates, which are
also known as subscription certificates, authenticate clients
attempting to connect to resources associated with your Windows Azure
subscription.
public_key:
A base64 representation of the management certificate public key.
thumbprint:
The thumb print that uniquely identifies the management
certificate.
data:
The certificate's raw data in base-64 encoded .cer format.
'''
_validate_not_none('public_key', public_key)
_validate_not_none('thumbprint', thumbprint)
_validate_not_none('data', data)
return self._perform_post(
'/' + self.subscription_id + '/certificates',
_XmlSerializer.subscription_certificate_to_xml(
public_key, thumbprint, data))
def delete_management_certificate(self, thumbprint):
'''
The Delete Management Certificate operation deletes a certificate from
the list of management certificates. Management certificates, which
are also known as subscription certificates, authenticate clients
attempting to connect to resources associated with your Windows Azure
subscription.
thumbprint:
The thumb print that uniquely identifies the management
certificate.
'''
_validate_not_none('thumbprint', thumbprint)
return self._perform_delete(
'/' + self.subscription_id + '/certificates/' + _str(thumbprint))
#--Operations for affinity groups ------------------------------------
def list_affinity_groups(self):
'''
Lists the affinity groups associated with the specified subscription.
'''
return self._perform_get(
'/' + self.subscription_id + '/affinitygroups',
AffinityGroups)
def get_affinity_group_properties(self, affinity_group_name):
'''
Returns the system properties associated with the specified affinity
group.
affinity_group_name:
The name of the affinity group.
'''
_validate_not_none('affinity_group_name', affinity_group_name)
return self._perform_get(
'/' + self.subscription_id + '/affinitygroups/' +
_str(affinity_group_name) + '',
AffinityGroup)
def create_affinity_group(self, name, label, location, description=None):
'''
Creates a new affinity group for the specified subscription.
name:
A name for the affinity group that is unique to the subscription.
label:
A name for the affinity group. The name can be up to 100 characters
in length.
location:
The data center location where the affinity group will be created.
            To list available locations, use the list_locations function.
description:
A description for the affinity group. The description can be up to
1024 characters in length.
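        Example (sketch; names and location are placeholders):
            sms.create_affinity_group(
                'myaffinitygroup', 'My Affinity Group', 'West US',
                'Groups the storage account and cloud service for my app')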
'''
_validate_not_none('name', name)
_validate_not_none('label', label)
_validate_not_none('location', location)
return self._perform_post(
'/' + self.subscription_id + '/affinitygroups',
_XmlSerializer.create_affinity_group_to_xml(name,
label,
description,
location))
def update_affinity_group(self, affinity_group_name, label,
description=None):
'''
Updates the label and/or the description for an affinity group for the
specified subscription.
affinity_group_name:
The name of the affinity group.
label:
A name for the affinity group. The name can be up to 100 characters
in length.
description:
A description for the affinity group. The description can be up to
1024 characters in length.
'''
_validate_not_none('affinity_group_name', affinity_group_name)
_validate_not_none('label', label)
return self._perform_put(
'/' + self.subscription_id + '/affinitygroups/' +
_str(affinity_group_name),
_XmlSerializer.update_affinity_group_to_xml(label, description))
def delete_affinity_group(self, affinity_group_name):
'''
Deletes an affinity group in the specified subscription.
affinity_group_name:
The name of the affinity group.
'''
_validate_not_none('affinity_group_name', affinity_group_name)
return self._perform_delete('/' + self.subscription_id + \
'/affinitygroups/' + \
_str(affinity_group_name))
#--Operations for locations ------------------------------------------
def list_locations(self):
'''
Lists all of the data center locations that are valid for your
subscription.
'''
return self._perform_get('/' + self.subscription_id + '/locations',
Locations)
#--Operations for retrieving operating system information ------------
def list_operating_systems(self):
'''
Lists the versions of the guest operating system that are currently
available in Windows Azure.
'''
return self._perform_get(
'/' + self.subscription_id + '/operatingsystems',
OperatingSystems)
def list_operating_system_families(self):
'''
Lists the guest operating system families available in Windows Azure,
and also lists the operating system versions available for each family.
'''
return self._perform_get(
'/' + self.subscription_id + '/operatingsystemfamilies',
OperatingSystemFamilies)
#--Operations for retrieving subscription history --------------------
def get_subscription(self):
'''
Returns account and resource allocation information on the specified
subscription.
'''
return self._perform_get('/' + self.subscription_id + '',
Subscription)
# Operations for retrieving subscription operations ------------------
def list_subscription_operations(self, start_time=None, end_time=None, object_id_filter=None,
operation_result_filter=None, continuation_token=None):
'''
List subscription operations.
        start_time:
            Required. An ISO8601 date.
        end_time:
            Required. An ISO8601 date.
        object_id_filter:
            Optional. Returns subscription operations only for the specified
            object type and object ID.
        operation_result_filter:
            Optional. Returns subscription operations only for the specified
            result status, either Succeeded, Failed, or InProgress.
        continuation_token:
            Optional.
More information at:
https://msdn.microsoft.com/en-us/library/azure/gg715318.aspx
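        Example (sketch; the dates are placeholders):
            operations = sms.list_subscription_operations(
                '2015-01-01T00:00:00Z', '2015-01-31T00:00:00Z',
                operation_result_filter='Succeeded')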
'''
start_time = ('StartTime=' + start_time) if start_time else ''
end_time = ('EndTime=' + end_time) if end_time else ''
object_id_filter = ('ObjectIdFilter=' + object_id_filter) if object_id_filter else ''
operation_result_filter = ('OperationResultFilter=' + operation_result_filter) if operation_result_filter else ''
continuation_token = ('ContinuationToken=' + continuation_token) if continuation_token else ''
parameters = ('&'.join(v for v in (start_time, end_time, object_id_filter, operation_result_filter, continuation_token) if v))
parameters = '?' + parameters if parameters else ''
return self._perform_get(self._get_list_subscription_operations_path() + parameters,
SubscriptionOperationCollection)
#--Operations for reserved ip addresses -----------------------------
def create_reserved_ip_address(self, name, label=None, location=None):
'''
Reserves an IPv4 address for the specified subscription.
name:
Required. Specifies the name for the reserved IP address.
label:
Optional. Specifies a label for the reserved IP address. The label
can be up to 100 characters long and can be used for your tracking
purposes.
location:
Required. Specifies the location of the reserved IP address. This
should be the same location that is assigned to the cloud service
containing the deployment that will use the reserved IP address.
To see the available locations, you can use list_locations.
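        Example (sketch; name, label, and location are placeholders):
            result = sms.create_reserved_ip_address(
                'myreservedip', 'frontend VIP', 'West US')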
'''
_validate_not_none('name', name)
return self._perform_post(
self._get_reserved_ip_path(),
_XmlSerializer.create_reserved_ip_to_xml(name, label, location),
as_async=True)
def delete_reserved_ip_address(self, name):
'''
Deletes a reserved IP address from the specified subscription.
name:
Required. Name of the reserved IP address.
'''
_validate_not_none('name', name)
return self._perform_delete(self._get_reserved_ip_path(name),
as_async=True)
def associate_reserved_ip_address(
self, name, service_name, deployment_name, virtual_ip_name=None
):
'''
        Associate an existing reserved IP address with a deployment.
name:
Required. Name of the reserved IP address.
service_name:
Required. Name of the hosted service.
deployment_name:
Required. Name of the deployment.
virtual_ip_name:
Optional. Name of the VirtualIP in case of multi Vip tenant.
If this value is not specified default virtualIP is used
for this operation.
'''
_validate_not_none('name', name)
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_name', deployment_name)
return self._perform_post(
self._get_reserved_ip_path_for_association(name),
_XmlSerializer.associate_reserved_ip_to_xml(
service_name, deployment_name, virtual_ip_name
),
as_async=True,
x_ms_version='2015-02-01'
)
def disassociate_reserved_ip_address(
self, name, service_name, deployment_name, virtual_ip_name=None
):
'''
        Disassociate an existing reserved IP address from the given deployment.
name:
Required. Name of the reserved IP address.
service_name:
Required. Name of the hosted service.
deployment_name:
Required. Name of the deployment.
virtual_ip_name:
Optional. Name of the VirtualIP in case of multi Vip tenant.
If this value is not specified default virtualIP is used
for this operation.
'''
_validate_not_none('name', name)
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_name', deployment_name)
return self._perform_post(
self._get_reserved_ip_path_for_disassociation(name),
_XmlSerializer.associate_reserved_ip_to_xml(
service_name, deployment_name, virtual_ip_name
),
as_async=True,
x_ms_version='2015-02-01'
)
def get_reserved_ip_address(self, name):
'''
Retrieves information about the specified reserved IP address.
name:
Required. Name of the reserved IP address.
'''
_validate_not_none('name', name)
return self._perform_get(self._get_reserved_ip_path(name), ReservedIP)
def list_reserved_ip_addresses(self):
'''
Lists the IP addresses that have been reserved for the specified
subscription.
'''
return self._perform_get(self._get_reserved_ip_path(), ReservedIPs)
#--Operations for virtual machines -----------------------------------
def get_role(self, service_name, deployment_name, role_name):
'''
Retrieves the specified virtual machine.
service_name:
The name of the service.
deployment_name:
The name of the deployment.
role_name:
The name of the role.
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_name', deployment_name)
_validate_not_none('role_name', role_name)
return self._perform_get(
self._get_role_path(service_name, deployment_name, role_name),
PersistentVMRole)
def create_virtual_machine_deployment(self, service_name, deployment_name,
deployment_slot, label, role_name,
system_config, os_virtual_hard_disk,
network_config=None,
availability_set_name=None,
data_virtual_hard_disks=None,
role_size=None,
role_type='PersistentVMRole',
virtual_network_name=None,
resource_extension_references=None,
provision_guest_agent=None,
vm_image_name=None,
media_location=None,
dns_servers=None,
reserved_ip_name=None):
'''
Provisions a virtual machine based on the supplied configuration.
service_name:
Name of the hosted service.
deployment_name:
The name for the deployment. The deployment name must be unique
among other deployments for the hosted service.
deployment_slot:
The environment to which the hosted service is deployed. Valid
values are: staging, production
label:
Specifies an identifier for the deployment. The label can be up to
100 characters long. The label can be used for tracking purposes.
role_name:
The name of the role.
system_config:
Contains the metadata required to provision a virtual machine from
a Windows or Linux OS image. Use an instance of
WindowsConfigurationSet or LinuxConfigurationSet.
os_virtual_hard_disk:
Contains the parameters Windows Azure uses to create the operating
system disk for the virtual machine. If you are creating a Virtual
Machine by using a VM Image, this parameter is not used.
network_config:
Encapsulates the metadata required to create the virtual network
configuration for a virtual machine. If you do not include a
network configuration set you will not be able to access the VM
through VIPs over the internet. If your virtual machine belongs to
a virtual network you can not specify which subnet address space
it resides under. Use an instance of ConfigurationSet.
availability_set_name:
Specifies the name of an availability set to which to add the
virtual machine. This value controls the virtual machine
allocation in the Windows Azure environment. Virtual machines
specified in the same availability set are allocated to different
nodes to maximize availability.
data_virtual_hard_disks:
Contains the parameters Windows Azure uses to create a data disk
for a virtual machine.
role_size:
The size of the virtual machine to allocate. The default value is
Small. Possible values are: ExtraSmall,Small,Medium,Large,
ExtraLarge,A5,A6,A7,A8,A9,Basic_A0,Basic_A1,Basic_A2,Basic_A3,
Basic_A4,Standard_D1,Standard_D2,Standard_D3,Standard_D4,
Standard_D11,Standard_D12,Standard_D13,Standard_D14,Standard_G1,
            Standard_G2,Standard_G3,Standard_G4,Standard_G5. The specified
value must be compatible with the disk selected in the
OSVirtualHardDisk values.
role_type:
The type of the role for the virtual machine. The only supported
value is PersistentVMRole.
virtual_network_name:
Specifies the name of an existing virtual network to which the
deployment will belong.
resource_extension_references:
Optional. Contains a collection of resource extensions that are to
be installed on the Virtual Machine. This element is used if
provision_guest_agent is set to True. Use an iterable of instances
of ResourceExtensionReference.
provision_guest_agent:
Optional. Indicates whether the VM Agent is installed on the
Virtual Machine. To run a resource extension in a Virtual Machine,
this service must be installed.
vm_image_name:
Optional. Specifies the name of the VM Image that is to be used to
create the Virtual Machine. If this is specified, the
system_config and network_config parameters are not used.
media_location:
Optional. Required if the Virtual Machine is being created from a
published VM Image. Specifies the location of the VHD file that is
created when VMImageName specifies a published VM Image.
dns_servers:
Optional. List of DNS servers (use DnsServer class) to associate
with the Virtual Machine.
reserved_ip_name:
Optional. Specifies the name of a reserved IP address that is to be
assigned to the deployment. You must run create_reserved_ip_address
before you can assign the address to the deployment using this
element.
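        Example (sketch; assumes LinuxConfigurationSet and OSVirtualHardDisk
        from azure.servicemanagement, an existing cloud service and storage
        account, and placeholder names and credentials):
            from azure.servicemanagement import (
                LinuxConfigurationSet, OSVirtualHardDisk)
            linux_config = LinuxConfigurationSet(
                'myhostname', 'azureuser', 'MyPassw0rd!', True)
            os_hd = OSVirtualHardDisk(
                'my-os-image-name',  # e.g. a name returned by list_os_images
                'https://mystorage.blob.core.windows.net/vhds/myvm.vhd')
            result = sms.create_virtual_machine_deployment(
                'myservice', 'mydeployment', 'production', 'my vm label',
                'myrole', linux_config, os_hd, role_size='Small')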
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_name', deployment_name)
_validate_not_none('deployment_slot', deployment_slot)
_validate_not_none('label', label)
_validate_not_none('role_name', role_name)
return self._perform_post(
self._get_deployment_path_using_name(service_name),
_XmlSerializer.virtual_machine_deployment_to_xml(
deployment_name,
deployment_slot,
label,
role_name,
system_config,
os_virtual_hard_disk,
role_type,
network_config,
availability_set_name,
data_virtual_hard_disks,
role_size,
virtual_network_name,
resource_extension_references,
provision_guest_agent,
vm_image_name,
media_location,
dns_servers,
reserved_ip_name),
as_async=True)
def add_role(self, service_name, deployment_name, role_name, system_config,
os_virtual_hard_disk, network_config=None,
availability_set_name=None, data_virtual_hard_disks=None,
role_size=None, role_type='PersistentVMRole',
resource_extension_references=None,
provision_guest_agent=None, vm_image_name=None,
media_location=None):
'''
Adds a virtual machine to an existing deployment.
service_name:
The name of the service.
deployment_name:
The name of the deployment.
role_name:
The name of the role.
system_config:
Contains the metadata required to provision a virtual machine from
a Windows or Linux OS image. Use an instance of
WindowsConfigurationSet or LinuxConfigurationSet.
os_virtual_hard_disk:
Contains the parameters Windows Azure uses to create the operating
system disk for the virtual machine. If you are creating a Virtual
Machine by using a VM Image, this parameter is not used.
network_config:
Encapsulates the metadata required to create the virtual network
configuration for a virtual machine. If you do not include a
network configuration set you will not be able to access the VM
through VIPs over the internet. If your virtual machine belongs to
a virtual network you can not specify which subnet address space
it resides under.
availability_set_name:
Specifies the name of an availability set to which to add the
virtual machine. This value controls the virtual machine allocation
in the Windows Azure environment. Virtual machines specified in the
same availability set are allocated to different nodes to maximize
availability.
data_virtual_hard_disks:
Contains the parameters Windows Azure uses to create a data disk
for a virtual machine.
role_size:
The size of the virtual machine to allocate. The default value is
Small. Possible values are: ExtraSmall, Small, Medium, Large,
ExtraLarge. The specified value must be compatible with the disk
selected in the OSVirtualHardDisk values.
role_type:
The type of the role for the virtual machine. The only supported
value is PersistentVMRole.
resource_extension_references:
Optional. Contains a collection of resource extensions that are to
be installed on the Virtual Machine. This element is used if
provision_guest_agent is set to True.
provision_guest_agent:
Optional. Indicates whether the VM Agent is installed on the
Virtual Machine. To run a resource extension in a Virtual Machine,
this service must be installed.
vm_image_name:
Optional. Specifies the name of the VM Image that is to be used to
create the Virtual Machine. If this is specified, the
system_config and network_config parameters are not used.
media_location:
Optional. Required if the Virtual Machine is being created from a
published VM Image. Specifies the location of the VHD file that is
created when VMImageName specifies a published VM Image.
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_name', deployment_name)
_validate_not_none('role_name', role_name)
return self._perform_post(
self._get_role_path(service_name, deployment_name),
_XmlSerializer.add_role_to_xml(
role_name,
system_config,
os_virtual_hard_disk,
role_type,
network_config,
availability_set_name,
data_virtual_hard_disks,
role_size,
resource_extension_references,
provision_guest_agent,
vm_image_name,
media_location),
as_async=True)
def update_role(self, service_name, deployment_name, role_name,
os_virtual_hard_disk=None, network_config=None,
availability_set_name=None, data_virtual_hard_disks=None,
role_size=None, role_type='PersistentVMRole',
resource_extension_references=None,
provision_guest_agent=None):
'''
Updates the specified virtual machine.
service_name:
The name of the service.
deployment_name:
The name of the deployment.
role_name:
The name of the role.
os_virtual_hard_disk:
Contains the parameters Windows Azure uses to create the operating
system disk for the virtual machine.
network_config:
Encapsulates the metadata required to create the virtual network
configuration for a virtual machine. If you do not include a
network configuration set you will not be able to access the VM
through VIPs over the internet. If your virtual machine belongs to
a virtual network you can not specify which subnet address space
it resides under.
availability_set_name:
Specifies the name of an availability set to which to add the
virtual machine. This value controls the virtual machine allocation
in the Windows Azure environment. Virtual machines specified in the
same availability set are allocated to different nodes to maximize
availability.
data_virtual_hard_disks:
Contains the parameters Windows Azure uses to create a data disk
for a virtual machine.
role_size:
The size of the virtual machine to allocate. The default value is
Small. Possible values are: ExtraSmall, Small, Medium, Large,
ExtraLarge. The specified value must be compatible with the disk
selected in the OSVirtualHardDisk values.
role_type:
The type of the role for the virtual machine. The only supported
value is PersistentVMRole.
resource_extension_references:
Optional. Contains a collection of resource extensions that are to
be installed on the Virtual Machine. This element is used if
provision_guest_agent is set to True.
provision_guest_agent:
Optional. Indicates whether the VM Agent is installed on the
Virtual Machine. To run a resource extension in a Virtual Machine,
this service must be installed.
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_name', deployment_name)
_validate_not_none('role_name', role_name)
return self._perform_put(
self._get_role_path(service_name, deployment_name, role_name),
_XmlSerializer.update_role_to_xml(
role_name,
os_virtual_hard_disk,
role_type,
network_config,
availability_set_name,
data_virtual_hard_disks,
role_size,
resource_extension_references,
provision_guest_agent),
as_async=True)
    def delete_role(self, service_name, deployment_name, role_name, complete=False):
'''
Deletes the specified virtual machine.
service_name:
The name of the service.
deployment_name:
The name of the deployment.
role_name:
The name of the role.
complete:
True if all OS/data disks and the source blobs for the disks should
also be deleted from storage.
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_name', deployment_name)
_validate_not_none('role_name', role_name)
path = self._get_role_path(service_name, deployment_name, role_name)
        if complete:
            path = path + '?comp=media'
return self._perform_delete(path,
as_async=True)
def capture_role(self, service_name, deployment_name, role_name,
post_capture_action, target_image_name,
target_image_label, provisioning_configuration=None):
'''
The Capture Role operation captures a virtual machine image to your
image gallery. From the captured image, you can create additional
customized virtual machines.
service_name:
The name of the service.
deployment_name:
The name of the deployment.
role_name:
The name of the role.
post_capture_action:
Specifies the action after capture operation completes. Possible
values are: Delete, Reprovision.
target_image_name:
Specifies the image name of the captured virtual machine.
target_image_label:
Specifies the friendly name of the captured virtual machine.
provisioning_configuration:
Use an instance of WindowsConfigurationSet or LinuxConfigurationSet.
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_name', deployment_name)
_validate_not_none('role_name', role_name)
_validate_not_none('post_capture_action', post_capture_action)
_validate_not_none('target_image_name', target_image_name)
_validate_not_none('target_image_label', target_image_label)
return self._perform_post(
self._get_role_instance_operations_path(
service_name, deployment_name, role_name),
_XmlSerializer.capture_role_to_xml(
post_capture_action,
target_image_name,
target_image_label,
provisioning_configuration),
as_async=True)
def start_role(self, service_name, deployment_name, role_name):
'''
Starts the specified virtual machine.
service_name:
The name of the service.
deployment_name:
The name of the deployment.
role_name:
The name of the role.
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_name', deployment_name)
_validate_not_none('role_name', role_name)
return self._perform_post(
self._get_role_instance_operations_path(
service_name, deployment_name, role_name),
_XmlSerializer.start_role_operation_to_xml(),
as_async=True)
def start_roles(self, service_name, deployment_name, role_names):
'''
Starts the specified virtual machines.
service_name:
The name of the service.
deployment_name:
The name of the deployment.
role_names:
The names of the roles, as an enumerable of strings.
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_name', deployment_name)
_validate_not_none('role_names', role_names)
return self._perform_post(
self._get_roles_operations_path(service_name, deployment_name),
_XmlSerializer.start_roles_operation_to_xml(role_names),
as_async=True)
def restart_role(self, service_name, deployment_name, role_name):
'''
Restarts the specified virtual machine.
service_name:
The name of the service.
deployment_name:
The name of the deployment.
role_name:
The name of the role.
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_name', deployment_name)
_validate_not_none('role_name', role_name)
return self._perform_post(
self._get_role_instance_operations_path(
service_name, deployment_name, role_name),
_XmlSerializer.restart_role_operation_to_xml(
),
as_async=True)
def shutdown_role(self, service_name, deployment_name, role_name,
post_shutdown_action='Stopped'):
'''
Shuts down the specified virtual machine.
service_name:
The name of the service.
deployment_name:
The name of the deployment.
role_name:
The name of the role.
post_shutdown_action:
Specifies how the Virtual Machine should be shut down. Values are:
Stopped
Shuts down the Virtual Machine but retains the compute
resources. You will continue to be billed for the resources
that the stopped machine uses.
StoppedDeallocated
Shuts down the Virtual Machine and releases the compute
resources. You are not billed for the compute resources that
this Virtual Machine uses. If a static Virtual Network IP
address is assigned to the Virtual Machine, it is reserved.
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_name', deployment_name)
_validate_not_none('role_name', role_name)
_validate_not_none('post_shutdown_action', post_shutdown_action)
return self._perform_post(
self._get_role_instance_operations_path(
service_name, deployment_name, role_name),
_XmlSerializer.shutdown_role_operation_to_xml(post_shutdown_action),
as_async=True)
def shutdown_roles(self, service_name, deployment_name, role_names,
post_shutdown_action='Stopped'):
'''
Shuts down the specified virtual machines.
service_name:
The name of the service.
deployment_name:
The name of the deployment.
role_names:
The names of the roles, as an enumerable of strings.
post_shutdown_action:
Specifies how the Virtual Machine should be shut down. Values are:
Stopped
Shuts down the Virtual Machine but retains the compute
resources. You will continue to be billed for the resources
that the stopped machine uses.
StoppedDeallocated
Shuts down the Virtual Machine and releases the compute
resources. You are not billed for the compute resources that
this Virtual Machine uses. If a static Virtual Network IP
address is assigned to the Virtual Machine, it is reserved.
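        Example (sketch; service, deployment, and role names are
        placeholders):
            result = sms.shutdown_roles(
                'myservice', 'mydeployment', ['webrole1', 'webrole2'],
                post_shutdown_action='StoppedDeallocated')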
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_name', deployment_name)
_validate_not_none('role_names', role_names)
_validate_not_none('post_shutdown_action', post_shutdown_action)
return self._perform_post(
self._get_roles_operations_path(service_name, deployment_name),
_XmlSerializer.shutdown_roles_operation_to_xml(
role_names, post_shutdown_action),
as_async=True)
def add_dns_server(self, service_name, deployment_name, dns_server_name, address):
'''
Adds a DNS server definition to an existing deployment.
service_name:
The name of the service.
deployment_name:
The name of the deployment.
dns_server_name:
Specifies the name of the DNS server.
address:
Specifies the IP address of the DNS server.
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_name', deployment_name)
_validate_not_none('dns_server_name', dns_server_name)
_validate_not_none('address', address)
return self._perform_post(
self._get_dns_server_path(service_name, deployment_name),
_XmlSerializer.dns_server_to_xml(dns_server_name, address),
as_async=True)
def update_dns_server(self, service_name, deployment_name, dns_server_name, address):
'''
        Updates the IP address of a DNS server.
service_name:
The name of the service.
deployment_name:
The name of the deployment.
dns_server_name:
Specifies the name of the DNS server.
address:
Specifies the IP address of the DNS server.
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_name', deployment_name)
_validate_not_none('dns_server_name', dns_server_name)
_validate_not_none('address', address)
return self._perform_put(
self._get_dns_server_path(service_name,
deployment_name,
dns_server_name),
_XmlSerializer.dns_server_to_xml(dns_server_name, address),
as_async=True)
def delete_dns_server(self, service_name, deployment_name, dns_server_name):
'''
Deletes a DNS server from a deployment.
service_name:
The name of the service.
deployment_name:
The name of the deployment.
dns_server_name:
Name of the DNS server that you want to delete.
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_name', deployment_name)
_validate_not_none('dns_server_name', dns_server_name)
return self._perform_delete(
self._get_dns_server_path(service_name,
deployment_name,
dns_server_name),
as_async=True)
def list_resource_extensions(self):
'''
Lists the resource extensions that are available to add to a
Virtual Machine.
'''
return self._perform_get(self._get_resource_extensions_path(),
ResourceExtensions)
def list_resource_extension_versions(self, publisher_name, extension_name):
'''
Lists the versions of a resource extension that are available to add
to a Virtual Machine.
publisher_name:
Name of the resource extension publisher.
extension_name:
Name of the resource extension.
'''
return self._perform_get(self._get_resource_extension_versions_path(
publisher_name, extension_name),
ResourceExtensions)
#--Operations for virtual machine images -----------------------------
def replicate_vm_image(self, vm_image_name, regions, offer, sku, version):
'''
Replicate a VM image to multiple target locations. This operation
        is only for publishers. You have to be registered as an image
        publisher with Microsoft Azure to be able to call this.
vm_image_name:
Specifies the name of the VM Image that is to be used for
replication
regions:
            Specifies a list of regions to replicate the image to
Note: The regions in the request body are not additive. If a VM
Image has already been replicated to Regions A, B, and C, and
a request is made to replicate to Regions A and D, the VM
Image will remain in Region A, will be replicated in Region D,
and will be unreplicated from Regions B and C
offer:
Specifies the publisher defined name of the offer. The allowed
            characters are uppercase or lowercase letters, digits,
            hyphen (-), and period (.). The maximum allowed length is
            64 characters.
sku:
Specifies the publisher defined name of the Sku. The allowed
            characters are uppercase or lowercase letters, digits,
            hyphen (-), and period (.). The maximum allowed length is
            64 characters.
version:
Specifies the publisher defined version of the image.
The allowed characters are digit and period.
Format: <MajorVersion>.<MinorVersion>.<Patch>
            Example: '1.0.0' or '1.1.0'. The three-part version number
            follows the standard used by most resource providers. See
            http://semver.org
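        Example (sketch; image, offer, and SKU names are placeholders):
            result = sms.replicate_vm_image(
                'my-user-vm-image', ['West US', 'East US'],
                'MyOffer', 'Standard', '1.0.0')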
'''
_validate_not_none('vm_image_name', vm_image_name)
_validate_not_none('regions', regions)
_validate_not_none('offer', offer)
_validate_not_none('sku', sku)
_validate_not_none('version', version)
return self._perform_put(
self._get_replication_path_using_vm_image_name(vm_image_name),
_XmlSerializer.replicate_image_to_xml(
regions,
offer,
sku,
version
),
as_async=True,
x_ms_version='2015-04-01'
)
def unreplicate_vm_image(self, vm_image_name):
'''
        Unreplicate a VM image from all regions. This operation is only for
        publishers. You have to be registered as an image publisher with
        Microsoft Azure to be able to call this.
vm_image_name:
Specifies the name of the VM Image that is to be used for
unreplication. The VM Image Name should be the user VM Image,
not the published name of the VM Image.
'''
_validate_not_none('vm_image_name', vm_image_name)
return self._perform_put(
self._get_unreplication_path_using_vm_image_name(vm_image_name),
None,
as_async=True,
x_ms_version='2015-04-01'
)
def share_vm_image(self, vm_image_name, permission):
'''
Share an already replicated OS image. This operation is only for
        publishers. You have to be registered as an image publisher with Windows
Azure to be able to call this.
vm_image_name:
The name of the virtual machine image to share
permission:
The sharing permission: public, msdn, or private
'''
_validate_not_none('vm_image_name', vm_image_name)
_validate_not_none('permission', permission)
path = self._get_sharing_path_using_vm_image_name(vm_image_name)
query = '&permission=' + permission
path = path + '?' + query.lstrip('&')
return self._perform_put(
path, None, as_async=True, x_ms_version='2015-04-01'
)
def capture_vm_image(self, service_name, deployment_name, role_name, options):
'''
Creates a copy of the operating system virtual hard disk (VHD) and all
of the data VHDs that are associated with the Virtual Machine, saves
the VHD copies in the same storage location as the original VHDs, and
registers the copies as a VM Image in the image repository that is
associated with the specified subscription.
service_name:
The name of the service.
deployment_name:
The name of the deployment.
role_name:
The name of the role.
options:
An instance of CaptureRoleAsVMImage class.
options.os_state:
Required. Specifies the state of the operating system in the image.
Possible values are: Generalized, Specialized
A Virtual Machine that is fully configured and running contains a
Specialized operating system. A Virtual Machine on which the
Sysprep command has been run with the generalize option contains a
Generalized operating system. If you capture an image from a
generalized Virtual Machine, the machine is deleted after the image
is captured. It is recommended that all Virtual Machines are shut
down before capturing an image.
options.vm_image_name:
Required. Specifies the name of the VM Image.
options.vm_image_label:
Required. Specifies the label of the VM Image.
options.description:
Optional. Specifies the description of the VM Image.
options.language:
Optional. Specifies the language of the VM Image.
options.image_family:
Optional. Specifies a value that can be used to group VM Images.
options.recommended_vm_size:
Optional. Specifies the size to use for the Virtual Machine that
is created from the VM Image.
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_name', deployment_name)
_validate_not_none('role_name', role_name)
_validate_not_none('options', options)
_validate_not_none('options.os_state', options.os_state)
_validate_not_none('options.vm_image_name', options.vm_image_name)
_validate_not_none('options.vm_image_label', options.vm_image_label)
return self._perform_post(
self._get_capture_vm_image_path(service_name, deployment_name, role_name),
_XmlSerializer.capture_vm_image_to_xml(options),
as_async=True)
def create_vm_image(self, vm_image):
'''
Creates a VM Image in the image repository that is associated with the
specified subscription using a specified set of virtual hard disks.
vm_image:
An instance of VMImage class.
vm_image.name: Required. Specifies the name of the image.
vm_image.label: Required. Specifies an identifier for the image.
vm_image.description: Optional. Specifies the description of the image.
vm_image.os_disk_configuration:
Required. Specifies configuration information for the operating
system disk that is associated with the image.
vm_image.os_disk_configuration.host_caching:
Optional. Specifies the caching behavior of the operating system disk.
Possible values are: None, ReadOnly, ReadWrite
vm_image.os_disk_configuration.os_state:
Required. Specifies the state of the operating system in the image.
Possible values are: Generalized, Specialized
A Virtual Machine that is fully configured and running contains a
Specialized operating system. A Virtual Machine on which the
Sysprep command has been run with the generalize option contains a
Generalized operating system.
vm_image.os_disk_configuration.os:
Required. Specifies the operating system type of the image.
vm_image.os_disk_configuration.media_link:
Required. Specifies the location of the blob in Windows Azure
storage. The blob location belongs to a storage account in the
subscription specified by the <subscription-id> value in the
operation call.
vm_image.data_disk_configurations:
Optional. Specifies configuration information for the data disks
that are associated with the image. A VM Image might not have data
disks associated with it.
vm_image.data_disk_configurations[].host_caching:
Optional. Specifies the caching behavior of the data disk.
Possible values are: None, ReadOnly, ReadWrite
vm_image.data_disk_configurations[].lun:
Optional if the lun for the disk is 0. Specifies the Logical Unit
Number (LUN) for the data disk.
vm_image.data_disk_configurations[].media_link:
Required. Specifies the location of the blob in Windows Azure
storage. The blob location belongs to a storage account in the
subscription specified by the <subscription-id> value in the
operation call.
vm_image.data_disk_configurations[].logical_size_in_gb:
Required. Specifies the size, in GB, of the data disk.
vm_image.language: Optional. Specifies the language of the image.
vm_image.image_family:
Optional. Specifies a value that can be used to group VM Images.
vm_image.recommended_vm_size:
Optional. Specifies the size to use for the Virtual Machine that
is created from the VM Image.
vm_image.eula:
Optional. Specifies the End User License Agreement that is
associated with the image. The value for this element is a string,
but it is recommended that the value be a URL that points to a EULA.
vm_image.icon_uri:
Optional. Specifies the URI to the icon that is displayed for the
image in the Management Portal.
vm_image.small_icon_uri:
Optional. Specifies the URI to the small icon that is displayed for
the image in the Management Portal.
vm_image.privacy_uri:
Optional. Specifies the URI that points to a document that contains
the privacy policy related to the image.
vm_image.published_date:
Optional. Specifies the date when the image was added to the image
repository.
vm_image.show_in_gui:
Optional. Indicates whether the VM Images should be listed in the
portal.
'''
_validate_not_none('vm_image', vm_image)
_validate_not_none('vm_image.name', vm_image.name)
_validate_not_none('vm_image.label', vm_image.label)
_validate_not_none('vm_image.os_disk_configuration.os_state',
vm_image.os_disk_configuration.os_state)
_validate_not_none('vm_image.os_disk_configuration.os',
vm_image.os_disk_configuration.os)
_validate_not_none('vm_image.os_disk_configuration.media_link',
vm_image.os_disk_configuration.media_link)
return self._perform_post(
self._get_vm_image_path(),
_XmlSerializer.create_vm_image_to_xml(vm_image),
as_async=True)
def delete_vm_image(self, vm_image_name, delete_vhd=False):
'''
Deletes the specified VM Image from the image repository that is
associated with the specified subscription.
vm_image_name:
The name of the image.
delete_vhd:
Deletes the underlying vhd blob in Azure storage.
'''
_validate_not_none('vm_image_name', vm_image_name)
path = self._get_vm_image_path(vm_image_name)
if delete_vhd:
path += '?comp=media'
return self._perform_delete(path, as_async=True)
def list_vm_images(self, location=None, publisher=None, category=None):
'''
Retrieves a list of the VM Images from the image repository that is
associated with the specified subscription.
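        Example (sketch; the name attribute is assumed on the returned
        VMImage entries):
            for image in sms.list_vm_images(location='West US'):
                print(image.name)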
'''
path = self._get_vm_image_path()
query = ''
if location:
query += '&location=' + location
if publisher:
query += '&publisher=' + publisher
if category:
query += '&category=' + category
if query:
path = path + '?' + query.lstrip('&')
return self._perform_get(path, VMImages)
def update_vm_image(self, vm_image_name, vm_image):
'''
Updates a VM Image in the image repository that is associated with the
specified subscription.
vm_image_name:
Name of image to update.
vm_image:
An instance of VMImage class.
vm_image.label: Optional. Specifies an identifier for the image.
vm_image.os_disk_configuration:
Required. Specifies configuration information for the operating
system disk that is associated with the image.
vm_image.os_disk_configuration.host_caching:
Optional. Specifies the caching behavior of the operating system disk.
Possible values are: None, ReadOnly, ReadWrite
vm_image.data_disk_configurations:
Optional. Specifies configuration information for the data disks
that are associated with the image. A VM Image might not have data
disks associated with it.
vm_image.data_disk_configurations[].name:
Required. Specifies the name of the data disk.
vm_image.data_disk_configurations[].host_caching:
Optional. Specifies the caching behavior of the data disk.
Possible values are: None, ReadOnly, ReadWrite
vm_image.data_disk_configurations[].lun:
Optional if the lun for the disk is 0. Specifies the Logical Unit
Number (LUN) for the data disk.
vm_image.description: Optional. Specifies the description of the image.
vm_image.language: Optional. Specifies the language of the image.
vm_image.image_family:
Optional. Specifies a value that can be used to group VM Images.
vm_image.recommended_vm_size:
Optional. Specifies the size to use for the Virtual Machine that
is created from the VM Image.
vm_image.eula:
Optional. Specifies the End User License Agreement that is
associated with the image. The value for this element is a string,
but it is recommended that the value be a URL that points to a EULA.
vm_image.icon_uri:
Optional. Specifies the URI to the icon that is displayed for the
image in the Management Portal.
vm_image.small_icon_uri:
Optional. Specifies the URI to the small icon that is displayed for
the image in the Management Portal.
vm_image.privacy_uri:
Optional. Specifies the URI that points to a document that contains
the privacy policy related to the image.
vm_image.published_date:
Optional. Specifies the date when the image was added to the image
repository.
vm_image.show_in_gui:
Optional. Indicates whether the VM Images should be listed in the
portal.
'''
_validate_not_none('vm_image_name', vm_image_name)
_validate_not_none('vm_image', vm_image)
return self._perform_put(self._get_vm_image_path(vm_image_name),
_XmlSerializer.update_vm_image_to_xml(vm_image),
as_async=True)
#--Operations for operating system images ----------------------------
def list_os_images(self):
'''
Retrieves a list of the OS images from the image repository.
'''
return self._perform_get(self._get_image_path(),
Images)
def get_os_image(self, image_name):
'''
Retrieves an OS image from the image repository.
'''
return self._perform_get(self._get_image_path(image_name),
OSImage)
def get_os_image_details(self, image_name):
'''
Retrieves an OS image from the image repository, including replication
progress.
'''
return self._perform_get(self._get_image_details_path(image_name),
OSImageDetails)
def add_os_image(self, label, media_link, name, os):
'''
Adds an OS image that is currently stored in a storage account in your
subscription to the image repository.
label:
Specifies the friendly name of the image.
media_link:
Specifies the location of the blob in Windows Azure blob store
where the media for the image is located. The blob location must
belong to a storage account in the subscription specified by the
<subscription-id> value in the operation call. Example:
http://example.blob.core.windows.net/disks/mydisk.vhd
name:
Specifies a name for the OS image that Windows Azure uses to
identify the image when creating one or more virtual machines.
os:
The operating system type of the OS image. Possible values are:
Linux, Windows
'''
_validate_not_none('label', label)
_validate_not_none('media_link', media_link)
_validate_not_none('name', name)
_validate_not_none('os', os)
return self._perform_post(self._get_image_path(),
_XmlSerializer.os_image_to_xml(
label, media_link, name, os),
as_async=True)
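# Hedged usage sketch (not from the original source): assuming `sms` is an
# authenticated instance of this service class and the VHD blob already
# exists in a storage account of the same subscription, registering a
# Linux image could look roughly like this:
#
#     result = sms.add_os_image(
#         label='my-ubuntu-image',
#         media_link='http://example.blob.core.windows.net/disks/mydisk.vhd',
#         name='my-ubuntu-image',
#         os='Linux')
#     # add_os_image posts asynchronously; poll the returned operation
#     # until the image appears in sms.list_os_images().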
def update_os_image(self, image_name, label, media_link, name, os):
'''
Updates an OS image that is in your image repository.
image_name:
The name of the image to update.
label:
Specifies the friendly name of the image to be updated. You cannot
use this operation to update images provided by the Windows Azure
platform.
media_link:
Specifies the location of the blob in Windows Azure blob store
where the media for the image is located. The blob location must
belong to a storage account in the subscription specified by the
<subscription-id> value in the operation call. Example:
http://example.blob.core.windows.net/disks/mydisk.vhd
name:
Specifies a name for the OS image that Windows Azure uses to
identify the image when creating one or more VM Roles.
os:
The operating system type of the OS image. Possible values are:
Linux, Windows
'''
_validate_not_none('image_name', image_name)
_validate_not_none('label', label)
_validate_not_none('media_link', media_link)
_validate_not_none('name', name)
_validate_not_none('os', os)
return self._perform_put(self._get_image_path(image_name),
_XmlSerializer.os_image_to_xml(
label, media_link, name, os),
as_async=True)
def update_os_image_from_image_reference(self, image_name, os_image):
'''
Updates metadata elements from a given OS image reference.
image_name:
The name of the image to update.
os_image:
An instance of OSImage class.
os_image.label: Optional. Specifies an identifier for the image.
os_image.description: Optional. Specifies the description of the image.
os_image.language: Optional. Specifies the language of the image.
os_image.image_family:
Optional. Specifies a value that can be used to group VM Images.
os_image.recommended_vm_size:
Optional. Specifies the size to use for the Virtual Machine that
is created from the VM Image.
os_image.eula:
Optional. Specifies the End User License Agreement that is
associated with the image. The value for this element is a string,
but it is recommended that the value be a URL that points to a EULA.
os_image.icon_uri:
Optional. Specifies the URI to the icon that is displayed for the
image in the Management Portal.
os_image.small_icon_uri:
Optional. Specifies the URI to the small icon that is displayed for
the image in the Management Portal.
os_image.privacy_uri:
Optional. Specifies the URI that points to a document that contains
the privacy policy related to the image.
os_image.published_date:
Optional. Specifies the date when the image was added to the image
repository.
os_image.media_link:
Required. Specifies the location of the blob in Windows Azure
blob store where the media for the image is located. The blob
location must belong to a storage account in the subscription
specified by the <subscription-id> value in the operation call.
Example:
http://example.blob.core.windows.net/disks/mydisk.vhd
os_image.name:
Specifies a name for the OS image that Windows Azure uses to
identify the image when creating one or more VM Roles.
os_image.os:
The operating system type of the OS image. Possible values are:
Linux, Windows
'''
_validate_not_none('image_name', image_name)
_validate_not_none('os_image', os_image)
return self._perform_put(self._get_image_path(image_name),
_XmlSerializer.update_os_image_to_xml(os_image), as_async=True
)
def delete_os_image(self, image_name, delete_vhd=False):
'''
Deletes the specified OS image from your image repository.
image_name:
The name of the image.
delete_vhd:
Deletes the underlying vhd blob in Azure storage.
'''
_validate_not_none('image_name', image_name)
path = self._get_image_path(image_name)
if delete_vhd:
path += '?comp=media'
return self._perform_delete(path, as_async=True)
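# Hedged sketch (illustrative names): deleting an image together with its
# backing VHD only requires the delete_vhd flag, which appends
# '?comp=media' to the request path:
#
#     sms.delete_os_image('my-ubuntu-image', delete_vhd=True)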
#--Operations for virtual machine disks ------------------------------
def get_data_disk(self, service_name, deployment_name, role_name, lun):
'''
Retrieves the specified data disk from a virtual machine.
service_name:
The name of the service.
deployment_name:
The name of the deployment.
role_name:
The name of the role.
lun:
The Logical Unit Number (LUN) for the disk.
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_name', deployment_name)
_validate_not_none('role_name', role_name)
_validate_not_none('lun', lun)
return self._perform_get(
self._get_data_disk_path(
service_name, deployment_name, role_name, lun),
DataVirtualHardDisk)
def add_data_disk(self, service_name, deployment_name, role_name, lun,
host_caching=None, media_link=None, disk_label=None,
disk_name=None, logical_disk_size_in_gb=None,
source_media_link=None):
'''
Adds a data disk to a virtual machine.
service_name:
The name of the service.
deployment_name:
The name of the deployment.
role_name:
The name of the role.
lun:
Specifies the Logical Unit Number (LUN) for the disk. The LUN
specifies the slot in which the data drive appears when mounted
for usage by the virtual machine. Valid LUN values are 0 through 15.
host_caching:
Specifies the platform caching behavior of data disk blob for
read/write efficiency. The default value is ReadOnly. Possible
values are: None, ReadOnly, ReadWrite
media_link:
Specifies the location of the blob in Windows Azure blob store
where the media for the disk is located. The blob location must
belong to the storage account in the subscription specified by the
<subscription-id> value in the operation call. Example:
http://example.blob.core.windows.net/disks/mydisk.vhd
disk_label:
Specifies the description of the data disk. When you attach a disk,
either by directly referencing a media using the MediaLink element
or specifying the target disk size, you can use the DiskLabel
element to customize the name property of the target data disk.
disk_name:
Specifies the name of the disk. Windows Azure uses the specified
disk to create the data disk for the machine and populates this
field with the disk name.
logical_disk_size_in_gb:
Specifies the size, in GB, of an empty disk to be attached to the
role. The disk can be created as part of disk attach or create VM
role call by specifying the value for this property. Windows Azure
creates the empty disk based on size preference and attaches the
newly created disk to the Role.
source_media_link:
Specifies the location of a blob in account storage which is
mounted as a data disk when the virtual machine is created.
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_name', deployment_name)
_validate_not_none('role_name', role_name)
_validate_not_none('lun', lun)
return self._perform_post(
self._get_data_disk_path(service_name, deployment_name, role_name),
_XmlSerializer.data_virtual_hard_disk_to_xml(
host_caching,
disk_label,
disk_name,
lun,
logical_disk_size_in_gb,
media_link,
source_media_link),
as_async=True)
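# Hedged usage sketch (service/role names are illustrative): attaching an
# empty 100 GB data disk at LUN 1 to an existing role, with `sms` an
# instance of this service class:
#
#     sms.add_data_disk(
#         'myservice', 'mydeployment', 'myrole', 1,
#         host_caching='ReadOnly',
#         disk_label='scratch-disk',
#         logical_disk_size_in_gb=100)
#     # Pass source_media_link instead to mount an existing blob rather
#     # than creating an empty disk.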
def update_data_disk(self, service_name, deployment_name, role_name, lun,
host_caching=None, media_link=None, updated_lun=None,
disk_label=None, disk_name=None,
logical_disk_size_in_gb=None):
'''
Updates the specified data disk attached to the specified virtual
machine.
service_name:
The name of the service.
deployment_name:
The name of the deployment.
role_name:
The name of the role.
lun:
Specifies the Logical Unit Number (LUN) for the disk. The LUN
specifies the slot in which the data drive appears when mounted
for usage by the virtual machine. Valid LUN values are 0 through
15.
host_caching:
Specifies the platform caching behavior of data disk blob for
read/write efficiency. The default value is ReadOnly. Possible
values are: None, ReadOnly, ReadWrite
media_link:
Specifies the location of the blob in Windows Azure blob store
where the media for the disk is located. The blob location must
belong to the storage account in the subscription specified by
the <subscription-id> value in the operation call. Example:
http://example.blob.core.windows.net/disks/mydisk.vhd
updated_lun:
Specifies the Logical Unit Number (LUN) for the disk. The LUN
specifies the slot in which the data drive appears when mounted
for usage by the virtual machine. Valid LUN values are 0 through 15.
disk_label:
Specifies the description of the data disk. When you attach a disk,
either by directly referencing a media using the MediaLink element
or specifying the target disk size, you can use the DiskLabel
element to customize the name property of the target data disk.
disk_name:
Specifies the name of the disk. Windows Azure uses the specified
disk to create the data disk for the machine and populates this
field with the disk name.
logical_disk_size_in_gb:
Specifies the size, in GB, of an empty disk to be attached to the
role. The disk can be created as part of disk attach or create VM
role call by specifying the value for this property. Windows Azure
creates the empty disk based on size preference and attaches the
newly created disk to the Role.
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_name', deployment_name)
_validate_not_none('role_name', role_name)
_validate_not_none('lun', lun)
return self._perform_put(
self._get_data_disk_path(
service_name, deployment_name, role_name, lun),
_XmlSerializer.data_virtual_hard_disk_to_xml(
host_caching,
disk_label,
disk_name,
updated_lun,
logical_disk_size_in_gb,
media_link,
None),
as_async=True)
def delete_data_disk(self, service_name, deployment_name, role_name, lun, delete_vhd=False):
'''
Removes the specified data disk from a virtual machine.
service_name:
The name of the service.
deployment_name:
The name of the deployment.
role_name:
The name of the role.
lun:
The Logical Unit Number (LUN) for the disk.
delete_vhd:
Deletes the underlying vhd blob in Azure storage.
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_name', deployment_name)
_validate_not_none('role_name', role_name)
_validate_not_none('lun', lun)
path = self._get_data_disk_path(service_name, deployment_name, role_name, lun)
if delete_vhd:
path += '?comp=media'
return self._perform_delete(path, as_async=True)
#--Operations for virtual machine disks ------------------------------
def list_disks(self):
'''
Retrieves a list of the disks in your image repository.
'''
return self._perform_get(self._get_disk_path(),
Disks)
def get_disk(self, disk_name):
'''
Retrieves a disk from your image repository.
'''
return self._perform_get(self._get_disk_path(disk_name),
Disk)
def add_disk(self, has_operating_system, label, media_link, name, os):
'''
Adds a disk to the user image repository. The disk can be an OS disk
or a data disk.
has_operating_system:
Deprecated.
label:
Specifies the description of the disk.
media_link:
Specifies the location of the blob in Windows Azure blob store
where the media for the disk is located. The blob location must
belong to the storage account in the current subscription specified
by the <subscription-id> value in the operation call. Example:
http://example.blob.core.windows.net/disks/mydisk.vhd
name:
Specifies a name for the disk. Windows Azure uses the name to
identify the disk when creating virtual machines from the disk.
os:
The OS type of the disk. Possible values are: Linux, Windows
'''
_validate_not_none('label', label)
_validate_not_none('media_link', media_link)
_validate_not_none('name', name)
_validate_not_none('os', os)
return self._perform_post(self._get_disk_path(),
_XmlSerializer.disk_to_xml(
label,
media_link,
name,
os))
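# Hedged sketch (illustrative values): registering an existing VHD blob as
# a reusable disk in the image repository, assuming `sms` is an instance
# of this class:
#
#     sms.add_disk(
#         False,                      # has_operating_system (deprecated)
#         'my-data-disk',             # label
#         'http://example.blob.core.windows.net/disks/mydisk.vhd',
#         'my-data-disk',             # name
#         'Linux')                    # os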
def update_disk(self, disk_name, has_operating_system=None, label=None, media_link=None,
name=None, os=None):
'''
Updates an existing disk in your image repository.
disk_name:
The name of the disk to update.
has_operating_system:
Deprecated.
label:
Specifies the description of the disk.
media_link:
Deprecated.
name:
Deprecated.
os:
Deprecated.
'''
_validate_not_none('disk_name', disk_name)
_validate_not_none('label', label)
return self._perform_put(self._get_disk_path(disk_name),
_XmlSerializer.disk_to_xml(
label,
None,
None,
None))
def delete_disk(self, disk_name, delete_vhd=False):
'''
Deletes the specified data or operating system disk from your image
repository.
disk_name:
The name of the disk to delete.
delete_vhd:
Deletes the underlying vhd blob in Azure storage.
'''
_validate_not_none('disk_name', disk_name)
path = self._get_disk_path(disk_name)
if delete_vhd:
path += '?comp=media'
return self._perform_delete(path)
#--Operations for virtual networks ------------------------------
def list_virtual_network_sites(self):
'''
Retrieves a list of the virtual networks.
'''
return self._perform_get(self._get_virtual_network_site_path(), VirtualNetworkSites)
#--Helper functions --------------------------------------------------
def _get_replication_path_using_vm_image_name(self, vm_image_name):
return self._get_path(
'services/images/' + _str(vm_image_name) + '/replicate', None
)
def _get_unreplication_path_using_vm_image_name(self, vm_image_name):
return self._get_path(
'services/images/' + _str(vm_image_name) + '/unreplicate', None
)
def _get_sharing_path_using_vm_image_name(self, vm_image_name):
return self._get_path(
'services/images/' + _str(vm_image_name) + '/shareasync', None
)
def _get_role_sizes_path(self):
return self._get_path('rolesizes', None)
def _get_subscriptions_path(self):
return '/subscriptions'
def _get_list_subscription_operations_path(self):
return self._get_path('operations', None)
def _get_virtual_network_site_path(self):
return self._get_path('services/networking/virtualnetwork', None)
def _get_storage_service_path(self, service_name=None):
return self._get_path('services/storageservices', service_name)
def _get_hosted_service_path(self, service_name=None):
return self._get_path('services/hostedservices', service_name)
def _get_deployment_path_using_slot(self, service_name, slot=None):
return self._get_path('services/hostedservices/' + _str(service_name) +
'/deploymentslots', slot)
def _get_deployment_path_using_name(self, service_name,
deployment_name=None):
return self._get_path('services/hostedservices/' + _str(service_name) +
'/deployments', deployment_name)
def _get_role_path(self, service_name, deployment_name, role_name=None):
return self._get_path('services/hostedservices/' + _str(service_name) +
'/deployments/' + deployment_name +
'/roles', role_name)
def _get_role_instance_operations_path(self, service_name, deployment_name,
role_name=None):
return self._get_path('services/hostedservices/' + _str(service_name) +
'/deployments/' + deployment_name +
'/roleinstances', role_name) + '/Operations'
def _get_roles_operations_path(self, service_name, deployment_name):
return self._get_path('services/hostedservices/' + _str(service_name) +
'/deployments/' + deployment_name +
'/roles/Operations', None)
def _get_resource_extensions_path(self):
return self._get_path('services/resourceextensions', None)
def _get_resource_extension_versions_path(self, publisher_name, extension_name):
return self._get_path('services/resourceextensions',
publisher_name + '/' + extension_name)
def _get_dns_server_path(self, service_name, deployment_name,
dns_server_name=None):
return self._get_path('services/hostedservices/' + _str(service_name) +
'/deployments/' + deployment_name +
'/dnsservers', dns_server_name)
def _get_capture_vm_image_path(self, service_name, deployment_name, role_name):
return self._get_path('services/hostedservices/' + _str(service_name) +
'/deployments/' + _str(deployment_name) +
'/roleinstances/' + _str(role_name) + '/Operations',
None)
def _get_vm_image_path(self, image_name=None):
return self._get_path('services/vmimages', image_name)
def _get_reserved_ip_path(self, name=None):
return self._get_path('services/networking/reservedips', name)
def _get_reserved_ip_path_for_association(self, name):
return self._get_path('services/networking/reservedips', name) + \
'/operations/associate'
def _get_reserved_ip_path_for_disassociation(self, name):
return self._get_path('services/networking/reservedips', name) + \
'/operations/disassociate'
def _get_data_disk_path(self, service_name, deployment_name, role_name,
lun=None):
return self._get_path('services/hostedservices/' + _str(service_name) +
'/deployments/' + _str(deployment_name) +
'/roles/' + _str(role_name) + '/DataDisks', lun)
def _get_disk_path(self, disk_name=None):
return self._get_path('services/disks', disk_name)
def _get_image_path(self, image_name=None):
return self._get_path('services/images', image_name)
def _get_image_details_path(self, image_name=None):
return self._get_path('services/images', image_name, 'details')
|
py | 1a3b12daa7f9d67e7e2c4d8c0f871c9e0391bbc7 | """
molssi_math.py
A sample repository for the MolSSI Workshop at UF.
Some math functions.
"""
def mean(num_list):
"""
Calculate the mean/average of a list of numbers.
Parameters
-----------
num_list : list
The list to take the average of
Returns
-----------
mean_list : float
The mean of the list
"""
# Check that input is type list
if not isinstance(num_list, list):
raise TypeError('Invalid input %s - must be type list' %(num_list))
# Check that list is not empty
if num_list == []:
raise ValueError('Cannot calculate the mean of an empty list.')
try:
mean_list = sum(num_list) / len(num_list)
except TypeError:
raise TypeError('Cannot calculate mean of list - all list elements must be numeric')
return mean_list
def factorial(n):
"""
Calculate a factorial
Parameters
-----------
n : int
The factorial parameter.
Returns
-------
factorial : int
The requested factorial
"""
fact = 1
for i in range(1, n+1):
fact = fact * i
return fact
def canvas(with_attribution=True):
"""
Placeholder function to show example docstring (NumPy format)
Replace this function and doc string for your own project
Parameters
----------
with_attribution : bool, Optional, default: True
Set whether or not to display who the quote is from
Returns
-------
quote : str
Compiled string including quote and optional attribution
"""
quote = "The code is but a canvas to our imagination."
if with_attribution:
quote += "\n\t- Adapted from Henry David Thoreau"
return quote
if __name__ == "__main__":
# Do something if this file is invoked on its own
print(canvas())
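# Quick self-checks for the helpers above (added illustrative examples).
print(mean([1, 2, 3, 4]))   # expected: 2.5
print(factorial(5))         # expected: 120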
|
py | 1a3b1315daeddc4c5c227f5c196aa5f5ffc10249 | from re import L
import discord
from discord.ext import commands
from discord.ext.commands import bot
from settings import constants
owners = constants.owners
admins = constants.admins
def is_owner(ctx):
""" Checks if the author is one of the owners """
return ctx.author.id in owners
def is_admin(ctx):
if (
ctx.author.id in ctx.bot.constants.admins
or ctx.author.id in ctx.bot.constants.owners
):
return True
return
async def check_permissions(ctx, perms, *, check=all):
""" Checks if author has permissions to a permission """
if ctx.author.id in owners:
return True
resolved = ctx.author.guild_permissions
guild_perm_checker = check(
getattr(resolved, name, None) == value for name, value in perms.items()
)
if guild_perm_checker is False:
# Try to see if the user has channel permissions that override
resolved = ctx.channel.permissions_for(ctx.author)
return check(
getattr(resolved, name, None) == value for name, value in perms.items()
)
async def check_bot_permissions(ctx, perms, *, check=all):
""" Checks if author has permissions to a permission """
if ctx.guild:
resolved = ctx.guild.me.guild_permissions
guild_perm_checker = check(
getattr(resolved, name, None) == value for name, value in perms.items()
)
if guild_perm_checker is False:
# Try to see if the user has channel permissions that override
resolved = ctx.channel.permissions_for(ctx.guild.me)
return check(
getattr(resolved, name, None) == value for name, value in perms.items()
)
else:
return True
def has_perms(*, check=all, **perms): # Decorator to check if a user has perms
async def pred(ctx):
result = await check_permissions(ctx, perms, check=check)
perm_list = [
x.title().replace("_", " ").replace("Tts", "TTS").replace("Guild", "Server")
for x in perms
]
if result is False:
raise commands.BadArgument(
message=f"You are missing the following permission{'' if len(perm_list) == 1 else 's'}: `{', '.join(perm_list)}`"
)
return result
return commands.check(pred)
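# Hedged usage sketch (command and permission are illustrative): has_perms is
# stacked below commands.command on a regular command and raises BadArgument
# naming the required permissions when the author's guild/channel permissions
# fall short:
#
#     @commands.command(name="purge")
#     @has_perms(manage_messages=True)
#     async def purge(self, ctx, amount: int = 10):
#         await ctx.channel.purge(limit=amount)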
def bot_has_perms(*, check=all, **perms): # Decorator to check if the bot has perms
async def pred(ctx):
result = await check_bot_permissions(ctx, perms, check=check)
if (
result is False
): # We know its a guild because permissions failed in check_bot_permissions()
guild_perms = [x[0] for x in ctx.guild.me.guild_permissions if x[1] is True]
channel_perms = [
x[0] for x in ctx.channel.permissions_for(ctx.guild.me) if x[1] is True
]
botperms = guild_perms + channel_perms
perms_needed = []
for x in perms:
if not x in botperms: # Only complain about the perms we don't have
perms_needed.append(x)
perm_list = [
x.title().replace("_", " ").replace("Tts", "TTS") for x in perms_needed
]
raise commands.BadArgument(
message=f"I require the following permission{'' if len(perm_list) == 1 else 's'}: `{', '.join(perm_list)}`"
)
return result
return commands.check(pred)
def is_bot_admin(): # Decorator for bot admin commands
async def pred(ctx):
return is_admin(ctx)
return commands.check(pred)
async def check_priv(ctx, member):
"""
Handle permission hierarchy for commands
Return the reason for failure.
"""
try:
# Self checks
if member == ctx.author:
return f"You cannot {ctx.command.name} yourself."
if member.id == ctx.bot.user.id:
return f"I cannot {ctx.command.name} myself."
# Bot lacks permissions
if member.id == ctx.guild.owner.id:
return f"I cannot {ctx.command.name} the server owner."
if ctx.guild.me.top_role.position == member.top_role.position:
return f"I cannot {ctx.command.name} a user with equal permissions."
if ctx.guild.me.top_role.position < member.top_role.position:
return f"I cannot {ctx.command.name} a user with superior permissions."
if member.id in owners:
return f"I cannot {ctx.command.name} my creator."
# Check if user bypasses
if ctx.author.id == ctx.guild.owner.id:
return
if ctx.author.id in owners:
return
# Now permission check
if ctx.author.top_role.position == member.top_role.position:
return f"You cannot {ctx.command.name} a user with equal permissions."
if ctx.author.top_role.position < member.top_role.position:
return f"You cannot {ctx.command.name} a user with superior permissions."
except Exception as e:
print(e)
pass
async def checker(ctx, value):
    # Return the first failure reason found; None means every check passed.
    result = None
    if isinstance(value, list):
        for x in value:
            result = await check_priv(ctx, member=x)
            if result:
                return result
    else:
        result = await check_priv(ctx, member=value)
    return result
def can_handle(ctx, permission: str):
""" Checks if bot has permissions or is in DMs right now """
return isinstance(ctx.channel, discord.DMChannel) or getattr(
ctx.channel.permissions_for(ctx.guild.me), permission
)
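# Hedged usage sketch: can_handle is a plain helper, e.g. guard a reaction so
# the bot stays silent when it lacks the permission:
#
#     if can_handle(ctx, "add_reactions"):
#         await ctx.message.add_reaction("\N{THUMBS UP SIGN}")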
|
py | 1a3b139c4ad21b13c41e1544bc98e13f0da14676 | import matplotlib as mlt
# mlt.use('TkAgg')
# mlt.use('Qt5Agg')
import matplotlib.pyplot as plt
import numpy as np
import pykep as pk
from utils import *
from conversions import *
class plotting(object):
"""
Provide visualization for hodographicShaping trajectories
Samples trajectory at initialization
Methods to plot various characteristics (3D trajectory, hodograph, etc.)
"""
def __init__(self, trajectory, samples=100, folder='graveyeard', save=False,
ephemSource = 'jpl'):
'''
Create plotting object
trajectory needs to be of type 'hodographicShaping'
ephemSource needs to correspond to the one used for trajectory (due to
planet names)
'''
print('\nBegin plotting.')
print('Sampling at', samples, 'points.')
self.samples = samples
self.trajectory = trajectory
self.folder = folder
self.save = save
self.ephemSource = ephemSource
if self.save==True:
checkFolder(self.folder)
# sample planets and trajectory
self.plPosCart, self.plPosCyl, self.plVelCart, self.plVelCyl = \
self.samplePlanets(trajectory, samples=samples)
self.traPosCart, self.traPosCyl = \
self.sampleTrajectoryPosition(trajectory, samples=samples)
self.traVelCart, self.traVelCyl = \
self.sampleTrajectoryVelocity(trajectory, samples=samples)
self.traAccCyl = \
self.sampleTrajectoryAcceleration(trajectory, samples=samples)
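# Hedged usage sketch: `hodographicShaping` is assumed to expose the fields
# sampled above (r, t, z, rDot, tDot, zDot, jdDep, jdArr, tof, tofSec, ...).
# A typical session could then be:
#
#     shaping = hodographicShaping(...)   # an already-solved trajectory
#     plots = plotting(shaping, samples=200, folder='plots', save=True)
#     plots.trajectory3D()
#     plots.hodograph(twoDplot=True)
#     plots.thrust()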
def trajectory3D(self, save=None, folder=None, scaling=True):
"""
Plot the given trajectory in 3D
"""
print('Plot 3D trajectory')
# start figure
fig = newFigure(height=6.4)
ax = fig.gca(projection='3d')
# Sun
ax.scatter([0], [0], [0], s=100, color='yellow', label='Sun', marker='o', edgecolor='orange',)
# Departure planet
ax.plot(self.plPosCart['xDep']/pk.AU, self.plPosCart['yDep']/pk.AU, self.plPosCart['zDep']/pk.AU, label='Departure planet', c='C0')
ax.scatter(self.plPosCart['xDep'][0]/pk.AU, self.plPosCart['yDep'][0]/pk.AU, self.plPosCart['zDep'][0]/pk.AU, c='k')
ax.scatter(self.plPosCart['xDep'][-1]/pk.AU, self.plPosCart['yDep'][-1]/pk.AU, self.plPosCart['zDep'][-1]/pk.AU, c='k')
# Arrival planet
ax.plot(self.plPosCart['xArr']/pk.AU, self.plPosCart['yArr']/pk.AU, self.plPosCart['zArr']/pk.AU, label='Arrival planet', c='C3')
ax.scatter(self.plPosCart['xArr'][0]/pk.AU, self.plPosCart['yArr'][0]/pk.AU, self.plPosCart['zArr'][0]/pk.AU, c='k')
ax.scatter(self.plPosCart['xArr'][-1]/pk.AU, self.plPosCart['yArr'][-1]/pk.AU, self.plPosCart['zArr'][-1]/pk.AU, c='k')
# Trajectory
ax.plot(self.traPosCart['x']/pk.AU, self.traPosCart['y']/pk.AU, self.traPosCart['z']/pk.AU, label='Trajectory', c='C1')
ax.scatter(self.traPosCart['x'][0]/pk.AU, self.traPosCart['y'][0]/pk.AU, self.traPosCart['z'][0]/pk.AU, label='launch', c='C2')
ax.scatter(self.traPosCart['x'][-1]/pk.AU, self.traPosCart['y'][-1]/pk.AU, self.traPosCart['z'][-1]/pk.AU, label='arrival', c='C3')
# formatting
if scaling:
axisEqual3D(ax)
# plt.title('Orbits and trajectory')
ax.set_xlabel('x [AU]', labelpad=15)
ax.set_ylabel('y [AU]', labelpad=15)
ax.set_zlabel('z [AU]', labelpad=15)
plt.grid()
plt.legend()
if save==None:
save = self.save
if folder==None:
folder = self.folder
if save==True:
checkFolder(folder)
plt.savefig(os.path.join(os.getcwd(), folder, 'trajectory3D.pdf'), dpi=300)
plt.savefig(os.path.join(os.getcwd(), folder, 'trajectory3D.png'), dpi=300)
plt.show()
def trajectory2D(self, save=None, folder=None, quiver=False):
'''
Two dimensional plot in the ecliptic plane
'''
fig = newFigure(height=6.4)
# Sun
sun = plt.scatter([0], [0], s=100, color='yellow', label='Sun', marker='o', edgecolor='orange')
# arrival planet
plot1 = plt.plot(self.plPosCart['xArr']/pk.AU, self.plPosCart['yArr']/pk.AU, label='Arrival Planet', color='C3', zorder=1)
plot0 = plt.scatter(self.plPosCart['xArr'][0]/pk.AU, self.plPosCart['yArr'][0]/pk.AU, color='k', zorder=2)
plot0 = plt.scatter(self.plPosCart['xArr'][-1]/pk.AU, self.plPosCart['yArr'][-1]/pk.AU, color='k', zorder=2)
# departure planet
plot1 = plt.plot(self.plPosCart['xDep']/pk.AU, self.plPosCart['yDep']/pk.AU, label='Departure Planet', color='C0', zorder=1)
plot1 = plt.scatter(self.plPosCart['xDep'][0]/pk.AU, self.plPosCart['yDep'][0]/pk.AU, color='C2', label='launch', zorder=2)
plot1 = plt.scatter(self.plPosCart['xArr'][-1]/pk.AU, self.plPosCart['yArr'][-1]/pk.AU, color='C3', label='arrival', zorder=2)
# trajectory
plot1 = plt.plot(self.traPosCart['x']/pk.AU, self.traPosCart['y']/pk.AU, label='Trajectory', color='C1', zorder=1)
plot0 = plt.scatter(self.traPosCart['x'][0]/pk.AU, self.traPosCart['y'][0]/pk.AU, color='k', zorder=2)
plot0 = plt.scatter(self.traPosCart['x'][-1]/pk.AU, self.traPosCart['y'][-1]/pk.AU, color='k', zorder=2)
plt.xlabel('$x$ [AU]')
plt.ylabel('$y$ [AU]')
plt.grid()
ax = plt.gca()
ax.set_axisbelow(True)
plt.legend()
plt.axis('equal')
if save==None:
save = self.save
if folder==None:
folder = self.folder
if save==True:
checkFolder(folder)
plt.savefig(os.path.join(os.getcwd(), folder, 'trajectory2D.pdf'), dpi=300)
plt.savefig(os.path.join(os.getcwd(), folder, 'trajectory2D.png'), dpi=300)
plt.show()
def trajectory3Danimation(self, save=None, folder=None):
"""
Animation of the flown trajectory
"""
print('Show animated trajectory.')
import matplotlib.animation as animation
# data = np.array([x, y])
data = np.vstack((self.traPosCart['x'],
self.traPosCart['y'],
self.traPosCart['z']))
dataDep = np.vstack((self.plPosCart['xDep'],
self.plPosCart['yDep'],
self.plPosCart['zDep']))
dataArr = np.vstack((self.plPosCart['xArr'],
self.plPosCart['yArr'],
self.plPosCart['zArr']))
data /= pk.AU
dataDep /= pk.AU
dataArr /= pk.AU
# create figure
fig = plt.figure(figsize=(10, 10))
ax = fig.gca(projection='3d')
# start with an empty plot
line0, = plt.plot([], [], [], "C1-", zorder=3)
dot0, = plt.plot([], [], [], "C1o", zorder=3)
dot1, = plt.plot([], [], [], "C0o", zorder=3)
dot2, = plt.plot([], [], [], "C3o", zorder=3)
# Sun
sun = ax.scatter([0], [0], [0], s=100, color='yellow', label='Sun',
marker='o', edgecolor='orange')
# Departure planet
planet1 = ax.plot(self.plPosCart['xDep']/pk.AU,
self.plPosCart['yDep']/pk.AU,
self.plPosCart['zDep']/pk.AU,
label='Departure planet', c='C0')
# Arrival planet
planet2 = ax.plot(self.plPosCart['xArr']/pk.AU,
self.plPosCart['yArr']/pk.AU,
self.plPosCart['zArr']/pk.AU,
label='Arrival planet', c='C3')
# formatting
ax.set_xlabel('x [AU]', labelpad=15)
ax.set_ylabel('y [AU]', labelpad=15)
ax.set_zlabel('z [AU]', labelpad=15)
# ax.set_zlim(-0.05, 0.05)
axisEqual3D(ax)
plt.grid(True)
# plt.title("Low-thrust trajectory")
# this function will be called at every iteration
def update_line(num, data, line, dot0, dot1, dot2):
line.set_data(data[0:2, :num])
line.set_3d_properties(data[2, :num])
dot0.set_data(data[0:2, num])
dot0.set_3d_properties(data[2, num])
dot1.set_data(dataDep[0:2, num])
dot1.set_3d_properties(dataDep[2, num])
dot2.set_data(dataArr[0:2, num])
dot2.set_3d_properties(dataArr[2, num])
return line,
nFrame = int(len(self.traPosCart['x']))
line_ani = animation.FuncAnimation(fig, update_line, frames=nFrame,
fargs=(data, line0, dot0, dot1, dot2),
interval=20, repeat_delay=1e3)
if save==None:
save = self.save
if folder==None:
folder = self.folder
if save==True:
checkFolder(folder)
Writer = animation.writers['ffmpeg']
writer = Writer(fps=60, metadata=dict(artist='Leon S'),
bitrate=1800)
line_ani.save(os.path.join(os.getcwd(), folder, 'trajectory3D.mp4'),
writer=writer)
# line_ani.save(os.path.join(os.getcwd(), folder,
# 'trajectory3D.mp4'), fps=30, extra_args=['-vcodec', 'libx264'])
plt.show()
def hodograph(self, twoDplot=False, save=None, folder=None):
"""
Plot the trajectory's hodograph
Plot the given trajectory in 2D as subplot if twoDplot is set to True
"""
print('Plot hodograph')
# Hoodgraph and orbits
if twoDplot:
figHodoOrbit = newFigure(height=7)
else:
figHodoOrbit = newFigure(height=3)
# Hodograph
if twoDplot:
plt.subplot(2, 1, 1)
#departure planet
plot1 = plt.plot(self.plVelCyl['VrDep']/1E3, self.plVelCyl['VtDep']/1E3, label='Departure Planet', color='C0', zorder=1)
plot0 = plt.scatter(self.plVelCyl['VrDep'][-1]/1E3, self.plVelCyl['VtDep'][-1]/1E3, color='k', zorder=2)
#arrival planet
plot1 = plt.plot(self.plVelCyl['VrArr']/1E3, self.plVelCyl['VtArr']/1E3, label='Arrival Planet', color='C3', zorder=1)
plot0 = plt.scatter(self.plVelCyl['VrArr'][0]/1E3, self.plVelCyl['VtArr'][0]/1E3, color='k', zorder=2)
# trajectory
plot1 = plt.plot(self.traVelCyl['vr']/1E3, self.traVelCyl['vt']/1E3, label='Trajectory', color='C1', zorder=1)
plot1 = plt.scatter(self.traVelCyl['vr'][0]/1E3, self.traVelCyl['vt'][0]/1E3, color='C2', label='launch', zorder=2)
plot1 = plt.scatter(self.traVelCyl['vr'][-1]/1E3, self.traVelCyl['vt'][-1]/1E3, color='C3', label='arrival', zorder=2)
plt.xlabel('$V_r$ [km/s]')
plt.ylabel('$V_t$ [km/s]')
plt.grid()
ax = plt.gca()
ax.set_axisbelow(True)
plt.legend()
plt.axis('equal')
# Positions
if twoDplot:
plt.title('Hodograph')
plt.subplot(2, 1, 2)
# trajectory
plot1 = plt.plot(self.traPosCart['x']/pk.AU, self.traPosCart['y']/pk.AU, label='Trajectory', color='C1', zorder=1)
plot0 = plt.scatter(self.traPosCart['x'][0]/pk.AU, self.traPosCart['y'][0]/pk.AU, color='k', zorder=2)
plot0 = plt.scatter(self.traPosCart['x'][-1]/pk.AU, self.traPosCart['y'][-1]/pk.AU, color='k', zorder=2)
# arrival planet
plot1 = plt.plot(self.plPosCart['xArr']/pk.AU, self.plPosCart['yArr']/pk.AU, label='Arrival Planet', color='C3', zorder=1)
plot0 = plt.scatter(self.plPosCart['xArr'][0]/pk.AU, self.plPosCart['yArr'][0]/pk.AU, color='k', zorder=2)
plot0 = plt.scatter(self.plPosCart['xArr'][-1]/pk.AU, self.plPosCart['yArr'][-1]/pk.AU, color='k', zorder=2)
# departure planet
plot1 = plt.plot(self.plPosCart['xDep']/pk.AU, self.plPosCart['yDep']/pk.AU, label='Departure Planet', color='C0', zorder=1)
plot1 = plt.scatter(self.plPosCart['xDep'][0]/pk.AU, self.plPosCart['yDep'][0]/pk.AU, color='C2', label='launch', zorder=2)
plot1 = plt.scatter(self.plPosCart['xArr'][-1]/pk.AU, self.plPosCart['yArr'][-1]/pk.AU, color='C3', label='arrival', zorder=2)
plt.xlabel('$x$ [AU]')
plt.ylabel('$y$ [AU]')
plt.grid()
ax = plt.gca()
ax.set_axisbelow(True)
plt.legend()
plt.title('Orbit')
plt.axis('equal')
plt.tight_layout()
if save==None:
save = self.save
if folder==None:
folder = self.folder
if save==True:
checkFolder(folder)
plt.savefig(os.path.join(os.getcwd(), folder, 'hodograph.pdf'), dpi=300)
plt.savefig(os.path.join(os.getcwd(), folder, 'hodograph.png'), dpi=300)
plt.show()
def thrust(self, save=None, folder=None):
"""
Plot the thrust profile in Cylindrical coordinates
"""
print('Plot thrust')
fig = newFigure(height=3)
samplePoints = self.tSampleSec
# Cylindrical accelerations
plot1 = plt.plot(self.tSample, self.trajectory.fr(samplePoints), ':', label=r'$f_r$')
plot1 = plt.plot(self.tSample, self.trajectory.ft(samplePoints), '--', label=r'$f_\theta$')
plot1 = plt.plot(self.tSample, self.trajectory.fz(samplePoints), '-.', label=r'$f_z$')
plot1 = plt.plot(self.tSample, self.trajectory.fTotal(samplePoints), '-', label=r'$f_{\mathrm{total}}$', alpha=0.5)
plt.grid()
plt.xlabel('time [mjd2000]')
plt.ylabel(r'$f$ $[m/s^2]$')
plt.xlim([self.tSample[0], self.tSample[-1]])
# plt.ylim([-0.0004, 0.0005])
plt.title('Thrust acceleration')
plt.legend()
if save==None:
save = self.save
if folder==None:
folder = self.folder
if save==True:
checkFolder(folder)
plt.savefig(os.path.join(os.getcwd(), folder, 'thrust.pdf'), dpi=300)
plt.savefig(os.path.join(os.getcwd(), folder, 'thrust.png'), dpi=300)
plt.show()
def figure119(self, save=None, folder=None):
"""
Plot the thrust profile next to the 3D trajectory
Recreates Figure 11.9 in [Gondelach, 2012]
"""
print('Plot trajectory and thrust, recreating Figure 11.9')
samplePoints = self.tSampleSec
# initialize figure
fig = plt.figure(figsize=(10, 4))
gs = mlt.gridspec.GridSpec(1, 2, width_ratios=[3, 2])
ax0 = plt.subplot(gs[0], projection='3d')
# plot 3D trajectory
ax0.plot(self.plPosCart['xDep']/pk.AU, self.plPosCart['yDep']/pk.AU, self.plPosCart['zDep']/pk.AU, label='Earth', c='b')
ax0.plot(self.plPosCart['xArr']/pk.AU, self.plPosCart['yArr']/pk.AU, self.plPosCart['zArr']/pk.AU, label='Mars', c='k')
ax0.plot(self.traPosCart['x']/pk.AU, self.traPosCart['y']/pk.AU, self.traPosCart['z']/pk.AU, label='Transfer', c='r')
# axis formatting
ax0.set_xlim(-2, 2)
ax0.set_xticks([-2, -1, 0, 1, 2])
ax0.set_ylim(-2, 2)
ax0.set_yticks([-2, 0, 2])
ax0.set_zlim(-0.06, 0.05)
ax0.view_init(30, -95)
ax0.xaxis.pane.fill = False
ax0.yaxis.pane.fill = False
ax0.zaxis.pane.fill = False
ax0.grid(False)
ax0.set_xlabel('x [AU]')
ax0.set_ylabel('y [AU]')
ax0.set_zlabel('z [AU]', labelpad=10)
ax0.tick_params(axis='z', pad=8)
# plt.legend()
# plot thrust profile
ax1 = plt.subplot(gs[1])
tDays = np.linspace(0, self.trajectory.tof, self.samples)
ax1.plot(tDays, self.trajectory.fr(samplePoints), '-b', label='Radial')
ax1.plot(tDays, self.trajectory.ft(samplePoints), '-r', label='Normal')
ax1.plot(tDays, self.trajectory.fz(samplePoints), '-g', label='Axial')
ax1.plot(tDays, self.trajectory.fTotal(samplePoints), '--k', label='Total')
ax1.set_xlabel('Time [days]')
ax1.set_xticks([0, 200, 400, 600, 800, 1000, 1200])
ax1.set_ylabel('Thrust acceleration [m/s^2]')
ax1.set_ylim([-5e-5, 20e-5])
ax1.set_xlim(left=tDays[0])
ax1.ticklabel_format(style='sci', axis='y', scilimits=(-5,-5))
ax1.spines['top'].set_visible(False)
ax1.spines['right'].set_visible(False)
ax1.legend()
fig.tight_layout()
if save==None:
save = self.save
if folder==None:
folder = self.folder
if save==True:
checkFolder(folder)
plt.savefig(os.path.join(os.getcwd(), folder, '119.pdf'), dpi=300)
plt.savefig(os.path.join(os.getcwd(), folder, '119.png'), dpi=300)
plt.show()
def stateVectorsAll(self, save=None, folder=None):
"""
Plot the spacecraft's state vectors over time
Velocity, position and acceleration in cylindrical and cartesian coordinates
"""
print('Plot position and velocity (cylindrical and cartesian)')
fig = plt.figure(figsize=(12, 15))
# Cartesian velocities
nPlots = 6
plt.subplot(nPlots, 2, 1)
plot1 = plt.plot(self.tSample, self.traVelCart['vx'], color='C0')
plt.grid()
plt.xlabel('time [days]')
plt.ylabel('$V_x$ [m/s]')
plt.title('Cartesian Velocities')
plt.subplot(nPlots, 2, 3)
plot1 = plt.plot(self.tSample, self.traVelCart['vy'], color='C0')
plt.grid()
plt.xlabel('time [days]')
plt.ylabel('$V_y$ [m/s]')
plt.subplot(nPlots, 2, 5)
plot1 = plt.plot(self.tSample, self.traVelCart['vz'], color='C0')
plt.grid()
plt.xlabel('time [days]')
plt.ylabel('$V_z$ [m/s]')
# Cylindrical velocities
plt.subplot(nPlots, 2, 2)
plot1 = plt.plot(self.tSample, self.traVelCyl['vr'], color='C1')
plt.grid()
plt.xlabel('time [days]')
plt.ylabel(r'$V_r$ [m/s]')
plt.title('Cylindrical Velocities')
plt.subplot(nPlots, 2, 4)
plot1 = plt.plot(self.tSample, self.traVelCyl['vt'], color='C1')
plt.grid()
plt.xlabel('time [days]')
plt.ylabel(r'$V_\theta$ [m/s]')
plt.subplot(nPlots, 2, 6)
plot1 = plt.plot(self.tSample, self.traVelCyl['vz'], color='C1')
plt.grid()
plt.xlabel('time [days]')
plt.ylabel(r'$V_z$ [m/s]')
# Cartesian positions
plt.subplot(nPlots, 2, 7)
plot1 = plt.plot(self.tSample, self.traPosCart['x']/pk.AU, color='C2')
plt.grid()
plt.xlabel('time [days]')
plt.ylabel('$x$ [AU]')
plt.title('Cartesian Positions')
plt.subplot(nPlots, 2, 9)
plot1 = plt.plot(self.tSample, self.traPosCart['y']/pk.AU, color='C2')
plt.grid()
plt.xlabel('time [days]')
plt.ylabel('$y$ [AU]')
plt.subplot(nPlots, 2, 11)
plot1 = plt.plot(self.tSample, self.traPosCart['z']/pk.AU, color='C2')
plt.grid()
plt.xlabel('time [days]')
plt.ylabel('$z$ [AU]')
# Cylindrical positions
plt.subplot(nPlots, 2, 8)
plot1 = plt.plot(self.tSample, self.traPosCyl['r']/pk.AU, color='C3')
plt.grid()
plt.xlabel('time [days]')
plt.ylabel(r'$r$ [AU]')
plt.title('Cylindrical Positions')
plt.subplot(nPlots, 2, 10)
plot1 = plt.plot(self.tSample, self.traPosCyl['t']*180/np.pi, color='C3')
plt.grid()
plt.xlabel('time [days]')
plt.ylabel(r'$\theta$ [deg]')
plt.subplot(nPlots, 2, 12)
plot1 = plt.plot(self.tSample, self.traPosCyl['z']/pk.AU, color='C3')
plt.grid()
plt.xlabel('time [days]')
plt.ylabel(r'$z$ [AU]')
plt.tight_layout()
if save==None:
save = self.save
if folder==None:
folder = self.folder
if save==True:
checkFolder(folder)
plt.savefig(os.path.join(os.getcwd(), folder, 'state.pdf'), dpi=300)
plt.savefig(os.path.join(os.getcwd(), folder, 'state.png'), dpi=300)
plt.show()
def stateVectorsCylindrical(self, save=None, folder=None):
"""
Plot the spacecraft's state vectors over time
Velocity, position and acceleration in cylindrical and cartesian coordinates
"""
print('Plot cylindrical state vectors')
fig = plt.figure(figsize=(12, 12))
nPlots = 3
# Cylindrical positions
plt.subplot(nPlots, 3, 1)
plot1 = plt.plot(self.tSample, self.traPosCyl['r']/pk.AU, color='C3')
plt.grid()
plt.xlabel('time [days]')
plt.ylabel(r'$r$ [AU]')
plt.title('Cylindrical Positions')
plt.subplot(nPlots, 3, 4)
plot1 = plt.plot(self.tSample, self.traPosCyl['t']*180/np.pi, color='C3')
plt.grid()
plt.xlabel('time [days]')
plt.ylabel(r'$\theta$ [deg]')
plt.subplot(nPlots, 3, 7)
plot1 = plt.plot(self.tSample, self.traPosCyl['z']/pk.AU, color='C3')
plt.grid()
plt.xlabel('time [days]')
plt.ylabel(r'$z$ [AU]')
# Cylindrical velocities
plt.subplot(nPlots, 3, 2)
plot1 = plt.plot(self.tSample, self.traVelCyl['vr'], color='C1')
plt.grid()
plt.xlabel('time [days]')
plt.ylabel(r'$V_r$ [m/s]')
plt.title('Cylindrical Velocities')
plt.subplot(nPlots, 3, 5)
plot1 = plt.plot(self.tSample, self.traVelCyl['vt'], color='C1')
plt.grid()
plt.xlabel('time [days]')
plt.ylabel(r'$V_\theta$ [m/s]')
plt.subplot(nPlots, 3, 8)
plot1 = plt.plot(self.tSample, self.traVelCyl['vz'], color='C1')
plt.grid()
plt.xlabel('time [days]')
plt.ylabel(r'$V_z$ [m/s]')
def stateVectorsCylindricalInclPlanets(self, save=None, folder=None):
"""
Plot the spacecraft's and planets' state vectors over time
Velocity, position and acceleration in cylindrical and cartesian coordinates
"""
print('Plot cylindrical state vectors')
fig = plt.figure(figsize=(12, 12))
nPlots = 3
# Cylindrical positions
plt.subplot(nPlots, 3, 1)
plot1 = plt.plot(self.tSample, self.traPosCyl['r']/pk.AU, label='Trajectory', c='C1')
plot1 = plt.plot(self.tSample, self.plPosCyl['rDep']/pk.AU, label='Departure planet', c='C0')
plot1 = plt.plot(self.tSample, self.plPosCyl['rArr']/pk.AU, label='Arrival planet', c='C3')
plt.grid()
plt.legend()
plt.xlabel('time [days]')
plt.ylabel(r'$r$ [AU]')
plt.title('Cylindrical Positions')
plt.subplot(nPlots, 3, 4)
tsaw = self.traPosCyl['t']*180/np.pi
for i in range(0, 6):
tsaw[tsaw > 180] = tsaw[tsaw > 180] - 360 # make saw pattern
plot1 = plt.plot(self.tSample, tsaw, label='Trajectory', c='C1')
plot1 = plt.plot(self.tSample, self.plPosCyl['tDep']*180/np.pi, label='Departure planet', c='C0')
plot1 = plt.plot(self.tSample, self.plPosCyl['tArr']*180/np.pi, label='Arrival planet', c='C3')
plt.grid()
plt.legend()
plt.xlabel('time [days]')
plt.ylabel(r'$\theta$ [deg]')
plt.subplot(nPlots, 3, 7)
plot1 = plt.plot(self.tSample, self.traPosCyl['z']/pk.AU, label='Trajectory', c='C1')
plot1 = plt.plot(self.tSample, self.plPosCyl['zDep']/pk.AU, label='Departure planet', c='C0')
plot1 = plt.plot(self.tSample, self.plPosCyl['zArr']/pk.AU, label='Arrival planet', c='C3')
plt.grid()
plt.legend()
plt.xlabel('time [days]')
plt.ylabel(r'$z$ [AU]')
# Cylindrical velocities
plt.subplot(nPlots, 3, 2)
plot1 = plt.plot(self.tSample, self.traVelCyl['vr'], label='Trajectory', c='C1')
plot1 = plt.plot(self.tSample, self.plVelCyl['VrDep'], label='Departure planet', c='C0')
plot1 = plt.plot(self.tSample, self.plVelCyl['VrArr'], label='Arrival planet', c='C3')
plt.grid()
plt.legend()
plt.xlabel('time [days]')
plt.ylabel(r'$V_r$ [m/s]')
plt.title('Cylindrical Velocities')
plt.subplot(nPlots, 3, 5)
plot1 = plt.plot(self.tSample, self.traVelCyl['vt'], label='Trajectory', c='C1')
plot1 = plt.plot(self.tSample, self.plVelCyl['VtDep'], label='Departure planet', c='C0')
plot1 = plt.plot(self.tSample, self.plVelCyl['VtArr'], label='Arrival planet', c='C3')
plt.grid()
plt.legend()
plt.xlabel('time [days]')
plt.ylabel(r'$V_\theta$ [m/s]')
plt.subplot(nPlots, 3, 8)
plot1 = plt.plot(self.tSample, self.traVelCyl['vz'], label='Trajectory', c='C1')
plot1 = plt.plot(self.tSample, self.plVelCyl['VzDep'], label='Departure planet', c='C0')
plot1 = plt.plot(self.tSample, self.plVelCyl['VzArr'], label='Arrival planet', c='C3')
plt.grid()
plt.legend()
plt.xlabel('time [days]')
plt.ylabel(r'$V_z$ [m/s]')
# Cylindrical accelerations
plt.subplot(nPlots, 3, 3)
plot1 = plt.plot(self.tSample, self.traAccCyl['ar'], color='C1')
plt.grid()
plt.xlabel('time [days]')
plt.ylabel(r'$a_r$ [m/s^2]')
plt.title('Cylindrical Accelerations')
plt.subplot(nPlots, 3, 6)
plot1 = plt.plot(self.tSample, self.traAccCyl['at'], color='C1')
plt.grid()
plt.xlabel('time [days]')
plt.ylabel(r'$a_\theta$ [m/s^2]')
plt.subplot(nPlots, 3, 9)
plot1 = plt.plot(self.tSample, self.traAccCyl['az'], color='C1')
plt.grid()
plt.xlabel('time [days]')
plt.ylabel(r'$a_z$ [m/s^2]')
plt.tight_layout()
if save==None:
save = self.save
if folder==None:
folder = self.folder
if save==True:
checkFolder(folder)
plt.savefig(os.path.join(os.getcwd(), folder, 'stateCylindricalInclPlanets.pdf'), dpi=300)
plt.savefig(os.path.join(os.getcwd(), folder, 'stateCylindricalInclPlanets.png'), dpi=300)
plt.show()
def stateVectorsCartesian(self, save=None, folder=None):
"""
Plot the spacecraft's state vectors over time
Velocity, position and acceleration in cylindrical and cartesian coordinates
"""
print('Plot cartesian state vectors')
fig = plt.figure(figsize=(12, 12))
nPlots = 3
# Cartesian positions
plt.subplot(nPlots, 2, 1)
plot1 = plt.plot(self.tSample, self.traPosCart['x']/pk.AU, color='C3')
plt.grid()
plt.xlabel('time [days]')
plt.ylabel(r'$x$ [AU]')
plt.title('Cartesian Positions')
plt.subplot(nPlots, 2, 3)
plot1 = plt.plot(self.tSample, self.traPosCart['y']/pk.AU, color='C3')
plt.grid()
plt.xlabel('time [days]')
plt.ylabel(r'$y$ [AU]')
plt.subplot(nPlots, 2, 5)
plot1 = plt.plot(self.tSample, self.traPosCart['z']/pk.AU, color='C3')
plt.grid()
plt.xlabel('time [days]')
plt.ylabel(r'$z$ [AU]')
# Cartesian velocities
plt.subplot(nPlots, 2, 2)
plot1 = plt.plot(self.tSample, self.traVelCart['vx'], color='C1')
plt.grid()
plt.xlabel('time [days]')
plt.ylabel(r'$V_x$ [m/s]')
plt.title('Cartesian Velocities')
plt.subplot(nPlots, 2, 4)
plot1 = plt.plot(self.tSample, self.traVelCart['vy'], color='C1')
plt.grid()
plt.xlabel('time [days]')
plt.ylabel(r'$V_y$ [m/s]')
plt.subplot(nPlots, 2, 6)
plot1 = plt.plot(self.tSample, self.traVelCart['vz'], color='C1')
plt.grid()
plt.xlabel('time [days]')
plt.ylabel(r'$V_z$ [m/s]')
plt.tight_layout()
if save==None:
save = self.save
if folder==None:
folder = self.folder
if save==True:
checkFolder(folder)
plt.savefig(os.path.join(os.getcwd(), folder, 'stateCartesian.pdf'), dpi=300)
plt.savefig(os.path.join(os.getcwd(), folder, 'stateCartesian.png'), dpi=300)
plt.show()
def samplePlanets(self, trajectory, samples=100):
"""
Return a dictionary with sampled position vectors of the departure and
arrival planets of the given trajectory
"""
# define planets
if self.ephemSource == 'jpl':
planetDep = pk.planet.jpl_lp(trajectory.departureBody)
planetArr = pk.planet.jpl_lp(trajectory.arrivalBody)
elif self.ephemSource == 'spice':
planetDep = pk.planet.spice(trajectory.departureBody, 'sun', 'eclipj2000')
planetArr = pk.planet.spice(trajectory.arrivalBody, 'sun', 'eclipj2000')
else:
print('ERROR: This is not a valid source of ephemerides.')
# time variable [days]
self.tSample = np.linspace(self.trajectory.jdDep, self.trajectory.jdArr, samples)
tSample = self.tSample
# init planet velocity vectors
tof = self.trajectory.tof
VrDep = np.linspace(0, tof, samples)
VtDep = np.linspace(0, tof, samples)
VzDep = np.linspace(0, tof, samples)
VrArr = np.linspace(0, tof, samples)
VtArr = np.linspace(0, tof, samples)
VzArr = np.linspace(0, tof, samples)
VxDep = np.linspace(0, tof, samples)
VyDep = np.linspace(0, tof, samples)
VzDep = np.linspace(0, tof, samples)
VxArr = np.linspace(0, tof, samples)
VyArr = np.linspace(0, tof, samples)
VzArr = np.linspace(0, tof, samples)
# init position vectors
xDep = np.linspace(0, tof, samples)
yDep = np.linspace(0, tof, samples)
zDep = np.linspace(0, tof, samples)
xArr = np.linspace(0, tof, samples)
yArr = np.linspace(0, tof, samples)
zArr = np.linspace(0, tof, samples)
rDep = np.linspace(0, tof, samples)
tDep = np.linspace(0, tof, samples)
zDep = np.linspace(0, tof, samples)
rArr = np.linspace(0, tof, samples)
tArr = np.linspace(0, tof, samples)
zArr = np.linspace(0, tof, samples)
# retrieve and convert planet state vectors
for i in range(0, len(tSample)):
epochSample = pk.epoch(tSample[i], 'mjd2000')
# Departure planet
rCart, vCart = planetDep.eph(epochSample)
vCyl = Vcart2cyl(vCart, rCart)
rCyl = Pcart2cyl(rCart)
xDep[i] = rCart[0]
yDep[i] = rCart[1]
zDep[i] = rCart[2]
rDep[i] = rCyl[0]
tDep[i] = rCyl[1]
zDep[i] = rCyl[2]
VrDep[i] = vCyl[0]
VtDep[i] = vCyl[1]
VxDep[i] = vCart[0]
VyDep[i] = vCart[1]
VzDep[i] = vCart[2]
# Arrival planet
rCart, vCart = planetArr.eph(epochSample)
vCyl = Vcart2cyl(vCart, rCart)
rCyl = Pcart2cyl(rCart)
xArr[i] = rCart[0]
yArr[i] = rCart[1]
zArr[i] = rCart[2]
rArr[i] = rCyl[0]
tArr[i] = rCyl[1]
zArr[i] = rCyl[2]
VrArr[i] = vCyl[0]
VtArr[i] = vCyl[1]
VxArr[i] = vCart[0]
VyArr[i] = vCart[1]
VzArr[i] = vCart[2]
# dictionary with cartesian positions
planetCartesianPositions = {'xDep' : xDep,
'yDep' : yDep,
'zDep' : zDep,
'xArr' : xArr,
'yArr' : yArr,
'zArr' : zArr}
planetCylindricalPositions = {'rDep' : rDep,
'tDep' : tDep,
'zDep' : zDep,
'rArr' : rArr,
'tArr' : tArr,
'zArr' : zArr}
planetCartesianVelocities = {'VxDep' : VxDep,
'VyDep' : VyDep,
'VzDep' : VzDep,
'VxArr' : VxArr,
'VyArr' : VyArr,
'VzArr' : VzArr}
planetCylindricalVelocity = {'VrDep' : VrDep,
'VtDep' : VtDep,
'VzDep' : VzDep,
'VrArr' : VrArr,
'VtArr' : VtArr,
'VzArr' : VzArr}
print('Done sampling planets.')
return planetCartesianPositions, planetCylindricalPositions, planetCartesianVelocities, planetCylindricalVelocity
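# Note on the conversion helpers used above (assumed definitions, given for
# reference only): with x = r*cos(theta), y = r*sin(theta), z = z, the
# cylindrical velocity components follow from the Cartesian ones as
#     v_r     =  vx*cos(theta) + vy*sin(theta)
#     v_theta = -vx*sin(theta) + vy*cos(theta)
#     v_z     =  vz
# which is what Vcart2cyl (and its inverse Vcyl2cart) are expected to do.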
def sampleTrajectoryPosition(self, trajectory, samples=100):
"""
Returns Cartesian position vectors of the full trajectory
I.e. from t=0 to t=tof
"""
# time vector
self.tSampleSec = np.linspace(0, self.trajectory.tofSec, samples)
tSampleSec = self.tSampleSec
# sample and compute position vectors
xTra = np.linspace(0, self.trajectory.tofSec, samples)
yTra = np.linspace(0, self.trajectory.tofSec, samples)
zTra = np.linspace(0, self.trajectory.tofSec, samples)
tTra = np.linspace(0, self.trajectory.tofSec, samples)
rTra = np.linspace(0, self.trajectory.tofSec, samples)
zTra = np.linspace(0, self.trajectory.tofSec, samples)
for i in range(0, len(tSampleSec)):
ti = tSampleSec[i]
rTra[i], tTra[i], zTra[i] = [self.trajectory.r(ti), self.trajectory.t(ti), self.trajectory.z(ti)]
xTra[i], yTra[i], zTra[i] = Pcyl2cart([rTra[i], tTra[i], zTra[i]])
# dictionary with cartesian positions
trajectoryCartPositions = {'x' : xTra,
'y' : yTra,
'z' : zTra}
trajectoryCylPositions = {'r' : rTra,
't' : tTra,
'z' : zTra}
print('Done sampling trajectory position.')
return trajectoryCartPositions, trajectoryCylPositions
def sampleTrajectoryVelocity(self, trajectory, samples=100):
"""
Returns Cartesian velocity vectors of the full trajectory
I.e. from t=0 to t=tof
"""
# time vector
tSampleSec = self.tSampleSec
# cartesian velocities
xTraVel = np.linspace(0, self.trajectory.tofSec, samples)
yTraVel = np.linspace(0, self.trajectory.tofSec, samples)
zTraVel = np.linspace(0, self.trajectory.tofSec, samples)
rTraVel = np.linspace(0, self.trajectory.tofSec, samples)
tTraVel = np.linspace(0, self.trajectory.tofSec, samples)
zTraVel = np.linspace(0, self.trajectory.tofSec, samples)
for i in range(0, len(tSampleSec)):
vCyl = [self.trajectory.rDot(tSampleSec[i]), self.trajectory.tDot(tSampleSec[i]), self.trajectory.zDot(tSampleSec[i])]
rCyl = [self.trajectory.r(tSampleSec[i]), self.trajectory.t(tSampleSec[i]), self.trajectory.z(tSampleSec[i])]
vCart = Vcyl2cart(vCyl, rCyl)
xTraVel[i] = vCart[0]
yTraVel[i] = vCart[1]
zTraVel[i] = vCart[2]
rTraVel[i] = vCyl[0]
tTraVel[i] = vCyl[1]
zTraVel[i] = vCyl[2]
# dictionaries
trajectoryVelocitiesCart = {'vx' : xTraVel,
'vy' : yTraVel,
'vz' : zTraVel}
trajectoryVelocitiesCyl = {'vr' : rTraVel,
'vt' : tTraVel,
'vz' : zTraVel}
print('Done sampling trajectory velocity.')
return trajectoryVelocitiesCart, trajectoryVelocitiesCyl
def sampleTrajectoryAcceleration(self, trajectory, samples=100):
"""
Returns cylindrical acceleration vectors of the full trajectory
"""
# initialize vectors
rTraAcc = np.linspace(0, 1, samples)
tTraAcc = np.linspace(0, 1, samples)
zTraAcc = np.linspace(0, 1, samples)
xTraAcc = np.linspace(0, 1, samples)
yTraAcc = np.linspace(0, 1, samples)
totalTraAcc = np.linspace(0, 1, samples)
x = self.traPosCart['x']
y = self.traPosCart['y']
z = self.traPosCart['z']
# sample acceleration vectors
for i in range(0, len(self.tSampleSec)):
ti = self.tSampleSec[i]
aCyl = [self.trajectory.rDDot(ti), self.trajectory.tDDot(ti), self.trajectory.zDDot(ti)]
rTraAcc[i] = aCyl[0]
tTraAcc[i] = aCyl[1]
zTraAcc[i] = aCyl[2]
# dictionaries
trajectoryAccelerationsCyl = {'ar' : rTraAcc,
'at' : tTraAcc,
'az' : zTraAcc}
print('Done sampling trajectory acceleration.')
return trajectoryAccelerationsCyl
|
py | 1a3b13dab80844ba2ffdd83698f595a68a179755 | from django import http
from django.db.models import Q
from django.db.transaction import non_atomic_requests
from django.utils.encoding import force_bytes
from django.utils.translation import ugettext
from django.views.decorators.vary import vary_on_headers
import six
import olympia.core.logger
from olympia import amo
from olympia.addons.models import Addon, Category
from olympia.amo.decorators import json_view
from olympia.amo.templatetags.jinja_helpers import locale_url, urlparams
from olympia.amo.utils import render, sorted_groupby
from olympia.browse.views import personas_listing as personas_listing_view
from olympia.versions.compare import dict_from_int, version_dict, version_int
from .forms import ESSearchForm
DEFAULT_NUM_PERSONAS = 21 # Results appear in a grid of 3 personas x 7 rows.
log = olympia.core.logger.getLogger('z.search')
def _personas(request):
"""Handle the request for persona searches."""
initial = dict(request.GET.items())
# Ignore these filters since they return the same results for Firefox
# as for Thunderbird, etc.
initial.update(appver=None, platform=None)
form = ESSearchForm(initial, type=amo.ADDON_PERSONA)
form.is_valid()
qs = Addon.search_public()
filters = ['sort']
mapping = {
'downloads': '-weekly_downloads',
'users': '-average_daily_users',
'rating': '-bayesian_rating',
'created': '-created',
'name': 'name.raw',
'updated': '-last_updated',
'hotness': '-hotness'}
results = _filter_search(request, qs, form.cleaned_data, filters,
sorting=mapping,
sorting_default='-average_daily_users',
types=[amo.ADDON_PERSONA])
form_data = form.cleaned_data.get('q', '')
search_opts = {}
search_opts['limit'] = form.cleaned_data.get('pp', DEFAULT_NUM_PERSONAS)
page = form.cleaned_data.get('page') or 1
search_opts['offset'] = (page - 1) * search_opts['limit']
pager = amo.utils.paginate(request, results, per_page=search_opts['limit'])
categories, filter, base, category = personas_listing_view(request)
context = {
'pager': pager,
'form': form,
'categories': categories,
'query': form_data,
'filter': filter,
'search_placeholder': 'themes'}
return render(request, 'search/personas.html', context)
class BaseAjaxSearch(object):
"""Generates a list of dictionaries of add-on objects based on
ID or name matches. Safe to be served to a JSON-friendly view.
Sample output:
[
{
"id": 1865,
"name": "Adblock Plus",
"url": "http://path/to/details/page",
"icons": {
"32": "http://path/to/icon-32",
"64": "http://path/to/icon-64"
}
},
...
]
"""
def __init__(self, request, excluded_ids=(), ratings=False):
self.request = request
self.excluded_ids = excluded_ids
self.src = getattr(self, 'src', None)
self.types = getattr(self, 'types', amo.ADDON_TYPES.keys())
self.limit = 10
self.key = 'q' # Name of search field.
self.ratings = ratings
# Mapping of JSON key => add-on property.
default_fields = {
'id': 'id',
'name': 'name',
'url': 'get_url_path',
'icons': {
'32': ('get_icon_url', 32),
'64': ('get_icon_url', 64)
}
}
self.fields = getattr(self, 'fields', default_fields)
if self.ratings:
self.fields['rating'] = 'average_rating'
def queryset(self):
"""Get items based on ID or search by name."""
results = Addon.objects.none()
q = self.request.GET.get(self.key)
if q:
try:
pk = int(q)
except ValueError:
pk = None
qs = None
if pk:
qs = Addon.objects.public().filter(id=int(q))
elif len(q) > 2:
qs = Addon.search_public().filter_query_string(q.lower())
if qs:
results = qs.filter(type__in=self.types)
return results
def _build_fields(self, item, fields):
data = {}
for key, prop in six.iteritems(fields):
if isinstance(prop, dict):
data[key] = self._build_fields(item, prop)
else:
# prop is a tuple like: ('method', 'arg1, 'argN').
if isinstance(prop, tuple):
val = getattr(item, prop[0])(*prop[1:])
else:
val = getattr(item, prop, '')
if callable(val):
val = val()
data[key] = six.text_type(val)
return data
def build_list(self):
"""Populate a list of dictionaries based on label => property."""
results = []
for item in self.queryset()[:self.limit]:
if item.id in self.excluded_ids:
continue
d = self._build_fields(item, self.fields)
if self.src and 'url' in d:
d['url'] = urlparams(d['url'], src=self.src)
results.append(d)
return results
@property
def items(self):
return self.build_list()
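# Hedged sketch (not part of the original module): BaseAjaxSearch subclasses can
# override `src`, `types` and `fields` as class attributes, which __init__ picks up
# via getattr(). The trimmed-down field mapping below is hypothetical.
class _ExampleCompactSuggestionsAjax(BaseAjaxSearch):
    src = 'example'
    types = [amo.ADDON_EXTENSION]
    fields = {'id': 'id', 'name': 'name', 'url': 'get_url_path'}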
class SearchSuggestionsAjax(BaseAjaxSearch):
src = 'ss'
class AddonSuggestionsAjax(SearchSuggestionsAjax):
# No personas.
types = [amo.ADDON_EXTENSION, amo.ADDON_THEME, amo.ADDON_DICT,
amo.ADDON_SEARCH, amo.ADDON_LPAPP]
class PersonaSuggestionsAjax(SearchSuggestionsAjax):
types = [amo.ADDON_PERSONA]
@json_view
@non_atomic_requests
def ajax_search(request):
"""This is currently used only to return add-ons for populating a
new collection. Themes (formerly Personas) are included by default, so
this can be used elsewhere.
"""
search_obj = BaseAjaxSearch(request)
search_obj.types = amo.ADDON_SEARCH_TYPES
return search_obj.items
@json_view
@non_atomic_requests
def ajax_search_suggestions(request):
cat = request.GET.get('cat', 'all')
suggesterClass = {
'all': AddonSuggestionsAjax,
'themes': PersonaSuggestionsAjax,
}.get(cat, AddonSuggestionsAjax)
suggester = suggesterClass(request, ratings=False)
return _build_suggestions(
request,
cat,
suggester)
def _build_suggestions(request, cat, suggester):
results = []
q = request.GET.get('q')
if q and (q.isdigit() or len(q) > 2):
q_ = q.lower()
if cat != 'apps':
# Applications.
for a in amo.APP_USAGE:
name_ = six.text_type(a.pretty).lower()
word_matches = [w for w in q_.split() if name_ in w]
if q_ in name_ or word_matches:
results.append({
'id': a.id,
'name': ugettext(u'{0} Add-ons').format(a.pretty),
'url': locale_url(a.short),
'cls': 'app ' + a.short
})
# Categories.
cats = Category.objects
cats = cats.filter(Q(application=request.APP.id) |
Q(type=amo.ADDON_SEARCH))
if cat == 'themes':
cats = cats.filter(type=amo.ADDON_PERSONA)
else:
cats = cats.exclude(type=amo.ADDON_PERSONA)
for c in cats:
if not c.name:
continue
name_ = six.text_type(c.name).lower()
word_matches = [w for w in q_.split() if name_ in w]
if q_ in name_ or word_matches:
results.append({
'id': c.id,
'name': six.text_type(c.name),
'url': c.get_url_path(),
'cls': 'cat'
})
results += suggester.items
return results
def _filter_search(request, qs, query, filters, sorting,
sorting_default='-weekly_downloads', types=None):
"""Filter an ES queryset based on a list of filters."""
if types is None:
types = []
APP = request.APP
# Intersection of the form fields present and the filters we want to apply.
show = [f for f in filters if query.get(f)]
if query.get('q'):
qs = qs.filter_query_string(query['q'])
if 'platform' in show and query['platform'] in amo.PLATFORM_DICT:
ps = (amo.PLATFORM_DICT[query['platform']].id, amo.PLATFORM_ALL.id)
# If we've selected "All Systems" don't filter by platform.
if ps[0] != ps[1]:
qs = qs.filter(platforms__in=ps)
if 'appver' in show:
# Get a min version less than X.0.
low = version_int(query['appver'])
# Get a max version greater than X.0a.
high = version_int(query['appver'] + 'a')
# Note: when strict compatibility is not enabled on add-ons, we
# fake the max version we index in compatible_apps.
qs = qs.filter(**{
'current_version.compatible_apps.%s.max__gte' % APP.id: high,
'current_version.compatible_apps.%s.min__lte' % APP.id: low
})
if 'atype' in show and query['atype'] in amo.ADDON_TYPES:
qs = qs.filter(type=query['atype'])
else:
qs = qs.filter(type__in=types)
if 'cat' in show:
cat = (Category.objects.filter(id=query['cat'])
.filter(Q(application=APP.id) | Q(type=amo.ADDON_SEARCH)))
if not cat.exists():
show.remove('cat')
if 'cat' in show:
qs = qs.filter(category=query['cat'])
if 'tag' in show:
qs = qs.filter(tags=query['tag'])
if 'sort' in show:
qs = qs.order_by(sorting[query['sort']])
elif not query.get('q'):
# Sort by a default if there was no query so results are predictable.
qs = qs.order_by(sorting_default)
return qs
@vary_on_headers('X-PJAX')
@non_atomic_requests
def search(request, tag_name=None):
APP = request.APP
types = (amo.ADDON_EXTENSION, amo.ADDON_THEME, amo.ADDON_DICT,
amo.ADDON_SEARCH, amo.ADDON_LPAPP)
category = request.GET.get('cat')
if category == 'collections':
extra_params = {'sort': {'newest': 'created'}}
else:
extra_params = None
fixed = fix_search_query(request.GET, extra_params=extra_params)
if fixed is not request.GET:
# We generally want a 301, except if it's a "type", because that's only
# here to support the new frontend, so a permanent redirect could mess
# things up when the user is going back and forth between the old and
# new frontend. https://github.com/mozilla/addons-server/issues/6846
status = 302 if 'type' in request.GET else 301
return http.HttpResponseRedirect(
urlparams(request.path, **fixed), status=status)
facets = request.GET.copy()
# In order to differentiate between "all versions" and an undefined value,
# we use "any" instead of "" in the frontend.
if 'appver' in facets and facets['appver'] == 'any':
facets['appver'] = ''
form = ESSearchForm(facets or {})
form.is_valid() # Let the form try to clean data.
form_data = form.cleaned_data
if tag_name:
form_data['tag'] = tag_name
if category == 'themes' or form_data.get('atype') == amo.ADDON_PERSONA:
return _personas(request)
sort, extra_sort = split_choices(form.sort_choices, 'created')
if form_data.get('atype') == amo.ADDON_SEARCH:
# Search add-ons should not be searched by ADU, so replace 'Users'
# sort with 'Weekly Downloads'.
sort, extra_sort = list(sort), list(extra_sort)
sort[1] = extra_sort[1]
del extra_sort[1]
# Perform search, using aggregation so that we can build the facets UI.
    # Note that we don't need to aggregate on platforms; that facet is built
# from our constants directly, using the current application for this
# request (request.APP).
appversion_field = 'current_version.compatible_apps.%s.max' % APP.id
qs = (Addon.search_public().filter(app=APP.id)
.aggregate(tags={'terms': {'field': 'tags'}},
appversions={'terms': {'field': appversion_field}},
categories={'terms': {'field': 'category', 'size': 200}})
)
filters = ['atype', 'appver', 'cat', 'sort', 'tag', 'platform']
mapping = {'users': '-average_daily_users',
'rating': '-bayesian_rating',
'created': '-created',
'name': 'name.raw',
'downloads': '-weekly_downloads',
'updated': '-last_updated',
'hotness': '-hotness'}
qs = _filter_search(request, qs, form_data, filters, mapping, types=types)
pager = amo.utils.paginate(request, qs)
ctx = {
'is_pjax': request.META.get('HTTP_X_PJAX'),
'pager': pager,
'query': form_data,
'form': form,
'sort_opts': sort,
'extra_sort_opts': extra_sort,
'sorting': sort_sidebar(request, form_data, form),
'sort': form_data.get('sort'),
}
if not ctx['is_pjax']:
aggregations = pager.object_list.aggregations
ctx.update({
'tag': tag_name,
'categories': category_sidebar(request, form_data, aggregations),
'platforms': platform_sidebar(request, form_data),
'versions': version_sidebar(request, form_data, aggregations),
'tags': tag_sidebar(request, form_data, aggregations),
})
return render(request, 'search/results.html', ctx)
class FacetLink(object):
def __init__(self, text, urlparams, selected=False, children=None):
self.text = text
self.urlparams = urlparams
self.selected = selected
self.children = children or []
def sort_sidebar(request, form_data, form):
sort = form_data.get('sort')
return [FacetLink(text, {'sort': key}, key == sort)
for key, text in form.sort_choices]
def category_sidebar(request, form_data, aggregations):
APP = request.APP
qatype, qcat = form_data.get('atype'), form_data.get('cat')
cats = [f['key'] for f in aggregations['categories']]
categories = Category.objects.filter(id__in=cats)
if qatype in amo.ADDON_TYPES:
categories = categories.filter(type=qatype)
# Search categories don't have an application.
categories = categories.filter(Q(application=APP.id) |
Q(type=amo.ADDON_SEARCH))
# If category is listed as a facet but type is not, then show All.
if qcat in cats and not qatype:
qatype = True
# If category is not listed as a facet NOR available for this application,
# then show All.
if qcat not in categories.values_list('id', flat=True):
qatype = qcat = None
categories = [(_atype, sorted(_cats, key=lambda x: x.name))
for _atype, _cats in sorted_groupby(categories, 'type')]
rv = []
cat_params = {'cat': None}
all_label = ugettext(u'All Add-ons')
rv = [FacetLink(all_label, {'atype': None, 'cat': None}, not qatype)]
for addon_type, cats in categories:
selected = addon_type == qatype and not qcat
# Build the linkparams.
cat_params = cat_params.copy()
cat_params.update(atype=addon_type)
link = FacetLink(amo.ADDON_TYPES[addon_type],
cat_params, selected)
link.children = [
FacetLink(c.name, dict(cat_params, cat=c.id), c.id == qcat)
for c in cats]
rv.append(link)
return rv
def version_sidebar(request, form_data, aggregations):
appver = ''
# If appver is in the request, we read it cleaned via form_data.
if 'appver' in request.GET or form_data.get('appver'):
appver = form_data.get('appver')
app = six.text_type(request.APP.pretty)
exclude_versions = getattr(request.APP, 'exclude_versions', [])
# L10n: {0} is an application, such as Firefox. This means "any version of
# Firefox."
rv = [FacetLink(
ugettext(u'Any {0}').format(app), {'appver': 'any'}, not appver)]
vs = [dict_from_int(f['key']) for f in aggregations['appversions']]
# Insert the filtered app version even if it's not a facet.
av_dict = version_dict(appver)
if av_dict and av_dict not in vs and av_dict['major']:
vs.append(av_dict)
# Valid versions must be in the form of `major.minor`.
vs = set((v['major'], v['minor1'] if v['minor1'] not in (None, 99) else 0)
for v in vs)
versions = ['%s.%s' % v for v in sorted(vs, reverse=True)]
for version, floated in zip(versions, map(float, versions)):
if (floated not in exclude_versions and
floated > request.APP.min_display_version):
rv.append(FacetLink('%s %s' % (app, version), {'appver': version},
appver == version))
return rv
def platform_sidebar(request, form_data):
qplatform = form_data.get('platform')
app_platforms = request.APP.platforms.values()
ALL = app_platforms.pop(0)
# The default is to show "All Systems."
selected = amo.PLATFORM_DICT.get(qplatform, ALL)
if selected != ALL and selected not in app_platforms:
# Insert the filtered platform even if it's not a facet.
app_platforms.append(selected)
# L10n: "All Systems" means show everything regardless of platform.
rv = [FacetLink(ugettext(u'All Systems'), {'platform': ALL.shortname},
selected == ALL)]
for platform in app_platforms:
rv.append(FacetLink(platform.name, {'platform': platform.shortname},
platform == selected))
return rv
def tag_sidebar(request, form_data, aggregations):
qtag = form_data.get('tag')
tags = [facet['key'] for facet in aggregations['tags']]
rv = [FacetLink(ugettext(u'All Tags'), {'tag': None}, not qtag)]
rv += [FacetLink(tag, {'tag': tag}, tag == qtag) for tag in tags]
if qtag and qtag not in tags:
rv += [FacetLink(qtag, {'tag': qtag}, True)]
return rv
def fix_search_query(query, extra_params=None):
rv = {force_bytes(k): v for k, v in query.items()}
changed = False
# Change old keys to new names.
keys = {
'lver': 'appver',
'pid': 'platform',
'type': 'atype',
}
for old, new in keys.items():
if old in query:
rv[new] = rv.pop(old)
changed = True
# Change old parameter values to new values.
params = {
'sort': {
'newest': 'updated',
'popularity': 'downloads',
'weeklydownloads': 'users',
'averagerating': 'rating',
'sortby': 'sort',
},
'platform': {
str(p.id): p.shortname
for p in amo.PLATFORMS.values()
},
'atype': {k: str(v) for k, v in amo.ADDON_SEARCH_SLUGS.items()},
}
if extra_params:
params.update(extra_params)
for key, fixes in params.items():
if key in rv and rv[key] in fixes:
rv[key] = fixes[rv[key]]
changed = True
return rv if changed else query
def split_choices(choices, split):
"""Split a list of [(key, title)] pairs after key == split."""
index = [idx for idx, (key, title) in enumerate(choices)
if key == split]
if index:
index = index[0] + 1
return choices[:index], choices[index:]
else:
return choices, []
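# Illustrative sketch (not part of the original module): how split_choices divides
# a list of sort options around the given key. The option pairs are hypothetical.
def _example_split_choices():
    choices = [('users', 'Most Users'), ('created', 'Newest'), ('hotness', 'Up & Coming')]
    sort, extra_sort = split_choices(choices, 'created')
    # sort == [('users', 'Most Users'), ('created', 'Newest')]
    # extra_sort == [('hotness', 'Up & Coming')]
    return sort, extra_sort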
|
py | 1a3b13f8e2209d3f311b42f42099a2a09b4440ca | import gspread
from oauth2client.service_account import ServiceAccountCredentials
class GoogleSheetMGR(object):
def __init__(self, key, sheet=None, json_file=None):
self.key = key
self.sheet = sheet
if json_file:
self.json_file = json_file
else:
self.json_file = "/root/gspread2.json"
self._authorize()
self.keys = self.get_keys()
def _authorize(self):
"""
See: http://gspread.readthedocs.io/en/latest/oauth2.html
"""
scope = ['https://spreadsheets.google.com/feeds']
credentials = ServiceAccountCredentials.from_json_keyfile_name(self.json_file, scope)
gc = gspread.authorize(credentials)
self.Worksheet = gc.open_by_key(self.key)
if self.sheet:
self.ws_obj = self.Worksheet.get_worksheet(self.sheet)
else:
self.ws_obj = self.Worksheet.sheet1
def get_all_values(self):
return self._get_all_values()
def _worksheet_action(self, action, *args):
"""
        Wrap gspread worksheet actions so a failed call triggers re-authorization and a retry.
"""
retry = 0
max_retry = 5
while True:
try:
func = getattr(self.ws_obj, action)
return func(*args)
except Exception as details:
self._authorize()
retry += 1
if retry > max_retry:
raise details
def _get_all_values(self):
"""
Get all values from worksheet
"""
return self._worksheet_action('get_all_values')
def _update_cell(self, *args):
"""
Update a cell in spreadsheet
"""
return self._worksheet_action('update_cell', *args)
def get_keys(self):
"""
        The first row of the worksheet holds the column keys.
"""
return self._get_all_values()[0]
def rework_sheet(self, data):
old_data = self.get_all_values()
for i_row, row_data in enumerate(old_data):
for i_cell, cell_data in enumerate(row_data):
new_val = data[i_row][i_cell]
if new_val != cell_data:
                    # gspread's update_cell() is 1-indexed, while enumerate() is 0-indexed.
                    self._update_cell(i_row + 1, i_cell + 1, new_val)
def add_new_row(self, row_data, row=None):
if row:
new_row = row
else:
new_row = len(self._get_all_values()) + 1
for index, cell_data in enumerate(row_data):
self._update_cell(new_row, index + 1, cell_data)
def add_new_row_by_dict(self, data_dict, row=None):
if row:
new_row = row
else:
new_row = len(self._get_all_values()) + 1
for index, key in enumerate(self.keys):
if key in data_dict.keys():
self._update_cell(new_row, index + 1, data_dict[key])
def search_update_by_dict(self, search_dict, data_dict):
table = self._get_all_values()
index_dict = {}
index_dict2 = {}
for index, key in enumerate(self.keys):
if key in search_dict.keys():
index_dict[index] = search_dict[key]
if key in data_dict.keys():
index_dict2[index] = data_dict[key]
for row, i in enumerate(table):
for index, val in index_dict.items():
if i[index] != str(val):
found = False
break
else:
found = True
if not found:
continue
for key, val in index_dict2.items():
self._update_cell(row + 1, key + 1, val)
def search_info_by_dict(self, search_dict, table=None):
# TODO: merge code with search_update_by_dict
if not table:
table = self._get_all_values()
index_dict = {}
for index, key in enumerate(self.keys):
if key in search_dict.keys():
index_dict[index] = search_dict[key]
ret = []
for row, i in enumerate(table):
for index, val in index_dict.items():
if i[index] != str(val):
found = False
break
else:
found = True
if not found:
continue
tmp = {}
for index, key in enumerate(self.keys):
tmp[key] = i[index]
ret.append(tmp)
return ret
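# Minimal usage sketch (not part of the original module). The spreadsheet key,
# credential path and column names below are hypothetical placeholders.
def _example_usage():
    mgr = GoogleSheetMGR(key='your-spreadsheet-key',
                         json_file='/path/to/service-account.json')
    # Append a row, mapping values onto the sheet's header columns.
    mgr.add_new_row_by_dict({'hostname': 'host-01', 'status': 'PASS'})
    # Update 'status' on every row whose 'hostname' column equals 'host-01'.
    mgr.search_update_by_dict({'hostname': 'host-01'}, {'status': 'FAIL'})
    # Read the matching rows back as a list of dictionaries.
    return mgr.search_info_by_dict({'hostname': 'host-01'})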
|
py | 1a3b153021d35b47204b80dbafc45d0410f39bc3 | """
Train deterministic HM-DenseED
"""
from time import time
import torch
import os
import numpy as np
import scipy.io as io
import sys
import torch.optim as optim
import torch.nn.functional as F
from torch.optim.lr_scheduler import ReduceLROnPlateau
from args import args, device
from models.model import DenseED
from utils.train_data_upload import train_load_data
from utils.test_data_upload import test_load_data
from models.bdsmm import bdsmm
from plot.velocity_plot import velocity_post_process
from plot.plot_PDF import mcs_test
from plot.basis_plot import basis_plot
import matplotlib.pyplot as plt
import scipy.io as io
import matplotlib.ticker as ticker
plt.switch_backend('agg')
import warnings
warnings.filterwarnings("ignore")
torch.set_default_tensor_type('torch.FloatTensor')
# initialize DenseED model
model = DenseED(in_channels=1, out_channels=1,
blocks=args.blocks,
growth_rate=args.growth_rate,
init_features=args.init_features,
drop_rate=args.drop_rate,
bn_size=args.bn_size,
bottleneck=args.bottleneck,
out_activation='Sigmoid').to(device)
print(model)
n_out_pixels_train = args.ntrain * args.imsize
n_out_pixels_test = args.ntest * args.imsize
dir = './plot'
# load data
train_loader = train_load_data()
test_loader = test_load_data()
print('...................Loaded data!...........................')
optimizer = optim.Adam(model.parameters(), lr=args.lr,weight_decay=args.weight_decay)
scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=10,
verbose=True, threshold=0.01, threshold_mode='rel',
cooldown=0, min_lr=0, eps=1e-8)
#train
def train(epoch):
model.train()
mse = 0.
mse_total = 0.
#===================================================================================
for batch_idx, (input,basis_patch,A_matrix, B_matrix,target_P, q_matrix) in enumerate(train_loader):
input_rr,output_basis,A1_transformed1,B1_transformed, target_pressure, q1_transformed \
= input.float(),basis_patch.float(),A_matrix.float(),B_matrix.float(), target_P.float(), q_matrix.float()
input_rr,output_basis,A1_transformed1,B1_transformed, target_pressure, q1_transformed \
= input_rr.to(device),output_basis.to(device),A1_transformed1.to(device),B1_transformed.to(device), target_pressure.to(device), q1_transformed.to(device)
#================================================================================
output_basis = output_basis.view(144,1,15,15)
input_rr = input_rr.view(144,1,15,15)
#load the A matrix from sparse to dense
A_app = []
for i in range(1):
A_torch = A1_transformed1[i,:,:]
A_torch1 = A_torch[:,0:2]
A_torch2 = A_torch[:,2]
A_torch1 = A_torch1.type(torch.LongTensor).to(device)
A_torch_final = torch.sparse.FloatTensor(A_torch1.t(), A_torch2, torch.Size([16384,16384]))
A_app.append(A_torch_final)
A1_transformed = torch.stack(A_app,dim=0)
#Obtain the indices for the non-interior support region
C = io.loadmat(dir+'/matlab_index_save_1.mat')
C = C['basis_save']
C = np.squeeze(C)
X = np.empty((C.shape[0], C[0].shape[0], C[0].shape[1]))
for i in range(X.shape[0]):
X[i] = C[i]
        # subtract 1 to convert MATLAB 1-based indices to Python 0-based indices
X1 = X.reshape(144,225)-1
#==============
X2 = np.zeros((144,225))
for i in range(144):
var2 = np.zeros((15,15))
ele = X1[i,0]
for varu in range(15):
var1 = ele+128*(varu)
for vm in range(15):
var2[varu,vm] = var1+vm
var3 = var2.reshape(1,225)
X2[i,:] = var3
X2 = torch.Tensor(X2)
#================================================================================
model.zero_grad()
#obtain the output from the DenseED model
output = model(input_rr)
#================================================================================
output = output.view(1,144,225)
#============================================================================
for RRRR in range (1):
output_RR = output[RRRR,:,:]
output_RR = output_RR.reshape(144,225)
B1_p_out = B1_transformed[RRRR,:,:].reshape(16384, 256)
jjj=0
ss = 0
for ii in range(12):
jjj = 35+16*ii
for k in range (jjj,jjj+12):
ss =ss+1
s = ss-1
basis_temp = X2[s,:]
tem = B1_p_out[:,k-1]
basis_temp = basis_temp.type(torch.LongTensor)
tem = tem.type(torch.cuda.FloatTensor)
temp_variable = output_RR[143-s,:]/torch.max(output_RR[143-s,:])
tem[basis_temp] = temp_variable
B1_p_out[:,k-1] = tem
B1_transformed[RRRR,:,:] = B1_p_out
#====================================================
temp11 = []
for kkk in range(1):
B1_transformed_temp = B1_transformed[kkk,:,:]
B1_transformed1 = torch.transpose(B1_transformed_temp,0,1)
dim = torch.sum(B1_transformed1,dim=0)
B1_transformed2 = torch.div(B1_transformed1,dim)
B1_transformed22 = torch.transpose(B1_transformed2,0,1)
temp11.append(B1_transformed22)
B1_transformed = temp11
B1_transformed = torch.stack(temp11,dim=0)
#============================================
R1_transformed = torch.transpose(B1_transformed,1,2)
A1_transformed = torch.transpose(A1_transformed,1,2)
R1_transformed = torch.transpose(R1_transformed,1,2)
A_c_transformed = torch.matmul(torch.transpose(bdsmm(A1_transformed,R1_transformed),1,2),B1_transformed)
R1_transformed = torch.transpose(R1_transformed,1,2)
temp1_transformed = torch.matmul(R1_transformed,q1_transformed)
temp2_transformed,LU = torch.solve(temp1_transformed,A_c_transformed)
temp3_transformed = torch.matmul(B1_transformed,temp2_transformed)
predict_pressure = temp3_transformed
target_pressure = target_pressure.view(1,16384)
predict_pressure = predict_pressure.view(1,16384)
target_pressure = target_pressure.type(torch.cuda.FloatTensor)
predict_pressure = predict_pressure.type(torch.cuda.FloatTensor)
loss2 = F.mse_loss(predict_pressure,target_pressure, size_average=False)
loss2.backward()
optimizer.step()
mse += loss2.item()
rmse = np.sqrt(mse / n_out_pixels_train)
scheduler.step(rmse)
mse_total += mse
return mse_total
#test
def test(epoch):
model.eval()
mse = 0.
mse_total = 0.
final_target = []
final_predict = []
for batch_idx, (input,basis_patch,A_matrix, B_matrix,target_P, q_matrix, T_val, ft_val) in enumerate(test_loader):
input_rr,output_basis,A1_transformed1,B1_transformed, target_pressure, q1_transformed \
= input.float(),basis_patch.float(),A_matrix.float(),B_matrix.float(), target_P.float(), q_matrix.float()
input_rr,output_basis,A1_transformed1,B1_transformed, target_pressure, q1_transformed \
= input_rr.to(device),output_basis.to(device),A1_transformed1.to(device),B1_transformed.to(device), target_pressure.to(device), q1_transformed.to(device)
#================================================================================
output_basis = output_basis.view(144*64,1,15,15)
input_rr = input_rr.view(144*64,1,15,15)
A_app = []
for i in range(64):
A_torch = A1_transformed1[i,:,:]
A_torch1 = A_torch[:,0:2]
A_torch2 = A_torch[:,2]
A_torch1 = A_torch1.type(torch.LongTensor).to(device)
A_torch_final = torch.sparse.FloatTensor(A_torch1.t(), A_torch2, torch.Size([16384,16384]))
A_app.append(A_torch_final)
A1_transformed = torch.stack(A_app,dim=0).to(device)
C = io.loadmat(dir+'/matlab_index_save_1.mat')
C = C['basis_save']
C = np.squeeze(C)
X = np.empty((C.shape[0], C[0].shape[0], C[0].shape[1]))
for i in range(X.shape[0]):
X[i] = C[i]
        # subtract 1 to convert MATLAB 1-based indices to Python 0-based indices
X1 = X.reshape(144,225)-1
#==============
X2 = np.zeros((144,225))
for i in range(144):
var2 = np.zeros((15,15))
ele = X1[i,0]
for varu in range(15):
var1 = ele+128*(varu)
for vm in range(15):
var2[varu,vm] = var1+vm
var3 = var2.reshape(1,225)
X2[i,:] = var3
X2 = torch.Tensor(X2)
#================================================================================
with torch.no_grad():
output = model(input_rr)
#================================================================================
output = output.view(64,144,225)
output_basis = output_basis.view(64,144,225)
#============================================================================
for RRRR in range (64):
output_RR = output[RRRR,:,:].reshape(144,225)
B1_p_out = B1_transformed[RRRR,:,:].reshape(16384, 256)
jjj=0
ss = 0
for ii in range(12):
jjj = 35+16*ii
for k in range (jjj,jjj+12):
ss =ss+1
s = ss-1
basis_temp = X2[s,:]
tem = B1_p_out[:,k-1]
basis_temp = basis_temp.type(torch.LongTensor)
tem = tem.type(torch.cuda.FloatTensor)
temp_variable = output_RR[143-s,:]/torch.max(output_RR[143-s,:])
tem[basis_temp] = temp_variable
B1_p_out[:,k-1] = tem
B1_transformed[RRRR,:,:] = B1_p_out
#====================================================
temp11 = []
for kkk in range(64):
B1_transformed_temp = B1_transformed[kkk,:,:]
B1_transformed1 = torch.transpose(B1_transformed_temp,0,1)
dim = torch.sum(B1_transformed1,dim=0)
B1_transformed2 = torch.div(B1_transformed1,dim)
B1_transformed22 = torch.transpose(B1_transformed2,0,1)
temp11.append(B1_transformed22)
B1_transformed = temp11
B1_transformed = torch.stack(temp11,dim=0).to(device)
temp_save = B1_transformed.cpu().detach().numpy()
#============================================
R1_transformed = torch.transpose(B1_transformed,1,2)
A1_transformed = torch.transpose(A1_transformed,1,2)
R1_transformed = torch.transpose(R1_transformed,1,2)
A_c_transformed = torch.matmul(torch.transpose(bdsmm(A1_transformed,R1_transformed),1,2),B1_transformed)
R1_transformed = torch.transpose(R1_transformed,1,2)
temp1_transformed = torch.matmul(R1_transformed,q1_transformed)
temp2_transformed,LU = torch.solve(temp1_transformed,A_c_transformed)
temp3_transformed = torch.matmul(B1_transformed,temp2_transformed)
predict_pressure = temp3_transformed.view(64,16384)
target_pressure = target_pressure.view(64,16384).type(torch.cuda.FloatTensor)
predict_pressure = predict_pressure.type(torch.cuda.FloatTensor)
loss2 = F.mse_loss(predict_pressure,target_pressure, size_average=False)
predict_press = predict_pressure.cpu().detach().numpy()
target_press = target_pressure
target_press = target_press.cpu().detach().numpy()
if epoch % args.epochs == 0:
if batch_idx == 0:
interior_basis = output_basis.cpu().detach().numpy()
io.savemat('./result_data/test_interior_basis_%d.mat'%epoch, dict([('interior_basis',np.array(interior_basis))]))
io.savemat('./result_data/test_prolongation_%d.mat'%epoch, dict([('prolongation_operator',np.array(temp_save))]))
if args.kle == 100:
index_val = 28
elif args.kle == 1000:
index_val = 23
elif args.kle == 16384:
index_val = 28
elif args.data == 'channel':
index_val = 7
velocity_x_tar, velocity_y_tar, velocity_x_pred, velocity_y_pred = \
velocity_post_process(target_press[index_val,:], predict_press[index_val,:],T_val[index_val,:],ft_val[index_val,:],epoch,index_val)
mse += loss2.item()
final_target.append(target_press)
final_predict.append(predict_press)
final_target = np.array(final_target)
final_predict = np.array(final_predict)
output_new = output.view(64,144,15,15)
predict_new = output_basis.view(64,144,15,15)
output_new = output_new.cpu().detach().numpy()
predict_new = predict_new.cpu().detach().numpy()
mse_total += mse
return mse_total, final_target, final_predict
#plot rmse
def train_test_error(r2_train,r2_test,epoch):
plt.figure()
plt.plot(r2_train, label="Train: {:.3f}".format(np.mean(r2_train[-5:])))
plt.plot(r2_test, label="Test: {:.3f}".format(np.mean(r2_test[-5:])))
plt.xlabel('Epoch')
plt.ylabel(r'RMSE')
plt.legend(loc='lower right')
plt.savefig("./results/rmse.pdf", dpi=600)
plt.close()
np.savetxt("./results/rmse_train.txt", r2_train)
np.savetxt("./results/rmse_test.txt", r2_test)
#==========================================================
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
#==========================================================
#main
def main():
mkdir('results')
mkdir('result_data')
print('Start training and testing........................................................')
loss_train_all = []
loss_test_all = []
for epoch in range(1, args.epochs + 1):
print ('epoch number .......................................',epoch)
mse_tot = train(epoch)
with torch.no_grad():
mse_tot_test, final_target, final_predict = test(epoch)
rmse = np.sqrt(mse_tot / n_out_pixels_train)
rmse1 = np.sqrt(mse_tot_test / n_out_pixels_test)
loss_train_all.append(rmse)
loss_test_all.append(rmse1)
loss_train_all = np.array(loss_train_all)
loss_test_all = np.array(loss_test_all)
print('.............training and testing done....................')
print('\n')
print('saving the training error and testing error')
io.savemat('./result_data/training_loss.mat', dict([('training_loss',np.array(loss_train_all))]))
io.savemat('./result_data/test_loss.mat', dict([('testing_loss',np.array(loss_test_all))]))
print('.........................plotting results.........................................')
train_test_error(loss_train_all,loss_test_all, 1)
print('.........................saving model..............................................')
torch.save(model, 'KLE_100.pt')
print('.............................plotting basis.........................................')
basis_plot()
"""clean up gpu memory"""
torch.cuda.empty_cache()
print('.............................plotting PDF............................................')
mcs_test(model)
if __name__ == '__main__':
main()
|
py | 1a3b1541b0c4138c0d2e66cc656181453cf97ec0 | from __future__ import print_function
import torch
import os
import shutil
from collections import defaultdict
import numpy as np
def getNeigh(node_num, feature_map, knn):
similarity = np.dot(feature_map, feature_map.T)
sort_id = np.argsort(-similarity, axis=1)
adj_sets = defaultdict(set)
for n in range(node_num):
for k in range(1, knn+1):
adj_sets[n].add(sort_id[n, k])
return adj_sets
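# Small sketch (not part of the original module): getNeigh on L2-normalized random
# features, so the dot-product similarity behaves like cosine similarity.
def _example_get_neigh(node_num=16, feat_dim=8, knn=3):
    feats = np.random.random((node_num, feat_dim)).astype(np.float32)
    feats /= np.linalg.norm(feats, axis=1, keepdims=True)
    adj_sets = getNeigh(node_num, feats, knn)
    # adj_sets[n] holds the knn nodes most similar to n; rank 0 is skipped because
    # every node is most similar to itself.
    return adj_sets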
def collectGraphTrain(node_num, class_num, feat_dim = 2048, knn = 10, suffix = '_gem.npy'):
'''
(training dataset)
    collect info about the graph including: nodes, labels, features, and neighborhood (adjacency) relationships.
    Neighborhood (adjacency) relationships are constructed based on similarity between features.
'''
print('node_num:', node_num, '\nclass_num:', class_num)
feature_map = np.load('train_feature_map/feature_map' + suffix)
assert node_num == feature_map.shape[0], 'node_num does not match feature shape.'
assert feat_dim == feature_map.shape[1], 'feat_dim does not match feature shape.'
label = np.load('train_feature_map/label' + suffix)
# adj_sets = getNeigh(node_num, feature_map, knn)
neighs = np.load('train_feature_map/neighs' + suffix)
adj_sets = defaultdict(set)
for n in range(node_num):
adj_sets[n] = set(neighs[n, 1:knn+1])
return torch.from_numpy(label), torch.from_numpy(feature_map).float(), adj_sets
def collectGraphTest(feature_path, node_num, feat_dim = 2048, knn = 10, suffix = '_gem.npy'):
print("node num.:", node_num)
feature_map = np.load(os.path.join(feature_path, 'feature_map' + suffix))
assert node_num == feature_map.shape[0], 'node_num does not match feature shape.'
assert feat_dim == feature_map.shape[1], 'feat_dim does not match feature shape.'
neighs = np.load(os.path.join(feature_path, 'neighs' + suffix))
adj_sets = defaultdict(set)
for n in range(node_num):
adj_sets[n] = set(neighs[n, 1:knn+1])
query_feature = np.load(os.path.join(feature_path, 'query' + suffix))
return torch.from_numpy(feature_map).float(), adj_sets, torch.from_numpy(query_feature).float() |
py | 1a3b1563ced00155568fb74bff62dc210df4bdbb | # /* ******************************************************************************
# * Copyright (c) 2021 Deeplearning4j Contributors
# *
# * This program and the accompanying materials are made available under the
# * terms of the Apache License, Version 2.0 which is available at
# * https://www.apache.org/licenses/LICENSE-2.0.
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# * License for the specific language governing permissions and limitations
# * under the License.
# *
# * SPDX-License-Identifier: Apache-2.0
# ******************************************************************************/
################################################################################
#
# This program and the accompanying materials are made available under the
# terms of the Apache License, Version 2.0 which is available at
# https://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# SPDX-License-Identifier: Apache-2.0
################################################################################
import pytest
import jumpy as jp
import numpy as np
from numpy.testing import assert_allclose
def _test_ufunc(op, shape1, shape2):
a_np = np.random.random(shape1)
b_np = np.random.random(shape2)
c_np = eval('a_np {} b_np'.format(op))
a_jp = jp.array(a_np)
b_jp = jp.array(b_np)
c_jp = eval('a_jp {} b_jp'.format(op))
c_jp = c_jp.numpy()
assert_allclose(c_jp, c_np)
def _test_ufunc_inplace(op, shape1, shape2):
a_np = np.random.random(shape1)
b_np = np.random.random(shape2)
a_np2 = a_np.copy()
exec('a_np {}= b_np'.format(op))
a_jp = jp.array(a_np2)
b_jp = jp.array(b_np)
exec('a_jp {}= b_jp'.format(op))
a_jp = a_jp.numpy()
assert_allclose(a_jp, a_np)
def test_broadcast():
shapes = [
[(2, 3), (3, )],
[(2, 3, 4), (3, 4)],
[(2, 3), (1, 1)],
[(2, 3), (1, 1, 1)]
]
ops = ['+', '-', '*', '/']
for op in ops:
for shape in shapes:
_test_ufunc(op, *shape)
_test_ufunc(op, *reversed(shape))
if len(shape[0]) > len(shape[1]):
_test_ufunc_inplace(op, *shape)
if __name__ == '__main__':
pytest.main([__file__])
|
py | 1a3b15bdd3832ede867f770081b658d7605f0bfd | from __future__ import division
from scipy.signal import convolve2d, correlate2d
import numpy as np
import numpy.random as random
import sys
def _random_weights(shape):
size = np.prod(shape)
#random.seed(42) # use only for debugging
return random.uniform(-np.sqrt(1/size), np.sqrt(1/size), shape)
class Layer:
def __init__(self, config):
assert(config["input_shape"] and config["l2_decay"] is not None and config["debug"] is not None)
self.input_shape = config["input_shape"]
self.l2_decay = config["l2_decay"]
self.debug = config["debug"]
def forward(self, buffer):
return buffer
def backward(self, input, buffer):
return buffer
def update(self, rate):
pass
def predict(self, input):
return self.forward(input)
def loss(self):
return 0
class InputLayer(Layer):
def __init__(self, config):
Layer.__init__(self, config)
self.output_shape = self.input_shape
class FullyConnectedLayer(Layer):
def __init__(self, config):
Layer.__init__(self, config)
assert(config["neurons"])
neurons = config["neurons"]
self.neurons = neurons
self.output_shape = (neurons, )
self.weights = np.c_[_random_weights((neurons, np.prod(self.input_shape))), np.zeros(neurons)]
def forward(self, input, weights=None):
weights = weights if weights is not None else self.weights # allow to overwrite weights for testing purposes
return np.dot(weights, np.append(input.reshape(-1), 1))
def backward(self, input, parent_gradient):
self.dweights = np.c_[np.tile(input.reshape(-1), (self.neurons, 1)), np.ones(self.neurons)]
if self.debug:
numerical = self.numerical_gradient(input, self.weights)
if not np.all(np.abs(numerical - self.dweights).reshape(-1) <= 0.00001):
print "Numerical Gradient:\n{}\nAnalytical Gradient:\n{}".format(numerical, self.dweights)
assert(False)
decay = self.l2_decay*self.weights
decay[:, -1] = 0 # don't apply decay to bias
self.dweights = self.dweights*parent_gradient[:, None] + decay # loss gradient wrt. weights
return parent_gradient.dot(self.weights)[:-1].reshape(self.input_shape) # loss gradient wrt. input
def update(self, rate):
self.weights = self.weights - self.dweights*rate
def loss(self):
return self.l2_decay*(np.square(self.weights.reshape(-1)[:-1])).sum()/2
def numerical_gradient(self, input, params):
eps = 0.000001
pert = params.copy()
res = np.zeros(shape=params.shape)
for index, x in np.ndenumerate(params):
neuron = index[0]
pert[index] = params[index] + eps
res[index] = (self.forward(input, pert)[neuron] - self.forward(input, params)[neuron])/eps
pert[index] = params[index]
return res
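# Gradient-check sketch (not part of the original module), kept Python 2 compatible
# to match this file: it compares the analytical gradient of FullyConnectedLayer
# against its numerical_gradient() helper on random data with decay disabled.
def _example_gradient_check():
    config = {"input_shape": (4,), "l2_decay": 0.0, "debug": False, "neurons": 3}
    layer = FullyConnectedLayer(config)
    x = random.uniform(-1, 1, (4,))
    parent_gradient = np.ones(layer.neurons)
    layer.backward(x, parent_gradient)
    numerical = layer.numerical_gradient(x, layer.weights)
    # With a unit parent gradient and zero decay the two should agree to ~1e-5.
    return np.abs(numerical - layer.dweights).max()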
class ReLuLayer(Layer):
def __init__(self, config):
Layer.__init__(self, config)
self.output_shape = self.input_shape
def forward(self, buffer):
# See http://web.stanford.edu/~awni/papers/relu_hybrid_icml2013_final.pdf
# about caveats of ReLu, i.e. "it could lead to cases where a unit never activates as a gradient-based
# optimization algorithm will not adjust the weights of a unit that never activates initially"
return np.where(buffer < 0, 0.01*buffer, buffer)
def backward(self, input, buffer):
return np.where(input < 0, 0.01, 1)*buffer
class DropoutLayer(Layer):
def __init__(self, config):
Layer.__init__(self, config)
self.output_shape = self.input_shape
self.prob = 0.5
def forward(self, input):
self.rnd = random.binomial(1, self.prob, input.size)
self.rnd = self.rnd.reshape(input.shape)
return input*self.rnd
def backward(self, input, parent_gradient):
return parent_gradient*self.rnd
def predict(self, input):
assert(not self.debug) # TODO: make it work in debug mode
return input*self.prob # approximates the geometric mean
class ConvolutionLayer(Layer):
def __init__(self, config):
Layer.__init__(self, config)
assert(config["filters"] > 0 and config["size"] > 0)
assert(len(self.input_shape) >= 2) # Assume 2D Matrices as input
size = config["size"]
self.n_filters = config["filters"]
self.n_input_maps = 1 if len(self.input_shape) == 2 else self.input_shape[0]
self.output_shape = (self.n_filters, self.input_shape[-2] - size + 1, self.input_shape[-1] - size + 1)
self.filter = _random_weights((self.n_filters, size, size))
def forward(self, imgs):
        imgs = imgs.reshape(self.n_input_maps, imgs.shape[-2], imgs.shape[-1])
return self._convolve(imgs, self.filter)
def _convolve(self, imgs, filters):
assert(imgs.ndim == 3 and filters.ndim == 3)
assert(imgs.shape[-2] >= filters.shape[-2] and imgs.shape[-1] >= filters.shape[-1])
assert(filters.shape[-2] == filters.shape[-1] and filters.shape[-1] % 2 != 0)
lx = filters.shape[-1]//2
rx = imgs.shape[-1] - lx - 1
ly = lx
ry = imgs.shape[-2] - ly - 1
#print "f " + str(filters.shape[0])
output = np.zeros((filters.shape[0], rx - lx + 1, ry - ly + 1))
for f in range(0, filters.shape[0]):
filter = filters[f]
filter_map = np.zeros((rx - lx + 1, ry - ly + 1))
for i in range(0, imgs.shape[0]):
img = imgs[i]
convolved = np.zeros((rx - lx + 1, ry - ly + 1))
#print "convolved shape " + str(convolved.shape)
#print "lx " + str(lx) + " rx " + str(rx) + " ly " + str(ly) + " ry " + str(ry)
for x in range(lx, rx + 1):
for y in range(ly, ry + 1):
subimg = img[y - ly:y + ly + 1:,x - lx:x + lx + 1]
convolved[y - ly, x - lx] = (subimg * filter).sum()
if self.debug:
lib_convolved = correlate2d(img, filter, "valid")
if not np.all(np.abs(convolved - lib_convolved) < 0.000001):
print "Convolved:\n{}\nLib Convolved:\n{}\nFilter:\n{}".format(convolved, lib_convolved, filter)
assert(False)
filter_map += convolved
output[f]=filter_map
return output
def backward(self, imgs, parents_gradient):
        imgs = imgs.reshape(self.n_input_maps, imgs.shape[-2], imgs.shape[-1])
input_gradient, dfilter = self._gradient(imgs, self.filter, parents_gradient)
self.dfilter = dfilter
return input_gradient
def _gradient(self, imgs, filters, parents_gradient):
assert(imgs.ndim == 3 and filters.ndim == 3)
assert(imgs.shape[-2] >= filters.shape[-2] and imgs.shape[-1] >= filters.shape[-1])
assert(filters.shape[-2] == filters.shape[-1] and filters.shape[-1] % 2 != 0)
lx = filters.shape[-1]//2
rx = imgs.shape[-1] - lx - 1
ly = lx
ry = imgs.shape[-2] - ly - 1
imgs_gradient = np.zeros(imgs.shape)
filters_gradient = np.zeros(filters.shape)
for f in range(0, filters.shape[0]):
filter = filters[f]
filter_gradient = filters_gradient[f]
parent_gradient = parents_gradient[f]
for i in range(0, imgs.shape[0]):
img = imgs[i]
img_gradient = imgs_gradient[i]
for x in range(lx, rx + 1):
for y in range(ly, ry + 1):
img_gradient[y - ly:y + ly + 1:,x - lx:x + lx + 1] += filter*parent_gradient[y - ly, x - lx]
filter_gradient += img[y - ly:y + ly + 1:,x - lx:x + lx + 1]*parent_gradient[y - ly, x - lx]
filter_gradient += self.l2_decay*filter
return (imgs_gradient, filters_gradient)
def update(self, rate):
self.filter = self.filter - self.dfilter*rate
def loss(self):
return self.l2_decay*(np.square(self.filter.reshape(-1))).sum()/2
class PoolingLayer(Layer):
def __init__(self, config):
Layer.__init__(self, config)
assert(config["size"] > 0)
assert(len(self.input_shape) == 3)
self.size = config["size"]
self.output_shape = (self.input_shape[0],
(self.input_shape[1] - self.size)//self.size + 1,
(self.input_shape[2] - self.size)//self.size + 1)
def forward(self, imgs):
assert(imgs.ndim == 3)
maps = np.zeros(self.output_shape)
for i in range(0, imgs.shape[0]):
img = imgs[i]
map = maps[i]
for x in range(0, self.output_shape[1]):
x_img = x*self.size
for y in range(0, self.output_shape[2]):
y_img = y*self.size
map[y][x] = img[y_img:y_img+self.size, x_img:x_img+self.size].max()
return maps
def backward(self, imgs, parents_gradient):
imgs_gradient = np.zeros(self.input_shape)
for i in range(0, imgs.shape[0]):
img = imgs[i]
img_gradient = imgs_gradient[i]
parent_gradient = parents_gradient[i]
for x in range(0, self.output_shape[1]):
x_img = x*self.size
for y in range(0, self.output_shape[2]):
y_img = y*self.size
sub = img[y_img:y_img+self.size, x_img:x_img+self.size]
sub_max_index = np.unravel_index(sub.argmax(), sub.shape)
max_index = np.add(sub_max_index, (y_img, x_img))
img_gradient[tuple(max_index)] = parent_gradient[y, x]
return imgs_gradient
class SquaredLossLayer(Layer):
def __init__(self, config):
Layer.__init__(self, config)
def forward(self, buffer):
return buffer
def backward(self, input, expected):
if np.isscalar(expected):
expected = np.array([expected])
assert(input.shape == expected.shape)
return input - expected
def loss(self, predicted, expected):
if np.isscalar(expected):
expected = np.array([expected])
assert(predicted.shape == expected.shape)
return np.square(predicted - expected).sum()*0.5
class SoftmaxLossLayer(Layer):
def __init__(self, config):
assert(config["categories"] > 0)
assert(config["categories"] == config["input_shape"][0] and len(config["input_shape"]) == 1)
Layer.__init__(self, config)
self.categories = config["categories"]
def forward(self, buffer):
max = np.max(buffer)
exp = np.exp(buffer - max) # numerically stable
total = exp.sum()
return exp/total
def backward(self, input, expected):
assert(expected.dtype.kind == 'i')
output = self.forward(input)
mask = np.zeros(shape=input.shape)
mask[expected] = 1
return output - mask
def loss(self, predicted, expected):
output = predicted[expected]
return -np.log(output) |
py | 1a3b176deb74adaf40a8baa9fadd26b37d7ee083 | """Constants for the FRITZ!Box Tools integration."""
DOMAIN = "fritz"
PLATFORMS = ["device_tracker"]
DATA_FRITZ = "fritz_data"
DEFAULT_DEVICE_NAME = "Unknown device"
DEFAULT_HOST = "192.168.178.1"
DEFAULT_PORT = 49000
DEFAULT_USERNAME = ""
ERROR_AUTH_INVALID = "invalid_auth"
ERROR_CONNECTION_ERROR = "connection_error"
ERROR_UNKNOWN = "unknown_error"
TRACKER_SCAN_INTERVAL = 30
|
py | 1a3b1771d6ce39030213a5dc17b1e26d3df02de6 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Convert layout related registration"""
from __future__ import absolute_import
from tvm.relay.op import op as reg
from ...op.strategy.generic import is_depthwise_conv2d
@reg.register_convert_op_layout("qnn.conv2d")
def convert_qnn_conv2d(attrs, inputs, tinfos, desired_layouts):
"""Convert Layout pass registration for QNN conv2d op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
tinfos : list of types
List of input and output types
desired_layouts : list of layout strings
List of layouts defining our desired
layout for the data and kernel inputs respectively.
Returns
-------
result : tvm.relay.Expr
The transformed expr
"""
# pylint: disable=import-outside-toplevel
from tvm import relay
assert len(desired_layouts) == 2, "A desired layout is expected for both of qnn.conv2d's inputs"
desired_data_layout, desired_kernel_layout = map(str, desired_layouts)
assert desired_data_layout != "default", "Data layout cannot be default"
new_attrs = dict(attrs)
new_attrs["data_layout"] = desired_data_layout
if desired_kernel_layout != "default":
new_attrs["kernel_layout"] = desired_kernel_layout
return relay.qnn.op.conv2d(*inputs, **new_attrs)
if desired_data_layout == "NCHW":
new_attrs["kernel_layout"] = "OIHW"
return relay.qnn.op.conv2d(*inputs, **new_attrs)
if desired_data_layout == "NHWC":
# Check for depthwise convolution.
data_info = tinfos[0]
weight_info = tinfos[1]
if is_depthwise_conv2d(
data_info.shape,
attrs["data_layout"],
weight_info.shape,
attrs["kernel_layout"],
attrs["groups"],
):
new_attrs["kernel_layout"] = "HWOI"
else:
new_attrs["kernel_layout"] = "HWIO"
return relay.qnn.op.conv2d(*inputs, **new_attrs)
raise ValueError("Layout %s is not yet supported" % desired_data_layout)
@reg.register_convert_op_layout("qnn.conv2d_transpose")
def convert_qnn_conv2d_transpose(attrs, inputs, tinfos, desired_layouts):
"""Convert Layout pass registration for QNN conv2d_transpose op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
tinfos : list of types
List of input and output types
desired_layouts : list of layout strings
List of layouts defining our desired
layout for the data and kernel inputs respectively.
Returns
-------
result : tvm.relay.Expr
The transformed expr
"""
# pylint: disable=import-outside-toplevel
from tvm import relay
assert (
len(desired_layouts) == 2
), "A desired layout is expected for both of qnn.conv2d_transpose's inputs"
desired_data_layout, desired_kernel_layout = map(str, desired_layouts)
assert desired_data_layout != "default", "Data layout cannot be default"
new_attrs = dict(attrs)
new_attrs["data_layout"] = desired_data_layout
if desired_kernel_layout != "default":
new_attrs["kernel_layout"] = desired_kernel_layout
return relay.qnn.op.conv2d_transpose(*inputs, **new_attrs)
# Handle default kernel layouts
if desired_data_layout == "NCHW":
new_attrs["kernel_layout"] = "OIHW"
return relay.qnn.op.conv2d_transpose(*inputs, **new_attrs)
if desired_data_layout == "NHWC":
new_attrs["kernel_layout"] = "HWIO"
return relay.qnn.op.conv2d_transpose(*inputs, **new_attrs)
raise ValueError("Layout %s is not yet supported" % desired_data_layout)
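# Usage sketch (not part of the original registration code): these converters are
# normally triggered through the ConvertLayout pass. `mod` is assumed to be an
# already-imported quantized Relay module.
def _example_convert_layout(mod):
    import tvm
    from tvm import relay
    desired_layouts = {"qnn.conv2d": ["NCHW", "default"]}
    seq = tvm.transform.Sequential([relay.transform.ConvertLayout(desired_layouts)])
    with tvm.transform.PassContext(opt_level=3):
        return seq(mod)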
|
py | 1a3b1795cbd3e44d831f279b8f05c709adce6948 | import lzma
import os
import subprocess
import re
import zipfile
from typing import Tuple
from unittest import mock
from unittest.mock import MagicMock
import pytest
from aiohttp.test_utils import TestClient
from otupdate import buildroot, common
from otupdate import openembedded
from otupdate.common.update_actions import Partition
from otupdate.openembedded import PartitionManager
from tests.common.config import FakeRootPartElem
HERE = os.path.abspath(os.path.dirname(__file__))
one_up = os.path.abspath(os.path.join(__file__, "../../"))
@pytest.fixture(params=[openembedded, buildroot])
async def test_cli(
aiohttp_client, otupdate_config, request, version_file_path, mock_name_synchronizer
) -> Tuple[TestClient, str]:
"""
Build an app using dummy versions, then build a test client and return it
"""
cli_client_pkg = request.param
app = cli_client_pkg.get_app(
name_synchronizer=mock_name_synchronizer,
system_version_file=version_file_path,
config_file_override=otupdate_config,
boot_id_override="dummy-boot-id-abc123",
)
client = await aiohttp_client(app)
return client, cli_client_pkg.__name__
@pytest.fixture
def downloaded_update_file_consolidated(request, extracted_update_file_consolidated):
"""
Return the path to a zipped update file
To exclude files, mark with ``exclude_rootfs_ext4``,
``exclude_rootfs_ext4_hash``, ``exclude_rootfs_ext4_hash_sig``.
This uses :py:meth:`extracted_update_file` to generate the contents, so
marks that fixture understands can be used when requesting this fixture
Can also be used by tests that will upload it to a test server, since
when the test server boots its download path will be somewhere else
"""
zip_path_arr = []
list_of_update_files = [
(
"rootfs.xz",
"rootfs.xz.sha256",
"rootfs.xz.hash.sig",
"tmp_uncomp_xz_hash_path",
"ot3-system.zip",
),
(
"rootfs.ext4",
"rootfs.ext4.hash",
"rootfs.ext4.hash.sig",
"tmp_uncomp_xz_hash_path",
"ot2-system.zip",
),
]
for index, (rootfs, sha256, sig, xz_hash, pkg) in enumerate(list_of_update_files):
rootfs_path = os.path.join(extracted_update_file_consolidated[index], rootfs)
hash_path = os.path.join(extracted_update_file_consolidated[index], sha256)
sig_path = os.path.join(extracted_update_file_consolidated[index], sig)
xz_hash_path = os.path.join(extracted_update_file_consolidated[index], xz_hash)
zip_path = os.path.join(extracted_update_file_consolidated[index], pkg)
with zipfile.ZipFile(zip_path, "w") as zf:
if not request.node.get_closest_marker("exclude_rootfs_ext4"):
zf.write(rootfs_path, rootfs)
if not request.node.get_closest_marker("exclude_rootfs_ext4_hash"):
zf.write(hash_path, sha256)
if not request.node.get_closest_marker("exclude_rootfs_ext4_hash_sig"):
zf.write(sig_path, sig)
zf.write(xz_hash_path, xz_hash)
zip_path_arr.append(zip_path)
os.unlink(rootfs_path)
os.unlink(hash_path)
os.unlink(sig_path)
return zip_path_arr
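# Hedged illustration (not part of the original fixtures): a test module could use
# the markers documented above to drop pieces of the update file, e.g.
#
#     @pytest.mark.exclude_rootfs_ext4_hash_sig
#     def test_rejects_unsigned_update(downloaded_update_file_consolidated):
#         ...
#
# The test name is hypothetical; only the marker/fixture names come from this file.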
def write_fake_rootfs(
rootfs_name: str, rootfs_path: str, rootfs_contents: bytes, uncomp_xz_path: str
) -> str:
if rootfs_name == "rootfs.xz":
with lzma.open(rootfs_path, "w") as f:
f.write(rootfs_contents)
with lzma.open(rootfs_path, "rb") as fsrc, open(uncomp_xz_path, "wb") as fdst:
while True:
chunk = fsrc.read(1024)
fdst.write(chunk)
if len(chunk) != 1024:
break
return uncomp_xz_path
else:
with open(rootfs_path, "wb") as rfs:
rfs.write(rootfs_contents)
return rootfs_path
def gen_hash_val_direct(rfs_path: str) -> str:
try:
shasum_out = subprocess.check_output(
[
"shasum",
"-a",
"256",
rfs_path,
]
)
return shasum_out
except (subprocess.CalledProcessError, FileNotFoundError):
pytest.skip("no shasum invokeable on command line")
@pytest.fixture
def extracted_update_file_consolidated(request, tmpdir):
"""
Return the path to a dir containing an unzipped update file.
To make a bad hash, mark with ``bad_hash``. To make a bad
signature, mark with ``bad_sig``.
"""
extracted_files_dir_path_arr = []
list_of_extracted_files = [
(
"rootfs.xz",
"rootfs.xz.sha256",
"rootfs.xz.hash.sig",
),
(
"rootfs.ext4",
"rootfs.ext4.hash",
"rootfs.ext4.hash.sig",
),
]
for (rootfs, sha256, sig) in list_of_extracted_files:
rootfs_path = os.path.join(tmpdir, rootfs)
hash_path = os.path.join(tmpdir, sha256)
uncomp_xz_hash_path = os.path.join(tmpdir, "tmp_uncomp_xz_hash_path")
sig_path = os.path.join(tmpdir, sig)
uncomp_xz_path = os.path.join(tmpdir, "tmp_uncomp")
rootfs_contents = os.urandom(100000)
write_fake_rootfs(rootfs, rootfs_path, rootfs_contents, uncomp_xz_path)
if request.node.get_closest_marker("bad_hash"):
hashval = b"0oas0ajcs0asd0asjc0ans0d9ajsd0ian0s9djas"
else:
hashval = re.match(
b"^([a-z0-9]+) ",
gen_hash_val_direct(rootfs_path),
).group(1)
hashval2 = re.match(
b"^([a-z0-9]+) ",
gen_hash_val_direct(uncomp_xz_path),
).group(1)
with open(hash_path, "wb") as rfsh:
rfsh.write(hashval)
with open(uncomp_xz_hash_path, "wb") as rfsh:
rfsh.write(hashval2)
if not request.node.get_closest_marker("bad_sig"):
try:
subprocess.check_output(["openssl", "version"])
except (subprocess.CalledProcessError, FileNotFoundError):
pytest.skip("requires openssl binary to be installed")
subprocess.check_call(
[
"openssl",
"dgst",
"-sha256",
"-sign",
os.path.join(one_up, "ot-update-server-unit-tests.key"),
"-out",
sig_path,
hash_path,
]
)
else:
with open(sig_path, "wb") as sigfile:
sigfile.write(os.urandom(256))
extracted_files_dir_path_arr.append(tmpdir)
return extracted_files_dir_path_arr
@pytest.fixture
def testing_partition(monkeypatch, tmpdir):
partfile = os.path.join(tmpdir, "fake-partition")
find_unused = mock.Mock()
monkeypatch.setattr(buildroot.update_actions, "_find_unused_partition", find_unused)
find_unused.return_value = FakeRootPartElem(
"TWO", common.update_actions.Partition(2, partfile)
)
return partfile
@pytest.fixture
def mock_partition_manager_valid_switch(tmpdir) -> MagicMock:
"""Mock Partition Manager."""
partfile = os.path.join(tmpdir, "fake-partition")
mock_part = MagicMock(spec=PartitionManager)
mock_part.find_unused_partition.return_value = Partition(2, partfile)
mock_part.switch_partition.return_value = Partition(2, partfile)
mock_part.resize_partition.return_value = True
mock_part.mount_fs.return_value = True
mock_part.umount_fs.return_value = True
mock_part.mountpoint_root.return_value = "/mnt"
return mock_part
|
py | 1a3b185faf4ccc3b0c94dcb491073bb939997ea3 | from collections import defaultdict
import logging
from . import base
class DummyBalancer(base.Balancer):
"""
Fake balancer class for local development.
"""
def __init__(self, config):
self.config = config
self.pools = defaultdict(set)
def add_nodes(self, pool, nodes):
self.pools[pool].update(nodes)
self.log_pool(pool)
def delete_nodes(self, pool, nodes):
self.pools[pool].difference_update(nodes)
self.log_pool(pool)
def get_nodes(self, pool):
return self.pools[pool]
def log_pool(self, pool):
msg = 'Dummy Pool "%s": %s' % (pool, list(self.pools[pool]))
logging.info(msg)
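# Minimal sketch (not part of the original module) of exercising the dummy balancer;
# the pool name and node addresses are hypothetical.
def _example_usage(config=None):
    balancer = DummyBalancer(config or {})
    balancer.add_nodes('web', ['10.0.0.1:80', '10.0.0.2:80'])
    balancer.delete_nodes('web', ['10.0.0.2:80'])
    return balancer.get_nodes('web')  # -> {'10.0.0.1:80'}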
|
py | 1a3b18cbfa5728e33989b80f83cee6e787b5dc1a | import gym
import vision_arena
import time
import pybullet as p
import pybullet_data
import cv2
import os
if __name__=="__main__":
parent_path = os.path.dirname(os.getcwd())
os.chdir(parent_path)
env = gym.make("vision_arena-v0")
time.sleep(3)
img = env.camera_feed()
cv2.imshow("img", img)
cv2.waitKey(0)
time.sleep(100)
|
py | 1a3b18e8c3ba5f05b6cb1522ba17448ebb053924 | #!/usr/bin/env python3
# Copyright (c) 2020 GBCR Developers
# Copyright (c) 2016-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test processing of feefilter messages."""
from decimal import Decimal
import time
from test_framework.messages import msg_feefilter
from test_framework.mininode import mininode_lock, P2PInterface
from test_framework.test_framework import GoldBCRTestFramework
def hashToHex(hash):
return format(hash, '064x')
# Wait up to 60 secs to see if the testnode has received all the expected invs
def allInvsMatch(invsExpected, testnode):
for x in range(60):
with mininode_lock:
if (sorted(invsExpected) == sorted(testnode.txinvs)):
return True
time.sleep(1)
return False
class TestP2PConn(P2PInterface):
def __init__(self):
super().__init__()
self.txinvs = []
def on_inv(self, message):
for i in message.inv:
if (i.type == 1):
self.txinvs.append(hashToHex(i.hash))
def clear_invs(self):
with mininode_lock:
self.txinvs = []
class FeeFilterTest(GoldBCRTestFramework):
def set_test_params(self):
self.num_nodes = 2
# We lower the various required feerates for this test
# to catch a corner-case where feefilter used to slightly undercut
# mempool and wallet feerate calculation based on GetFee
# rounding down 3 places, leading to stranded transactions.
# See issue #16499
self.extra_args = [["-minrelaytxfee=0.00000100", "-mintxfee=0.00000100"]]*self.num_nodes
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
node1 = self.nodes[1]
node0 = self.nodes[0]
# Get out of IBD
node1.generate(1)
self.sync_blocks()
self.nodes[0].add_p2p_connection(TestP2PConn())
# Test that invs are received by test connection for all txs at
# feerate of .2 sat/byte
node1.settxfee(Decimal("0.00000200"))
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert allInvsMatch(txids, self.nodes[0].p2p)
self.nodes[0].p2p.clear_invs()
# Set a filter of .15 sat/byte on test connection
self.nodes[0].p2p.send_and_ping(msg_feefilter(150))
# Test that txs are still being received by test connection (paying .15 sat/byte)
node1.settxfee(Decimal("0.00000150"))
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert allInvsMatch(txids, self.nodes[0].p2p)
self.nodes[0].p2p.clear_invs()
# Change tx fee rate to .1 sat/byte and test they are no longer received
# by the test connection
node1.settxfee(Decimal("0.00000100"))
[node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
self.sync_mempools() # must be sure node 0 has received all txs
# Send one transaction from node0 that should be received, so that we
# we can sync the test on receipt (if node1's txs were relayed, they'd
# be received by the time this node0 tx is received). This is
# unfortunately reliant on the current relay behavior where we batch up
# to 35 entries in an inv, which means that when this next transaction
# is eligible for relay, the prior transactions from node1 are eligible
# as well.
node0.settxfee(Decimal("0.00020000"))
txids = [node0.sendtoaddress(node0.getnewaddress(), 1)]
assert allInvsMatch(txids, self.nodes[0].p2p)
self.nodes[0].p2p.clear_invs()
# Remove fee filter and check that txs are received again
self.nodes[0].p2p.send_and_ping(msg_feefilter(0))
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert allInvsMatch(txids, self.nodes[0].p2p)
self.nodes[0].p2p.clear_invs()
if __name__ == '__main__':
FeeFilterTest().main()
|
py | 1a3b1a04639e2cdbe95a506b8b9a9518de44e506 | # -*- coding: utf-8 -*-
# Copyright (c) 2021, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class QualityInspectionParameterGroup(Document):
pass
|
py | 1a3b1a5967c33393b53c9fdf0be3bfbf1e0aedea | #!/usr/bin/env python3
# <Copyright 2019, Argo AI, LLC. Released under the MIT license.>
from typing import Tuple
import numpy as np
class FrameRecord:
"""
Store representation of a bounding box in some timeframe, in different coordinate systems.
This bounding box comes from a track that shares the same color.
"""
def __init__(
self,
bbox_city_fr: np.ndarray,
bbox_ego_frame: np.ndarray,
occlusion_val: int,
color: Tuple[float, float, float],
track_uuid: str,
obj_class_str: str,
) -> None:
"""Initialize FrameRecord.
Args:
bbox_city_fr: bounding box for city frame.
bbox_ego_frame: bounding box for ego frame.
occlusion_val: occlusion value.
color: tuple representing color. RGB values should be within [0,1] range.
track_uuid: track uuid
obj_class_str: object class string
"""
self.bbox_city_fr = bbox_city_fr
self.bbox_ego_frame = bbox_ego_frame
self.occlusion_val = occlusion_val
self.color = color
self.track_uuid = track_uuid
self.obj_class_str = obj_class_str
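# Hedged usage sketch added by the editor (not part of the original file); the corner
# arrays, occlusion value, colour and uuid below are illustrative assumptions only.
if __name__ == "__main__":
    _example = FrameRecord(
        bbox_city_fr=np.zeros((8, 3)),    # assumed 8 cuboid corners in the city frame
        bbox_ego_frame=np.zeros((8, 3)),  # assumed 8 cuboid corners in the egovehicle frame
        occlusion_val=0,
        color=(1.0, 0.0, 0.0),            # RGB within [0, 1], as the docstring requires
        track_uuid="00000000-0000-0000-0000-000000000000",
        obj_class_str="VEHICLE",
    )
    print(_example.obj_class_str)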
|
py | 1a3b1b3b68a8e9cd7531fe9c1ad145f668c8e9bc | #!/usr/bin/env python3
import serial, sys, json, logging
import paho.mqtt.client as mqtt
CONFIG_FILE='config.json'
try:
with open(CONFIG_FILE) as config_file:
config = json.load(config_file)
except:
print("Config file not present or invalid JSON!")
sys.exit(1)
header_retained = b'#R'
header_unretained = b'#U'
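# Editor's note (inferred from the parsing loop below; the exact device-side format is
# an assumption): the attached serial device is expected to emit lines of the form
#   #R<topic> <message>   -> published to MQTT with the retain flag set
#   #U<topic> <message>   -> published without the retain flag
# e.g. the line "#Rhome/temp 21.5" publishes "21.5" to topic "home/temp" as retained.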
ser = serial.Serial()
ser.baudrate = config['baudrate']
ser.port = config['port']
ser.timeout = 5
logging.basicConfig(level=logging.DEBUG if config['debug'] else logging.WARNING, format="%(levelname)s: %(message)s")
log = logging.getLogger("")
try:
ser.open()
except:
log.error("Failed to open serial port {}!".format(config['port']))
import serial.tools.list_ports
ports = serial.tools.list_ports.comports()
print("Available serial ports:")
for port, desc, hwid in sorted(ports):
print("{}: {}".format(port, desc))
sys.exit(1)
def onConnect(client, userdata, flags, rc):
log.info("Connected to MQTT server")
def onDisconnect(client, userdata, rc):
log.info("Disconnected from MQTT server")
def onLog(client, userdata, level, buf):
if (level == mqtt.MQTT_LOG_INFO or level == mqtt.MQTT_LOG_NOTICE):
log.info(buf)
elif (level == mqtt.MQTT_LOG_WARNING or level == mqtt.MQTT_LOG_ERR):
log.warning(buf)
def post_mqtt(topic, message, retain = False):
(rc, mid) = mqttc.publish(topic, message, qos=0, retain=retain)
if (rc != mqtt.MQTT_ERR_SUCCESS):
log.warning("MQTT Publish unsuccessful!")
mqttc = mqtt.Client()
mqttc.on_connect = onConnect
mqttc.on_disconnect = onDisconnect
mqttc.on_log = onLog
try:
mqttc.connect(config['mqtt_server'], config['mqtt_port'], 60)
except Exception as e:
log.error("Can't connect to the MQTT broker! {}".format(e))
if ser.is_open:
ser.close()
sys.exit(1)
mqttc.loop_start()
while True:
try:
line = ser.readline().rstrip()
        if line != b'':  # compare bytes by value; "is not b''" relies on object identity and is unreliable
log.debug(line.decode("utf-8"))
if line.startswith(header_retained) or line.startswith(header_unretained):
topic = line[2:]
try:
(topic, message) = topic.split(b' ', 1)
except ValueError:
message = b''
log.info("Posting {} to topic {}".format(
message.decode("utf-8"), topic.decode("utf-8")))
post_mqtt(topic.decode('utf8'), message, line.startswith(header_retained))
except KeyboardInterrupt:
print('\n')
mqttc.disconnect()
if ser.is_open:
ser.close()
sys.exit(0)
except Exception as e:
log.error("{}".format(e))
if ser.is_open:
ser.close()
sys.exit(1)
|
py | 1a3b1b862c7ac98a9d8136533331458a6bbb3cf1 | from PIL import Image
def bytes_to_bin(data):
return "".join(f"{bin(i)[2:]:>08}" for i in data)
def hide_lsb_image(image, binary):
pixels = image.load()
for i in range(image.height):
for j in range(image.width):
            r, g, b = pixels[j, i]
            # The bit index depends only on the row, so every pixel in row i carries
            # the same payload bit, and the payload repeats cyclically down the image.
            bit = int(binary[i % len(binary)])
            # Clear the least significant bit of each channel and write the payload bit.
            r = (r & (~0x01)) | bit
            g = (g & (~0x01)) | bit
            b = (b & (~0x01)) | bit
            pixels[j, i] = (r, g, b)
return pixels
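def extract_lsb_row_bits(image):
    """Hedged companion sketch added by the editor (not in the original script):
    recover the bit hide_lsb_image wrote to each row by reading the least significant
    bit of the red channel of the first pixel in every row. Reassembling the original
    payload is left out because the encoder repeats it cyclically across rows."""
    pixels = image.load()
    return [pixels[0, i][0] & 0x01 for i in range(image.height)]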
data = b"donnuCTF{d52127b1b3f17805675280653e10fb66}"
image = Image.open("easy2.png")
hide_lsb_image(image, bytes_to_bin(data))
image.save("encoded.png") |
py | 1a3b1bd493c8242109a9be5f41fb26053ef23ce3 | import re
import numbers
from collections import namedtuple
from .shapes import *
LAYER_DESCRIPTORS = {
# Caffe Types
'AbsVal': shape_identity,
'Accuracy': shape_scalar,
'ArgMax': shape_not_implemented,
'BatchNorm': shape_identity,
'BNLL': shape_not_implemented,
'Concat': shape_concat,
'ContrastiveLoss': shape_scalar,
'Convolution': shape_convolution,
'Deconvolution': shape_not_implemented,
'Data': shape_data,
'Dropout': shape_identity,
'DummyData': shape_data,
'EuclideanLoss': shape_scalar,
'Eltwise': shape_identity,
'Exp': shape_identity,
'Flatten': flatten_shape,
'HDF5Data': shape_data,
'HDF5Output': shape_identity,
'HingeLoss': shape_scalar,
'Im2col': shape_not_implemented,
'ImageData': shape_data,
'InfogainLoss': shape_scalar,
'InnerProduct': shape_inner_product,
'Input': shape_data,
'LRN': shape_identity,
'MemoryData': shape_mem_data,
'MultinomialLogisticLoss': shape_scalar,
'MVN': shape_not_implemented,
'Pooling': shape_pool,
'Power': shape_identity,
'ReLU': shape_identity,
'Scale': shape_identity,
'Sigmoid': shape_identity,
'SigmoidCrossEntropyLoss': shape_scalar,
'Silence': shape_not_implemented,
'Softmax': shape_identity,
'SoftmaxWithLoss': shape_scalar,
'Split': shape_not_implemented,
'Slice': shape_not_implemented,
'TanH': shape_identity,
'WindowData': shape_not_implemented,
'Threshold': shape_identity,
'Reshape': reshape_shape
}
LAYER_TYPES = LAYER_DESCRIPTORS.keys()
LayerType = type('LayerType', (), {t: t for t in LAYER_TYPES})
class NodeKind(LayerType):
@staticmethod
def map_raw_kind(kind):
if kind in LAYER_TYPES:
return kind
return None
@staticmethod
def compute_output_shape(node):
try:
val = LAYER_DESCRIPTORS[node.kind](node)
return val
except NotImplementedError:
raise KaffeError('Output shape computation not implemented for type: %s' % node.kind)
class NodeDispatchError(KaffeError):
pass
class NodeDispatch(object):
@staticmethod
def get_handler_name(node_kind):
if len(node_kind) <= 4:
# A catch-all for things like ReLU and tanh
return node_kind.lower()
# Convert from CamelCase to under_scored
name = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', node_kind)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', name).lower()
def get_handler(self, node_kind, prefix):
name = self.get_handler_name(node_kind)
name = '_'.join((prefix, name))
try:
return getattr(self, name)
except AttributeError:
raise NodeDispatchError('No handler found for node kind: %s (expected: %s)' %
(node_kind, name))
class LayerAdapter(object):
def __init__(self, layer, kind):
self.layer = layer
self.kind = kind
@property
def parameters(self):
name = NodeDispatch.get_handler_name(self.kind)
name = '_'.join((name, 'param'))
try:
return getattr(self.layer, name)
except AttributeError:
raise NodeDispatchError('Caffe parameters not found for layer kind: %s' % (self.kind))
@staticmethod
def get_kernel_value(scalar, repeated, idx, default=None):
if scalar:
return scalar
if repeated:
if isinstance(repeated, numbers.Number):
return repeated
if len(repeated) == 1:
# Same value applies to all spatial dimensions
return int(repeated[0])
assert idx < len(repeated)
# Extract the value for the given spatial dimension
return repeated[idx]
if default is None:
raise ValueError('Unable to determine kernel parameter!')
return default
@property
def kernel_parameters(self):
assert self.kind in (NodeKind.Convolution, NodeKind.Pooling)
params = self.parameters
k_h = self.get_kernel_value(params.kernel_h, params.kernel_size, 0)
k_w = self.get_kernel_value(params.kernel_w, params.kernel_size, 1)
s_h = self.get_kernel_value(params.stride_h, params.stride, 0, default=1)
s_w = self.get_kernel_value(params.stride_w, params.stride, 1, default=1)
p_h = self.get_kernel_value(params.pad_h, params.pad, 0, default=0)
        p_w = self.get_kernel_value(params.pad_w, params.pad, 1, default=0)  # use pad_w (not pad_h) for horizontal padding
return KernelParameters(k_h, k_w, s_h, s_w, p_h, p_w)
KernelParameters = namedtuple('KernelParameters', ['kernel_h', 'kernel_w', 'stride_h', 'stride_w',
'pad_h', 'pad_w'])
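# Hedged illustration added by the editor (not part of the original module): for a
# hypothetical Caffe pooling layer declared with kernel_size: 3, stride: 2 and no
# padding, LayerAdapter.kernel_parameters would resolve to
#   KernelParameters(kernel_h=3, kernel_w=3, stride_h=2, stride_w=2, pad_h=0, pad_w=0)
# because get_kernel_value falls back to the single repeated/scalar value when the
# explicit *_h / *_w fields are unset, and to the supplied default when both are.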
|
py | 1a3b1bfb163f439f9727c5e0c2925ebd12323bb2 | import os
import download
import settings
import translate
import utils
def create_profileicon_json(lang, path):
cdragon_profileicons = download.download_versioned_cdragon_profileicons_summary()
profileicon = {
"type": "profileicon",
"version": settings.patch['json'],
"data": {}
}
for x in cdragon_profileicons:
if "iconPath" not in x:
continue
icon_id = x["id"]
profileicon["data"][icon_id] = {
"id": icon_id,
"title": translate.t(lang, "summoner_icon_title_" + str(icon_id)),
"description": translate.t(lang, "summoner_icon_description_" + str(icon_id)),
"image": {
"full": str(icon_id) + ".png"
}
}
utils.save_json(profileicon, os.path.join(path, "profileicon.json"))
return profileicon
def add_sprite_info(lang, path):
"""
Adds Sprite Info to JSONs
"""
data = utils.load_json(os.path.join(path, "spriter_output.json"))
profileicons = utils.load_json(os.path.join(path, f"data/{lang}/profileicon.json"))
for profileicon in profileicons['data']:
try:
profileicons['data'][profileicon]['image'].update({
'sprite': data['result']['profileicon'][profileicon]['regular']['texture'] + ".png",
'group': "profileicon",
'x': data['result']['profileicon'][profileicon]['regular']['x'],
'y': data['result']['profileicon'][profileicon]['regular']['y'],
'w': data['result']['profileicon'][profileicon]['regular']['width'],
'h': data['result']['profileicon'][profileicon]['regular']['height'],
})
except KeyError:
print("Failed to add sprite of profileicon: " + profileicon)
utils.save_json(profileicons, os.path.join(path, f"data/{lang}/profileicon.json"))
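# Hedged illustration added by the editor: after create_profileicon_json and
# add_sprite_info have both run, one entry of profileicon.json is expected to look
# roughly like this (the id, texture name and sprite coordinates below are made up):
#   "1": {
#       "id": 1,
#       "title": "...", "description": "...",
#       "image": {"full": "1.png", "sprite": "profileicon0.png", "group": "profileicon",
#                 "x": 0, "y": 0, "w": 48, "h": 48}
#   }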
|
py | 1a3b1ca99b6c577cdf7016dd3900285908dbde26 | # Copyright (c) 2020, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
# Author - Shivam Mishra <[email protected]>
from __future__ import unicode_literals
import frappe
from json import loads, dumps
from frappe import _, DoesNotExistError, ValidationError, _dict
from frappe.boot import get_allowed_pages, get_allowed_reports
from six import string_types
from functools import wraps
from frappe.cache_manager import (
build_domain_restriced_doctype_cache,
build_domain_restriced_page_cache,
build_table_count_cache
)
def handle_not_exist(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
try:
return fn(*args, **kwargs)
except DoesNotExistError:
if frappe.message_log:
frappe.message_log.pop()
return []
return wrapper
class Workspace:
def __init__(self, page_name, minimal=False):
self.page_name = page_name
self.extended_links = []
self.extended_charts = []
self.extended_shortcuts = []
self.user = frappe.get_user()
self.allowed_modules = self.get_cached('user_allowed_modules', self.get_allowed_modules)
self.doc = self.get_page_for_user()
if self.doc.module and self.doc.module not in self.allowed_modules:
raise frappe.PermissionError
self.can_read = self.get_cached('user_perm_can_read', self.get_can_read_items)
self.allowed_pages = get_allowed_pages(cache=True)
self.allowed_reports = get_allowed_reports(cache=True)
if not minimal:
self.onboarding_doc = self.get_onboarding_doc()
self.onboarding = None
self.table_counts = get_table_with_counts()
self.restricted_doctypes = frappe.cache().get_value("domain_restricted_doctypes") or build_domain_restriced_doctype_cache()
self.restricted_pages = frappe.cache().get_value("domain_restricted_pages") or build_domain_restriced_page_cache()
def is_page_allowed(self):
cards = self.doc.get_link_groups() + get_custom_reports_and_doctypes(self.doc.module) + self.extended_links
shortcuts = self.doc.shortcuts + self.extended_shortcuts
for section in cards:
links = loads(section.get('links')) if isinstance(section.get('links'), string_types) else section.get('links')
for item in links:
if self.is_item_allowed(item.get('link_to'), item.get('link_type')):
return True
def _in_active_domains(item):
if not item.restrict_to_domain:
return True
else:
return item.restrict_to_domain in frappe.get_active_domains()
for item in shortcuts:
if self.is_item_allowed(item.link_to, item.type) and _in_active_domains(item):
return True
return False
def get_cached(self, cache_key, fallback_fn):
_cache = frappe.cache()
value = _cache.get_value(cache_key, user=frappe.session.user)
if value:
return value
value = fallback_fn()
		# Cache expires after six hours (21600 seconds)
_cache.set_value(cache_key, value, frappe.session.user, 21600)
return value
def get_can_read_items(self):
if not self.user.can_read:
self.user.build_permissions()
return self.user.can_read
def get_allowed_modules(self):
if not self.user.allow_modules:
self.user.build_permissions()
return self.user.allow_modules
def get_page_for_user(self):
filters = {
'extends': self.page_name,
'for_user': frappe.session.user
}
user_pages = frappe.get_all("Workspace", filters=filters, limit=1)
if user_pages:
return frappe.get_cached_doc("Workspace", user_pages[0])
filters = {
'extends_another_page': 1,
'extends': self.page_name,
'is_default': 1
}
default_page = frappe.get_all("Workspace", filters=filters, limit=1)
if default_page:
return frappe.get_cached_doc("Workspace", default_page[0])
self.get_pages_to_extend()
return frappe.get_cached_doc("Workspace", self.page_name)
def get_onboarding_doc(self):
# Check if onboarding is enabled
if not frappe.get_system_settings("enable_onboarding"):
return None
if not self.doc.onboarding:
return None
if frappe.db.get_value("Module Onboarding", self.doc.onboarding, "is_complete"):
return None
doc = frappe.get_doc("Module Onboarding", self.doc.onboarding)
# Check if user is allowed
allowed_roles = set(doc.get_allowed_roles())
user_roles = set(frappe.get_roles())
if not allowed_roles & user_roles:
return None
# Check if already complete
if doc.check_completion():
return None
return doc
def get_pages_to_extend(self):
pages = frappe.get_all("Workspace", filters={
"extends": self.page_name,
'restrict_to_domain': ['in', frappe.get_active_domains()],
'for_user': '',
'module': ['in', self.allowed_modules]
})
pages = [frappe.get_cached_doc("Workspace", page['name']) for page in pages]
for page in pages:
self.extended_links = self.extended_links + page.get_link_groups()
self.extended_charts = self.extended_charts + page.charts
self.extended_shortcuts = self.extended_shortcuts + page.shortcuts
def is_item_allowed(self, name, item_type):
if frappe.session.user == "Administrator":
return True
item_type = item_type.lower()
if item_type == "doctype":
			return (name in (self.can_read or []) and name in (self.restricted_doctypes or []))
if item_type == "page":
return (name in self.allowed_pages and name in self.restricted_pages)
if item_type == "report":
return name in self.allowed_reports
if item_type == "help":
return True
if item_type == "dashboard":
return True
return False
def build_workspace(self):
self.cards = {
'label': _(self.doc.cards_label),
'items': self.get_links()
}
self.charts = {
'label': _(self.doc.charts_label),
'items': self.get_charts()
}
self.shortcuts = {
'label': _(self.doc.shortcuts_label),
'items': self.get_shortcuts()
}
if self.onboarding_doc:
self.onboarding = {
'label': _(self.onboarding_doc.title),
'subtitle': _(self.onboarding_doc.subtitle),
'success': _(self.onboarding_doc.success_message),
'docs_url': self.onboarding_doc.documentation_url,
'items': self.get_onboarding_steps()
}
def _doctype_contains_a_record(self, name):
exists = self.table_counts.get(name, False)
if not exists and frappe.db.exists(name):
if not frappe.db.get_value('DocType', name, 'issingle'):
exists = bool(frappe.db.get_all(name, limit=1))
else:
exists = True
self.table_counts[name] = exists
return exists
def _prepare_item(self, item):
if item.dependencies:
dependencies = [dep.strip() for dep in item.dependencies.split(",")]
incomplete_dependencies = [d for d in dependencies if not self._doctype_contains_a_record(d)]
if len(incomplete_dependencies):
item.incomplete_dependencies = incomplete_dependencies
else:
item.incomplete_dependencies = ""
if item.onboard:
# Mark Spotlights for initial
if item.get("type") == "doctype":
name = item.get("name")
count = self._doctype_contains_a_record(name)
item["count"] = count
# Translate label
item["label"] = _(item.label) if item.label else _(item.name)
return item
@handle_not_exist
def get_links(self):
cards = self.doc.get_link_groups()
if not self.doc.hide_custom:
cards = cards + get_custom_reports_and_doctypes(self.doc.module)
if len(self.extended_links):
cards = merge_cards_based_on_label(cards + self.extended_links)
default_country = frappe.db.get_default("country")
new_data = []
for card in cards:
new_items = []
card = _dict(card)
links = card.get('links', [])
for item in links:
item = _dict(item)
# Condition: based on country
if item.country and item.country != default_country:
continue
# Check if user is allowed to view
if self.is_item_allowed(item.link_to, item.link_type):
prepared_item = self._prepare_item(item)
new_items.append(prepared_item)
if new_items:
if isinstance(card, _dict):
new_card = card.copy()
else:
new_card = card.as_dict().copy()
new_card["links"] = new_items
new_card["label"] = _(new_card["label"])
new_data.append(new_card)
return new_data
@handle_not_exist
def get_charts(self):
all_charts = []
if frappe.has_permission("Dashboard Chart", throw=False):
charts = self.doc.charts
if len(self.extended_charts):
charts = charts + self.extended_charts
for chart in charts:
if frappe.has_permission('Dashboard Chart', doc=chart.chart_name):
# Translate label
chart.label = _(chart.label) if chart.label else _(chart.chart_name)
all_charts.append(chart)
return all_charts
@handle_not_exist
def get_shortcuts(self):
def _in_active_domains(item):
if not item.restrict_to_domain:
return True
else:
return item.restrict_to_domain in frappe.get_active_domains()
items = []
shortcuts = self.doc.shortcuts
if len(self.extended_shortcuts):
shortcuts = shortcuts + self.extended_shortcuts
for item in shortcuts:
new_item = item.as_dict().copy()
if self.is_item_allowed(item.link_to, item.type) and _in_active_domains(item):
if item.type == "Report":
report = self.allowed_reports.get(item.link_to, {})
if report.get("report_type") in ["Query Report", "Script Report", "Custom Report"]:
new_item['is_query_report'] = 1
else:
new_item['ref_doctype'] = report.get('ref_doctype')
# Translate label
new_item["label"] = _(item.label) if item.label else _(item.link_to)
items.append(new_item)
return items
@handle_not_exist
def get_onboarding_steps(self):
steps = []
for doc in self.onboarding_doc.get_steps():
step = doc.as_dict().copy()
step.label = _(doc.title)
if step.action == "Create Entry":
step.is_submittable = frappe.db.get_value("DocType", step.reference_document, 'is_submittable', cache=True)
steps.append(step)
return steps
@frappe.whitelist()
@frappe.read_only()
def get_desktop_page(page):
"""Applies permissions, customizations and returns the configruration for a page
on desk.
Args:
page (string): page name
Returns:
dict: dictionary of cards, charts and shortcuts to be displayed on website
"""
try:
wspace = Workspace(page)
wspace.build_workspace()
return {
'charts': wspace.charts,
'shortcuts': wspace.shortcuts,
'cards': wspace.cards,
'onboarding': wspace.onboarding,
'allow_customization': not wspace.doc.disable_user_customization
}
except DoesNotExistError:
return {}
@frappe.whitelist()
def get_desk_sidebar_items():
"""Get list of sidebar items for desk"""
# don't get domain restricted pages
blocked_modules = frappe.get_doc('User', frappe.session.user).get_blocked_modules()
filters = {
'restrict_to_domain': ['in', frappe.get_active_domains()],
'extends_another_page': 0,
'for_user': '',
'module': ['not in', blocked_modules]
}
if not frappe.local.conf.developer_mode:
filters['developer_mode_only'] = '0'
# pages sorted based on pinned to top and then by name
order_by = "pin_to_top desc, pin_to_bottom asc, name asc"
all_pages = frappe.get_all("Workspace", fields=["name", "category", "icon", "module"],
filters=filters, order_by=order_by, ignore_permissions=True)
pages = []
# Filter Page based on Permission
for page in all_pages:
try:
wspace = Workspace(page.get('name'), True)
if wspace.is_page_allowed():
pages.append(page)
page['label'] = _(page.get('name'))
except frappe.PermissionError:
pass
return pages
def get_table_with_counts():
counts = frappe.cache().get_value("information_schema:counts")
if not counts:
counts = build_table_count_cache()
return counts
def get_custom_reports_and_doctypes(module):
return [
_dict({
"label": _("Custom Documents"),
"links": get_custom_doctype_list(module)
}),
_dict({
"label": _("Custom Reports"),
"links": get_custom_report_list(module)
}),
]
def get_custom_doctype_list(module):
doctypes = frappe.get_all("DocType", fields=["name"], filters={"custom": 1, "istable": 0, "module": module}, order_by="name")
out = []
for d in doctypes:
out.append({
"type": "Link",
"link_type": "doctype",
"link_to": d.name,
"label": _(d.name)
})
return out
def get_custom_report_list(module):
"""Returns list on new style reports for modules."""
reports = frappe.get_all("Report", fields=["name", "ref_doctype", "report_type"], filters=
{"is_standard": "No", "disabled": 0, "module": module},
order_by="name")
out = []
for r in reports:
out.append({
"type": "Link",
"link_type": "report",
"doctype": r.ref_doctype,
"is_query_report": 1 if r.report_type in ("Query Report", "Script Report", "Custom Report") else 0,
"label": _(r.name),
"link_to": r.name,
})
return out
def get_custom_workspace_for_user(page):
"""Get custom page from workspace if exists or create one
Args:
		page (string): Page name
Returns:
Object: Document object
"""
filters = {
'extends': page,
'for_user': frappe.session.user
}
pages = frappe.get_list("Workspace", filters=filters)
if pages:
return frappe.get_doc("Workspace", pages[0])
doc = frappe.new_doc("Workspace")
doc.extends = page
doc.for_user = frappe.session.user
return doc
@frappe.whitelist()
def save_customization(page, config):
"""Save customizations as a separate doctype in Workspace per user
Args:
page (string): Name of the page to be edited
		config (dict): Dictionary config of all widgets
Returns:
Boolean: Customization saving status
"""
original_page = frappe.get_doc("Workspace", page)
page_doc = get_custom_workspace_for_user(page)
# Update field values
page_doc.update({
"icon": original_page.icon,
"charts_label": original_page.charts_label,
"cards_label": original_page.cards_label,
"shortcuts_label": original_page.shortcuts_label,
"module": original_page.module,
"onboarding": original_page.onboarding,
"developer_mode_only": original_page.developer_mode_only,
"category": original_page.category
})
config = _dict(loads(config))
if config.charts:
page_doc.charts = prepare_widget(config.charts, "Workspace Chart", "charts")
if config.shortcuts:
page_doc.shortcuts = prepare_widget(config.shortcuts, "Workspace Shortcut", "shortcuts")
if config.cards:
page_doc.build_links_table_from_cards(config.cards)
# Set label
page_doc.label = page + '-' + frappe.session.user
try:
if page_doc.is_new():
page_doc.insert(ignore_permissions=True)
else:
page_doc.save(ignore_permissions=True)
except (ValidationError, TypeError) as e:
# Create a json string to log
json_config = dumps(config, sort_keys=True, indent=4)
# Error log body
log = \
"""
page: {0}
config: {1}
exception: {2}
""".format(page, json_config, e)
frappe.log_error(log, _("Could not save customization"))
return False
return True
def prepare_widget(config, doctype, parentfield):
"""Create widget child table entries with parent details
Args:
config (dict): Dictionary containing widget config
doctype (string): Doctype name of the child table
parentfield (string): Parent field for the child table
Returns:
TYPE: List of Document objects
"""
if not config:
return []
order = config.get('order')
widgets = config.get('widgets')
prepare_widget_list = []
for idx, name in enumerate(order):
wid_config = widgets[name].copy()
# Some cleanup
wid_config.pop("name", None)
# New Doc
doc = frappe.new_doc(doctype)
doc.update(wid_config)
# Manually Set IDX
doc.idx = idx + 1
# Set Parent Field
doc.parentfield = parentfield
prepare_widget_list.append(doc)
return prepare_widget_list
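# Hedged illustration added by the editor: prepare_widget expects `config` shaped like
#   {"order": ["chart-1"], "widgets": {"chart-1": {...widget fieldnames...}}}
# (the widget field names themselves are whatever the child doctype defines) and it
# returns one child document per entry in "order", with idx numbered from 1 and
# parentfield set to the supplied value.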
@frappe.whitelist()
def update_onboarding_step(name, field, value):
"""Update status of onboaridng step
Args:
name (string): Name of the doc
field (string): field to be updated
value: Value to be updated
"""
frappe.db.set_value("Onboarding Step", name, field, value)
@frappe.whitelist()
def reset_customization(page):
"""Reset workspace customizations for a user
Args:
page (string): Name of the page to be reset
"""
page_doc = get_custom_workspace_for_user(page)
page_doc.delete()
def merge_cards_based_on_label(cards):
"""Merge cards with common label."""
cards_dict = {}
for card in cards:
label = card.get('label')
if label in cards_dict:
links = cards_dict[label].links + card.links
cards_dict[label].update(dict(links=links))
cards_dict[label] = cards_dict.pop(label)
else:
cards_dict[label] = card
return list(cards_dict.values())
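# Hedged illustration added by the editor: merge_cards_based_on_label collapses cards
# that share a label by concatenating their links while keeping the merged card at the
# end of the ordering, e.g. two cards labelled "Tools" with links [a] and [b] become a
# single "Tools" card whose links are [a, b].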
|
py | 1a3b1d9bf7427f6426ddb3743875aedbf7b22a06 | from cloudproxy.providers.digitalocean.main import do_deployment, do_start
from tests.test_providers_digitalocean_functions import test_list_droplets, test_create_proxy, test_delete_proxy
def test_do_deployment(mocker):
mocker.patch(
'cloudproxy.providers.digitalocean.main.list_droplets',
return_value=test_list_droplets(mocker)
)
mocker.patch(
'cloudproxy.providers.digitalocean.main.create_proxy',
return_value=test_create_proxy(mocker)
)
mocker.patch(
'cloudproxy.providers.digitalocean.main.delete_proxy',
return_value=test_delete_proxy(mocker)
)
assert do_deployment(1) == 1
def test_initiatedo(mocker):
mocker.patch(
'cloudproxy.providers.digitalocean.main.do_deployment',
return_value=2
)
mocker.patch(
'cloudproxy.providers.digitalocean.main.do_check_alive',
return_value=["192.1.1.1"]
)
mocker.patch(
'cloudproxy.providers.digitalocean.main.do_check_delete',
return_value=True
)
assert do_start() == ["192.1.1.1"] |
py | 1a3b1df57779352a86ad4ac93c066d419504af84 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
import warnings
from functools import reduce
from threading import RLock
if sys.version >= '3':
basestring = unicode = str
xrange = range
else:
from itertools import izip as zip, imap as map
from pyspark import since
from pyspark.rdd import RDD, ignore_unicode_prefix
from pyspark.sql.conf import RuntimeConfig
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.types import Row, DataType, StringType, StructType, TimestampType, \
_make_type_verifier, _infer_schema, _has_nulltype, _merge_type, _create_converter, \
_parse_datatype_string
from pyspark.sql.utils import install_exception_handler
__all__ = ["SparkSession"]
def _monkey_patch_RDD(sparkSession):
def toDF(self, schema=None, sampleRatio=None):
"""
Converts current :class:`RDD` into a :class:`DataFrame`
This is a shorthand for ``spark.createDataFrame(rdd, schema, sampleRatio)``
:param schema: a :class:`pyspark.sql.types.StructType` or list of names of columns
        :param sampleRatio: the sample ratio of rows used for inferring
:return: a DataFrame
>>> rdd.toDF().collect()
[Row(name=u'Alice', age=1)]
"""
return sparkSession.createDataFrame(self, schema, sampleRatio)
RDD.toDF = toDF
class SparkSession(object):
"""The entry point to programming Spark with the Dataset and DataFrame API.
A SparkSession can be used create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
To create a SparkSession, use the following builder pattern:
>>> spark = SparkSession.builder \\
... .master("local") \\
... .appName("Word Count") \\
... .config("spark.some.config.option", "some-value") \\
... .getOrCreate()
.. autoattribute:: builder
:annotation:
"""
class Builder(object):
"""Builder for :class:`SparkSession`.
"""
_lock = RLock()
_options = {}
@since(2.0)
def config(self, key=None, value=None, conf=None):
"""Sets a config option. Options set using this method are automatically propagated to
both :class:`SparkConf` and :class:`SparkSession`'s own configuration.
For an existing SparkConf, use `conf` parameter.
>>> from pyspark.conf import SparkConf
>>> SparkSession.builder.config(conf=SparkConf())
<pyspark.sql.session...
For a (key, value) pair, you can omit parameter names.
>>> SparkSession.builder.config("spark.some.config.option", "some-value")
<pyspark.sql.session...
:param key: a key name string for configuration property
:param value: a value for configuration property
:param conf: an instance of :class:`SparkConf`
"""
with self._lock:
if conf is None:
self._options[key] = str(value)
else:
for (k, v) in conf.getAll():
self._options[k] = v
return self
@since(2.0)
def master(self, master):
"""Sets the Spark master URL to connect to, such as "local" to run locally, "local[4]"
to run locally with 4 cores, or "spark://master:7077" to run on a Spark standalone
cluster.
:param master: a url for spark master
"""
return self.config("spark.master", master)
@since(2.0)
def appName(self, name):
"""Sets a name for the application, which will be shown in the Spark web UI.
If no application name is set, a randomly generated name will be used.
:param name: an application name
"""
return self.config("spark.app.name", name)
@since(2.0)
def enableHiveSupport(self):
"""Enables Hive support, including connectivity to a persistent Hive metastore, support
for Hive serdes, and Hive user-defined functions.
"""
return self.config("spark.sql.catalogImplementation", "hive")
@since(2.0)
def getOrCreate(self):
"""Gets an existing :class:`SparkSession` or, if there is no existing one, creates a
new one based on the options set in this builder.
This method first checks whether there is a valid global default SparkSession, and if
yes, return that one. If no valid global default SparkSession exists, the method
creates a new SparkSession and assigns the newly created SparkSession as the global
default.
>>> s1 = SparkSession.builder.config("k1", "v1").getOrCreate()
>>> s1.conf.get("k1") == s1.sparkContext.getConf().get("k1") == "v1"
True
In case an existing SparkSession is returned, the config options specified
in this builder will be applied to the existing SparkSession.
>>> s2 = SparkSession.builder.config("k2", "v2").getOrCreate()
>>> s1.conf.get("k1") == s2.conf.get("k1")
True
>>> s1.conf.get("k2") == s2.conf.get("k2")
True
"""
with self._lock:
from pyspark.context import SparkContext
from pyspark.conf import SparkConf
session = SparkSession._instantiatedSession
if session is None or session._sc._jsc is None:
sparkConf = SparkConf()
for key, value in self._options.items():
sparkConf.set(key, value)
sc = SparkContext.getOrCreate(sparkConf)
# This SparkContext may be an existing one.
for key, value in self._options.items():
# we need to propagate the confs
# before we create the SparkSession. Otherwise, confs like
# warehouse path and metastore url will not be set correctly (
# these confs cannot be changed once the SparkSession is created).
sc._conf.set(key, value)
session = SparkSession(sc)
for key, value in self._options.items():
session._jsparkSession.sessionState().conf().setConfString(key, value)
for key, value in self._options.items():
session.sparkContext._conf.set(key, value)
return session
builder = Builder()
"""A class attribute having a :class:`Builder` to construct :class:`SparkSession` instances"""
_instantiatedSession = None
@ignore_unicode_prefix
def __init__(self, sparkContext, jsparkSession=None):
"""Creates a new SparkSession.
>>> from datetime import datetime
>>> spark = SparkSession(sc)
>>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
... time=datetime(2014, 8, 1, 14, 1, 5))])
>>> df = allTypes.toDF()
>>> df.createOrReplaceTempView("allTypes")
>>> spark.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
... 'from allTypes where b and i > 0').collect()
[Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \
dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
>>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
[(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
"""
from pyspark.sql.context import SQLContext
self._sc = sparkContext
self._jsc = self._sc._jsc
self._jvm = self._sc._jvm
if jsparkSession is None:
jsparkSession = self._jvm.SparkSession.builder().getOrCreate()
self._jsparkSession = jsparkSession
self._jwrapped = self._jsparkSession.sqlContext()
self._wrapped = SQLContext(self._sc, self, self._jwrapped)
_monkey_patch_RDD(self)
install_exception_handler()
# If we had an instantiated SparkSession attached with a SparkContext
# which is stopped now, we need to renew the instantiated SparkSession.
# Otherwise, we will use invalid SparkSession when we call Builder.getOrCreate.
if SparkSession._instantiatedSession is None \
or SparkSession._instantiatedSession._sc._jsc is None:
SparkSession._instantiatedSession = self
def _repr_html_(self):
return """
<div>
<p><b>SparkSession - {catalogImplementation}</b></p>
{sc_HTML}
</div>
""".format(
catalogImplementation=self.conf.get("spark.sql.catalogImplementation"),
sc_HTML=self.sparkContext._repr_html_()
)
@since(2.0)
def newSession(self):
"""
Returns a new SparkSession as new session, that has separate SQLConf,
registered temporary views and UDFs, but shared SparkContext and
table cache.
"""
return self.__class__(self._sc, self._jsparkSession.newSession())
@property
@since(2.0)
def sparkContext(self):
"""Returns the underlying :class:`SparkContext`."""
return self._sc
@property
@since(2.0)
def version(self):
"""The version of Spark on which this application is running."""
return self._jsparkSession.version()
@property
@since(2.0)
def conf(self):
"""Runtime configuration interface for Spark.
This is the interface through which the user can get and set all Spark and Hadoop
configurations that are relevant to Spark SQL. When getting the value of a config,
this defaults to the value set in the underlying :class:`SparkContext`, if any.
"""
if not hasattr(self, "_conf"):
self._conf = RuntimeConfig(self._jsparkSession.conf())
return self._conf
@property
@since(2.0)
def catalog(self):
"""Interface through which the user may create, drop, alter or query underlying
databases, tables, functions etc.
:return: :class:`Catalog`
"""
from pyspark.sql.catalog import Catalog
if not hasattr(self, "_catalog"):
self._catalog = Catalog(self)
return self._catalog
@property
@since(2.0)
def udf(self):
"""Returns a :class:`UDFRegistration` for UDF registration.
:return: :class:`UDFRegistration`
"""
from pyspark.sql.udf import UDFRegistration
return UDFRegistration(self)
@since(2.0)
def range(self, start, end=None, step=1, numPartitions=None):
"""
Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numPartitions: the number of partitions of the DataFrame
:return: :class:`DataFrame`
>>> spark.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> spark.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
if end is None:
jdf = self._jsparkSession.range(0, int(start), int(step), int(numPartitions))
else:
jdf = self._jsparkSession.range(int(start), int(end), int(step), int(numPartitions))
return DataFrame(jdf, self._wrapped)
def _inferSchemaFromList(self, data, names=None):
"""
Infer schema from list of Row or tuple.
:param data: list of Row or tuple
:param names: list of column names
:return: :class:`pyspark.sql.types.StructType`
"""
if not data:
raise ValueError("can not infer schema from empty dataset")
first = data[0]
if type(first) is dict:
warnings.warn("inferring schema from dict is deprecated,"
"please use pyspark.sql.Row instead")
schema = reduce(_merge_type, (_infer_schema(row, names) for row in data))
if _has_nulltype(schema):
raise ValueError("Some of types cannot be determined after inferring")
return schema
def _inferSchema(self, rdd, samplingRatio=None, names=None):
"""
Infer schema from an RDD of Row or tuple.
:param rdd: an RDD of Row or tuple
:param samplingRatio: sampling ratio, or no sampling (default)
:return: :class:`pyspark.sql.types.StructType`
"""
first = rdd.first()
if not first:
raise ValueError("The first row in RDD is empty, "
"can not infer schema")
if type(first) is dict:
warnings.warn("Using RDD of dict to inferSchema is deprecated. "
"Use pyspark.sql.Row instead")
if samplingRatio is None:
schema = _infer_schema(first, names=names)
if _has_nulltype(schema):
for row in rdd.take(100)[1:]:
schema = _merge_type(schema, _infer_schema(row, names=names))
if not _has_nulltype(schema):
break
else:
raise ValueError("Some of types cannot be determined by the "
"first 100 rows, please try again with sampling")
else:
if samplingRatio < 0.99:
rdd = rdd.sample(False, float(samplingRatio))
schema = rdd.map(lambda row: _infer_schema(row, names)).reduce(_merge_type)
return schema
def _createFromRDD(self, rdd, schema, samplingRatio):
"""
Create an RDD for DataFrame from an existing RDD, returns the RDD and schema.
"""
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchema(rdd, samplingRatio, names=schema)
converter = _create_converter(struct)
rdd = rdd.map(converter)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif not isinstance(schema, StructType):
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
rdd = rdd.map(schema.toInternal)
return rdd, schema
def _createFromLocal(self, data, schema):
"""
Create an RDD for DataFrame from a list or pandas.DataFrame, returns
the RDD and schema.
"""
# make sure data could consumed multiple times
if not isinstance(data, list):
data = list(data)
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchemaFromList(data, names=schema)
converter = _create_converter(struct)
data = map(converter, data)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif not isinstance(schema, StructType):
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
data = [schema.toInternal(row) for row in data]
return self._sc.parallelize(data), schema
def _get_numpy_record_dtype(self, rec):
"""
Used when converting a pandas.DataFrame to Spark using to_records(), this will correct
the dtypes of fields in a record so they can be properly loaded into Spark.
:param rec: a numpy record to check field dtypes
:return corrected dtype for a numpy.record or None if no correction needed
"""
import numpy as np
cur_dtypes = rec.dtype
col_names = cur_dtypes.names
record_type_list = []
has_rec_fix = False
for i in xrange(len(cur_dtypes)):
curr_type = cur_dtypes[i]
# If type is a datetime64 timestamp, convert to microseconds
# NOTE: if dtype is datetime[ns] then np.record.tolist() will output values as longs,
# conversion from [us] or lower will lead to py datetime objects, see SPARK-22417
if curr_type == np.dtype('datetime64[ns]'):
curr_type = 'datetime64[us]'
has_rec_fix = True
record_type_list.append((str(col_names[i]), curr_type))
return np.dtype(record_type_list) if has_rec_fix else None
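    # Hedged illustration added by the editor: for a record dtype such as
    #   [('ts', 'datetime64[ns]'), ('val', '<i8')]
    # _get_numpy_record_dtype returns a corrected dtype with ('ts', 'datetime64[us]')
    # so that np.record.tolist() yields datetime objects rather than raw longs
    # (see SPARK-22417 above); it returns None when no field needs the fix.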
def _convert_from_pandas(self, pdf, schema, timezone):
"""
Convert a pandas.DataFrame to list of records that can be used to make a DataFrame
:return list of records
"""
if timezone is not None:
from pyspark.sql.types import _check_series_convert_timestamps_tz_local
copied = False
if isinstance(schema, StructType):
for field in schema:
# TODO: handle nested timestamps, such as ArrayType(TimestampType())?
if isinstance(field.dataType, TimestampType):
s = _check_series_convert_timestamps_tz_local(pdf[field.name], timezone)
if s is not pdf[field.name]:
if not copied:
# Copy once if the series is modified to prevent the original
# Pandas DataFrame from being updated
pdf = pdf.copy()
copied = True
pdf[field.name] = s
else:
for column, series in pdf.iteritems():
s = _check_series_convert_timestamps_tz_local(series, timezone)
if s is not series:
if not copied:
# Copy once if the series is modified to prevent the original
# Pandas DataFrame from being updated
pdf = pdf.copy()
copied = True
pdf[column] = s
# Convert pandas.DataFrame to list of numpy records
np_records = pdf.to_records(index=False)
# Check if any columns need to be fixed for Spark to infer properly
if len(np_records) > 0:
record_dtype = self._get_numpy_record_dtype(np_records[0])
if record_dtype is not None:
return [r.astype(record_dtype).tolist() for r in np_records]
# Convert list of numpy records to python lists
return [r.tolist() for r in np_records]
def _create_from_pandas_with_arrow(self, pdf, schema, timezone):
"""
Create a DataFrame from a given pandas.DataFrame by slicing it into partitions, converting
to Arrow data, then sending to the JVM to parallelize. If a schema is passed in, the
data types will be used to coerce the data in Pandas to Arrow conversion.
"""
from pyspark.serializers import ArrowSerializer, _create_batch
from pyspark.sql.types import from_arrow_schema, to_arrow_type, TimestampType
from pyspark.sql.utils import require_minimum_pandas_version, \
require_minimum_pyarrow_version
require_minimum_pandas_version()
require_minimum_pyarrow_version()
from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype
# Determine arrow types to coerce data when creating batches
if isinstance(schema, StructType):
arrow_types = [to_arrow_type(f.dataType) for f in schema.fields]
elif isinstance(schema, DataType):
raise ValueError("Single data type %s is not supported with Arrow" % str(schema))
else:
# Any timestamps must be coerced to be compatible with Spark
arrow_types = [to_arrow_type(TimestampType())
if is_datetime64_dtype(t) or is_datetime64tz_dtype(t) else None
for t in pdf.dtypes]
# Slice the DataFrame to be batched
step = -(-len(pdf) // self.sparkContext.defaultParallelism) # round int up
pdf_slices = (pdf[start:start + step] for start in xrange(0, len(pdf), step))
# Create Arrow record batches
batches = [_create_batch([(c, t) for (_, c), t in zip(pdf_slice.iteritems(), arrow_types)],
timezone)
for pdf_slice in pdf_slices]
# Create the Spark schema from the first Arrow batch (always at least 1 batch after slicing)
if isinstance(schema, (list, tuple)):
struct = from_arrow_schema(batches[0].schema)
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
# Create the Spark DataFrame directly from the Arrow data and schema
jrdd = self._sc._serialize_to_jvm(batches, len(batches), ArrowSerializer())
jdf = self._jvm.PythonSQLUtils.arrowPayloadToDataFrame(
jrdd, schema.json(), self._wrapped._jsqlContext)
df = DataFrame(jdf, self._wrapped)
df._schema = schema
return df
@since(2.0)
@ignore_unicode_prefix
def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
"""
Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.
When ``schema`` is a list of column names, the type of each column
will be inferred from ``data``.
When ``schema`` is ``None``, it will try to infer the schema (column names and types)
from ``data``, which should be an RDD of :class:`Row`,
or :class:`namedtuple`, or :class:`dict`.
When ``schema`` is :class:`pyspark.sql.types.DataType` or a datatype string, it must match
the real data, or an exception will be thrown at runtime. If the given schema is not
:class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value",
each record will also be wrapped into a tuple, which can be converted to row later.
If schema inference is needed, ``samplingRatio`` is used to determined the ratio of
rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.
:param data: an RDD of any kind of SQL data representation(e.g. row, tuple, int, boolean,
etc.), or :class:`list`, or :class:`pandas.DataFrame`.
:param schema: a :class:`pyspark.sql.types.DataType` or a datatype string or a list of
column names, default is ``None``. The data type string format equals to
:class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`. We can also use
``int`` as a short name for ``IntegerType``.
:param samplingRatio: the sample ratio of rows used for inferring
:param verifySchema: verify data types of every row against schema.
:return: :class:`DataFrame`
.. versionchanged:: 2.1
Added verifySchema.
>>> l = [('Alice', 1)]
>>> spark.createDataFrame(l).collect()
[Row(_1=u'Alice', _2=1)]
>>> spark.createDataFrame(l, ['name', 'age']).collect()
[Row(name=u'Alice', age=1)]
>>> d = [{'name': 'Alice', 'age': 1}]
>>> spark.createDataFrame(d).collect()
[Row(age=1, name=u'Alice')]
>>> rdd = sc.parallelize(l)
>>> spark.createDataFrame(rdd).collect()
[Row(_1=u'Alice', _2=1)]
>>> df = spark.createDataFrame(rdd, ['name', 'age'])
>>> df.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql import Row
>>> Person = Row('name', 'age')
>>> person = rdd.map(lambda r: Person(*r))
>>> df2 = spark.createDataFrame(person)
>>> df2.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql.types import *
>>> schema = StructType([
... StructField("name", StringType(), True),
... StructField("age", IntegerType(), True)])
>>> df3 = spark.createDataFrame(rdd, schema)
>>> df3.collect()
[Row(name=u'Alice', age=1)]
>>> spark.createDataFrame(df.toPandas()).collect() # doctest: +SKIP
[Row(name=u'Alice', age=1)]
>>> spark.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP
[Row(0=1, 1=2)]
>>> spark.createDataFrame(rdd, "a: string, b: int").collect()
[Row(a=u'Alice', b=1)]
>>> rdd = rdd.map(lambda row: row[1])
>>> spark.createDataFrame(rdd, "int").collect()
[Row(value=1)]
>>> spark.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError: ...
"""
if isinstance(data, DataFrame):
raise TypeError("data is already a DataFrame")
if isinstance(schema, basestring):
schema = _parse_datatype_string(schema)
elif isinstance(schema, (list, tuple)):
# Must re-encode any unicode strings to be consistent with StructField names
schema = [x.encode('utf-8') if not isinstance(x, str) else x for x in schema]
try:
import pandas
has_pandas = True
except Exception:
has_pandas = False
if has_pandas and isinstance(data, pandas.DataFrame):
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
if self.conf.get("spark.sql.execution.pandas.respectSessionTimeZone").lower() \
== "true":
timezone = self.conf.get("spark.sql.session.timeZone")
else:
timezone = None
# If no schema supplied by user then get the names of columns only
if schema is None:
schema = [str(x) if not isinstance(x, basestring) else
(x.encode('utf-8') if not isinstance(x, str) else x)
for x in data.columns]
if self.conf.get("spark.sql.execution.arrow.enabled", "false").lower() == "true" \
and len(data) > 0:
try:
return self._create_from_pandas_with_arrow(data, schema, timezone)
except Exception as e:
warnings.warn("Arrow will not be used in createDataFrame: %s" % str(e))
# Fallback to create DataFrame without arrow if raise some exception
data = self._convert_from_pandas(data, schema, timezone)
if isinstance(schema, StructType):
verify_func = _make_type_verifier(schema) if verifySchema else lambda _: True
def prepare(obj):
verify_func(obj)
return obj
elif isinstance(schema, DataType):
dataType = schema
schema = StructType().add("value", schema)
verify_func = _make_type_verifier(
dataType, name="field value") if verifySchema else lambda _: True
def prepare(obj):
verify_func(obj)
return obj,
else:
prepare = lambda obj: obj
if isinstance(data, RDD):
rdd, schema = self._createFromRDD(data.map(prepare), schema, samplingRatio)
else:
rdd, schema = self._createFromLocal(map(prepare, data), schema)
jrdd = self._jvm.SerDeUtil.toJavaArray(rdd._to_java_object_rdd())
jdf = self._jsparkSession.applySchemaToPythonRDD(jrdd.rdd(), schema.json())
df = DataFrame(jdf, self._wrapped)
df._schema = schema
return df
@ignore_unicode_prefix
@since(2.0)
def sql(self, sqlQuery):
"""Returns a :class:`DataFrame` representing the result of the given query.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.sql("SELECT field1 AS f1, field2 as f2 from table1")
>>> df2.collect()
[Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
"""
return DataFrame(self._jsparkSession.sql(sqlQuery), self._wrapped)
@since(2.0)
def table(self, tableName):
"""Returns the specified table as a :class:`DataFrame`.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.table("table1")
>>> sorted(df.collect()) == sorted(df2.collect())
True
"""
return DataFrame(self._jsparkSession.table(tableName), self._wrapped)
@property
@since(2.0)
def read(self):
"""
Returns a :class:`DataFrameReader` that can be used to read data
in as a :class:`DataFrame`.
:return: :class:`DataFrameReader`
"""
return DataFrameReader(self._wrapped)
@property
@since(2.0)
def readStream(self):
"""
Returns a :class:`DataStreamReader` that can be used to read data streams
as a streaming :class:`DataFrame`.
.. note:: Evolving.
:return: :class:`DataStreamReader`
"""
return DataStreamReader(self._wrapped)
@property
@since(2.0)
def streams(self):
"""Returns a :class:`StreamingQueryManager` that allows managing all the
:class:`StreamingQuery` StreamingQueries active on `this` context.
.. note:: Evolving.
:return: :class:`StreamingQueryManager`
"""
from pyspark.sql.streaming import StreamingQueryManager
return StreamingQueryManager(self._jsparkSession.streams())
@since(2.0)
def stop(self):
"""Stop the underlying :class:`SparkContext`.
"""
self._sc.stop()
SparkSession._instantiatedSession = None
@since(2.0)
def __enter__(self):
"""
Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
"""
return self
@since(2.0)
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
Specifically stop the SparkSession on exit of the with block.
"""
self.stop()
def _test():
import os
import doctest
from pyspark.context import SparkContext
from pyspark.sql import Row
import pyspark.sql.session
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.sql.session.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['sc'] = sc
globs['spark'] = SparkSession(sc)
globs['rdd'] = rdd = sc.parallelize(
[Row(field1=1, field2="row1"),
Row(field1=2, field2="row2"),
Row(field1=3, field2="row3")])
globs['df'] = rdd.toDF()
(failure_count, test_count) = doctest.testmod(
pyspark.sql.session, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
|
py | 1a3b1e83453d7cecbc7e3296398be42b3c25a261 | # This file contains globals used across the dottizer program
# ==============================================================================
# User Specified Params
# ==============================================================================
# drill_sizes = [] # Sets available drill bits when creating images
drill_sizes = [
0.0,
1.0/8.0,
9.0/64.0,
5.0/32.0,
3.0/16.0,
1.0/4.0,
5.0/16.0,
3.0/8.0,
1.0/2.0,
5.0/8.0,
3.0/4.0,
7.0/8.0,
1.0
]
# Function for generating drill_sizes global var
def generate_global_drill_sizes():
print('called generate_global_drill_sizes()')
global drill_sizes
drill_sizes = []
for size in optional_drill_sizes:
if size[1]:
drill_sizes.append(size[0])
print(drill_sizes)
# drill_sizes = np.asarray(temp_drill_sizes)
optional_drill_sizes = [
[0.0, True],
[1.0/8.0, True],
[9.0/64.0, True],
[5.0/32.0, True],
[3.0/16.0, True],
[1.0/4.0, True],
[5.0/16.0, True],
[3.0/8.0, True],
[1.0/2.0, True], # Largest regular drill bit
[5.0/8.0, False],
[3.0/4.0, False],
[7.0/8.0, False],
[1.0, False]
]
generate_global_drill_sizes()
dist_between_holes = 0.4 # Sets the distance between holes (edge to edge in inches)
border_x = 2.0 # Sets the border width (in inches)
border_y = 2.0 # Sets the border height (in inches)
input_image = "input.png" # Input image for dottizing
dots_wide = 50 # How many drill holes wide the image should be (dotsTall will be calculated from this)
pixels_per_inch = 40 # Sets scaling of rendered image(s)
pixels_per_inch_render = 80
# ==============================================================================
# Other Params (not exposed to user yet)
# ==============================================================================
font_size = 12 # Size of global font
# Image colors
font_color = (255, 255, 255, 255) # Color of global font
grid_color = (128, 128, 128, 128) # Color of background dot grid
inset_border_color = (0, 0, 255, 255) # Color of edge border line
# Params for making series of images
# series_base_width = dotsWide # dotWidth for initial image
# series_increment = 10 # How many dot increments to make images for
# ==============================================================================
# Internal Dottizer Stuff
# ==============================================================================
# workDir = 'dottizerFiles/' # Directory for temporary dottizer files
out_dir = 'out/' # Directory for output images
# Size of source image view
src_view_x = 300
src_view_y = 225
# Size of output image view
out_view_x = 1200/2
out_view_y = 900/2
# out_view_x = 1200
# out_view_y = 900
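# Hedged helper sketch added by the editor (not referenced elsewhere in dottizer):
# how the inch-based parameters above are assumed to map onto rendered pixels.
def inches_to_pixels(inches, for_render=False):
    """Convert a length in inches to pixels using this module's scaling globals."""
    scale = pixels_per_inch_render if for_render else pixels_per_inch
    return int(round(inches * scale))
# e.g. inches_to_pixels(dist_between_holes) == 16 at the default 40 px/inch.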
|
py | 1a3b1ec5e03808872bef6727f70083b04ac7c803 |
from .tflite_model_parameters import (TfliteModelParameters, TFLITE_METADATA_TAG)
|
py | 1a3b1f0849c75bb92d35204b0d998d727e882e75 | import numpy as np
import pandas as pd
def clean_Data(data, fill=0, with_pages=False):
'''
Clean the input data, including fill nan with 0 and removing page column
Args:
data: input data for clean
type: ndarray, shape: (series, time_step)
fill: the number to fill
type: INT
with_pages: check whether it has page column
type: Boolean
Return:
data: clean data
type: ndarray, shape: (series, time_step)
'''
data = pd.DataFrame(data)
    data = data.fillna(fill).values  # use the fill argument rather than hard-coding 0
if with_pages:
data = data[:,1:]
return data
def normalise_transform(data):
'''
This is for normalising the input data.
Args:
data: input data to transform
type: ndarray, shape: (series, time_step)
Return:
transformed_Data: transformed data
type: ndarray, shape: (series, time_step)
'''
transformed_Data = np.log1p(data*0.5).astype('float32')
return transformed_Data
def normalise_reverse(data):
'''
This is for rescaling the scaled data.
Args:
data: input data to rescale
type: ndarray, shape: (series, time_step)
Return:
reversed_data: rescaled data
type: ndarray, shape: (series, time_step)
'''
reversed_data = np.expm1(data)/0.5
return reversed_data
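# Illustrative self-check (not part of the original pipeline): normalise_transform and
# normalise_reverse are intended to be inverses, since expm1(log1p(0.5*x))/0.5 recovers x
# up to float32 rounding. A quick round-trip on a toy array:
def _normalisation_round_trip_check():
    toy = np.array([[0.0, 2.0, 10.0], [1.0, 5.0, 100.0]])
    recovered = normalise_reverse(normalise_transform(toy))
    assert np.allclose(recovered, toy)
    return recovered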
def split_data(train, test, pred_days=60):
'''
    This is for splitting raw data into train_X, train_y, test_X, and test_y
Args:
train: raw train data
type: ndarray, shape: (series, time_step)
test: raw test data
type: ndarray, shape: (series, time_step)
        pred_days: number of days to forecast
type: INT
Return:
train_X: data of training input
type: ndarray, shape: (series, time_step)
train_y: data of training output
type: ndarray, shape: (series, pred_days)
test_X: data of testing input
type: ndarray, shape: (series, time_step)
test_y: raw data for validating testing (ground truth)
type: ndarray, shape: (series, pred_days)
'''
series_numb, total_length = train.shape
print(series_numb, total_length)
train_X = train[:, :(total_length - pred_days)]
train_y = train[:, -(pred_days):]
test_X = train[:, pred_days:total_length]
test_y = test[:, total_length:(total_length+pred_days)]
print(test_X.shape, test_y.shape)
series, train_step = train_X.shape
series, pred_step = train_y.shape
train_X = train_X.reshape(series,1,train_step)
train_y = train_y.reshape(series,1,pred_step)
test_X = test_X.reshape(series,1,train_step)
test_y = test_y.reshape(series,1,pred_step)
return train_X, train_y, test_X, test_y
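# Minimal usage sketch (illustrative, with random data): train spans the history used for
# fitting, while test must additionally contain the pred_days columns that follow it,
# because test_y is sliced from beyond total_length.
def _split_data_demo():
    train = np.random.rand(5, 500)
    test = np.random.rand(5, 560)
    return split_data(train, test, pred_days=60)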
|
py | 1a3b1fb90dc274920331d73e1970e886be68388a | """Functions for building the face recognition network.
"""
# MIT License
#
# Copyright (c) 2016 David Sandberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from subprocess import Popen, PIPE
import tensorflow as tf
import numpy as np
from scipy import misc
from sklearn.model_selection import KFold
from scipy import interpolate
from tensorflow.python.training import training
import random
import re
from tensorflow.python.platform import gfile
import math
from six import iteritems
import imageio
def triplet_loss(anchor, positive, negative, alpha):
"""Calculate the triplet loss according to the FaceNet paper
Args:
anchor: the embeddings for the anchor images.
positive: the embeddings for the positive images.
      negative: the embeddings for the negative images.
      alpha: margin that is enforced between positive and negative pair distances.
    Returns:
the triplet loss according to the FaceNet paper as a float tensor.
"""
with tf.variable_scope('triplet_loss'):
pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), 1)
neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), 1)
basic_loss = tf.add(tf.subtract(pos_dist,neg_dist), alpha)
loss = tf.reduce_mean(tf.maximum(basic_loss, 0.0), 0)
return loss
def center_loss(features, label, alfa, nrof_classes):
"""Center loss based on the paper "A Discriminative Feature Learning Approach for Deep Face Recognition"
(http://ydwen.github.io/papers/WenECCV16.pdf)
"""
nrof_features = features.get_shape()[1]
centers = tf.get_variable('centers', [nrof_classes, nrof_features], dtype=tf.float32,
initializer=tf.constant_initializer(0), trainable=False)
label = tf.reshape(label, [-1])
centers_batch = tf.gather(centers, label)
diff = (1 - alfa) * (centers_batch - features)
centers = tf.scatter_sub(centers, label, diff)
with tf.control_dependencies([centers]):
loss = tf.reduce_mean(tf.square(features - centers_batch))
return loss, centers
def get_image_paths_and_labels(dataset):
image_paths_flat = []
labels_flat = []
for i in range(len(dataset)):
image_paths_flat += dataset[i].image_paths
labels_flat += [i] * len(dataset[i].image_paths)
return image_paths_flat, labels_flat
def shuffle_examples(image_paths, labels):
shuffle_list = list(zip(image_paths, labels))
random.shuffle(shuffle_list)
image_paths_shuff, labels_shuff = zip(*shuffle_list)
return image_paths_shuff, labels_shuff
def random_rotate_image(image):
angle = np.random.uniform(low=-10.0, high=10.0)
return misc.imrotate(image, angle, 'bicubic')
# 1: Random rotate 2: Random crop 4: Random flip 8: Fixed image standardization 16: Flip
RANDOM_ROTATE = 1
RANDOM_CROP = 2
RANDOM_FLIP = 4
FIXED_STANDARDIZATION = 8
FLIP = 16
def create_input_pipeline(input_queue, image_size, nrof_preprocess_threads, batch_size_placeholder):
images_and_labels_list = []
for _ in range(nrof_preprocess_threads):
filenames, label, control = input_queue.dequeue()
images = []
for filename in tf.unstack(filenames):
file_contents = tf.io.read_file(filename)
image = tf.image.decode_image(file_contents, 3)
image = tf.cond(get_control_flag(control[0], RANDOM_ROTATE),
lambda:tf.compat.v1.py_func(random_rotate_image, [image], tf.uint8),
lambda:tf.identity(image))
image = tf.cond(get_control_flag(control[0], RANDOM_CROP),
lambda:tf.compat.v1.random_crop(image, image_size + (3,)),
lambda:tf.image.resize_with_crop_or_pad(image, image_size[0], image_size[1]))
image = tf.cond(get_control_flag(control[0], RANDOM_FLIP),
lambda:tf.image.random_flip_left_right(image),
lambda:tf.identity(image))
image = tf.cond(get_control_flag(control[0], FIXED_STANDARDIZATION),
lambda:(tf.cast(image, tf.float32) - 127.5)/128.0,
lambda:tf.image.per_image_standardization(image))
image = tf.cond(get_control_flag(control[0], FLIP),
lambda:tf.image.flip_left_right(image),
lambda:tf.identity(image))
#pylint: disable=no-member
image.set_shape(image_size + (3,))
images.append(image)
images_and_labels_list.append([images, label])
image_batch, label_batch = tf.train.batch_join(
images_and_labels_list, batch_size=batch_size_placeholder,
shapes=[image_size + (3,), ()], enqueue_many=True,
capacity=4 * nrof_preprocess_threads * 100,
allow_smaller_final_batch=True)
return image_batch, label_batch
def get_control_flag(control, field):
return tf.equal(tf.compat.v1.mod(tf.compat.v1.floor_div(control, field), 2), 1)
def _add_loss_summaries(total_loss):
"""Add summaries for losses.
Generates moving average for all losses and associated summaries for
visualizing the performance of the network.
Args:
total_loss: Total loss from loss().
Returns:
loss_averages_op: op for generating moving averages of losses.
"""
# Compute the moving average of all individual losses and the total loss.
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
losses = tf.get_collection('losses')
loss_averages_op = loss_averages.apply(losses + [total_loss])
# Attach a scalar summmary to all individual losses and the total loss; do the
# same for the averaged version of the losses.
for l in losses + [total_loss]:
# Name each loss as '(raw)' and name the moving average version of the loss
# as the original loss name.
tf.summary.scalar(l.op.name +' (raw)', l)
tf.summary.scalar(l.op.name, loss_averages.average(l))
return loss_averages_op
def train(total_loss, global_step, optimizer, learning_rate, moving_average_decay, update_gradient_vars, log_histograms=True):
# Generate moving averages of all losses and associated summaries.
loss_averages_op = _add_loss_summaries(total_loss)
# Compute gradients.
with tf.control_dependencies([loss_averages_op]):
if optimizer=='ADAGRAD':
opt = tf.train.AdagradOptimizer(learning_rate)
elif optimizer=='ADADELTA':
opt = tf.train.AdadeltaOptimizer(learning_rate, rho=0.9, epsilon=1e-6)
elif optimizer=='ADAM':
opt = tf.train.AdamOptimizer(learning_rate, beta1=0.9, beta2=0.999, epsilon=0.1)
elif optimizer=='RMSPROP':
opt = tf.train.RMSPropOptimizer(learning_rate, decay=0.9, momentum=0.9, epsilon=1.0)
elif optimizer=='MOM':
opt = tf.train.MomentumOptimizer(learning_rate, 0.9, use_nesterov=True)
else:
raise ValueError('Invalid optimization algorithm')
grads = opt.compute_gradients(total_loss, update_gradient_vars)
# Apply gradients.
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
# Add histograms for trainable variables.
if log_histograms:
for var in tf.trainable_variables():
tf.summary.histogram(var.op.name, var)
# Add histograms for gradients.
if log_histograms:
for grad, var in grads:
if grad is not None:
tf.summary.histogram(var.op.name + '/gradients', grad)
# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(
moving_average_decay, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
train_op = tf.no_op(name='train')
return train_op
def prewhiten(x):
mean = np.mean(x)
std = np.std(x)
std_adj = np.maximum(std, 1.0/np.sqrt(x.size))
y = np.multiply(np.subtract(x, mean), 1/std_adj)
return y
def crop(image, random_crop, image_size):
if image.shape[1]>image_size:
sz1 = int(image.shape[1]//2)
sz2 = int(image_size//2)
if random_crop:
diff = sz1-sz2
(h, v) = (np.random.randint(-diff, diff+1), np.random.randint(-diff, diff+1))
else:
(h, v) = (0,0)
image = image[(sz1-sz2+v):(sz1+sz2+v),(sz1-sz2+h):(sz1+sz2+h),:]
return image
def flip(image, random_flip):
if random_flip and np.random.choice([True, False]):
image = np.fliplr(image)
return image
def to_rgb(img):
w, h = img.shape
ret = np.empty((w, h, 3), dtype=np.uint8)
ret[:, :, 0] = ret[:, :, 1] = ret[:, :, 2] = img
return ret
def load_data(image_paths, do_random_crop, do_random_flip, image_size, do_prewhiten=True):
nrof_samples = len(image_paths)
images = np.zeros((nrof_samples, image_size, image_size, 3))
data_type = type(image_paths[0])
for i in range(nrof_samples):
if data_type is str:
img = imageio.imread(image_paths[i])
else:
img = image_paths[i]
if img.ndim == 2:
img = to_rgb(img)
if do_prewhiten:
img = prewhiten(img)
img = crop(img, do_random_crop, image_size)
img = flip(img, do_random_flip)
images[i,:,:,:] = img
return images
def get_label_batch(label_data, batch_size, batch_index):
nrof_examples = np.size(label_data, 0)
j = batch_index*batch_size % nrof_examples
if j+batch_size<=nrof_examples:
batch = label_data[j:j+batch_size]
else:
x1 = label_data[j:nrof_examples]
x2 = label_data[0:nrof_examples-j]
batch = np.vstack([x1,x2])
batch_int = batch.astype(np.int64)
return batch_int
def get_batch(image_data, batch_size, batch_index):
nrof_examples = np.size(image_data, 0)
j = batch_index*batch_size % nrof_examples
if j+batch_size<=nrof_examples:
batch = image_data[j:j+batch_size,:,:,:]
else:
x1 = image_data[j:nrof_examples,:,:,:]
x2 = image_data[0:nrof_examples-j,:,:,:]
batch = np.vstack([x1,x2])
batch_float = batch.astype(np.float32)
return batch_float
def get_triplet_batch(triplets, batch_index, batch_size):
ax, px, nx = triplets
a = get_batch(ax, int(batch_size/3), batch_index)
p = get_batch(px, int(batch_size/3), batch_index)
n = get_batch(nx, int(batch_size/3), batch_index)
batch = np.vstack([a, p, n])
return batch
def get_learning_rate_from_file(filename, epoch):
with open(filename, 'r') as f:
for line in f.readlines():
line = line.split('#', 1)[0]
if line:
par = line.strip().split(':')
e = int(par[0])
if par[1]=='-':
lr = -1
else:
lr = float(par[1])
if e <= epoch:
learning_rate = lr
else:
return learning_rate
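# Illustrative sketch (not from the original repository): get_learning_rate_from_file expects
# a plain-text schedule in which each non-comment line maps an epoch to a learning rate as
# "<epoch>: <rate>". Text after '#' is ignored and a bare '-' (written as "<epoch>:-") maps to
# -1, conventionally used as a stop value. A hypothetical schedule file could look like this:
def _write_example_learning_rate_schedule(filename):
    schedule = (
        "# epoch: learning rate\n"
        "0: 0.1\n"
        "100: 0.01\n"
        "200: 0.001\n"
        "300:-\n"
    )
    with open(filename, 'w') as f:
        f.write(schedule)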
class ImageClass():
"Stores the paths to images for a given class"
def __init__(self, name, image_paths):
self.name = name
self.image_paths = image_paths
def __str__(self):
return self.name + ', ' + str(len(self.image_paths)) + ' images'
def __len__(self):
return len(self.image_paths)
def get_dataset(path, has_class_directories=True):
dataset = []
path_exp = os.path.expanduser(path)
classes = [path for path in os.listdir(path_exp) \
if os.path.isdir(os.path.join(path_exp, path))]
classes.sort()
nrof_classes = len(classes)
for i in range(nrof_classes):
class_name = classes[i]
facedir = os.path.join(path_exp, class_name)
image_paths = get_image_paths(facedir)
dataset.append(ImageClass(class_name, image_paths))
return dataset
def get_image_paths(facedir):
image_paths = []
if os.path.isdir(facedir):
images = os.listdir(facedir)
image_paths = [os.path.join(facedir,img) for img in images]
return image_paths
def split_dataset(dataset, split_ratio, min_nrof_images_per_class, mode):
if mode=='SPLIT_CLASSES':
nrof_classes = len(dataset)
class_indices = np.arange(nrof_classes)
np.random.shuffle(class_indices)
split = int(round(nrof_classes*(1-split_ratio)))
train_set = [dataset[i] for i in class_indices[0:split]]
test_set = [dataset[i] for i in class_indices[split:-1]]
elif mode=='SPLIT_IMAGES':
train_set = []
test_set = []
for cls in dataset:
paths = cls.image_paths
np.random.shuffle(paths)
nrof_images_in_class = len(paths)
split = int(math.floor(nrof_images_in_class*(1-split_ratio)))
if split==nrof_images_in_class:
split = nrof_images_in_class-1
if split>=min_nrof_images_per_class and nrof_images_in_class-split>=1:
train_set.append(ImageClass(cls.name, paths[:split]))
test_set.append(ImageClass(cls.name, paths[split:]))
else:
raise ValueError('Invalid train/test split mode "%s"' % mode)
return train_set, test_set
def load_model(model, input_map=None):
# Check if the model is a model directory (containing a metagraph and a checkpoint file)
# or if it is a protobuf file with a frozen graph
model_exp = os.path.expanduser(model)
if (os.path.isfile(model_exp)):
print('Model filename: %s' % model_exp)
with gfile.FastGFile(model_exp,'rb') as f:
graph_def = tf.compat.v1.GraphDef()
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, input_map=input_map, name='')
else:
print('Model directory: %s' % model_exp)
meta_file, ckpt_file = get_model_filenames(model_exp)
print('Metagraph file: %s' % meta_file)
print('Checkpoint file: %s' % ckpt_file)
saver = tf.train.import_meta_graph(os.path.join(model_exp, meta_file), input_map=input_map)
saver.restore(tf.get_default_session(), os.path.join(model_exp, ckpt_file))
def get_model_filenames(model_dir):
files = os.listdir(model_dir)
meta_files = [s for s in files if s.endswith('.meta')]
if len(meta_files)==0:
raise ValueError('No meta file found in the model directory (%s)' % model_dir)
elif len(meta_files)>1:
raise ValueError('There should not be more than one meta file in the model directory (%s)' % model_dir)
meta_file = meta_files[0]
ckpt = tf.train.get_checkpoint_state(model_dir)
if ckpt and ckpt.model_checkpoint_path:
ckpt_file = os.path.basename(ckpt.model_checkpoint_path)
return meta_file, ckpt_file
meta_files = [s for s in files if '.ckpt' in s]
max_step = -1
for f in files:
step_str = re.match(r'(^model-[\w\- ]+.ckpt-(\d+))', f)
if step_str is not None and len(step_str.groups())>=2:
step = int(step_str.groups()[1])
if step > max_step:
max_step = step
ckpt_file = step_str.groups()[0]
return meta_file, ckpt_file
def distance(embeddings1, embeddings2, distance_metric=0):
if distance_metric==0:
# Euclidian distance
diff = np.subtract(embeddings1, embeddings2)
dist = np.sum(np.square(diff),1)
elif distance_metric==1:
# Distance based on cosine similarity
dot = np.sum(np.multiply(embeddings1, embeddings2), axis=1)
norm = np.linalg.norm(embeddings1, axis=1) * np.linalg.norm(embeddings2, axis=1)
similarity = dot / norm
dist = np.arccos(similarity) / math.pi
else:
        raise ValueError('Undefined distance metric %d' % distance_metric)
return dist
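# Illustrative usage (not from the original repository): a toy comparison of the two metrics.
# distance_metric=0 returns squared Euclidean distances (no square root is taken) and
# distance_metric=1 returns angular distances, arccos of the cosine similarity divided by pi.
def _example_distance_metrics():
    emb1 = np.array([[1.0, 0.0], [1.0, 1.0]])
    emb2 = np.array([[0.0, 1.0], [2.0, 2.0]])
    euclidean = distance(emb1, emb2, distance_metric=0)  # [2.0, 2.0]
    angular = distance(emb1, emb2, distance_metric=1)    # [0.5, 0.0]
    return euclidean, angular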
def calculate_roc(thresholds, embeddings1, embeddings2, actual_issame, nrof_folds=10, distance_metric=0, subtract_mean=False):
assert(embeddings1.shape[0] == embeddings2.shape[0])
assert(embeddings1.shape[1] == embeddings2.shape[1])
nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
nrof_thresholds = len(thresholds)
k_fold = KFold(n_splits=nrof_folds, shuffle=False)
tprs = np.zeros((nrof_folds,nrof_thresholds))
fprs = np.zeros((nrof_folds,nrof_thresholds))
accuracy = np.zeros((nrof_folds))
indices = np.arange(nrof_pairs)
for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):
if subtract_mean:
mean = np.mean(np.concatenate([embeddings1[train_set], embeddings2[train_set]]), axis=0)
else:
mean = 0.0
dist = distance(embeddings1-mean, embeddings2-mean, distance_metric)
# Find the best threshold for the fold
acc_train = np.zeros((nrof_thresholds))
for threshold_idx, threshold in enumerate(thresholds):
_, _, acc_train[threshold_idx] = calculate_accuracy(threshold, dist[train_set], actual_issame[train_set])
best_threshold_index = np.argmax(acc_train)
for threshold_idx, threshold in enumerate(thresholds):
tprs[fold_idx,threshold_idx], fprs[fold_idx,threshold_idx], _ = calculate_accuracy(threshold, dist[test_set], actual_issame[test_set])
_, _, accuracy[fold_idx] = calculate_accuracy(thresholds[best_threshold_index], dist[test_set], actual_issame[test_set])
tpr = np.mean(tprs,0)
fpr = np.mean(fprs,0)
return tpr, fpr, accuracy
def calculate_accuracy(threshold, dist, actual_issame):
predict_issame = np.less(dist, threshold)
tp = np.sum(np.logical_and(predict_issame, actual_issame))
fp = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))
tn = np.sum(np.logical_and(np.logical_not(predict_issame), np.logical_not(actual_issame)))
fn = np.sum(np.logical_and(np.logical_not(predict_issame), actual_issame))
tpr = 0 if (tp+fn==0) else float(tp) / float(tp+fn)
fpr = 0 if (fp+tn==0) else float(fp) / float(fp+tn)
acc = float(tp+tn)/dist.size
return tpr, fpr, acc
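# Illustrative usage (not from the original repository): calculate_accuracy thresholds the
# distances and compares the resulting same/different predictions against the ground truth.
def _example_calculate_accuracy():
    dist = np.array([0.2, 0.9, 0.4, 1.5])
    actual_issame = np.array([True, False, True, False])
    tpr, fpr, acc = calculate_accuracy(threshold=0.5, dist=dist, actual_issame=actual_issame)
    # All four pairs are classified correctly, so tpr == 1.0, fpr == 0.0 and acc == 1.0.
    return tpr, fpr, acc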
def calculate_val(thresholds, embeddings1, embeddings2, actual_issame, far_target, nrof_folds=10, distance_metric=0, subtract_mean=False):
assert(embeddings1.shape[0] == embeddings2.shape[0])
assert(embeddings1.shape[1] == embeddings2.shape[1])
nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
nrof_thresholds = len(thresholds)
k_fold = KFold(n_splits=nrof_folds, shuffle=False)
val = np.zeros(nrof_folds)
far = np.zeros(nrof_folds)
indices = np.arange(nrof_pairs)
for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):
if subtract_mean:
mean = np.mean(np.concatenate([embeddings1[train_set], embeddings2[train_set]]), axis=0)
else:
mean = 0.0
dist = distance(embeddings1-mean, embeddings2-mean, distance_metric)
# Find the threshold that gives FAR = far_target
far_train = np.zeros(nrof_thresholds)
for threshold_idx, threshold in enumerate(thresholds):
_, far_train[threshold_idx] = calculate_val_far(threshold, dist[train_set], actual_issame[train_set])
if np.max(far_train)>=far_target:
f = interpolate.interp1d(far_train, thresholds, kind='slinear')
threshold = f(far_target)
else:
threshold = 0.0
val[fold_idx], far[fold_idx] = calculate_val_far(threshold, dist[test_set], actual_issame[test_set])
val_mean = np.mean(val)
far_mean = np.mean(far)
val_std = np.std(val)
return val_mean, val_std, far_mean
def calculate_val_far(threshold, dist, actual_issame):
predict_issame = np.less(dist, threshold)
true_accept = np.sum(np.logical_and(predict_issame, actual_issame))
false_accept = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))
n_same = np.sum(actual_issame)
n_diff = np.sum(np.logical_not(actual_issame))
val = float(true_accept) / float(n_same)
far = float(false_accept) / float(n_diff)
return val, far
def store_revision_info(src_path, output_dir, arg_string):
try:
# Get git hash
cmd = ['git', 'rev-parse', 'HEAD']
gitproc = Popen(cmd, stdout = PIPE, cwd=src_path)
(stdout, _) = gitproc.communicate()
git_hash = stdout.strip()
except OSError as e:
git_hash = ' '.join(cmd) + ': ' + e.strerror
try:
# Get local changes
cmd = ['git', 'diff', 'HEAD']
gitproc = Popen(cmd, stdout = PIPE, cwd=src_path)
(stdout, _) = gitproc.communicate()
git_diff = stdout.strip()
except OSError as e:
git_diff = ' '.join(cmd) + ': ' + e.strerror
# Store a text file in the log directory
rev_info_filename = os.path.join(output_dir, 'revision_info.txt')
with open(rev_info_filename, "w") as text_file:
text_file.write('arguments: %s\n--------------------\n' % arg_string)
text_file.write('tensorflow version: %s\n--------------------\n' % tf.__version__) # @UndefinedVariable
text_file.write('git hash: %s\n--------------------\n' % git_hash)
text_file.write('%s' % git_diff)
def list_variables(filename):
reader = training.NewCheckpointReader(filename)
variable_map = reader.get_variable_to_shape_map()
names = sorted(variable_map.keys())
return names
def put_images_on_grid(images, shape=(16,8)):
nrof_images = images.shape[0]
img_size = images.shape[1]
bw = 3
img = np.zeros((shape[1]*(img_size+bw)+bw, shape[0]*(img_size+bw)+bw, 3), np.float32)
for i in range(shape[1]):
x_start = i*(img_size+bw)+bw
for j in range(shape[0]):
img_index = i*shape[0]+j
if img_index>=nrof_images:
break
y_start = j*(img_size+bw)+bw
img[x_start:x_start+img_size, y_start:y_start+img_size, :] = images[img_index, :, :, :]
if img_index>=nrof_images:
break
return img
def write_arguments_to_file(args, filename):
with open(filename, 'w') as f:
for key, value in iteritems(vars(args)):
f.write('%s: %s\n' % (key, str(value)))
|
py | 1a3b1fd7cefd03d3f7aec6ec39b460f9a22fec35 | import logging
import typing
from abc import ABC
from abc import abstractmethod
T = typing.TypeVar("T")
logger = logging.getLogger(__name__)
class AbstractExtension(ABC):
@abstractmethod
async def get_events(self) -> typing.List:
"""
Get events from any resource and returns list of events.
:return: list of coming events.
"""
pass
@abstractmethod
async def run(self, dp):
"""
In endless cycle get events from self.get_events function
and call dispatcher method dp._process_events.
:param dp: dispatcher
:return:
"""
pass
class BaseExtension(AbstractExtension, ABC):
"""
May be added to extensions with ExtensionsManager and
used for get events.
>>> extension_manager.run_extension(name=unique_key)
"""
key = None # unique key for access to extension
class ExtensionsManager:
def __init__(
self, dp, default_extensions: typing.Dict[str, typing.Type[BaseExtension]]
):
self.dp = dp
self.extensions: typing.Dict[str, typing.Type[BaseExtension]] = {}
self.extensions.update(default_extensions)
def setup(self, extension: typing.Type[BaseExtension]):
if extension.key is None:
raise RuntimeError("Unallowed key for extension")
self.extensions[extension.key] = extension
def run_extension(self, name: str, **extension_init_params) -> None:
"""
:param name: name of extension
:param extension_init_params: params which accept extension constructor
:return:
"""
if typing.TYPE_CHECKING:
BaseExtension = typing.Type[T] # noqa
extension: BaseExtension = self.extensions.get(name) # noqa
if not extension:
raise RuntimeError("Undefined extension")
extension: BaseExtension = extension(**extension_init_params)
self.dp.vk.loop.create_task(extension.run(self.dp))
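# Minimal sketch of a concrete extension (illustrative only; the polled resource and the
# dispatcher coroutine dp._process_events are assumptions based on the docstrings above).
# It fetches events from a user-supplied coroutine and forwards them to the dispatcher in
# an endless loop, as described by AbstractExtension.run.
class PollingExtension(BaseExtension):
    key = "polling"  # unique key used with ExtensionsManager.run_extension(name="polling")
    def __init__(self, fetcher: typing.Callable[[], typing.Awaitable[typing.List]]):
        self._fetcher = fetcher  # hypothetical coroutine returning a list of raw events
    async def get_events(self) -> typing.List:
        return await self._fetcher()
    async def run(self, dp):
        while True:
            events = await self.get_events()
            if events:
                await dp._process_events(events)  # assumed dispatcher API (see docstring above)
# Hypothetical registration: extension_manager.setup(PollingExtension) followed by
# extension_manager.run_extension(name="polling", fetcher=my_fetch_coroutine).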
|
py | 1a3b20a7798b0134b6700b22de3fa20fea67d09d | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._application_gateways_operations_async import ApplicationGatewaysOperations
from ._application_security_groups_operations_async import ApplicationSecurityGroupsOperations
from ._network_management_client_operations_async import NetworkManagementClientOperationsMixin
from ._ddos_protection_plans_operations_async import DdosProtectionPlansOperations
from ._available_endpoint_services_operations_async import AvailableEndpointServicesOperations
from ._express_route_circuit_authorizations_operations_async import ExpressRouteCircuitAuthorizationsOperations
from ._express_route_circuit_peerings_operations_async import ExpressRouteCircuitPeeringsOperations
from ._express_route_circuit_connections_operations_async import ExpressRouteCircuitConnectionsOperations
from ._express_route_circuits_operations_async import ExpressRouteCircuitsOperations
from ._express_route_service_providers_operations_async import ExpressRouteServiceProvidersOperations
from ._express_route_cross_connections_operations_async import ExpressRouteCrossConnectionsOperations
from ._express_route_cross_connection_peerings_operations_async import ExpressRouteCrossConnectionPeeringsOperations
from ._load_balancers_operations_async import LoadBalancersOperations
from ._load_balancer_backend_address_pools_operations_async import LoadBalancerBackendAddressPoolsOperations
from ._load_balancer_frontend_ip_configurations_operations_async import LoadBalancerFrontendIPConfigurationsOperations
from ._inbound_nat_rules_operations_async import InboundNatRulesOperations
from ._load_balancer_load_balancing_rules_operations_async import LoadBalancerLoadBalancingRulesOperations
from ._load_balancer_network_interfaces_operations_async import LoadBalancerNetworkInterfacesOperations
from ._load_balancer_probes_operations_async import LoadBalancerProbesOperations
from ._network_interfaces_operations_async import NetworkInterfacesOperations
from ._network_interface_ip_configurations_operations_async import NetworkInterfaceIPConfigurationsOperations
from ._network_interface_load_balancers_operations_async import NetworkInterfaceLoadBalancersOperations
from ._network_security_groups_operations_async import NetworkSecurityGroupsOperations
from ._security_rules_operations_async import SecurityRulesOperations
from ._default_security_rules_operations_async import DefaultSecurityRulesOperations
from ._network_watchers_operations_async import NetworkWatchersOperations
from ._packet_captures_operations_async import PacketCapturesOperations
from ._connection_monitors_operations_async import ConnectionMonitorsOperations
from ._operations_async import Operations
from ._public_ip_addresses_operations_async import PublicIPAddressesOperations
from ._route_filters_operations_async import RouteFiltersOperations
from ._route_filter_rules_operations_async import RouteFilterRulesOperations
from ._route_tables_operations_async import RouteTablesOperations
from ._routes_operations_async import RoutesOperations
from ._bgp_service_communities_operations_async import BgpServiceCommunitiesOperations
from ._usages_operations_async import UsagesOperations
from ._virtual_networks_operations_async import VirtualNetworksOperations
from ._subnets_operations_async import SubnetsOperations
from ._virtual_network_peerings_operations_async import VirtualNetworkPeeringsOperations
from ._virtual_network_gateways_operations_async import VirtualNetworkGatewaysOperations
from ._virtual_network_gateway_connections_operations_async import VirtualNetworkGatewayConnectionsOperations
from ._local_network_gateways_operations_async import LocalNetworkGatewaysOperations
__all__ = [
'ApplicationGatewaysOperations',
'ApplicationSecurityGroupsOperations',
'NetworkManagementClientOperationsMixin',
'DdosProtectionPlansOperations',
'AvailableEndpointServicesOperations',
'ExpressRouteCircuitAuthorizationsOperations',
'ExpressRouteCircuitPeeringsOperations',
'ExpressRouteCircuitConnectionsOperations',
'ExpressRouteCircuitsOperations',
'ExpressRouteServiceProvidersOperations',
'ExpressRouteCrossConnectionsOperations',
'ExpressRouteCrossConnectionPeeringsOperations',
'LoadBalancersOperations',
'LoadBalancerBackendAddressPoolsOperations',
'LoadBalancerFrontendIPConfigurationsOperations',
'InboundNatRulesOperations',
'LoadBalancerLoadBalancingRulesOperations',
'LoadBalancerNetworkInterfacesOperations',
'LoadBalancerProbesOperations',
'NetworkInterfacesOperations',
'NetworkInterfaceIPConfigurationsOperations',
'NetworkInterfaceLoadBalancersOperations',
'NetworkSecurityGroupsOperations',
'SecurityRulesOperations',
'DefaultSecurityRulesOperations',
'NetworkWatchersOperations',
'PacketCapturesOperations',
'ConnectionMonitorsOperations',
'Operations',
'PublicIPAddressesOperations',
'RouteFiltersOperations',
'RouteFilterRulesOperations',
'RouteTablesOperations',
'RoutesOperations',
'BgpServiceCommunitiesOperations',
'UsagesOperations',
'VirtualNetworksOperations',
'SubnetsOperations',
'VirtualNetworkPeeringsOperations',
'VirtualNetworkGatewaysOperations',
'VirtualNetworkGatewayConnectionsOperations',
'LocalNetworkGatewaysOperations',
]
|
py | 1a3b20e84478ed7c6c29229f83e28f6a5a58b778 | from board import Board
from player import HumanPlayer, RandomPlayer, MinimaxPlayer
class Game:
__active_player__ = None
__inactive_player__ = None
__player_1__ = None
__player_2__ = None
__move_symbol__ = {}
def __init__(self, player_1, player_2, size):
self.__player_1__ = player_1
self.__player_2__ = player_2
self.__board__ = Board(size)
self.__active_player__ = player_1
self.__inactive_player__ = player_2
def switch_turn(self):
temp = self.__inactive_player__
self.__inactive_player__ = self.__active_player__
self.__active_player__ = temp
def play_game(self):
game_over = False
# self.__board__.board = [["O", "X", " "],
# ["X", " ", " "],
# [" ", " ", " "]]
# self.__board__.possible_moves = [(0,2), (1, 2), (2,0), (2,2)]
while not game_over:
print("======== {} turn! ========".format(self.__active_player__.get_name()))
move = self.__active_player__.move(self.__board__)
game_over, msg, player = self.__board__.apply_move(self.__active_player__, move)
if (game_over):
ending_msg = "><><><><><>< {} ><><><><><><".format(msg)
print("*" * len(ending_msg))
print(ending_msg)
print("*" * len(ending_msg))
else:
self.switch_turn()
player1 = HumanPlayer("O")
player2 = MinimaxPlayer("X")
board_size = 3
game = Game(player1, player2, board_size)
game.play_game() |
py | 1a3b21604ff52496a4ed537fa11cf2d681c1f1a7 | #!/usr/bin/env python3
# MIT License
#
# Copyright (C) 2019-2020, Entynetproject. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import json
import random
mails = (
"mail.ru",
"inbox.ru",
"list.ru",
"bk.ru",
"ya.ru",
"yandex.com",
"yandex.ua",
"yandex.ru",
"gmail.com"
)
# Get random service
def random_service(list):
return random.choice(list)
# Create random name
def random_name():
with open("tools/SMS/names.json", 'r') as names:
names = json.load(names)["names"]
return random.choice(names)
# Create random suffix for email
# %random_name%SUFFIX@%random_email%
def random_suffix(int_range = 4):
numbers = []
for _ in range(int_range):
numbers.append(str(random.randint(1, 9)))
return "".join(numbers)
# Create random email by name, suffix, mail
# Example: [email protected]
def random_email():
return random_name() + random_suffix() + "@" + random.choice(mails)
# Create random password
# %random_name%%random_suffix%
def random_password():
return random_name() + random_suffix(int_range = 10)
# Get random user agent
def random_useragent():
with open("tools/SMS/user_agents.json", 'r') as agents:
user_agents = json.load(agents)["agents"]
return random.choice(user_agents)
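# Illustrative helper (not part of the original script): the generators above can be combined
# into a throwaway identity. random_name() and random_useragent() read tools/SMS/names.json
# and tools/SMS/user_agents.json, so this only works where those data files are present.
def random_identity():
    return {
        "email": random_email(),
        "password": random_password(),
        "user_agent": random_useragent(),
    }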
|
py | 1a3b228b86be260f75e87c0811574b92168d8c9d | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example script to train the DNC on a repeated copy task."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import sonnet as snt
from tensorflow.contrib.layers.python.layers import initializers
from dnc import dnc
import numpy as np
import cv2
from scipy import ndimage as nd
from PIL import Image, ImageDraw
import os, sys
import time
from utility import alrc
experiment_number = 103
FLAGS = tf.flags.FLAGS
# Model parameters
tf.flags.DEFINE_integer("hidden_size", 128, "Size of LSTM hidden layer.")
tf.flags.DEFINE_integer("memory_size", 16, "The number of memory slots.")
tf.flags.DEFINE_integer("word_size", 64, "The width of each memory slot.")
tf.flags.DEFINE_integer("num_write_heads", 1, "Number of memory write heads.")
tf.flags.DEFINE_integer("num_read_heads", 4, "Number of memory read heads.")
tf.flags.DEFINE_integer("clip_value", 0, "Maximum absolute value of controller and dnc outputs.")
tf.flags.DEFINE_bool("use_batch_norm", True, "Use batch normalization in generator.")
tf.flags.DEFINE_string("model", "LSTM", "LSTM or DNC.")
tf.flags.DEFINE_integer("projection_size", 0, "Size of projection layer. Zero for no projection.")
tf.flags.DEFINE_bool("is_input_embedder", False, "Embed inputs before they are input.")
# Optimizer parameters.
tf.flags.DEFINE_integer("batch_size", 32, "Batch size for training.")
tf.flags.DEFINE_integer("replay_size", 25000, "Maximum examples in ring buffer.")
tf.flags.DEFINE_integer("avg_replays", 4, "Mean frequency each experience is used.")
tf.flags.DEFINE_float("max_grad_norm", 50, "Gradient clipping norm limit.")
tf.flags.DEFINE_float("learning_rate", 1e-4, "Optimizer learning rate.")
tf.flags.DEFINE_float("optimizer_epsilon", 1e-10, "Epsilon used for RMSProp optimizer.")
tf.flags.DEFINE_float("L2_norm", 1.e-4, "Decay rate for L2 regularization. 0 for no regularization.")
# Task parameters
tf.flags.DEFINE_integer("img_side", 96, "Number of image pixels for square image")
tf.flags.DEFINE_integer("num_steps", 20, "Number of image pixels for square image")
tf.flags.DEFINE_integer("step_size", 20, "Distance STEM probe moves at each step (in px).")
tf.flags.DEFINE_integer("num_actions", 2, "Number of parameters to describe actions.")
tf.flags.DEFINE_integer("shuffle_size", 2000, "Size of moving buffer to sample data from.")
tf.flags.DEFINE_integer("prefetch_size", 10, "Number of batches to prepare in advance.")
# Training options.
tf.flags.DEFINE_float("actor_lr", 0.001, "Actor learning rate.")
tf.flags.DEFINE_float("critic_lr", 0.001, "Critic learning rate.")
tf.flags.DEFINE_float("generator_lr", 0.003, "Generator learning rate.")
tf.flags.DEFINE_float("gamma", 0.97, "Reward/loss decay.")
tf.flags.DEFINE_bool("is_advantage_actor_critic", False, "Use advantage rather than Q errors for actor.")
tf.flags.DEFINE_bool("is_cyclic_generator_learning_rate", False, "Use advantage rather than Q errors for actor.")
tf.flags.DEFINE_integer("supervision_iters", 100_000, "Starting value for supeversion.")
tf.flags.DEFINE_float("supervision_start", 1., "Starting value for supeversion.")
tf.flags.DEFINE_float("supervision_end", 0., "Starting value for supeversion.")
if FLAGS.supervision_iters:
#Flag will not be used
tf.flags.DEFINE_float("supervision", 0.5, "Weighting for known discounted future reward.")
else:
#Flag will be used
tf.flags.DEFINE_float("supervision", 0.0, "Weighting for known discounted future reward.")
tf.flags.DEFINE_bool("is_target_actor", True and FLAGS.supervision != 1, "True to use target actor.")
tf.flags.DEFINE_bool("is_target_critic", True and FLAGS.supervision != 1, "True to use target critic.")
tf.flags.DEFINE_bool("is_target_generator", False, "True to use target generator.")
tf.flags.DEFINE_integer("update_frequency", 0, "Frequency of hard target network updates. Zero for soft updates.")
tf.flags.DEFINE_float("target_decay", 0.9997, "Decay rate for target network soft updates.")
tf.flags.DEFINE_bool("is_generator_batch_norm_tracked", False, "True to track generator batch normalization.")
tf.flags.DEFINE_bool("is_positive_qs", True, "Whether to clip qs to be positive.")
tf.flags.DEFINE_bool("is_infilled", True, "True to use infilling rather than generator.")
tf.flags.DEFINE_bool("is_prev_position_input", True, "True to input previous positions.")
tf.flags.DEFINE_bool("is_ornstein_uhlenbeck", True, "True for O-U exploration noise.")
tf.flags.DEFINE_bool("is_noise_decay", True, "Decay noise if true.")
tf.flags.DEFINE_float("ou_theta", 0.1, "Drift back to mean.")
tf.flags.DEFINE_float("ou_sigma", 0.2, "Size of random process.")
tf.flags.DEFINE_bool("is_rel_to_truth", False, "True to normalize losses using expected losses.")
tf.flags.DEFINE_bool("is_clipped_reward", True, "True to clip rewards.")
tf.flags.DEFINE_bool("is_clipped_critic", False, "True to clip critic predictions for actor training.")
tf.flags.DEFINE_float("over_edge_penalty", 0.05, "Penalty for action going over edge of image.")
tf.flags.DEFINE_bool("is_prioritized_replay", False, "True to prioritize the replay of difficult experiences.")
tf.flags.DEFINE_bool("is_biased_prioritized_replay", False, "Priority sampling without bias correction.")
tf.flags.DEFINE_bool("is_relative_to_spirals", False, "True to compare generator losses against losses for spirals.")
tf.flags.DEFINE_bool("is_self_competition", False, "Oh it is on. True to compete against past versions of itself.")
tf.flags.DEFINE_float("norm_generator_losses_decay", 0.999, "Divide generator losses by their running mean. Zero for no normalization.")
tf.flags.DEFINE_integer("start_iter", 0, "Starting iteration")
tf.flags.DEFINE_integer("train_iters", 500_000, "Training iterations")
tf.flags.DEFINE_integer("val_examples", 20_000, "Number of validation examples")
tf.flags.DEFINE_string("model_dir",
f"//ads.warwick.ac.uk/shared/HCSS6/Shared305/Microscopy/Jeffrey-Ede/models/recurrent_conv-1/{experiment_number}/",
"Working directory.")
tf.flags.DEFINE_string("data_file",
"//Desktop-sa1evjv/h/small_scans/96x96.npy",
"Datafile containing 19769 96x96 downsampled STEM crops.")
tf.flags.DEFINE_integer("report_freq", 10, "How often to print losses to the console.")
os.chdir(FLAGS.model_dir)
sys.path.insert(0, FLAGS.model_dir)
def norm_img(img, min=None, max=None, get_min_and_max=False):
if min == None:
min = np.min(img)
if max == None:
max = np.max(img)
if np.absolute(min-max) < 1.e-6:
img.fill(0.)
else:
a = 0.5*(min+max)
b = 0.5*(max-min)
img = (img-a) / b
if get_min_and_max:
return img.astype(np.float32), (min, max)
else:
return img.astype(np.float32)
def scale0to1(img):
"""Rescale image between 0 and 1"""
img = img.astype(np.float32)
min = np.min(img)
max = np.max(img)
if np.absolute(min-max) < 1.e-6:
img.fill(0.5)
else:
img = (img - min)/(max - min)
return img.astype(np.float32)
def disp(img):
#if len(img.shape) == 3:
# img = np.sum(img, axis=2)
cv2.namedWindow('CV_Window', cv2.WINDOW_NORMAL)
cv2.imshow('CV_Window', scale0to1(img))
cv2.waitKey(0)
return
def run_model(input_sequence, output_size):
"""Runs model on input sequence."""
access_config = {
"memory_size": FLAGS.memory_size,
"word_size": FLAGS.word_size,
"num_reads": FLAGS.num_read_heads,
"num_writes": FLAGS.num_write_heads,
}
controller_config = {
"hidden_size": FLAGS.hidden_size,
}
clip_value = FLAGS.clip_value
dnc_core = dnc.DNC(access_config, controller_config, output_size, clip_value)
initial_state = dnc_core.initial_state(FLAGS.batch_size)
output_sequence, _ = tf.nn.dynamic_rnn(
cell=dnc_core,
inputs=input_sequence,
time_major=True,
initial_state=initial_state)
return output_sequence
class RingBuffer(object):
def __init__(
self,
action_shape,
observation_shape,
full_scan_shape,
batch_size,
buffer_size=1000,
num_past_losses=None,
):
self.buffer_size = buffer_size
self.actions = np.zeros([buffer_size]+list(action_shape)[1:])
self.observations = np.zeros([buffer_size]+list(observation_shape)[1:])
self.full_scans = np.zeros([buffer_size]+list(full_scan_shape)[1:])
self.position = 0
self._batch_size = batch_size
if FLAGS.is_prioritized_replay or FLAGS.is_biased_prioritized_replay:
self.priorities = np.zeros([buffer_size])
self.indices = np.arange(buffer_size)
if FLAGS.is_self_competition:
self.past_losses = np.zeros([num_past_losses])
self.labels = np.zeros([buffer_size], np.int32)
def add(self, actions, observations, full_scans, labels=None):
i0 = self.position % self.buffer_size
num_before_cycle = min(self.buffer_size-i0, self._batch_size)
self.actions[i0:i0+num_before_cycle] = actions[:num_before_cycle]
self.observations[i0:i0+num_before_cycle] = observations[:num_before_cycle]
self.full_scans[i0:i0+num_before_cycle] = full_scans[:num_before_cycle]
num_remaining = self._batch_size - num_before_cycle
if num_remaining > 0:
self.actions[0:num_remaining] = actions[num_before_cycle:]
self.observations[:num_remaining] = observations[num_before_cycle:]
self.full_scans[:num_remaining] = full_scans[num_before_cycle:]
if FLAGS.is_prioritized_replay or FLAGS.is_biased_prioritized_replay:
if self.position:
mean_priority = np.sum(self.priorities) / min(self.position, self.buffer_size)
else:
mean_priority = 0.3
self.priorities[i0:i0+num_before_cycle] = mean_priority*np.ones([num_before_cycle])
if num_before_cycle < self._batch_size:
self.priorities[0:num_remaining] = mean_priority*np.ones([self._batch_size - num_before_cycle])
if FLAGS.is_self_competition:
self.labels[i0:i0+num_before_cycle] = labels[:num_before_cycle]
if num_remaining > 0:
self.labels[0:num_remaining] = labels[num_before_cycle:]
self.position += self._batch_size
def get(self):
limit = min(self.position, self.buffer_size)
if FLAGS.is_prioritized_replay:
sample_idxs = np.random.choice(
self.indices,
size=self._batch_size,
replace=False,
p=self.priorities/np.sum(self.priorities)
) #alpha=1
beta = 0.5 + 0.5*(FLAGS.train_iters - self.position)/FLAGS.train_iters
sampled_priority_weights = self.priorities[sample_idxs]**( -beta )
sampled_priority_weights /= np.max(sampled_priority_weights)
elif FLAGS.is_biased_prioritized_replay:
alpha = (FLAGS.train_iters - self.position)/FLAGS.train_iters
priorities = self.priorities**alpha
sample_idxs = np.random.choice(
self.indices,
size=self._batch_size,
replace=False,
p=self.priorities/np.sum(self.priorities)
)
else:
sample_idxs = np.random.randint(0, limit, size=self._batch_size)
sampled_actions = np.stack([self.actions[i] for i in sample_idxs])
sampled_observations = np.stack([self.observations[i] for i in sample_idxs])
sampled_full_scans = np.stack([self.full_scans[i] for i in sample_idxs])
if FLAGS.is_prioritized_replay:
return sampled_actions, sampled_observations, sampled_full_scans, sample_idxs, sampled_priority_weights
elif FLAGS.is_biased_prioritized_replay:
return sampled_actions, sampled_observations, sampled_full_scans, sample_idxs
elif FLAGS.is_self_competition:
sampled_labels = np.stack([self.labels[i] for i in sample_idxs])
sampled_past_losses = np.stack([self.past_losses[i] for i in sampled_labels])
return sampled_actions, sampled_observations, sampled_full_scans, sampled_labels, sampled_past_losses
else:
return sampled_actions, sampled_observations, sampled_full_scans
def update_priorities(self, idxs, priorities):
"""For prioritized experience replay"""
self.priorities[idxs] = priorities
def update_past_losses(self, idxs, losses):
self.past_losses[idxs] = losses
class Agent(snt.AbstractModule):
def __init__(
self,
num_outputs,
name,
is_new=False,
noise_decay=None,
is_double_critic=False,
sampled_full_scans=None,
val_full_scans=None
):
super(Agent, self).__init__(name=name)
access_config = {
"memory_size": FLAGS.memory_size,
"word_size": FLAGS.word_size,
"num_reads": FLAGS.num_read_heads,
"num_writes": FLAGS.num_write_heads,
}
controller_config = {
"hidden_size": FLAGS.hidden_size,
"projection_size": FLAGS.projection_size or None,
}
clip_value = FLAGS.clip_value
with self._enter_variable_scope():
components = dnc.Components(access_config, controller_config, num_outputs)
self._dnc_core = dnc.DNC(components, num_outputs, clip_value, is_new=False, is_double_critic=is_double_critic)
if is_new:
self._dnc_core_new = dnc.DNC(
components,
num_outputs,
clip_value,
is_new=True,
noise_decay=noise_decay,
sampled_full_scans=sampled_full_scans,
is_noise=True
)
if not val_full_scans is None:
self._dnc_core_val = dnc.DNC(
components,
num_outputs,
clip_value,
is_new=True,
sampled_full_scans=val_full_scans
)
self._initial_state = self._dnc_core.initial_state(FLAGS.batch_size)
#self._action_embedder = snt.Linear(output_size=64)
#self._observation_embedder = snt.Linear(output_size=64)
def _build(self, observations, actions):
#Tiling here is a hack to make inputs the same size
num_tiles = 2 // (actions.get_shape().as_list()[-1] // FLAGS.num_actions)
tiled_actions = tf.tile(actions, [1, 1, num_tiles])
input_sequence = tf.concat([observations, tiled_actions], axis=-1)
output_sequence, _ = tf.nn.dynamic_rnn(
cell=self._dnc_core,
inputs=input_sequence,
time_major=False,
initial_state=self._initial_state
)
return output_sequence
def get_new_experience(self):
output_sequence, _ = tf.nn.dynamic_rnn(
cell=self._dnc_core_new,
inputs=tf.zeros([FLAGS.batch_size, FLAGS.num_steps, 1]),
time_major=False,
initial_state=self._initial_state
)
if hasattr(tf, 'ensure_shape'):
output_sequence = tf.ensure_shape(output_sequence, [FLAGS.batch_size, FLAGS.num_steps, FLAGS.step_size+FLAGS.num_actions])
else:
output_sequence = tf.reshape(output_sequence, [FLAGS.batch_size, FLAGS.num_steps, FLAGS.step_size+FLAGS.num_actions])
observations = output_sequence[:,:,:FLAGS.step_size]
actions = output_sequence[:,:,FLAGS.step_size:]
return observations, actions
def get_val_experience(self):
output_sequence, _ = tf.nn.dynamic_rnn(
cell=self._dnc_core_val,
inputs=tf.zeros([FLAGS.batch_size, FLAGS.num_steps, 1]),
time_major=False,
initial_state=self._initial_state
)
if hasattr(tf, 'ensure_shape'):
output_sequence = tf.ensure_shape(output_sequence, [FLAGS.batch_size, FLAGS.num_steps, FLAGS.step_size+FLAGS.num_actions])
else:
output_sequence = tf.reshape(output_sequence, [FLAGS.batch_size, FLAGS.num_steps, FLAGS.step_size+FLAGS.num_actions])
observations = output_sequence[:,:,:FLAGS.step_size]
actions = output_sequence[:,:,FLAGS.step_size:]
return observations, actions
def spectral_norm(w, iteration=1, in_place_updates=False):
"""Spectral normalization. It imposes Lipschitz continuity by constraining the
spectral norm (maximum singular value) of weight matrices.
Inputs:
w: Weight matrix to spectrally normalize.
iteration: Number of times to apply the power iteration method to
enforce spectral norm.
Returns:
Weight matrix with spectral normalization control dependencies.
"""
w0 = w
w_shape = w.shape.as_list()
w = tf.reshape(w, [-1, w_shape[-1]])
u = tf.get_variable(auto_name("u"),
[1, w_shape[-1]],
initializer=tf.random_normal_initializer(mean=0.,stddev=0.03),
trainable=False)
u_hat = u
v_hat = None
for i in range(iteration):
"""
power iteration
Usually iteration = 1 will be enough
"""
v_ = tf.matmul(u_hat, tf.transpose(w))
v_hat = tf.nn.l2_normalize(v_)
u_ = tf.matmul(v_hat, w)
u_hat = tf.nn.l2_normalize(u_)
u_hat = tf.stop_gradient(u_hat)
v_hat = tf.stop_gradient(v_hat)
sigma = tf.matmul(tf.matmul(v_hat, w), tf.transpose(u_hat))
if in_place_updates:
        #In-place control dependencies bottleneck training
with tf.control_dependencies([u.assign(u_hat)]):
w_norm = w / sigma
w_norm = tf.reshape(w_norm, w_shape)
else:
#Execute control dependency in parallel with other update ops
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, u.assign(u_hat))
w_norm = w / sigma
w_norm = tf.reshape(w_norm, w_shape)
return w_norm
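# NumPy-only sketch (illustrative, not used by the graph): the power iteration above estimates
# the largest singular value of the reshaped weight matrix without a full SVD. Each iteration
# alternates v <- normalize(u W^T) and u <- normalize(v W); v W u^T then approximates the
# spectral norm that the weights are divided by.
def _power_iteration_sketch(w, iteration=1):
    u = np.random.normal(scale=0.03, size=(1, w.shape[-1]))
    v = None
    for _ in range(iteration):
        v = u @ w.T
        v /= np.linalg.norm(v)
        u = v @ w
        u /= np.linalg.norm(u)
    sigma = (v @ w @ u.T).item()  # estimate of the largest singular value of w
    return w / sigma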
def spectral_norm_conv(
inputs,
num_outputs,
stride=1,
kernel_size=3,
padding='VALID',
biases_initializer=tf.zeros_initializer()
):
"""Convolutional layer with spectrally normalized weights."""
w = tf.get_variable(auto_name("kernel"), shape=[kernel_size, kernel_size, inputs.get_shape()[-1], num_outputs])
x = tf.nn.conv2d(input=inputs, filter=spectral_norm(w),
strides=[1, stride, stride, 1], padding=padding)
if biases_initializer != None:
b = tf.get_variable(auto_name("bias"), [num_outputs], initializer=biases_initializer)
x = tf.nn.bias_add(x, b)
return x
def conv(
inputs,
num_outputs,
kernel_size=3,
stride=1,
padding='SAME',
data_format="NHWC",
actv_fn=tf.nn.relu,
is_batch_norm=True,
is_spectral_norm=False,
is_depthwise_sep=False,
extra_batch_norm=False,
biases_initializer=tf.zeros_initializer,
weights_initializer=initializers.xavier_initializer,
transpose=False,
is_training=True
):
"""Convenience function for a strided convolutional or transpositional
convolutional layer.
Intro: https://towardsdatascience.com/intuitively-understanding-convolutions-for-deep-learning-1f6f42faee1.
    The order is: Convolution -> Activation (optional) -> Batch Normalization (optional).
Inputs:
inputs: Tensor of shape `[batch_size, height, width, channels]` to apply
convolutions to.
num_outputs: Number of feature channels to output.
kernel_size: Side lenth of square convolutional kernels.
stride: Distance between convolutional kernel applications.
padding: 'SAME' for zero padding where kernels go over the edge.
'VALID' to discard features where kernels go over the edge.
        actv_fn: non-linearity to apply after summing convolutions.
is_batch_norm: If True, add batch normalization after activation.
is_spectral_norm: If True, spectrally normalize weights.
is_depthwise_sep: If True, depthwise separate convolutions into depthwise
spatial convolutions, then 1x1 pointwise convolutions.
extra_batch_norm: If True and convolutions are depthwise separable, implement
batch normalization between depthwise and pointwise convolutions.
biases_initializer: Function to initialize biases with. None for no biases.
weights_initializer: Function to initialize weights with. None for no weights.
transpose: If True, apply convolutional layer transpositionally to the
described convolutional layer.
is_training: If True, use training specific operations e.g. batch normalization
update ops.
Returns:
Output of convolutional layer.
"""
x = inputs
num_spatial_dims = len(x.get_shape().as_list()) - 2
if biases_initializer == None:
biases_initializer = lambda: None
if weights_initializer == None:
weights_initializer = lambda: None
if not is_spectral_norm:
#Convolutional layer without spectral normalization
if transpose:
stride0 = 1
if type(stride) == list or is_depthwise_sep or stride % 1:
#Apparently there is no implementation of transpositional
#depthwise separable convolutions, so bilinearly upsample then
#depthwise separably convolute
if kernel_size != 1:
x = tf.image.resize_bilinear(
images=x,
size=stride if type(stride) == list else \
[int(stride*d) for d in x.get_shape().as_list()[1:3]],
align_corners=True
)
stride0 = stride
stride = 1
if type(stride0) == list and not is_depthwise_sep:
layer = tf.contrib.layers.conv2d
elif is_depthwise_sep:
layer = tf.contrib.layers.separable_conv2d
else:
layer = tf.contrib.layers.conv2d_transpose
x = layer(
inputs=x,
num_outputs=num_outputs,
kernel_size=kernel_size,
stride=stride,
padding=padding,
data_format=data_format,
activation_fn=None,
weights_initializer=weights_initializer(),
biases_initializer=biases_initializer())
if type(stride0) != list:
if (is_depthwise_sep or stride0 % 1) and kernel_size == 1:
x = tf.image.resize_bilinear(
images=x,
size=[int(stride0*d) for d in x.get_shape().as_list()[1:3]],
align_corners=True
)
else:
if num_spatial_dims == 1:
layer = tf.contrib.layers.conv1d
elif num_spatial_dims == 2:
if is_depthwise_sep:
layer = tf.contrib.layers.separable_conv2d
else:
layer = tf.contrib.layers.conv2d
x = layer(
inputs=x,
num_outputs=num_outputs,
kernel_size=kernel_size,
stride=stride,
padding=padding,
data_format=data_format,
activation_fn=None,
weights_initializer=weights_initializer(),
biases_initializer=biases_initializer())
else:
#Weights are spectrally normalized
x = spectral_norm_conv(
inputs=x,
num_outputs=num_outputs,
stride=stride,
kernel_size=kernel_size,
padding=padding,
biases_initializer=biases_initializer())
if actv_fn:
x = actv_fn(x)
if is_batch_norm and FLAGS.use_batch_norm:
x = tf.contrib.layers.batch_norm(x, is_training=is_training)
return x
def residual_block(inputs, skip=3, is_training=True):
"""Residual block whre the input is added to the signal after skipping some
layers. This architecture is good for learning purturbative transformations.
If no layer is provided, it defaults to a convolutional layer.
Deep residual learning: https://arxiv.org/abs/1512.03385.
Inputs:
inputs: Tensor to apply residual block to. Outputs of every layer will
have the same shape.
skip: Number of layers to skip before adding input to layer output.
layer: Layer to apply in residual block. Defaults to convolutional
layer. Custom layers must support `inputs`, `num_outputs` and `is_training`
arguments.
Returns:
Final output of residual block.
"""
x = x0 = inputs
def layer(inputs, num_outputs, is_training, is_batch_norm, actv_fn):
x = conv(
inputs=inputs,
num_outputs=num_outputs,
is_training=is_training,
actv_fn=actv_fn
)
return x
for i in range(skip):
x = layer(
inputs=x,
num_outputs=x.get_shape()[-1],
is_training=is_training,
is_batch_norm=i < skip - 1,
actv_fn=tf.nn.relu
)
x += x0
if FLAGS.use_batch_norm:
x = tf.contrib.layers.batch_norm(x, is_training=is_training)
return x
class Generator(snt.AbstractModule):
def __init__(self,
name,
is_training
):
super(Generator, self).__init__(name=name)
self._is_training = is_training
def _build(self, inputs):
x = inputs
std_actv = tf.nn.relu#lambda x: tf.nn.leaky_relu(x, alpha=0.1)
is_training = self._is_training
is_depthwise_sep = False
base_size = 32
#x = tf.contrib.layers.batch_norm(x, is_training=is_training)
x = conv(
x,
num_outputs=32,
is_training=is_training,
actv_fn=std_actv
)
#Encoder
for i in range(1, 3):
x = conv(
x,
num_outputs=base_size*2**i,
stride=2,
is_depthwise_sep=is_depthwise_sep,
is_training=is_training,
actv_fn=std_actv
)
if i == 2:
low_level = x
#Residual blocks
for _ in range(5): #Number of blocks
x = residual_block(
x,
skip=3,
is_training=is_training
)
#Decoder
for i in range(1, -1, -1):
x = conv(
x,
num_outputs=base_size*2**i,
stride=2,
is_depthwise_sep=is_depthwise_sep,
is_training=is_training,
transpose=True,
actv_fn=std_actv
)
x = conv(
x,
num_outputs=base_size,
is_depthwise_sep=is_depthwise_sep,
is_training=is_training
)
#Project features onto output image
x = conv(
x,
num_outputs=1,
biases_initializer=None,
actv_fn=None,
is_batch_norm=False,
is_training=is_training
)
return x
def construct_partial_scans(actions, observations):
"""
actions: [batch_size, num_steps, 2]
observations: [batch_size, num_steps, 10]
"""
#Last action unused and the first action is always the same
actions = np.concatenate((np.ones([FLAGS.batch_size, 1, 2]), actions[:,:-1,:]), axis=1)
starts = 0.5*FLAGS.img_side + FLAGS.step_size*(np.cumsum(actions, axis=1) - actions)
#starts = np.zeros(actions.shape)
#starts[:,0,:] = actions[:,0,:]
#for i in range(1, FLAGS.num_steps):
# starts[:,i,:] = actions[:,i,:] + starts[:,i-1,:]
#starts -= actions
#starts *= FLAGS.step_size
#starts += 0.5*FLAGS.img_side
positions = np.stack([starts + i*actions for i in range(FLAGS.step_size)], axis=-2)
x = np.minimum(np.maximum(positions, 0), FLAGS.img_side-1)
indices = []
for j in range(FLAGS.batch_size):
for k in range(FLAGS.num_steps):
for i in range(FLAGS.step_size):
indices.append( [j, int(x[j,k,i,0]), int(x[j,k,i,1])] )
indices = np.array(indices)
indices = tuple([indices[:,i] for i in range(3)])
partial_scans = np.zeros([FLAGS.batch_size, FLAGS.img_side, FLAGS.img_side])
masks = np.zeros([FLAGS.batch_size, FLAGS.img_side, FLAGS.img_side])
partial_scans[indices] = observations.reshape([-1])
masks[indices] = 1
partial_scans /= np.maximum(masks, 1)
masks = np.minimum(masks, 1)
partial_scans = np.stack([partial_scans, masks], axis=-1)
return partial_scans
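# Worked sketch (hypothetical numbers): with img_side=128 and step_size=20, a constant
# unit action of (1/sqrt(2), 1/sqrt(2)) traces a ~20-pixel segment starting at the image
# centre (64, 64) for each step.
#
#   ps = construct_partial_scans(actions, observations)   # ps.shape == (batch, 128, 128, 2)
#   # ps[..., 0] holds observed intensities at visited pixels, ps[..., 1] is the visit mask.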
def target_update_ops(target_network, network, decay=FLAGS.target_decay, l2_norm=False):
t_vars = target_network.variables
v_vars = network.variables
update_ops = []
for t, v in zip(t_vars, v_vars):
if FLAGS.is_generator_batch_norm_tracked or not "BatchNorm" in t.name: #Don't track batch normalization
if l2_norm:
v_new = (1-FLAGS.L2_norm)*v
op = v.assign(v_new)
update_ops.append(op)
op = t.assign(decay*t + (1-decay)*v_new)
update_ops.append(op)
else:
op = t.assign(decay*t + (1-decay)*v)
update_ops.append(op)
print(t.name.replace("target_", "") == v.name, t.name.replace("target_", ""), v.name)
return update_ops
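# Sketch of the intended update (Polyak averaging): every target variable tracks its
# online counterpart as t <- decay*t + (1 - decay)*v. Hypothetical wiring:
#
#   soft_updates = target_update_ops(target_critic, critic, decay=0.997)
#   with tf.control_dependencies(soft_updates):
#       post_step = tf.no_op()   # run once after each optimizer step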
def load_data(shape):
data_ph = tf.placeholder(tf.float32, shape=list(shape))
ds = tf.data.Dataset.from_tensor_slices(tuple([data_ph]))
if FLAGS.is_self_competition:
labels = tf.data.Dataset.range(0, list(shape)[0])
ds = tf.data.Dataset.zip((ds, labels))
ds = ds.shuffle(buffer_size=FLAGS.shuffle_size)
ds = ds.repeat()
ds = ds.batch(FLAGS.batch_size)
ds = ds.prefetch(FLAGS.prefetch_size)
iterator = ds.make_initializable_iterator()
return data_ph, iterator
@tf.custom_gradient
def overwrite_grads(x, y):
print("OG", x, y)
def grad(dy):
return y, None
return x, grad
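# Sketch: overwrite_grads(x, y) is the identity on x in the forward pass but returns y
# as the gradient with respect to x, which can be used to inject externally computed
# gradients, e.g. (mirroring a commented-out block later in main):
#
#   surrogate = overwrite_grads(actions, precomputed_actor_grads)   # hypothetical tensors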
def infill(data, mask):
return data[tuple(nd.distance_transform_edt(np.equal(mask, 0), return_distances=False, return_indices=True))]
#def infill(data, mask):
# x = np.zeros(data.shape)
# c = (cv2.GaussianBlur(mask.astype(np.float32), (7, 7), 3.5, None, 3.5) > 0).astype(np.float32)
# truth = data[tuple(nd.distance_transform_edt(np.equal(mask, 0), return_distances=False, return_indices=True))]
# x = (truth*c).astype(np.float32)
# return x
def fill(input):
return np.expand_dims(np.stack([infill(img, mask) for img, mask in zip(input[:,:,:,0], input[:,:,:,1])]), -1)
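# Sketch: infill() assigns every unmeasured pixel the value of its nearest measured
# neighbour (via a Euclidean distance transform), and fill() applies this per example:
#
#   filled = fill(partial_scan_batch)   # [N, side, side, 2] in -> [N, side, side, 1] out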
def flip_rotate(img, choice):
"""Applies a random flip || rotation to the image, possibly leaving it unchanged"""
if choice == 0:
return img
elif choice == 1:
return np.rot90(img, 1)
elif choice == 2:
return np.rot90(img, 2)
elif choice == 3:
return np.rot90(img, 3)
elif choice == 4:
return np.flip(img, 0)
elif choice == 5:
return np.flip(img, 1)
elif choice == 6:
return np.flip(np.rot90(img, 1), 0)
else:
return np.flip(np.rot90(img, 1), 1)
def draw_spiral(coverage, side, num_steps=10_000):
"""Duration spent at each location as a particle falls in a magnetic
field. Trajectory chosen so that the duration density is (approx.)
evenly distributed. Trajectory is calculated stepwise.
Args:
coverage: Average amount of time spent at a random pixel
side: Sidelength of square image that the motion is
inscribed on.
Returns:
A spiral
"""
#Use size that is larger than the image
size = int(np.ceil(np.sqrt(2)*side))
#Maximum radius of motion
R = size/2
#Get constant in equation of motion
k = 1/ (2*np.pi*coverage)
#Maximum theta that is in the image
theta_max = R / k
#Equispaced steps
theta = np.arange(0, theta_max, theta_max/num_steps)
r = k * theta
#Convert to cartesian, with (0,0) at the center of the image
x = r*np.cos(theta) + R
y = r*np.sin(theta) + R
#Draw spiral
z = np.empty((x.size + y.size,), dtype=x.dtype)
z[0::2] = x
z[1::2] = y
z = list(z)
img = Image.new('F', (size,size), "black")
img_draw = ImageDraw.Draw(img)
img_draw = img_draw.line(z)
img = np.asarray(img)
img = img[size//2-side//2:size//2+side//2+side%2,
size//2-side//2:size//2+side//2+side%2]
return img
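# Worked sketch (approximate): for the Archimedean spiral r = k*theta with
# k = 1/(2*pi*coverage), successive turns are spaced 2*pi*k = 1/coverage pixels apart,
# so roughly a fraction `coverage` of the image pixels lie on the drawn path. For example:
#
#   mask = draw_spiral(coverage=20*30/96**2, side=96)   # hypothetical scan parameters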
def main(unused_argv):
"""Trains the DNC and periodically reports the loss."""
graph = tf.get_default_graph()
action_shape = [FLAGS.batch_size, FLAGS.num_steps, FLAGS.num_actions]
observation_shape = [FLAGS.batch_size, FLAGS.num_steps, FLAGS.step_size]
full_scan_shape = [FLAGS.batch_size, FLAGS.img_side, FLAGS.img_side, 1]
partial_scan_shape = [FLAGS.batch_size, FLAGS.img_side, FLAGS.img_side, 2]
images = np.load(FLAGS.data_file)
images[np.logical_not(np.isfinite(images))] = 0
images = np.stack([norm_img(x) for x in images])
train_images = images[:int(0.8*len(images))]
val_images = images[int(0.8*len(images)):]
train_data_ph, train_iterator = load_data(train_images.shape)
val_data_ph, val_iterator = load_data(val_images.shape)
if FLAGS.is_self_competition:
(full_scans, labels) = train_iterator.get_next()
(val_full_scans, val_labels) = val_iterator.get_next()
full_scans = full_scans[0]
val_full_scans = val_full_scans[0]
else:
(full_scans, ) = train_iterator.get_next()
(val_full_scans, ) = val_iterator.get_next()
if hasattr(tf, 'ensure_shape'):
full_scans = tf.ensure_shape(full_scans, full_scan_shape)
val_full_scans = tf.ensure_shape(val_full_scans, full_scan_shape)
else:
full_scans = tf.reshape(full_scans, full_scan_shape)
val_full_scans = tf.reshape(val_full_scans, full_scan_shape)
replay = RingBuffer(
action_shape=action_shape,
observation_shape=observation_shape,
full_scan_shape=full_scan_shape,
batch_size=FLAGS.batch_size,
buffer_size=FLAGS.replay_size,
num_past_losses=train_images.shape[0],
)
replay_actions_ph = tf.placeholder(tf.float32, shape=action_shape, name="replay_action")
replay_observations_ph = tf.placeholder(tf.float32, shape=observation_shape, name="replay_observation")
replay_full_scans_ph = tf.placeholder(tf.float32, shape=full_scan_shape, name="replay_full_scan")
partial_scans_ph = tf.placeholder(tf.float32, shape=partial_scan_shape, name="replay_partial_scan")
is_training_ph = tf.placeholder(tf.bool, name="is_training")
if FLAGS.is_noise_decay:
noise_decay_ph = tf.placeholder(tf.float32, shape=(), name="noise_decay")
else:
noise_decay_ph = None
if FLAGS.supervision_iters:
supervision_ph = tf.placeholder(tf.float32, name="supervision")
else:
supervision_ph = FLAGS.supervision
if FLAGS.is_prioritized_replay:
priority_weights_ph = tf.placeholder(tf.float32, shape=[FLAGS.batch_size], name="priority_weights")
if FLAGS.is_self_competition:
past_losses_ph = tf.placeholder(tf.float32, shape=[FLAGS.batch_size], name="past_losses")
batch_size = FLAGS.batch_size
if FLAGS.is_relative_to_spirals:
coverage = FLAGS.num_steps*FLAGS.step_size/FLAGS.img_side**2
spiral = draw_spiral(coverage=coverage, side=FLAGS.img_side)
ys = [1/i**2 for i in range(9, 2, -1)]
xs = [np.sum(draw_spiral(coverage=c, side=FLAGS.img_side)) / FLAGS.img_side**2 for c in ys]
ub_idx = next(i for i, x in enumerate(xs) if x > coverage)
lb = xs[ub_idx-1]
ub = xs[ub_idx]
input_coverage = ( (coverage - lb)*X + (ub - coverage)*Y ) / (lb - ub)
actor = Agent(
num_outputs=FLAGS.num_actions,
is_new=True,
noise_decay=noise_decay_ph,
sampled_full_scans=full_scans,
val_full_scans=val_full_scans,
name="actor"
)
target_actor = Agent(num_outputs=FLAGS.num_actions, name="target_actor")
critic = Agent(num_outputs=1, is_double_critic=True, name="critic")
target_critic = Agent(num_outputs=1, is_double_critic=True, name="target_critic")
new_observations, new_actions = actor.get_new_experience()
#Last actions are unused
replay_observations = replay_observations_ph[:,:-1,:]
replay_actions = replay_actions_ph[:,:-1,:]
#First action must be added for actors (not critics)
start_actions = tf.ones([FLAGS.batch_size, 1, FLAGS.num_actions])/np.sqrt(2)
started_replay_actions = tf.concat([start_actions, replay_actions[:,:-1,:]], axis=1)
actions = actor(replay_observations, started_replay_actions)
if FLAGS.is_target_actor:
target_actions = target_actor(replay_observations, started_replay_actions)
elif FLAGS.supervision != 1:
target_actions = tf.stop_gradient(actions)
#The last action is never used, and the first action is diagonally north-east
#Shifting because network expect actions from previous steps to be inputted
#start_actions = tf.ones([FLAGS.batch_size, 1, FLAGS.num_actions])/np.sqrt(2)
#actions = tf.concat([start_actions, actions[:, :-1, :]], axis=1)
#target_actions = tf.concat([start_actions, target_actions[:, :-1, :]], axis=1)
actor_actions = tf.concat([replay_actions, actions], axis=-1)
qs = critic(replay_observations, actor_actions)
critic_qs = qs[:,:,:1]
actor_qs = qs[:,:,1:]
if FLAGS.is_target_critic:
target_actor_actions = tf.concat([replay_actions, target_actions], axis=-1)
target_actor_qs = target_critic(replay_observations, target_actor_actions)[:,:,1:]
target_actor_qs = tf.stop_gradient(target_actor_qs)
elif FLAGS.supervision != 1:
target_actor_qs = actor_qs#critic(replay_observations, target_actor_actions)[:,:,1:]
target_actor_qs = tf.stop_gradient(target_actor_qs)
if not FLAGS.is_infilled:
generator = Generator(name="generator", is_training=is_training_ph)
generation = generator(partial_scans_ph)
else:
generation = tf.py_func(fill, [partial_scans_ph], tf.float32)
if hasattr(tf, 'ensure_shape'):
generation = tf.ensure_shape(generation, full_scan_shape)
else:
generation = tf.reshape(generation, full_scan_shape)
generator_losses = 10*tf.reduce_mean( (generation - replay_full_scans_ph)**2, axis=[1,2,3] )
if FLAGS.is_target_generator and not FLAGS.is_infilled:
target_generator = Generator(name="target_generator", is_training=is_training_ph)
target_generation = target_generator(partial_scans_ph)
target_generator_losses = 10*tf.reduce_mean( (target_generation - replay_full_scans_ph)**2, axis=[1,2,3] )
losses = target_generator_losses #For RL
else:
losses = generator_losses #For RL
val_observations, val_actions = actor.get_val_experience()
unclipped_losses = losses
if FLAGS.is_positive_qs and (FLAGS.is_target_critic or FLAGS.supervision != 1):
target_actor_qs = tf.nn.relu(target_actor_qs)
if FLAGS.norm_generator_losses_decay:
mu = tf.get_variable(name="loss_mean", initializer=tf.constant(1., dtype=tf.float32))
mu_op = mu.assign(FLAGS.norm_generator_losses_decay*mu+(1-FLAGS.norm_generator_losses_decay)*tf.reduce_mean(losses))
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, mu_op)
losses /= tf.stop_gradient(mu)
if FLAGS.is_clipped_reward:
losses = alrc(losses)
if FLAGS.is_self_competition:
self_competition_losses = tf.where(
past_losses_ph > unclipped_losses,
tf.ones([FLAGS.batch_size]),
tf.zeros([FLAGS.batch_size])
)
losses += self_competition_losses
if FLAGS.over_edge_penalty:
positions = (
0.5 + #middle of image
FLAGS.step_size/(np.sqrt(2)*FLAGS.img_side) + #First step
(FLAGS.step_size/FLAGS.img_side)*tf.cumsum(replay_actions_ph[:,:-1,:], axis=1) # Actions
)
#new_positions = (
# positions - replay_actions_ph[:,:-1,:] + #Go back one action
# (FLAGS.step_size/FLAGS.img_side)*actions #New actions
# )
is_over_edge = tf.logical_or(tf.greater(positions, 1), tf.less(positions, 0))
is_over_edge = tf.logical_or(is_over_edge[:,:,0], is_over_edge[:,:,1])
over_edge_losses = tf.where(
is_over_edge,
FLAGS.over_edge_penalty*tf.ones(is_over_edge.get_shape()),
tf.zeros(is_over_edge.get_shape())
)
over_edge_losses = tf.cumsum(over_edge_losses, axis=1)
if FLAGS.supervision > 0 or FLAGS.is_advantage_actor_critic:
supervised_losses = []
for i in reversed(range(FLAGS.num_steps-1)):
if i == FLAGS.num_steps-1 - 1: #Extra -1 as idxs start from 0
step_loss = tf.expand_dims(losses, axis=-1)
else:
step_loss = FLAGS.gamma*step_loss
if FLAGS.over_edge_penalty:
step_loss += over_edge_losses[:,i:i+1]
supervised_losses.append(step_loss)
supervised_losses = tf.concat(supervised_losses, axis=-1)
if FLAGS.supervision < 1:
bellman_losses = tf.concat(
[FLAGS.gamma*target_actor_qs[:,1:,0], tf.expand_dims(losses, axis=-1)],
axis=-1
)
if FLAGS.over_edge_penalty:
bellman_losses += over_edge_losses
bellman_losses = supervision_ph * supervised_losses + (1 - supervision_ph) * bellman_losses
else:
bellman_losses = supervised_losses
if FLAGS.is_prioritized_replay:
unweighted_critic_losses = tf.reduce_mean( ( critic_qs[:,:,0] - bellman_losses )**2, axis=-1 )
critic_losses = tf.reduce_mean( priority_weights_ph*unweighted_critic_losses )
else:
critic_losses = tf.reduce_mean( ( critic_qs[:,:,0] - bellman_losses )**2 )
if FLAGS.is_biased_prioritized_replay:
unweighted_critic_losses = tf.reduce_mean( ( critic_qs[:,:,0] - bellman_losses )**2, axis=-1 )
if FLAGS.is_clipped_critic:
actor_qs = alrc(actor_qs)
if FLAGS.is_advantage_actor_critic:
actor_losses = tf.reduce_mean( supervised_losses - actor_qs[:,:,0] )
else:
actor_losses = tf.reduce_mean( actor_qs )
#critic_losses /= FLAGS.num_steps
#actor_losses /= FLAGS.num_steps
#Outputs to provide feedback for the developer
info = {
"actor_losses": actor_losses,
"critic_losses": critic_losses,
"generator_losses": tf.reduce_mean(generator_losses)
}
if FLAGS.is_prioritized_replay or FLAGS.is_biased_prioritized_replay:
info.update( {"priority_weights": unweighted_critic_losses} )
if FLAGS.is_self_competition:
info.update( {"unclipped_losses": unclipped_losses} )
outputs = {
"generation": generation[0,:,:,0],
"truth": replay_full_scans_ph[0,:,:,0],
"input": partial_scans_ph[0,:,:,0]
}
history_op = {
"actions": new_actions,
"observations": new_observations,
"full_scans": full_scans
}
if FLAGS.is_self_competition:
history_op.update( {"labels": labels} )
##Modify actor gradients
#[actor_grads] = tf.gradients(actor_losses, replay_actions_ph)
#actor_losses = overwrite_grads(actions, actor_grads)
start_iter = FLAGS.start_iter
train_iters = FLAGS.train_iters
config = tf.ConfigProto()
config.gpu_options.allow_growth = True #Only use required GPU memory
#config.gpu_options.force_gpu_compatible = True
model_dir = FLAGS.model_dir
log_filepath = model_dir + "log.txt"
save_period = 1; save_period *= 3600 #Save checkpoints roughly every hour (value in seconds)
log_file = open(log_filepath, "a")
with tf.Session(config=config) as sess:
if FLAGS.is_target_actor:
if FLAGS.update_frequency <= 1:
update_target_critic_op = target_update_ops(target_actor, actor)
else:
update_target_critic_op = []
initial_update_target_critic_op = target_update_ops(target_actor, actor, decay=0)
else:
update_target_critic_op = []
initial_update_target_critic_op = []
if FLAGS.is_target_critic:
if FLAGS.update_frequency <= 1:
update_target_actor_op = target_update_ops(target_critic, critic)
else:
update_target_actor_op = []
initial_update_target_actor_op = target_update_ops(target_critic, critic, decay=0)
else:
update_target_actor_op = []
initial_update_target_actor_op = []
if FLAGS.is_target_generator and not FLAGS.is_infilled:
if FLAGS.update_frequency <= 1:
update_target_generator_op = target_update_ops(target_generator, generator, l2_norm=FLAGS.L2_norm)
else:
update_target_generator_op = []
initial_update_target_generator_op = target_update_ops(target_generator, generator, decay=0)
else:
update_target_generator_op = []
initial_update_target_generator_op = []
initial_update_target_network_ops = (
initial_update_target_actor_op +
initial_update_target_critic_op +
initial_update_target_generator_op
)
actor_lr = FLAGS.actor_lr
critic_lr = FLAGS.critic_lr
if FLAGS.is_cyclic_generator_learning_rate:
generator_lr = tf.placeholder(tf.float32, name="generator_lr")
else:
generator_lr = FLAGS.generator_lr
#critic_rep = (critic_qs[:,:,0] - bellman_losses)**2
#ps = [critic_qs[0,:,0], target_actor_qs[0,:,0], bellman_losses[0], critic_rep[0]]
#ps = [critic.trainable_variables[0], target_critic.trainable_variables[0]]
ps = []
#p = bellman_losses[0]
#p = generation[0,:,:,0]
train_op_dependencies = [tf.print(p) for p in ps] + tf.get_collection(tf.GraphKeys.UPDATE_OPS)
if not FLAGS.update_frequency:
update_target_network_ops = (
update_target_actor_op +
update_target_critic_op +
update_target_generator_op
)
train_op_dependencies += update_target_network_ops
train_ops = []
with tf.control_dependencies(train_op_dependencies):
actor_train_op = tf.train.AdamOptimizer(learning_rate=actor_lr).minimize(
loss=actor_losses, var_list=actor.trainable_variables)
critic_train_op = tf.train.AdamOptimizer(learning_rate=critic_lr).minimize(
loss=critic_losses, var_list=critic.trainable_variables)
train_ops += [actor_train_op, critic_train_op]
if not FLAGS.is_infilled:
generator_train_op = tf.train.AdamOptimizer(learning_rate=generator_lr).minimize(
loss=generator_losses, var_list=generator.trainable_variables)
train_ops.append(generator_train_op)
else:
generator_train_op = tf.no_op()
feed_dict = {}
sess.run(tf.global_variables_initializer(), feed_dict=feed_dict)
saver = tf.train.Saver(max_to_keep=1)
noteable_saver = tf.train.Saver(max_to_keep=2)
if start_iter:
saver.restore(
sess,
tf.train.latest_checkpoint(model_dir+"model/")
)
else:
if len(initial_update_target_network_ops):
sess.run(initial_update_target_network_ops, feed_dict=feed_dict)
sess.run(train_iterator.initializer, feed_dict={train_data_ph: train_images})
sess.run(val_iterator.initializer, feed_dict={val_data_ph: val_images})
time0 = time.time()
for iter in range(start_iter, train_iters):
if iter < FLAGS.replay_size or not iter % FLAGS.avg_replays:
#Add experiences to the replay
feed_dict = {is_training_ph: np.bool(True)}
if FLAGS.is_noise_decay:
noise_decay = np.float32( (train_iters - iter)/train_iters )
feed_dict.update( {noise_decay_ph: noise_decay} )
history = sess.run(
history_op,
feed_dict=feed_dict)
replay.add(**history)
#Sample experiences from the replay
if FLAGS.is_prioritized_replay:
sampled_actions, sampled_observations, replay_sampled_full_scans, sample_idxs, sampled_priority_weights = replay.get()
elif FLAGS.is_biased_prioritized_replay:
sampled_actions, sampled_observations, replay_sampled_full_scans, sample_idxs = replay.get()
elif FLAGS.is_self_competition:
sampled_actions, sampled_observations, replay_sampled_full_scans, sampled_labels, sampled_past_losses = replay.get()
else:
sampled_actions, sampled_observations, replay_sampled_full_scans = replay.get()
replay_partial_scans = construct_partial_scans(sampled_actions, sampled_observations)
if not FLAGS.is_infilled:
sampled_full_scans = []
partial_scans = []
spiral_scans = []
for sampled_full_scan, partial_scan in zip(replay_sampled_full_scans, replay_partial_scans):
c = np.random.randint(0, 8)
sampled_full_scans.append( flip_rotate(sampled_full_scan, c) )
partial_scans.append( flip_rotate(partial_scan, c) )
if FLAGS.is_relative_to_spirals:
spiral_scan = spiral * sampled_full_scan
spiral_scans.append( flip_rotate(spiral_scan, c) )
sampled_full_scans = np.stack( sampled_full_scans )
partial_scans = np.stack( partial_scans )
else:
sampled_full_scans = replay_sampled_full_scans
partial_scans = replay_partial_scans
feed_dict = {
replay_actions_ph: sampled_actions,
replay_observations_ph: sampled_observations,
replay_full_scans_ph: sampled_full_scans,
partial_scans_ph: partial_scans,
is_training_ph: np.bool(True)
}
if FLAGS.is_prioritized_replay:
feed_dict.update({priority_weights_ph: sampled_priority_weights})
if FLAGS.supervision_iters:
supervision = FLAGS.supervision_start + min(iter, FLAGS.supervision_iters)*(FLAGS.supervision_end-FLAGS.supervision_start) / FLAGS.supervision_iters
feed_dict.update( {supervision_ph: supervision } )
if FLAGS.is_self_competition:
feed_dict.update( {past_losses_ph: sampled_past_losses} )
if FLAGS.is_cyclic_generator_learning_rate:
envelope = 0.75**(iter/(train_iters//5))
cycle_half = train_iters//10
cycle_full = 2*cycle_half
cyclic_sawtooth = (max(iter%cycle_full, cycle_half) - min(iter%cycle_full - cycle_half, 0))/cycle_half
cyclic_lr = envelope*(0.1 + 0.9*cyclic_sawtooth)
feed_dict.update( {generator_lr: np.float32(cyclic_lr)} )
#Train
if iter in [0, 100, 500] or not iter % 25_000 or (0 <= iter < 10_000 and not iter % 1000) or iter == start_iter:
_, step_info, step_outputs = sess.run([train_ops, info, outputs], feed_dict=feed_dict)
for k in step_outputs:
save_loc = FLAGS.model_dir + k + str(iter)+".tif"
Image.fromarray( (0.5*step_outputs[k]+0.5).astype(np.float32) ).save( save_loc )
else:
_, step_info = sess.run([train_ops, info], feed_dict=feed_dict)
if FLAGS.update_frequency and not iter % FLAGS.update_frequency:
sess.run(initial_update_target_network_ops, feed_dict=feed_dict)
if FLAGS.is_prioritized_replay:
replay.update_priorities(sample_idxs, step_info["priority_weights"])
if FLAGS.is_self_competition:
replay.update_past_losses(sampled_labels, step_info["unclipped_losses"])
output = f"Iter: {iter}"
for k in step_info:
if k not in ["priority_weights", "unclipped_losses"]:
output += f", {k}: {step_info[k]}"
if not iter % FLAGS.report_freq:
print(output)
#if "nan" in output:
# saver.restore(
# sess,
# tf.train.latest_checkpoint(model_dir+"model/")
# )
try:
log_file.write(output)
except:
while True:
print("Issue writing log.")
time.sleep(1)
log_file = open(log_filepath, "a")
try:
log_file.write(output)
break
except:
continue
if iter in [train_iters//2-1, train_iters-1]:
noteable_saver.save(sess, save_path=model_dir+"noteable_ckpt/model", global_step=iter)
time0 = time.time()
start_iter = iter
elif time.time() >= time0 + save_period:
saver.save(sess, save_path=model_dir+"model/model", global_step=iter)
time0 = time.time()
val_losses_list = []
for iter in range(0, FLAGS.val_examples//FLAGS.batch_size):
#Generate validation experiences with the trained agent
feed_dict = {is_training_ph: np.bool(True)}
sampled_actions, sampled_observations, sampled_full_scans = sess.run(
[val_actions, val_observations, val_full_scans],
feed_dict=feed_dict
)
partial_scans = construct_partial_scans(sampled_actions, sampled_observations)
feed_dict = {
replay_actions_ph: sampled_actions,
replay_observations_ph: sampled_observations,
replay_full_scans_ph: sampled_full_scans,
partial_scans_ph: partial_scans,
is_training_ph: np.bool(False)
}
val_losses = sess.run( unclipped_losses, feed_dict=feed_dict )
val_losses_list.append( val_losses )
val_losses = np.concatenate(tuple(val_losses_list), axis=0)
np.save(model_dir + "val_losses.npy", val_losses)
if __name__ == "__main__":
tf.app.run() |
py | 1a3b235d0c34fe1b140a767317d9765b2a4dfde3 | """
Reaction Result
===============
"""
class ReactionResult:
"""
The result of a reaction.
"""
__slots__ = [
'_new_atoms',
'_new_bonds',
'_deleted_atoms',
'_deleted_bonds',
]
def __init__(
self,
new_atoms,
new_bonds,
deleted_atoms,
deleted_bonds,
):
"""
Initialize a :class:`.ReactionResult` instance.
Parameters
----------
new_atoms : :class:`tuple` of :class:`.NewAtom`
The new atoms added by the reaction.
new_bonds : :class:`tuple` of :class:`.Bond`
The bonds added by the reaction.
deleted_atoms : :class:`tuple` of :class:`.Atom`
The atoms deleted by the reaction.
deleted_bonds : :class:`tuple` of :class:`.Bond`
The bonds deleted by the reaction.
"""
self._new_atoms = new_atoms
self._new_bonds = new_bonds
self._deleted_atoms = deleted_atoms
self._deleted_bonds = deleted_bonds
def get_new_atoms(self):
"""
Get the new atoms added by the reaction.
Returns
-------
:class:`tuple` of :class:`.NewAtom`
The new atoms added by the reaction.
"""
return self._new_atoms
def get_new_bonds(self):
"""
Get the new bonds added by the reaction.
Returns
-------
:class:`tuple` of :class:`.Bond`
The new bonds added by the reaction.
"""
return self._new_bonds
def get_deleted_atoms(self):
"""
Get the atoms deleted by the reaction.
Returns
-------
:class:`tuple` of :class:`.Atom`
The atoms deleted by the reaction.
"""
return self._deleted_atoms
def get_deleted_bonds(self):
"""
Get the bonds deleted by the reaction.
Returns
-------
:class:`tuple` of :class:`.Bond`
The bonds deleted by the reaction.
"""
return self._deleted_bonds
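# Usage sketch (illustrative; assumes NewAtom, Atom and Bond objects from the host
# framework):
#
#   result = ReactionResult(
#       new_atoms=(new_atom, ),
#       new_bonds=(new_bond, ),
#       deleted_atoms=(),
#       deleted_bonds=(old_bond, ),
#   )
#   result.get_new_bonds()   # -> (new_bond, )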
|
py | 1a3b248d12824fd1219217c90828902b57a86087 | import unittest
from time import sleep
from blockchain import Actor, Chain, Config
from blockchain.exceptions import ValidationError
class MyActorTesterOnTestNet(unittest.TestCase):
def setUp(self) -> None:
Config.test_net = True
Config.test_net_wallet_initial_coins = 100
Config.new_block_interval = 1
def test_create_transaction(self):
blockchain = Chain()
actor1 = Actor(secret_key="super_secret1!", blockchain=blockchain)
actor2 = Actor(secret_key="super_secret2!", blockchain=blockchain)
tx = actor1.create_transaction(recipient=actor2.address, amount=10)
blockchain.add_transaction(tx.to_dict())
def test_forge_empty_block(self):
blockchain = Chain()
actor1 = Actor(secret_key="super_secret1!", blockchain=blockchain)
block = actor1.forge_block()
blockchain.add_block(block.to_dict())
def test_forge_block_with_transaction(self):
blockchain = Chain()
forger = Actor(secret_key="forger_secret!", blockchain=blockchain)
actor1 = Actor(secret_key="super_secret1!", blockchain=blockchain)
actor2 = Actor(secret_key="super_secret2!", blockchain=blockchain)
tx = actor1.create_transaction(recipient=actor2.address, amount=10)
blockchain.add_transaction(tx.to_dict())
block = forger.forge_block()
blockchain.add_block(block.to_dict())
def test_transaction_to_self(self):
blockchain = Chain()
actor1 = Actor(secret_key="super_secret1!", blockchain=blockchain)
with self.assertRaises(ValidationError):
tx = actor1.create_transaction(recipient=actor1.address, amount=10)
blockchain.add_transaction(tx.to_dict())
def test_negative_amount_transaction(self):
blockchain = Chain()
actor1 = Actor(secret_key="super_secret1!", blockchain=blockchain)
actor2 = Actor(secret_key="super_secret2!", blockchain=blockchain)
with self.assertRaises(ValidationError):
tx = actor1.create_transaction(recipient=actor2.address, amount=-10)
blockchain.add_transaction(tx.to_dict())
def test_negative_fee_transaction(self):
blockchain = Chain()
actor1 = Actor(secret_key="super_secret1!", blockchain=blockchain)
actor2 = Actor(secret_key="super_secret2!", blockchain=blockchain)
with self.assertRaises(ValidationError):
tx = actor1.create_transaction(recipient=actor2.address, amount=10, fee=-1)
blockchain.add_transaction(tx.to_dict())
def test_zero_fee_transaction(self):
blockchain = Chain()
actor1 = Actor(secret_key="super_secret1!", blockchain=blockchain)
actor2 = Actor(secret_key="super_secret2!", blockchain=blockchain)
with self.assertRaises(ValidationError):
tx = actor1.create_transaction(recipient=actor2.address, amount=10, fee=0)
blockchain.add_transaction(tx.to_dict())
def test_chain_tx_counter(self):
blockchain = Chain()
forger = Actor(secret_key="forger_secret!", blockchain=blockchain)
sender = Actor(secret_key="super_secret1!", blockchain=blockchain)
recipient = Actor(secret_key="super_secret2!", blockchain=blockchain)
tx = sender.create_transaction(recipient=recipient.address, amount=10)
blockchain.add_transaction(tx.to_dict())
tx_counter_before = sender.chain_tx_counter
block = forger.forge_block()
blockchain.add_block(block.to_dict())
sleep(2) # wait for block creation
tx_counter_after = sender.chain_tx_counter
self.assertGreater(tx_counter_after, tx_counter_before)
def test_actor_balance(self):
blockchain = Chain()
forger = Actor(secret_key="forger_secret!", blockchain=blockchain)
actor1 = Actor(secret_key="super_secret1!", blockchain=blockchain)
actor2 = Actor(secret_key="super_secret2!", blockchain=blockchain)
transaction_amount = 10
transaction_fee = 1
before_transaction_actor2_balance = actor2.balance
before_transaction_actor1_balance = actor1.balance
before_transaction_forger_balance = forger.balance
tx = actor1.create_transaction(
recipient=actor2.address, amount=transaction_amount, fee=transaction_fee
)
blockchain.add_transaction(tx.to_dict())
block = forger.forge_block()
blockchain.add_block(block.to_dict())
sleep(
2
) # Wait until the block is added, we set the block adding interval to 1 in the setUp method
after_transaction_actor2_balance = actor2.balance
after_transaction_actor1_balance = actor1.balance
after_transaction_forger_balance = forger.balance
self.assertEqual(
before_transaction_actor1_balance - transaction_amount - transaction_fee,
after_transaction_actor1_balance,
)
self.assertEqual(
before_transaction_actor2_balance + transaction_amount,
after_transaction_actor2_balance,
)
self.assertEqual(
before_transaction_forger_balance + transaction_fee,
after_transaction_forger_balance,
)
|
py | 1a3b2497f4de59566ebb84fb2f50dceebe5dcd47 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pretend
from warehouse import search
def test_es(monkeypatch):
search_obj = pretend.stub()
index_obj = pretend.stub(
doc_type=pretend.call_recorder(lambda d: None),
search=pretend.call_recorder(lambda: search_obj),
settings=pretend.call_recorder(lambda **kw: None),
)
index_cls = pretend.call_recorder(lambda name, using: index_obj)
monkeypatch.setattr(search, "Index", index_cls)
doc_types = [pretend.stub(), pretend.stub()]
client = pretend.stub()
request = pretend.stub(
registry={
"elasticsearch.client": client,
"elasticsearch.index": "warehouse",
"search.doc_types": doc_types,
},
)
es = search.es(request)
assert es is search_obj
assert index_cls.calls == [pretend.call("warehouse", using=client)]
assert index_obj.doc_type.calls == [pretend.call(d) for d in doc_types]
assert index_obj.settings.calls == [
pretend.call(
number_of_shards=1,
number_of_replicas=0,
refresh_interval="1s",
)
]
assert index_obj.search.calls == [pretend.call()]
|
py | 1a3b251a7d399890c57a47ac69b7b5bb181eee39 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import uuidutils
_uuid = uuidutils.generate_uuid
def gbp_attributes(func):
def inner(**kwargs):
attrs = func()
attrs.update(kwargs)
project_id = _uuid()
if 'prj' in func.__name__ or 'default' not in func.__name__ and (
'update' not in func.__name__):
attrs.update({'project_id': project_id, 'tenant_id': project_id})
return attrs
return inner
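# Sketch of the decorator's effect: keyword overrides are merged into the factory's
# defaults and, for non-default/non-update factories, a fresh project/tenant id is
# stamped in, e.g. (hypothetical call):
#
#   get_create_policy_target_attrs(name='my-pt')
#   # -> {'name': 'my-pt', ..., 'project_id': <uuid>, 'tenant_id': <same uuid>}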
@gbp_attributes
def get_create_policy_target_default_attrs():
return {'name': '', 'description': '', 'policy_target_group_id': None,
'cluster_id': ''}
@gbp_attributes
def get_create_policy_target_attrs():
return {'name': 'ep1', 'policy_target_group_id': _uuid(),
'description': 'test policy_target',
'cluster_id': 'some_cluster_id'}
@gbp_attributes
def get_update_policy_target_attrs():
return {'name': 'new_name'}
@gbp_attributes
def get_create_application_policy_group_default_attrs():
return {'name': '', 'description': '', 'shared': False}
@gbp_attributes
def get_create_application_policy_group_attrs():
return {'name': 'apg1', 'tenant_id': _uuid(),
'description': 'test application_policy_group',
'shared': False}
@gbp_attributes
def get_update_application_policy_group_attrs():
return {'name': 'new_name'}
@gbp_attributes
def get_create_policy_target_group_default_attrs():
return {'name': '', 'description': '', 'l2_policy_id': None,
'application_policy_group_id': None,
'provided_policy_rule_sets': {},
'consumed_policy_rule_sets': {},
'network_service_policy_id': None, 'shared': False,
'service_management': False}
@gbp_attributes
def get_create_policy_target_group_attrs():
return {'name': 'ptg1',
'description': 'test policy_target group',
'l2_policy_id': _uuid(),
'application_policy_group_id': _uuid(),
'provided_policy_rule_sets': {_uuid(): None},
'consumed_policy_rule_sets': {_uuid(): None},
'network_service_policy_id': _uuid(),
'shared': False, 'service_management': False}
@gbp_attributes
def get_update_policy_target_group_attrs():
return {'name': 'new_name'}
@gbp_attributes
def get_create_l2_policy_default_attrs():
return {'name': '', 'description': '', 'shared': False,
'inject_default_route': True}
@gbp_attributes
def get_create_l2_policy_attrs():
return {'name': 'l2p1',
'description': 'test L2 policy', 'l3_policy_id': _uuid(),
'inject_default_route': True, 'shared': False}
@gbp_attributes
def get_update_l2_policy_attrs():
return {'name': 'new_name'}
@gbp_attributes
def get_create_l3_policy_default_attrs():
return {'name': '', 'description': '', 'ip_version': 4,
'ip_pool': '10.0.0.0/8', 'subnet_prefix_length': 24,
'external_segments': {}, 'shared': False}
@gbp_attributes
def get_create_l3_policy_attrs():
return {'name': 'l3p1',
'description': 'test L3 policy', 'ip_version': 6,
'ip_pool': 'fd01:2345:6789::/48',
'external_segments': {_uuid(): ['192.168.0.3']},
'subnet_prefix_length': 64, 'shared': False}
@gbp_attributes
def get_update_l3_policy_attrs():
return {'name': 'new_name'}
@gbp_attributes
def get_create_policy_action_default_attrs():
return {'name': '',
'description': '',
'action_type': 'allow',
'action_value': None,
'shared': False}
@gbp_attributes
def get_create_policy_action_attrs():
return {'name': 'pa1',
'description': 'test policy action',
'action_type': 'redirect',
'action_value': _uuid(),
'shared': False}
@gbp_attributes
def get_update_policy_action_attrs():
return {'name': 'new_name'}
@gbp_attributes
def get_create_policy_classifier_default_attrs():
return {'name': '',
'description': '',
'protocol': None,
'port_range': None,
'direction': None,
'shared': False}
@gbp_attributes
def get_create_policy_classifier_attrs():
return {'name': 'pc1',
'description': 'test policy classifier',
'protocol': 'tcp',
'port_range': '100:200',
'direction': 'in',
'shared': False}
@gbp_attributes
def get_update_policy_classifier_attrs():
return {'name': 'new_name'}
@gbp_attributes
def get_create_policy_rule_default_attrs():
return {'name': '',
'description': '',
'enabled': True,
'policy_actions': [],
'shared': False}
@gbp_attributes
def get_create_policy_rule_attrs():
return {'name': 'pr1',
'description': 'test policy rule',
'enabled': True,
'policy_classifier_id': _uuid(),
'policy_actions': [_uuid()],
'shared': False}
@gbp_attributes
def get_update_policy_rule_attrs():
return {'name': 'new_name'}
@gbp_attributes
def get_create_policy_rule_set_default_attrs():
return {'name': '',
'description': '',
'child_policy_rule_sets': [],
'policy_rules': [],
'shared': False}
@gbp_attributes
def get_create_policy_rule_set_attrs():
return {'name': 'policy_rule_set1',
'description': 'test policy_rule_set',
'child_policy_rule_sets': [_uuid()],
'policy_rules': [_uuid()],
'shared': False}
@gbp_attributes
def get_update_policy_rule_set_attrs():
return {'name': 'new_name'}
@gbp_attributes
def get_create_network_service_policy_default_attrs():
return {'name': '', 'description': '',
'network_service_params': [], 'shared': False}
@gbp_attributes
def get_create_network_service_policy_attrs():
return {'name': 'nsp1',
'shared': False,
'description': 'test Net Svc Policy',
'network_service_params': [{'type': 'ip_single', 'name': 'vip',
'value': 'self_subnet'}]}
@gbp_attributes
def get_update_network_service_policy_attrs():
return {'name': 'new_name'}
@gbp_attributes
def get_create_external_policy_default_attrs():
return {'name': '', 'description': '',
'external_segments': [],
'provided_policy_rule_sets': {},
'consumed_policy_rule_sets': {},
'shared': False}
@gbp_attributes
def get_create_external_policy_attrs():
return {'name': 'ep1',
'description': 'test ep',
'external_segments': [_uuid()],
'provided_policy_rule_sets': {_uuid(): None},
'consumed_policy_rule_sets': {_uuid(): None},
'shared': False}
@gbp_attributes
def get_update_external_policy_attrs():
return {'name': 'new_name'}
@gbp_attributes
def get_create_external_segment_default_attrs():
return {'name': '', 'description': '',
'external_routes': [],
'ip_version': 4,
'cidr': '172.16.0.0/12',
'port_address_translation': False,
'shared': False}
@gbp_attributes
def get_create_external_segment_attrs():
return {'name': 'es1',
'description': 'test ep',
'external_routes': [{'destination': '0.0.0.0/0',
'nexthop': '192.168.0.1'}],
'cidr': '192.168.0.0/24',
'ip_version': 4, 'port_address_translation': True,
'shared': False}
@gbp_attributes
def get_update_external_segment_attrs():
return {'name': 'new_name'}
@gbp_attributes
def get_create_nat_pool_default_attrs():
return {'name': '', 'description': '',
'external_segment_id': None, 'ip_version': 4,
'ip_pool': '172.16.0.0/16',
'shared': False}
@gbp_attributes
def get_create_nat_pool_attrs():
return {'name': 'es1',
'description': 'test ep',
'ip_version': 4,
'ip_pool': '172.16.0.0/16',
'external_segment_id': _uuid(),
'shared': False}
@gbp_attributes
def get_update_nat_pool_attrs():
return {'name': 'new_name'}
# Service Chain
@gbp_attributes
def get_create_service_profile_default_attrs():
return {'name': '', 'description': ''}
@gbp_attributes
def get_create_service_profile_attrs():
return {
'name': 'serviceprofile1',
'service_type': 'FIREWALL',
'description': 'test service profile',
}
@gbp_attributes
def get_update_service_profile_attrs():
return {
'name': 'new_name',
}
@gbp_attributes
def get_create_servicechain_node_default_attrs():
return {
'name': '',
'description': '',
'config': '{}',
'service_type': None,
'shared': False,
}
@gbp_attributes
def get_create_servicechain_node_attrs():
return {
'name': 'servicechain1',
'service_profile_id': _uuid(),
'description': 'test servicechain node',
'config': '{}',
'service_type': None,
'shared': True,
}
@gbp_attributes
def get_update_servicechain_node_attrs():
return {
'name': 'new_name',
'config': 'new_config',
}
@gbp_attributes
def get_create_servicechain_spec_default_attrs():
return {
'name': '',
'description': '',
'nodes': [],
'shared': False,
}
@gbp_attributes
def get_create_servicechain_spec_attrs():
return {
'name': 'servicechainspec1',
'nodes': [_uuid(), _uuid()],
'description': 'test servicechain spec',
'shared': True,
}
@gbp_attributes
def get_update_servicechain_spec_attrs():
return {
'name': 'new_name',
'nodes': [_uuid()]
}
@gbp_attributes
def get_create_servicechain_instance_default_attrs():
return {'name': '', 'description': '', 'config_param_values': "{}"}
@gbp_attributes
def get_create_servicechain_instance_attrs():
return {
'name': 'servicechaininstance1',
'servicechain_specs': [_uuid()],
'provider_ptg_id': _uuid(),
'consumer_ptg_id': _uuid(),
'management_ptg_id': _uuid(),
'classifier_id': _uuid(),
'config_param_values': "{}",
'description': 'test servicechain instance'
}
def get_update_servicechain_instance_attrs():
return {
'name': 'new_name',
'servicechain_specs': [_uuid()],
'classifier_id': _uuid()
}
@gbp_attributes
def get_create_application_policy_group_default_attrs_and_prj_id():
return {'name': '', 'description': '', 'shared': False}
@gbp_attributes
def get_create_policy_target_default_attrs_and_prj_id():
return {'name': '', 'description': '', 'policy_target_group_id': None,
'cluster_id': ''}
@gbp_attributes
def get_create_policy_target_group_default_attrs_and_prj_id():
return {'name': '', 'description': '', 'l2_policy_id': None,
'application_policy_group_id': None,
'provided_policy_rule_sets': {},
'consumed_policy_rule_sets': {},
'network_service_policy_id': None, 'shared': False,
'service_management': False}
@gbp_attributes
def get_create_l2_policy_default_attrs_and_prj_id():
return {'name': '', 'description': '', 'shared': False,
'inject_default_route': True}
@gbp_attributes
def get_create_l3_policy_default_attrs_and_prj_id():
return {'name': '', 'description': '', 'ip_version': 4,
'ip_pool': '10.0.0.0/8', 'subnet_prefix_length': 24,
'external_segments': {}, 'shared': False}
@gbp_attributes
def get_create_policy_action_default_attrs_and_prj_id():
return {'name': '',
'description': '',
'action_type': 'allow',
'action_value': None,
'shared': False}
@gbp_attributes
def get_create_policy_classifier_default_attrs_and_prj_id():
return {'name': '',
'description': '',
'protocol': None,
'port_range': None,
'direction': None,
'shared': False}
@gbp_attributes
def get_create_policy_rule_default_attrs_and_prj_id():
return {'name': '',
'description': '',
'enabled': True,
'policy_actions': [],
'shared': False}
@gbp_attributes
def get_create_policy_rule_set_default_attrs_and_prj_id():
return {'name': '',
'description': '',
'child_policy_rule_sets': [],
'policy_rules': [],
'shared': False}
@gbp_attributes
def get_create_network_service_policy_default_attrs_and_prj_id():
return {'name': '', 'description': '',
'network_service_params': [], 'shared': False}
@gbp_attributes
def get_create_external_policy_default_attrs_and_prj_id():
return {'name': '', 'description': '',
'external_segments': [],
'provided_policy_rule_sets': {},
'consumed_policy_rule_sets': {},
'shared': False}
@gbp_attributes
def get_create_external_segment_default_attrs_and_prj_id():
return {'name': '', 'description': '',
'external_routes': [],
'ip_version': 4,
'cidr': '172.16.0.0/12',
'port_address_translation': False,
'shared': False}
@gbp_attributes
def get_create_nat_pool_default_attrs_and_prj_id():
return {'name': '', 'description': '',
'external_segment_id': None, 'ip_version': 4,
'ip_pool': '172.16.0.0/16',
'shared': False}
# Service Chain
@gbp_attributes
def get_create_service_profile_default_attrs_and_prj_id():
return {'name': '', 'description': ''}
@gbp_attributes
def get_create_servicechain_node_default_attrs_and_prj_id():
return {
'name': '',
'description': '',
'config': '{}',
'service_type': None,
'shared': False,
}
@gbp_attributes
def get_create_servicechain_spec_default_attrs_and_prj_id():
return {
'name': '',
'description': '',
'nodes': [],
'shared': False,
}
@gbp_attributes
def get_create_servicechain_instance_default_attrs_and_prj_id():
return {'name': '', 'description': '', 'config_param_values': "{}"}
def get_resource_plural(resource):
if resource.endswith('y'):
resource_plural = resource.replace('y', 'ies')
else:
resource_plural = resource + 's'
return resource_plural
|
py | 1a3b2559bcaac14edf74d8f3724d5588526b0fd4 | import pickle
import sys
import requests
from getpass import getpass
from bs4 import BeautifulSoup
import itertools
import os
import sqlite3
import hashlib
from pathlib import Path
fullpath = sys.argv[0]
pathspc = os.path.dirname(fullpath)
data = dict()
dirpath = sys.argv[1] # Gives the absolutepath of directory
obsdirpathlist = dirpath.split('/')[:-1]
obsdirpath = str()
for i in obsdirpathlist:
obsdirpath = obsdirpath + i + '/' # this is observing dir full path it includes slash at the end and beginning
observing_dir = dirpath.split("/")[-1] # this is observing directory eg.outlab30
try:
print("Reading User information ...", end=" ")
f = open(pathspc+'/spc_user_data', 'rb')
data = pickle.load(f)
print("done")
except IOError:
print(red+"Authentication credentials not found"+end_c)
u = input("Enter Username:")
p = getpass("Enter Password:")
data['username'] = u
data['password'] = p
url = input("Enter Server URL:")
if url[len(url)-1] != '/':
url = url + '/'
data['url'] = url
save = input('Would you like to save the configuration? [y/n]:')
if save == 'y' or save == 'Y':
f = open(pathspc+'/spc_user_data','wb')
pickle.dump(data,f)
print("User credentials updated")
f.close()
base_url = data['url']
url = base_url + 'login/'
def md5sum(filename, blocksize=65536):
hash = hashlib.md5()
with open(filename, "rb") as f:
for block in iter(lambda: f.read(blocksize), b""):
hash.update(block)
return hash.hexdigest()
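# Sketch: the file is hashed in 64 KiB blocks so large files can be checksummed without
# loading them fully into memory, e.g.
#
#   digest = md5sum('/tmp/example.bin')   # hypothetical path; returns a hex digest string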
def listsmd5unequal(ldict,sdict): # dicts of filenames and md5s
unequalmd5s=[]
for i in ldict:
if ldict[i] != sdict[i] :
unequalmd5s.append(i)
return unequalmd5s
def ifstr1startswithstr2(str1,str2):
if(str1.split('/')[0]==str2):
return True
else:
return False
def falselist(n):
l = []
for i in range(0,n):
l.append("False")
return l
def truelist(n):
l = []
for i in range(0,n):
l.append("True")
return l
insync = str()
empty = []
red = '\033[91m'
green = '\033[92m'
end_c = '\033[0m'
files=dict()
var3 = str()
list777 = []
list77 = []
boollist=[]
for path, subdirs, files in os.walk(dirpath):
for filename in files:
f2 = os.path.join(path, filename)
list777.append(f2)
for i in list777:
serpath = i.replace(obsdirpath,"")
list77.append(serpath)
#this list consists of filepaths same as that of in server
list7 = []
data = dict()
try:
print("Reading User information ...", end=" ")
f = open(pathspc+'/spc_user_data', 'rb')
data = pickle.load(f)
print("done")
except IOError:
print("Authentication credentials not found")
u = input("Enter Username:")
p = getpass("Enter Password:")
data['username'] = u
data['password'] = p
url = input("Enter Server URL:")
if url[len(url)-1] != '/':
url = url + '/'
data['url'] = url
save = input('Would you like to save the configuration? [y/n]:')
if save == 'y' or save == 'Y':
f = open(pathspc+'/spc_user_data','wb')
pickle.dump(data,f)
print("User credentials updated")
f.close()
base_url = data['url']
url = base_url+'login/'
client = requests.session()
try:
print("connecting to server ...", end=" ")
client.get(url)
print("done")
except requests.ConnectionError as e:
print("The following error occured connecting to the server: {}\n Please try again".format(e))
client.close()
sys.exit()
try:
csrf = client.cookies['csrftoken']
except():
print("Error obtaining csrf token")
client.close()
sys.exit()
payload = dict(username=data['username'], password=data['password'], csrfmiddlewaretoken=csrf, next='/upload_file/')
try:
print("Sending request ...")
r = client.post(url, data=payload, headers=dict(Referer=url))
r.raise_for_status()
if r.status_code == 200:
print("Request sent ...")
if r.url == url:
print("User authentication failed. Please try again")
client.close()
sys.exit()
print("Reading files ...")
r1 = client.get(base_url)
soup = BeautifulSoup(r1.text, 'html.parser')
productDivs = soup.findAll(attrs = {"id" : "filepath"})
productDivs2 = soup.findAll('a', attrs = {"id" : "filename"})
productDivs3 = soup.findAll(attrs = {"id" : "md5sum"})
productDivs5 = soup.findAll(attrs = {"id" : "deletefile"})
productDivs6 = soup.findAll('a', attrs = {"id" : "startsync"})
productDivs7 = soup.findAll('a', attrs = {"id" : "stopsync"})
md5list={}
for link,l in zip(productDivs5,productDivs3):
pathinserver = link.string
if(ifstr1startswithstr2(pathinserver,observing_dir)==True):
list7.append(pathinserver)
md5list[pathinserver] = l.string.split()[1] #dict of filenames and md5sums of all files in server
insync = productDivs6[0].string
if insync=="True":
print(red+"Sync is going on in some other device.Please wait for sometime."+end_c)
sys.exit()
elif insync=="False":
try:
var = base_url+productDivs6[0]['href']
client.get(var,allow_redirects=True)
except() as e:
print("Error connecting: {}".format(e))
except() as e:
print("Error connecting: {}".format(e))
def download(listoffiles):
for lit in listoffiles:
for link,li in zip(productDivs2,productDivs):
if (li.string.split()[2]+link.string == lit):
var = base_url[:-1] + link['href']
try:
r2 = client.get(var, allow_redirects=True)
filep = str()
filen = obsdirpath+lit #absolute file path in local
l = filen.split('/')[1:-1]
for i in l:
filep = filep + '/' + i
path = Path(filep)
path.mkdir(parents=True, exist_ok=True)
f1 = open(filen, 'wb')
f1.write(r2.content)
f1.close()
except() as e:
print("Error connecting: {}".format(e))
def upload(listoffiles):
for lit in listoffiles:
l = lit.split('/')
l.pop()
fil = str()
for i in l:
fil = fil+i+"/"
filepath = fil
files = {'document': open(obsdirpath+lit, 'rb')}
try:
r2 = client.post(base_url+'upload_file/', data={'filepath': filepath, 'csrfmiddlewaretoken': r.cookies['csrftoken']}, files=files)
if r2.url == base_url:
print("File upload successful")
else:
print("An error occured")
except() as e:
print("error posting file: {}".format(e))
try:
serverset = set(list7)
localset = set(list77)
localmd5list = {}
inser = list(serverset - localset)
inloc = list(localset - serverset)
inboth = list(localset.intersection(serverset))
for i in inboth:
localmd5list[i] = md5sum(obsdirpath + i) #hash the local copy of the file
inboth_difmd = listsmd5unequal(localmd5list,md5list)
print()
if (serverset == localset) and len(inboth_difmd) == 0:
print(green+"The directory is same in both client and server"+end_c)
else:
print("The observed directory have some differences from the files in cloud")
print(green+"Files in cloud not in the local directory :"+end_c)
print(inser)
print()
print(green+"Files in the local directory that are not in the cloud :"+end_c)
print(inloc)
print()
print("Choose one of the below options: ")
print("1. Change the directory of local as that of in cloud")
print("2. Change the files in cloud same as that of in local")
print("3. Merge both the local and files in cloud")
print("4. Don't change anything")
x = input()
if(int(x)==1):
for lit in inloc:
os.remove(obsdirpath+lit)
download(inser+inboth_difmd)
print(green+"Done"+end_c)
elif(int(x)==2):
for lit in inser:
for link in productDivs5:
if(link.string == lit):
var = base_url[:-1]+link['href']
try:
r2 = client.get(var, allow_redirects=True)
print("Deleting ...", end=" ")
print(green+"done"+end_c)
except() as e:
print(red+"Error connecting: {}".format(e)+end_c)
upload(inloc+inboth_difmd)
print(green+"Done"+end_c)
elif(int(x)==3):
download(inser)
upload(inloc)
if len(inboth_difmd) != 0:
print("There are some files in the server different from that of local files with same name.Please let us know what to do by choosing one of the three options:")
print(green+"Files present in both cloud and local directory but with different file content :"+end_c)
print(inboth_difmd)
print()
print("1.Download all the files from server")
print("2.Upload all the local fles to server")
print("3.Chose manually what to do with each file")
inp = input()
if inp == '1':
download(inboth_difmd)
elif inp == '2':
upload(inboth_difmd)
elif inp == '3':
for lit in inboth_difmd:
print("1.Download the file from server")
print("2.Upload the local file to server")
inp = input()
if inp == '1':
for link,li in zip(productDivs2,productDivs):
if (li.string.split()[2]+link.string == lit):
var = base_url[:-1] + link['href']
try:
r2 = client.get(var, allow_redirects=True)
filep = str()
filen = obsdirpath+lit #absolute file path in local
l = filen.split('/')[1:-1]
for i in l:
filep = filep + '/' + i
path = Path(filep)
path.mkdir(parents=True, exist_ok=True)
f1 = open(filen, 'wb')
f1.write(r2.content)
f1.close()
except() as e:
print("Error connecting: {}".format(e))
elif inp == '2':
l = lit.split('/')
l.pop()
fil = str()
for i in l:
fil = fil+i+"/"
filepath = fil
files = {'document': open(obsdirpath+lit, 'rb')}
try:
r2 = client.post(base_url+'upload_file/', data={'filepath': filepath, 'csrfmiddlewaretoken': r.cookies['csrftoken']}, files=files)
if r2.url == base_url:
print("File upload successful")
else:
print("An error occured")
except() as e:
print("error posting file: {}".format(e))
print("Done")
else:
print("Nothing is changed")
try:
var = base_url+productDivs7[0]['href']
client.get(var,allow_redirects=True)
except() as e:
print("Error connecting: {}".format(e))
except requests.exceptions.HTTPError as e:
print('HTTP Error: {}'.format(e))
except requests.exceptions.RequestException as e:
print('Connection Error: {}'.format(e))
client.close()
sys.exit()
client.close() |
py | 1a3b273a6b703dcc42edade61e945b3caf142528 | # coding=utf-8
# Copyright 2019 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Generate deeplesion like files, smaller and with random data.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import os
import zipfile
from absl import app
from absl import flags
import tensorflow as tf
import random
import tempfile
import numpy as np
from tensorflow_datasets.core import utils
from tensorflow_datasets.core.utils import py_utils
from tensorflow_datasets.testing import fake_data_utils
flags.DEFINE_string('tfds_dir', py_utils.tfds_dir(),
'Path to tensorflow_datasets directory')
FLAGS = flags.FLAGS
def _output_dir():
return os.path.join(FLAGS.tfds_dir, 'testing', 'test_data',
'fake_examples', 'deeplesion')
MIN_HEIGHT_WIDTH = 10
MAX_HEIGHT_WIDTH = 15
CHANNELS_NB = 1
def get_random_picture(height=None, width=None, channels=CHANNELS_NB):
"""Returns random picture as np.ndarray (int)."""
height = height or random.randrange(MIN_HEIGHT_WIDTH, MAX_HEIGHT_WIDTH)
width = width or random.randrange(MIN_HEIGHT_WIDTH, MAX_HEIGHT_WIDTH)
return np.random.randint(
32768, size=(height, width, channels), dtype=np.uint16)
def get_random_png(height=None, width=None, channels=CHANNELS_NB):
"""Returns path to PNG picture."""
# Big randomly generated pngs take large amounts of diskspace.
# Instead, we resize a 4x4 random image to the png size.
image = get_random_picture(4, 4, channels)
image = tf.compat.v1.image.resize_nearest_neighbor(
tf.expand_dims(image, 0), (height, width))[0]
png = tf.image.encode_png(image)
fobj = tempfile.NamedTemporaryFile(delete=False, mode='wb', suffix='.PNG')
fobj.write(png.numpy())
fobj.close()
return fobj.name
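# Sketch: a 4x4 random uint16 image is upscaled with nearest-neighbour resizing so the
# fake PNGs stay cheap to generate while matching the requested dimensions, e.g.
#
#   png_path = get_random_png(512, 512)   # temporary single-channel .PNG file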
def _generate_data(num_zipfiles, num_images):
"""Generate images archive."""
paths = []
for i in range(1, num_zipfiles+1):
paths.append(os.path.join(_output_dir(), 'zipfile{:02d}.zip'.format(i)))
idx = 0
for p in paths:
idx += 1
print('Following files will be zipped in {}:'.format(p))
with zipfile.ZipFile(p, "w") as image_zip:
# Generate images
for i in range(0, num_images):
png = get_random_png(512, 512)
image_name = os.path.join("Images_png", "{:06d}_01_01".format(idx),
"{:03d}.png".format(i))
image_zip.write(png, image_name, zipfile.ZIP_DEFLATED)
print(image_name)
print('All files zipped successfully!')
return paths
def _generate_csv():
# Generate annotations
csv_dir = _output_dir()
assert tf.io.gfile.exists(csv_dir), 'Oops, base_folder does not exist'
ann_file = os.path.join(csv_dir, 'fake_DL_info.csv')
ann_info = \
[['File_name','Patient_index','Study_index','Series_ID','Key_slice_index','Measurement_coordinates','Bounding_boxes','Lesion_diameters_Pixel_','Normalized_lesion_location','Coarse_lesion_type','Possibly_noisy','Slice_range','Spacing_mm_px_','Image_size','DICOM_windows','Patient_gender','Patient_age','Train_Val_Test'],
['000001_01_01_000.png',1,1,1,0,"233.537, 95.0204, 234.057, 106.977, 231.169, 101.605, 236.252, 101.143","226.169, 90.0204, 241.252, 111.977","11.9677, 5.10387","0.44666, 0.283794, 0.434454",3,0,"103, 115","0.488281, 0.488281, 5","512, 512","-175, 275",'F',62,1],
['000001_01_01_001.png',1,1,1,1,"224.826, 289.296, 224.016, 305.294, 222.396, 297.194, 228.978, 297.903","217.396, 284.296, 233.978, 310.294","16.019, 6.61971","0.431015, 0.485238, 0.340745",3,0,"8, 23","0.314453, 0.314453, 5","512, 512","-175, 275",'F',72,1],
['000001_01_01_002.png',1,1,1,2,"272.323, 320.763, 246.522, 263.371, 234.412, 305.494, 280.221, 288.118","229.412, 258.371, 285.221, 325.763","62.9245, 48.9929","0.492691, 0.503106, 0.351754",3,0,"8, 23","0.314453, 0.314453, 5","512, 512","-175, 275",'F',72,1],
['000001_01_01_003.png',1,1,1,3,"257.759, 157.618, 260.018, 133.524, 251.735, 145.571, 265.288, 146.841","246.735, 128.524, 270.288, 162.618","24.1998, 13.6123","0.498999, 0.278924, 0.452792",3,0,"58, 118","0.732422, 0.732422, 1","512, 512","-175, 275",'F',73,1],
['000001_01_01_004.png',1,1,1,4,"304.019, 230.585, 292.217, 211.789, 304.456, 218.783, 296.151, 223.998","287.217, 206.789, 309.456, 235.585","22.1937, 9.8065","0.572678, 0.42336, 0.445674",3,0,"11, 23","0.666016, 0.666016, 5","512, 512","-175, 275",'F',73,1],
['000002_01_01_000.png',2,1,1,0,"238.314, 261.228, 235.858, 268.594, 240.36, 265.729, 234.222, 262.865","229.222, 256.228, 245.36, 273.594","7.76388, 6.77335","0.437715, 0.573812, 0.609054",2,0,"156, 168","0.859375, 0.859375, 5","512, 512","-175, 275",'F',51,2],
['000002_01_01_001.png',2,1,1,1,"275.758, 191.194, 261.137, 190.799, 269.83, 185.662, 269.83, 195.541","256.137, 180.662, 280.758, 200.541","14.6261, 9.87891","0.508777, 0.438113, 0.66217",2,0,"170, 182","0.859375, 0.859375, 5","512, 512","-175, 275",'F',51,2],
['000002_01_01_002.png',2,1,1,2,"240.988, 215.969, 228.479, 219.186, 235.984, 223.475, 232.41, 212.395","223.479, 207.395, 245.988, 228.475","12.9166, 11.6422","0.43167, 0.47806, 0.702035",2,0,"44, 83","0.976562, 0.976562, 5","512, 512","-175, 275",'F',59,2],
['000002_01_01_003.png',2,1,1,3,"313.615, 261.093, 293.88, 259.183, 302.156, 253.135, 300.564, 269.051","288.88, 248.135, 318.615, 274.051","19.8278, 15.9952","0.596974, 0.57036, 0.60468",2,0,"44, 83","0.976562, 0.976562, 5","512, 512","-175, 275",'F',59,2],
['000002_01_01_004.png',2,1,1,4,"289.383, 205.23, 277.907, 202.448, 285.21, 198.623, 283.819, 209.055","272.907, 193.623, 294.383, 214.055","11.8077, 10.5244","0.536447, 0.458577, 0.661835",2,0,"44, 83","0.976562, 0.976562, 5","512, 512","-175, 275",'F',59,2],
['000003_01_01_000.png',3,1,1,0,"222.361, 259.958, 214.941, 273.809, 222.856, 269.851, 213.456, 264.41","208.456, 254.958, 227.856, 278.809","15.7138, 10.8607","0.395444, 0.586444, 0.612088",2,0,"44, 83","0.976562, 0.976562, 5","512, 512","-175, 275",'F',59,3],
['000003_01_01_001.png',3,1,1,1,"324.745, 261.451, 270.106, 260.369, 301.483, 249.008, 300.915, 277.68","265.106, 244.008, 329.745, 282.68","54.6491, 28.6773","0.560316, 0.501742, 0.690962",2,0,"35, 47","0.976562, 0.976562, 5","512, 512","-175, 275",'F',60,3],
['000003_01_01_002.png',3,1,1,2,"357.938, 289.428, 364.226, 314.912, 367.536, 300.35, 350.988, 305.976","345.988, 284.428, 372.536, 319.912","26.2489, 17.4787","0.69148, 0.624435, 0.616547",-1,0,"4, 28","0.488281, 0.488281, 2.5","512, 512","-175, 275",'F',19,3],
['000003_01_01_003.png',3,1,1,3,"357.938, 289.428, 364.226, 314.912, 367.536, 300.35, 350.988, 305.976","345.988, 284.428, 372.536, 319.912","26.2489, 17.4787","0.69148, 0.624435, 0.616547",-1,0,"4, 28","0.488281, 0.488281, 2.5","512, 512","-175, 275",'F',19,3],
['000003_01_01_004.png',3,1,1,4,"357.938, 289.428, 364.226, 314.912, 367.536, 300.35, 350.988, 305.976","345.988, 284.428, 372.536, 319.912","26.2489, 17.4787","0.69148, 0.624435, 0.616547",-1,0,"4, 28","0.488281, 0.488281, 2.5","512, 512","-175, 275",'F',19,3],
]
with tf.io.gfile.GFile(ann_file,'w') as csv_file:
writer = csv.writer(csv_file, delimiter=',')
for line in ann_info:
writer.writerow(line)
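# Optional sanity check -- a minimal sketch only, assuming the imports (os, csv,
# tf) and the _output_dir() helper used above are available at module level:
# read the generated csv back and return the File_name column so it can be
# compared against the zipped Images_png/... paths written in _generate_data.
def _check_fake_csv():
    csv_path = os.path.join(_output_dir(), 'fake_DL_info.csv')
    with tf.io.gfile.GFile(csv_path, 'r') as f:
        rows = list(csv.reader(f))
    header, body = rows[0], rows[1:]
    assert header[0] == 'File_name'
    return [row[0] for row in body]  # e.g. '000001_01_01_000.png'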
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
dirs = _generate_data(3, 5)
_generate_csv()
if __name__ == '__main__':
app.run(main)
|
py | 1a3b27b254787cb61cbd82181af1cc3b3f195645 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyLuigi(PythonPackage):
"""Workflow mgmgt + task scheduling + dependency resolution"""
homepage = "https://github.com/spotify/luigi"
pypi = "luigi/luigi-2.8.3.tar.gz"
version('2.8.3', sha256='8b5c84a3c3f4df07309056d3b98348b93c054f1931b7ee22fc29e7989f645c9e')
depends_on('[email protected]:2.8,3.4:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('[email protected]:4.99', type=('build', 'run'))
depends_on('py-python-daemon@:2.1', type=('build', 'run'))
depends_on('[email protected]', when='@2.8.3:', type=('build', 'run'))
|
py | 1a3b28899ebc08558b4afd0b41bfb44f9e8d0d54 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Classes and utilities that manage spectral model specific to diffuse analyses
"""
from __future__ import absolute_import, division, print_function
import yaml
class SpectralLibrary(object):
""" A small helper class that serves as an alias dictionary for spectral models
"""
def __init__(self, spectral_dict):
"""C'tor, loads the dictionary
"""
self.spectral_dict = spectral_dict
def update(self, spectral_dict):
"""Update the dictionary """
self.spectral_dict.update(spectral_dict)
def __getitem__(self, key):
"""Get an item from the dictionary """
return self.spectral_dict.get(key, {})
@classmethod
def create_from_yamlstr(cls, yamlstr):
"""Create the dictionary for a yaml file
"""
spectral_dict = yaml.load(yamlstr)
return cls(spectral_dict)
@classmethod
def create_from_yaml(cls, yamlfile):
"""Create the dictionary for a yaml file
"""
return cls.create_from_yamlstr(open(yamlfile))
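# Minimal usage sketch; the yaml content and key names below are made up for
# illustration -- real dictionaries come from the analysis configuration files.
def _example_usage():
    """Build a SpectralLibrary from a yaml string and look up aliases."""
    yamlstr = "default: {SpectrumType: PowerLaw, Index: 2.0}"
    speclib = SpectralLibrary.create_from_yamlstr(yamlstr)
    assert speclib['default']['SpectrumType'] == 'PowerLaw'
    assert speclib['missing_key'] == {}  # unknown aliases fall back to an empty dict
    return speclib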
|
py | 1a3b29c5c2d92f90cb6da369d4f2d64117120207 | from django.contrib.admin.views.decorators import staff_member_required
from django.urls import path
from .views import ImageUploadView, LinkToolView
urlpatterns = [
path(
'image_upload/',
staff_member_required(ImageUploadView.as_view()),
name='editorjs_image_upload',
),
path(
'linktool/',
staff_member_required(LinkToolView.as_view()),
name='editorjs_linktool',
),
]
|
py | 1a3b2afb95180ee068a27a73ffa81d6c5496dd72 | '''
validate survey simulations using CMX data.
updates
-------
* 5/19/2020: created script and test to compare which wavelength range I should
use for the exposure time correction factor
'''
import os
import h5py
import fitsio
import numpy as np
import astropy.units as u
# -- feasibgs --
from feasibgs import util as UT
from feasibgs import catalogs as Cat
from feasibgs import forwardmodel as FM
# -- desihub --
import desispec.io
# -- plotting --
import matplotlib as mpl
import matplotlib.pyplot as plt
if 'NERSC_HOST' not in os.environ:
mpl.rcParams['text.usetex'] = True
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['axes.linewidth'] = 1.5
mpl.rcParams['axes.xmargin'] = 1
mpl.rcParams['xtick.labelsize'] = 'x-large'
mpl.rcParams['xtick.major.size'] = 5
mpl.rcParams['xtick.major.width'] = 1.5
mpl.rcParams['ytick.labelsize'] = 'x-large'
mpl.rcParams['ytick.major.size'] = 5
mpl.rcParams['ytick.major.width'] = 1.5
mpl.rcParams['legend.frameon'] = False
dir = '/global/cscratch1/sd/chahah/feasibgs/cmx/survey_sims/'
def validate_spectral_pipeline():
''' validate the spectral pipeline by
1. constructing spectra from fiber acceptance fraction scaled smoothed CMX
spectra with CMX sky surface brightness
2. compare noise levels to CMX observations
'''
from scipy.signal import medfilt
import desisim.simexp
import specsim.instrument
from desitarget.cmx import cmx_targetmask
np.random.seed(0)
tileid = 70502
date = 20200225
expid = 52113
ispec = 0
dir_gfa = '/global/cfs/cdirs/desi/users/ameisner/GFA/conditions'
dir_redux = "/global/cfs/cdirs/desi/spectro/redux/daily"
dir_coadd = '/global/cfs/cdirs/desi/users/chahah/bgs_exp_coadd/'
# get sky surface brightness by correcting for the throughput on the CMX
# sky data
f_sky = lambda band: os.path.join(dir_redux,
'exposures', str(date), str(expid).zfill(8),
'sky-%s%i-%s.fits' % (band, ispec, str(expid).zfill(8)))
sky_b = desispec.io.read_sky(f_sky('b'))
sky_r = desispec.io.read_sky(f_sky('r'))
sky_z = desispec.io.read_sky(f_sky('z'))
wave, sky_electrons = bs_coadd(
[sky_b.wave, sky_r.wave, sky_z.wave],
[sky_b.flux, sky_r.flux, sky_z.flux])
# exposure time
_frame = desispec.io.read_frame(f_sky('b').replace('sky-', 'frame-'))
exptime = _frame.meta['EXPTIME']
print('exp.time = %.fs' % exptime)
# get which are good fibers from coadd file
f_coadd = os.path.join(dir_coadd, 'coadd-%i-%i-%i-%s.fits' % (tileid, date, ispec, str(expid).zfill(8)))
coadd = fitsio.read(f_coadd)
is_good = (coadd['FIBERSTATUS'] == 0)
is_sky = (coadd['CMX_TARGET'] & cmx_targetmask.cmx_mask.mask('SKY')) != 0
good_sky = is_good & is_sky
# get throughput for the cameras
config = desisim.simexp._specsim_config_for_wave(wave, dwave_out=0.8, specsim_config_file='desi')
instrument = specsim.instrument.initialize(config, True)
throughput = np.amax([instrument.cameras[0].throughput, instrument.cameras[1].throughput, instrument.cameras[2].throughput], axis=0)
desi_fiber_area = 1.862089 # fiber area
# calculate sky brightness
sky_bright = np.median(sky_electrons[good_sky,:], axis=0) / throughput / instrument.photons_per_bin / exptime * 1e17
# get fiber acceptance fraction and airmass
gfa = fitsio.read(os.path.join(dir_gfa,
'offline_all_guide_ccds_thru_20200315.fits'))
isexp = (gfa['EXPID'] == expid)
fibloss = gfa['TRANSPARENCY'][isexp] * gfa['FIBER_FRACFLUX'][isexp]
fibloss = np.median(fibloss[~np.isnan(fibloss)])
print('fiber loss = (TRANSP) x (FFRAC) = %f' % fibloss)
airmass = np.median(gfa['AIRMASS'][isexp])
print('airmass = %.2f' % airmass)
# select BGS spectra
coadd_wave = fitsio.read(f_coadd, ext=2)
coadd_flux = fitsio.read(f_coadd, ext=3)
coadd_ivar = fitsio.read(f_coadd, ext=4)
is_BGS = (coadd['CMX_TARGET'] & cmx_targetmask.cmx_mask.mask('SV0_BGS')) != 0
gal_cut = is_BGS & (np.sum(coadd_flux, axis=1) != 0)
igals = np.random.choice(np.arange(len(gal_cut))[gal_cut], size=5,
replace=False)
igals = np.arange(len(coadd['FIBER']))[coadd['FIBER'] == 143]
for igal in igals:
# source flux is the smoothed CMX spectra
source_flux = np.clip(np.interp(wave, coadd_wave,
medfilt(coadd_flux[igal,:], 101)), 0, None)
# simulate the exposures using the spectral simulation pipeline
fdesi = FM.fakeDESIspec()
bgs = fdesi.simExposure(
wave,
np.atleast_2d(source_flux * fibloss), # scale by fiber acceptance fraction
exptime=exptime,
airmass=airmass,
Isky=[wave, sky_bright],
dwave_out=0.8,
filename=None)
# barebone specsim pipeline for comparison
from specsim.simulator import Simulator
desi = Simulator(config, num_fibers=1)
desi.observation.exposure_time = exptime * u.s
desi.atmosphere._surface_brightness_dict[desi.atmosphere.condition] = \
np.interp(desi.atmosphere._wavelength, wave, sky_bright) * \
desi.atmosphere.surface_brightness.unit
desi.atmosphere._extinct_emission = False
desi.atmosphere._moon = None
desi.atmosphere.airmass = airmass # high airmass
desi.simulate(source_fluxes=np.atleast_2d(source_flux) * 1e-17 * desi.simulated['source_flux'].unit,
fiber_acceptance_fraction=np.tile(fibloss,
np.atleast_2d(source_flux).shape))
random_state = np.random.RandomState(0)
desi.generate_random_noise(random_state, use_poisson=True)
scale=1e17
waves, fluxes, ivars, ivars_electron = [], [], [], []
for table in desi.camera_output:
_wave = table['wavelength'].astype(float)
_flux = (table['observed_flux']+table['random_noise_electrons']*table['flux_calibration']).T.astype(float)
_flux = _flux * scale
_ivar = table['flux_inverse_variance'].T.astype(float)
_ivar = _ivar / scale**2
waves.append(_wave)
fluxes.append(_flux[0])
ivars.append(_ivar[0])
fig = plt.figure(figsize=(15,10))
sub = fig.add_subplot(211)
sub.plot(coadd_wave, coadd_flux[igal,:] * fibloss, c='C0', lw=1,
label='(coadd flux) x (fib.loss)')
for i_b, band in enumerate(['b', 'r', 'z']):
lbl = None
if band == 'b': lbl = 'spectral sim.'
sub.plot(bgs.wave[band], bgs.flux[band][0], c='C1', lw=1,
label=lbl)
sub.plot(waves[i_b], fluxes[i_b] *fibloss, c='C2', lw=1, ls=':')
sub.plot(wave, source_flux * fibloss, c='k', lw=1, ls='--',
label='source flux')
sub.legend(loc='upper right', frameon=True, fontsize=20)
sub.set_xlim(3600, 9800)
sub.set_ylabel('flux [$10^{-17} erg/s/cm^2/A$]', fontsize=25)
sub.set_ylim(-1., 5.)
sub = fig.add_subplot(212)
sub.plot(coadd_wave, coadd_ivar[igal,:] * fibloss**-2, c='C0', lw=1,
label=r'(coadd ivar) / (fib.loss$)^2$')
for i_b, band in enumerate(['b', 'r', 'z']):
sub.plot(bgs.wave[band], bgs.ivar[band][0], c='C1', lw=1)
sub.plot(waves[i_b], ivars[i_b] * fibloss**-2, c='C2', lw=1, ls=':')
sub.legend(loc='upper right', frameon=True, fontsize=20)
sub.set_xlabel('wavelength [$A$]', fontsize=20)
sub.set_xlim(3600, 9800)
sub.set_ylabel('ivar', fontsize=25)
sub.set_ylim(0., None)
fig.savefig(os.path.join(dir, 'valid.spectral_pipeline.exp%i.%i.png' % (expid, igal)),
bbox_inches='tight')
return None
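# A minimal sketch of the electrons -> sky surface brightness conversion used in
# validate_spectral_pipeline above. Inputs are assumed to be in the same units
# as there (sky counts in electrons per wavelength bin, specsim camera
# throughput and photons_per_bin, exposure time in seconds); the 1e17 factor
# puts the output in the 10^-17 erg/s/cm^2/A flux units used throughout.
def _sky_electrons_to_sb(sky_electrons, throughput, photons_per_bin, exptime):
    # median over the (good) sky fibers, then divide out the instrument
    # response and exposure time to recover the incident sky brightness
    return np.median(sky_electrons, axis=0) / throughput / photons_per_bin / exptime * 1e17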
def validate_spectral_pipeline_GAMA_source():
''' compare the fiber flux scaled source spectra from the spectral simulation
pipeline to fiber loss corrected cframes CMX data for overlapping GAMA G12
galaxies.
'''
import glob
from scipy.signal import medfilt
from scipy.interpolate import interp1d
from desitarget.cmx import cmx_targetmask
from pydl.pydlutils.spheregroup import spherematch
np.random.seed(0)
tileid = 70502 #[66014, 70502] #66014 is with low transparency
date = 20200225
expids = [52112]#, 52113, 52114, 52115, 52116] # terrible FWHM
#tileid = 66014 # low transparency
#date = 20200314
#expids = [55432]
dir_gfa = '/global/cfs/cdirs/desi/users/ameisner/GFA/conditions'
dir_redux = "/global/cfs/cdirs/desi/spectro/redux/daily"
dir_coadd = '/global/cfs/cdirs/desi/users/chahah/bgs_exp_coadd/'
# read in GAMA + Legacy catalog
cata = Cat.GamaLegacy()
g12 = cata.Read('g12', dr_gama=3, dr_legacy=7)
g12_ra = g12['legacy-photo']['ra']
g12_dec = g12['legacy-photo']['dec']
Ng12 = len(g12_ra)
# match GAMA galaxies to templates
bgs3 = FM.BGStree()
template_match = bgs3._GamaLegacy(g12)
hasmatch = (template_match != -999)
# ra/dec cut for GAMA so we only keep ones near the tile
cut_gama = ((g12_ra > 174.0) & (g12_ra < 186.0) & (g12_dec > -3.0) & (g12_dec < 2.0) & hasmatch)
g12_ra = g12_ra[cut_gama]
g12_dec = g12_dec[cut_gama]
g12_z = g12['gama-spec']['z'][cut_gama]
g12_rfib = UT.flux2mag(g12['legacy-photo']['fiberflux_r'])[cut_gama]
g12_rmag_gama = g12['gama-photo']['r_model'][cut_gama] # r-band magnitude from GAMA (SDSS) photometry
print('%i galaxies in GAMA G12 + Legacy' % len(g12_ra))
# match coadd objects to G12+legacy catalog based on RA and Dec
for expid in expids:
print('--- %i ---' % expid)
# get fiber acceptance fraction for exposure from GFA
gfa = fitsio.read(os.path.join(dir_gfa,
'offline_all_guide_ccds_thru_20200315.fits'))
isexp = (gfa['EXPID'] == expid)
fwhm = gfa['FWHM_ASEC'][isexp]
print(' (FWHM) = %f' % np.median(fwhm[~np.isnan(fwhm)]))
transp = gfa['TRANSPARENCY'][isexp]
transp = np.median(transp[~np.isnan(transp)])
print(' (TRANSP) = %f' % transp)
fibloss = gfa['TRANSPARENCY'][isexp] * gfa['FIBER_FRACFLUX'][isexp]
fibloss = np.median(fibloss[~np.isnan(fibloss)])
print(' fiber loss = (TRANSP) x (FFRAC) = %f' % fibloss)
# spectrographs available for the exposure
ispecs = np.sort([int(os.path.basename(fframe).split('-')[1].replace('z', ''))
for fframe in glob.glob(os.path.join(dir_redux,
'exposures', str(date), str(expid).zfill(8),
'frame-z*.fits'))])
match_gama, coadd_fluxes = [], []
for ispec in ispecs:
# select BGS galaxies from the coadds
f_coadd = os.path.join(dir_coadd, 'coadd-%i-%i-%i-%s.fits' % (tileid, date, ispec, str(expid).zfill(8)))
coadd = fitsio.read(f_coadd)
coadd_wave = fitsio.read(f_coadd, ext=2)
coadd_flux = fitsio.read(f_coadd, ext=3)
is_BGS = (coadd['CMX_TARGET'] & cmx_targetmask.cmx_mask.mask('SV0_BGS')) != 0
gal_cut = is_BGS & (np.sum(coadd_flux, axis=1) != 0)
# select ones that are in GAMA by matching RA and Dec
match = spherematch(g12_ra, g12_dec,
coadd['TARGET_RA'][gal_cut], coadd['TARGET_DEC'][gal_cut],
0.000277778)
m_gama = match[0]
m_coadd = match[1]
match_gama.append(m_gama)
coadd_fluxes.append(coadd_flux[gal_cut,:][m_coadd])
match_gama = np.concatenate(match_gama)
coadd_fluxes = np.concatenate(coadd_fluxes, axis=0)
print(' %i matches to G12' % len(match_gama))
# generate spectra for the following overlapping galaxies
gama_samp = np.arange(Ng12)[cut_gama][match_gama]
s_bgs = FM.BGSsourceSpectra(wavemin=1500.0, wavemax=15000)
emline_flux = s_bgs.EmissionLineFlux(g12, index=gama_samp, dr_gama=3, silent=True) # emission lines from GAMA
s_flux, s_wave, magnorm_flag = s_bgs.Spectra(
g12_rfib[match_gama],
g12_z[match_gama],
np.repeat(100.0, len(match_gama)),
seed=1,
templateid=template_match[gama_samp],
emflux=emline_flux,
mag_em=g12_rmag_gama[match_gama]
)
igals = np.random.choice(np.arange(len(match_gama))[magnorm_flag], size=5, replace=False)
fig = plt.figure(figsize=(15,20))
for i, igal in enumerate(igals):
sub = fig.add_subplot(5,1,i+1)
#sub.plot(coadd_wave, medfilt(coadd_fluxes[igal,:], 101), c='k',
# ls=':', lw=0.5, label='smoothed (coadd flux)')
sub.plot(coadd_wave, coadd_fluxes[igal,:] * transp * 0.775 ,
c='C0', lw=0.1)
sub.plot(coadd_wave, medfilt(coadd_fluxes[igal,:], 101) * transp * 0.775 , c='C0',
label='(coadd flux) x (TRANSP) x (0.775)')
sub.plot(coadd_wave, coadd_fluxes[igal,:] * fibloss,
c='C1', lw=0.1)
sub.plot(coadd_wave, medfilt(coadd_fluxes[igal,:], 101) * fibloss, c='C1',
label='(coadd flux) x (TRANSP) x (FIBER FRACFLUX)')
sub.plot(s_wave, s_flux[igal,:] * transp, c='k', ls='--',
label='(sim source flux) x (TRANSP)')
sub.set_xlim(3600, 9800)
if i < 4: sub.set_xticklabels([])
if i == 1: sub.set_ylabel('incident flux [$10^{-17} erg/s/cm^2/A$]', fontsize=25)
if expid == 55432:
sub.set_ylim(-0.5, 3.)
else:
sub.set_ylim(-0.5, 10.)
#sub.set_ylim(1e-1, None)
#sub.set_yscale('log')
sub.legend(loc='upper right', handletextpad=0.1, fontsize=20)
sub.set_xlabel('wavelength', fontsize=25)
fig.savefig(os.path.join(dir,
'valid.spectral_pipeline_source_flux.exp%i.png' % expid), bbox_inches='tight')
plt.close()
return None
def validate_spectral_pipeline_source():
''' compare the color-matched and fiber flux scaled source spectra from the
spectral simulation to the fiber loss corrected cframes CMX data. This is
because the GAMA comparison was a bust.
'''
import glob
from scipy.signal import medfilt
from scipy.interpolate import interp1d
from scipy.spatial import cKDTree as KDTree
from desitarget.cmx import cmx_targetmask
from pydl.pydlutils.spheregroup import spherematch
np.random.seed(0)
tileid = 66003
date = 20200315
expids = [55654, 55655, 55656]
dir_gfa = '/global/cfs/cdirs/desi/users/ameisner/GFA/conditions'
dir_redux = "/global/cfs/cdirs/desi/spectro/redux/daily"
dir_coadd = '/global/cfs/cdirs/desi/users/chahah/bgs_exp_coadd/'
# read VI redshifts, which will be used for constructing the source spectra
fvi = os.path.join('/global/cfs/cdirs/desi/sv/vi/TruthTables/',
'truth_table_BGS_v1.2.csv')
vi_id, ztrue, qa_flag = np.genfromtxt(fvi, delimiter=',', skip_header=1, unpack=True,
usecols=[0, 2, 3])
good_z = (qa_flag >= 2.5)
vi_id = vi_id[good_z].astype(int)
ztrue = ztrue[good_z]
mbgs = FM.BGStree()
for expid in expids:
print('--- %i ---' % expid)
# get fiber acceptance fraction for exposure from GFA
gfa = fitsio.read(os.path.join(dir_gfa,
'offline_all_guide_ccds_thru_20200315.fits'))
isexp = (gfa['EXPID'] == expid)
fwhm = gfa['FWHM_ASEC'][isexp]
print(' (FWHM) = %f' % np.median(fwhm[~np.isnan(fwhm)]))
transp = gfa['TRANSPARENCY'][isexp]
transp = np.median(transp[~np.isnan(transp)])
print(' (TRANSP) = %f' % transp)
fibloss = gfa['TRANSPARENCY'][isexp] * gfa['FIBER_FRACFLUX'][isexp]
fibloss = np.median(fibloss[~np.isnan(fibloss)])
print(' fiber loss = (TRANSP) x (FFRAC) = %f' % fibloss)
# spectrographs available for the exposure
ispecs = np.sort([int(os.path.basename(fframe).split('-')[1].replace('z', ''))
for fframe in glob.glob(os.path.join(dir_redux,
'exposures', str(date), str(expid).zfill(8),
'frame-z*.fits'))])
coadd_fluxes, s_fluxes = [], []
for ispec in ispecs:
# read coadd file
f_coadd = os.path.join(dir_coadd, 'coadd-%i-%i-%i-%s.fits' % (tileid, date, ispec, str(expid).zfill(8)))
coadd = fitsio.read(f_coadd)
coadd_wave = fitsio.read(f_coadd, ext=2)
coadd_flux = fitsio.read(f_coadd, ext=3)
is_BGS = (coadd['CMX_TARGET'] & cmx_targetmask.cmx_mask.mask('SV0_BGS')) != 0
gal_cut = is_BGS & (np.sum(coadd_flux, axis=1) != 0)
targetid = coadd['TARGETID'][gal_cut]
rmag = UT.flux2mag(coadd['FLUX_R'], method='log')[gal_cut]
gmag = UT.flux2mag(coadd['FLUX_G'], method='log')[gal_cut]
rfib = UT.flux2mag(coadd['FIBERFLUX_R'], method='log')[gal_cut]
_, m_vi, m_coadd = np.intersect1d(vi_id, targetid, return_indices=True)
print(' %i matches to VI' % len(m_vi))
# match to templates
temp_rmag = mbgs.meta['SDSS_UGRIZ'].data[:,2]
temp_gmag = mbgs.meta['SDSS_UGRIZ'].data[:,1]
temp_meta = np.vstack([
mbgs.meta['Z'].data,
temp_rmag,
temp_gmag - temp_rmag]).T
tree = KDTree(temp_meta)
# match CMX galaxies to templates
_, match_temp = tree.query(np.vstack([
ztrue[m_vi], rmag[m_coadd], (gmag - rmag)[m_coadd]]).T)
# in some cases there won't be a match from KDTree.query
# we flag these with -999
has_match = ~(match_temp >= len(mbgs.meta['TEMPLATEID']))
s_bgs = FM.BGSsourceSpectra(wavemin=1500.0, wavemax=15000)
s_flux, s_wave, magnorm_flag = s_bgs.Spectra(
rfib[m_coadd][has_match],
ztrue[m_vi][has_match],
np.repeat(100.0, np.sum(has_match)),
seed=1,
templateid=match_temp[has_match],
emflux=None,
mag_em=None)
coadd_fluxes.append(coadd_flux[gal_cut][m_coadd][has_match])
s_fluxes.append(s_flux)
coadd_fluxes = np.concatenate(coadd_fluxes, axis=0)
s_fluxes = np.concatenate(s_fluxes, axis=0)
igals = np.random.choice(np.arange(s_fluxes.shape[0]), size=5, replace=False)
fig = plt.figure(figsize=(15,20))
for i, igal in enumerate(igals):
sub = fig.add_subplot(5,1,i+1)
sub.plot(coadd_wave, coadd_fluxes[igal,:] * transp * 0.775, c='C0', lw=0.1)
sub.plot(coadd_wave, medfilt(coadd_fluxes[igal,:], 101) * transp * 0.775 , c='C0',
label='(coadd flux) x (TRANSP) x (0.775)')
sub.plot(coadd_wave, coadd_fluxes[igal,:] * fibloss, c='C1', lw=0.1)
sub.plot(coadd_wave, medfilt(coadd_fluxes[igal,:], 101) * fibloss, c='C1',
label='(coadd flux) x (TRANSP) x (FIBER FRACFLUX)')
sub.plot(s_wave, s_fluxes[igal,:] * transp, c='k', ls='--',
label='(sim source flux) x (TRANSP)')
sub.set_xlim(3600, 9800)
if i < 4: sub.set_xticklabels([])
if i == 1: sub.set_ylabel('incident flux [$10^{-17} erg/s/cm^2/A$]', fontsize=25)
sub.set_ylim(-0.5, 6)
sub.legend(loc='upper right', handletextpad=0.1, fontsize=20)
sub.set_xlabel('wavelength', fontsize=25)
fig.savefig(os.path.join(dir,
'valid.spectral_pipeline_source.exp%i.png' % expid),
bbox_inches='tight')
plt.close()
return None
def validate_cmx_zsuccess_specsim_discrepancy(dchi2=40.):
''' This ended up being a useless test because the smoothed CMX spectra
that I was using as the source spectra have no features to fit the redshifts!
currently we know that the spectral simulation pipeline does not fully
reproduce the noise level of CMX spectra even when we use the smoothed out
fiber loss corrected CMX spectra as input. This script is to check whether
this discrepancy significantly impacts the redshift success rates.
So we'll be comparing
- z-success rate of observe CMX exposure with VI truth table
- z-success rate of simulated CMX exposure (smoothed fib.loss corrected
source spectra + CMX sky)
VI is currently available for tile 66033 and night 20200315.
'''
import glob
from scipy.signal import medfilt
import desisim.simexp
import specsim.instrument
from desitarget.cmx import cmx_targetmask
np.random.seed(0)
tileid = 66003
date = 20200315
expids = [55654, 55655, 55656]
dir_gfa = '/global/cfs/cdirs/desi/users/ameisner/GFA/conditions'
dir_redux = "/global/cfs/cdirs/desi/spectro/redux/daily"
dir_coadd = '/global/cfs/cdirs/desi/users/chahah/bgs_exp_coadd/'
fvi = os.path.join('/global/cfs/cdirs/desi/sv/vi/TruthTables/',
'truth_table_BGS_v1.2.csv')
vi_id, ztrue, qa_flag = np.genfromtxt(fvi, delimiter=',', skip_header=1, unpack=True,
usecols=[0, 2, 3])
good_z = (qa_flag >= 2.5)
vi_id = vi_id[good_z].astype(int)
ztrue = ztrue[good_z]
for expid in expids:
print('--- %i ---' % expid)
# get fiber acceptance fraction and airmass
gfa = fitsio.read(os.path.join(dir_gfa,
'offline_all_guide_ccds_thru_20200315.fits'))
isexp = (gfa['EXPID'] == expid)
fibloss = gfa['TRANSPARENCY'][isexp] * gfa['FIBER_FRACFLUX'][isexp]
fibloss = np.median(fibloss[~np.isnan(fibloss)])
print(' fiber loss = (TRANSP) x (FFRAC) = %f' % fibloss)
airmass = np.median(gfa['AIRMASS'][isexp])
print(' airmass = %.2f' % airmass)
ispecs = np.sort([int(os.path.basename(fframe).split('-')[1].replace('z', ''))
for fframe in glob.glob(os.path.join(dir_redux,
'exposures', str(date), str(expid).zfill(8),
'frame-z*.fits'))])
# exposure time
_frame = desispec.io.read_frame(os.path.join(dir_redux,
'exposures', str(date), str(expid).zfill(8),
'frame-b%i-%s.fits' % (ispecs[0], str(expid).zfill(8))))
exptime = _frame.meta['EXPTIME']
print(' exp.time = %.fs' % exptime)
for ispec in ispecs:
print(' petal %i' % ispec)
fexp = os.path.join(dir, 'sim_cmx_spectra.exp%i.petal%i.texp%.fs.fits'
% (expid, ispec, exptime))
# get target id
f_coadd = os.path.join(dir_coadd, 'coadd-%i-%i-%i-%s.fits' % (tileid, date, ispec, str(expid).zfill(8)))
coadd = fitsio.read(f_coadd)
coadd_wave = fitsio.read(f_coadd, ext=2)
coadd_flux = fitsio.read(f_coadd, ext=3)
is_BGS = (coadd['CMX_TARGET'] & cmx_targetmask.cmx_mask.mask('SV0_BGS')) != 0
gal_cut = is_BGS & (np.sum(coadd_flux, axis=1) != 0)
igals = np.arange(len(gal_cut))[gal_cut]
print(' %i BGS galaxies' % np.sum(gal_cut))
if os.path.isfile(fexp): continue
# get sky surface brightness for petal
f_sky = lambda band: os.path.join(dir_redux,
'exposures', str(date), str(expid).zfill(8),
'sky-%s%i-%s.fits' % (band, ispec, str(expid).zfill(8)))
sky_b = desispec.io.read_sky(f_sky('b'))
sky_r = desispec.io.read_sky(f_sky('r'))
sky_z = desispec.io.read_sky(f_sky('z'))
wave, sky_electrons = bs_coadd(
[sky_b.wave, sky_r.wave, sky_z.wave],
[sky_b.flux, sky_r.flux, sky_z.flux])
# get which are good fibers from coadd file
is_good = (coadd['FIBERSTATUS'] == 0)
is_sky = (coadd['CMX_TARGET'] & cmx_targetmask.cmx_mask.mask('SKY')) != 0
good_sky = is_good & is_sky
# get throughput for the cameras
config = desisim.simexp._specsim_config_for_wave(wave, dwave_out=0.8, specsim_config_file='desi')
instrument = specsim.instrument.initialize(config, True)
throughput = np.amax([instrument.cameras[0].throughput, instrument.cameras[1].throughput, instrument.cameras[2].throughput], axis=0)
desi_fiber_area = 1.862089 # fiber area
# calculate sky brightness
sky_bright = np.median(sky_electrons[good_sky,:], axis=0) / throughput / instrument.photons_per_bin / exptime * 1e17
# source flux is the smoothed CMX spectra
source_flux = np.zeros((len(igals), len(wave)))
for i in range(len(igals)):
source_flux[i,:] = np.clip(np.interp(wave, coadd_wave,
medfilt(coadd_flux[igals[i],:], 101)), 0, None)
# simulate the exposures using the spectral simulation pipeline
fdesi = FM.fakeDESIspec()
bgs = fdesi.simExposure(
wave,
source_flux * fibloss, # scale by fiber acceptance fraction
exptime=exptime,
airmass=airmass,
Isky=[wave, sky_bright],
dwave_out=0.8,
filename=fexp)
frr = run_redrock(fexp, overwrite=False)
for ispec in ispecs:
print(' petal %i' % ispec)
# get target id
f_coadd = os.path.join(dir_coadd, 'coadd-%i-%i-%i-%s.fits' % (tileid, date, ispec, str(expid).zfill(8)))
coadd = fitsio.read(f_coadd)
coadd_wave = fitsio.read(f_coadd, ext=2)
coadd_flux = fitsio.read(f_coadd, ext=3)
coadd_ivar = fitsio.read(f_coadd, ext=4)
is_BGS = (coadd['CMX_TARGET'] & cmx_targetmask.cmx_mask.mask('SV0_BGS')) != 0
gal_cut = is_BGS & (np.sum(coadd_flux, axis=1) != 0)
fexp = os.path.join(dir, 'sim_cmx_spectra.exp%i.petal%i.texp%.fs.fits'
% (expid, ispec, exptime))
sim = desispec.io.read_spectra(fexp)
# randomly check 3 galaxies
igals = np.random.choice(np.arange(np.sum(gal_cut)), size=3, replace=False)
fig = plt.figure(figsize=(15,15))
for i, igal in enumerate(igals):
sub = fig.add_subplot(3,1,i+1)
sub.plot(coadd_wave, coadd_flux[gal_cut,:][igal,:], c='C0', label='coadd')
for band in ['b', 'r', 'z']:
sub.plot(sim.wave[band], sim.flux[band][igal,:] / fibloss, c='C1',
label='sim / fib.loss')
sub.set_xlim(3600, 9800)
if i < 2: sub.set_xticklabels([])
if i == 1: sub.set_ylabel('flux [$10^{-17} erg/s/cm^2/A$]', fontsize=25)
sub.set_ylim(-1., None)
sub.legend(loc='upper right', handletextpad=0.1, fontsize=20)
sub.set_xlabel('wavelength', fontsize=25)
fig.savefig(os.path.join(dir,
'valid.spectral_pipeline_zsuccess_flux.exp%i.petal%i.png' %
(expid, ispec)), bbox_inches='tight')
plt.close()
fig = plt.figure(figsize=(15,15))
for i, igal in enumerate(igals):
sub = fig.add_subplot(3,1,i+1)
sub.plot(coadd_wave, coadd_ivar[gal_cut,:][igal,:], c='C0', label='coadd')
for band in ['b', 'r', 'z']:
sub.plot(sim.wave[band], sim.ivar[band][igal,:] *
fibloss**2, c='C1', label='sim x (fib.loss$)^2$')
sub.set_xlim(3600, 9800)
if i < 2: sub.set_xticklabels([])
if i == 1: sub.set_ylabel('ivar', fontsize=25)
sub.set_ylim(0., None)
sub.legend(loc='upper right', handletextpad=0.1, fontsize=20)
sub.set_xlabel('wavelength', fontsize=25)
fig.savefig(os.path.join(dir,
'valid.spectral_pipeline_zsuccess_ivar.exp%i.petal%i.png' %
(expid, ispec)), bbox_inches='tight')
plt.close()
# read in single exposure coadd and redrock output
for i, ispec in enumerate(ispecs):
# get target id
f_coadd = os.path.join(dir_coadd, 'coadd-%i-%i-%i-%s.fits' % (tileid, date, ispec, str(expid).zfill(8)))
coadd = fitsio.read(f_coadd)
coadd_flux = fitsio.read(f_coadd, ext=3)
is_BGS = (coadd['CMX_TARGET'] & cmx_targetmask.cmx_mask.mask('SV0_BGS')) != 0
gal_cut = is_BGS & (np.sum(coadd_flux, axis=1) != 0)
targetid = coadd['TARGETID'][gal_cut]
# read coadd redrock fits
rr_coadd = fitsio.read(f_coadd.replace('coadd-', 'zbest-'))
rr_coadd_z = rr_coadd['Z'][gal_cut]
rr_coadd_zwarn = rr_coadd['ZWARN'][gal_cut]
rr_coadd_dchi2 = rr_coadd['DELTACHI2'][gal_cut]
fexp = os.path.join(dir, 'sim_cmx_spectra.exp%i.petal%i.texp%.fs.fits'
% (expid, ispec, exptime))
frr_sim = run_redrock(fexp, overwrite=False)
rr_sim = fitsio.read(frr_sim)
rr_sim_z = rr_sim['Z']
rr_sim_zwarn = rr_sim['ZWARN']
rr_sim_dchi2 = rr_sim['DELTACHI2']
# match VI to exposure based on target ids
_, m_vi, m_sim = np.intersect1d(vi_id, targetid, return_indices=True)
print('%i matches to VI' % len(m_vi))
print(' ', ztrue[m_vi][:5])
print(' ', rr_coadd_z[m_sim][:5])
print(' ', rr_sim_z[m_sim][:5])
if i == 0:
rmags = []
ztrues = []
rr_coadd_zs = []
rr_coadd_zwarns = []
rr_coadd_dchi2s = []
rr_sim_zs = []
rr_sim_zwarns = []
rr_sim_dchi2s = []
rmags.append(UT.flux2mag(coadd['FLUX_R'][gal_cut][m_sim], method='log'))
ztrues.append(ztrue[m_vi])
rr_coadd_zs.append(rr_coadd_z[m_sim])
rr_coadd_zwarns.append(rr_coadd_zwarn[m_sim])
rr_coadd_dchi2s.append(rr_coadd_dchi2[m_sim])
rr_sim_zs.append(rr_sim_z[m_sim])
rr_sim_zwarns.append(rr_sim_zwarn[m_sim])
rr_sim_dchi2s.append(rr_sim_dchi2[m_sim])
rmags = np.concatenate(rmags)
ztrues = np.concatenate(ztrues)
rr_coadd_zs = np.concatenate(rr_coadd_zs)
rr_coadd_zwarns = np.concatenate(rr_coadd_zwarns)
rr_coadd_dchi2s = np.concatenate(rr_coadd_dchi2s)
rr_sim_zs = np.concatenate(rr_sim_zs)
rr_sim_zwarns = np.concatenate(rr_sim_zwarns)
rr_sim_dchi2s = np.concatenate(rr_sim_dchi2s)
zs_coadd = UT.zsuccess(rr_coadd_zs, ztrues, rr_coadd_zwarns,
deltachi2=rr_coadd_dchi2s, min_deltachi2=dchi2)
zs_sim = UT.zsuccess(rr_sim_zs, ztrues, rr_sim_zwarns,
deltachi2=rr_sim_dchi2s, min_deltachi2=dchi2)
print('coadd z-success %.2f' % (np.sum(zs_coadd)/float(len(zs_coadd))))
print('sim z-success %.2f' % (np.sum(zs_sim)/float(len(zs_sim))))
# compare the two redshift success rates
fig = plt.figure(figsize=(6,6))
sub = fig.add_subplot(111)
sub.plot([16, 21], [1.0, 1.0], c='k', ls='--')
wmean, rate, err_rate = UT.zsuccess_rate(rmags, zs_coadd, range=[15,22],
nbins=28, bin_min=10)
sub.errorbar(wmean, rate, err_rate, fmt='.C0', label='coadd')
wmean, rate, err_rate = UT.zsuccess_rate(rmags, zs_sim, range=[15,22],
nbins=28, bin_min=10)
sub.errorbar(wmean, rate, err_rate, fmt='.C1', label='specsim')
sub.text(21., 1.05, r'$\Delta \chi^2 = %.f$' % dchi2, fontsize=20)
sub.legend(loc='lower left', ncol=3, handletextpad=0.1, fontsize=15)
sub.set_xlabel(r'Legacy $r$ fiber magnitude', fontsize=20)
sub.set_xlim(16, 20.5)
sub.set_ylabel(r'redrock $z$ success rate', fontsize=20)
sub.set_ylim([0.6, 1.1])
sub.set_yticks([0.6, 0.7, 0.8, 0.9, 1.])
fig.savefig(os.path.join(dir,
'valid.spectral_pipeline_zsuccess.exp%i.png' % expid),
bbox_inches='tight')
plt.close()
return None
def validate_cmx_zsuccess(dchi2=40.):
''' currently we know that the spectral simulation pipeline does not fully
reproduce the noise level of CMX spectra even when we use the smoothed out
fiber loss corrected CMX spectra as input. This script is to check whether
this discrepancy significantly impacts the redshift success rates.
So we'll be comparing
- z-success rate of observe CMX exposure with VI truth table
- z-success rate of spectral simulations run with CMX sky and transparency
VI is currently available for tile 66033 and night 20200315.
'''
import glob
from scipy.signal import medfilt
import desisim.simexp
import specsim.instrument
from desitarget.cmx import cmx_targetmask
np.random.seed(0)
tileid = 66003
date = 20200315
expids = [55654, 55655, 55656]
dir_gfa = '/global/cfs/cdirs/desi/users/ameisner/GFA/conditions'
dir_redux = "/global/cfs/cdirs/desi/spectro/redux/daily"
dir_coadd = '/global/cfs/cdirs/desi/users/chahah/bgs_exp_coadd/'
# read VI table
fvi = os.path.join('/global/cfs/cdirs/desi/sv/vi/TruthTables/',
'truth_table_BGS_v1.2.csv')
vi_id, ztrue, qa_flag = np.genfromtxt(fvi, delimiter=',', skip_header=1, unpack=True,
usecols=[0, 2, 3])
good_z = (qa_flag >= 2.5)
vi_id = vi_id[good_z].astype(int)
ztrue = ztrue[good_z]
# read GAMA-Legacy source fluxes
wave_s, flux_s, meta_s = source_spectra()
for expid in expids:
print('--- %i ---' % expid)
# get fiber acceptance fraction and airmass
gfa = fitsio.read(os.path.join(dir_gfa,
'offline_all_guide_ccds_thru_20200315.fits'))
isexp = (gfa['EXPID'] == expid)
fwhm = gfa['FWHM_ASEC'][isexp]
print(' (FWHM) = %f' % np.median(fwhm[~np.isnan(fwhm)]))
transp = gfa['TRANSPARENCY'][isexp]
transp = np.median(transp[~np.isnan(transp)])
print(' (TRANSP) = %f' % transp)
fibloss = transp * gfa['FIBER_FRACFLUX'][isexp]
fibloss = np.median(fibloss[~np.isnan(fibloss)])
print(' fiber loss = (TRANSP) x (FFRAC) = %f' % fibloss)
airmass = np.median(gfa['AIRMASS'][isexp])
print(' airmass = %.2f' % airmass)
# get petals
ispecs = np.sort([int(os.path.basename(fframe).split('-')[1].replace('z', ''))
for fframe in glob.glob(os.path.join(dir_redux,
'exposures', str(date), str(expid).zfill(8),
'frame-z*.fits'))])
# exposure time
_frame = desispec.io.read_frame(os.path.join(dir_redux,
'exposures', str(date), str(expid).zfill(8),
'frame-b%i-%s.fits' % (ispecs[0], str(expid).zfill(8))))
exptime = _frame.meta['EXPTIME']
print(' exp.time = %.fs' % exptime)
# simulated exposure
fexp = os.path.join(dir, 'spectralsim_source.cmx_sky.exp%i.fits' % expid)
if not os.path.isfile(fexp):
# get sky brightness for exposure
sky_brights = []
for ispec in ispecs:
print(' petal %i' % ispec)
f_coadd = os.path.join(dir_coadd, 'coadd-%i-%i-%i-%s.fits' % (tileid, date, ispec, str(expid).zfill(8)))
coadd = fitsio.read(f_coadd)
# get sky surface brightness for petal
f_sky = lambda band: os.path.join(dir_redux,
'exposures', str(date), str(expid).zfill(8),
'sky-%s%i-%s.fits' % (band, ispec, str(expid).zfill(8)))
sky_b = desispec.io.read_sky(f_sky('b'))
sky_r = desispec.io.read_sky(f_sky('r'))
sky_z = desispec.io.read_sky(f_sky('z'))
wave, sky_electrons = bs_coadd(
[sky_b.wave, sky_r.wave, sky_z.wave],
[sky_b.flux, sky_r.flux, sky_z.flux])
# get which are good fibers from coadd file
is_good = (coadd['FIBERSTATUS'] == 0)
is_sky = (coadd['CMX_TARGET'] & cmx_targetmask.cmx_mask.mask('SKY')) != 0
good_sky = is_good & is_sky
# get throughput for the cameras
config = desisim.simexp._specsim_config_for_wave(wave, dwave_out=0.8, specsim_config_file='desi')
instrument = specsim.instrument.initialize(config, True)
throughput = np.amax([instrument.cameras[0].throughput, instrument.cameras[1].throughput, instrument.cameras[2].throughput], axis=0)
desi_fiber_area = 1.862089 # fiber area
# calculate sky brightness
sky_bright = np.median(sky_electrons[good_sky,:], axis=0) / throughput / instrument.photons_per_bin / exptime * 1e17
sky_brights.append(sky_bright)
sky_brights = np.array(sky_brights)
# median sky brightness of the petals
sky_bright = np.median(sky_brights, axis=0)
# simulate the exposures using the spectral simulation pipeline
fdesi = FM.fakeDESIspec()
bgs = fdesi.simExposure(
wave_s,
flux_s * transp, # scale by transparency
exptime=exptime,
airmass=airmass,
Isky=[wave, sky_bright],
dwave_out=0.8,
filename=fexp)
# run redrock
frr_sim = run_redrock(fexp, overwrite=False)
rr_sim = fitsio.read(frr_sim)
rr_sim_z = rr_sim['Z']
rr_sim_zwarn = rr_sim['ZWARN']
rr_sim_dchi2 = rr_sim['DELTACHI2']
# compile single exposure coadd and redrock output
for i, ispec in enumerate(ispecs):
# get target id
f_coadd = os.path.join(dir_coadd, 'coadd-%i-%i-%i-%s.fits' % (tileid, date, ispec, str(expid).zfill(8)))
coadd = fitsio.read(f_coadd)
coadd_flux = fitsio.read(f_coadd, ext=3)
is_BGS = (coadd['CMX_TARGET'] & cmx_targetmask.cmx_mask.mask('SV0_BGS')) != 0
gal_cut = is_BGS & (np.sum(coadd_flux, axis=1) != 0)
targetid = coadd['TARGETID'][gal_cut]
# read coadd redrock fits
rr_coadd = fitsio.read(f_coadd.replace('coadd-', 'zbest-'))
rr_coadd_z = rr_coadd['Z'][gal_cut]
rr_coadd_zwarn = rr_coadd['ZWARN'][gal_cut]
rr_coadd_dchi2 = rr_coadd['DELTACHI2'][gal_cut]
# match VI to exposure based on target ids
_, m_vi, m_coadd = np.intersect1d(vi_id, targetid, return_indices=True)
if i == 0:
rmags = []
ztrues = []
rr_coadd_zs = []
rr_coadd_zwarns = []
rr_coadd_dchi2s = []
rmags.append(UT.flux2mag(coadd['FLUX_R'][gal_cut][m_coadd], method='log'))
ztrues.append(ztrue[m_vi])
rr_coadd_zs.append(rr_coadd_z[m_coadd])
rr_coadd_zwarns.append(rr_coadd_zwarn[m_coadd])
rr_coadd_dchi2s.append(rr_coadd_dchi2[m_coadd])
print('%i matches to VI' % len(rmags))
rmags = np.concatenate(rmags)
ztrues = np.concatenate(ztrues)
rr_coadd_zs = np.concatenate(rr_coadd_zs)
rr_coadd_zwarns = np.concatenate(rr_coadd_zwarns)
rr_coadd_dchi2s = np.concatenate(rr_coadd_dchi2s)
zs_coadd = UT.zsuccess(rr_coadd_zs, ztrues, rr_coadd_zwarns,
deltachi2=rr_coadd_dchi2s, min_deltachi2=dchi2)
zs_sim = UT.zsuccess(rr_sim_z, meta_s['zred'], rr_sim_zwarn,
deltachi2=rr_sim_dchi2, min_deltachi2=dchi2)
print('coadd z-success %.2f' % (np.sum(zs_coadd)/float(len(zs_coadd))))
print('sim z-success %.2f' % (np.sum(zs_sim)/float(len(zs_sim))))
# compare the two redshift success rates
fig = plt.figure(figsize=(6,6))
sub = fig.add_subplot(111)
sub.plot([16, 21], [1.0, 1.0], c='k', ls='--')
wmean, rate, err_rate = UT.zsuccess_rate(rmags, zs_coadd, range=[15,22],
nbins=28, bin_min=10)
sub.errorbar(wmean, rate, err_rate, fmt='.C0', label='coadd')
wmean, rate, err_rate = UT.zsuccess_rate(meta_s['r_mag'], zs_sim, range=[15,22],
nbins=28, bin_min=10)
sub.errorbar(wmean, rate, err_rate, fmt='.C1', label='spectral sim')
sub.text(19.5, 1.05, r'$\Delta \chi^2 = %.f$' % dchi2, fontsize=20)
sub.legend(loc='lower left', ncol=3, handletextpad=0.1, fontsize=15)
sub.set_xlabel(r'Legacy $r$ fiber magnitude', fontsize=20)
sub.set_xlim(16, 20.5)
sub.set_ylabel(r'redrock $z$ success rate', fontsize=20)
sub.set_ylim([0.6, 1.1])
sub.set_yticks([0.6, 0.7, 0.8, 0.9, 1.])
fig.savefig(os.path.join(dir,
'valid.spectralsim_source.cmx_sky.zsuccess.exp%i.png' % expid),
bbox_inches='tight')
plt.close()
return None
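# Rough sketch of the redshift-success criterion applied throughout this script.
# UT.zsuccess is part of feasibgs and this stand-in is NOT its actual code; it
# only illustrates the kind of cuts implied by the calls above: ZWARN == 0, a
# Delta chi^2 floor (min_deltachi2), and a redshift tolerance (the 0.003 value
# for |dz|/(1+z) is an assumption made here for illustration).
def _zsuccess_sketch(z_rr, z_true, zwarn, deltachi2, min_deltachi2=40., dz_tol=0.003):
    dz_ok = np.abs(z_rr - z_true) / (1. + z_true) < dz_tol
    return dz_ok & (zwarn == 0) & (deltachi2 > min_deltachi2)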
def tnom(dchi2=40., emlines=True):
''' Calculate z-success rate for nominal dark time exposure with different
tnom exposure times. For each tnom, use the z-success rate to determine
r_lim, the r magnitude that gets 95% completeness.
'''
np.random.seed(0)
# nominal exposure times
if dchi2 == 40:
texps = [100 + 20 * i for i in range(11)][::2]
elif dchi2 == 100:
texps = [200 + 10 * i for i in range(11)][::2]
# true redshift and r-magnitude
_, _, meta = source_spectra()
ztrue = meta['zred'] # true redshifts
r_mag = meta['r_mag']
r_fib = meta['r_mag_apflux']
# generate spectra for nominal dark sky exposures and run redrock
frr_noms = []
for texp in texps:
spec_nom = nomdark_spectra(texp)
# run redrock on nominal dark sky exposure spectra
if emlines:
fnom = os.path.join(dir, 'exp_spectra.nominal_dark.%.fs.fits' % texp)
else:
fnom = os.path.join(dir, 'exp_spectra.nominal_dark.noemission.%.fs.fits' % texp)
frr_nom = run_redrock(fnom, overwrite=False)
frr_noms.append(frr_nom)
rmags = np.linspace(17, 20, 31)
fig = plt.figure(figsize=(6,6))
sub = fig.add_subplot(111)
sub.plot([16, 21], [1., 1.], c='k', ls=':')
# for each tnom, calculate rlim from the z-sucess rates
for i, texp, frr_nom in zip(range(len(texps)), texps, frr_noms):
# read redrock output and calculate z-success
rr_nom = fitsio.read(frr_nom)
zs_nom = UT.zsuccess(rr_nom['Z'], ztrue, rr_nom['ZWARN'],
deltachi2=rr_nom['DELTACHI2'], min_deltachi2=dchi2)
# ignore redshift failures for bright r < 18.2 galaxies, since this is
# likely an issue with the emission line
zs_nom[r_mag < 18.2] = True
# determine rlim
zs_rmag = []
for _r in rmags:
brighter = (r_mag < _r)
zs_rmag.append(np.sum(zs_nom[brighter]) / np.sum(brighter))
crit = (np.array(zs_rmag) < 0.95) & (rmags > 18)
if np.sum(crit) > 0:
rlim = np.min(rmags[crit])
else:
rlim = np.max(rmags)
print('--- tnom = %.fs ---' % texp)
print(' total z-success = %.2f' % (np.sum(zs_nom)/float(len(zs_nom))))
print(' 95percent complete rlim = %.1f' % rlim)
wmean, rate, err_rate = UT.zsuccess_rate(r_mag, zs_nom, range=[15,22],
nbins=28, bin_min=10)
sub.plot(wmean, rate, label=r'%.fs; $r_{\rm lim}= %.1f$' % (texp, rlim))
sub.text(19., 1.05, r'$\Delta \chi^2 = %.f$' % dchi2, fontsize=20)
sub.legend(loc='lower left', handletextpad=0.1, fontsize=15)
sub.set_xlabel(r'Legacy $r$ magnitude', fontsize=20)
sub.set_xlim([16., 20.5])
sub.set_ylabel(r'redrock $z$ success rate', fontsize=20)
sub.set_ylim([0.6, 1.1])
sub.set_yticks([0.6, 0.7, 0.8, 0.9, 1.])
fig.savefig(os.path.join(dir, 'zsuccess.tnom.dchi2_%i.png' % dchi2),
bbox_inches='tight')
plt.close()
fig = plt.figure(figsize=(6,6))
sub = fig.add_subplot(111)
sub.plot([18, 25], [1., 1.], c='k', ls=':')
# nominal exposure z-success rate as a function of fiber magnitude
for i, texp, frr_nom in zip(range(len(texps)), texps, frr_noms):
# read redrock output and calculate z-success
rr_nom = fitsio.read(frr_nom)
zs_nom = UT.zsuccess(rr_nom['Z'], ztrue, rr_nom['ZWARN'],
deltachi2=rr_nom['DELTACHI2'], min_deltachi2=dchi2)
# ignore redshift failures for bright r < 18.2 galaxies, since this is
# likely an issue with the emission line
zs_nom[r_mag < 18.2] = True
wmean, rate, err_rate = UT.zsuccess_rate(r_fib, zs_nom, range=[18,23],
nbins=28, bin_min=10)
sub.errorbar(wmean, rate, err_rate, label=r'%.fs' % texp)
sub.text(21., 1.05, r'$\Delta \chi^2 = %.f$' % dchi2, fontsize=20)
sub.legend(loc='lower left', ncol=3, handletextpad=0.1, fontsize=15)
sub.set_xlabel(r'Legacy $r$ fiber magnitude', fontsize=20)
sub.set_xlim([18., 22.5])
sub.set_ylabel(r'redrock $z$ success rate', fontsize=20)
sub.set_ylim([0.6, 1.1])
sub.set_yticks([0.6, 0.7, 0.8, 0.9, 1.])
fig.savefig(os.path.join(dir, 'zsuccess.tnom.r_fib.dchi2_%i.png' % dchi2),
bbox_inches='tight')
return None
def texp_factor_wavelength(emlines=True):
''' Q: Should the exposure time correction factor be determined by sky
surface brightness ratio at 5000A or 6500A?
sky surface brightness ratio = (sky surface brightness) / (nominal dark sky)
We will answer this by constructing a set of nominal dark sky exposure
spectra with 150s exposure time, getting the redshift success rate for
these spectra. Then we'll compare the redshift success rate for
1. exposure spectra constructed with CMX sky brightness and
texp = 150s x (sky ratio at 5000A)
2. exposure spectra constructed with CMX sky brightness and
texp = 150s x (sky ratio at 6500A)
We use CMX sky brightness during bright exposures.
Whichever redshift success rate is coser to the nominal dark exposure z
success rate will determine the exposure factor
updates
-------
* David Schlegel was surprised that 6500A agreed better. He finds that
5000A agrees better. He suggested I run this test without emission lines
* 06/11/2020: Read noise term in the SNR calculation cannot be ignored when
our nominal exposure time is low. New fsky values calculated for CMX
exposures including read noise.
'''
np.random.seed(0)
import desisim.simexp
from desimodel.io import load_throughput
wavemin = load_throughput('b').wavemin - 10.0
wavemax = load_throughput('z').wavemax + 10.0
wave = np.arange(round(wavemin, 1), wavemax, 0.8) * u.Angstrom
config = desisim.simexp._specsim_config_for_wave(wave.to('Angstrom').value, dwave_out=0.8, specsim_config_file='desi')
nominal_surface_brightness_dict = config.load_table(
config.atmosphere.sky, 'surface_brightness', as_dict=True)
Isky_nom = [wave, nominal_surface_brightness_dict['dark']]
# generate spectra for nominal dark sky exposure as reference
spec_nom = nomdark_spectra(150, emlines=emlines)
# run redrock on nominal dark sky exposure spectra
frr_nom = run_redrock(os.path.join(dir,
'exp_spectra.nominal_dark%s.150s.fits' % ['.noemission', ''][emlines]),
overwrite=False)
# read in CMX sky data
skies = cmx_skies()
# select CMX exposures when the sky was brighter than dark time. In
# principle we should focus on bright exposures (i.e. 2.5x nominal).
# we also remove exposures from 20200314 which has strange sky fluxes.
#bright = (((skies['sky_ratio_5000'] > 1.) | (skies['sky_ratio_7000'] > 1.))
# & (skies['date'] != 20200314))
#print('%i exposures with sky ratios > 1 and not taken during March 14' % len(expids))
bright = (((skies['fsky_5000'] > 1.5) | (skies['fsky_7000'] > 1.5))
& (skies['date'] != 20200314))
expids = np.unique(skies['expid'][bright])[:5]
print('%i exposures with fsky > 1.5 and not taken during March 14' % len(expids))
#np.random.choice(np.unique(skies['expid'][bright]), size=5, replace=False)
# generate exposure spectra for select CMX sky surface brightnesses with
# exposure times scaled by (1) sky ratio at 5000A (2) sky ratio at 6500A
for expid in expids:
print('--- expid = %i ---' % expid)
is_exp = (skies['expid'] == expid)
# get median sky surface brightnesses for exposure
Isky = bs_coadd(
[skies['wave_b'], skies['wave_r'], skies['wave_z']],
[
np.median(skies['sky_sb_b'][is_exp], axis=0),
np.median(skies['sky_sb_r'][is_exp], axis=0),
np.median(skies['sky_sb_z'][is_exp], axis=0)]
)
fig = plt.figure(figsize=(15,10))
sub = fig.add_subplot(211)
sub.plot(Isky_nom[0], Isky_nom[1], c='k', lw=0.5)
sub.plot(Isky[0], Isky[1], c='C0', lw=0.5)
sub.set_xlabel('wavelength', fontsize=20)
sub.set_xlim(3.6e3, 9.8e3)
sub.set_ylabel('flux', fontsize=20)
sub.set_ylim(0., 10.)
sub = fig.add_subplot(212)
for band in ['b', 'r', 'z']:
sub.plot(spec_nom.wave[band], spec_nom.flux[band][0,:], c='k', lw=1)
# get median sky ratios for the exposure
for i, _w in enumerate([5000, 7000]):
_fexp = np.median(skies['fsky_%i' % _w ][is_exp])
print(' fexp at %iA = %.2f' % (_w, _fexp))
print(' sky ratio = %.2f' % (np.median(skies['sky_ratio_%i' % _w][is_exp])))
# generate exposure spectra for expid CMX sky
_fspec = os.path.join(dir, 'exp_spectra.exp%i%s.fexp_%i.fits' %
(expid, ['.noemission', ''][emlines], _w))
_spec = exp_spectra(
Isky, # sky surface brightness
150. * _fexp, # exposure time
1.1, # same airmass
_fspec,
emlines=emlines
)
# run redrock on the exposure spectra
frr = run_redrock(_fspec, qos='debug')
# plot comparing the exp spectra to the nominal dark spectra
for band in ['b', 'r', 'z']:
lbl = None
if band == 'b':
lbl = ('at %iA' % _w)
sub.plot(_spec.wave[band], _spec.flux[band][0,:], c='C%i' % i,
lw=1, label=lbl)
sub.set_xlabel('wavelength', fontsize=20)
sub.set_xlim(3.6e3, 9.8e3)
sub.set_ylabel('flux', fontsize=20)
sub.set_ylim(0., 10.)
sub.legend(loc='upper right', fontsize=20, ncol=3)
fig.savefig(_fspec.replace('.fexp_%i.fits' % _w, '.png'), bbox_inches='tight')
plt.close()
_, _, meta = source_spectra(emlines=emlines)
ztrue = meta['zred'] # true redshifts
r_mag = meta['r_mag']
dchi2 = 40. # minimum delta chi2
# read redrock outputs and compare which exposure factor does better
# at reproducing the nomimal dark exposure redshift success rate.
rr_nom = fitsio.read(frr_nom)
zs_nom = UT.zsuccess(rr_nom['Z'], ztrue, rr_nom['ZWARN'],
deltachi2=rr_nom['DELTACHI2'], min_deltachi2=dchi2)
print('nominal z-success = %.2f' % (np.sum(zs_nom)/float(len(zs_nom))))
fig = plt.figure(figsize=(6,6))
sub = fig.add_subplot(111)
sub.plot([16, 21], [1., 1.], c='k', ls=':')
wmean, rate, err_rate = UT.zsuccess_rate(r_mag, zs_nom, range=[15,22],
nbins=28, bin_min=10)
_plt_nom = sub.errorbar(wmean, rate, err_rate, fmt='.k', elinewidth=2, markersize=10)
zs_5000, zs_7000 = [], []
for expid in expids:
print('--- expid = %i ---' % expid)
zss = []
for i, _w in enumerate([5000, 7000]):
rr = fitsio.read(os.path.join(dir,
'zbest.exp_spectra.exp%i%s.fexp_%i.fits' %
(expid, ['.noemission', ''][emlines], _w)))
_zs = UT.zsuccess(rr['Z'], ztrue, rr['ZWARN'],
deltachi2=rr['DELTACHI2'], min_deltachi2=dchi2)
zss.append(_zs)
print(' fexp at %i z-success = %.2f' % (_w, np.sum(_zs)/float(len(_zs))))
wmean, rate, err_rate = UT.zsuccess_rate(r_mag, _zs, range=[15,22],
nbins=28, bin_min=10)
_plt, = sub.plot(wmean, rate, c='C%i' % i)
if expid == expids[0]:
if i == 0: _plts = [_plt_nom]
_plts.append(_plt)
zs_5000.append(zss[0])
zs_7000.append(zss[1])
zs_5000 = np.concatenate(zs_5000)
zs_7000 = np.concatenate(zs_7000)
print('-----------------------')
print('nominal z-success = %.2f' % (np.sum(zs_nom)/float(len(zs_nom))))
print('fexp at 5000A z-success = %.2f ' % (np.sum(zs_5000)/float(len(zs_5000))))
print('fexp at 7000A z-success = %.2f ' % (np.sum(zs_7000)/float(len(zs_7000))))
sub.text(19., 1.05, r'$\Delta \chi^2 = %.f$' % dchi2, fontsize=20)
sub.legend(_plts,
['nominal dark 150s',
r'CMX exp. $f_{\rm sky}[5000A]$',
r'CMX exp. $f_{\rm sky}[7000A]$'],
loc='lower left', handletextpad=0.1, fontsize=15)
sub.set_xlabel(r'Legacy $r$ magnitude', fontsize=20)
sub.set_xlim([16., 20.5])
sub.set_ylabel(r'redrock $z$ success rate', fontsize=20)
sub.set_ylim([0.6, 1.1])
sub.set_yticks([0.6, 0.7, 0.8, 0.9, 1.])
fig.savefig(os.path.join(dir,
'zsuccess.exp_spectra%s.fsky.png' % ['.noemission', ''][emlines]),
bbox_inches='tight')
return None
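# Minimal sketch of the exposure-time scaling tested above: stretch the nominal
# dark exposure time t_nom by the median sky factor of the exposure, using the
# fsky_5000 / fsky_7000 columns of the cmx_skies() dictionary (names as used in
# the calls above).
def _scaled_exptime(skies, expid, t_nom=150., refwave=5000):
    is_exp = (skies['expid'] == expid)
    fsky = np.median(skies['fsky_%i' % refwave][is_exp])
    return t_nom * fsky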
def reproduce_vi_zsuccess():
''' **validating the spectral sims pipeline** I will test the spectral
simulation pipeline by trying to reproduce the redshift success rate of VI
Round 1 Tile 66003. I will construct spectral sims using Tile 66003
exposure sky brightness and exposure times then run redrock on them.
'''
np.random.seed(0)
Isky_nom = _Isky_nominal_dark()
# read in CMX sky data
skies = cmx_skies()
# select VI exposures of Tile 66003
vi_tile = (skies['tileid'] == 66003)
uniq_exps = np.unique(skies['expid'][vi_tile])
print('%i exposures with TILEID=66003' % len(uniq_exps))
print(uniq_exps)
for expid in uniq_exps:
print('\n--- expid = %i ---' % expid)
is_exp = (skies['expid'] == expid)
# get median sky surface brightnesses for exposure
Isky = bs_coadd(
[skies['wave_b'], skies['wave_r'], skies['wave_z']],
[
np.median(skies['sky_sb_b'][is_exp], axis=0),
np.median(skies['sky_sb_r'][is_exp], axis=0),
np.median(skies['sky_sb_z'][is_exp], axis=0)]
)
# median airmass
airmass_exp = np.median(skies['airmass'][is_exp])
print(' median airmass = %.2f' % airmass_exp)
# exposure time of exposure
texp = skies['exptime'][is_exp][0]
print(' exposure time = %.1f' % texp)
assert np.all(skies['exptime'][is_exp] == texp)
# generate exposure spectra for expid CMX sky
_fspec = os.path.join(dir, 'exp_spectra.exp%i%s.texp%.f.fits' %
(expid, '.noemission', texp))
_spec = exp_spectra(
Isky, # sky surface brightness
texp, # exposure time
airmass_exp, # same airmass
_fspec,
emlines=False
)
# run redrock on the exposure spectra
frr = run_redrock(_fspec, qos='debug')
_, _, meta = source_spectra(emlines=False)
ztrue = meta['zred'] # true redshifts
r_mag = meta['r_mag']
dchi2 = 40. # minimum delta chi2
# read redrock outputs for each exposure
zs_exps = []
for expid in uniq_exps:
print('--- expid = %i ---' % expid)
# re-derive texp for this exposure; the value left over from the generation
# loop above belongs to the last exposure only
texp = skies['exptime'][skies['expid'] == expid][0]
rr = fitsio.read(os.path.join(dir, 'zbest.exp_spectra.exp%i%s.texp%.f.fits' %
(expid, '.noemission', texp)))
_zs = UT.zsuccess(rr['Z'], ztrue, rr['ZWARN'],
deltachi2=rr['DELTACHI2'], min_deltachi2=dchi2)
zs_exps.append(_zs)
dir_coadd = '/global/cfs/cdirs/desi/users/chahah/bgs_exp_coadd/'
fig = plt.figure(figsize=(18,6))
for i, expid, zs in zip(range(len(uniq_exps)), uniq_exps, zs_exps):
sub = fig.add_subplot(1, 3, i+1)
sub.plot([16, 21], [1., 1.], c='k', ls=':')
# load VI z success rate for single exposure
wmean, rate, err_rate = np.loadtxt(os.path.join(dir_coadd,
'vi_zsuccess.dchi2_40.coadd-66003-20200315-%i.txt' % expid))
sub.errorbar(wmean, rate, err_rate, fmt='.k', label='VI $z$ success')
wmean, rate, err_rate = UT.zsuccess_rate(r_mag, zs, range=[15,22],
nbins=28, bin_min=10)
sub.plot(wmean, rate, c='C%i' % i, label='Spectral Sim.')
if i == 2: sub.legend(loc='lower left', handletextpad=0.1, fontsize=15)
if i == 1: sub.set_xlabel(r'Legacy $r$ magnitude', fontsize=20)
sub.set_xlim([16., 20.5])
if i == 0:
sub.text(19., 1.05, r'$\Delta \chi^2 = %.f$' % dchi2, fontsize=20)
sub.set_ylabel(r'redrock $z$ success rate', fontsize=20)
sub.set_ylim([0.6, 1.1])
sub.set_yticks([0.6, 0.7, 0.8, 0.9, 1.])
fig.savefig(os.path.join(dir, 'reproduce_vi_zsuccess.png'), bbox_inches='tight')
return None
def _Isky_nominal_dark():
''' surface brightness of nominal dark sky
'''
import desisim.simexp
from desimodel.io import load_throughput
wavemin = load_throughput('b').wavemin - 10.0
wavemax = load_throughput('z').wavemax + 10.0
wave = np.arange(round(wavemin, 1), wavemax, 0.8) * u.Angstrom
config = desisim.simexp._specsim_config_for_wave(wave.to('Angstrom').value, dwave_out=0.8, specsim_config_file='desi')
nominal_surface_brightness_dict = config.load_table(
config.atmosphere.sky, 'surface_brightness', as_dict=True)
return [wave, nominal_surface_brightness_dict['dark']]
def _SNR_test():
''' Q: Why is scaling the exposure time by the sky brightness ratio
not producing spectra with roughly the same SNR?
The SNR of the spectra is approximately
SNR = S x sqrt(texp/sky)
This means that if the sky is twice as bright but you increase texp by 2,
you would get the same SNR. This, however, does not seem to be the case for
the SNR for the `exp_spectra` output.
In this script I will generate spectra with uniform (flat in wavelength) sky
brightness levels and exposure times scaled by the same factor to check this.
'''
np.random.seed(0)
import desisim.simexp
from desimodel.io import load_throughput
wavemin = load_throughput('b').wavemin - 10.0
wavemax = load_throughput('z').wavemax + 10.0
wave = np.arange(round(wavemin, 1), wavemax, 0.8) * u.Angstrom
# get throughput for the cameras
import specsim.instrument
from specsim.simulator import Simulator
config = desisim.simexp._specsim_config_for_wave(wave.value, dwave_out=0.8, specsim_config_file='desi')
instrument = specsim.instrument.initialize(config, True)
throughput = np.amax([instrument.cameras[0].throughput, instrument.cameras[1].throughput, instrument.cameras[2].throughput], axis=0)
fig = plt.figure(figsize=(20,15))
sub0 = fig.add_subplot(321)
sub1 = fig.add_subplot(323)
sub2 = fig.add_subplot(325)
sub3 = fig.add_subplot(322)
sub4 = fig.add_subplot(324)
sub5 = fig.add_subplot(326)
for ii, i in enumerate([0, 5, 10]):
# read in source spectra
print('sky = %i' % (i+1))
wave_s, flux_s, _ = source_spectra(emlines=False)
#'''
_fspec = os.path.join(dir, 'exp_spectra.snr_test.sky%i.fits' % (i+1))
Isky = [wave, np.ones(len(wave)) * (i + 1.)]
_spec = exp_spectra(
Isky, # sky surface brightness
150. * (i + 1.), # exposure time
1.1, # same airmass
_fspec,
emlines=False
)
# plot comparing the exp spectra to the nominal dark spectra
for band in ['b', 'r', 'z']:
lbl = None
if band == 'b': lbl = ('sky = %i, texp = %.f' % ((i+1), 150.*(i+1.)))
sub0.plot(_spec.wave[band], _spec.flux[band][0,:], c='C%i' % ii, lw=1, label=lbl)
sub1.plot(_spec.wave[band], _spec.flux[band][1,:], c='C%i' % ii, lw=1, label=lbl)
sub2.plot(_spec.wave[band], _spec.flux[band][2,:], c='C%i' % ii, lw=1, label=lbl)
sub3.plot(_spec.wave[band], _spec.ivar[band][0,:], c='C%i' % ii, lw=1, label=lbl)
sub4.plot(_spec.wave[band], _spec.ivar[band][1,:], c='C%i' % ii, lw=1, label=lbl)
sub5.plot(_spec.wave[band], _spec.ivar[band][2,:], c='C%i' % ii, lw=1, label=lbl)
sub0.plot(wave_s, flux_s[0,:], c='k', lw=1, ls='--')
sub1.plot(wave_s, flux_s[1,:], c='k', lw=1, ls='--')
sub2.plot(wave_s, flux_s[2,:], c='k', lw=1, ls='--')
'''
# barebone specsim pipeline for comparison
desi = Simulator(config, num_fibers=flux_s.shape[0])
desi.observation.exposure_time = 150. * (i + 1.) * u.s
desi.atmosphere._surface_brightness_dict[desi.atmosphere.condition] = \
np.ones(len(desi.atmosphere._wavelength)) * (i + 1.) * \
desi.atmosphere.surface_brightness.unit
desi.atmosphere._extinct_emission = False
desi.atmosphere._moon = None
desi.atmosphere.airmass = 1.1
source_flux = np.array([np.clip(np.interp(wave, wave_s, _flux_s), 0, None) for _flux_s in flux_s])
desi.simulate(source_fluxes=source_flux * 1e-17 * desi.simulated['source_flux'].unit)
random_state = np.random.RandomState(0)
desi.generate_random_noise(random_state, use_poisson=True)
scale=1e17
waves, fluxes, ivars, ivars_electron = [], [], [], []
lbl = ('sky=%i' % (i+1))
for table in desi.camera_output:
print(' source', table['num_source_electrons'][0][:5])
print(' sky', table['num_sky_electrons'][0][:5])
print(' dark', table['num_dark_electrons'][0][:5])
print(' RN', table['read_noise_electrons'][0][:5]**2)
_wave = table['wavelength'].astype(float)
_flux = (table['observed_flux']+table['random_noise_electrons']*table['flux_calibration']).T.astype(float)
_flux = _flux * scale
_ivar = table['flux_inverse_variance'].T.astype(float)
_ivar = _ivar / scale**2
sub0.plot(_wave, _flux[0], c='C%i' % ii, lw=1, label=lbl)
sub1.plot(_wave, _flux[1], c='C%i' % ii, lw=1, label=lbl)
sub2.plot(_wave, _flux[2], c='C%i' % ii, lw=1, label=lbl)
sub3.plot(_wave, _ivar[0], c='C%i' % ii, lw=1, label=lbl)
sub4.plot(_wave, _ivar[1], c='C%i' % ii, lw=1, label=lbl)
sub5.plot(_wave, _ivar[2], c='C%i' % ii, lw=1, label=lbl)
lbl = None
'''
sub2.set_xlabel('wavelength', fontsize=20)
sub0.set_xlim(3.6e3, 9.8e3)
sub1.set_xlim(3.6e3, 9.8e3)
sub2.set_xlim(3.6e3, 9.8e3)
sub3.set_xlim(3.6e3, 9.8e3)
sub4.set_xlim(3.6e3, 9.8e3)
sub5.set_xlim(3.6e3, 9.8e3)
sub1.set_ylabel('flux', fontsize=20)
sub4.set_ylabel('ivar', fontsize=20)
sub0.set_ylim(0., 10.)
sub1.set_ylim(0., 10.)
sub2.set_ylim(0., 10.)
sub0.legend(loc='upper right', fontsize=15)
fig.savefig(os.path.join(dir, 'snr_test.png'), bbox_inches='tight')
plt.close()
return None
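def _snr_scaling_check():
    """Quick numeric check of the scaling quoted in the _SNR_test docstring,
    SNR ~ S * sqrt(texp / sky): doubling both the sky level and the exposure
    time leaves the SNR unchanged. This is only the background-limited
    approximation -- it ignores the read noise and dark current terms that the
    notes above point out cannot be neglected at short nominal exposure times.
    """
    snr = lambda signal, texp, sky: signal * np.sqrt(texp / sky)
    assert np.isclose(snr(1., 150., 1.), snr(1., 300., 2.))
    return snr(1., 150., 1.)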
def cmx_skies():
''' read in CMX sky data. The sky surface brightnesses are generated
from the flat fielded sky data that's throughput corrected.
'''
fskies = h5py.File('/global/cfs/cdirs/desi/users/chahah/bgs_exp_coadd/sky_fibers.cmx.v1.hdf5', 'r')
skies = {}
for k in fskies.keys():
skies[k] = fskies[k][...]
return skies
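# Example of pulling one exposure's sky out of cmx_skies(); the keys used here
# ('expid', 'wave_b', 'sky_sb_b') all appear in the functions above -- any other
# keys would need to be checked against the hdf5 file itself.
def _example_cmx_sky(expid=55654):
    skies = cmx_skies()
    is_exp = (skies['expid'] == expid)
    return skies['wave_b'], np.median(skies['sky_sb_b'][is_exp], axis=0)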
def source_spectra(emlines=True):
''' read GAMA-matched fiber-magnitude scaled BGS source spectra
These source spectra are created for GAMA objects. Their spectra are
constructed from continuum that's template matched to the broadband
colors and emission lines from GAMA data (properly flux calibrated).
Then the spectra is scaled down to the r-band fiber magnitude. They
therefore do not require fiber acceptance fractions.
'''
fsource = os.path.join(dir,
'GALeg.g15.sourceSpec%s.1000.seed0.hdf5' % ['.noemission', ''][emlines])
if not os.path.isfile(fsource):
seed = 0
np.random.seed(seed)
# read in GAMA-Legacy catalog with galaxies in both GAMA and Legacy surveys
cata = Cat.GamaLegacy()
gleg = cata.Read('g15', dr_gama=3, dr_legacy=7, silent=True)
# extract meta-data of galaxies
redshift = gleg['gama-spec']['z']
absmag_ugriz = cata.AbsMag(gleg, kcorr=0.1, H0=70, Om0=0.3, galext=False) # ABSMAG k-correct to z=0.1
r_mag_apflux = UT.flux2mag(gleg['legacy-photo']['apflux_r'][:,1]) # aperture flux
r_mag_gama = gleg['gama-photo']['r_petro'] # r-band magnitude from GAMA (SDSS) photometry
ha_gama = gleg['gama-spec']['ha_flux'] # halpha line flux
ngal = len(redshift) # number of galaxies
vdisp = np.repeat(100.0, ngal) # velocity dispersions [km/s]
# match GAMA galaxies to templates
bgs3 = FM.BGStree()
match = bgs3._GamaLegacy(gleg)
hasmatch = (match != -999)
criterion = hasmatch
# randomly pick a few more than 5000 galaxies from the catalog that have
# matching templates because some of the galaxies will have issues where the
# emission line is brighter than the photometric magnitude.
subsamp = np.random.choice(np.arange(ngal)[criterion], int(1.1 * 1000), replace=False)
# generate noiseless spectra for these galaxies
s_bgs = FM.BGSsourceSpectra(wavemin=1500.0, wavemax=15000)
# emission line fluxes from GAMA data
if emlines:
emline_flux = s_bgs.EmissionLineFlux(gleg, index=subsamp, dr_gama=3, silent=True) # emission lines from GAMA
mag_em = r_mag_gama[subsamp]
else:
emline_flux = None
mag_em = None
flux, wave, magnorm_flag = s_bgs.Spectra(
r_mag_apflux[subsamp],
redshift[subsamp],
vdisp[subsamp],
seed=1,
templateid=match[subsamp],
emflux=emline_flux,
mag_em=mag_em,
silent=True)
# only keep 1000 galaxies
isubsamp = np.random.choice(np.arange(len(subsamp))[magnorm_flag], 1000, replace=False)
subsamp = subsamp[isubsamp]
# save to file
fsub = h5py.File(fsource, 'w')
fsub.create_dataset('zred', data=redshift[subsamp])
fsub.create_dataset('absmag_ugriz', data=absmag_ugriz[:,subsamp])
fsub.create_dataset('r_mag_apflux', data=r_mag_apflux[subsamp])
fsub.create_dataset('r_mag_gama', data=r_mag_gama[subsamp])
for grp in gleg.keys():
group = fsub.create_group(grp)
for key in gleg[grp].keys():
group.create_dataset(key, data=gleg[grp][key][subsamp])
fsub.create_dataset('flux', data=flux[isubsamp, :])
fsub.create_dataset('wave', data=wave)
fsub.close()
# read in source spectra
source = h5py.File(fsource, 'r')
wave_s = source['wave'][...]
flux_s = source['flux'][...]
meta = {}
for k in ['r_mag_apflux', 'r_mag_gama', 'zred', 'absmag_ugriz']:
meta[k] = source[k][...]
meta['r_mag'] = UT.flux2mag(source['legacy-photo']['flux_r'][...], method='log')
source.close()
return wave_s, flux_s, meta
def nomdark_spectra(texp, emlines=True):
    ''' spectra observed during nominal dark sky for the given exposure time
    `texp` (seconds). This serves as the reference spectra for a number of
    tests.
    '''
if emlines:
fexp = os.path.join(dir, 'exp_spectra.nominal_dark.%.fs.fits' % texp)
else:
fexp = os.path.join(dir, 'exp_spectra.nominal_dark.noemission.%.fs.fits' % texp)
if os.path.isfile(fexp):
bgs = desispec.io.read_spectra(fexp)
else:
import desisim.simexp
from desimodel.io import load_throughput
# read nominal dark sky surface brightness
wavemin = load_throughput('b').wavemin - 10.0
wavemax = load_throughput('z').wavemax + 10.0
wave = np.arange(round(wavemin, 1), wavemax, 0.8) * u.Angstrom
config = desisim.simexp._specsim_config_for_wave(wave.to('Angstrom').value, dwave_out=0.8, specsim_config_file='desi')
nominal_surface_brightness_dict = config.load_table(config.atmosphere.sky, 'surface_brightness', as_dict=True)
Isky = [wave, nominal_surface_brightness_dict['dark']]
# read in source spectra
wave_s, flux_s, _ = source_spectra(emlines=emlines)
# simulate the exposures and save to file
fdesi = FM.fakeDESIspec()
bgs = fdesi.simExposure(
wave_s,
flux_s,
exptime=texp,
airmass=1.1,
Isky=Isky,
filename=fexp)
return bgs
def exp_spectra(Isky, exptime, airmass, fexp, emlines=True, overwrite=False):
''' spectra observed at the specified
- sky surface brightness
- exposure time
- airmass
'''
if os.path.isfile(fexp):
bgs = desispec.io.read_spectra(fexp)
else:
import desisim.simexp
from desimodel.io import load_throughput
# read in source spectra
wave_s, flux_s, _ = source_spectra(emlines=emlines)
# simulate the exposures and save to file
fdesi = FM.fakeDESIspec()
bgs = fdesi.simExposure(
wave_s,
flux_s,
exptime=exptime,
airmass=airmass,
Isky=Isky,
filename=fexp)
return bgs
def run_redrock(fspec, qos='regular', overwrite=False):
''' run redrock on given spectra file
'''
frr = os.path.join(os.path.dirname(fspec),
'redrock.%s' % os.path.basename(fspec).replace('.fits', '.h5'))
fzb = os.path.join(os.path.dirname(fspec),
'zbest.%s' % os.path.basename(fspec))
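    # frr is the redrock details output (HDF5); fzb is the zbest redshift catalog
    # (FITS) written alongside the input spectra file.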
if not os.path.isfile(fzb) or overwrite:
print('running redrock on %s' % os.path.basename(fspec))
script = '\n'.join([
"#!/bin/bash",
"#SBATCH -N 1",
"#SBATCH -C haswell",
"#SBATCH -q %s" % qos,
'#SBATCH -J rr_%s' % os.path.basename(fspec).replace('.fits', ''),
'#SBATCH -o _rr_%s.o' % os.path.basename(fspec).replace('.fits', ''),
"#SBATCH -t 00:10:00",
"",
"export OMP_NUM_THREADS=1",
"export OMP_PLACES=threads",
"export OMP_PROC_BIND=spread",
"",
"",
"conda activate desi",
"",
"srun -n 32 -c 2 --cpu-bind=cores rrdesi_mpi -o %s -z %s %s" % (frr, fzb, fspec),
""])
# create the script.sh file, execute it and remove it
f = open('script.slurm','w')
f.write(script)
f.close()
os.system('sbatch script.slurm')
os.system('rm script.slurm')
return fzb
def bs_coadd(waves, sbrights):
    ''' quick hack to combine the wavelengths and surface brightnesses of the 3
    cameras onto a single grid: interpolate each camera onto a common wavelength
    array and keep the maximum surface brightness at each wavelength.
    '''
from scipy.interpolate import interp1d
from desimodel.io import load_throughput
# read nominal dark sky surface brightness
wavemin = load_throughput('b').wavemin - 10.0
wavemax = load_throughput('z').wavemax + 10.0
outwave = np.arange(round(wavemin, 1), wavemax, 0.8)
sbrights_interp = []
for wave, sbright in zip(waves, sbrights):
fintrp = interp1d(wave, sbright, fill_value=0., bounds_error=False)
sbrights_interp.append(fintrp(outwave))
outsbright = np.amax(sbrights_interp, axis=0)
return outwave, outsbright
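# Minimal usage sketch for bs_coadd (illustrative arrays only, not real camera data):
#   wave_b = np.linspace(3600., 5800., 500)
#   wave_r = np.linspace(5600., 7700., 500)
#   sky_b, sky_r = np.ones(500), 0.5 * np.ones(500)
#   wave_comb, sky_comb = bs_coadd([wave_b, wave_r], [sky_b, sky_r])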
if __name__=="__main__":
#_SNR_test()
#texp_factor_wavelength()
#texp_factor_wavelength(emlines=False) # without emission lines
#tnom(dchi2=40)
#tnom(dchi2=100)
#validate_spectral_pipeline()
#validate_spectral_pipeline_source()
#validate_spectral_pipeline_GAMA_source()
#validate_cmx_zsuccess_specsim_discrepancy()
#validate_cmx_zsuccess(dchi2=40.)
reproduce_vi_zsuccess()
|
py | 1a3b2b89d41088cb60bcfc1d887d16774c41e85f | from typing import Dict
import numpy as np
import torch
import torch.optim as optim
from allennlp.data.dataset_readers.stanford_sentiment_tree_bank import (
StanfordSentimentTreeBankDatasetReader,
)
from allennlp.data.iterators import BucketIterator
from allennlp.data.vocabulary import Vocabulary
from allennlp.models import Model
from allennlp.modules.seq2vec_encoders import (
Seq2VecEncoder,
PytorchSeq2VecWrapper,
)
from allennlp.modules.text_field_embedders import (
TextFieldEmbedder,
BasicTextFieldEmbedder,
)
from allennlp.modules.token_embedders import Embedding
from allennlp.nn.util import get_text_field_mask
from allennlp.training.metrics import CategoricalAccuracy
from allennlp.training.metrics import FBetaMeasure
from allennlp.training.trainer import Trainer
import torch
import torch.nn.functional as F
from allennlp.nn import InitializerApplicator, RegularizerApplicator
from allennlp.modules.input_variational_dropout import InputVariationalDropout
# EMBEDDING_DIM = 128
# HIDDEN_DIM = 128
@Model.register("lstm_classifier")
class LstmClassifier(Model):
def __init__(
self,
word_embeddings: TextFieldEmbedder,
encoder: Seq2VecEncoder,
vocab: Vocabulary,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: RegularizerApplicator = RegularizerApplicator(),
var_dropout: float = 0.35,
) -> None:
super().__init__(vocab)
# We need the embeddings to convert word IDs to their vector representations
self.word_embeddings = word_embeddings
self.encoder = encoder
self._variational_dropout = InputVariationalDropout(var_dropout)
# After converting a sequence of vectors to a single vector, we feed it into
# a fully-connected linear layer to reduce the dimension to the total number of labels.
self.linear = torch.nn.Linear(
in_features=encoder.get_output_dim(),
out_features=vocab.get_vocab_size("labels"),
)
self._accuracy = CategoricalAccuracy()
self._f1_measure = FBetaMeasure(average="macro")
self.loss_function = torch.nn.CrossEntropyLoss()
# Instances are fed to forward after batching.
# Fields are passed through arguments with the same name.
    def forward(
        self, tokens: Dict[str, torch.Tensor], label: torch.Tensor = None
    ) -> Dict[str, torch.Tensor]:
mask = get_text_field_mask(tokens)
# Forward pass
embeddings = self.word_embeddings(tokens)
embeddings = self._variational_dropout(embeddings)
encoder_out = self.encoder(embeddings, mask)
logits = self.linear(encoder_out)
probs = F.softmax(logits, dim=-1)
output = {"logits": logits, "probs": probs}
if label is not None:
self._accuracy(logits, label)
self._f1_measure(logits, label)
output["loss"] = self.loss_function(logits, label)
return output
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
all_metrics: Dict[str, float] = {}
if not self.training:
all_metrics.update(
{"accuracy": self._accuracy.get_metric(reset=reset)}
)
all_metrics.update(
{"f1": self._f1_measure.get_metric(reset=reset)["fscore"]}
)
return all_metrics
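if __name__ == "__main__":
    # Minimal construction sketch (an assumption, not part of the original training
    # pipeline): builds a toy vocabulary and wires the classifier to an Embedding
    # lookup plus an LSTM encoder using the imports above. Dimensions are arbitrary.
    EMBEDDING_DIM = 128
    HIDDEN_DIM = 128
    vocab = Vocabulary()
    vocab.add_tokens_to_namespace(["the", "movie", "was", "great"], namespace="tokens")
    vocab.add_tokens_to_namespace(["pos", "neg"], namespace="labels")
    token_embedding = Embedding(
        num_embeddings=vocab.get_vocab_size("tokens"), embedding_dim=EMBEDDING_DIM
    )
    word_embeddings = BasicTextFieldEmbedder({"tokens": token_embedding})
    encoder = PytorchSeq2VecWrapper(
        torch.nn.LSTM(EMBEDDING_DIM, HIDDEN_DIM, batch_first=True)
    )
    model = LstmClassifier(word_embeddings, encoder, vocab)
    print(model)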
|
py | 1a3b2bddef1c7ae74ad276f87d78a825b96945af | # -*- coding: utf-8 -*-
#
# Copyright (C) 2019-2021 CERN.
# Copyright (C) 2019 Northwestern University.
#
# Invenio-Cli is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Invenio module to ease the creation and management of applications."""
import os
from pathlib import Path
import click
from ..commands import Commands, ContainersCommands, InstallCommands, \
LocalCommands, RequirementsCommands, UpgradeCommands
from ..errors import InvenioCLIConfigError
from ..helpers.cli_config import CLIConfig
from ..helpers.cookiecutter_wrapper import CookiecutterWrapper
from .assets import assets
from .containers import containers
from .install import install
from .packages import packages
from .services import services
from .utils import calculate_instance_path, pass_cli_config, run_steps
@click.group()
@click.version_option()
@click.pass_context
def invenio_cli(ctx):
"""Initialize CLI context."""
invenio_cli.add_command(assets)
invenio_cli.add_command(containers)
invenio_cli.add_command(install)
invenio_cli.add_command(packages)
invenio_cli.add_command(services)
@invenio_cli.command('check-requirements')
@click.option('--development', '-d', default=False, is_flag=True,
help='Check development requirements.')
def check_requirements(development):
"""Checks the system fulfills the pre-requirements."""
click.secho("Checking pre-requirements...", fg="green")
steps = RequirementsCommands.check(development)
on_fail = "Pre requisites not met."
on_success = "All requisites are fulfilled."
run_steps(steps, on_fail, on_success)
@invenio_cli.command()
def shell():
"""Shell command."""
Commands.shell()
@invenio_cli.command()
@click.option('--debug/--no-debug', '-d/', default=False, is_flag=True,
help='Enable Flask development mode (default: disabled).')
def pyshell(debug):
"""Python shell command."""
Commands.pyshell(debug=debug)
@invenio_cli.command()
@click.argument('flavour', type=click.Choice(['RDM'], case_sensitive=False),
default='RDM', required=False)
@click.option('-t', '--template', required=False,
help='Cookiecutter path or git url to template')
@click.option('-c', '--checkout', required=False,
help='Branch, tag or commit to checkout if --template is a git url') # noqa
def init(flavour, template, checkout):
"""Initializes the application according to the chosen flavour."""
click.secho('Initializing {flavour} application...'.format(
flavour=flavour), fg='green')
template_checkout = (template, checkout)
cookiecutter_wrapper = CookiecutterWrapper(flavour, template_checkout)
try:
click.secho("Running cookiecutter...", fg='green')
project_dir = cookiecutter_wrapper.cookiecutter()
click.secho("Writing invenio-invenio_cli config file...", fg='green')
saved_replay = cookiecutter_wrapper.get_replay()
instance_path = calculate_instance_path(project_dir)
CLIConfig.write(project_dir, flavour, saved_replay, instance_path)
click.secho("Creating logs directory...", fg='green')
os.mkdir(Path(project_dir) / "logs")
except Exception as e:
click.secho(str(e), fg='red')
finally:
cookiecutter_wrapper.remove_config()
@invenio_cli.command()
@click.option('--host', '-h', default='127.0.0.1',
help='The interface to bind to.')
@click.option('--port', '-p', default=5000,
help='The port to bind to.')
@click.option('--debug/--no-debug', '-d/', default=True, is_flag=True,
help='Enable/disable debug mode including auto-reloading '
'(default: enabled).')
@click.option('--services/--no-services', '-s/-n', default=True, is_flag=True,
help='Enable/disable dockerized services (default: enabled).')
@pass_cli_config
def run(cli_config, host, port, debug, services):
"""Starts the local development server.
NOTE: this only makes sense locally so no --local option
"""
commands = LocalCommands(cli_config)
commands.run(host=host, port=str(port), debug=debug, services=services)
@invenio_cli.command()
@pass_cli_config
def destroy(cli_config):
"""Removes all associated resources (containers, images, volumes)."""
commands = Commands(cli_config)
services = ContainersCommands(cli_config)
click.secho(
"Destroying containers, volumes, virtual environment...", fg="green")
steps = commands.destroy() # Destroy virtual environment
steps.extend(services.destroy()) # Destroy services
on_fail = "Failed to destroy instance. You can destroy only services " + \
"using the services command: invenio-cli services destroy"
on_success = "Instance destroyed."
run_steps(steps, on_fail, on_success)
@invenio_cli.command()
@click.option('--script', required=True,
help='The path of custom migration script.'
)
def upgrade(script):
"""Upgrades the current instance to a newer version."""
steps = UpgradeCommands.upgrade(script)
on_fail = "Upgrade failed."
on_success = "Upgrade sucessfull."
run_steps(steps, on_fail, on_success)
|
py | 1a3b2dc5cee6bff62d83484bd2d8afee4980af71 | n = int(input('Enter an integer:'))
tot = 0
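# Count how many numbers in 1..n divide n evenly; exactly two divisors means n is prime.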
for c in range(1, n + 1):
if n % c == 0:
tot += 1
print('\033[33m', end=' ')
else:
print('\033[31m', end=' ')
print('{}'.format(c), end=' ')
print('\n\033[mThe number {} was divisible {} times'.format(n, tot))
if tot == 2:
    print('The number {} is PRIME'.format(n))
else:
    print('The number {} is NOT prime'.format(n)) |
py | 1a3b3113526adfcef2bd96b80da8863ed308c807 | try:
# Python 3.8+
from importlib.metadata import version, PackageNotFoundError
except ModuleNotFoundError:
# Python < 3.8
from importlib_metadata import version, PackageNotFoundError # type: ignore
from ._theme_class import WebvizConfigTheme
from ._webviz_settings_class import WebvizSettings
from ._localhost_token import LocalhostToken
from ._is_reload_process import is_reload_process
from ._plugin_abc import WebvizPluginABC, EncodedFile, ZipFileMember
from ._shared_settings_subscriptions import SHARED_SETTINGS_SUBSCRIPTIONS
from .webviz_instance_info import WEBVIZ_INSTANCE_INFO
from ._oauth2 import Oauth2
try:
__version__ = version("webviz-config")
except PackageNotFoundError:
# package is not installed
pass
|
py | 1a3b327dc39b8b5e3ec1ae453e06d012989f5b0f | import math
import random
from external.agent import AIAgent as ExtrAIAgent
class Game():
# gap is the gap in pixels between the south Pipe and North Pipe.
def __init__(self,cvsHeight=512,cvsWidth=800,pipeHeight=242,pipeWidth=52, fgHeight=118, birdHeight=38,birdWidth=26):
self.cvsHeight=cvsHeight
self.cvsWidth=cvsWidth
self.pipeHeight=pipeHeight
self.pipeWidth=pipeWidth
self.fgHeight=fgHeight
self.birdHeight=birdHeight
self.birdWidth=birdWidth
self.gap = 85
self.bXStart=40
self.bYStart=150
        # constant is the y-position of the south (lower) pipe: the north pipe's height plus the gap.
self.constant=self.pipeHeight+self.gap
# the bird X and Y positions.
self.bX = self.bXStart
self.bY = self.bYStart
self.velY=0
        # gravitational acceleration (negative) applied to the bird's vertical velocity each physics step.
self.gravity = -9.8
self.forceY=0
self.deltaForceY=300
# we initiate the players score
self.score = 0
# reward
self.reward=0
#action
self.action=0
self.actionToTake=0
self.prevAction=0
self.manual=False
self.error=False
self.fps=0
self.stateHistory=[]
self.pipe = []
self.pipe.append({'x' : self.cvsWidth,'y' : 0})
self.gameover=False
self.mAgent=ManualAgent()
self.aiAgent=AIAgent()
def moveUp(self):
self.forceY=self.deltaForceY
def reset(self):
self.pipe = []
self.pipe.append({'x' : self.cvsWidth,'y' : 0})
self.velY=0
self.bY = self.bYStart
self.score = 0
self.forceY=0
self.action=0
def getNearestPipe(self):
minDist=self.cvsWidth*20
nearestPole=-1
for i in range(len(self.pipe)):
point=self.pipe[i]
dstFromBird=(point['x']+self.pipeWidth)-self.bX
if dstFromBird>0 and dstFromBird<minDist:
minDist=dstFromBird
nearestPole=i
return nearestPole
def updateGameLogic(self):
for i in range(len(self.pipe)):
point=self.pipe[i]
self.pipe[i]['x']-=1
if self.pipe[i]['x']==self.cvsWidth-188:
self.pipe.append({
'x' : self.cvsWidth,
'y' : math.floor(random.random()*self.pipeHeight)-self.pipeHeight
})
if (self.bX+self.birdWidth>=point['x'] and self.bX<=point['x']
and (self.bY<=point['y']+self.pipeHeight or self.bY+self.birdHeight>=point['y']+self.pipeHeight+self.gap)
or self.bY+self.birdHeight>=self.cvsHeight-self.fgHeight
or self.bY<=0):
self.reward=-10
self.gameover=True
break
if(point['x']==10):
self.score+=1
self.reward=10
for i in range(len(self.pipe)):
if(self.pipe[i]['x']<=-188):
del self.pipe[i]
break
def takeAction(self):
nearestPole=self.getNearestPipe()
state={
'bX':self.bX/self.cvsWidth,
'bY':self.bY/self.cvsHeight,
'pX1':self.pipe[nearestPole]['x']/self.cvsWidth,
'pX2':self.pipe[nearestPole]['x']/self.cvsWidth,
'pY1':(self.pipe[nearestPole]['y']+self.pipeHeight)/self.cvsHeight,
'pY2':(self.pipe[nearestPole]['y']+self.pipeHeight+self.gap)/self.cvsHeight,
'velY':self.velY,
'action':self.action
}
if(self.manual):
self.action=self.mAgent.getAction(state,self.reward)
else:
self.action=self.aiAgent.getAction(state,self.reward)
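        # Euler-integrate the vertical velocity with a fixed 15 ms timestep: the flap
        # force scales with the chosen action while gravity always pulls the bird down.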
self.velY=self.velY+(self.gravity+self.deltaForceY*self.action)*(0.015)
self.bY-= self.velY
self.reward=0
self.action=0
def getGameState(self):
state={
'bX':self.bX,
'bY':self.bY,
'pipe':self.pipe,
'gap':self.gap,
'action':self.action,
'score':self.score,
'gameover':self.gameover,
'reward':self.reward
}
return state
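# Hypothetical per-frame driver loop (an assumption; the original rendering/input
# loop lives outside this module):
#   game = Game()
#   while not game.gameover:
#       game.takeAction()        # query the agent and integrate the bird's motion
#       game.updateGameLogic()   # scroll pipes, detect collisions, update the score
#       state = game.getGameState()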
class Agent():
def __init__(self):
self.action=0
def getAction(self,state,reward):
return self.action
def setNextAction(self, action):
self.action=action
class ManualAgent(Agent):
def __init__(self):
super().__init__()
def getAction(self,state,reward):
action=self.action
self.action=0
return action
class AIAgent(Agent):
def __init__(self):
super().__init__()
self.externalAgent=ExtrAIAgent()
def getAction(self,state,reward):
self.action=self.externalAgent.takeAction(state,reward)
return self.action
|
py | 1a3b34d9885ea0635e49f688e05848f66b2741ed | import random
import time
from subprocess import Popen
# adjust tempo here
tempo = 90
lookup = {
'C': 'c',
'D': 'd',
'E': 'e',
'F': 'f',
'G': 'g',
'A': 'hey',
'B': 'b',
'Bb': 'b flat',
'Eb': 'e flat',
'Ab': 'hey flat',
'Db': 'd flat',
'Gb': 'g flat',
'C#': 'c sharp',
'F#': 'f sharp',
}
pop_sound = '/System/Library/Sounds/Pop.aiff'
tink_sound = '/System/Library/Sounds/Tink.aiff'
notes = list(lookup.keys())
quarter_note_duration_seconds = 60.0 / tempo
measure_quarter_notes = 4
min_num_measures = 4
max_num_measures = 4
# Process
p = None
def speak(text: str) -> None:
    global p  # keep a handle to the spawned 'say' process so main() can terminate it on exit
    p = Popen(['say', text])
def metronome_sound(path: str) -> None:
Popen(['afplay', path])
def rand_note() -> str:
return notes[random.randint(0, len(notes) - 1)]
def main():
keys = [rand_note()]
i = 1
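    # Draw 101 keys, rejecting any draw equal to the previous one so that no two
    # consecutive practice keys repeat.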
while i < 101:
# print new key
n = rand_note()
# skip consecutive identical keys
if keys[i - 1] != n:
keys.append(n)
i += 1
for idx, k in enumerate(keys[:100]):
measures = random.randint(min_num_measures, max_num_measures)
# print new key
print(f'<{k}> ({measures})')
speak(lookup[k])
for m in range(1, measures + 1):
if m == measures - 1:
next_key = keys[idx + 1]
next_key_spoken = lookup[next_key]
speak(f'next key is {next_key_spoken}')
print(f'The next key is {next_key}....')
print(f'Measure {m}')
for q in range(0, measure_quarter_notes):
metronome_sound(pop_sound if q > 0 else tink_sound)
print(f'* {q + 1}')
time.sleep(quarter_note_duration_seconds)
if p:
p.terminate()
if __name__ == '__main__':
main()
|
py | 1a3b3581e969fc977386cec47bd5ee34c97698e5 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from math import exp
import sys
import warnings
from typing import Any, Iterable, Optional, Union, overload, TYPE_CHECKING
import numpy
from pyspark import RDD, SparkContext, since
from pyspark.streaming.dstream import DStream
from pyspark.mllib.common import callMLlibFunc, _py2java, _java2py
from pyspark.mllib.linalg import _convert_to_vector
from pyspark.mllib.regression import (
LabeledPoint,
LinearModel,
_regression_train_wrapper,
StreamingLinearAlgorithm,
)
from pyspark.mllib.util import Saveable, Loader, inherit_doc
from pyspark.mllib.linalg import Vector
from pyspark.mllib.regression import LabeledPoint
if TYPE_CHECKING:
from pyspark.mllib._typing import VectorLike
__all__ = [
"LogisticRegressionModel",
"LogisticRegressionWithSGD",
"LogisticRegressionWithLBFGS",
"SVMModel",
"SVMWithSGD",
"NaiveBayesModel",
"NaiveBayes",
"StreamingLogisticRegressionWithSGD",
]
class LinearClassificationModel(LinearModel):
"""
A private abstract class representing a multiclass classification
model. The categories are represented by int values: 0, 1, 2, etc.
"""
def __init__(self, weights: Vector, intercept: float) -> None:
super(LinearClassificationModel, self).__init__(weights, intercept)
self._threshold: Optional[float] = None
@since("1.4.0")
def setThreshold(self, value: float) -> None:
"""
Sets the threshold that separates positive predictions from
negative predictions. An example with prediction score greater
than or equal to this threshold is identified as a positive,
and negative otherwise. It is used for binary classification
only.
"""
self._threshold = value
@property # type: ignore[misc]
@since("1.4.0")
def threshold(self) -> Optional[float]:
"""
Returns the threshold (if any) used for converting raw
prediction scores into 0/1 predictions. It is used for
binary classification only.
"""
return self._threshold
@since("1.4.0")
def clearThreshold(self) -> None:
"""
Clears the threshold so that `predict` will output raw
prediction scores. It is used for binary classification only.
"""
self._threshold = None
@overload
def predict(self, test: "VectorLike") -> Union[int, float]:
...
@overload
def predict(self, test: RDD["VectorLike"]) -> RDD[Union[int, float]]:
...
def predict(
self, test: Union["VectorLike", RDD["VectorLike"]]
) -> Union[RDD[Union[int, float]], Union[int, float]]:
"""
Predict values for a single data point or an RDD of points
using the model trained.
.. versionadded:: 1.4.0
"""
raise NotImplementedError
class LogisticRegressionModel(LinearClassificationModel):
"""
Classification model trained using Multinomial/Binary Logistic
Regression.
.. versionadded:: 0.9.0
Parameters
----------
weights : :py:class:`pyspark.mllib.linalg.Vector`
Weights computed for every feature.
intercept : float
Intercept computed for this model. (Only used in Binary Logistic
Regression. In Multinomial Logistic Regression, the intercepts will
not be a single value, so the intercepts will be part of the
weights.)
numFeatures : int
The dimension of the features.
numClasses : int
The number of possible outcomes for k classes classification problem
in Multinomial Logistic Regression. By default, it is binary
logistic regression so numClasses will be set to 2.
Examples
--------
>>> from pyspark.mllib.linalg import SparseVector
>>> data = [
... LabeledPoint(0.0, [0.0, 1.0]),
... LabeledPoint(1.0, [1.0, 0.0]),
... ]
>>> lrm = LogisticRegressionWithSGD.train(sc.parallelize(data), iterations=10)
>>> lrm.predict([1.0, 0.0])
1
>>> lrm.predict([0.0, 1.0])
0
>>> lrm.predict(sc.parallelize([[1.0, 0.0], [0.0, 1.0]])).collect()
[1, 0]
>>> lrm.clearThreshold()
>>> lrm.predict([0.0, 1.0])
0.279...
>>> sparse_data = [
... LabeledPoint(0.0, SparseVector(2, {0: 0.0})),
... LabeledPoint(1.0, SparseVector(2, {1: 1.0})),
... LabeledPoint(0.0, SparseVector(2, {0: 1.0})),
... LabeledPoint(1.0, SparseVector(2, {1: 2.0}))
... ]
>>> lrm = LogisticRegressionWithSGD.train(sc.parallelize(sparse_data), iterations=10)
>>> lrm.predict(numpy.array([0.0, 1.0]))
1
>>> lrm.predict(numpy.array([1.0, 0.0]))
0
>>> lrm.predict(SparseVector(2, {1: 1.0}))
1
>>> lrm.predict(SparseVector(2, {0: 1.0}))
0
>>> import os, tempfile
>>> path = tempfile.mkdtemp()
>>> lrm.save(sc, path)
>>> sameModel = LogisticRegressionModel.load(sc, path)
>>> sameModel.predict(numpy.array([0.0, 1.0]))
1
>>> sameModel.predict(SparseVector(2, {0: 1.0}))
0
>>> from shutil import rmtree
>>> try:
... rmtree(path)
... except BaseException:
... pass
>>> multi_class_data = [
... LabeledPoint(0.0, [0.0, 1.0, 0.0]),
... LabeledPoint(1.0, [1.0, 0.0, 0.0]),
... LabeledPoint(2.0, [0.0, 0.0, 1.0])
... ]
>>> data = sc.parallelize(multi_class_data)
>>> mcm = LogisticRegressionWithLBFGS.train(data, iterations=10, numClasses=3)
>>> mcm.predict([0.0, 0.5, 0.0])
0
>>> mcm.predict([0.8, 0.0, 0.0])
1
>>> mcm.predict([0.0, 0.0, 0.3])
2
"""
def __init__(
self, weights: Vector, intercept: float, numFeatures: int, numClasses: int
) -> None:
super(LogisticRegressionModel, self).__init__(weights, intercept)
self._numFeatures = int(numFeatures)
self._numClasses = int(numClasses)
self._threshold = 0.5
if self._numClasses == 2:
self._dataWithBiasSize = None
self._weightsMatrix = None
else:
self._dataWithBiasSize = self._coeff.size // ( # type: ignore[attr-defined]
self._numClasses - 1
)
self._weightsMatrix = self._coeff.toArray().reshape(
self._numClasses - 1, self._dataWithBiasSize
)
@property # type: ignore[misc]
@since("1.4.0")
def numFeatures(self) -> int:
"""
Dimension of the features.
"""
return self._numFeatures
@property # type: ignore[misc]
@since("1.4.0")
def numClasses(self) -> int:
"""
Number of possible outcomes for k classes classification problem
in Multinomial Logistic Regression.
"""
return self._numClasses
@overload
def predict(self, x: "VectorLike") -> Union[int, float]:
...
@overload
def predict(self, x: RDD["VectorLike"]) -> RDD[Union[int, float]]:
...
def predict(
self, x: Union["VectorLike", RDD["VectorLike"]]
) -> Union[RDD[Union[int, float]], Union[int, float]]:
"""
Predict values for a single data point or an RDD of points
using the model trained.
.. versionadded:: 0.9.0
"""
if isinstance(x, RDD):
return x.map(lambda v: self.predict(v))
x = _convert_to_vector(x)
if self.numClasses == 2:
margin = self.weights.dot(x) + self._intercept # type: ignore[attr-defined]
if margin > 0:
prob = 1 / (1 + exp(-margin))
else:
exp_margin = exp(margin)
prob = exp_margin / (1 + exp_margin)
if self._threshold is None:
return prob
else:
return 1 if prob > self._threshold else 0
else:
assert self._weightsMatrix is not None
best_class = 0
max_margin = 0.0
if x.size + 1 == self._dataWithBiasSize: # type: ignore[attr-defined]
for i in range(0, self._numClasses - 1):
margin = (
x.dot(self._weightsMatrix[i][0 : x.size]) # type: ignore[attr-defined]
+ self._weightsMatrix[i][x.size] # type: ignore[attr-defined]
)
if margin > max_margin:
max_margin = margin
best_class = i + 1
else:
for i in range(0, self._numClasses - 1):
margin = x.dot(self._weightsMatrix[i]) # type: ignore[attr-defined]
if margin > max_margin:
max_margin = margin
best_class = i + 1
return best_class
@since("1.4.0")
def save(self, sc: SparkContext, path: str) -> None:
"""
Save this model to the given path.
"""
assert sc._jvm is not None
java_model = sc._jvm.org.apache.spark.mllib.classification.LogisticRegressionModel(
_py2java(sc, self._coeff), self.intercept, self.numFeatures, self.numClasses
)
java_model.save(sc._jsc.sc(), path)
@classmethod
@since("1.4.0")
def load(cls, sc: SparkContext, path: str) -> "LogisticRegressionModel":
"""
Load a model from the given path.
"""
assert sc._jvm is not None
java_model = sc._jvm.org.apache.spark.mllib.classification.LogisticRegressionModel.load(
sc._jsc.sc(), path
)
weights = _java2py(sc, java_model.weights())
intercept = java_model.intercept()
numFeatures = java_model.numFeatures()
numClasses = java_model.numClasses()
threshold = java_model.getThreshold().get()
model = LogisticRegressionModel(weights, intercept, numFeatures, numClasses)
model.setThreshold(threshold)
return model
def __repr__(self) -> str:
return (
"pyspark.mllib.LogisticRegressionModel: intercept = {}, "
"numFeatures = {}, numClasses = {}, threshold = {}"
).format(self._intercept, self._numFeatures, self._numClasses, self._threshold)
class LogisticRegressionWithSGD:
"""
Train a classification model for Binary Logistic Regression using Stochastic Gradient Descent.
.. versionadded:: 0.9.0
.. deprecated:: 2.0.0
Use ml.classification.LogisticRegression or LogisticRegressionWithLBFGS.
"""
@classmethod
def train(
cls,
data: RDD[LabeledPoint],
iterations: int = 100,
step: float = 1.0,
miniBatchFraction: float = 1.0,
initialWeights: Optional["VectorLike"] = None,
regParam: float = 0.01,
regType: str = "l2",
intercept: bool = False,
validateData: bool = True,
convergenceTol: float = 0.001,
) -> LogisticRegressionModel:
"""
Train a logistic regression model on the given data.
.. versionadded:: 0.9.0
Parameters
----------
data : :py:class:`pyspark.RDD`
The training data, an RDD of :py:class:`pyspark.mllib.regression.LabeledPoint`.
iterations : int, optional
The number of iterations.
(default: 100)
step : float, optional
The step parameter used in SGD.
(default: 1.0)
miniBatchFraction : float, optional
Fraction of data to be used for each SGD iteration.
(default: 1.0)
initialWeights : :py:class:`pyspark.mllib.linalg.Vector` or convertible, optional
The initial weights.
(default: None)
regParam : float, optional
The regularizer parameter.
(default: 0.01)
regType : str, optional
The type of regularizer used for training our model.
Supported values:
- "l1" for using L1 regularization
- "l2" for using L2 regularization (default)
- None for no regularization
intercept : bool, optional
Boolean parameter which indicates the use or not of the
augmented representation for training data (i.e., whether bias
features are activated or not).
(default: False)
validateData : bool, optional
Boolean parameter which indicates if the algorithm should
validate data before training.
(default: True)
convergenceTol : float, optional
A condition which decides iteration termination.
(default: 0.001)
"""
warnings.warn(
"Deprecated in 2.0.0. Use ml.classification.LogisticRegression or "
"LogisticRegressionWithLBFGS.",
FutureWarning,
)
def train(rdd: RDD[LabeledPoint], i: Vector) -> Iterable[Any]:
return callMLlibFunc(
"trainLogisticRegressionModelWithSGD",
rdd,
int(iterations),
float(step),
float(miniBatchFraction),
i,
float(regParam),
regType,
bool(intercept),
bool(validateData),
float(convergenceTol),
)
return _regression_train_wrapper(train, LogisticRegressionModel, data, initialWeights)
class LogisticRegressionWithLBFGS:
"""
Train a classification model for Multinomial/Binary Logistic Regression
using Limited-memory BFGS.
Standard feature scaling and L2 regularization are used by default.
.. versionadded:: 1.2.0
"""
@classmethod
def train(
cls,
data: RDD[LabeledPoint],
iterations: int = 100,
initialWeights: Optional["VectorLike"] = None,
regParam: float = 0.0,
regType: str = "l2",
intercept: bool = False,
corrections: int = 10,
tolerance: float = 1e-6,
validateData: bool = True,
numClasses: int = 2,
) -> LogisticRegressionModel:
"""
Train a logistic regression model on the given data.
.. versionadded:: 1.2.0
Parameters
----------
data : :py:class:`pyspark.RDD`
The training data, an RDD of :py:class:`pyspark.mllib.regression.LabeledPoint`.
iterations : int, optional
The number of iterations.
(default: 100)
initialWeights : :py:class:`pyspark.mllib.linalg.Vector` or convertible, optional
The initial weights.
(default: None)
regParam : float, optional
The regularizer parameter.
            (default: 0.0)
regType : str, optional
The type of regularizer used for training our model.
Supported values:
- "l1" for using L1 regularization
- "l2" for using L2 regularization (default)
- None for no regularization
intercept : bool, optional
Boolean parameter which indicates the use or not of the
augmented representation for training data (i.e., whether bias
features are activated or not).
(default: False)
corrections : int, optional
The number of corrections used in the LBFGS update.
If a known updater is used for binary classification,
it calls the ml implementation and this parameter will
have no effect. (default: 10)
tolerance : float, optional
The convergence tolerance of iterations for L-BFGS.
(default: 1e-6)
validateData : bool, optional
Boolean parameter which indicates if the algorithm should
validate data before training.
(default: True)
numClasses : int, optional
The number of classes (i.e., outcomes) a label can take in
Multinomial Logistic Regression.
(default: 2)
Examples
--------
>>> data = [
... LabeledPoint(0.0, [0.0, 1.0]),
... LabeledPoint(1.0, [1.0, 0.0]),
... ]
>>> lrm = LogisticRegressionWithLBFGS.train(sc.parallelize(data), iterations=10)
>>> lrm.predict([1.0, 0.0])
1
>>> lrm.predict([0.0, 1.0])
0
"""
def train(rdd: RDD[LabeledPoint], i: Vector) -> Iterable[Any]:
return callMLlibFunc(
"trainLogisticRegressionModelWithLBFGS",
rdd,
int(iterations),
i,
float(regParam),
regType,
bool(intercept),
int(corrections),
float(tolerance),
bool(validateData),
int(numClasses),
)
if initialWeights is None:
if numClasses == 2:
initialWeights = [0.0] * len(data.first().features)
else:
if intercept:
initialWeights = [0.0] * (len(data.first().features) + 1) * (numClasses - 1)
else:
initialWeights = [0.0] * len(data.first().features) * (numClasses - 1)
return _regression_train_wrapper(train, LogisticRegressionModel, data, initialWeights)
class SVMModel(LinearClassificationModel):
"""
Model for Support Vector Machines (SVMs).
.. versionadded:: 0.9.0
Parameters
----------
weights : :py:class:`pyspark.mllib.linalg.Vector`
Weights computed for every feature.
intercept : float
Intercept computed for this model.
Examples
--------
>>> from pyspark.mllib.linalg import SparseVector
>>> data = [
... LabeledPoint(0.0, [0.0]),
... LabeledPoint(1.0, [1.0]),
... LabeledPoint(1.0, [2.0]),
... LabeledPoint(1.0, [3.0])
... ]
>>> svm = SVMWithSGD.train(sc.parallelize(data), iterations=10)
>>> svm.predict([1.0])
1
>>> svm.predict(sc.parallelize([[1.0]])).collect()
[1]
>>> svm.clearThreshold()
>>> svm.predict(numpy.array([1.0]))
1.44...
>>> sparse_data = [
... LabeledPoint(0.0, SparseVector(2, {0: -1.0})),
... LabeledPoint(1.0, SparseVector(2, {1: 1.0})),
... LabeledPoint(0.0, SparseVector(2, {0: 0.0})),
... LabeledPoint(1.0, SparseVector(2, {1: 2.0}))
... ]
>>> svm = SVMWithSGD.train(sc.parallelize(sparse_data), iterations=10)
>>> svm.predict(SparseVector(2, {1: 1.0}))
1
>>> svm.predict(SparseVector(2, {0: -1.0}))
0
>>> import os, tempfile
>>> path = tempfile.mkdtemp()
>>> svm.save(sc, path)
>>> sameModel = SVMModel.load(sc, path)
>>> sameModel.predict(SparseVector(2, {1: 1.0}))
1
>>> sameModel.predict(SparseVector(2, {0: -1.0}))
0
>>> from shutil import rmtree
>>> try:
... rmtree(path)
... except BaseException:
... pass
"""
def __init__(self, weights: Vector, intercept: float) -> None:
super(SVMModel, self).__init__(weights, intercept)
self._threshold = 0.0
@overload
def predict(self, x: "VectorLike") -> Union[int, float]:
...
@overload
def predict(self, x: RDD["VectorLike"]) -> RDD[Union[int, float]]:
...
def predict(
self, x: Union["VectorLike", RDD["VectorLike"]]
) -> Union[RDD[Union[int, float]], Union[int, float]]:
"""
Predict values for a single data point or an RDD of points
using the model trained.
.. versionadded:: 0.9.0
"""
if isinstance(x, RDD):
return x.map(lambda v: self.predict(v))
x = _convert_to_vector(x)
margin = self.weights.dot(x) + self.intercept # type: ignore[attr-defined]
if self._threshold is None:
return margin
else:
return 1 if margin > self._threshold else 0
@since("1.4.0")
def save(self, sc: SparkContext, path: str) -> None:
"""
Save this model to the given path.
"""
assert sc._jvm is not None
java_model = sc._jvm.org.apache.spark.mllib.classification.SVMModel(
_py2java(sc, self._coeff), self.intercept
)
java_model.save(sc._jsc.sc(), path)
@classmethod
@since("1.4.0")
def load(cls, sc: SparkContext, path: str) -> "SVMModel":
"""
Load a model from the given path.
"""
assert sc._jvm is not None
java_model = sc._jvm.org.apache.spark.mllib.classification.SVMModel.load(sc._jsc.sc(), path)
weights = _java2py(sc, java_model.weights())
intercept = java_model.intercept()
threshold = java_model.getThreshold().get()
model = SVMModel(weights, intercept)
model.setThreshold(threshold)
return model
class SVMWithSGD:
"""
Train a Support Vector Machine (SVM) using Stochastic Gradient Descent.
.. versionadded:: 0.9.0
"""
@classmethod
def train(
cls,
data: RDD[LabeledPoint],
iterations: int = 100,
step: float = 1.0,
regParam: float = 0.01,
miniBatchFraction: float = 1.0,
initialWeights: Optional["VectorLike"] = None,
regType: str = "l2",
intercept: bool = False,
validateData: bool = True,
convergenceTol: float = 0.001,
) -> SVMModel:
"""
Train a support vector machine on the given data.
.. versionadded:: 0.9.0
Parameters
----------
data : :py:class:`pyspark.RDD`
The training data, an RDD of :py:class:`pyspark.mllib.regression.LabeledPoint`.
iterations : int, optional
The number of iterations.
(default: 100)
step : float, optional
The step parameter used in SGD.
(default: 1.0)
regParam : float, optional
The regularizer parameter.
(default: 0.01)
miniBatchFraction : float, optional
Fraction of data to be used for each SGD iteration.
(default: 1.0)
initialWeights : :py:class:`pyspark.mllib.linalg.Vector` or convertible, optional
The initial weights.
(default: None)
regType : str, optional
The type of regularizer used for training our model.
Allowed values:
- "l1" for using L1 regularization
- "l2" for using L2 regularization (default)
- None for no regularization
intercept : bool, optional
Boolean parameter which indicates the use or not of the
augmented representation for training data (i.e. whether bias
features are activated or not).
(default: False)
validateData : bool, optional
Boolean parameter which indicates if the algorithm should
validate data before training.
(default: True)
convergenceTol : float, optional
A condition which decides iteration termination.
(default: 0.001)
"""
def train(rdd: RDD[LabeledPoint], i: Vector) -> Iterable[Any]:
return callMLlibFunc(
"trainSVMModelWithSGD",
rdd,
int(iterations),
float(step),
float(regParam),
float(miniBatchFraction),
i,
regType,
bool(intercept),
bool(validateData),
float(convergenceTol),
)
return _regression_train_wrapper(train, SVMModel, data, initialWeights)
@inherit_doc
class NaiveBayesModel(Saveable, Loader["NaiveBayesModel"]):
"""
Model for Naive Bayes classifiers.
.. versionadded:: 0.9.0
Parameters
----------
labels : :py:class:`numpy.ndarray`
List of labels.
pi : :py:class:`numpy.ndarray`
Log of class priors, whose dimension is C, number of labels.
theta : :py:class:`numpy.ndarray`
Log of class conditional probabilities, whose dimension is C-by-D,
where D is number of features.
Examples
--------
>>> from pyspark.mllib.linalg import SparseVector
>>> data = [
... LabeledPoint(0.0, [0.0, 0.0]),
... LabeledPoint(0.0, [0.0, 1.0]),
... LabeledPoint(1.0, [1.0, 0.0]),
... ]
>>> model = NaiveBayes.train(sc.parallelize(data))
>>> model.predict(numpy.array([0.0, 1.0]))
0.0
>>> model.predict(numpy.array([1.0, 0.0]))
1.0
>>> model.predict(sc.parallelize([[1.0, 0.0]])).collect()
[1.0]
>>> sparse_data = [
... LabeledPoint(0.0, SparseVector(2, {1: 0.0})),
... LabeledPoint(0.0, SparseVector(2, {1: 1.0})),
... LabeledPoint(1.0, SparseVector(2, {0: 1.0}))
... ]
>>> model = NaiveBayes.train(sc.parallelize(sparse_data))
>>> model.predict(SparseVector(2, {1: 1.0}))
0.0
>>> model.predict(SparseVector(2, {0: 1.0}))
1.0
>>> import os, tempfile
>>> path = tempfile.mkdtemp()
>>> model.save(sc, path)
>>> sameModel = NaiveBayesModel.load(sc, path)
>>> sameModel.predict(SparseVector(2, {0: 1.0})) == model.predict(SparseVector(2, {0: 1.0}))
True
>>> from shutil import rmtree
>>> try:
... rmtree(path)
... except OSError:
... pass
"""
def __init__(self, labels: numpy.ndarray, pi: numpy.ndarray, theta: numpy.ndarray) -> None:
self.labels = labels
self.pi = pi
self.theta = theta
@overload
def predict(self, x: "VectorLike") -> numpy.float64:
...
@overload
def predict(self, x: RDD["VectorLike"]) -> RDD[numpy.float64]:
...
@since("0.9.0")
def predict(
self, x: Union["VectorLike", RDD["VectorLike"]]
) -> Union[numpy.float64, RDD[numpy.float64]]:
"""
Return the most likely class for a data vector
or an RDD of vectors
"""
if isinstance(x, RDD):
return x.map(lambda v: self.predict(v))
x = _convert_to_vector(x)
return self.labels[
numpy.argmax(self.pi + x.dot(self.theta.transpose())) # type: ignore[attr-defined]
]
def save(self, sc: SparkContext, path: str) -> None:
"""
Save this model to the given path.
"""
assert sc._jvm is not None
java_labels = _py2java(sc, self.labels.tolist())
java_pi = _py2java(sc, self.pi.tolist())
java_theta = _py2java(sc, self.theta.tolist())
java_model = sc._jvm.org.apache.spark.mllib.classification.NaiveBayesModel(
java_labels, java_pi, java_theta
)
java_model.save(sc._jsc.sc(), path)
@classmethod
@since("1.4.0")
def load(cls, sc: SparkContext, path: str) -> "NaiveBayesModel":
"""
Load a model from the given path.
"""
assert sc._jvm is not None
java_model = sc._jvm.org.apache.spark.mllib.classification.NaiveBayesModel.load(
sc._jsc.sc(), path
)
# Can not unpickle array.array from Pickle in Python3 with "bytes"
py_labels = _java2py(sc, java_model.labels(), "latin1")
py_pi = _java2py(sc, java_model.pi(), "latin1")
py_theta = _java2py(sc, java_model.theta(), "latin1")
return NaiveBayesModel(py_labels, py_pi, numpy.array(py_theta))
class NaiveBayes:
"""
Train a Multinomial Naive Bayes model.
.. versionadded:: 0.9.0
"""
@classmethod
def train(cls, data: RDD[LabeledPoint], lambda_: float = 1.0) -> NaiveBayesModel:
"""
Train a Naive Bayes model given an RDD of (label, features)
vectors.
This is the `Multinomial NB <http://tinyurl.com/lsdw6p>`_ which
can handle all kinds of discrete data. For example, by
converting documents into TF-IDF vectors, it can be used for
document classification. By making every vector a 0-1 vector,
it can also be used as `Bernoulli NB <http://tinyurl.com/p7c96j6>`_.
The input feature values must be nonnegative.
.. versionadded:: 0.9.0
Parameters
----------
data : :py:class:`pyspark.RDD`
The training data, an RDD of :py:class:`pyspark.mllib.regression.LabeledPoint`.
lambda\\_ : float, optional
The smoothing parameter.
(default: 1.0)
"""
first = data.first()
if not isinstance(first, LabeledPoint):
raise ValueError("`data` should be an RDD of LabeledPoint")
labels, pi, theta = callMLlibFunc("trainNaiveBayesModel", data, lambda_)
return NaiveBayesModel(labels.toArray(), pi.toArray(), numpy.array(theta))
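# Hypothetical document-classification sketch for NaiveBayes (assumes
# pyspark.mllib.feature; the RDD `docs` of (label, text) pairs is made up):
#   from pyspark.mllib.feature import HashingTF, IDF
#   tf = HashingTF().transform(docs.map(lambda x: x[1].split()))
#   tfidf = IDF().fit(tf).transform(tf)
#   training = docs.map(lambda x: x[0]).zip(tfidf).map(lambda x: LabeledPoint(x[0], x[1]))
#   model = NaiveBayes.train(training, lambda_=1.0)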
@inherit_doc
class StreamingLogisticRegressionWithSGD(StreamingLinearAlgorithm):
"""
Train or predict a logistic regression model on streaming data.
Training uses Stochastic Gradient Descent to update the model based on
each new batch of incoming data from a DStream.
Each batch of data is assumed to be an RDD of LabeledPoints.
The number of data points per batch can vary, but the number
of features must be constant. An initial weight
vector must be provided.
.. versionadded:: 1.5.0
Parameters
----------
stepSize : float, optional
Step size for each iteration of gradient descent.
(default: 0.1)
numIterations : int, optional
Number of iterations run for each batch of data.
(default: 50)
miniBatchFraction : float, optional
Fraction of each batch of data to use for updates.
(default: 1.0)
regParam : float, optional
L2 Regularization parameter.
(default: 0.0)
convergenceTol : float, optional
Value used to determine when to terminate iterations.
(default: 0.001)
"""
def __init__(
self,
stepSize: float = 0.1,
numIterations: int = 50,
miniBatchFraction: float = 1.0,
regParam: float = 0.0,
convergenceTol: float = 0.001,
) -> None:
self.stepSize = stepSize
self.numIterations = numIterations
self.regParam = regParam
self.miniBatchFraction = miniBatchFraction
self.convergenceTol = convergenceTol
self._model: Optional[LogisticRegressionModel] = None
super(StreamingLogisticRegressionWithSGD, self).__init__(model=self._model)
@since("1.5.0")
def setInitialWeights(
self, initialWeights: "VectorLike"
) -> "StreamingLogisticRegressionWithSGD":
"""
Set the initial value of weights.
This must be set before running trainOn and predictOn.
"""
initialWeights = _convert_to_vector(initialWeights)
# LogisticRegressionWithSGD does only binary classification.
self._model = LogisticRegressionModel(
initialWeights, 0, initialWeights.size, 2 # type: ignore[attr-defined]
)
return self
@since("1.5.0")
def trainOn(self, dstream: "DStream[LabeledPoint]") -> None:
"""Train the model on the incoming dstream."""
self._validate(dstream)
def update(rdd: RDD[LabeledPoint]) -> None:
# LogisticRegressionWithSGD.train raises an error for an empty RDD.
if not rdd.isEmpty():
self._model = LogisticRegressionWithSGD.train(
rdd,
self.numIterations,
self.stepSize,
self.miniBatchFraction,
self._model.weights, # type: ignore[union-attr]
regParam=self.regParam,
convergenceTol=self.convergenceTol,
)
dstream.foreachRDD(update)
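# Hypothetical streaming usage sketch (assumes an existing StreamingContext `ssc`
# and DStreams of LabeledPoints; not part of this module):
#   model = StreamingLogisticRegressionWithSGD()
#   model.setInitialWeights([0.0, 0.0])
#   model.trainOn(training_stream)
#   model.predictOn(test_stream.map(lambda lp: lp.features)).pprint()
#   ssc.start(); ssc.awaitTermination()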
def _test() -> None:
import doctest
from pyspark.sql import SparkSession
import pyspark.mllib.classification
globs = pyspark.mllib.classification.__dict__.copy()
spark = (
SparkSession.builder.master("local[4]").appName("mllib.classification tests").getOrCreate()
)
globs["sc"] = spark.sparkContext
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
py | 1a3b364752e55daa0a145074ba4b03d55bc0b21d | # Ref: https://github.com/doliom/basic-petri-net
from place import P
from transition import T
from model import Model
from arc import Arc
def main():
#places with marking {3.wait, done, free}
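    # Simple single-server workflow: three clients wait (p1) and one server is free (p4);
    # t1 starts service (Wait+Free -> Inside+Busy), t2 completes it (Inside+Busy -> Done+Docu),
    # and t3 files the documentation, freeing the server (Docu -> Free).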
p1 = P("p1", 3) # Wait
p2 = P("p2", 0) # Inside
p3 = P("p3", 1) # Done
p4 = P("p4", 1) # Free
p5 = P("p5", 0) # Busy
p6 = P("p6", 0) # Docu
#transitions
t1 = T("t1") # Start
t2 = T("t2") # Change
t3 = T("t3") # End
#arcs
arc1 = Arc(name="from p1 to t1", prevP=p1, nextT=t1, n=1)
arc2 = Arc(name="from t1 to p2", nextP=p2, n=1)
arc3 = Arc(name="from p4 to t1", prevP=p4, nextT=t1, n=1)
arc4 = Arc(name="from t1 to p5", nextP=p5, n=1)
arc5 = Arc(name="from t3 to p4", nextP=p4, n=1)
arc6 = Arc(name="from p6 to t3", prevP=p6, nextT=t3, n=1)
arc7 = Arc(name="from p2 to t2", prevP=p2, nextT=t2, n=1)
arc8 = Arc(name="from t2 to p3", nextP=p3, n=1)
arc9 = Arc(name="from p5 to t2", prevP=p5, nextT=t2, n=1)
arc10 = Arc(name="from t2 to p6", nextP=p6, n=1)
t1.inArcs = [arc1, arc3]
t1.outArcs = [arc2, arc4]
t2.inArcs = [arc7, arc9]
t2.outArcs = [arc8, arc10]
t3.inArcs = [arc6]
t3.outArcs = [arc5]
places = [p1, p2, p3, p4, p5, p6]
transitions = [t1, t2, t3]
petriNet = Model(places, transitions)
petriNet.simulate(100)
# t1.inArcs = [arc1]
# t1.outArcs = [arc2, arc3]
# t2.inArcs = [arc4, arc6]
# t2.outArcs = [arc5, arc7]
# t3.inArcs = [arc8]
# t3.outArcs = [arc9]
# t4.inArcs = [arc10]
# t4.outArcs = [arc11]
def printInit(places):
print("Init state")
for p in places:
print("Position: {} -------- Markers: {}".format(p.name, p.tokens))
print("\n")
if __name__ == "__main__":
main() |
py | 1a3b3690bc3b38b41b034eadf41cb271c203d185 | import asyncio
from concurrent.futures import Executor, ProcessPoolExecutor
from functools import partial
import logging
from multiprocessing import freeze_support
from aiohttp import web
import aiohttp_cors
import black
import click
# This is used internally by tests to shut down the server prematurely
_stop_signal = asyncio.Event()
VERSION_HEADER = "X-Protocol-Version"
LINE_LENGTH_HEADER = "X-Line-Length"
PYTHON_VARIANT_HEADER = "X-Python-Variant"
SKIP_STRING_NORMALIZATION_HEADER = "X-Skip-String-Normalization"
SKIP_NUMERIC_UNDERSCORE_NORMALIZATION_HEADER = "X-Skip-Numeric-Underscore-Normalization"
FAST_OR_SAFE_HEADER = "X-Fast-Or-Safe"
BLACK_HEADERS = [
VERSION_HEADER,
LINE_LENGTH_HEADER,
PYTHON_VARIANT_HEADER,
SKIP_STRING_NORMALIZATION_HEADER,
SKIP_NUMERIC_UNDERSCORE_NORMALIZATION_HEADER,
FAST_OR_SAFE_HEADER,
]
@click.command(context_settings={"help_option_names": ["-h", "--help"]})
@click.option(
"--bind-host", type=str, help="Address to bind the server to.", default="localhost"
)
@click.option("--bind-port", type=int, help="Port to listen on", default=45484)
@click.version_option(version=black.__version__)
def main(bind_host: str, bind_port: int) -> None:
logging.basicConfig(level=logging.INFO)
app = make_app()
ver = black.__version__
black.out(f"blackd version {ver} listening on {bind_host} port {bind_port}")
web.run_app(app, host=bind_host, port=bind_port, handle_signals=True, print=None)
def make_app() -> web.Application:
app = web.Application()
executor = ProcessPoolExecutor()
cors = aiohttp_cors.setup(app)
resource = cors.add(app.router.add_resource("/"))
cors.add(
resource.add_route("POST", partial(handle, executor=executor)),
{
"*": aiohttp_cors.ResourceOptions(
allow_headers=(*BLACK_HEADERS, "Content-Type"), expose_headers="*"
)
},
)
return app
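# Hypothetical client call once the server is running on the default port
# (headers and payload are illustrative):
#   curl -s -XPOST localhost:45484 -H "X-Line-Length: 100" --data "print ( 'hi' )"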
async def handle(request: web.Request, executor: Executor) -> web.Response:
try:
if request.headers.get(VERSION_HEADER, "1") != "1":
return web.Response(
status=501, text="This server only supports protocol version 1"
)
try:
line_length = int(
request.headers.get(LINE_LENGTH_HEADER, black.DEFAULT_LINE_LENGTH)
)
except ValueError:
return web.Response(status=400, text="Invalid line length header value")
py36 = False
pyi = False
if PYTHON_VARIANT_HEADER in request.headers:
value = request.headers[PYTHON_VARIANT_HEADER]
if value == "pyi":
pyi = True
else:
try:
major, *rest = value.split(".")
if int(major) == 3 and len(rest) > 0:
if int(rest[0]) >= 6:
py36 = True
except ValueError:
return web.Response(
status=400, text=f"Invalid value for {PYTHON_VARIANT_HEADER}"
)
skip_string_normalization = bool(
request.headers.get(SKIP_STRING_NORMALIZATION_HEADER, False)
)
skip_numeric_underscore_normalization = bool(
request.headers.get(SKIP_NUMERIC_UNDERSCORE_NORMALIZATION_HEADER, False)
)
fast = False
if request.headers.get(FAST_OR_SAFE_HEADER, "safe") == "fast":
fast = True
mode = black.FileMode.from_configuration(
py36=py36,
pyi=pyi,
skip_string_normalization=skip_string_normalization,
skip_numeric_underscore_normalization=skip_numeric_underscore_normalization,
)
req_bytes = await request.content.read()
charset = request.charset if request.charset is not None else "utf8"
req_str = req_bytes.decode(charset)
loop = asyncio.get_event_loop()
formatted_str = await loop.run_in_executor(
executor,
partial(
black.format_file_contents,
req_str,
line_length=line_length,
fast=fast,
mode=mode,
),
)
return web.Response(
content_type=request.content_type, charset=charset, text=formatted_str
)
except black.NothingChanged:
return web.Response(status=204)
except black.InvalidInput as e:
return web.Response(status=400, text=str(e))
except Exception as e:
logging.exception("Exception during handling a request")
return web.Response(status=500, text=str(e))
def patched_main() -> None:
freeze_support()
black.patch_click()
main()
if __name__ == "__main__":
patched_main()
|
py | 1a3b36e56cf2d15e7c0582464ed41178ccfe9ded | ###
# Copyright (c) 2004, Jeremiah Fincher
# Copyright (c) 2010-2021, Valentin Lorentz
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import supybot.conf as conf
import supybot.registry as registry
from supybot.i18n import PluginInternationalization, internationalizeDocstring
_ = PluginInternationalization('Math')
def configure(advanced):
# This will be called by supybot to configure this module. advanced is
# a bool that specifies whether the user identified themself as an advanced
# user or not. You should effect your configuration by manipulating the
# registry as appropriate.
from supybot.questions import expect, anything, something, yn
conf.registerPlugin('Math', True)
Math = conf.registerPlugin('Math')
# This is where your configuration variables (if any) should go. For example:
# conf.registerGlobalValue(Math, 'someConfigVariableName',
# registry.Boolean(False, _("""Help for someConfigVariableName.""")))
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
py | 1a3b38295a1f6faf851768e9a48c266661981cc0 | from django.views import generic
from .models import prediction
from django.urls import reverse_lazy
from django.shortcuts import redirect,render
from django.contrib.auth import authenticate,login,logout
from .forms import UserForms,form_input
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from rest_framework.decorators import api_view
from rest_framework.response import Response
import os,sys
import joblib
import numpy as np
import pandas as pd
class prediction(generic.CreateView):
model=prediction
fields=['category','price','content_rating','size']
template_name = 'myapp/base.html'
def post(self,request):
category = request.POST['category']
price = request.POST['price']
content_rating = request.POST['content_rating']
size = request.POST.get('size')
if price == 'Paid':
price_convert = 1
elif price == 'Free':
price_convert = 0
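        # Map the form's categorical fields to the integer codes the model was trained on.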
category_dict = {'ART_AND_DESIGN': 0,'AUTO_AND_VEHICLES': 1,'BEAUTY': 2,
'BOOKS_AND_REFERENCE': 3,'BUSINESS': 4,'COMICS': 5,'COMMUNICATION': 6,'DATING': 7,'EDUCATION': 8,'ENTERTAINMENT': 9,
'EVENTS': 10,'FINANCE': 11,'FOOD_AND_DRINK': 12,'GAME_ACTION': 13,'GAME_ADVENTURE': 14,'GAME_ARCADE': 15,
'GAME_BOARD': 16,'GAME_CARD': 17,'GAME_CASINO': 18,'GAME_CASUAL': 19,'GAME_EDUCATIONAL': 20,'GAME_MUSIC': 21,
'GAME_PUZZLE': 22,'GAME_RACING': 23,'GAME_ROLE_PLAYING': 24,'GAME_SIMULATION': 25,'GAME_SPORTS': 26,
'GAME_STRATEGY': 27,'GAME_TRIVIA': 28,'GAME_WORD': 29,'HEALTH_AND_FITNESS': 30,'HOUSE_AND_HOME': 31,
'LIBRARIES_AND_DEMO': 32,'LIFESTYLE': 33,'MAPS_AND_NAVIGATION': 34,'MEDICAL': 35,'MUSIC_AND_AUDIO': 36,
'NEWS_AND_MAGAZINES': 37,'PARENTING': 38,'PERSONALIZATION': 39,'PHOTOGRAPHY': 40,'PRODUCTIVITY': 41,
'SHOPPING': 42,'SOCIAL': 43,'SPORTS': 44,'TOOLS': 45,'TRAVEL': 46,'TRAVEL_AND_LOCAL': 47,'VIDEO_PLAYERS': 48,'WEATHER': 49}
content_rating_dict = {'Adults only 18+': 0,'Everyone': 1,'Everyone 10+': 2,'Mature 17+': 3,'Teen': 4,'Unrated': 5}
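        # The two dicts above are assumed to reproduce the integer label-encoding used
        # when the model was trained; the classifier expects these codes, not the raw
        # category / content-rating strings submitted by the form.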
my_path = os.path.abspath(os.path.dirname(__file__))
path = os.path.join(my_path, "finalized.sav")
        # joblib.load accepts a file path directly, so no open file handle is left behind
        loaded_model = joblib.load(path)
cat = category_dict[category]
cont = content_rating_dict[content_rating]
X_test = pd.DataFrame({'Price':[price_convert],'category':cat,'Content_Rating':cont,'Size(Mb)':[size]})
        # predict() returns an array of labels; take the first element directly
        # instead of parsing the array's string representation
        result = loaded_model.predict(X_test)[0]
return render(request, 'myapp/base.html', {'category': category,'price':price,'content_rating':content_rating,
'size':size,'result':result})
|
py | 1a3b38e8aec48049d1566dab700c05b20e84e28a | import re
import falcon
import simplejson as json
import mysql.connector
import config
from datetime import datetime, timedelta, timezone
from core import utilities
from decimal import Decimal
import excelexporters.tenantsaving
class Reporting:
@staticmethod
def __init__():
""""Initializes Reporting"""
pass
@staticmethod
def on_options(req, resp):
resp.status = falcon.HTTP_200
####################################################################################################################
# PROCEDURES
# Step 1: valid parameters
# Step 2: query the tenant
# Step 3: query energy categories
# Step 4: query associated sensors
# Step 5: query associated points
# Step 6: query base period energy saving
# Step 7: query reporting period energy saving
# Step 8: query tariff data
# Step 9: query associated sensors and points data
# Step 10: construct the report
####################################################################################################################
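    # Example query string (illustrative values only; the URL route itself is
    # registered elsewhere in the application):
    #   ?tenantid=1&periodtype=monthly
    #   &baseperiodstartdatetime=2020-01-01T00:00:00&baseperiodenddatetime=2020-07-01T00:00:00
    #   &reportingperiodstartdatetime=2020-07-01T00:00:00&reportingperiodenddatetime=2021-01-01T00:00:00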
@staticmethod
def on_get(req, resp):
print(req.params)
tenant_id = req.params.get('tenantid')
tenant_uuid = req.params.get('tenantuuid')
period_type = req.params.get('periodtype')
base_start_datetime_local = req.params.get('baseperiodstartdatetime')
base_end_datetime_local = req.params.get('baseperiodenddatetime')
reporting_start_datetime_local = req.params.get('reportingperiodstartdatetime')
reporting_end_datetime_local = req.params.get('reportingperiodenddatetime')
################################################################################################################
# Step 1: valid parameters
################################################################################################################
if tenant_id is None and tenant_uuid is None:
raise falcon.HTTPError(falcon.HTTP_400,
title='API.BAD_REQUEST',
description='API.INVALID_TENANT_ID')
if tenant_id is not None:
tenant_id = str.strip(tenant_id)
if not tenant_id.isdigit() or int(tenant_id) <= 0:
raise falcon.HTTPError(falcon.HTTP_400,
title='API.BAD_REQUEST',
description='API.INVALID_TENANT_ID')
if tenant_uuid is not None:
regex = re.compile('^[a-f0-9]{8}-?[a-f0-9]{4}-?4[a-f0-9]{3}-?[89ab][a-f0-9]{3}-?[a-f0-9]{12}\Z', re.I)
match = regex.match(str.strip(tenant_uuid))
if not bool(match):
raise falcon.HTTPError(falcon.HTTP_400,
title='API.BAD_REQUEST',
description='API.INVALID_TENANT_UUID')
if period_type is None:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST', description='API.INVALID_PERIOD_TYPE')
else:
period_type = str.strip(period_type)
if period_type not in ['hourly', 'daily', 'weekly', 'monthly', 'yearly']:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST', description='API.INVALID_PERIOD_TYPE')
timezone_offset = int(config.utc_offset[1:3]) * 60 + int(config.utc_offset[4:6])
if config.utc_offset[0] == '-':
timezone_offset = -timezone_offset
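        # e.g. config.utc_offset = '+08:00' yields timezone_offset = 480 minutes,
        # while '-05:30' yields -330 minutes (the sign is taken from the first character)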
base_start_datetime_utc = None
if base_start_datetime_local is not None and len(str.strip(base_start_datetime_local)) > 0:
base_start_datetime_local = str.strip(base_start_datetime_local)
try:
base_start_datetime_utc = datetime.strptime(base_start_datetime_local,
'%Y-%m-%dT%H:%M:%S').replace(tzinfo=timezone.utc) - \
timedelta(minutes=timezone_offset)
except ValueError:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description="API.INVALID_BASE_PERIOD_START_DATETIME")
base_end_datetime_utc = None
if base_end_datetime_local is not None and len(str.strip(base_end_datetime_local)) > 0:
base_end_datetime_local = str.strip(base_end_datetime_local)
try:
base_end_datetime_utc = datetime.strptime(base_end_datetime_local,
'%Y-%m-%dT%H:%M:%S').replace(tzinfo=timezone.utc) - \
timedelta(minutes=timezone_offset)
except ValueError:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description="API.INVALID_BASE_PERIOD_END_DATETIME")
if base_start_datetime_utc is not None and base_end_datetime_utc is not None and \
base_start_datetime_utc >= base_end_datetime_utc:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_BASE_PERIOD_END_DATETIME')
if reporting_start_datetime_local is None:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description="API.INVALID_REPORTING_PERIOD_START_DATETIME")
else:
reporting_start_datetime_local = str.strip(reporting_start_datetime_local)
try:
reporting_start_datetime_utc = datetime.strptime(reporting_start_datetime_local,
'%Y-%m-%dT%H:%M:%S').replace(tzinfo=timezone.utc) - \
timedelta(minutes=timezone_offset)
except ValueError:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description="API.INVALID_REPORTING_PERIOD_START_DATETIME")
if reporting_end_datetime_local is None:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description="API.INVALID_REPORTING_PERIOD_END_DATETIME")
else:
reporting_end_datetime_local = str.strip(reporting_end_datetime_local)
try:
reporting_end_datetime_utc = datetime.strptime(reporting_end_datetime_local,
'%Y-%m-%dT%H:%M:%S').replace(tzinfo=timezone.utc) - \
timedelta(minutes=timezone_offset)
except ValueError:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description="API.INVALID_REPORTING_PERIOD_END_DATETIME")
if reporting_start_datetime_utc >= reporting_end_datetime_utc:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_REPORTING_PERIOD_END_DATETIME')
################################################################################################################
# Step 2: query the tenant
################################################################################################################
cnx_system = mysql.connector.connect(**config.myems_system_db)
cursor_system = cnx_system.cursor()
cnx_energy = mysql.connector.connect(**config.myems_energy_db)
cursor_energy = cnx_energy.cursor()
cnx_energy_baseline = mysql.connector.connect(**config.myems_energy_baseline_db)
cursor_energy_baseline = cnx_energy_baseline.cursor()
cnx_historical = mysql.connector.connect(**config.myems_historical_db)
cursor_historical = cnx_historical.cursor()
if tenant_id is not None:
cursor_system.execute(" SELECT id, name, area, cost_center_id "
" FROM tbl_tenants "
" WHERE id = %s ", (tenant_id,))
row_tenant = cursor_system.fetchone()
elif tenant_uuid is not None:
cursor_system.execute(" SELECT id, name, area, cost_center_id "
" FROM tbl_tenants "
" WHERE uuid = %s ", (tenant_uuid,))
row_tenant = cursor_system.fetchone()
if row_tenant is None:
if cursor_system:
cursor_system.close()
if cnx_system:
cnx_system.disconnect()
if cursor_energy:
cursor_energy.close()
if cnx_energy:
cnx_energy.disconnect()
if cursor_energy_baseline:
cursor_energy_baseline.close()
if cnx_energy_baseline:
cnx_energy_baseline.disconnect()
if cursor_historical:
cursor_historical.close()
if cnx_historical:
cnx_historical.disconnect()
raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND', description='API.TENANT_NOT_FOUND')
tenant = dict()
tenant['id'] = row_tenant[0]
tenant['name'] = row_tenant[1]
tenant['area'] = row_tenant[2]
tenant['cost_center_id'] = row_tenant[3]
################################################################################################################
# Step 3: query energy categories
################################################################################################################
energy_category_set = set()
# query energy categories in base period
cursor_energy.execute(" SELECT DISTINCT(energy_category_id) "
" FROM tbl_tenant_input_category_hourly "
" WHERE tenant_id = %s "
" AND start_datetime_utc >= %s "
" AND start_datetime_utc < %s ",
(tenant['id'], base_start_datetime_utc, base_end_datetime_utc))
rows_energy_categories = cursor_energy.fetchall()
        if rows_energy_categories is not None and len(rows_energy_categories) > 0:
for row_energy_category in rows_energy_categories:
energy_category_set.add(row_energy_category[0])
# query energy categories in reporting period
cursor_energy.execute(" SELECT DISTINCT(energy_category_id) "
" FROM tbl_tenant_input_category_hourly "
" WHERE tenant_id = %s "
" AND start_datetime_utc >= %s "
" AND start_datetime_utc < %s ",
(tenant['id'], reporting_start_datetime_utc, reporting_end_datetime_utc))
rows_energy_categories = cursor_energy.fetchall()
        if rows_energy_categories is not None and len(rows_energy_categories) > 0:
for row_energy_category in rows_energy_categories:
energy_category_set.add(row_energy_category[0])
# query all energy categories in base period and reporting period
cursor_system.execute(" SELECT id, name, unit_of_measure, kgce, kgco2e "
" FROM tbl_energy_categories "
" ORDER BY id ", )
rows_energy_categories = cursor_system.fetchall()
if rows_energy_categories is None or len(rows_energy_categories) == 0:
if cursor_system:
cursor_system.close()
if cnx_system:
cnx_system.disconnect()
if cursor_energy:
cursor_energy.close()
if cnx_energy:
cnx_energy.disconnect()
if cursor_energy_baseline:
cursor_energy_baseline.close()
if cnx_energy_baseline:
cnx_energy_baseline.disconnect()
if cursor_historical:
cursor_historical.close()
if cnx_historical:
cnx_historical.disconnect()
raise falcon.HTTPError(falcon.HTTP_404,
title='API.NOT_FOUND',
description='API.ENERGY_CATEGORY_NOT_FOUND')
energy_category_dict = dict()
for row_energy_category in rows_energy_categories:
if row_energy_category[0] in energy_category_set:
energy_category_dict[row_energy_category[0]] = {"name": row_energy_category[1],
"unit_of_measure": row_energy_category[2],
"kgce": row_energy_category[3],
"kgco2e": row_energy_category[4]}
################################################################################################################
# Step 4: query associated sensors
################################################################################################################
point_list = list()
cursor_system.execute(" SELECT p.id, p.name, p.units, p.object_type "
" FROM tbl_tenants t, tbl_sensors s, tbl_tenants_sensors ts, "
" tbl_points p, tbl_sensors_points sp "
" WHERE t.id = %s AND t.id = ts.tenant_id AND ts.sensor_id = s.id "
" AND s.id = sp.sensor_id AND sp.point_id = p.id "
" ORDER BY p.id ", (tenant['id'], ))
rows_points = cursor_system.fetchall()
if rows_points is not None and len(rows_points) > 0:
for row in rows_points:
point_list.append({"id": row[0], "name": row[1], "units": row[2], "object_type": row[3]})
################################################################################################################
# Step 5: query associated points
################################################################################################################
cursor_system.execute(" SELECT p.id, p.name, p.units, p.object_type "
" FROM tbl_tenants t, tbl_tenants_points tp, tbl_points p "
" WHERE t.id = %s AND t.id = tp.tenant_id AND tp.point_id = p.id "
" ORDER BY p.id ", (tenant['id'], ))
rows_points = cursor_system.fetchall()
if rows_points is not None and len(rows_points) > 0:
for row in rows_points:
point_list.append({"id": row[0], "name": row[1], "units": row[2], "object_type": row[3]})
################################################################################################################
# Step 6: query base period energy saving
################################################################################################################
base = dict()
if energy_category_set is not None and len(energy_category_set) > 0:
for energy_category_id in energy_category_set:
kgce = energy_category_dict[energy_category_id]['kgce']
kgco2e = energy_category_dict[energy_category_id]['kgco2e']
base[energy_category_id] = dict()
base[energy_category_id]['timestamps'] = list()
base[energy_category_id]['values_baseline'] = list()
base[energy_category_id]['values_actual'] = list()
base[energy_category_id]['values_saving'] = list()
base[energy_category_id]['subtotal_baseline'] = Decimal(0.0)
base[energy_category_id]['subtotal_actual'] = Decimal(0.0)
base[energy_category_id]['subtotal_saving'] = Decimal(0.0)
base[energy_category_id]['subtotal_in_kgce_baseline'] = Decimal(0.0)
base[energy_category_id]['subtotal_in_kgce_actual'] = Decimal(0.0)
base[energy_category_id]['subtotal_in_kgce_saving'] = Decimal(0.0)
base[energy_category_id]['subtotal_in_kgco2e_baseline'] = Decimal(0.0)
base[energy_category_id]['subtotal_in_kgco2e_actual'] = Decimal(0.0)
base[energy_category_id]['subtotal_in_kgco2e_saving'] = Decimal(0.0)
# query base period's energy baseline
cursor_energy_baseline.execute(" SELECT start_datetime_utc, actual_value "
" FROM tbl_tenant_input_category_hourly "
" WHERE tenant_id = %s "
" AND energy_category_id = %s "
" AND start_datetime_utc >= %s "
" AND start_datetime_utc < %s "
" ORDER BY start_datetime_utc ",
(tenant['id'],
energy_category_id,
base_start_datetime_utc,
base_end_datetime_utc))
rows_tenant_hourly = cursor_energy_baseline.fetchall()
rows_tenant_periodically = utilities.aggregate_hourly_data_by_period(rows_tenant_hourly,
base_start_datetime_utc,
base_end_datetime_utc,
period_type)
for row_tenant_periodically in rows_tenant_periodically:
current_datetime_local = row_tenant_periodically[0].replace(tzinfo=timezone.utc) + \
timedelta(minutes=timezone_offset)
if period_type == 'hourly':
current_datetime = current_datetime_local.strftime('%Y-%m-%dT%H:%M:%S')
elif period_type == 'daily':
current_datetime = current_datetime_local.strftime('%Y-%m-%d')
elif period_type == 'weekly':
current_datetime = current_datetime_local.strftime('%Y-%m-%d')
elif period_type == 'monthly':
current_datetime = current_datetime_local.strftime('%Y-%m')
elif period_type == 'yearly':
current_datetime = current_datetime_local.strftime('%Y')
baseline_value = Decimal(0.0) if row_tenant_periodically[1] is None else row_tenant_periodically[1]
base[energy_category_id]['timestamps'].append(current_datetime)
base[energy_category_id]['values_baseline'].append(baseline_value)
base[energy_category_id]['subtotal_baseline'] += baseline_value
base[energy_category_id]['subtotal_in_kgce_baseline'] += baseline_value * kgce
base[energy_category_id]['subtotal_in_kgco2e_baseline'] += baseline_value * kgco2e
# query base period's energy actual
cursor_energy.execute(" SELECT start_datetime_utc, actual_value "
" FROM tbl_tenant_input_category_hourly "
" WHERE tenant_id = %s "
" AND energy_category_id = %s "
" AND start_datetime_utc >= %s "
" AND start_datetime_utc < %s "
" ORDER BY start_datetime_utc ",
(tenant['id'],
energy_category_id,
base_start_datetime_utc,
base_end_datetime_utc))
rows_tenant_hourly = cursor_energy.fetchall()
rows_tenant_periodically = utilities.aggregate_hourly_data_by_period(rows_tenant_hourly,
base_start_datetime_utc,
base_end_datetime_utc,
period_type)
for row_tenant_periodically in rows_tenant_periodically:
current_datetime_local = row_tenant_periodically[0].replace(tzinfo=timezone.utc) + \
timedelta(minutes=timezone_offset)
if period_type == 'hourly':
current_datetime = current_datetime_local.strftime('%Y-%m-%dT%H:%M:%S')
elif period_type == 'daily':
current_datetime = current_datetime_local.strftime('%Y-%m-%d')
elif period_type == 'weekly':
current_datetime = current_datetime_local.strftime('%Y-%m-%d')
elif period_type == 'monthly':
current_datetime = current_datetime_local.strftime('%Y-%m')
elif period_type == 'yearly':
current_datetime = current_datetime_local.strftime('%Y')
actual_value = Decimal(0.0) if row_tenant_periodically[1] is None else row_tenant_periodically[1]
base[energy_category_id]['values_actual'].append(actual_value)
base[energy_category_id]['subtotal_actual'] += actual_value
base[energy_category_id]['subtotal_in_kgce_actual'] += actual_value * kgce
base[energy_category_id]['subtotal_in_kgco2e_actual'] += actual_value * kgco2e
# calculate base period's energy savings
for i in range(len(base[energy_category_id]['values_baseline'])):
base[energy_category_id]['values_saving'].append(
base[energy_category_id]['values_baseline'][i] -
base[energy_category_id]['values_actual'][i])
base[energy_category_id]['subtotal_saving'] = \
base[energy_category_id]['subtotal_baseline'] - \
base[energy_category_id]['subtotal_actual']
base[energy_category_id]['subtotal_in_kgce_saving'] = \
base[energy_category_id]['subtotal_in_kgce_baseline'] - \
base[energy_category_id]['subtotal_in_kgce_actual']
base[energy_category_id]['subtotal_in_kgco2e_saving'] = \
base[energy_category_id]['subtotal_in_kgco2e_baseline'] - \
base[energy_category_id]['subtotal_in_kgco2e_actual']
################################################################################################################
# Step 7: query reporting period energy saving
################################################################################################################
reporting = dict()
if energy_category_set is not None and len(energy_category_set) > 0:
for energy_category_id in energy_category_set:
kgce = energy_category_dict[energy_category_id]['kgce']
kgco2e = energy_category_dict[energy_category_id]['kgco2e']
reporting[energy_category_id] = dict()
reporting[energy_category_id]['timestamps'] = list()
reporting[energy_category_id]['values_baseline'] = list()
reporting[energy_category_id]['values_actual'] = list()
reporting[energy_category_id]['values_saving'] = list()
reporting[energy_category_id]['subtotal_baseline'] = Decimal(0.0)
reporting[energy_category_id]['subtotal_actual'] = Decimal(0.0)
reporting[energy_category_id]['subtotal_saving'] = Decimal(0.0)
reporting[energy_category_id]['subtotal_in_kgce_baseline'] = Decimal(0.0)
reporting[energy_category_id]['subtotal_in_kgce_actual'] = Decimal(0.0)
reporting[energy_category_id]['subtotal_in_kgce_saving'] = Decimal(0.0)
reporting[energy_category_id]['subtotal_in_kgco2e_baseline'] = Decimal(0.0)
reporting[energy_category_id]['subtotal_in_kgco2e_actual'] = Decimal(0.0)
reporting[energy_category_id]['subtotal_in_kgco2e_saving'] = Decimal(0.0)
# query reporting period's energy baseline
cursor_energy_baseline.execute(" SELECT start_datetime_utc, actual_value "
" FROM tbl_tenant_input_category_hourly "
" WHERE tenant_id = %s "
" AND energy_category_id = %s "
" AND start_datetime_utc >= %s "
" AND start_datetime_utc < %s "
" ORDER BY start_datetime_utc ",
(tenant['id'],
energy_category_id,
reporting_start_datetime_utc,
reporting_end_datetime_utc))
rows_tenant_hourly = cursor_energy_baseline.fetchall()
rows_tenant_periodically = utilities.aggregate_hourly_data_by_period(rows_tenant_hourly,
reporting_start_datetime_utc,
reporting_end_datetime_utc,
period_type)
for row_tenant_periodically in rows_tenant_periodically:
current_datetime_local = row_tenant_periodically[0].replace(tzinfo=timezone.utc) + \
timedelta(minutes=timezone_offset)
if period_type == 'hourly':
current_datetime = current_datetime_local.strftime('%Y-%m-%dT%H:%M:%S')
elif period_type == 'daily':
current_datetime = current_datetime_local.strftime('%Y-%m-%d')
elif period_type == 'weekly':
current_datetime = current_datetime_local.strftime('%Y-%m-%d')
elif period_type == 'monthly':
current_datetime = current_datetime_local.strftime('%Y-%m')
elif period_type == 'yearly':
current_datetime = current_datetime_local.strftime('%Y')
baseline_value = Decimal(0.0) if row_tenant_periodically[1] is None else row_tenant_periodically[1]
reporting[energy_category_id]['timestamps'].append(current_datetime)
reporting[energy_category_id]['values_baseline'].append(baseline_value)
reporting[energy_category_id]['subtotal_baseline'] += baseline_value
reporting[energy_category_id]['subtotal_in_kgce_baseline'] += baseline_value * kgce
reporting[energy_category_id]['subtotal_in_kgco2e_baseline'] += baseline_value * kgco2e
# query reporting period's energy actual
cursor_energy.execute(" SELECT start_datetime_utc, actual_value "
" FROM tbl_tenant_input_category_hourly "
" WHERE tenant_id = %s "
" AND energy_category_id = %s "
" AND start_datetime_utc >= %s "
" AND start_datetime_utc < %s "
" ORDER BY start_datetime_utc ",
(tenant['id'],
energy_category_id,
reporting_start_datetime_utc,
reporting_end_datetime_utc))
rows_tenant_hourly = cursor_energy.fetchall()
rows_tenant_periodically = utilities.aggregate_hourly_data_by_period(rows_tenant_hourly,
reporting_start_datetime_utc,
reporting_end_datetime_utc,
period_type)
for row_tenant_periodically in rows_tenant_periodically:
current_datetime_local = row_tenant_periodically[0].replace(tzinfo=timezone.utc) + \
timedelta(minutes=timezone_offset)
if period_type == 'hourly':
current_datetime = current_datetime_local.strftime('%Y-%m-%dT%H:%M:%S')
elif period_type == 'daily':
current_datetime = current_datetime_local.strftime('%Y-%m-%d')
elif period_type == 'weekly':
current_datetime = current_datetime_local.strftime('%Y-%m-%d')
elif period_type == 'monthly':
current_datetime = current_datetime_local.strftime('%Y-%m')
elif period_type == 'yearly':
current_datetime = current_datetime_local.strftime('%Y')
actual_value = Decimal(0.0) if row_tenant_periodically[1] is None else row_tenant_periodically[1]
reporting[energy_category_id]['values_actual'].append(actual_value)
reporting[energy_category_id]['subtotal_actual'] += actual_value
reporting[energy_category_id]['subtotal_in_kgce_actual'] += actual_value * kgce
reporting[energy_category_id]['subtotal_in_kgco2e_actual'] += actual_value * kgco2e
# calculate reporting period's energy savings
for i in range(len(reporting[energy_category_id]['values_baseline'])):
reporting[energy_category_id]['values_saving'].append(
reporting[energy_category_id]['values_baseline'][i] -
reporting[energy_category_id]['values_actual'][i])
reporting[energy_category_id]['subtotal_saving'] = \
reporting[energy_category_id]['subtotal_baseline'] - \
reporting[energy_category_id]['subtotal_actual']
reporting[energy_category_id]['subtotal_in_kgce_saving'] = \
reporting[energy_category_id]['subtotal_in_kgce_baseline'] - \
reporting[energy_category_id]['subtotal_in_kgce_actual']
reporting[energy_category_id]['subtotal_in_kgco2e_saving'] = \
reporting[energy_category_id]['subtotal_in_kgco2e_baseline'] - \
reporting[energy_category_id]['subtotal_in_kgco2e_actual']
################################################################################################################
# Step 8: query tariff data
################################################################################################################
parameters_data = dict()
parameters_data['names'] = list()
parameters_data['timestamps'] = list()
parameters_data['values'] = list()
if energy_category_set is not None and len(energy_category_set) > 0:
for energy_category_id in energy_category_set:
energy_category_tariff_dict = utilities.get_energy_category_tariffs(tenant['cost_center_id'],
energy_category_id,
reporting_start_datetime_utc,
reporting_end_datetime_utc)
tariff_timestamp_list = list()
tariff_value_list = list()
for k, v in energy_category_tariff_dict.items():
# convert k from utc to local
k = k + timedelta(minutes=timezone_offset)
                    tariff_timestamp_list.append(k.isoformat()[0:19])
tariff_value_list.append(v)
parameters_data['names'].append('TARIFF-' + energy_category_dict[energy_category_id]['name'])
parameters_data['timestamps'].append(tariff_timestamp_list)
parameters_data['values'].append(tariff_value_list)
################################################################################################################
# Step 9: query associated sensors and points data
################################################################################################################
for point in point_list:
point_values = []
point_timestamps = []
if point['object_type'] == 'ANALOG_VALUE':
query = (" SELECT utc_date_time, actual_value "
" FROM tbl_analog_value "
" WHERE point_id = %s "
" AND utc_date_time BETWEEN %s AND %s "
" ORDER BY utc_date_time ")
cursor_historical.execute(query, (point['id'],
reporting_start_datetime_utc,
reporting_end_datetime_utc))
rows = cursor_historical.fetchall()
if rows is not None and len(rows) > 0:
for row in rows:
current_datetime_local = row[0].replace(tzinfo=timezone.utc) + \
timedelta(minutes=timezone_offset)
current_datetime = current_datetime_local.strftime('%Y-%m-%dT%H:%M:%S')
point_timestamps.append(current_datetime)
point_values.append(row[1])
elif point['object_type'] == 'ENERGY_VALUE':
query = (" SELECT utc_date_time, actual_value "
" FROM tbl_energy_value "
" WHERE point_id = %s "
" AND utc_date_time BETWEEN %s AND %s "
" ORDER BY utc_date_time ")
cursor_historical.execute(query, (point['id'],
reporting_start_datetime_utc,
reporting_end_datetime_utc))
rows = cursor_historical.fetchall()
if rows is not None and len(rows) > 0:
for row in rows:
current_datetime_local = row[0].replace(tzinfo=timezone.utc) + \
timedelta(minutes=timezone_offset)
current_datetime = current_datetime_local.strftime('%Y-%m-%dT%H:%M:%S')
point_timestamps.append(current_datetime)
point_values.append(row[1])
elif point['object_type'] == 'DIGITAL_VALUE':
query = (" SELECT utc_date_time, actual_value "
" FROM tbl_digital_value "
" WHERE point_id = %s "
" AND utc_date_time BETWEEN %s AND %s "
" ORDER BY utc_date_time ")
cursor_historical.execute(query, (point['id'],
reporting_start_datetime_utc,
reporting_end_datetime_utc))
rows = cursor_historical.fetchall()
if rows is not None and len(rows) > 0:
for row in rows:
current_datetime_local = row[0].replace(tzinfo=timezone.utc) + \
timedelta(minutes=timezone_offset)
current_datetime = current_datetime_local.strftime('%Y-%m-%dT%H:%M:%S')
point_timestamps.append(current_datetime)
point_values.append(row[1])
parameters_data['names'].append(point['name'] + ' (' + point['units'] + ')')
parameters_data['timestamps'].append(point_timestamps)
parameters_data['values'].append(point_values)
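        # parameters_data now holds three parallel lists, e.g. (illustrative values):
        #   names:      ['TARIFF-Electricity', 'temperature (C)']
        #   timestamps: [['2021-01-01T00:00:00', ...], ['2021-01-01T00:00:00', ...]]
        #   values:     [[Decimal('0.7'), ...], [Decimal('3.5'), ...]]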
################################################################################################################
# Step 10: construct the report
################################################################################################################
if cursor_system:
cursor_system.close()
if cnx_system:
cnx_system.disconnect()
if cursor_energy:
cursor_energy.close()
if cnx_energy:
cnx_energy.disconnect()
if cursor_energy_baseline:
cursor_energy_baseline.close()
if cnx_energy_baseline:
cnx_energy_baseline.disconnect()
if cursor_historical:
cursor_historical.close()
if cnx_historical:
cnx_historical.disconnect()
result = dict()
result['tenant'] = dict()
result['tenant']['name'] = tenant['name']
result['tenant']['area'] = tenant['area']
result['base_period'] = dict()
result['base_period']['names'] = list()
result['base_period']['units'] = list()
result['base_period']['timestamps'] = list()
result['base_period']['values_saving'] = list()
result['base_period']['subtotals_saving'] = list()
result['base_period']['subtotals_in_kgce_saving'] = list()
result['base_period']['subtotals_in_kgco2e_saving'] = list()
result['base_period']['total_in_kgce_saving'] = Decimal(0.0)
result['base_period']['total_in_kgco2e_saving'] = Decimal(0.0)
if energy_category_set is not None and len(energy_category_set) > 0:
for energy_category_id in energy_category_set:
result['base_period']['names'].append(energy_category_dict[energy_category_id]['name'])
result['base_period']['units'].append(energy_category_dict[energy_category_id]['unit_of_measure'])
result['base_period']['timestamps'].append(base[energy_category_id]['timestamps'])
result['base_period']['values_saving'].append(base[energy_category_id]['values_saving'])
result['base_period']['subtotals_saving'].append(base[energy_category_id]['subtotal_saving'])
result['base_period']['subtotals_in_kgce_saving'].append(
base[energy_category_id]['subtotal_in_kgce_saving'])
result['base_period']['subtotals_in_kgco2e_saving'].append(
base[energy_category_id]['subtotal_in_kgco2e_saving'])
result['base_period']['total_in_kgce_saving'] += base[energy_category_id]['subtotal_in_kgce_saving']
result['base_period']['total_in_kgco2e_saving'] += base[energy_category_id]['subtotal_in_kgco2e_saving']
result['reporting_period'] = dict()
result['reporting_period']['names'] = list()
result['reporting_period']['energy_category_ids'] = list()
result['reporting_period']['units'] = list()
result['reporting_period']['timestamps'] = list()
result['reporting_period']['values_saving'] = list()
result['reporting_period']['subtotals_saving'] = list()
result['reporting_period']['subtotals_in_kgce_saving'] = list()
result['reporting_period']['subtotals_in_kgco2e_saving'] = list()
result['reporting_period']['subtotals_per_unit_area_saving'] = list()
result['reporting_period']['increment_rates_saving'] = list()
result['reporting_period']['total_in_kgce_saving'] = Decimal(0.0)
result['reporting_period']['total_in_kgco2e_saving'] = Decimal(0.0)
result['reporting_period']['increment_rate_in_kgce_saving'] = Decimal(0.0)
result['reporting_period']['increment_rate_in_kgco2e_saving'] = Decimal(0.0)
if energy_category_set is not None and len(energy_category_set) > 0:
for energy_category_id in energy_category_set:
result['reporting_period']['names'].append(energy_category_dict[energy_category_id]['name'])
result['reporting_period']['energy_category_ids'].append(energy_category_id)
result['reporting_period']['units'].append(energy_category_dict[energy_category_id]['unit_of_measure'])
result['reporting_period']['timestamps'].append(reporting[energy_category_id]['timestamps'])
result['reporting_period']['values_saving'].append(reporting[energy_category_id]['values_saving'])
result['reporting_period']['subtotals_saving'].append(reporting[energy_category_id]['subtotal_saving'])
result['reporting_period']['subtotals_in_kgce_saving'].append(
reporting[energy_category_id]['subtotal_in_kgce_saving'])
result['reporting_period']['subtotals_in_kgco2e_saving'].append(
reporting[energy_category_id]['subtotal_in_kgco2e_saving'])
result['reporting_period']['subtotals_per_unit_area_saving'].append(
reporting[energy_category_id]['subtotal_saving'] / tenant['area'] if tenant['area'] > 0.0 else None)
result['reporting_period']['increment_rates_saving'].append(
(reporting[energy_category_id]['subtotal_saving'] - base[energy_category_id]['subtotal_saving']) /
base[energy_category_id]['subtotal_saving']
if base[energy_category_id]['subtotal_saving'] > 0.0 else None)
result['reporting_period']['total_in_kgce_saving'] += \
reporting[energy_category_id]['subtotal_in_kgce_saving']
result['reporting_period']['total_in_kgco2e_saving'] += \
reporting[energy_category_id]['subtotal_in_kgco2e_saving']
        result['reporting_period']['total_in_kgco2e_per_unit_area_saving'] = \
            result['reporting_period']['total_in_kgco2e_saving'] / tenant['area'] if tenant['area'] > 0.0 else None
result['reporting_period']['increment_rate_in_kgce_saving'] = \
(result['reporting_period']['total_in_kgce_saving'] - result['base_period']['total_in_kgce_saving']) / \
result['base_period']['total_in_kgce_saving'] \
if result['base_period']['total_in_kgce_saving'] > Decimal(0.0) else None
        result['reporting_period']['total_in_kgce_per_unit_area_saving'] = \
            result['reporting_period']['total_in_kgce_saving'] / tenant['area'] if tenant['area'] > 0.0 else None
result['reporting_period']['increment_rate_in_kgco2e_saving'] = \
(result['reporting_period']['total_in_kgco2e_saving'] - result['base_period']['total_in_kgco2e_saving']) / \
result['base_period']['total_in_kgco2e_saving'] \
if result['base_period']['total_in_kgco2e_saving'] > Decimal(0.0) else None
result['parameters'] = {
"names": parameters_data['names'],
"timestamps": parameters_data['timestamps'],
"values": parameters_data['values']
}
# export result to Excel file and then encode the file to base64 string
result['excel_bytes_base64'] = excelexporters.tenantsaving.export(result,
tenant['name'],
reporting_start_datetime_local,
reporting_end_datetime_local,
period_type)
resp.text = json.dumps(result)
|
py | 1a3b3922199c4d8d24f31a08838517f0f5239f64 | from flask import Blueprint, redirect, render_template, url_for, request, flash, abort, current_app
from flask_login import login_user, login_required, logout_user, current_user
from werkzeug.utils import secure_filename
from website.webforms import LoginForm, RegisterForm, EditAccountForm, ChangePasswordForm, RequestResetPasswordForm, ResetPasswordForm
from website.users.utils import email_or_username_or_not_exist, flash_errors, send_mail
from website.models import User, UserMessage, Post, db
from website.decoretors import check_confirmed
from datetime import datetime
import uuid as uuid
import os
# Blueprint declaration
users = Blueprint('users', __name__, template_folder='users_templates', url_prefix='/user')
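# Registration sketch (the actual app factory lives elsewhere in the project, so the
# import path below is only illustrative):
#   from website.users.routes import users
#   app.register_blueprint(users)  # the routes in this module become available under /user/...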
# -------------------- Register Route --------------------
@users.route('/register', methods=['GET', 'POST'])
def register():
form = RegisterForm()
if form.validate_on_submit():
user_to_add = User.query.filter_by(email=form.email.data).first()
username_check = User.query.filter_by(username=form.username.data).first()
email = form.email.data
if user_to_add is None and username_check is None:
# Add User To Database
user_to_add = User(name=form.name.data, username=form.username.data, about_author=form.about_author.data, email=email, password=form.password.data)
db.session.add(user_to_add)
db.session.commit()
# Confarmation Email Message With Token Link
send_mail(user_to_add, base_link='users.confirm', salt='email-confirm', message_title="Email Confirmation", template='mail_templates/email_confirm.html')
return redirect(url_for('users.login'))
else:
return render_template('error.html', error='User Already Exist!!')
# Clear the form
form.name.data = ''
form.username.data = ''
form.email.data = ''
form.password.data = ''
form.re_password.data = ''
return render_template('register.html', form=form)
# -------------------- End of Register Route --------------------
# -------------------- Login Route --------------------
@users.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
if current_user.is_authenticated: return redirect(url_for('main.index'))
if form.validate_on_submit():
user_is_exist = email_or_username_or_not_exist(form.email.data)
if user_is_exist and User.verify_password(user_is_exist, form.password.data):
login_user(user_is_exist, remember=form.remember_me.data)
return redirect(url_for('main.index'))
form.email.data = ''
form.password.data = ''
return render_template('login.html', form=form)
# -------------------- End of Login Route --------------------
# -------------------- Logout Route --------------------
@users.route('/logout')
def logout():
logout_user()
return redirect(url_for('main.index'))
# -------------------- End of Logout Route --------------------
# -------------------- Profile Route --------------------
@users.route('/profile')
@login_required
@check_confirmed
def profile():
# Variables
amount_of_user_messages = len(UserMessage.query.filter_by(user_id=current_user.id).all())
amount_of_user_posts = len(Post.query.filter_by(poster_id=current_user.id).all())
return render_template('profile.html', amount_of_user_messages=amount_of_user_messages, amount_of_user_posts=amount_of_user_posts)
# -------------------- End of Profile Route --------------------
# -------------------- Edit_Account Route --------------------
@users.route('/edit_account/<int:id>', methods=['GET', 'POST'])
@login_required
def edit_account(id):
form = EditAccountForm()
change_password_form = ChangePasswordForm()
user_to_update = User.query.get_or_404(id)
# if request.method == 'POST':
# Profile Details Form
if form.identifier.data == 'EditAccountForm' and form.validate_on_submit():
user_to_update.name = request.form['name']
# Check if Username is aleady exist
if user_to_update.username != request.form['username']:
if email_or_username_or_not_exist(request.form['username']) is None:
user_to_update.username = request.form['username']
else:
flash("Username is Already Taken", category='error')
# Check if Email is aleady exist
if user_to_update.email != request.form['email']:
if email_or_username_or_not_exist(request.form['email']) is None:
user_to_update.email = request.form['email']
else:
flash("Email is Already Taken", category='error')
# Update about author
user_to_update.about_author = request.form['about_author']
        # Update profile pic only if a new file was actually uploaded
        uploaded_pic = request.files.get('profile_pic')
        if uploaded_pic and uploaded_pic.filename:
            # Grab the image name and sanitise it
            pic_filename = secure_filename(uploaded_pic.filename)
            # Prefix a UUID to avoid filename collisions
            pic_name = str(uuid.uuid1()) + "_" + pic_filename
            # Save the image to the upload folder
            uploaded_pic.save(os.path.join(current_app.config['UPLOAD_FOLDER'], pic_name))
            # Store the filename string in the db
            user_to_update.profile_pic = pic_name
try:
db.session.commit()
flash("User Updated Successfully!", category='success')
except:
flash("Error While Updating User - Try Again!!", category='error')
    else:
        flash_errors(form)
# Password Changing Form
if form.identifier.data == 'ChangePasswordForm' and change_password_form.validate_on_submit():
user_to_update.password = change_password_form.password.data
try:
db.session.commit()
flash("User Updated Successfully!", category='success')
except:
flash("Error While Updating User - Try Again!!", category='error')
else:
flash_errors(change_password_form)
form.name.data = user_to_update.name
form.username.data = user_to_update.username
form.email.data = user_to_update.email
form.about_author.data = user_to_update.about_author
return render_template('edit_account.html', form=form, change_password_form=change_password_form, id=id, abort=abort)
# -------------------- End of Edit_Account Route --------------------
# -------------------- Email_Confirm Route --------------------
@users.route('/confirm/<token>')
def confirm(token):
user = User.verify_token(token, "email-confirm")
if user is None:
flash("That is an invalid or expired token", category='error')
return redirect(url_for('main.index'))
if user.confirm:
flash("Account Already Confirmed. Please Login.", category='error')
return redirect(url_for('users.login'))
else:
try:
user.confirm = True
user.confirm_at = datetime.utcnow()
db.session.commit()
except:
return render_template('error.html', error="Whoops! Somthing Went Wrong - User Doesn't confirmed - Try Again")
else:
flash("Thank's {} Your Account is confirmed".format(user.name), category='success')
return redirect(url_for('users.login'))
# -------------------- End of Email_Confirm Route --------------------
# -------------------- Unconfirm Route --------------------
@users.route('/unconfirmed')
@login_required
def unconfirmed():
if current_user.confirm:
return redirect(url_for('main.index'))
flash('Please confirm your account!', 'warning')
return render_template('unconfirmed.html')
# -------------------- End of Unconfirm Route --------------------
# -------------------- Resend Route --------------------
@users.route('/resend')
@login_required
def resend_confirmation():
if current_user.confirm == False:
# Confarmation Email Message With Token Link
send_mail(current_user, base_link='users.confirm', salt='email-confirm', message_title="Email Confirmation", template='mail_templates/email_confirm.html')
flash('A new confirmation email has been sent.', 'success')
return redirect(url_for('users.unconfirmed'))
else:
return redirect(url_for('main.index'))
# -------------------- End of Resend Route --------------------
# -------------------- Reset_password Route --------------------
@users.route('/reset_password', methods=['GET', 'POST'])
def reset_request():
# Make sure user is logged out
if current_user.is_authenticated: return redirect(url_for('main.index'))
form = RequestResetPasswordForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
send_mail(user, base_link='users.reset_password', salt='reset-password', message_title="Reset Password", template='mail_templates/reset_password_confirm.html')
flash("An email has been sent with instructions to reset your password.", category='success')
return redirect(url_for('users.login'))
return render_template('request_reset_token.html', form=form)
@users.route('/reset_password/<token>', methods=['GET', 'POST'])
def reset_password(token):
# Make sure user is logged out
if current_user.is_authenticated: return redirect(url_for('main.index'))
user = User.verify_token(token, "reset-password")
if user is None:
flash("That is an invalid or expired token", category='error')
return redirect(url_for('users.reset_request'))
form = ResetPasswordForm()
if form.validate_on_submit():
user.password = form.password.data
db.session.commit()
return redirect(url_for('users.login'))
return render_template('reset_password.html', form=form)
# -------------------- End of Reset_password Route -------------------- |
py | 1a3b393acc3b7081079161d7ceaa658f9f4bdb7d | """
otter Python API
"""
import platform
from . import api
from .check import logs
from .check.notebook import Notebook
from .version import __version__
# whether Otter is running on Windows
_WINDOWS = platform.system() == "Windows"
|
py | 1a3b39902ebb483032831857387cb69c359e5f6d | """Pathname and path-related operations for the Macintosh."""
import os
import warnings
from stat import *
import genericpath
from genericpath import *
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
"basename","dirname","commonprefix","getsize","getmtime",
"getatime","getctime", "islink","exists","lexists","isdir","isfile",
"walk","expanduser","expandvars","normpath","abspath",
"curdir","pardir","sep","pathsep","defpath","altsep","extsep",
"devnull","realpath","supports_unicode_filenames"]
# strings representing various path-related bits and pieces
curdir = ':'
pardir = '::'
extsep = '.'
sep = ':'
pathsep = '\n'
defpath = ':'
altsep = None
devnull = 'Dev:Null'
# Normalize the case of a pathname. Dummy in Posix, but <s>.lower() here.
def normcase(path):
return path.lower()
def isabs(s):
"""Return true if a path is absolute.
On the Mac, relative paths begin with a colon,
but as a special case, paths with no colons at all are also relative.
Anything else is absolute (the string up to the first colon is the
volume name)."""
return ':' in s and s[0] != ':'
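# Illustrative cases (not part of the original module):
#   isabs('Macintosh HD:Documents')  -> True   (text before the first colon is a volume)
#   isabs(':Documents:notes.txt')    -> False  (a leading colon means relative)
#   isabs('notes.txt')               -> False  (no colons at all is also relative)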
def join(s, *p):
path = s
for t in p:
if (not s) or isabs(t):
path = t
continue
if t[:1] == ':':
t = t[1:]
if ':' not in path:
path = ':' + path
if path[-1:] != ':':
path = path + ':'
path = path + t
return path
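# Illustrative case (not part of the original module):
#   join('Macintosh HD:', 'Documents', 'notes.txt') -> 'Macintosh HD:Documents:notes.txt'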
def split(s):
"""Split a pathname into two parts: the directory leading up to the final
bit, and the basename (the filename, without colons, in that directory).
The result (s, t) is such that join(s, t) yields the original argument."""
if ':' not in s: return '', s
colon = 0
for i in range(len(s)):
if s[i] == ':': colon = i + 1
path, file = s[:colon-1], s[colon:]
if path and not ':' in path:
path = path + ':'
return path, file
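# Illustrative case (not part of the original module):
#   split('Macintosh HD:Documents:notes.txt') -> ('Macintosh HD:Documents', 'notes.txt')
#   and join(*split(s)) reproduces the original argument, as the docstring promises.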
def splitext(p):
return genericpath._splitext(p, sep, altsep, extsep)
splitext.__doc__ = genericpath._splitext.__doc__
def splitdrive(p):
"""Split a pathname into a drive specification and the rest of the
path. Useful on DOS/Windows/NT; on the Mac, the drive is always
empty (don't use the volume name -- it doesn't have the same
syntactic and semantic oddities as DOS drive letters, such as there
being a separate current directory per drive)."""
return '', p
# Short interfaces to split()
def dirname(s): return split(s)[0]
def basename(s): return split(s)[1]
def ismount(s):
if not isabs(s):
return False
components = split(s)
return len(components) == 2 and components[1] == ''
def islink(s):
"""Return true if the pathname refers to a symbolic link."""
try:
import Carbon.File
return Carbon.File.ResolveAliasFile(s, 0)[2]
except:
return False
# Is `stat`/`lstat` a meaningful difference on the Mac? This is safe in any
# case.
def lexists(path):
"""Test whether a path exists. Returns True for broken symbolic links"""
try:
st = os.lstat(path)
except os.error:
return False
return True
def expandvars(path):
"""Dummy to retain interface-compatibility with other operating systems."""
return path
def expanduser(path):
"""Dummy to retain interface-compatibility with other operating systems."""
return path
class norm_error(Exception):
"""Path cannot be normalized"""
def normpath(s):
"""Normalize a pathname. Will return the same result for
equivalent paths."""
if ":" not in s:
return ":"+s
comps = s.split(":")
i = 1
while i < len(comps)-1:
if comps[i] == "" and comps[i-1] != "":
if i > 1:
del comps[i-1:i+1]
i = i - 1
else:
# best way to handle this is to raise an exception
raise norm_error, 'Cannot use :: immediately after volume name'
else:
i = i + 1
s = ":".join(comps)
# remove trailing ":" except for ":" and "Volume:"
if s[-1] == ":" and len(comps) > 2 and s != ":"*len(s):
s = s[:-1]
return s
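# Illustrative cases (not part of the original module):
#   normpath('notes.txt')  -> ':notes.txt'  (colon added to mark the path as relative)
#   normpath('HD:a::b')    -> 'HD:b'        ('::' backs up one directory level)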
def walk(top, func, arg):
"""Directory tree walk with callback function.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), call func(arg, dirname, fnames).
dirname is the name of the directory, and fnames a list of the names of
the files and subdirectories in dirname (excluding '.' and '..'). func
may modify the fnames list in-place (e.g. via del or slice assignment),
and walk will only recurse into the subdirectories whose names remain in
fnames; this can be used to implement a filter, or to impose a specific
order of visiting. No semantics are defined for, or required of, arg,
beyond that arg is always passed to func. It can be used, e.g., to pass
a filename pattern, or a mutable object designed to accumulate
statistics. Passing None for arg is common."""
warnings.warnpy3k("In 3.x, os.path.walk is removed in favor of os.walk.",
stacklevel=2)
try:
names = os.listdir(top)
except os.error:
return
func(arg, top, names)
for name in names:
name = join(top, name)
if isdir(name) and not islink(name):
walk(name, func, arg)
def abspath(path):
"""Return an absolute path."""
if not isabs(path):
path = join(os.getcwd(), path)
return normpath(path)
# realpath is a no-op on systems without islink support
def realpath(path):
path = abspath(path)
try:
import Carbon.File
except ImportError:
return path
if not path:
return path
components = path.split(':')
path = components[0] + ':'
for c in components[1:]:
path = join(path, c)
path = Carbon.File.FSResolveAliasFile(path, 1)[0].as_pathname()
return path
supports_unicode_filenames = False
|
py | 1a3b3a34411e2661693d22cef2b7c67cd304a5f8 | import os
import shutil
import subprocess
import sys
from pathlib import Path
from unittest import mock
import pytest
from requests import exceptions as requests_exceptions
from briefcase.console import Log
from briefcase.exceptions import BriefcaseCommandError, MissingToolError, NetworkFailure
from briefcase.integrations.java import JDK
from tests.utils import FsPathMock
@pytest.fixture
def test_command(tmp_path):
command = mock.MagicMock()
command.logger = Log()
command.tools_path = tmp_path / "tools"
# Mock environ.get returning no explicit JAVA_HOME
command.os.environ.get = mock.MagicMock(return_value="")
return command
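# Note: the fixture's environ.get mock returning "" simulates JAVA_HOME being unset;
# individual tests override it when they need an explicit JAVA_HOME value.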
def test_macos_tool_java_home(test_command, capsys):
"""On macOS, the /usr/libexec/java_home utility is checked."""
# Mock being on macOS
test_command.host_os = "Darwin"
# Mock 2 calls to check_output.
test_command.subprocess.check_output.side_effect = [
"/path/to/java",
"javac 1.8.0_144\n",
]
# Create a JDK wrapper by verification
jdk = JDK.verify(command=test_command)
# The JDK should have the path returned by the tool
assert jdk.java_home == Path("/path/to/java")
test_command.subprocess.check_output.assert_has_calls(
[
# First call is to /usr/lib/java_home
mock.call(
["/usr/libexec/java_home"],
stderr=subprocess.STDOUT,
),
# Second is a call to verify a valid Java version
mock.call(
[os.fsdecode(Path("/path/to/java/bin/javac")), "-version"],
stderr=subprocess.STDOUT,
),
]
)
# No console output
output = capsys.readouterr()
assert output.out == ""
assert output.err == ""
def test_macos_tool_failure(test_command, tmp_path, capsys):
"""On macOS, if the libexec tool fails, the Briefcase JDK is used."""
# Mock being on macOS
test_command.host_os = "Darwin"
# Mock a failed call on the libexec tool
test_command.subprocess.check_output.side_effect = subprocess.CalledProcessError(
returncode=1, cmd="/usr/libexec/java_home"
)
# Create a directory to make it look like the Briefcase Java already exists.
(tmp_path / "tools" / "java" / "Contents" / "Home" / "bin").mkdir(parents=True)
# Create a JDK wrapper by verification
jdk = JDK.verify(command=test_command)
# The JDK should have the briefcase JAVA_HOME
assert jdk.java_home == tmp_path / "tools" / "java" / "Contents" / "Home"
test_command.subprocess.check_output.assert_has_calls(
[
# First call is to /usr/lib/java_home
mock.call(
["/usr/libexec/java_home"],
stderr=subprocess.STDOUT,
),
]
)
# No console output
output = capsys.readouterr()
assert output.out == ""
assert output.err == ""
def test_macos_provided_overrides_tool_java_home(test_command, capsys):
"""On macOS, an explicit JAVA_HOME overrides /usr/libexec/java_home."""
# Mock being on macOS
test_command.host_os = "Darwin"
# Mock environ.get returning an explicit JAVA_HOME
test_command.os.environ.get = mock.MagicMock(return_value="/path/to/java")
# Mock return value from javac. libexec won't be invoked.
test_command.subprocess.check_output.return_value = "javac 1.8.0_144\n"
# Create a JDK wrapper by verification
jdk = JDK.verify(command=test_command)
# The JDK should have the path returned by the tool
assert jdk.java_home == Path("/path/to/java")
# A single call to check output
test_command.subprocess.check_output.assert_called_once_with(
[os.fsdecode(Path("/path/to/java/bin/javac")), "-version"],
stderr=subprocess.STDOUT,
),
# No console output
output = capsys.readouterr()
assert output.out == ""
assert output.err == ""
def test_valid_provided_java_home(test_command, capsys):
"""If a valid JAVA_HOME is provided, it is used."""
# Mock environ.get returning an explicit JAVA_HOME
test_command.os.environ.get = mock.MagicMock(return_value="/path/to/java")
# Mock return value from javac.
test_command.subprocess.check_output.return_value = "javac 1.8.0_144\n"
# Create a JDK wrapper by verification
jdk = JDK.verify(command=test_command)
# The JDK should have the path returned by the tool
assert jdk.java_home == Path("/path/to/java")
# A single call to check output
test_command.subprocess.check_output.assert_called_once_with(
[os.fsdecode(Path("/path/to/java/bin/javac")), "-version"],
stderr=subprocess.STDOUT,
),
# No console output
output = capsys.readouterr()
assert output.out == ""
assert output.err == ""
def test_invalid_jdk_version(test_command, tmp_path, capsys):
"""If the JDK pointed to by JAVA_HOME isn't a Java 8 JDK, the briefcase JDK
is used."""
# Mock environ.get returning an explicit JAVA_HOME
test_command.os.environ.get = mock.MagicMock(return_value="/path/to/java")
# Mock return value from javac.
test_command.subprocess.check_output.return_value = "javac 14\n"
# Create a directory to make it look like the Briefcase Java already exists.
(tmp_path / "tools" / "java" / "bin").mkdir(parents=True)
# Create a JDK wrapper by verification
jdk = JDK.verify(command=test_command)
# The JDK should have the briefcase JAVA_HOME
assert jdk.java_home == tmp_path / "tools" / "java"
# A single call was made to check javac
test_command.subprocess.check_output.assert_called_once_with(
[os.fsdecode(Path("/path/to/java/bin/javac")), "-version"],
stderr=subprocess.STDOUT,
),
# No console output (because Briefcase JDK exists)
output = capsys.readouterr()
assert output.out == ""
assert output.err == ""
def test_no_javac(test_command, tmp_path, capsys):
"""If the JAVA_HOME doesn't point to a location with a bin/javac, the
briefcase JDK is used."""
# Mock environ.get returning an explicit JAVA_HOME
test_command.os.environ.get = mock.MagicMock(return_value="/path/to/nowhere")
# Mock return value from javac failing because executable doesn't exist
test_command.subprocess.check_output.side_effect = FileNotFoundError
# Create a directory to make it look like the Briefcase Java already exists.
(tmp_path / "tools" / "java" / "bin").mkdir(parents=True)
# Create a JDK wrapper by verification
jdk = JDK.verify(command=test_command)
# The JAVA_HOME should point at the Briefcase-provided JDK
assert jdk.java_home == tmp_path / "tools" / "java"
# A single call was made to check javac
test_command.subprocess.check_output.assert_called_once_with(
[os.fsdecode(Path("/path/to/nowhere/bin/javac")), "-version"],
stderr=subprocess.STDOUT,
),
# No console output (because Briefcase JDK exists)
output = capsys.readouterr()
assert output.out == ""
assert output.err == ""
def test_javac_error(test_command, tmp_path, capsys):
"""If javac can't be executed, the briefcase JDK is used."""
# Mock environ.get returning an explicit JAVA_HOME
test_command.os.environ.get = mock.MagicMock(return_value="/path/to/java")
# Mock return value from javac failing because executable doesn't exist
test_command.subprocess.check_output.side_effect = subprocess.CalledProcessError(
returncode=1, cmd="/path/to/java/bin/javac"
)
# Create a directory to make it look like the Briefcase Java already exists.
(tmp_path / "tools" / "java" / "bin").mkdir(parents=True)
# Create a JDK wrapper by verification
jdk = JDK.verify(command=test_command)
# The JDK should have the briefcase JAVA_HOME
assert jdk.java_home == tmp_path / "tools" / "java"
# A single call was made to check javac
test_command.subprocess.check_output.assert_called_once_with(
[os.fsdecode(Path("/path/to/java/bin/javac")), "-version"],
stderr=subprocess.STDOUT,
),
# No console output (because Briefcase JDK exists)
output = capsys.readouterr()
assert output.out == ""
assert output.err == ""
def test_unparseable_javac_version(test_command, tmp_path, capsys):
"""If the javac version can't be parsed, the briefcase JDK is used."""
# Mock environ.get returning an explicit JAVA_HOME
test_command.os.environ.get = mock.MagicMock(return_value="/path/to/java")
# Mock return value from javac.
test_command.subprocess.check_output.return_value = "NONSENSE\n"
# Create a directory to make it look like the Briefcase Java already exists.
(tmp_path / "tools" / "java" / "bin").mkdir(parents=True)
# Create a JDK wrapper by verification
jdk = JDK.verify(command=test_command)
# The JDK should have the briefcase JAVA_HOME
assert jdk.java_home == tmp_path / "tools" / "java"
# A single call was made to check javac
test_command.subprocess.check_output.assert_called_once_with(
[os.fsdecode(Path("/path/to/java/bin/javac")), "-version"],
stderr=subprocess.STDOUT,
),
# No console output (because Briefcase JDK exists)
output = capsys.readouterr()
assert output.out == ""
assert output.err == ""
@pytest.mark.parametrize(
("host_os, jdk_url, jhome"),
[
(
"Darwin",
"https://github.com/AdoptOpenJDK/openjdk8-binaries/releases/download/"
"jdk8u242-b08/OpenJDK8U-jdk_x64_mac_hotspot_8u242b08.tar.gz",
"java/Contents/Home",
),
(
"Linux",
"https://github.com/AdoptOpenJDK/openjdk8-binaries/releases/download/"
"jdk8u242-b08/OpenJDK8U-jdk_x64_linux_hotspot_8u242b08.tar.gz",
"java",
),
(
"Windows",
"https://github.com/AdoptOpenJDK/openjdk8-binaries/releases/download/"
"jdk8u242-b08/OpenJDK8U-jdk_x64_windows_hotspot_8u242b08.zip",
"java",
),
],
)
def test_successful_jdk_download(
test_command, tmp_path, capsys, host_os, jdk_url, jhome
):
"""If needed, a JDK can be downloaded."""
# Mock host OS
test_command.host_os = host_os
# Mock a JAVA_HOME that won't exist
# This is only needed to make macOS *not* run /usr/libexec/java_home
test_command.os.environ.get = mock.MagicMock(return_value="/does/not/exist")
# Mock the cached download path
    # Consider removing this if block when we drop py3.7 support and keeping only the else branch.
    # MagicMock below py3.8 doesn't have an __fspath__ attribute.
if sys.version_info < (3, 8):
archive = FsPathMock("/path/to/download.zip")
else:
archive = mock.MagicMock()
archive.__fspath__.return_value = "/path/to/download.zip"
test_command.download_url.return_value = archive
# Create a directory to make it look like Java was downloaded and unpacked.
(tmp_path / "tools" / "jdk8u242-b08").mkdir(parents=True)
# Invoke the verify call
jdk = JDK.verify(command=test_command)
assert jdk.java_home == tmp_path / "tools" / jhome
# Console output contains a warning about the bad JDK location
output = capsys.readouterr()
assert output.err == ""
assert "** WARNING: JAVA_HOME does not point to a Java 8 JDK" in output.out
# Download was invoked
test_command.download_url.assert_called_with(
url=jdk_url,
download_path=tmp_path / "tools",
)
# The archive was unpacked
# TODO: Py3.6 compatibility; os.fsdecode not required in Py3.7
test_command.shutil.unpack_archive.assert_called_with(
"/path/to/download.zip", extract_dir=os.fsdecode(tmp_path / "tools")
)
# The original archive was deleted
archive.unlink.assert_called_once_with()
def test_not_installed(test_command, tmp_path):
"""If the JDK isn't installed, and install isn't requested, an error is
raised."""
# Mock host OS
test_command.host_os = "Linux"
# Invoke the verify call. Install is not requested, so this will fail.
with pytest.raises(MissingToolError):
JDK.verify(command=test_command, install=False)
# Download was not invoked
assert test_command.download_url.call_count == 0
def test_jdk_download_failure(test_command, tmp_path):
"""If an error occurs downloading the JDK, an error is raised."""
# Mock Linux as the host
test_command.host_os = "Linux"
# Mock a failure on download
test_command.download_url.side_effect = requests_exceptions.ConnectionError
# Invoking verify_jdk causes a network failure.
with pytest.raises(NetworkFailure):
JDK.verify(command=test_command)
# That download was attempted
test_command.download_url.assert_called_with(
url="https://github.com/AdoptOpenJDK/openjdk8-binaries/releases/download/"
"jdk8u242-b08/OpenJDK8U-jdk_x64_linux_hotspot_8u242b08.tar.gz",
download_path=tmp_path / "tools",
)
# No attempt was made to unpack the archive
assert test_command.shutil.unpack_archive.call_count == 0
def test_invalid_jdk_archive(test_command, tmp_path):
"""If the JDK download isn't a valid archive, raise an error."""
# Mock Linux as the host
test_command.host_os = "Linux"
# Mock the cached download path
    # Consider removing this if block when we drop Python 3.7 support and keeping only the else branch.
    # MagicMock below Python 3.8 doesn't have an __fspath__ attribute.
if sys.version_info < (3, 8):
archive = FsPathMock("/path/to/download.zip")
else:
archive = mock.MagicMock()
archive.__fspath__.return_value = "/path/to/download.zip"
test_command.download_url.return_value = archive
# Mock an unpack failure due to an invalid archive
test_command.shutil.unpack_archive.side_effect = shutil.ReadError
with pytest.raises(BriefcaseCommandError):
JDK.verify(command=test_command)
# The download occurred
test_command.download_url.assert_called_with(
url="https://github.com/AdoptOpenJDK/openjdk8-binaries/releases/download/"
"jdk8u242-b08/OpenJDK8U-jdk_x64_linux_hotspot_8u242b08.tar.gz",
download_path=tmp_path / "tools",
)
# An attempt was made to unpack the archive.
# TODO: Py3.6 compatibility; os.fsdecode not required in Py3.7
test_command.shutil.unpack_archive.assert_called_with(
"/path/to/download.zip", extract_dir=os.fsdecode(tmp_path / "tools")
)
# The original archive was not deleted
assert archive.unlink.call_count == 0
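# The FsPathMock helper used above is defined elsewhere in the test suite (its
# definition is not shown in this excerpt). A minimal sketch of such an
# os.fspath-compatible stand-in, kept under a distinct name so it does not
# shadow the real helper, might look like this (assumption):
class _FsPathShimExample:
    def __init__(self, path):
        self._path = path

    def __fspath__(self):
        return self._path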
|
py | 1a3b3a556d127f5c9d38349a8180df1dc3e17ebc | from .attribute_builder import AttributeBuilder
class Muted(AttributeBuilder):
"""
Represents 'muted' attribute.
"""
def __init__(self):
super().__init__()
self.attributes = ["muted"]
|
py | 1a3b3a636ba087fec806a38c9b0d4c82506f0787 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayOpenPublicTopicCreateResponse(AlipayResponse):
def __init__(self):
super(AlipayOpenPublicTopicCreateResponse, self).__init__()
self._topic_id = None
@property
def topic_id(self):
return self._topic_id
@topic_id.setter
def topic_id(self, value):
self._topic_id = value
def parse_response_content(self, response_content):
response = super(AlipayOpenPublicTopicCreateResponse, self).parse_response_content(response_content)
if 'topic_id' in response:
self.topic_id = response['topic_id']
|
py | 1a3b3aadbd80740616f73f18c8314fbd36b64a21 | from collections.abc import Iterable
from itertools import repeat
from typing import Callable, TypeVar
T = TypeVar('T')
def count_horizontal_vertical_overlaps(
lines: list[tuple[tuple[int, int], tuple[int, int]]]) -> int:
vents: dict[tuple[int, int], bool] = {}
overlaps = 0
def add(start: int, end: int, mk_coord: Callable[[int], tuple[int, int]]
) -> None:
nonlocal overlaps
for i in range(min(start, end), max(start, end) + 1):
c = mk_coord(i)
if c in vents:
if not vents[c]:
overlaps += 1
vents[c] = True
else:
vents[c] = False
for (x1, y1), (x2, y2) in lines:
if x1 == x2:
add(y1, y2, lambda y: (x1, y))
elif y1 == y2:
add(x1, x2, lambda x: (x, y1))
return overlaps
def mk_range(start: int, end: int) -> Iterable[int]:
if start < end:
return range(start, end + 1)
if start > end:
return range(start, end - 1, -1)
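    # start == end: that axis is constant, so yield it indefinitely; zip() in
    # count_all_overlaps stops when the other, finite range is exhausted.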
return repeat(start)
def count_all_overlaps(
lines: list[tuple[tuple[int, int], tuple[int, int]]]) -> int:
vents: dict[tuple[int, int], bool] = {}
overlaps = 0
for (x1, y1), (x2, y2) in lines:
for c in zip(mk_range(x1, x2), mk_range(y1, y2)):
if c in vents:
if not vents[c]:
overlaps += 1
vents[c] = True
else:
vents[c] = False
return overlaps
def pair(it: Iterable[T]) -> tuple[T, T]:
a, b = it
return a, b
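# Illustrative self-check (added sketch, not part of the original script):
# three segments that cross only at (1, 1) should each report one overlap.
_demo_lines = [((0, 1), (2, 1)), ((1, 0), (1, 2)), ((0, 0), (2, 2))]
assert count_horizontal_vertical_overlaps(_demo_lines) == 1
assert count_all_overlaps(_demo_lines) == 1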
with open('input.txt') as f:
lines = [pair(pair(map(int, c.split(',')))
for c in l.rstrip().split(' -> ')) for l in f]
print(count_horizontal_vertical_overlaps(lines))
print(count_all_overlaps(lines))
|
py | 1a3b3ab6489e6817526ce700b05ad40f65e8404e | from typing import Dict, Optional, Tuple
import numpy as np
from stable_baselines.common import vec_env
from imitation.util import rollout
class Buffer:
"""A FIFO ring buffer for NumPy arrays of a fixed shape and dtype.
Supports random sampling with replacement.
"""
capacity: int
"""The number of data samples that can be stored in this buffer."""
sample_shapes: Dict[str, Tuple[int, ...]]
"""The shapes of each data sample stored in this buffer."""
_arrays: Dict[str, np.ndarray]
"""The underlying NumPy arrays (which actually store the data)."""
_n_data: int
"""The number of samples currently stored in this buffer.
An integer in `range(0, self.capacity + 1)`. This attribute is the return
value of `self.__len__`.
"""
_idx: int
"""The index of the first row that new data should be written to.
An integer in `range(0, self.capacity)`.
"""
def __init__(self, capacity: int,
sample_shapes: Dict[str, Tuple[int, ...]],
dtypes: Dict[str, np.dtype]):
"""Constructs a Buffer.
Args:
capacity: The number of samples that can be stored.
sample_shapes: A dictionary mapping string keys to the shape of
samples associated with that key.
dtypes (`np.dtype`-like): A dictionary mapping string keys to the dtype
of samples associated with that key.
Raises:
KeyError: `sample_shapes` and `dtypes` have different keys.
"""
if sample_shapes.keys() != dtypes.keys():
raise KeyError("sample_shape and dtypes keys don't match")
self.capacity = capacity
self.sample_shapes = {k: tuple(shape) for k, shape in sample_shapes.items()}
self._arrays = {k: np.zeros((capacity,) + shape, dtype=dtypes[k])
for k, shape in self.sample_shapes.items()}
self._n_data = 0
self._idx = 0
@classmethod
def from_data(cls, data: Dict[str, np.ndarray]) -> "Buffer":
"""Constructs and return a Buffer containing only the provided data.
The returned Buffer is at full capacity and ready for sampling.
Args:
data: A dictionary mapping keys to data arrays. The arrays may differ
in their shape, but should agree in the first axis.
Raises:
ValueError: `data` is empty.
ValueError: `data` has items mapping to arrays differing in the
length of their first axis.
"""
capacities = [arr.shape[0] for arr in data.values()]
capacities = np.unique(capacities)
if len(data) == 0:
raise ValueError("No keys in data.")
if len(capacities) > 1:
raise ValueError("Keys map to different length values")
capacity = capacities[0]
sample_shapes = {k: arr.shape[1:] for k, arr in data.items()}
dtypes = {k: arr.dtype for k, arr in data.items()}
buf = cls(capacity, sample_shapes, dtypes)
buf.store(data)
return buf
def store(self, data: Dict[str, np.ndarray]) -> None:
"""Stores new data samples, replacing old samples with FIFO priority.
Args:
data: A dictionary mapping keys `k` to arrays with shape
`(n_samples,) + self.sample_shapes[k]`, where `n_samples` is less
than or equal to `self.capacity`.
Raises:
ValueError: `data` is empty.
ValueError: If `n_samples` is greater than `self.capacity`.
ValueError: data is the wrong shape.
"""
expected_keys = set(self.sample_shapes.keys())
missing_keys = expected_keys.difference(data.keys())
unexpected_keys = set(data.keys()).difference(expected_keys)
if len(missing_keys) > 0:
raise ValueError(f"Missing keys {missing_keys}")
if len(unexpected_keys) > 0:
raise ValueError(f"Unexpected keys {unexpected_keys}")
n_samples = [arr.shape[0] for arr in data.values()]
n_samples = np.unique(n_samples)
if len(n_samples) > 1:
raise ValueError("Keys map to different length values.")
n_samples = n_samples[0]
if n_samples == 0:
raise ValueError("Trying to store empty data.")
if n_samples > self.capacity:
raise ValueError("Not enough capacity to store data.")
for k, arr in data.items():
if arr.shape[1:] != self.sample_shapes[k]:
raise ValueError(f"Wrong data shape for {k}")
new_idx = self._idx + n_samples
if new_idx > self.capacity:
n_remain = self.capacity - self._idx
# Need to loop around the buffer. Break into two "easy" calls.
self._store_easy({k: arr[:n_remain] for k, arr in data.items()})
assert self._idx == 0
self._store_easy({k: arr[n_remain:] for k, arr in data.items()})
else:
self._store_easy(data)
def _store_easy(self, data: Dict[str, np.ndarray]) -> None:
"""Stores new data samples, replacing old samples with FIFO priority.
Requires that `len(data) <= self.capacity - self._idx`. Updates `self._idx`
to be the insertion point of the next call to `_store_easy` call,
looping back to `self._idx = 0` if necessary.
Also updates `self._n_data`.
Args:
data: Same as in `self.store`'s docstring, except with the additional
constraint `len(data) <= self.capacity - self._idx`.
"""
n_samples = [arr.shape[0] for arr in data.values()]
n_samples = np.unique(n_samples)
assert len(n_samples) == 1
n_samples = n_samples[0]
assert n_samples <= self.capacity - self._idx
idx_hi = self._idx + n_samples
for k, arr in data.items():
self._arrays[k][self._idx:idx_hi] = arr
self._idx = idx_hi % self.capacity
self._n_data = min(self._n_data + n_samples, self.capacity)
def sample(self, n_samples: int) -> Dict[str, np.ndarray]:
"""Uniformly sample `n_samples` samples from the buffer with replacement.
Args:
n_samples: The number of samples to randomly sample.
Returns:
        A dictionary mapping each key `k` to an array with shape
        `(n_samples,) + self.sample_shapes[k]`.
Raises:
ValueError: The buffer is empty.
"""
if len(self) == 0:
raise ValueError("Buffer is empty")
ind = np.random.randint(len(self), size=n_samples)
return {k: buffer[ind] for k, buffer in self._arrays.items()}
def __len__(self) -> int:
"""Returns the number of samples stored in the buffer."""
assert 0 <= self._n_data <= self.capacity
return self._n_data
class ReplayBuffer:
"""Buffer for Transitions."""
capacity: int
"""The number of data samples that can be stored in this buffer."""
def __init__(self, capacity: int,
venv: Optional[vec_env.VecEnv] = None, *,
obs_shape: Optional[Tuple[int, ...]] = None,
act_shape: Optional[Tuple[int, ...]] = None,
obs_dtype: Optional[np.dtype] = None,
act_dtype: Optional[np.dtype] = None):
"""Constructs a ReplayBuffer.
Args:
capacity: The number of samples that can be stored.
venv: The environment whose action and observation
spaces can be used to determine the data shapes of the underlying
buffers. Overrides all the following arguments.
obs_shape: The shape of the observation space.
act_shape: The shape of the action space.
obs_dtype: The dtype of the observation space.
act_dtype: The dtype of the action space.
Raises:
ValueError: Couldn't infer the observation and action shapes and dtypes
from the arguments.
"""
params = [obs_shape, act_shape, obs_dtype, act_dtype]
if venv is not None:
if np.any([x is not None for x in params]):
raise ValueError("Specified shape or dtype and environment.")
obs_shape = tuple(venv.observation_space.shape)
act_shape = tuple(venv.action_space.shape)
obs_dtype = venv.observation_space.dtype
act_dtype = venv.action_space.dtype
else:
if np.any([x is None for x in params]):
raise ValueError("Shape or dtype missing and no environment specified.")
self.capacity = capacity
sample_shapes = {
'obs': obs_shape,
'acts': act_shape,
'next_obs': obs_shape,
'rews': (),
'dones': (),
}
dtypes = {
'obs': obs_dtype,
'acts': act_dtype,
'next_obs': obs_dtype,
'rews': np.float32,
'dones': np.bool,
}
self._buffer = Buffer(capacity, sample_shapes=sample_shapes, dtypes=dtypes)
@classmethod
def from_data(cls, transitions: rollout.Transitions) -> "ReplayBuffer":
"""Construct and return a ReplayBuffer containing only the provided data.
The returned ReplayBuffer is at full capacity and ready for sampling.
Args:
transitions: Transitions to store.
Returns:
A new ReplayBuffer.
Raises:
ValueError: obs and next_obs have a different dtype.
"""
if transitions.obs.dtype != transitions.next_obs.dtype:
raise ValueError("obs and next_obs must have the same dtype.")
capacity, *obs_shape = transitions.obs.shape
_, *act_shape = transitions.acts.shape
instance = cls(capacity=capacity, obs_shape=obs_shape, act_shape=act_shape,
obs_dtype=transitions.obs.dtype,
act_dtype=transitions.acts.dtype)
instance.store(transitions)
return instance
def sample(self, n_samples: int) -> rollout.Transitions:
"""Sample obs-act-obs triples.
Args:
n_samples: The number of samples.
Returns:
A Transitions named tuple containing n_samples transitions.
"""
sample = self._buffer.sample(n_samples)
return rollout.Transitions(**sample)
def store(self, transitions: rollout.Transitions) -> None:
"""Store obs-act-obs triples.
Args:
transitions: Transitions to store.
Raises:
ValueError: The arguments didn't have the same length.
"""
lengths = [len(arr) for arr in transitions]
if len(set(lengths)) != 1:
raise ValueError("Arguments must have the same length.")
self._buffer.store(transitions._asdict())
def __len__(self):
return len(self._buffer)
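# Illustrative usage (added sketch; not part of the original module). It only
# exercises the low-level Buffer defined above, so no environment or policy
# rollouts are required.
if __name__ == "__main__":
    demo = Buffer(capacity=4,
                  sample_shapes={"obs": (3,), "acts": ()},
                  dtypes={"obs": np.float32, "acts": np.int64})
    demo.store({"obs": np.zeros((2, 3), dtype=np.float32),
                "acts": np.array([0, 1], dtype=np.int64)})
    print(len(demo))                    # 2
    print(demo.sample(5)["obs"].shape)  # (5, 3)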
|
py | 1a3b3b7c8dfc36690e9a527dec9c8325f65e6fc2 | from plenum.common.constants import TRUSTEE, STEWARD, NODE
from stp_core.common.log import getlogger
from indy_common.constants import OWNER, POOL_UPGRADE, TGB, TRUST_ANCHOR, NYM, \
POOL_CONFIG, SCHEMA, CLAIM_DEF, \
POOL_RESTART, VALIDATOR_INFO
from indy_common.roles import Roles
logger = getlogger()
# TODO: make this class the only point of authorization and checking permissions!
# There are some duplicates of this logic in *_req_handler classes
class Authoriser:
ValidRoles = (TRUSTEE, TGB, STEWARD, TRUST_ANCHOR, None)
AuthMap = {
'{}_role__{}'.format(NYM, TRUSTEE):
{TRUSTEE: []},
'{}_role__{}'.format(NYM, TGB):
{TRUSTEE: []},
'{}_role__{}'.format(NYM, STEWARD):
{TRUSTEE: []},
'{}_role__{}'.format(NYM, TRUST_ANCHOR):
{TRUSTEE: [], STEWARD: []},
'{}_role__'.format(NYM):
{TRUSTEE: [], TGB: [], STEWARD: [], TRUST_ANCHOR: []},
'{}_role_{}_'.format(NYM, TRUSTEE):
{TRUSTEE: []},
'{}_role_{}_'.format(NYM, TGB):
{TRUSTEE: []},
'{}_role_{}_'.format(NYM, STEWARD):
{TRUSTEE: []},
'{}_role_{}_'.format(NYM, TRUST_ANCHOR):
{TRUSTEE: []},
'{}_<any>_<any>_<any>'.format(SCHEMA):
{TRUSTEE: [], STEWARD: [], TRUST_ANCHOR: []},
'{}_<any>_<any>_<any>'.format(CLAIM_DEF):
{TRUSTEE: [OWNER, ], STEWARD: [OWNER, ], TRUST_ANCHOR: [OWNER, ]},
'{}_verkey_<any>_<any>'.format(NYM):
{r: [OWNER] for r in ValidRoles},
'{}_services__[VALIDATOR]'.format(NODE):
{STEWARD: [OWNER, ]},
# INDY-410 - steward allowed to demote/promote its validator
'{}_services_[VALIDATOR]_[]'.format(NODE):
{TRUSTEE: [], STEWARD: [OWNER, ]},
'{}_services_[]_[VALIDATOR]'.format(NODE):
{TRUSTEE: [], STEWARD: [OWNER, ]},
'{}_node_ip_<any>_<any>'.format(NODE):
{STEWARD: [OWNER, ]},
'{}_node_port_<any>_<any>'.format(NODE):
{STEWARD: [OWNER, ]},
'{}_client_ip_<any>_<any>'.format(NODE):
{STEWARD: [OWNER, ]},
'{}_client_port_<any>_<any>'.format(NODE):
{STEWARD: [OWNER, ]},
'{}_blskey_<any>_<any>'.format(NODE):
{STEWARD: [OWNER, ]},
'{}_action__start'.format(POOL_UPGRADE):
{TRUSTEE: [], TGB: []},
'{}_action_start_cancel'.format(POOL_UPGRADE):
{TRUSTEE: [], TGB: []},
'{}_action_<any>_<any>'.format(POOL_RESTART):
{TRUSTEE: []},
'{}_action_<any>_<any>'.format(POOL_CONFIG):
{TRUSTEE: [], TGB: []},
'{}_<any>_<any>_<any>'.format(VALIDATOR_INFO):
{TRUSTEE: [], STEWARD: []},
}
@staticmethod
def isValidRole(role) -> bool:
return role in Authoriser.ValidRoles
@staticmethod
    def getRoleFromName(roleName):
if not roleName:
return
return Roles[roleName].value
@staticmethod
def isValidRoleName(roleName) -> bool:
if not roleName:
return True
try:
Authoriser.getRoleFromName(roleName)
except KeyError:
return False
return True
@staticmethod
def authorised(typ, actorRole, field=None, oldVal=None, newVal=None,
isActorOwnerOfSubject=None) -> (bool, str):
field = field if field is not None else ""
oldVal = '' if oldVal is None else \
str(oldVal).replace('"', '').replace("'", '')
newVal = '' if newVal is None else \
str(newVal).replace('"', '').replace("'", '')
key = '_'.join([typ, field, oldVal, newVal])
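        # Look up the exact rule first, then fall back to the wildcard rules
        # "<typ>_<field>_<any>_<any>" and finally "<typ>_<any>_<any>_<any>".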
if key not in Authoriser.AuthMap:
any_value = '_'.join([typ, field, '<any>', '<any>'])
if any_value not in Authoriser.AuthMap:
any_field = '_'.join([typ, "<any>", '<any>', '<any>'])
if any_field not in Authoriser.AuthMap:
msg = "key '{}' not found in authorized map".format(key)
logger.debug(msg)
return False, msg
else:
key = any_field
else:
key = any_value
roles = Authoriser.AuthMap[key]
if actorRole not in roles:
roles_as_str = [Roles.nameFromValue(role) for role in roles.keys()]
return False, '{} not in allowed roles {}'.\
format(Roles.nameFromValue(actorRole), roles_as_str)
roleDetails = roles[actorRole]
if len(roleDetails) == 0:
return True, ''
else:
r = OWNER in roleDetails and isActorOwnerOfSubject
msg = '' if r else 'Only owner is allowed'
return r, msg
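# Illustrative sketch (added; not part of the original module). Per AuthMap,
# a TRUSTEE may assign the TRUST_ANCHOR role on a NYM, while a TRUST_ANCHOR
# acting alone may not.
if __name__ == "__main__":
    allowed, _ = Authoriser.authorised(NYM, TRUSTEE, field="role",
                                       oldVal=None, newVal=TRUST_ANCHOR)
    print(allowed)  # True
    allowed, reason = Authoriser.authorised(NYM, TRUST_ANCHOR, field="role",
                                            oldVal=None, newVal=TRUST_ANCHOR)
    print(allowed, reason)  # False, '... not in allowed roles ...'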
|
py | 1a3b3c1cb7141c84c32afc74c248a71280c726c8 | # -*- coding: utf-8 -*-
import argparse
import json
import os
import re
import shutil
import subprocess as sp
import sys
from concurrent import futures
from tempfile import mkdtemp, mkstemp
from typing import List, Tuple
import cx_Oracle
from pyinterprod import logger
from pyinterprod.utils import oracle
from .database import Database
from . import contrib
HMM_SUFFIX = ".hmm"
SEQ_SUFFIX = ".fa"
DOM_SUFFIX = ".tab"
OUT_SUFFIX = ".out"
def calc_dir_size(dirpath: str) -> int:
size = 0
for root, dirs, files in os.walk(dirpath):
for f in files:
size += os.path.getsize(os.path.join(root, f))
return size
def create_tables(url: str):
con = cx_Oracle.connect(url)
cur = con.cursor()
for table in ("CLAN_MATCH", "CLAN_MEMBER", "CLAN"):
oracle.drop_table(cur, table, purge=True)
cur.execute(
"""
CREATE TABLE INTERPRO.CLAN
(
CLAN_AC VARCHAR2(25) NOT NULL,
DBCODE CHAR(1) NOT NULL,
NAME VARCHAR2(100) DEFAULT NULL,
DESCRIPTION VARCHAR2(4000) DEFAULT NULL,
CONSTRAINT PK_CLAN
PRIMARY KEY (CLAN_AC),
CONSTRAINT FK_CLAN$DBCODE
FOREIGN KEY (DBCODE)
REFERENCES INTERPRO.CV_DATABASE (DBCODE)
ON DELETE CASCADE
)
"""
)
cur.execute(
"""
CREATE TABLE INTERPRO.CLAN_MEMBER
(
CLAN_AC VARCHAR2(25) NOT NULL,
MEMBER_AC VARCHAR2(25) NOT NULL,
LEN NUMBER NOT NULL,
SCORE FLOAT NOT NULL,
CONSTRAINT PK_CLAN_MEMBER
PRIMARY KEY (CLAN_AC, MEMBER_AC),
CONSTRAINT UQ_CLAN_MEMBER$MEMBER_AC
UNIQUE (MEMBER_AC),
CONSTRAINT FK_CLAN_MEMBER$CLAN_AC
FOREIGN KEY (CLAN_AC)
REFERENCES INTERPRO.CLAN (CLAN_AC)
ON DELETE CASCADE,
CONSTRAINT FK_CLAN_MEMBER$MEMBER_AC
FOREIGN KEY (MEMBER_AC)
REFERENCES INTERPRO.METHOD (METHOD_AC)
ON DELETE CASCADE
)
"""
)
cur.execute(
"""
CREATE TABLE INTERPRO.CLAN_MATCH
(
QUERY_AC VARCHAR2(25) NOT NULL,
TARGET_AC VARCHAR2(25) NOT NULL,
EVALUE FLOAT NOT NULL,
DOMAINS CLOB NOT NULL,
CONSTRAINT PK_CLAN_MATCH
PRIMARY KEY (QUERY_AC, TARGET_AC),
CONSTRAINT FK_CLAN_MATCH
FOREIGN KEY (QUERY_AC)
REFERENCES INTERPRO.CLAN_MEMBER (MEMBER_AC)
ON DELETE CASCADE
)
"""
)
cur.close()
con.close()
def load_sequence(seqfile: str) -> str:
seq = ""
with open(seqfile, "rt") as fh:
next(fh)
for line in fh:
seq += line.rstrip()
return seq
def iter_models(hmmdb: str):
with open(hmmdb, "rt") as fh:
reg_acc = re.compile(r"ACC\s+(\w+)", flags=re.M)
reg_name = re.compile(r"^NAME\s+(PTHR\d+)\.(SF\d+)?", flags=re.M)
hmm = ""
for line in fh:
hmm += line
if line[:2] == "//":
m = reg_acc.search(hmm)
if m:
accession = m.group(1)
else:
# PANTHER: accessions in the NAME field
m = reg_name.search(hmm)
accession, prefix = m.groups()
if prefix is not None:
accession += ':' + prefix
yield accession, hmm
hmm = ""
def iter_sequences(seqfile: str):
with open(seqfile, "rt") as fh:
buffer = ""
accession = identifier = None
for line in fh:
if line[0] == ">":
if buffer and identifier:
yield identifier, accession, buffer
m = re.match(r">(gnl\|CDD\|\d+)\s+(cd\d+),", line)
if m:
identifier, accession = m.groups()
else:
accession = identifier = None
buffer = ""
buffer += line
if buffer and identifier:
yield identifier, accession, buffer
def load_hmmscan_results(outfile: str, tabfile: str) -> List[dict]:
alignments = load_domain_alignments(outfile)
targets = {}
with open(tabfile, "rt") as fh:
i = 0
for line in fh:
if line[0] == "#":
continue
cols = re.split(r"\s+", line.rstrip(), maxsplit=22)
name = cols[0]
            # Pfam accessions carry a version suffix (e.g. PF00001.21); keep only the accession.
acc = cols[1].split(".")[0]
if acc == "-":
# Panther accessions are under the `target_name` column
acc = name
if acc in targets:
t = targets[acc]
else:
t = targets[acc] = {
"name": name,
"accession": acc,
"tlen": int(cols[2]),
"qlen": int(cols[5]),
# full sequence
"evalue": float(cols[6]),
"evaluestr": cols[6],
"score": float(cols[7]),
"bias": float(cols[8]),
"domains": []
}
t["domains"].append({
# this domain
# conditional E-value
"cevalue": float(cols[11]),
"cevaluestr": cols[11],
# independent E-value
"ievalue": float(cols[12]),
"ievaluestr": cols[12],
"score": float(cols[13]),
"bias": float(cols[14]),
"coordinates": {
# target (as we scan an HMM DB)
"hmm": {
"start": int(cols[15]),
"end": int(cols[16])
},
# query
"ali": {
"start": int(cols[17]),
"end": int(cols[18])
},
"env": {
"start": int(cols[19]),
"end": int(cols[20])
},
},
"sequences": alignments[i]
})
i += 1
return list(targets.values())
def load_domain_alignments(file: str) -> List[Tuple[str, str]]:
"""
Parse the output file of hmmscan and load domain alignments.
Example of alignments:
== domain 1 score: 25.3 bits; conditional E-value: 5.2e-09
Cytochrome_c4 11 llalaalal.alaaaadaeagaaklaea......gaaavkaCaaCHGadGnsaaaaayPrLAgqsaaYlakqLkdfrsg 82
l++l+a+++ ++ a+++ e++a+k++ea + ++C +CHG+d ++a+ P+L ++Y +++++++ ++
Cytochrome_Bsub_c550-consensus 18 LVVLLAVNGgSKDAEEEKEEEAEKSEEAeaeaegEEIFKQKCISCHGKDLEGAVG---PNLEKVGSKYSEEEIAKIIEN 93
34444444442223333333333333336666856777899***********766...***************999887 PP
Cytochrome_c4 83 errknpMaplakaLsdqdiedlaaYfaaq 111
k +M a+ sd++ +++a+++a++
Cytochrome_Bsub_c550-consensus 94 G--KGAM--PAAIVSDDEAKAVAKWLAEK 118
3..3344..46678999999999999986 PP
Since the sequence is too long, the domain is represented with two "blocks".
The "== domain" line might be followed by a consensus structure annotation line (not the case here).
Each block has four lines:
1. consensus of the target profile
2. matches between the query sequence and target profile (**can be empty**)
3. query sequence
4. posterior probability of each aligned residue
:param file: hmmscan output file
:return: a list of alignments, represented by a tuple of two sequences (query, target)
"""
alignments = []
query_seq = target_seq = ""
with open(file, "rt") as fh:
for line in fh:
line = line.strip()
if not line:
continue
if line.startswith(">> "):
# New model
# target_name = line[3:]
if query_seq:
alignments.append((query_seq, target_seq))
query_seq = target_seq = ""
elif line.startswith("== domain"):
# New domain
if query_seq:
alignments.append((query_seq, target_seq))
query_seq = target_seq = ""
line = next(fh).strip()
block = []
while line or len(block) < 4:
block.append(line)
line = next(fh).strip()
del block[:-4]
target_seq += block[0].split()[2]
query_seq += block[2].split()[2]
elif line == "Internal pipeline statistics summary:":
alignments.append((query_seq, target_seq))
query_seq = target_seq = ""
elif query_seq:
# New block of domain
block = []
while line or len(block) < 4:
block.append(line)
line = next(fh).strip()
del block[:-4]
target_seq += block[0].split()[2]
query_seq += block[2].split()[2]
return alignments
def load_compass_results(outfile) -> List[dict]:
# p1 = re.compile(r"length\s*=\s*(\d+)")
p2 = re.compile(r"Evalue\s*=\s*([\d.e\-]+)")
targets = {}
block = 0
query_id = None
query_seq = ""
target_id = None
target_seq = ""
length = None
evalue = None
evalue_str = None
pos_start = None
with open(outfile, "rt") as fh:
for line in fh:
line = line.rstrip()
if line.startswith("Subject="):
"""
Format:
Subject= cd154/cd15468.fa
length=413 filtered_length=413 Neff=1.000
Smith-Waterman score = 254 Evalue = 3.36e-16
(the path after "Subject=" might be truncated)
"""
if target_id:
targets[target_id] = {
"id": target_id,
"evalue": evalue,
"evaluestr": evalue_str,
"length": length,
"start": pos_start,
"end": pos_start + len(query_seq.replace('=', '')) - 1,
"sequences": {
"query": query_seq,
"target": target_seq
}
}
query_id = None
query_seq = None
target_id = None
target_seq = None
line = next(fh)
# length = int(p1.match(line).group(1))
line = next(fh)
evalue_str = p2.search(line).group(1)
try:
evalue = float(evalue_str)
except ValueError:
evalue = 0
block = 1
elif line.startswith("Parameters:"):
# Footer: end of results
break
elif not block:
continue
elif line:
"""
First block:
gnl|CDD|271233 1 PSFIPGPT==TPKGCTRIPSFSLSDTHWCYTHNVILSGCQDHSKSNQYLSLGVIKTNSDG
CONSENSUS_1 1 PSFIPGPT==TPKGCTRIPSFSLSDTHWCYTHNVILSGCQDHSKSNQYLSLGVIKTNSDG
P++IP+ T C+R PSF++S+ + YT+ V ++CQDH + +Y+++GVI+ ++ G
CONSENSUS_2 1 PNLIPADTGLLSGECVRQPSFAISSGIYAYTYLVRKGSCQDHRSLYRYFEVGVIRDDGLG
gnl|CDD|271230 1 PNLIPADTGLLSGECVRQPSFAISSGIYAYTYLVRKGSCQDHRSLYRYFEVGVIRDDGLG
(following blocks do not have the start position between the ID and the sequence)
"""
query = line.split()
next(fh)
next(fh)
next(fh)
target = next(fh).split()
if block == 1:
query_id = query[0]
pos_start = int(query[1])
query_seq = query[2]
target_id = target[0]
target_seq = target[2]
else:
query_seq += query[1]
target_seq += target[1]
block += 1
targets[target_id] = {
"id": target_id,
"evalue": evalue,
"evaluestr": evalue_str,
"length": length,
"start": pos_start,
"end": pos_start + len(query_seq.replace('=', '')) - 1,
"sequences": {
"query": query_seq,
"target": target_seq
}
}
return list(targets.values())
def run_compass(seqfile: str, database: str, outfile: str):
args = ["compass_vs_db", "-i", seqfile, "-d", database, "-o", outfile]
process = sp.run(args=args, stderr=sp.DEVNULL, stdout=sp.DEVNULL)
if process.returncode == 0:
return True
try:
os.remove(outfile)
except FileNotFoundError:
pass
return False
def run_hmmemit(hmmdb: str, seqfile: str):
sp.run(args=["hmmemit", "-c", "-o", seqfile, hmmdb],
stderr=sp.DEVNULL, stdout=sp.DEVNULL, check=True)
def run_hmmscan(hmmdb: str, seqfile: str, domfile: str, outfile: str) -> bool:
args = ["hmmscan", "-o", outfile, "--domtblout", domfile, "--cpu", "1",
hmmdb, seqfile]
process = sp.run(args=args, stderr=sp.DEVNULL, stdout=sp.DEVNULL)
if process.returncode == 0:
return True
for f in (domfile, outfile):
try:
os.remove(f)
except FileNotFoundError:
pass
return False
def update_cdd_clans(url: str, database: Database, cddmasters: str,
cddid: str, fam2supfam: str, **kwargs):
threads = kwargs.get("threads")
tmpdir = kwargs.get("tmpdir")
if tmpdir:
os.makedirs(tmpdir, exist_ok=True)
logger.info("deleting old clans")
con = cx_Oracle.connect(url)
cur = con.cursor()
cur.execute("DELETE FROM INTERPRO.CLAN WHERE DBCODE = :1",
(database.identifier,))
con.commit()
cur.close()
con.close()
clans = contrib.cdd.get_clans(cddid, fam2supfam)
clans_to_insert = {}
mem2clan = {}
for c in clans:
clans_to_insert[c.accession] = c
for m in c.members:
mem2clan[m["accession"]] = (c.accession, m["score"])
logger.info("parsing representative sequences")
workdir = mkdtemp(dir=tmpdir)
fd, files_list = mkstemp(dir=workdir)
id2acc = {}
seqfiles = {}
with open(fd, "wt") as fh:
for model_id, model_acc, sequence in iter_sequences(cddmasters):
if model_acc not in mem2clan or model_acc in seqfiles:
continue
subdir = os.path.join(workdir, model_acc[:5])
try:
os.mkdir(subdir)
except FileExistsError:
pass
prefix = os.path.join(subdir, model_acc)
seqfile = prefix + SEQ_SUFFIX
with open(seqfile, "wt") as fh2:
fh2.write(sequence)
fh.write(f"{seqfile}\n")
seqfiles[model_acc] = prefix
id2acc[model_id] = model_acc
logger.info("building profile database")
    # Use a separate name for the temporary COMPASS profile database so the
    # `database` argument (the member database record) is not shadowed.
    fd, compassdb = mkstemp(dir=workdir)
    os.close(fd)
    os.remove(compassdb)
    sp.run(["mk_compass_db", "-i", files_list, "-o", compassdb],
           stderr=sp.DEVNULL, stdout=sp.DEVNULL, check=True)
with futures.ThreadPoolExecutor(max_workers=threads) as executor:
logger.info("querying sequences")
fs = {}
for model_acc, prefix in seqfiles.items():
seqfile = prefix + SEQ_SUFFIX
outfile = prefix + OUT_SUFFIX
            f = executor.submit(run_compass, seqfile, compassdb, outfile)
fs[f] = (model_acc, prefix)
con = cx_Oracle.connect(url)
cur = con.cursor()
cur2 = con.cursor()
cur2.setinputsizes(25, 25, cx_Oracle.DB_TYPE_BINARY_DOUBLE,
cx_Oracle.DB_TYPE_CLOB)
clan_sql = "INSERT INTO INTERPRO.CLAN VALUES (:1, :2, :3, :4)"
memb_sql = "INSERT INTO INTERPRO.CLAN_MEMBER VALUES (:1, :2, :3, :4)"
mtch_sql = "INSERT INTO INTERPRO.CLAN_MATCH VALUES (:1, :2, :3, :4)"
completed = errors = progress = 0
for f in futures.as_completed(fs):
model_acc, prefix = fs[f]
completed += 1
if not f.result():
logger.error(f"{model_acc}")
errors += 1
continue
clan_acc, score = mem2clan[model_acc]
sequence = load_sequence(prefix + SEQ_SUFFIX)
try:
clan = clans_to_insert.pop(clan_acc)
except KeyError:
# Clan already inserted
pass
else:
cur.execute(clan_sql, (clan.accession, database.identifier,
clan.name, clan.description))
cur.execute(memb_sql, (clan_acc, model_acc, len(sequence), score))
matches = []
for target in load_compass_results(prefix + OUT_SUFFIX):
target_acc = id2acc[target["id"]]
if target_acc == model_acc:
continue
matches.append((
model_acc,
target_acc,
target["evalue"],
json.dumps([(target["start"], target["end"])])
))
if matches:
cur2.executemany(mtch_sql, matches)
pc = completed * 100 // len(fs)
if pc > progress:
progress = pc
logger.debug(f"{progress:>10}%")
con.commit()
cur.close()
cur2.close()
con.close()
size = calc_dir_size(workdir)
logger.info(f"disk usage: {size / 1024 ** 2:,.0f} MB")
shutil.rmtree(workdir)
if errors:
raise RuntimeError(f"{errors} error(s)")
def update_hmm_clans(url: str, database: Database, hmmdb: str, **kwargs):
clan_source = kwargs.get("source")
threads = kwargs.get("threads")
tmpdir = kwargs.get("tmpdir")
if tmpdir:
os.makedirs(tmpdir, exist_ok=True)
logger.info("deleting old clans")
con = cx_Oracle.connect(url)
cur = con.cursor()
cur.execute("DELETE FROM INTERPRO.CLAN WHERE DBCODE = :1",
(database.identifier,))
con.commit()
cur.close()
con.close()
logger.info("loading new clans")
if database.name.lower() == "panther":
clans = contrib.panther.get_clans(url)
def getsubdir(x): return x[:7]
elif database.name.lower() == "pfam":
clans = contrib.pfam.get_clans(clan_source)
def getsubdir(x): return x[:5]
elif database.name.lower() == "pirsf":
clans = contrib.pirsf.get_clans(clan_source)
def getsubdir(x): return x[:8]
else:
raise NotImplementedError()
clans_to_insert = {}
mem2clan = {}
for c in clans:
clans_to_insert[c.accession] = c
for m in c.members:
mem2clan[m["accession"]] = (c.accession, m["score"])
workdir = mkdtemp(dir=tmpdir)
num_duplicates = 0
with futures.ThreadPoolExecutor(max_workers=threads) as executor:
logger.info("emitting consensus sequences")
fs = {}
models = set()
for model_acc, hmm in iter_models(hmmdb):
if model_acc not in mem2clan:
# Ignore models not belonging to a clan
continue
elif model_acc in models:
num_duplicates += 1
continue
subdir = os.path.join(workdir, getsubdir(model_acc))
try:
os.mkdir(subdir)
except FileExistsError:
pass
prefix = os.path.join(subdir, model_acc)
hmmfile = prefix + HMM_SUFFIX
with open(hmmfile, "wt") as fh:
fh.write(hmm)
seqfile = prefix + SEQ_SUFFIX
f = executor.submit(run_hmmemit, hmmfile, seqfile)
fs[f] = model_acc
models.add(model_acc)
done, not_done = futures.wait(fs)
if not_done:
shutil.rmtree(workdir)
raise RuntimeError(f"{len(not_done)} error(s)")
elif num_duplicates:
shutil.rmtree(workdir)
raise RuntimeError(f"HMM database {hmmdb} contains "
f"{num_duplicates} duplicated models.")
logger.info("searching consensus sequences")
fs = {}
for model_acc in models:
prefix = os.path.join(workdir, getsubdir(model_acc), model_acc)
seqfile = prefix + SEQ_SUFFIX
outfile = prefix + OUT_SUFFIX
domfile = prefix + DOM_SUFFIX
f = executor.submit(run_hmmscan, hmmdb, seqfile, domfile, outfile)
fs[f] = model_acc
con = cx_Oracle.connect(url)
cur = con.cursor()
cur2 = con.cursor()
cur2.setinputsizes(25, 25, cx_Oracle.DB_TYPE_BINARY_DOUBLE,
cx_Oracle.DB_TYPE_CLOB)
clan_sql = "INSERT INTO INTERPRO.CLAN VALUES (:1, :2, :3, :4)"
memb_sql = "INSERT INTO INTERPRO.CLAN_MEMBER VALUES (:1, :2, :3, :4)"
mtch_sql = "INSERT INTO INTERPRO.CLAN_MATCH VALUES (:1, :2, :3, :4)"
completed = errors = progress = 0
for f in futures.as_completed(fs):
model_acc = fs[f]
completed += 1
if not f.result():
logger.error(f"{model_acc}")
errors += 1
continue
prefix = os.path.join(workdir, getsubdir(model_acc), model_acc)
outfile = prefix + OUT_SUFFIX
domfile = prefix + DOM_SUFFIX
clan_acc, score = mem2clan[model_acc]
sequence = load_sequence(prefix + SEQ_SUFFIX)
try:
clan = clans_to_insert.pop(clan_acc)
except KeyError:
# Clan already inserted
pass
else:
cur.execute(clan_sql, (clan.accession, database.identifier,
clan.name, clan.description))
cur.execute(memb_sql, (clan_acc, model_acc, len(sequence), score))
matches = []
for target in load_hmmscan_results(outfile, domfile):
if target["accession"] == model_acc:
continue
domains = []
for dom in target["domains"]:
domains.append((
dom["coordinates"]["ali"]["start"],
dom["coordinates"]["ali"]["end"]
))
matches.append((
model_acc,
target["accession"],
target["evalue"],
json.dumps(domains)
))
if matches:
cur2.executemany(mtch_sql, matches)
pc = completed * 100 // len(fs)
if pc > progress:
progress = pc
logger.debug(f"{progress:>10}%")
con.commit()
cur.close()
cur2.close()
con.close()
size = calc_dir_size(workdir)
logger.info(f"disk usage: {size / 1024 ** 2:,.0f} MB")
shutil.rmtree(workdir)
if errors:
raise RuntimeError(f"{errors} error(s)")
def remove_hmm_duplicates():
prog = "python -m pyinterprod.interpro.clan"
description = ("Simple command line interface to stream an HMM file "
"without repeated models.")
parser = argparse.ArgumentParser(prog=prog, description=description)
parser.add_argument("hmmdb", help="an HMM file")
options = parser.parse_args()
accessions = set()
for acc, hmm in iter_models(options.hmmdb):
if acc in accessions:
continue
accessions.add(acc)
print(hmm, end='')
if __name__ == '__main__':
try:
remove_hmm_duplicates()
except BrokenPipeError as exc:
sys.exit(exc.errno)
|
py | 1a3b3c1ccaf99fa75e1c38104d67e8e2cf4a8434 | """
Django settings for bitcoinnodestats project.
Generated by 'django-admin startproject' using Django 1.9.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# Application definition
INSTALLED_APPS = [
'bitcoinnodestats',
'nodedata',
'bootstrap3',
'django_cron',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# 'debug_toolbar',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'bitcoinnodestats.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.static',
'django.template.context_processors.media',
],
},
},
]
WSGI_APPLICATION = 'bitcoinnodestats.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
#
# Bootstrap3
#
BOOTSTRAP3 = {
'jquery_url': '//code.jquery.com/jquery-2.1.4.min.js', #'//code.jquery.com/jquery.min.js',
'base_url': '//maxcdn.bootstrapcdn.com/bootstrap/3.3.6/', #'//netdna.bootstrapcdn.com/bootstrap/3.0.3/'
'css_url': os.path.join(STATIC_URL, 'bootstrap/css/bootstrap_cyborg.min.css'),
'theme_url': None,
'javascript_url': '//maxcdn.bootstrapcdn.com/bootstrap/3.3.6/js/bootstrap.min.js',
'horizontal_label_class': 'col-md-2',
'horizontal_field_class': 'col-md-4',
}
#
# django-cron
#
CRON_CLASSES = [
'nodedata.cronjobs.UpdateNodeData',
]
# import secret local settings
from bitcoinnodestats.local_settings import * |
py | 1a3b3c3892aa58f0f365ccb2e324182f1a994cae | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for training routines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import sys
import numpy as np
import six
from tensorflow.python import keras
from tensorflow.python.data.experimental.ops import cardinality
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras import callbacks
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import metrics as metrics_module
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
class BatchCounterCallback(callbacks.Callback):
def __init__(self):
self.batch_count = 0
def on_batch_end(self, *args, **kwargs):
self.batch_count += 1
class TestTrainingWithDatasetIterators(keras_parameterized.TestCase):
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_training_and_eval_methods_on_iterators_single_io(self):
model = testing_utils.get_small_mlp(1, 4, input_dim=3)
optimizer = 'rmsprop'
loss = 'mse'
metrics = ['mae', metrics_module.CategoricalAccuracy()]
model.compile(optimizer, loss, metrics=metrics,
run_eagerly=testing_utils.should_run_eagerly())
inputs = np.zeros((10, 3), np.float32)
targets = np.zeros((10, 4), np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
iterator = dataset_ops.make_one_shot_iterator(dataset)
model.fit(iterator, epochs=1, steps_per_epoch=2, verbose=1)
model.evaluate(iterator, steps=2, verbose=1)
model.predict(iterator, steps=2)
# Test with validation data
model.fit(iterator,
epochs=1, steps_per_epoch=2, verbose=0,
validation_data=iterator, validation_steps=2)
# Test with validation split
with self.assertRaisesRegexp(
ValueError, '`validation_split` argument is not supported '
'when input `x` is a dataset or a dataset iterator'):
model.fit(iterator,
epochs=1, steps_per_epoch=2, verbose=0,
validation_split=0.5, validation_steps=2)
# Test with sample weight.
sample_weight = np.random.random((10,))
with self.assertRaisesRegexp(
ValueError, '`sample_weight` argument is not supported '
'when input `x` is a dataset or a dataset iterator'):
model.fit(
iterator,
epochs=1,
steps_per_epoch=2,
verbose=0,
sample_weight=sample_weight)
# Test invalid usage
with self.assertRaisesRegexp(ValueError,
'you should not specify a target'):
model.fit(iterator, iterator,
epochs=1, steps_per_epoch=2, verbose=0)
with self.assertRaisesRegexp(
ValueError, 'the `steps_per_epoch` argument'):
model.fit(iterator, epochs=1, verbose=0)
with self.assertRaisesRegexp(ValueError,
'the `steps` argument'):
model.evaluate(iterator, verbose=0)
with self.assertRaisesRegexp(ValueError,
'the `steps` argument'):
model.predict(iterator, verbose=0)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_iterators_running_out_of_data(self):
model = testing_utils.get_small_mlp(1, 4, input_dim=3)
optimizer = 'rmsprop'
loss = 'mse'
metrics = ['mae']
model.compile(optimizer, loss, metrics=metrics,
run_eagerly=testing_utils.should_run_eagerly())
inputs = np.zeros((10, 3), np.float32)
targets = np.zeros((10, 4), np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(2)
dataset = dataset.batch(10)
iterator = dataset_ops.make_one_shot_iterator(dataset)
with test.mock.patch.object(logging, 'warning') as mock_log:
model.fit(iterator, epochs=1, steps_per_epoch=3, verbose=0)
self.assertRegexpMatches(
str(mock_log.call_args),
'dataset iterator ran out of data')
class TestTrainingWithDataset(keras_parameterized.TestCase):
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_calling_model_on_same_dataset(self):
if ((not testing_utils.should_run_eagerly())
and testing_utils.get_model_type() == 'subclass'
and context.executing_eagerly()):
self.skipTest('b/120673224')
model = testing_utils.get_small_mlp(1, 4, input_dim=3)
optimizer = 'rmsprop'
loss = 'mse'
metrics = ['mae']
model.compile(optimizer, loss, metrics=metrics,
run_eagerly=testing_utils.should_run_eagerly())
inputs = np.zeros((10, 3), np.float32)
targets = np.zeros((10, 4), np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
# Call fit with validation data
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
validation_data=dataset, validation_steps=2)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
validation_data=dataset, validation_steps=2)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_training_and_eval_methods_on_dataset(self):
model = testing_utils.get_small_mlp(1, 4, input_dim=3)
optimizer = 'rmsprop'
loss = 'mse'
metrics = ['mae', metrics_module.CategoricalAccuracy()]
model.compile(optimizer, loss, metrics=metrics,
run_eagerly=testing_utils.should_run_eagerly())
inputs = np.zeros((10, 3), np.float32)
targets = np.zeros((10, 4), np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat() # Infinite dataset.
dataset = dataset.batch(10)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
model.evaluate(dataset, steps=2, verbose=1)
model.predict(dataset, steps=2)
# Test with validation data
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
validation_data=dataset, validation_steps=2)
# Test with validation split
with self.assertRaisesRegexp(
ValueError, '`validation_split` argument is not supported '
'when input `x` is a dataset or a dataset iterator'):
model.fit(dataset,
epochs=1, steps_per_epoch=2, verbose=0,
validation_split=0.5, validation_steps=2)
# Test with sample weight.
sample_weight = np.random.random((10,))
with self.assertRaisesRegexp(
ValueError, '`sample_weight` argument is not supported '
'when input `x` is a dataset or a dataset iterator'):
model.fit(
dataset,
epochs=1,
steps_per_epoch=2,
verbose=0,
sample_weight=sample_weight)
# Test invalid usage
with self.assertRaisesRegexp(ValueError, 'The `batch_size` argument'
' must not be specified when using dataset'
' as an input.'):
model.fit(dataset, batch_size=10, epochs=1, steps_per_epoch=2,
verbose=0)
with self.assertRaisesRegexp(ValueError, 'The `batch_size` argument'
' must not be specified when using dataset'
' as an input.'):
model.predict(dataset, batch_size=10, steps=2, verbose=0)
with self.assertRaisesRegexp(ValueError, 'The `batch_size` argument'
' must not be specified when using dataset'
' as an input.'):
model.evaluate(dataset, batch_size=10, steps=2, verbose=0)
with self.assertRaisesRegexp(ValueError,
'you should not specify a target'):
model.fit(dataset, dataset,
epochs=1, steps_per_epoch=2, verbose=0)
# With an infinite dataset, `steps_per_epoch`/`steps` argument is required.
with self.assertRaisesRegexp(
ValueError, 'the `steps_per_epoch` argument'):
model.fit(dataset, epochs=1, verbose=0)
with self.assertRaisesRegexp(ValueError,
'the `steps` argument'):
model.evaluate(dataset, verbose=0)
with self.assertRaisesRegexp(ValueError,
'the `steps` argument'):
model.predict(dataset, verbose=0)
# TODO(b/123531973): Include tests using dataset_v1.
@keras_parameterized.run_with_all_model_types(exclude_models='sequential')
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_training_and_eval_methods_on_multi_input_output_dataset(self):
input_a = keras.layers.Input(shape=(3,), name='input_1')
input_b = keras.layers.Input(shape=(3,), name='input_2')
dense = keras.layers.Dense(4, name='dense')
dropout = keras.layers.Dropout(0.5, name='dropout')
branch_a = [input_a, dense]
branch_b = [input_b, dense, dropout]
model = testing_utils.get_multi_io_model(branch_a, branch_b)
model.compile(
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
input_a_np = np.random.random((10, 3)).astype(dtype=np.float32)
input_b_np = np.random.random((10, 3)).astype(dtype=np.float32)
output_d_np = np.random.random((10, 4)).astype(dtype=np.float32)
output_e_np = np.random.random((10, 4)).astype(dtype=np.float32)
# Test with tuples
dataset_tuple = dataset_ops.Dataset.from_tensor_slices((
(input_a_np, input_b_np), (output_d_np, output_e_np)))
dataset_tuple = dataset_tuple.repeat(100)
dataset_tuple = dataset_tuple.batch(10)
model.fit(dataset_tuple, epochs=1, steps_per_epoch=2, verbose=1)
model.evaluate(dataset_tuple, steps=2, verbose=1)
predict_dataset_tuple = dataset_ops.Dataset.from_tensor_slices(
(input_a_np, input_b_np))
# TODO(b/123360757): Remove below assertion once predict() supports
# muti-input datasets.
with self.assertRaisesRegexp(ValueError,
'Error when checking model input'):
model.predict(predict_dataset_tuple, steps=1)
# Test with dict
input_dict = {'input_1': input_a_np, 'input_2': input_b_np}
if testing_utils.get_model_type() == 'subclass':
output_dict = {'output_1': output_d_np, 'output_2': output_e_np}
else:
output_dict = {'dense': output_d_np, 'dropout': output_e_np}
dataset_dict = dataset_ops.Dataset.from_tensor_slices((
input_dict, output_dict))
dataset_dict = dataset_dict.repeat(100)
dataset_dict = dataset_dict.batch(10)
model.fit(dataset_dict, epochs=1, steps_per_epoch=2, verbose=1)
model.evaluate(dataset_dict, steps=2, verbose=1)
predict_dataset_dict = dataset_ops.Dataset.from_tensor_slices(
input_dict)
predict_dataset_dict = predict_dataset_dict.repeat(100)
predict_dataset_dict = predict_dataset_dict.batch(10)
model.predict(predict_dataset_dict, steps=1)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_dataset_with_sample_weights(self):
model = testing_utils.get_small_mlp(1, 4, input_dim=3)
optimizer = 'rmsprop'
loss = 'mse'
metrics = ['mae', metrics_module.CategoricalAccuracy()]
model.compile(optimizer, loss, metrics=metrics,
run_eagerly=testing_utils.should_run_eagerly())
inputs = np.zeros((10, 3), np.float32)
targets = np.zeros((10, 4), np.float32)
sample_weights = np.ones((10), np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets,
sample_weights))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
model.evaluate(dataset, steps=2, verbose=1)
model.predict(dataset, steps=2)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_dataset_with_sample_weights_correctness(self):
x = keras.layers.Input(shape=(1,), name='input')
y = keras.layers.Dense(
1, kernel_initializer='ones', bias_initializer='zeros', name='dense')(x)
model = keras.Model(x, y)
optimizer = 'rmsprop'
loss = 'mse'
model.compile(optimizer, loss)
inputs = np.array([[0], [1], [2], [3]], np.float32)
targets = np.array([[2], [4], [6], [8]], np.float32)
sample_weights = np.array([0.25, 0.5, 0.75, 1], np.float32)
ds = dataset_ops.Dataset.from_tensor_slices((inputs, targets,
sample_weights)).batch(2)
result = model.evaluate(ds, verbose=1)
    # The per sample loss is multiplied by the corresponding sample weight. The
# average of these weighted losses is the return value of the `evaluate`
# call. For example, in the test above the average weighted loss is
# calculated in the following manner:
# ((2-0)^2) * 0.25 + ((4-1)^2) * 0.5 + ((6-2)^2 * 0.75) + ((8-3)^2 * 1)
# equals 42.5 / 4 = 10.625
self.assertEqual(result, 10.625)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_dataset_with_sparse_labels(self):
model = testing_utils.get_small_mlp(1, 4, input_dim=3)
optimizer = 'rmsprop'
model.compile(
optimizer,
loss='sparse_categorical_crossentropy',
run_eagerly=testing_utils.should_run_eagerly())
inputs = np.zeros((10, 3), dtype=np.float32)
targets = np.random.randint(0, 4, size=10, dtype=np.int32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
@keras_parameterized.run_all_keras_modes
def test_dataset_fit_correctness(self):
class SumLayer(keras.layers.Layer):
def build(self, _):
self.w = self.add_weight('w', ())
def call(self, inputs):
return keras.backend.sum(inputs) + self.w * 0
model = keras.Sequential([SumLayer(input_shape=(2,))])
model.compile(
'rmsprop', loss='mae', run_eagerly=testing_utils.should_run_eagerly())
inputs = np.zeros((40, 2), dtype=np.float32)
inputs[10:20, :] = 2
inputs[20:30, :] = 1
inputs[30:, :] = 4
targets = np.zeros((40, 1), dtype=np.float32)
# Test correctness with `steps_per_epoch`.
train_dataset = dataset_ops.Dataset.from_tensor_slices(
(inputs, targets)).batch(10)
val_dataset = dataset_ops.Dataset.from_tensor_slices(
(inputs, targets)).batch(10)
history = model.fit(train_dataset,
epochs=2, steps_per_epoch=2, verbose=1,
validation_data=val_dataset, validation_steps=2)
self.assertListEqual(history.history['loss'],
[inputs[:20].sum() / 2, inputs[20:].sum() / 2])
# The validation dataset will be reset at the end of each validation run.
self.assertListEqual(history.history['val_loss'],
[inputs[:20].sum() / 2, inputs[:20].sum() / 2])
# Test correctness with dataset reset.
train_dataset = dataset_ops.Dataset.from_tensor_slices(
(inputs, targets)).batch(10)
val_dataset = dataset_ops.Dataset.from_tensor_slices(
(inputs, targets)).batch(10)
history = model.fit(train_dataset,
epochs=2, verbose=1, validation_data=val_dataset)
self.assertListEqual(history.history['loss'],
[inputs.sum() / 4, inputs.sum() / 4])
self.assertListEqual(history.history['val_loss'],
[inputs.sum() / 4, inputs.sum() / 4])
@tf_test_util.run_deprecated_v1
def test_dataset_input_shape_validation(self):
with self.cached_session():
model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
model.compile(optimizer='rmsprop', loss='mse')
# User forgets to batch the dataset
inputs = np.zeros((10, 3))
targets = np.zeros((10, 4))
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
with self.assertRaisesRegexp(
ValueError,
r'expected (.*?) to have shape \(3,\) but got array with shape \(1,\)'
):
model.train_on_batch(dataset)
# Wrong input shape
inputs = np.zeros((10, 5))
targets = np.zeros((10, 4))
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
with self.assertRaisesRegexp(ValueError,
r'expected (.*?) to have shape \(3,\)'):
model.train_on_batch(dataset)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_finite_dataset_known_cardinality_no_steps_arg(self):
model = testing_utils.get_small_mlp(1, 4, input_dim=3)
model.compile('rmsprop', 'mse',
run_eagerly=testing_utils.should_run_eagerly())
inputs = np.zeros((100, 3), dtype=np.float32)
targets = np.random.randint(0, 4, size=100, dtype=np.int32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.batch(10)
batch_counter = BatchCounterCallback()
history = model.fit(dataset, epochs=2, verbose=1, callbacks=[batch_counter])
self.assertLen(history.history['loss'], 2)
self.assertEqual(batch_counter.batch_count, 20)
model.evaluate(dataset)
out = model.predict(dataset)
self.assertEqual(out.shape[0], 100)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_finite_dataset_unknown_cardinality_no_steps_arg(self):
model = testing_utils.get_small_mlp(1, 4, input_dim=3)
model.compile('rmsprop', 'mse',
run_eagerly=testing_utils.should_run_eagerly())
inputs = np.zeros((100, 3), dtype=np.float32)
targets = np.random.randint(0, 4, size=100, dtype=np.int32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.filter(lambda x, y: True).batch(10)
self.assertEqual(keras.backend.get_value(cardinality.cardinality(dataset)),
cardinality.UNKNOWN)
batch_counter = BatchCounterCallback()
history = model.fit(dataset, epochs=2, verbose=1, callbacks=[batch_counter])
self.assertLen(history.history['loss'], 2)
self.assertEqual(batch_counter.batch_count, 20)
model.evaluate(dataset)
out = model.predict(dataset)
self.assertEqual(out.shape[0], 100)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_finite_dataset_unknown_cardinality_no_step_with_train_and_val(self):
class CaptureStdout(object):
def __enter__(self):
self._stdout = sys.stdout
string_io = six.StringIO()
sys.stdout = string_io
self._stringio = string_io
return self
def __exit__(self, *args):
self.output = self._stringio.getvalue()
sys.stdout = self._stdout
model = testing_utils.get_small_mlp(1, 4, input_dim=3)
model.compile(
'rmsprop', 'mse', run_eagerly=testing_utils.should_run_eagerly())
inputs = np.zeros((100, 3), dtype=np.float32)
targets = np.random.randint(0, 4, size=100, dtype=np.int32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.filter(lambda x, y: True).batch(10)
self.assertEqual(
keras.backend.get_value(cardinality.cardinality(dataset)),
cardinality.UNKNOWN)
batch_counter = BatchCounterCallback()
with CaptureStdout() as capture:
history = model.fit(
dataset,
epochs=2,
callbacks=[batch_counter],
validation_data=dataset.take(3))
lines = capture.output.splitlines()
self.assertIn('1/Unknown', lines[2])
self.assertIn('10/10', lines[-1])
self.assertLen(history.history['loss'], 2)
self.assertEqual(batch_counter.batch_count, 20)
model.evaluate(dataset)
out = model.predict(dataset)
self.assertEqual(out.shape[0], 100)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_finite_dataset_unknown_cardinality_out_of_data(self):
model = testing_utils.get_small_mlp(1, 4, input_dim=3)
model.compile('rmsprop', 'mse',
run_eagerly=testing_utils.should_run_eagerly())
inputs = np.zeros((100, 3), dtype=np.float32)
targets = np.random.randint(0, 4, size=100, dtype=np.int32)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.filter(lambda x, y: True).batch(10)
self.assertEqual(
keras.backend.get_value(cardinality.cardinality(dataset)),
cardinality.UNKNOWN)
batch_counter = BatchCounterCallback()
with test.mock.patch.object(logging, 'warning') as mock_log:
# steps_per_epoch (200) is greater than the dataset size (100). As this is
# unexpected, training will stop and not make it to the second epoch.
history = model.fit(
dataset,
epochs=2,
verbose=1,
callbacks=[batch_counter],
steps_per_epoch=200)
self.assertIn(
'Your dataset ran out of data; interrupting training. '
'Make sure that your dataset can generate at least '
'`steps_per_epoch * epochs` batches (in this case, 400 batches). '
'You may need to use the repeat() function when '
'building your dataset.', str(mock_log.call_args))
self.assertLen(history.history['loss'], 1)
self.assertEqual(batch_counter.batch_count, 10)
model.evaluate(dataset)
out = model.predict(dataset)
self.assertEqual(out.shape[0], 100)
class TestMetricsWithDatasetIterators(keras_parameterized.TestCase):
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_metrics_correctness_with_iterator(self):
layers = [
keras.layers.Dense(8, activation='relu', input_dim=4,
kernel_initializer='ones'),
keras.layers.Dense(1, activation='sigmoid', kernel_initializer='ones')
]
model = testing_utils.get_model_from_layers(layers, (4,))
model.compile(
loss='binary_crossentropy',
metrics=['accuracy', metrics_module.BinaryAccuracy()],
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly())
np.random.seed(123)
x = np.random.randint(10, size=(100, 4)).astype(np.float32)
y = np.random.randint(2, size=(100, 1)).astype(np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
dataset = dataset.batch(10)
iterator = dataset_ops.make_one_shot_iterator(dataset)
outs = model.evaluate(iterator, steps=10)
self.assertEqual(np.around(outs[1], decimals=1), 0.5)
self.assertEqual(np.around(outs[2], decimals=1), 0.5)
y = np.zeros((100, 1), dtype=np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
dataset = dataset.repeat(100)
dataset = dataset.batch(10)
iterator = dataset_ops.make_one_shot_iterator(dataset)
outs = model.evaluate(iterator, steps=10)
self.assertEqual(outs[1], 0.)
self.assertEqual(outs[2], 0.)
if __name__ == '__main__':
test.main()
|
py | 1a3b3cb624c19408e0684b2e830ce8f036fd0d71 | #!/usr/bin/python2.4
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility library containing various helpers used by the API."""
import re
CUSTOM_SERIALIZE_METHOD_NAME = 'serialize'
MARKUP_RE = re.compile(r'<([^>]*?)>')
def force_unicode(object):
""" Return the Unicode string version of object, with UTF-8 encoding. """
if isinstance(object, unicode):
return object
return unicode(str(object), 'utf-8')
def parse_markup(markup):
"""Parses a bit of markup into robot compatible text.
For now this is a rough approximation.
"""
def replace_tag(group):
if not group.groups:
return ''
tag = group.groups()[0].split(' ', 1)[0]
if (tag == 'p' or tag == 'br'):
return '\n'
return ''
return MARKUP_RE.sub(replace_tag, markup)
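# Illustrative sketch of the rough approximation above (the sample string is
# an assumption, not taken from the original module): <p> and <br> tags become
# newlines and every other tag is stripped, so
#   parse_markup('<p>Hello <b>robot</b> world</p>')
# returns '\nHello robot world'.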
def is_iterable(inst):
"""Returns whether or not this is a list, tuple, set or dict .
Note that this does not return true for strings.
"""
return hasattr(inst, '__iter__')
def is_dict(inst):
"""Returns whether or not the specified instance is a dict."""
return hasattr(inst, 'iteritems')
def is_user_defined_new_style_class(obj):
"""Returns whether or not the specified instance is a user-defined type."""
return type(obj).__module__ != '__builtin__'
def lower_camel_case(s):
"""Converts a string to lower camel case.
Examples:
foo => foo
foo_bar => fooBar
foo__bar => fooBar
foo_bar_baz => fooBarBaz
Args:
s: The string to convert to lower camel case.
Returns:
The lower camel cased string.
"""
return reduce(lambda a, b: a + (a and b.capitalize() or b), s.split('_'))
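# Step-by-step trace of the reduce() above, added for clarity (not part of the
# original module): for 'foo_bar_baz', s.split('_') gives ['foo', 'bar', 'baz'],
# and the lambda folds the pieces left to right:
#   'foo'    + 'Bar' -> 'fooBar'      (a is non-empty, so b.capitalize() is used)
#   'fooBar' + 'Baz' -> 'fooBarBaz'
# Empty segments produced by '__' contribute nothing, because ''.capitalize()
# is falsy and the a-and-b-or-b expression then falls back to the empty string.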
def non_none_dict(d):
"""return a copy of the dictionary without none values."""
return dict([a for a in d.items() if not a[1] is None])
def _serialize_attributes(obj):
"""Serializes attributes of an instance.
Iterates all attributes of an object and invokes serialize if they are
public and not callable.
Args:
obj: The instance to serialize.
Returns:
The serialized object.
"""
data = {}
for attr_name in dir(obj):
if attr_name.startswith('_'):
continue
attr = getattr(obj, attr_name)
if attr is None or callable(attr):
continue
# Looks okay, serialize it.
data[lower_camel_case(attr_name)] = serialize(attr)
return data
def _serialize_dict(d):
"""Invokes serialize on all of its key/value pairs.
Args:
d: The dict instance to serialize.
Returns:
The serialized dict.
"""
data = {}
for k, v in d.items():
data[lower_camel_case(k)] = serialize(v)
return data
def serialize(obj):
"""Serializes any instance.
If this is a user-defined instance
type, it will first check for a custom Serialize() function and use that
  if it exists. Otherwise, it will invoke serialize on all of its public
attributes. Lists and dicts are serialized trivially.
Args:
obj: The instance to serialize.
Returns:
The serialized object.
"""
if is_user_defined_new_style_class(obj):
if obj and hasattr(obj, CUSTOM_SERIALIZE_METHOD_NAME):
method = getattr(obj, CUSTOM_SERIALIZE_METHOD_NAME)
if callable(method):
return method()
return _serialize_attributes(obj)
elif is_dict(obj):
return _serialize_dict(obj)
elif is_iterable(obj):
return [serialize(v) for v in obj]
return obj
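# Illustrative usage sketch (the class below is hypothetical, not part of this
# module): a user-defined new-style class without a custom serialize() method
# has its public, non-callable attributes collected by _serialize_attributes
# and their names lower-camel-cased:
#
#   class SampleData(object):
#     def __init__(self):
#       self.blip_id = 'b+1'
#       self.child_ids = ['b+2', 'b+3']
#
#   serialize(SampleData())  # -> {'blipId': 'b+1', 'childIds': ['b+2', 'b+3']}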
class StringEnum(object):
"""Enum like class that is configured with a list of values.
This class effectively implements an enum for Elements, except for that
the actual values of the enums will be the string values.
"""
def __init__(self, *values):
for name in values:
setattr(self, name, name)
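# Illustrative usage sketch (the names below are assumptions, not defined in
# this module): each value passed to the constructor becomes an attribute whose
# value is the name itself, so members compare and serialize as plain strings.
#
#   COLOR = StringEnum('RED', 'GREEN', 'BLUE')
#   COLOR.RED == 'RED'  # True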
|