max_stars_repo_path stringlengths 4–245 | max_stars_repo_name stringlengths 7–115 | max_stars_count int64 101–368k | id stringlengths 2–8 | content stringlengths 6–1.03M
---|---|---|---|---|
tests/grafana_dashboards/client/test_connection.py | Rvhappen/grafana-dashboard-builder | 131 | 11077368 | # -*- coding: utf-8 -*-
# Copyright 2015-2019 grafana-dashboard-builder contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
try:
from urllib2 import Request
except ImportError:
from urllib.request import Request
from mock import MagicMock, patch
from requests_kerberos import HTTPKerberosAuth
from grafana_dashboards.client.connection import KerberosConnection, BasicAuthConnection, BearerAuthConnection
__author__ = '<NAME> <<EMAIL>>'
class Capture(object):
"""
    Class used in method-call verification; it captures the call argument so it can be inspected later.
"""
def __eq__(self, other):
"""
        Captures the argument and always returns True so that verification succeeds.
:return: True
"""
self.value = other
return True
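# Illustrative aside (not part of the original test module): the Capture pattern
# in miniature. Because Capture.__eq__ always returns True, assert_called_with
# passes, and the real argument is left behind in capture.value for inspection.
#
#   mock = MagicMock()
#   mock('some argument')
#   capture = Capture()
#   mock.assert_called_with(capture)
#   capture.value  # -> 'some argument'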
def test_connection():
connection = BasicAuthConnection('username', 'password', 'https://host')
connection._opener = MagicMock()
# noinspection PyProtectedMember
connection._opener.open().read.return_value = '{"hello":"world"}'
assert connection.make_request('/uri', {'it\'s': 'alive'}) == {'hello': 'world'}
request = Request('https://host/uri',
'{"it\'s": "alive"}',
headers={
'Content-type': 'application/json',
'Accept': 'application/json',
'Authorization': b'Basic dXNlcm5hbWU6cGFzc3dvcmQ='
})
capture = Capture()
# noinspection PyProtectedMember
connection._opener.open.assert_called_with(capture)
assert request.get_full_url() == capture.value.get_full_url()
assert request.header_items() == capture.value.header_items()
assert request.get_method() == capture.value.get_method()
assert request.data.encode('utf-8') == capture.value.data
def test_connection_with_token():
connection = BearerAuthConnection('token', 'https://host')
connection._opener = MagicMock()
# noinspection PyProtectedMember
connection._opener.open().read.return_value = '{"hello":"world"}'
assert connection.make_request('/uri', {'it\'s': 'alive'}) == {'hello': 'world'}
request = Request('https://host/uri',
'{"it\'s": "alive"}',
headers={
'Content-type': 'application/json',
'Accept': 'application/json',
'Authorization': 'Bearer token'
})
capture = Capture()
# noinspection PyProtectedMember
connection._opener.open.assert_called_with(capture)
assert request.get_full_url() == capture.value.get_full_url()
assert request.header_items() == capture.value.header_items()
assert request.get_method() == capture.value.get_method()
assert request.data.encode('utf-8') == capture.value.data
@patch('requests.post')
def test_connection_with_kerberos(post):
connection = KerberosConnection('https://host')
post().json.return_value = {'hello': 'world'}
assert connection.make_request('/uri', {'it\'s': 'alive'}) == {'hello': 'world'}
capture = Capture()
post.assert_called_with('https://host/uri', auth=capture, json={"it's": 'alive'}, verify=False)
assert isinstance(capture.value, HTTPKerberosAuth)
|
src/france/utils/examples/qt_api_demo.py | ClementRolinat/covidtracker-data | 314 | 11077380 | #!/usr/bin/env python
import svg_stack as ss
doc = ss.Document()
layout1 = ss.HBoxLayout()
layout1.addSVG('red_ball.svg',alignment=ss.AlignTop|ss.AlignHCenter)
layout1.addSVG('blue_triangle.svg',alignment=ss.AlignCenter)
layout2 = ss.VBoxLayout()
layout2.addSVG('red_ball.svg',alignment=ss.AlignCenter)
layout2.addSVG('red_ball.svg',alignment=ss.AlignCenter)
layout2.addSVG('red_ball.svg',alignment=ss.AlignCenter)
layout1.addLayout(layout2)
doc.setLayout(layout1)
doc.save('qt_api_test.svg')
|
all/agents/vqn.py | kcorder/autonomous-learning-library | 584 | 11077384 | import torch
from torch.nn.functional import mse_loss
from ._agent import Agent
from ._parallel_agent import ParallelAgent
from .dqn import DQNTestAgent
class VQN(ParallelAgent):
'''
Vanilla Q-Network (VQN).
VQN is an implementation of the Q-learning algorithm found in the Sutton and Barto (2018) textbook.
    Q-learning algorithms attempt to learn the optimal policy while executing a (generally)
    suboptimal policy (typically epsilon-greedy). In theory, this allows the agent to gain the benefits
    of exploration without sacrificing the performance of the final policy. However, the cost of this
    is that Q-learning is generally less stable than its on-policy brethren, SARSA.
http://www.cs.rhul.ac.uk/~chrisw/new_thesis.pdf
Args:
q (QNetwork): An Approximation of the Q function.
policy (GreedyPolicy): A policy derived from the Q-function.
discount_factor (float): Discount factor for future rewards.
'''
def __init__(self, q, policy, discount_factor=0.99):
self.q = q
self.policy = policy
self.discount_factor = discount_factor
self._state = None
self._action = None
def act(self, state):
self._train(state.reward, state)
action = self.policy.no_grad(state)
self._state = state
self._action = action
return action
def eval(self, state):
return self.policy.eval(state)
def _train(self, reward, next_state):
if self._state:
# forward pass
value = self.q(self._state, self._action)
# compute target
target = reward + self.discount_factor * torch.max(self.q.target(next_state), dim=1)[0]
# compute loss
loss = mse_loss(value, target)
# backward pass
self.q.reinforce(loss)
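# Illustrative aside (not part of the original module): the TD target computed in
# _train() above, spelled out with plain tensors (the values are made up).
#
#   q_next = torch.tensor([[1.0, 3.0], [0.5, 0.2]])   # Q(s', .) for two envs
#   reward = torch.tensor([1.0, 0.0])
#   target = reward + 0.99 * torch.max(q_next, dim=1)[0]
#   # -> tensor([3.9700, 0.4950])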
class VQNTestAgent(Agent, ParallelAgent):
def __init__(self, policy):
self.policy = policy
def act(self, state):
return self.policy.eval(state)
|
src/einsteinpy/symbolic/predefined/ernst.py | bibek22/einsteinpy | 485 | 11077395 | from sympy import diag, exp, sin, sqrt, symbols
from einsteinpy.symbolic import constants
from einsteinpy.symbolic.metric import MetricTensor
def Ernst(B=symbols("B"), M=symbols("M")):
"""
Black holes in a magnetic universe.
J. Math. Phys., 17:54–56, 1976.
<NAME>.
Parameters
----------
M : ~sympy.core.basic.Basic or int or float
Mass of the black hole. Defaults to ``M``.
B : ~sympy.core.basic.Basic or int or float
        The magnetic field strength.
Defaults to ``B``.
"""
coords = symbols("t r theta phi")
t, r, th, ph = coords
# Helper functions
lambd = 1 + ((B * r * sin(th)) ** 2)
w = 1 - ((2 * M) / r)
# define the metric
metric = diag(
-1 * (lambd ** 2) * w,
(lambd ** 2) / w,
((r * lambd) ** 2),
(((r * sin(th)) / lambd) ** 2),
).tolist()
return MetricTensor(metric, coords, "ll", name="ErnstMetric")
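# Illustrative usage sketch (not part of the original module): calling Ernst()
# with no arguments keeps B and M symbolic.
#
#   ernst_metric = Ernst()      # MetricTensor in (t, r, theta, phi) coordinates
#   ernst_metric.tensor()       # the 4x4 matrix of metric components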
|
tacker/tests/unit/objects/test_vnf_software_images.py | takahashi-tsc/tacker | 116 | 11077405 | # Copyright (c) 2019 NTT DATA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tacker.common import exceptions
from tacker import context
from tacker import objects
from tacker.tests.unit.db.base import SqlTestCase
from tacker.tests.unit.objects import fakes
from tacker.tests import uuidsentinel
class TestVnfSoftwareImages(SqlTestCase):
def setUp(self):
super(TestVnfSoftwareImages, self).setUp()
self.context = context.get_admin_context()
self.vnf_package = self._create_vnf_package()
self.vnf_deployment_flavour = self._create_vnf_deployment_flavour()
self.vnf_softwate_images = self._create_vnf_softwate_images()
def _create_vnf_package(self):
vnfpkgm = objects.VnfPackage(context=self.context,
**fakes.vnf_package_data)
vnfpkgm.create()
return vnfpkgm
def _create_vnf_deployment_flavour(self):
flavour_data = fakes.vnf_deployment_flavour
flavour_data.update({'package_uuid': self.vnf_package.id})
vnf_deployment_flavour = objects.VnfDeploymentFlavour(
context=self.context, **flavour_data)
vnf_deployment_flavour.create()
return vnf_deployment_flavour
def _create_vnf_softwate_images(self):
software_image = fakes.software_image
software_image.update(
{'flavour_uuid': self.vnf_deployment_flavour.id})
vnf_soft_image_obj = objects.VnfSoftwareImage(
context=self.context, **software_image)
vnf_soft_image_obj.create()
return vnf_soft_image_obj
def test_create(self):
software_image = fakes.software_image
software_image.update(
{'flavour_uuid': self.vnf_deployment_flavour.id})
vnf_soft_image_obj = objects.VnfSoftwareImage(
context=self.context, **software_image)
vnf_soft_image_obj.create()
self.assertTrue(vnf_soft_image_obj.id)
def test_software_image_create_with_id(self):
software_image = fakes.software_image
software_image.update({'id': uuidsentinel.id})
vnf_soft_image_obj = objects.VnfSoftwareImage(
context=self.context, **software_image)
self.assertRaises(
exceptions.ObjectActionError,
vnf_soft_image_obj.create)
def test_get_by_id(self):
vnf_software_images = objects.VnfSoftwareImage.get_by_id(
self.context, self.vnf_softwate_images.id, expected_attrs=None)
self.compare_obj(self.vnf_softwate_images, vnf_software_images)
def test_get_by_id_with_no_existing_id(self):
self.assertRaises(
exceptions.VnfSoftwareImageNotFound,
objects.VnfSoftwareImage.get_by_id, self.context,
uuidsentinel.invalid_uuid)
def test_attribute_with_valid_data(self):
data = {'id': self.vnf_softwate_images.id}
vnf_software_image_obj = objects.VnfSoftwareImage(
context=self.context, **data)
vnf_software_image_obj.obj_load_attr('name')
self.assertEqual('test', vnf_software_image_obj.name)
def test_invalid_attribute(self):
self.assertRaises(exceptions.ObjectActionError,
self.vnf_softwate_images.obj_load_attr, 'invalid')
def test_obj_load_attr_without_context(self):
data = {'id': self.vnf_softwate_images.id}
vnf_software_image_obj = objects.VnfSoftwareImage(**data)
self.assertRaises(exceptions.OrphanedObjectError,
vnf_software_image_obj.obj_load_attr, 'name')
def test_obj_load_attr_without_id_in_object(self):
data = {'name': self.vnf_softwate_images.name}
vnf_software_image_obj = objects.VnfSoftwareImage(
context=self.context, **data)
self.assertRaises(exceptions.ObjectActionError,
vnf_software_image_obj.obj_load_attr, 'name')
|
kivy/tests/test_fbo_py2py3.py | Kolandiolaka/kivy | 13,889 | 11077454 | import unittest
from kivy.tests.common import GraphicUnitTest
from kivy.uix.widget import Widget
from kivy.graphics import Fbo, Color, Rectangle
class FboTest(Widget):
def __init__(self, **kwargs):
super(FboTest, self).__init__(**kwargs)
self.positions = [
(260.0, 260.0),
(192.0, 192.0),
(96.0, 192.0),
(192.0, 96.0),
(96.0, 96.0),
(32.0, 192.0),
(192.0, 32.0),
(32.0, 32.0)
]
self.fbo = Fbo(size=(256, 256))
with self.fbo:
Color(0.56789, 0, 0, 1)
Rectangle(size=(256, 64))
Color(0, 0.56789, 0, 1)
Rectangle(size=(64, 256))
Color(0.56789, 0, 0, .5)
Rectangle(pos=(64, 64), size=(192, 64))
Color(0, 0.56789, 0, .5)
Rectangle(pos=(64, 64), size=(64, 192))
self.fbo.draw()
class FBOPy2Py3TestCase(GraphicUnitTest):
def test_fbo_get_pixel_color(self):
fbow = FboTest()
render_error = 2
values = [
# out of bounds of FBO
(tuple, int, (0, 0, 0, 0)),
# in FBO, black
(list, int, [0, 0, 0, 0]),
# Color(0, 0.56789, 0, .5)
(list, int, [0, 72, 0, 128]),
# Color(0.56789, 0, 0, .5)
(list, int, [72, 0, 0, 128]),
# overlap above 2 w/ alpha
(list, int, [36, 72, 0, 255]),
# Color(0, 0.56789, 0, 1)
(list, int, [0, 145, 0, 255]),
# Color(0.56789, 0, 0, 1)
(list, int, [145, 0, 0, 255]),
# overlap above 2 w/o alpha
(list, int, [0, 145, 0, 255]),
]
for i, pos in enumerate(fbow.positions):
c = fbow.fbo.get_pixel_color(pos[0], pos[1])
# returned class
self.assertTrue(isinstance(c, values[i][0]))
# returned types in container
for v in c:
self.assertTrue(isinstance(v, values[i][1]))
# returned values
for j, val in enumerate(c):
self.assertAlmostEqual(
val, values[i][2][j],
delta=render_error
)
if __name__ == '__main__':
unittest.main()
|
cv2/tkinter-CV - with buttons to start and stop/main.py | whitmans-max/python-examples | 140 | 11077468 | #!/usr/bin/env python3
import tkinter as tk
from PIL import Image, ImageTk
import cv2
# --- functions ---
def play():
'''
start stream (run_camera and update_image)
and change state of buttons
'''
global run_camera
if not run_camera:
run_camera = True
button_play['state'] = 'disabled'
button_stop['state'] = 'normal'
update_image()
def stop():
'''
stop stream (run_camera)
and change state of buttons
'''
global run_camera
if run_camera:
run_camera = False
button_play['state'] = 'normal'
button_stop['state'] = 'disabled'
def update_image():
    '''executed frequently; updates the frame/image shown on the canvas'''
    global image  # keep the module-level frame current so save_image() saves the latest frame
    # read one frame (and the success flag)
    ret, frame = cap.read()
    if not ret:
        print("Can't read from camera")
    else:
        image = Image.fromarray(frame)
        photo.paste(image)
if run_camera:
root.after(10, update_image)
def save_image():
'''save current frame in file'''
image.save('output.png')
# --- main ---
# open stream
cap = cv2.VideoCapture(0) # local (built-in) camera
# check if opened
if not cap.isOpened():
print("Can't open camera")
cap.release()
exit(1)
# get first frame
ret, frame = cap.read()
if not ret:
print("Can't read from camera")
cap.release()
exit(1)
# ---
# control stream on canvas
run_camera = False
# ---
root = tk.Tk()
image = Image.fromarray(frame)
photo = ImageTk.PhotoImage(image)
canvas = tk.Canvas(root, width=photo.width(), height=photo.height())
canvas.pack(fill='both', expand=True)
canvas.create_image((0,0), image=photo, anchor='nw')
# ---
buttons = tk.Frame(root)
buttons.pack(fill='x')
button_play = tk.Button(buttons, text="Play", command=play)
button_play.pack(side='left')
button_stop = tk.Button(buttons, text="Stop", command=stop, state='disabled')
button_stop.pack(side='left')
button_save = tk.Button(buttons, text="Save Image", command=save_image)
button_save.pack(side='left')
# ---
root.mainloop()
# ---
# close stream
cap.release()
|
examples/quicksqlviews/app/models.py | NeolithEra/Flask-AppBuilder | 3,862 | 11077473 | import datetime
from flask_appbuilder import Model
from sqlalchemy import Column, Date, ForeignKey, Integer, MetaData, String, Table
from sqlalchemy.orm import relationship
mindate = datetime.date(datetime.MINYEAR, 1, 1)
_mt = MetaData()
class ContactGroup(Model):
id = Column(Integer, primary_key=True)
name = Column(String(50), unique=True, nullable=False)
def __repr__(self):
return self.name
class Gender(Model):
id = Column(Integer, primary_key=True)
name = Column(String(50), unique=True, nullable=False)
def __repr__(self):
return self.name
class ContactGroupView(Model):
"""
First create the view on SQL:
create view contact_group_view as
select g.id as id, count(*) as group_count, g.name as group_name
from contact_group as g, contact as c
where c.contact_group_id=g.id group by g.name;
"""
__table__ = Table(
"contact_group_view",
_mt,
Column("id", Integer, primary_key=True),
Column("group_count", Integer),
Column("group_name", String(150)),
)
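    # Illustrative aside (not part of the original example): once the SQL view
    # exists, the mapped model is queried like any other. The name `db` below is
    # an assumption of this sketch, standing for the app's SQLAlchemy session holder.
    #
    #   db.session.query(ContactGroupView).all()
    #   # -> one row per group with its contact count and name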
class Contact(Model):
id = Column(Integer, primary_key=True)
name = Column(String(150), unique=True, nullable=False)
address = Column(String(564))
birthday = Column(Date, nullable=True)
personal_phone = Column(String(20))
personal_celphone = Column(String(20))
contact_group_id = Column(Integer, ForeignKey("contact_group.id"), nullable=False)
contact_group = relationship("ContactGroup")
gender_id = Column(Integer, ForeignKey("gender.id"), nullable=False)
gender = relationship("Gender")
def __repr__(self):
return self.name
def month_year(self):
date = self.birthday or mindate
return datetime.datetime(date.year, date.month, 1) or mindate
def year(self):
date = self.birthday or mindate
return datetime.datetime(date.year, 1, 1)
|
setup.py | andreubernadserra/cadquery | 1,423 | 11077496 | # Copyright 2015 Parametric Products Intellectual Holdings, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from setuptools import setup, find_packages
# if we are building in Travis from a tag, use the tag as the version
version = "2.1"
if "TRAVIS_TAG" in os.environ.keys():
version = os.environ["TRAVIS_TAG"]
setup(
name="cadquery",
version=version,
url="https://github.com/dcowden/cadquery",
license="Apache Public License 2.0",
author="<NAME>",
author_email="<EMAIL>",
description="CadQuery is a parametric scripting language for creating and traversing CAD models",
long_description=open("README.md").read(),
packages=find_packages(exclude=("tests",)),
include_package_data=True,
zip_safe=False,
platforms="any",
test_suite="tests",
classifiers=[
"Development Status :: 5 - Production/Stable",
#'Development Status :: 6 - Mature',
#'Development Status :: 7 - Inactive',
"Intended Audience :: Developers",
"Intended Audience :: End Users/Desktop",
"Intended Audience :: Information Technology",
"Intended Audience :: Science/Research",
"Intended Audience :: System Administrators",
"License :: OSI Approved :: Apache Software License",
"Operating System :: POSIX",
"Operating System :: MacOS",
"Operating System :: Unix",
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Internet",
"Topic :: Scientific/Engineering",
],
)
|
tensorlayer/layers/extend.py | Howdy-Personally/tensorlayer-master | 4,484 | 11077502 | #! /usr/bin/python
# -*- coding: utf-8 -*-
import tensorflow as tf
from tensorlayer import logging
from tensorlayer.decorators import deprecated_alias
from tensorlayer.layers.core import Layer
__all__ = [
'ExpandDims',
'Tile',
]
class ExpandDims(Layer):
"""
The :class:`ExpandDims` class inserts a dimension of 1 into a tensor's shape,
see `tf.expand_dims() <https://www.tensorflow.org/api_docs/python/tf/expand_dims>`__ .
Parameters
----------
axis : int
The dimension index at which to expand the shape of input.
name : str
A unique layer name. If None, a unique name will be automatically assigned.
Examples
--------
>>> x = tl.layers.Input([10, 3], name='in')
>>> y = tl.layers.ExpandDims(axis=-1)(x)
[10, 3, 1]
"""
def __init__(
self,
axis,
name=None # 'expand_dims',
):
super(ExpandDims, self).__init__(name)
self.axis = axis
self.build((None, ))
self._built = True
logging.info("ExpandDims %s: axis: %d" % (self.name, self.axis))
def __repr__(self):
s = '{classname}('
s += 'axis={axis},'
s += 'name={name}'
s += ")"
return s.format(classname=self.__class__.__name__, **self.__dict__)
def build(self, inputs_shape):
pass
# @tf.function
def forward(self, inputs):
outputs = tf.expand_dims(inputs, axis=self.axis, name=self.name)
return outputs
class Tile(Layer):
"""
The :class:`Tile` class constructs a tensor by tiling a given tensor,
see `tf.tile() <https://www.tensorflow.org/api_docs/python/tf/tile>`__ .
Parameters
----------
multiples: tensor
Must be one of the following types: int32, int64.
1-D Length must be the same as the number of dimensions in input.
name : None or str
A unique layer name.
Examples
--------
>>> x = tl.layers.Input([10, 3], name='in')
>>> y = tl.layers.Tile(multiples=[2, 3])(x)
[20, 9]
"""
def __init__(self, multiples=None, name=None): #'tile'):
super(Tile, self).__init__(name)
self.multiples = multiples
self.build((None, ))
self._built = True
logging.info("Tile %s: multiples: %s" % (self.name, self.multiples))
def __repr__(self):
s = '{classname}('
s += 'multiples={multiples},'
s += 'name={name}'
s += ")"
return s.format(classname=self.__class__.__name__, **self.__dict__)
def build(self, inputs_shape):
pass
# @tf.function
def forward(self, inputs):
outputs = tf.tile(inputs, multiples=self.multiples, name=self.name)
return outputs
|
tests/test_async_cursor.py | jobteaser-oss/PyAthena | 363 | 11077508 | # -*- coding: utf-8 -*-
import contextlib
import time
import unittest
from datetime import datetime
from random import randint
from pyathena.async_cursor import AsyncCursor, AsyncDictCursor
from pyathena.error import NotSupportedError, ProgrammingError
from pyathena.model import AthenaQueryExecution
from pyathena.result_set import AthenaResultSet
from tests import WithConnect
from tests.conftest import SCHEMA
from tests.util import with_cursor
class TestAsyncCursor(unittest.TestCase, WithConnect):
@with_cursor(cursor_class=AsyncCursor)
def test_fetchone(self, cursor):
query_id, future = cursor.execute("SELECT * FROM one_row")
result_set = future.result()
self.assertEqual(result_set.rownumber, 0)
self.assertEqual(result_set.fetchone(), (1,))
self.assertEqual(result_set.rownumber, 1)
self.assertIsNone(result_set.fetchone())
self.assertIsNotNone(result_set.query_id)
self.assertIsNotNone(result_set.query)
self.assertEqual(result_set.state, AthenaQueryExecution.STATE_SUCCEEDED)
self.assertIsNone(result_set.state_change_reason)
self.assertIsNotNone(result_set.completion_date_time)
self.assertIsInstance(result_set.completion_date_time, datetime)
self.assertIsNotNone(result_set.submission_date_time)
self.assertIsInstance(result_set.submission_date_time, datetime)
self.assertIsNotNone(result_set.data_scanned_in_bytes)
self.assertIsNotNone(result_set.engine_execution_time_in_millis)
self.assertIsNotNone(result_set.query_queue_time_in_millis)
self.assertIsNotNone(result_set.total_execution_time_in_millis)
# self.assertIsNotNone(result_set.query_planning_time_in_millis) # TODO flaky test
# self.assertIsNotNone(result_set.service_processing_time_in_millis) # TODO flaky test
self.assertIsNotNone(result_set.output_location)
self.assertIsNone(result_set.data_manifest_location)
@with_cursor(cursor_class=AsyncCursor)
def test_fetchmany(self, cursor):
query_id, future = cursor.execute("SELECT * FROM many_rows LIMIT 15")
result_set = future.result()
actual1 = result_set.fetchmany(10)
self.assertEqual(len(actual1), 10)
self.assertEqual(actual1, [(i,) for i in range(10)])
actual2 = result_set.fetchmany(10)
self.assertEqual(len(actual2), 5)
self.assertEqual(actual2, [(i,) for i in range(10, 15)])
@with_cursor(cursor_class=AsyncCursor)
def test_fetchall(self, cursor):
query_id, future = cursor.execute("SELECT * FROM one_row")
result_set = future.result()
self.assertEqual(result_set.fetchall(), [(1,)])
query_id, future = cursor.execute("SELECT a FROM many_rows ORDER BY a")
result_set = future.result()
self.assertEqual(result_set.fetchall(), [(i,) for i in range(10000)])
@with_cursor(cursor_class=AsyncCursor)
def test_iterator(self, cursor):
query_id, future = cursor.execute("SELECT * FROM one_row")
result_set = future.result()
self.assertEqual(list(result_set), [(1,)])
self.assertRaises(StopIteration, result_set.__next__)
@with_cursor(cursor_class=AsyncCursor)
def test_arraysize(self, cursor):
cursor.arraysize = 5
query_id, future = cursor.execute("SELECT * FROM many_rows LIMIT 20")
result_set = future.result()
self.assertEqual(len(result_set.fetchmany()), 5)
@with_cursor(cursor_class=AsyncCursor)
def test_arraysize_default(self, cursor):
self.assertEqual(cursor.arraysize, AthenaResultSet.DEFAULT_FETCH_SIZE)
@with_cursor(cursor_class=AsyncCursor)
def test_invalid_arraysize(self, cursor):
with self.assertRaises(ProgrammingError):
cursor.arraysize = 10000
with self.assertRaises(ProgrammingError):
cursor.arraysize = -1
@with_cursor(cursor_class=AsyncCursor)
def test_description(self, cursor):
query_id, future = cursor.execute("SELECT 1 AS foobar FROM one_row")
result_set = future.result()
self.assertEqual(
result_set.description,
[("foobar", "integer", None, None, 10, 0, "UNKNOWN")],
)
future = cursor.description(query_id)
description = future.result()
self.assertEqual(result_set.description, description)
@with_cursor(cursor_class=AsyncCursor)
def test_query_execution(self, cursor):
query = "SELECT * FROM one_row"
query_id, future = cursor.execute(query)
result_set = future.result()
future = cursor.query_execution(query_id)
query_execution = future.result()
self.assertEqual(query_execution.database, SCHEMA)
self.assertIsNotNone(query_execution.query_id)
self.assertEqual(query_execution.query, query)
self.assertEqual(
query_execution.statement_type, AthenaQueryExecution.STATEMENT_TYPE_DML
)
self.assertEqual(query_execution.state, AthenaQueryExecution.STATE_SUCCEEDED)
self.assertIsNone(query_execution.state_change_reason)
self.assertIsNotNone(query_execution.completion_date_time)
self.assertIsInstance(query_execution.completion_date_time, datetime)
self.assertIsNotNone(query_execution.submission_date_time)
self.assertIsInstance(query_execution.submission_date_time, datetime)
self.assertIsNotNone(query_execution.data_scanned_in_bytes)
self.assertIsNotNone(query_execution.engine_execution_time_in_millis)
self.assertIsNotNone(query_execution.query_queue_time_in_millis)
self.assertIsNotNone(query_execution.total_execution_time_in_millis)
# TODO flaky test
# self.assertIsNotNone(query_execution.query_planning_time_in_millis)
# self.assertIsNotNone(query_execution.service_processing_time_in_millis)
self.assertIsNotNone(query_execution.output_location)
self.assertIsNone(query_execution.encryption_option)
self.assertIsNone(query_execution.kms_key)
self.assertEqual(query_execution.work_group, "primary")
self.assertEqual(result_set.database, query_execution.database)
self.assertEqual(result_set.query_id, query_execution.query_id)
self.assertEqual(result_set.query, query_execution.query)
self.assertEqual(result_set.statement_type, query_execution.statement_type)
self.assertEqual(result_set.state, query_execution.state)
self.assertEqual(
result_set.state_change_reason, query_execution.state_change_reason
)
self.assertEqual(
result_set.completion_date_time, query_execution.completion_date_time
)
self.assertEqual(
result_set.submission_date_time, query_execution.submission_date_time
)
self.assertEqual(
result_set.data_scanned_in_bytes, query_execution.data_scanned_in_bytes
)
self.assertEqual(
result_set.engine_execution_time_in_millis,
query_execution.engine_execution_time_in_millis,
)
self.assertEqual(
result_set.query_queue_time_in_millis,
query_execution.query_queue_time_in_millis,
)
self.assertEqual(
result_set.total_execution_time_in_millis,
query_execution.total_execution_time_in_millis,
)
self.assertEqual(
result_set.query_planning_time_in_millis,
query_execution.query_planning_time_in_millis,
)
self.assertEqual(
result_set.service_processing_time_in_millis,
query_execution.service_processing_time_in_millis,
)
self.assertEqual(result_set.output_location, query_execution.output_location)
self.assertEqual(
result_set.data_manifest_location, query_execution.data_manifest_location
)
self.assertEqual(
result_set.encryption_option, query_execution.encryption_option
)
self.assertEqual(result_set.kms_key, query_execution.kms_key)
self.assertEqual(result_set.work_group, query_execution.work_group)
@with_cursor(cursor_class=AsyncCursor)
def test_poll(self, cursor):
query_id, _ = cursor.execute("SELECT * FROM one_row")
future = cursor.poll(query_id)
query_execution = future.result()
self.assertIn(
query_execution.state,
[
AthenaQueryExecution.STATE_QUEUED,
AthenaQueryExecution.STATE_RUNNING,
AthenaQueryExecution.STATE_SUCCEEDED,
AthenaQueryExecution.STATE_FAILED,
AthenaQueryExecution.STATE_CANCELLED,
],
)
@with_cursor(cursor_class=AsyncCursor)
def test_bad_query(self, cursor):
query_id, future = cursor.execute(
"SELECT does_not_exist FROM this_really_does_not_exist"
)
result_set = future.result()
self.assertEqual(result_set.state, AthenaQueryExecution.STATE_FAILED)
self.assertIsNotNone(result_set.state_change_reason)
@with_cursor(cursor_class=AsyncCursor)
def test_cancel(self, cursor):
query_id, future = cursor.execute(
"""
SELECT a.a * rand(), b.a * rand()
FROM many_rows a
CROSS JOIN many_rows b
"""
)
time.sleep(randint(1, 5))
cursor.cancel(query_id)
result_set = future.result()
self.assertEqual(result_set.state, AthenaQueryExecution.STATE_CANCELLED)
# self.assertIsNotNone(result_set.state_change_reason) # TODO flaky test
self.assertIsNone(result_set.description)
self.assertIsNone(result_set.fetchone())
self.assertEqual(result_set.fetchmany(), [])
self.assertEqual(result_set.fetchall(), [])
def test_open_close(self):
with contextlib.closing(self.connect()) as conn:
with conn.cursor(AsyncCursor):
pass
def test_no_ops(self):
conn = self.connect()
cursor = conn.cursor(AsyncCursor)
self.assertRaises(
NotSupportedError, lambda: cursor.executemany("SELECT * FROM one_row", [])
)
cursor.close()
conn.close()
class TestAsyncDictCursor(unittest.TestCase, WithConnect):
@with_cursor(cursor_class=AsyncDictCursor)
def test_fetchone(self, cursor):
query_id, future = cursor.execute("SELECT * FROM one_row")
result_set = future.result()
self.assertEqual(result_set.fetchone(), {"number_of_rows": 1})
@with_cursor(cursor_class=AsyncDictCursor)
def test_fetchmany(self, cursor):
query_id, future = cursor.execute("SELECT * FROM many_rows LIMIT 15")
result_set = future.result()
actual1 = result_set.fetchmany(10)
self.assertEqual(len(actual1), 10)
self.assertEqual(actual1, [{"a": i} for i in range(10)])
actual2 = result_set.fetchmany(10)
self.assertEqual(len(actual2), 5)
self.assertEqual(actual2, [{"a": i} for i in range(10, 15)])
@with_cursor(cursor_class=AsyncDictCursor)
def test_fetchall(self, cursor):
query_id, future = cursor.execute("SELECT * FROM one_row")
result_set = future.result()
self.assertEqual(result_set.fetchall(), [{"number_of_rows": 1}])
query_id, future = cursor.execute("SELECT a FROM many_rows ORDER BY a")
result_set = future.result()
self.assertEqual(result_set.fetchall(), [{"a": i} for i in range(10000)])
|
brainrender/gui/widgets/screenshot_modal.py | crsanderford/brainrender | 226 | 11077513 | from qtpy.QtWidgets import QDialog, QLabel, QVBoxLayout
from qtpy import QtCore
from brainrender.gui.style import style, update_css
class ScreenshotModal(QDialog):
left = 250
top = 250
width = 400
height = 120
def __init__(self, main_window, palette):
"""
        Creates a modal dialog that confirms a screenshot
        has been saved and closes itself automatically.
Arguments:
----------
main_window: reference to the App's main window
palette: main_window's palette, used to style widgets
"""
super().__init__()
self.setWindowTitle("Add brain regions")
self.ui()
self.main_window = main_window
self.setStyleSheet(update_css(style, palette))
# Start timer to autoclose
self.timer = QtCore.QTimer(self)
self.timer.setInterval(1500)
self.timer.timeout.connect(self.close)
self.timer.start()
def ui(self):
"""
Define UI's elements
"""
self.setGeometry(self.left, self.top, self.width, self.height)
layout = QVBoxLayout()
label = QLabel(self)
label.setStyleSheet("font-size: 18pt; font-weight: 700;")
label.setObjectName("PopupLabel")
label.setText("Screenshot saved")
layout.addWidget(label)
self.setLayout(layout)
self.setModal(True)
self.show()
|
lib/galaxy/jobs/runners/util/process_groups.py | rikeshi/galaxy | 1,085 | 11077530 | import errno
import logging
import os
import signal
from time import sleep
log = logging.getLogger(__name__)
def check_pg(pgid):
"""Check whether processes in process group pgid are still alive."""
try:
(pid, exit_status) = os.waitpid(-pgid, os.WNOHANG)
except OSError as e:
if e.errno == errno.ECHILD:
log.debug("check_pg(): No process found in process group %d", pgid)
else:
log.warning("check_pg(): Got errno %s when checking process group %d: %s", errno.errorcode[e.errno], pgid, e.strerror)
return False
# Since we are passing os.WNOHANG to os.waitpid(), pid is 0 if no process
# status is available immediately.
return pid == 0
def kill_pg(pgid):
"""Kill all processes in process group pgid."""
for sig in [signal.SIGTERM, signal.SIGKILL]:
try:
os.killpg(pgid, sig)
except OSError as e:
if e.errno == errno.ESRCH:
return
log.warning("Got errno %s when sending signal %d to process group %d: %s", errno.errorcode[e.errno], sig, pgid, e.strerror)
sleep(1)
if not check_pg(pgid):
log.debug("Processes in process group %d successfully killed with signal %d", pgid, sig)
return
else:
log.warning("Some process in process group %d refuses to die after signaling TERM/KILL", pgid)
|
src/azure-cli/azure/cli/command_modules/natgateway/tests/latest/test_natgateway_commands.py | YuanyuanNi/azure-cli | 3,287 | 11077536 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.testsdk import ScenarioTest, ResourceGroupPreparer
class NatGatewayScenarioTests(ScenarioTest):
@ResourceGroupPreparer(location='eastus2')
def test_natgateway(self, resource_group, resource_group_location):
self.kwargs.update({
'name': "ng1",
'idle_timeout': 4,
'sku': "Standard",
'ip_addr': "pip",
'ip_prefix': "prefix",
'idle_timeout_updated': 5,
'zone': 2,
'location': resource_group_location,
'resource_type': 'Microsoft.Network/NatGateways'
})
# create public ip address
self.cmd('az network public-ip create -g {rg} -n {ip_addr} --location {location} --zone {zone} --sku Standard ')
# create public ip prefix
self.cmd('az network public-ip prefix create --length 29 --location {location} --name {ip_prefix} --resource-group {rg} --zone {zone}')
self.cmd('az network nat gateway create --resource-group {rg} --public-ip-prefixes {ip_prefix} --name {name} --location {location} --public-ip-addresses {ip_addr} --idle-timeout {idle_timeout} --zone {zone}', checks=[
self.check('resourceGroup', '{rg}'),
self.check('idleTimeoutInMinutes', '{idle_timeout}'),
self.check("contains(publicIpAddresses[0].id, '{ip_addr}')", True),
self.check("contains(publicIpPrefixes[0].id, '{ip_prefix}')", True),
self.check('sku.name', 'Standard'),
self.check('location', '{location}'),
self.check('zones[0]', '{zone}')
])
self.cmd('az network nat gateway update -g {rg} --name {name} --idle-timeout {idle_timeout_updated}',
checks=self.check('idleTimeoutInMinutes', 5))
self.cmd('az network nat gateway list -g {rg}',
checks=self.check('length(@)', 1))
self.cmd('az network nat gateway show --resource-group {rg} --name {name}',
checks=self.check('name', '{name}'))
# delete and verify item is removed
self.cmd('az network nat gateway delete --resource-group {rg} --name {name}')
self.cmd('az network nat gateway list -g {rg}',
checks=self.check('length(@)', 0))
@ResourceGroupPreparer(location='eastus2')
def test_natgateway_empty_create(self, resource_group, resource_group_location):
self.kwargs.update({
'name': "ng1",
'idle_timeout': 4,
'sku': "Standard",
'ip_addr': "pip",
'ip_prefix': "prefix",
'idle_timeout_updated': 5,
'zone': 2,
'location': resource_group_location,
'resource_type': 'Microsoft.Network/NatGateways'
})
self.cmd(
'az network nat gateway create --resource-group {rg} --name {name} --location {location} --idle-timeout {idle_timeout} --zone {zone}',
checks=[
self.check('resourceGroup', '{rg}'),
self.check('idleTimeoutInMinutes', '{idle_timeout}'),
self.check('sku.name', 'Standard'),
self.check('location', '{location}'),
self.check('zones[0]', '{zone}')
])
|
src/bindings/python/src/openvino/runtime/opset2/ops.py | artkuli/openvino | 1,127 | 11077567 | # -*- coding: utf-8 -*-
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
"""Factory functions for all openvino ops."""
from typing import Callable, Iterable, List, Optional, Set, Union
import numpy as np
from functools import partial
from openvino.runtime import Node, Shape
from openvino.runtime.op import Constant, Parameter
from openvino.runtime.opset_utils import _get_node_factory
from openvino.runtime.utils.decorators import binary_op, nameable_op, unary_op
from openvino.runtime.utils.input_validation import (
assert_list_of_ints,
check_valid_attributes,
is_non_negative_value,
is_positive_value,
)
from openvino.runtime.utils.node_factory import NodeFactory
from openvino.runtime.utils.types import (
NodeInput,
NumericData,
NumericType,
ScalarData,
TensorShape,
as_node,
as_nodes,
get_dtype,
get_element_type,
get_element_type_str,
make_constant_node,
)
_get_node_factory_opset2 = partial(_get_node_factory, "opset2")
# -------------------------------------------- ops ------------------------------------------------
@nameable_op
def batch_to_space(
data: NodeInput,
block_shape: NodeInput,
crops_begin: NodeInput,
crops_end: NodeInput,
name: Optional[str] = None,
) -> Node:
"""Perform BatchToSpace operation on the input tensor.
BatchToSpace permutes data from the batch dimension of the data tensor into spatial dimensions.
:param data: Node producing the data tensor.
:param block_shape: The sizes of the block of values to be moved.
:param crops_begin: Specifies the amount to crop from the beginning along each axis of `data`.
:param crops_end: Specifies the amount to crop from the end along each axis of `data`.
:param name: Optional output node name.
:return: The new node performing a BatchToSpace operation.
"""
return _get_node_factory_opset2().create(
"BatchToSpace", as_nodes(data, block_shape, crops_begin, crops_end),
)
@unary_op
def gelu(node: NodeInput, name: Optional[str] = None) -> Node:
r"""Perform Gaussian Error Linear Unit operation element-wise on data from input node.
Computes GELU function:
    \f[ f(x) = 0.5\cdot x\cdot(1 + erf(\dfrac{x}{\sqrt{2}})) \f]
    For more information refer to [Gaussian Error Linear Unit (GELU)](https://arxiv.org/pdf/1606.08415.pdf)
:param node: Input tensor. One of: input node, array or scalar.
:param name: Optional output node name.
:return: The new node performing a GELU operation on its input data element-wise.
"""
return _get_node_factory_opset2().create("Gelu", [node])
@nameable_op
def mvn(
data: Node,
across_channels: bool = False,
normalize_variance: bool = False,
eps: float = 1e-9,
name: str = None,
) -> Node:
r"""Perform Mean Variance Normalization operation on data from input node.
Computes MVN on the input tensor `data` (called `X`) using formula:
\f[ Y = \dfrac{X-EX}{\sqrt{E(X-EX)^2}} \f]
:param data: The node with data tensor.
:param across_channels: Denotes if mean values are shared across channels.
:param normalize_variance: Denotes whether to perform variance normalization.
:param eps: The number added to the variance to avoid division by zero
when normalizing the value. Scalar value.
:param name: Optional output node name.
:return: The new node performing a MVN operation on input tensor.
"""
return _get_node_factory_opset2().create(
"MVN",
[data],
{
"across_channels": across_channels,
"normalize_variance": normalize_variance,
"eps": eps,
},
)
@nameable_op
def reorg_yolo(input: Node, stride: List[int], name: Optional[str] = None) -> Node:
"""Return a node which produces the ReorgYolo operation.
:param input: Input data.
:param stride: Stride to reorganize input by.
:param name: Optional name for output node.
:return: ReorgYolo node.
"""
return _get_node_factory_opset2().create("ReorgYolo", [input], {"stride": stride})
@nameable_op
def roi_pooling(
input: NodeInput,
coords: NodeInput,
output_size: TensorShape,
spatial_scale: NumericData,
method: str,
name: Optional[str] = None,
) -> Node:
"""Return a node which produces an ROIPooling operation.
:param input: Input feature map `{N, C, ...}`.
:param coords: Coordinates of bounding boxes.
:param output_size: Height/Width of ROI output features (shape).
:param spatial_scale: Ratio of input feature map over input image size (float).
:param method: Method of pooling - string: "max" or "bilinear".
:return: ROIPooling node.
"""
method = method.lower()
return _get_node_factory_opset2().create(
"ROIPooling",
as_nodes(input, coords),
{
"output_size": Shape(output_size),
"spatial_scale": spatial_scale,
"method": method,
},
)
@nameable_op
def space_to_batch(
data: NodeInput,
block_shape: NodeInput,
pads_begin: NodeInput,
pads_end: NodeInput,
name: Optional[str] = None,
) -> Node:
"""Perform SpaceToBatch operation on the input tensor.
SpaceToBatch permutes data tensor blocks of spatial data into batch dimension.
The operator returns a copy of the input tensor where values from spatial blocks dimensions
are moved in the batch dimension
:param data: Node producing the data tensor.
:param block_shape: The sizes of the block of values to be moved.
:param pads_begin: Specifies the padding for the beginning along each axis of `data`.
:param pads_end: Specifies the padding for the ending along each axis of `data`.
:param name: Optional output node name.
:return: The new node performing a SpaceToBatch operation.
"""
return _get_node_factory_opset2().create(
"SpaceToBatch", as_nodes(data, block_shape, pads_begin, pads_end),
)
|
examples/python/markov_autoregression.py | CCHiggins/statsmodels | 6,931 | 11077575 | #!/usr/bin/env python
# coding: utf-8
# DO NOT EDIT
# Autogenerated from the notebook markov_autoregression.ipynb.
# Edit the notebook and then sync the output with this file.
#
# flake8: noqa
# DO NOT EDIT
# ## Markov switching autoregression models
# This notebook provides an example of the use of Markov switching models
# in statsmodels to replicate a number of results presented in Kim and
# Nelson (1999). It applies the Hamilton (1989) filter the Kim (1994)
# smoother.
#
# This is tested against the Markov-switching models from E-views 8, which
# can be found at http://www.eviews.com/EViews8/ev8ecswitch_n.html#MarkovAR
# or the Markov-switching models of Stata 14 which can be found at
# http://www.stata.com/manuals14/tsmswitch.pdf.
from datetime import datetime
from io import BytesIO
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import requests
import statsmodels.api as sm
# NBER recessions
from pandas_datareader.data import DataReader
usrec = DataReader("USREC",
"fred",
start=datetime(1947, 1, 1),
end=datetime(2013, 4, 1))
# ### Hamilton (1989) switching model of GNP
#
# This replicates Hamilton's (1989) seminal paper introducing Markov-
# switching models. The model is an autoregressive model of order 4 in which
# the mean of the process switches between two regimes. It can be written:
#
# $$
# y_t = \mu_{S_t} + \phi_1 (y_{t-1} - \mu_{S_{t-1}}) + \phi_2 (y_{t-2} -
# \mu_{S_{t-2}}) + \phi_3 (y_{t-3} - \mu_{S_{t-3}}) + \phi_4 (y_{t-4} -
# \mu_{S_{t-4}}) + \varepsilon_t
# $$
#
# Each period, the regime transitions according to the following matrix of
# transition probabilities:
#
# $$ P(S_t = s_t | S_{t-1} = s_{t-1}) =
# \begin{bmatrix}
# p_{00} & p_{10} \\
# p_{01} & p_{11}
# \end{bmatrix}
# $$
#
# where $p_{ij}$ is the probability of transitioning *from* regime $i$,
# *to* regime $j$.
#
# The model class is `MarkovAutoregression` in the time-series part of
# `statsmodels`. In order to create the model, we must specify the number of
# regimes with `k_regimes=2`, and the order of the autoregression with
# `order=4`. The default model also includes switching autoregressive
# coefficients, so here we also need to specify `switching_ar=False` to
# avoid that.
#
# After creation, the model is `fit` via maximum likelihood estimation.
# Under the hood, good starting parameters are found using a number of steps
# of the expectation maximization (EM) algorithm, and a quasi-Newton (BFGS)
# algorithm is applied to quickly find the maximum.
# Get the RGNP data to replicate Hamilton
dta = pd.read_stata("https://www.stata-press.com/data/r14/rgnp.dta").iloc[1:]
dta.index = pd.DatetimeIndex(dta.date, freq="QS")
dta_hamilton = dta.rgnp
# Plot the data
dta_hamilton.plot(title="Growth rate of Real GNP", figsize=(12, 3))
# Fit the model
mod_hamilton = sm.tsa.MarkovAutoregression(dta_hamilton,
k_regimes=2,
order=4,
switching_ar=False)
res_hamilton = mod_hamilton.fit()
res_hamilton.summary()
# We plot the filtered and smoothed probabilities of a recession. Filtered
# refers to an estimate of the probability at time $t$ based on data up to
# and including time $t$ (but excluding time $t+1, ..., T$). Smoothed refers
# to an estimate of the probability at time $t$ using all the data in the
# sample.
#
# For reference, the shaded periods represent the NBER recessions.
fig, axes = plt.subplots(2, figsize=(7, 7))
ax = axes[0]
ax.plot(res_hamilton.filtered_marginal_probabilities[0])
ax.fill_between(usrec.index,
0,
1,
where=usrec["USREC"].values,
color="k",
alpha=0.1)
ax.set_xlim(dta_hamilton.index[4], dta_hamilton.index[-1])
ax.set(title="Filtered probability of recession")
ax = axes[1]
ax.plot(res_hamilton.smoothed_marginal_probabilities[0])
ax.fill_between(usrec.index,
0,
1,
where=usrec["USREC"].values,
color="k",
alpha=0.1)
ax.set_xlim(dta_hamilton.index[4], dta_hamilton.index[-1])
ax.set(title="Smoothed probability of recession")
fig.tight_layout()
# From the estimated transition matrix we can calculate the expected
# duration of a recession versus an expansion.
print(res_hamilton.expected_durations)
# In this case, it is expected that a recession will last about one year
# (4 quarters) and an expansion about two and a half years.
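# Not in the original notebook: an illustrative aside. The expected duration of
# regime i follows from the transition matrix as 1 / (1 - p_ii), where p_ii is
# the estimated probability of staying in regime i. With a made-up persistence
# of p_00 = 0.75:
p_00_example = 0.75
print(1 / (1 - p_00_example))  # 4.0, i.e. about four quarters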
# ### Kim, Nelson, and Startz (1998) Three-state Variance Switching
#
# This model demonstrates estimation with regime heteroskedasticity
# (switching of variances) and no mean effect. The dataset can be reached at
# http://econ.korea.ac.kr/~cjkim/MARKOV/data/ew_excs.prn.
#
# The model in question is:
#
# $$
# \begin{align}
# y_t & = \varepsilon_t \\
# \varepsilon_t & \sim N(0, \sigma_{S_t}^2)
# \end{align}
# $$
#
# Since there is no autoregressive component, this model can be fit using
# the `MarkovRegression` class. Since there is no mean effect, we specify
# `trend='n'`. There are hypothesized to be three regimes for the switching
# variances, so we specify `k_regimes=3` and `switching_variance=True` (by
# default, the variance is assumed to be the same across regimes).
# Get the dataset
ew_excs = requests.get(
"http://econ.korea.ac.kr/~cjkim/MARKOV/data/ew_excs.prn").content
raw = pd.read_table(BytesIO(ew_excs),
header=None,
skipfooter=1,
engine="python")
raw.index = pd.date_range("1926-01-01", "1995-12-01", freq="MS")
dta_kns = raw.loc[:"1986"] - raw.loc[:"1986"].mean()
# Plot the dataset
dta_kns[0].plot(title="Excess returns", figsize=(12, 3))
# Fit the model
mod_kns = sm.tsa.MarkovRegression(dta_kns,
k_regimes=3,
trend="n",
switching_variance=True)
res_kns = mod_kns.fit()
res_kns.summary()
# Below we plot the probabilities of being in each of the regimes; only in
# a few periods is a high-variance regime probable.
fig, axes = plt.subplots(3, figsize=(10, 7))
ax = axes[0]
ax.plot(res_kns.smoothed_marginal_probabilities[0])
ax.set(title="Smoothed probability of a low-variance regime for stock returns")
ax = axes[1]
ax.plot(res_kns.smoothed_marginal_probabilities[1])
ax.set(
title="Smoothed probability of a medium-variance regime for stock returns")
ax = axes[2]
ax.plot(res_kns.smoothed_marginal_probabilities[2])
ax.set(
title="Smoothed probability of a high-variance regime for stock returns")
fig.tight_layout()
# ### Filardo (1994) Time-Varying Transition Probabilities
#
# This model demonstrates estimation with time-varying transition
# probabilities. The dataset can be reached at
# http://econ.korea.ac.kr/~cjkim/MARKOV/data/filardo.prn.
#
# In the above models we have assumed that the transition probabilities
# are constant across time. Here we allow the probabilities to change with
# the state of the economy. Otherwise, the model is the same Markov
# autoregression of Hamilton (1989).
#
# Each period, the regime now transitions according to the following
# matrix of time-varying transition probabilities:
#
# $$ P(S_t = s_t | S_{t-1} = s_{t-1}) =
# \begin{bmatrix}
# p_{00,t} & p_{10,t} \\
# p_{01,t} & p_{11,t}
# \end{bmatrix}
# $$
#
# where $p_{ij,t}$ is the probability of transitioning *from* regime $i$,
# *to* regime $j$ in period $t$, and is defined to be:
#
# $$
# p_{ij,t} = \frac{\exp\{ x_{t-1}' \beta_{ij} \}}{1 + \exp\{ x_{t-1}'
# \beta_{ij} \}}
# $$
#
# Instead of estimating the transition probabilities as part of maximum
# likelihood, the regression coefficients $\beta_{ij}$ are estimated. These
# coefficients relate the transition probabilities to a vector of pre-
# determined or exogenous regressors $x_{t-1}$.
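# Not in the original notebook: a worked example of the logistic mapping from
# lagged regressors to a transition probability, with made-up coefficients.
x_lag_example = np.array([1.0, 0.3])       # constant and lagged leading indicator
beta_ij_example = np.array([1.5, -2.0])    # hypothetical coefficients beta_ij
p_ij_t_example = np.exp(x_lag_example @ beta_ij_example) / (
    1 + np.exp(x_lag_example @ beta_ij_example))
print(p_ij_t_example)  # approximately 0.71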
# Get the dataset
filardo = requests.get(
"http://econ.korea.ac.kr/~cjkim/MARKOV/data/filardo.prn").content
dta_filardo = pd.read_table(BytesIO(filardo),
sep=" +",
header=None,
skipfooter=1,
engine="python")
dta_filardo.columns = ["month", "ip", "leading"]
dta_filardo.index = pd.date_range("1948-01-01", "1991-04-01", freq="MS")
dta_filardo["dlip"] = np.log(dta_filardo["ip"]).diff() * 100
# Deflated pre-1960 observations by ratio of std. devs.
# See hmt_tvp.opt or Filardo (1994) p. 302
std_ratio = (dta_filardo["dlip"]["1960-01-01":].std() /
dta_filardo["dlip"][:"1959-12-01"].std())
dta_filardo[
"dlip"][:"1959-12-01"] = dta_filardo["dlip"][:"1959-12-01"] * std_ratio
dta_filardo["dlleading"] = np.log(dta_filardo["leading"]).diff() * 100
dta_filardo["dmdlleading"] = dta_filardo["dlleading"] - dta_filardo[
"dlleading"].mean()
# Plot the data
dta_filardo["dlip"].plot(
title="Standardized growth rate of industrial production", figsize=(13, 3))
plt.figure()
dta_filardo["dmdlleading"].plot(title="Leading indicator", figsize=(13, 3))
# The time-varying transition probabilities are specified by the
# `exog_tvtp` parameter.
#
# Here we demonstrate another feature of model fitting - the use of a
# random search for MLE starting parameters. Because Markov switching models
# are often characterized by many local maxima of the likelihood function,
# performing an initial optimization step can be helpful to find the best
# parameters.
#
# Below, we specify that 20 random perturbations from the starting
# parameter vector are examined and the best one used as the actual starting
# parameters. Because of the random nature of the search, we seed the random
# number generator beforehand to allow replication of the result.
mod_filardo = sm.tsa.MarkovAutoregression(
dta_filardo.iloc[2:]["dlip"],
k_regimes=2,
order=4,
switching_ar=False,
exog_tvtp=sm.add_constant(dta_filardo.iloc[1:-1]["dmdlleading"]),
)
np.random.seed(12345)
res_filardo = mod_filardo.fit(search_reps=20)
res_filardo.summary()
# Below we plot the smoothed probability of the economy operating in a
# low-production state, and again include the NBER recessions for
# comparison.
fig, ax = plt.subplots(figsize=(12, 3))
ax.plot(res_filardo.smoothed_marginal_probabilities[0])
ax.fill_between(usrec.index,
0,
1,
where=usrec["USREC"].values,
color="gray",
alpha=0.2)
ax.set_xlim(dta_filardo.index[6], dta_filardo.index[-1])
ax.set(title="Smoothed probability of a low-production state")
# Using the time-varying transition probabilities, we can see how the
# expected duration of a low-production state changes over time:
#
res_filardo.expected_durations[0].plot(
title="Expected duration of a low-production state", figsize=(12, 3))
# During recessions, the expected duration of a low-production state is
# much higher than in an expansion.
|
backend/data/hdmetor/text_generation/blob/a9b307ce9167b3d9fd40f61407ba6c912634889d/model.py | zhiiker/neural_complete | 1,237 | 11077582 | from keras.models import Sequential
from keras.layers.recurrent import LSTM
from keras.layers.core import Dense, Activation, Dropout
def lstm_model(seq_len, total_char):
model = Sequential()
model.add(LSTM(512, return_sequences=True, input_shape=(seq_len, total_char)))
model.add(Dropout(0.4))
model.add(LSTM(512, return_sequences=False))
model.add(Dropout(0.4))
model.add(Dense(total_char))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    return model
|
demos/imagetagger/setup.py | Ixyk-Wolf/aiohttp-demos | 649 | 11077613 | import os
import re
from setuptools import find_packages, setup
REGEXP = re.compile(r"^__version__\W*=\W*'([\d.abrc]+)'")
def read_version():
init_py = os.path.join(
os.path.dirname(__file__), 'imagetagger', '__init__.py'
)
with open(init_py) as f:
for line in f:
match = REGEXP.match(line)
if match is not None:
return match.group(1)
    msg = f'Cannot find version in {init_py}'
raise RuntimeError(msg)
install_requires = ['aiohttp', 'aiohttp_jinja2', 'trafaret_config']
setup(
name='imagetagger',
version=read_version(),
description='imagetagger',
platforms=['POSIX'],
packages=find_packages(),
package_data={'': ['config/*.*']},
include_package_data=True,
install_requires=install_requires,
zip_safe=False,
)
|
tests/orm/test_groups.py | aiidateam/aiida_core | 153 | 11077623 | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=no-self-use
"""Test for the Group ORM class."""
import pytest
from aiida import orm
from aiida.common import exceptions
from aiida.tools.graph.deletions import delete_nodes
@pytest.mark.usefixtures('aiida_profile_clean')
class TestGroups:
"""Test backend entities and their collections"""
def test_count(self):
"""Test the `count` method."""
node_00 = orm.Data().store()
node_01 = orm.Data().store()
nodes = [node_00, node_01]
group = orm.Group(label='label', description='description').store()
group.add_nodes(nodes)
assert group.count() == len(nodes)
def test_creation(self):
"""Test the creation of Groups."""
node = orm.Data()
stored_node = orm.Data().store()
group = orm.Group(label='testgroup')
with pytest.raises(exceptions.ModificationNotAllowed):
# group unstored
group.add_nodes(node)
with pytest.raises(exceptions.ModificationNotAllowed):
# group unstored
group.add_nodes(stored_node)
group.store()
with pytest.raises(ValueError):
# node unstored
group.add_nodes(node)
group.add_nodes(stored_node)
nodes = list(group.nodes)
assert len(nodes) == 1
assert nodes[0].pk == stored_node.pk
def test_node_iterator(self):
"""Test the indexing and slicing functionality of the node iterator."""
node_00 = orm.Data().store()
node_01 = orm.Data().store()
node_02 = orm.Data().store()
node_03 = orm.Data().store()
nodes = [node_00, node_01, node_02, node_03]
group = orm.Group(label='label', description='description').store()
group.add_nodes(nodes)
# Indexing
node_indexed = group.nodes[0]
assert isinstance(node_indexed, orm.Data)
assert node_indexed.uuid in [node.uuid for node in nodes]
# Slicing
nodes_sliced = group.nodes[1:3]
assert isinstance(nodes_sliced, list)
assert len(nodes_sliced) == 2
assert all(isinstance(node, orm.Data) for node in nodes_sliced)
assert all(node.uuid in set(node.uuid for node in nodes) for node in nodes_sliced)
def test_entry_point(self):
"""Test the :meth:`aiida.orm.groups.Group.entry_point` property."""
from aiida.plugins.entry_point import get_entry_point_from_string
group = orm.Group('label')
assert group.entry_point == get_entry_point_from_string('aiida.groups:core')
assert orm.Group.entry_point == get_entry_point_from_string('aiida.groups:core')
class Custom(orm.Group):
pass
group = Custom('label')
assert group.entry_point is None
assert Custom.entry_point is None
def test_description(self):
"""Test the update of the description both for stored and unstored groups."""
node = orm.Data().store()
group_01 = orm.Group(label='testgroupdescription1', description='group_01').store()
group_01.add_nodes(node)
group_02 = orm.Group(label='testgroupdescription2', description='group_02')
# Preliminary checks
assert group_01.is_stored
assert not group_02.is_stored
assert group_01.description == 'group_01'
assert group_02.description == 'group_02'
# Change
group_01.description = 'new1'
group_02.description = 'new2'
# Test that the groups remained in their proper stored state and that
# the description was updated
assert group_01.is_stored
assert not group_02.is_stored
assert group_01.description == 'new1'
assert group_02.description == 'new2'
# Store group_02 and check that the description is OK
group_02.store()
assert group_02.is_stored
assert group_02.description == 'new2'
def test_add_nodes(self):
"""Test different ways of adding nodes."""
node_01 = orm.Data().store()
node_02 = orm.Data().store()
node_03 = orm.Data().store()
nodes = [node_01, node_02, node_03]
group = orm.Group(label='test_adding_nodes').store()
# Single node
group.add_nodes(node_01)
# List of nodes
group.add_nodes([node_02, node_03])
# Check
assert set(_.pk for _ in nodes) == set(_.pk for _ in group.nodes)
# Try to add a node that is already present: there should be no problem
group.add_nodes(node_01)
assert set(_.pk for _ in nodes) == set(_.pk for _ in group.nodes)
def test_remove_nodes(self):
"""Test node removal."""
node_01 = orm.Data().store()
node_02 = orm.Data().store()
node_03 = orm.Data().store()
node_04 = orm.Data().store()
nodes = [node_01, node_02, node_03]
group = orm.Group(label='test_remove_nodes').store()
# Add initial nodes
group.add_nodes(nodes)
assert set(_.pk for _ in nodes) == set(_.pk for _ in group.nodes)
# Remove a node that is not in the group: nothing should happen
group.remove_nodes(node_04)
assert set(_.pk for _ in nodes) == set(_.pk for _ in group.nodes)
# Remove one orm.Node
nodes.remove(node_03)
group.remove_nodes(node_03)
assert set(_.pk for _ in nodes) == set(_.pk for _ in group.nodes)
# Remove a list of Nodes and check
nodes.remove(node_01)
nodes.remove(node_02)
group.remove_nodes([node_01, node_02])
assert set(_.pk for _ in nodes) == set(_.pk for _ in group.nodes)
def test_clear(self):
"""Test the `clear` method to remove all nodes."""
node_01 = orm.Data().store()
node_02 = orm.Data().store()
node_03 = orm.Data().store()
nodes = [node_01, node_02, node_03]
group = orm.Group(label='test_clear_nodes').store()
# Add initial nodes
group.add_nodes(nodes)
assert set(_.pk for _ in nodes) == set(_.pk for _ in group.nodes)
group.clear()
assert list(group.nodes) == []
def test_name_desc(self):
"""Test Group description."""
group = orm.Group(label='testgroup2', description='some desc')
assert group.label == 'testgroup2'
assert group.description == 'some desc'
assert group.is_user_defined
group.store()
# Same checks after storing
assert group.label == 'testgroup2'
assert group.is_user_defined
assert group.description == 'some desc'
        # Delete the group so it is not found in further tests
orm.Group.collection.delete(group.pk)
def test_delete(self):
"""Test Group deletion."""
node = orm.Data().store()
group = orm.Group(label='testgroup3', description='some other desc').store()
group_copy = orm.Group.collection.get(label='testgroup3')
assert group.uuid == group_copy.uuid
group.add_nodes(node)
assert len(group.nodes) == 1
orm.Group.collection.delete(group.pk)
with pytest.raises(exceptions.NotExistent):
# The group does not exist anymore
orm.Group.collection.get(label='testgroup3')
def test_delete_node(self):
"""Test deletion of a node that has been assigned to a group."""
node = orm.Data().store()
group = orm.Group(label='testgroup', description='some desc').store()
group.add_nodes(node)
assert len(group.nodes) == 1
delete_nodes([node.pk], dry_run=False)
assert len(group.nodes) == 0
def test_rename(self):
"""Test the renaming of a Group."""
label_original = 'groupie'
label_changed = 'nogroupie'
group = orm.Group(label=label_original, description='I will be renamed')
# Check name changes work before storing
assert group.label == label_original
group.label = label_changed
assert group.label == label_changed
# Revert the name to its original and store it
group.label = label_original
group.store()
# Check name changes work after storing
assert group.label == label_original
group.label = label_changed
assert group.label == label_changed
def test_rename_existing(self):
"""Test that renaming to an already existing name is not permitted."""
label_group_a = 'group_a'
label_group_b = 'group_b'
orm.Group(label=label_group_a, description='I am the Original G').store()
# Before storing everything should be fine
group_b = orm.Group(label=label_group_a, description='They will try to rename me')
        # Storing a group with a duplicate label should raise an IntegrityError
with pytest.raises(exceptions.IntegrityError):
group_b.store()
# Reverting to unique name before storing
group_b.label = label_group_b
group_b.store()
        # After storing, renaming to an already existing label should still raise
with pytest.raises(exceptions.IntegrityError):
group_b.label = label_group_a
    def test_group_uuid_hashing_for_querybuilder(self):
        """QueryBuilder results should be reusable and shouldn't break hashing."""
group = orm.Group(label='test_group')
group.store()
# Search for the UUID of the stored group
builder = orm.QueryBuilder()
builder.append(orm.Group, project=['uuid'], filters={'label': {'==': 'test_group'}})
uuid = builder.first(flat=True)
        # Look up the group with the previously returned UUID
builder = orm.QueryBuilder()
builder.append(orm.Group, project=['id'], filters={'uuid': {'==': uuid}})
# Check that the query doesn't fail
builder.all()
# And that the results are correct
assert builder.count() == 1
assert builder.first(flat=True) == group.pk
@pytest.mark.usefixtures('aiida_profile_clean')
class TestGroupsSubclasses:
"""Test rules around creating `Group` subclasses."""
@staticmethod
def test_creation_registered():
"""Test rules around creating registered `Group` subclasses."""
group = orm.AutoGroup('some-label')
assert isinstance(group, orm.AutoGroup)
assert group.type_string == 'core.auto'
group, _ = orm.AutoGroup.collection.get_or_create('some-auto-group')
assert isinstance(group, orm.AutoGroup)
assert group.type_string == 'core.auto'
@staticmethod
def test_loading():
"""Test that loading instances from the database returns the correct subclass of `Group`."""
group = orm.Group('normal-group').store()
loaded = orm.load_group(group.pk)
assert isinstance(loaded, orm.Group)
group = orm.AutoGroup('auto-group').store()
loaded = orm.load_group(group.pk)
        assert isinstance(loaded, orm.AutoGroup)
@staticmethod
def test_creation_unregistered():
"""Test rules around creating `Group` subclasses without a registered entry point."""
        # Defining an unregistered subclass should issue a warning and its type string should be set to `None`
with pytest.warns(UserWarning):
class SubGroup(orm.Group):
pass
assert SubGroup._type_string is None # pylint: disable=protected-access
# Creating an instance is allowed
instance = SubGroup(label='subgroup')
assert instance._type_string is None # pylint: disable=protected-access
# Storing the instance, however, is forbidden and should raise
with pytest.raises(exceptions.StoringNotAllowed):
instance.store()
@staticmethod
def test_loading_unregistered():
"""Test rules around loading `Group` subclasses without a registered entry point.
Storing instances of unregistered subclasses is not allowed so we have to create one sneakily by instantiating
a normal group and manipulating the type string directly on the database model.
"""
group = orm.Group(label='group')
group.backend_entity.bare_model.type_string = 'unregistered.subclass'
group.store()
with pytest.warns(UserWarning):
loaded = orm.load_group(group.pk)
assert isinstance(loaded, orm.Group)
        # Remove the group so that other tests do not trigger the same warning
group_pk = group.pk
del group
orm.Group.collection.delete(pk=group_pk)
@staticmethod
def test_querying():
"""Test querying for groups with and without subclassing."""
orm.Group(label='group').store()
orm.AutoGroup(label='auto-group').store()
# Fake a subclass by manually setting the type string
group = orm.Group(label='custom-group')
group.backend_entity.bare_model.type_string = 'custom.group'
group.store()
assert orm.QueryBuilder().append(orm.AutoGroup).count() == 1
assert orm.QueryBuilder().append(orm.AutoGroup, subclassing=False).count() == 1
assert orm.QueryBuilder().append(orm.Group, subclassing=False).count() == 1
assert orm.QueryBuilder().append(orm.Group).count() == 3
assert orm.QueryBuilder().append(orm.Group, filters={'type_string': 'custom.group'}).count() == 1
        # Remove the group so that other tests do not trigger the same warning
group_pk = group.pk
del group
orm.Group.collection.delete(pk=group_pk)
@staticmethod
def test_querying_node_subclasses():
"""Test querying for groups with multiple types for nodes it contains."""
group = orm.Group(label='group').store()
data_int = orm.Int().store()
data_str = orm.Str().store()
data_bool = orm.Bool().store()
group.add_nodes([data_int, data_str, data_bool])
builder = orm.QueryBuilder().append(orm.Group, tag='group')
builder.append((orm.Int, orm.Str), with_group='group', project='id')
results = [entry[0] for entry in builder.iterall()]
assert len(results) == 2
assert data_int.pk in results
assert data_str.pk in results
assert data_bool.pk not in results
@staticmethod
def test_query_with_group():
"""Test that querying a data node in a group works."""
group = orm.Group(label='group').store()
data = orm.Data().store()
group.add_nodes([data])
builder = orm.QueryBuilder().append(orm.Data, filters={
'id': data.pk
}, tag='data').append(orm.Group, with_node='data')
loaded = builder.one()[0]
assert loaded.pk == group.pk
class TestGroupExtras:
"""Test the property and methods of group extras."""
@pytest.fixture(autouse=True)
def init_profile(self, aiida_profile_clean): # pylint: disable=unused-argument
"""Initialize the profile."""
# pylint: disable=attribute-defined-outside-init
self.group = orm.Group('test_extras')
def test_extras(self):
"""Test the `Group.base.extras.all` property."""
original_extra = {'nested': {'a': 1}}
self.group.base.extras.set('key', original_extra)
group_extras = self.group.base.extras.all
assert group_extras['key'] == original_extra
group_extras['key']['nested']['a'] = 2
assert original_extra['nested']['a'] == 2
# Now store the group and verify that `extras` then returns a deep copy
self.group.store()
group_extras = self.group.base.extras.all
# We change the returned group extras but the original extra should remain unchanged
group_extras['key']['nested']['a'] = 3
assert original_extra['nested']['a'] == 2
def test_get_extra(self):
"""Test the `Group.get_extra` method."""
original_extra = {'nested': {'a': 1}}
self.group.base.extras.set('key', original_extra)
group_extra = self.group.base.extras.get('key')
assert group_extra == original_extra
group_extra['nested']['a'] = 2
assert original_extra['nested']['a'] == 2
default = 'default'
assert self.group.base.extras.get('not_existing', default=default) == default
with pytest.raises(AttributeError):
self.group.base.extras.get('not_existing')
# Now store the group and verify that `get_extra` then returns a deep copy
self.group.store()
group_extra = self.group.base.extras.get('key')
# We change the returned group extras but the original extra should remain unchanged
group_extra['nested']['a'] = 3
assert original_extra['nested']['a'] == 2
default = 'default'
assert self.group.base.extras.get('not_existing', default=default) == default
with pytest.raises(AttributeError):
self.group.base.extras.get('not_existing')
def test_get_extra_many(self):
"""Test the `Group.base.extras.get_many` method."""
original_extra = {'nested': {'a': 1}}
self.group.base.extras.set('key', original_extra)
group_extra = self.group.base.extras.get_many(['key'])[0]
assert group_extra == original_extra
group_extra['nested']['a'] = 2
assert original_extra['nested']['a'] == 2
# Now store the group and verify that `get_extra` then returns a deep copy
self.group.store()
group_extra = self.group.base.extras.get_many(['key'])[0]
# We change the returned group extras but the original extra should remain unchanged
group_extra['nested']['a'] = 3
assert original_extra['nested']['a'] == 2
def test_set_extra(self):
"""Test the `Group.set_extra` method."""
with pytest.raises(exceptions.ValidationError):
self.group.base.extras.set('illegal.key', 'value')
self.group.base.extras.set('valid_key', 'value')
self.group.store()
self.group.base.extras.set('valid_key', 'changed')
assert orm.load_group(self.group.pk).base.extras.get('valid_key') == 'changed'
def test_set_extra_many(self):
"""Test the `Group.base.extras.set_many` method."""
with pytest.raises(exceptions.ValidationError):
self.group.base.extras.set_many({'illegal.key': 'value', 'valid_key': 'value'})
self.group.base.extras.set_many({'valid_key': 'value'})
self.group.store()
self.group.base.extras.set_many({'valid_key': 'changed'})
assert orm.load_group(self.group.pk).base.extras.get('valid_key') == 'changed'
def test_reset_extra(self):
"""Test the `Group.base.extras.reset` method."""
extras_before = {'extra_one': 'value', 'extra_two': 'value'}
extras_after = {'extra_three': 'value', 'extra_four': 'value'}
extras_illegal = {'extra.illegal': 'value', 'extra_four': 'value'}
self.group.base.extras.set_many(extras_before)
assert self.group.base.extras.all == extras_before
self.group.base.extras.reset(extras_after)
assert self.group.base.extras.all == extras_after
with pytest.raises(exceptions.ValidationError):
self.group.base.extras.reset(extras_illegal)
self.group.store()
self.group.base.extras.reset(extras_after)
assert orm.load_group(self.group.pk).base.extras.all == extras_after
def test_delete_extra(self):
"""Test the `Group.base.extras.delete` method."""
self.group.base.extras.set('valid_key', 'value')
assert self.group.base.extras.get('valid_key') == 'value'
self.group.base.extras.delete('valid_key')
with pytest.raises(AttributeError):
self.group.base.extras.delete('valid_key')
# Repeat with stored group
self.group.base.extras.set('valid_key', 'value')
self.group.store()
self.group.base.extras.delete('valid_key')
with pytest.raises(AttributeError):
orm.load_group(self.group.pk).base.extras.get('valid_key')
def test_delete_extra_many(self):
"""Test the `Group.base.extras.delete_many` method."""
extras_valid = {'extra_one': 'value', 'extra_two': 'value'}
valid_keys = ['extra_one', 'extra_two']
invalid_keys = ['extra_one', 'invalid_key']
self.group.base.extras.set_many(extras_valid)
assert self.group.base.extras.all == extras_valid
with pytest.raises(AttributeError):
self.group.base.extras.delete_many(invalid_keys)
self.group.store()
self.group.base.extras.delete_many(valid_keys)
assert orm.load_group(self.group.pk).base.extras.all == {}
def test_clear_extras(self):
"""Test the `Group.base.extras.clear` method."""
extras = {'extra_one': 'value', 'extra_two': 'value'}
self.group.base.extras.set_many(extras)
assert self.group.base.extras.all == extras
self.group.base.extras.clear()
assert self.group.base.extras.all == {}
# Repeat for stored group
self.group.store()
self.group.base.extras.clear()
assert orm.load_group(self.group.pk).base.extras.all == {}
def test_extras_items(self):
"""Test the `Group.base.extras.items` generator."""
extras = {'extra_one': 'value', 'extra_two': 'value'}
self.group.base.extras.set_many(extras)
assert dict(self.group.base.extras.items()) == extras
def test_extras_keys(self):
"""Test the `Group.base.extras.keys` generator."""
extras = {'extra_one': 'value', 'extra_two': 'value'}
self.group.base.extras.set_many(extras)
assert set(self.group.base.extras.keys()) == set(extras)
|
crackq/logger.py | cah-antonia-vonoepen/crackq | 908 | 11077645 | #!/usr/bin/env python
"""Setup logging handler"""
import logging
from logging.config import fileConfig
# Setup logging
fileConfig('log_config.ini')
logger = logging.getLogger()
|
instascrape/scrapers/scrape_tools.py | code1dot/instascrape | 445 | 11077675 | from __future__ import annotations
import json
from typing import Any, Dict, Union, Callable, List
from collections import deque
import datetime
from functools import partial
import copy
import time
import requests
from bs4 import BeautifulSoup
from instascrape.core.json_algos import _JSONTree, _parse_json_str
JSONDict = Dict[str, Any]
def parse_data_from_json(json_dict, map_dict, default_value=float('nan')):
"""
    Parse data from a JSON dictionary using a mapping dictionary whose values
    are deques of keys describing the path to each desired value
"""
return_data = {}
for key in map_dict:
steps_to_value = map_dict[key]
# Loop through all steps into the JSON dict that will give us our data
first_step = steps_to_value.popleft()
try:
value = json_dict[first_step]
except KeyError:
value = default_value
else:
for step in steps_to_value:
                value = value[step]
finally:
return_data[key] = value
return return_data
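# Illustrative usage sketch (hypothetical data, not part of the library API):
# each value in `map_dict` is a deque of keys describing the path to walk
# through the JSON dictionary.
#
#   from collections import deque
#   post_json = {"node": {"id": "123", "owner": {"username": "alice"}}}
#   mapping = {
#       "id": deque(["node", "id"]),
#       "username": deque(["node", "owner", "username"]),
#   }
#   parse_data_from_json(post_json, mapping)
#   # -> {"id": "123", "username": "alice"}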
def flatten_dict(json_dict: JSONDict) -> JSONDict:
"""
Returns a flattened dictionary of data
Parameters
----------
json_dict : dict
Input dictionary for flattening
Returns
-------
flattened_dict : dict
Flattened dictionary
"""
json_tree = _JSONTree(json_dict)
flattened_dict = {}
for leaf_node in json_tree.leaf_nodes:
key_arr = deque([])
for key in leaf_node.prior_keys[::-1]:
key_arr.appendleft(str(key))
new_key = "_".join(key_arr)
if new_key not in flattened_dict:
break
flattened_dict[new_key] = list(leaf_node.json_data.values())[0]
return flattened_dict
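# Sketch of the flattening behaviour (hypothetical input): keys are joined with
# underscores, working backwards from each leaf until the resulting key is
# unique within the flattened dictionary.
#
#   flatten_dict({"user": {"id": 1, "stats": {"followers": 10}}})
#   # -> {"id": 1, "followers": 10}; a longer key such as "stats_followers"
#   #    is only produced when the shorter key would collide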
def json_from_html(source: Union[str, "BeautifulSoup"], as_dict: bool = True, flatten=False) -> Union[JSONDict, str]:
"""
Return JSON data parsed from Instagram source HTML
Parameters
----------
source : Union[str, BeautifulSoup]
Instagram HTML source code to parse the JSON from
as_dict : bool = True
Return JSON as dict if True else return JSON as string
flatten : bool
Flatten the dictionary prior to returning it
Returns
-------
json_data : Union[JSONDict, str]
Parsed JSON data from the HTML source as either a JSON-like dictionary
or just the string serialization
"""
soup = BeautifulSoup(source, features="html.parser")
json_data = json_from_soup(source=soup, as_dict=as_dict, flatten=flatten)
return json_data
def json_from_soup(source, as_dict: bool = True, flatten=False):
json_data = _parse_json_str(source=source)
if as_dict:
json_data = [json.loads(json_str) for json_str in json_data]
if flatten:
json_data = [flatten_dict(json_dict) for json_dict in json_data]
return json_data
def determine_json_type(json_data: Union[JSONDict, str]) -> str:
"""
Return the type of Instagram page based on the JSON data parsed from source
Parameters
----------
json_data: Union[JSONDict, str]
JSON data that will be checked and parsed to determine what type of page
the program is looking at (Profile, Post, Hashtag, etc)
Returns
-------
instagram_type : str
Name of the type of page the program is currently parsing or looking at
"""
if not isinstance(json_data, dict):
json_data = json.loads(json_data)
try:
instagram_type = list(json_data["entry_data"])[0]
except KeyError:
instagram_type = "Inconclusive"
return instagram_type
def json_from_url(
url: str,
as_dict: bool = True,
headers={
"user-agent": "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Mobile Safari/537.36 Edg/87.0.664.57"
},
flatten=False
) -> Union[JSONDict, str]:
"""
Return JSON data parsed from a provided Instagram URL
Parameters
----------
url : str
URL of the page to get the JSON data from
as_dict : bool = True
Return JSON as dict if True else return JSON as string
headers : Dict[str, str]
Dictionary of request headers to be passed on the GET request
flatten : bool
Flatten the dictionary prior to returning it
Returns
-------
json_data : Union[JSONDict, str]
Parsed JSON data from the URL as either a JSON-like dictionary
or just the string serialization
"""
source = requests.get(url, headers=headers).text
return json_from_html(source, as_dict=as_dict, flatten=flatten)
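# Illustrative usage sketch (the URL is hypothetical, and Instagram may require
# authenticated request headers in practice):
#
#   page_json = json_from_url("https://www.instagram.com/some_user/")
#   page_type = determine_json_type(page_json[0])  # e.g. "ProfilePage"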
def scrape_posts(
posts: List["Post"],
session: requests.Session = None,
webdriver: "selenium.webdriver.chrome.webdriver.WebDriver" = None,
limit: Union[int, datetime.datetime] = None,
headers: dict = {
"user-agent": "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Mobile Safari/537.36 Edg/87.0.664.57"
},
pause: int = 5,
on_exception: str = "raise",
silent: bool = True,
inplace: bool = False
):
# Default setup
if not inplace:
posts = copy.deepcopy(posts)
if limit is None:
limit = len(posts)
scraped_posts = []
for i, post in enumerate(posts):
temporary_post = copy.deepcopy(post)
try:
post.scrape(session=session, webdriver=webdriver, headers=headers)
scraped_posts.append(post)
except Exception as e:
if on_exception == "raise":
raise
elif on_exception == "pass":
if not silent:
print(f"PASSING EXCEPTION: {e}")
pass
elif on_exception == "return":
if not silent:
print(f"{e}, RETURNING SCRAPED AND UNSCRAPED")
break
if not silent:
output_str = f"{i}: {post.shortcode} - {post.upload_date}"
print(output_str)
if _stop_scraping(limit, post, i):
break
time.sleep(pause)
unscraped_posts = list(set(posts) - set(scraped_posts))
if not isinstance(limit, int):
scraped_posts.pop()
unscraped_posts.insert(0, temporary_post)
return scraped_posts, unscraped_posts if not inplace else None
def _stop_scraping(limit, post, i):
stop = False
if isinstance(limit, int):
if i == limit - 1:
stop = True
elif (isinstance(limit, datetime.datetime) or isinstance(limit, datetime.date)):
if post.upload_date <= limit:
stop = True
return stop
|
msg/tools/genmsg/src/genmsg/gentools.py | sikuner/Firmware_Marine | 742 | 11077688 | #! /usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, <NAME>, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of <NAME>, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Library for supporting message and service generation for all ROS
client libraries. This is mainly responsible for calculating the
md5sums and message definitions of classes.
"""
# NOTE: this should not contain any rospy-specific code. The rospy
# generator library is rospy.genpy.
import sys
import hashlib
try:
from cStringIO import StringIO # Python 2.x
except ImportError:
from io import StringIO # Python 3.x
from . import msgs
from .msgs import InvalidMsgSpec, MsgSpec, bare_msg_type, is_builtin
from .msg_loader import load_depends
from .srvs import SrvSpec
from . import names
from . import base
def compute_md5_text(msg_context, spec):
"""
    Compute the text used for md5 calculation. The MD5 spec states that we
    remove comments and non-meaningful whitespace. We also strip
    package names from type names. For convenience, constants are
reordered ahead of other declarations, in the order that they were
originally defined.
:returns: text for ROS MD5-processing, ``str``
"""
package = spec.package
buff = StringIO()
for c in spec.constants:
buff.write("%s %s=%s\n"%(c.type, c.name, c.val_text))
for type_, name in zip(spec.types, spec.names):
msg_type = bare_msg_type(type_)
# md5 spec strips package names
if is_builtin(msg_type):
buff.write("%s %s\n"%(type_, name))
else:
# recursively generate md5 for subtype. have to build up
# dependency representation for subtype in order to
# generate md5
sub_pkg, _ = names.package_resource_name(msg_type)
sub_pkg = sub_pkg or package
sub_spec = msg_context.get_registered(msg_type)
sub_md5 = compute_md5(msg_context, sub_spec)
buff.write("%s %s\n"%(sub_md5, name))
return buff.getvalue().strip() # remove trailing new line
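# For example (sketch only), a message containing a constant `int32 X=123` and
# a field `geometry_msgs/Point position` would contribute lines roughly like:
#   int32 X=123
#   <md5-of-geometry_msgs/Point> position
# while a builtin field such as `float64 x` is written through unchanged.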
def _compute_hash(msg_context, spec, hash):
"""
subroutine of compute_md5()
:param msg_context: :class:`MsgContext` instance to load dependencies into/from.
:param spec: :class:`MsgSpec` to compute hash for.
:param hash: hash instance
"""
# accumulate the hash
# - root file
if isinstance(spec, MsgSpec):
hash.update(compute_md5_text(msg_context, spec).encode())
elif isinstance(spec, SrvSpec):
hash.update(compute_md5_text(msg_context, spec.request).encode())
hash.update(compute_md5_text(msg_context, spec.response).encode())
else:
raise Exception("[%s] is not a message or service"%spec)
return hash.hexdigest()
def compute_md5(msg_context, spec):
"""
Compute md5 hash for message/service
:param msg_context: :class:`MsgContext` instance to load dependencies into/from.
:param spec: :class:`MsgSpec` to compute md5 for.
:returns: md5 hash, ``str``
"""
return _compute_hash(msg_context, spec, hashlib.md5())
## alias
compute_md5_v2 = compute_md5
def _unique_deps(dep_list):
uniques = []
for d in dep_list:
if d not in uniques:
uniques.append(d)
return uniques
def compute_full_text(msg_context, spec):
"""
Compute full text of message/service, including text of embedded
types. The text of the main msg/srv is listed first. Embedded
msg/srv files are denoted first by an 80-character '=' separator,
followed by a type declaration line,'MSG: pkg/type', followed by
the text of the embedded type.
:param msg_context: :class:`MsgContext` instance to load dependencies into/from.
:param spec: :class:`MsgSpec` to compute full text for.
:returns: concatenated text for msg/srv file and embedded msg/srv types, ``str``
"""
buff = StringIO()
sep = '='*80+'\n'
# write the text of the top-level type
buff.write(spec.text)
buff.write('\n')
# append the text of the dependencies (embedded types). Can't use set() as we have to preserve order.
for d in _unique_deps(msg_context.get_all_depends(spec.full_name)):
buff.write(sep)
buff.write("MSG: %s\n"%d)
buff.write(msg_context.get_registered(d).text)
buff.write('\n')
# #1168: remove the trailing \n separator that is added by the concatenation logic
return buff.getvalue()[:-1]
def compute_full_type_name(package_name, file_name):
"""
Compute the full type name of message/service 'pkg/type'.
:param package_name: name of package file is in, ``str``
:file_name: name of the msg or srv file, ``str``
:returns: typename in format 'pkg/type'
:raises: :exc:`MsgGenerationException` if file_name ends with an unknown file extension
"""
# strip extension
for ext in (base.EXT_MSG, base.EXT_SRV):
if file_name.endswith(ext):
short_name = file_name[:-len(ext)]
break
else:
raise base.MsgGenerationException("Processing file: '%s' - unknown file extension"% (file_name))
return "%s/%s"%(package_name, short_name)
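# Quick illustration (assuming the conventional '.msg' extension registered in
# `base.EXT_MSG`):
#   compute_full_type_name('std_msgs', 'String.msg')  # -> 'std_msgs/String'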
|
mbrl/util/env.py | MansonX/mbrl-lib | 592 | 11077709 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from abc import ABC, abstractmethod
from typing import Dict, Optional, Tuple, Union, cast
import gym
import gym.wrappers
import hydra
import numpy as np
import omegaconf
import torch
import mbrl.planning
import mbrl.types
def _get_term_and_reward_fn(
cfg: Union[omegaconf.ListConfig, omegaconf.DictConfig],
) -> Tuple[mbrl.types.TermFnType, Optional[mbrl.types.RewardFnType]]:
import mbrl.env
term_fn = getattr(mbrl.env.termination_fns, cfg.overrides.term_fn)
if hasattr(cfg.overrides, "reward_fn") and cfg.overrides.reward_fn is not None:
reward_fn = getattr(mbrl.env.reward_fns, cfg.overrides.reward_fn)
else:
reward_fn = getattr(mbrl.env.reward_fns, cfg.overrides.term_fn, None)
return term_fn, reward_fn
def _handle_learned_rewards_and_seed(
cfg: Union[omegaconf.ListConfig, omegaconf.DictConfig],
env: gym.Env,
reward_fn: mbrl.types.RewardFnType,
) -> Tuple[gym.Env, mbrl.types.RewardFnType]:
if cfg.overrides.get("learned_rewards", True):
reward_fn = None
if cfg.seed is not None:
env.seed(cfg.seed)
env.observation_space.seed(cfg.seed + 1)
env.action_space.seed(cfg.seed + 2)
return env, reward_fn
def _legacy_make_env(
cfg: Union[omegaconf.ListConfig, omegaconf.DictConfig],
) -> Tuple[gym.Env, mbrl.types.TermFnType, Optional[mbrl.types.RewardFnType]]:
if "dmcontrol___" in cfg.overrides.env:
import mbrl.third_party.dmc2gym as dmc2gym
domain, task = cfg.overrides.env.split("___")[1].split("--")
term_fn, reward_fn = _get_term_and_reward_fn(cfg)
env = dmc2gym.make(domain_name=domain, task_name=task)
elif "gym___" in cfg.overrides.env:
env = gym.make(cfg.overrides.env.split("___")[1])
term_fn, reward_fn = _get_term_and_reward_fn(cfg)
else:
import mbrl.env.mujoco_envs
if cfg.overrides.env == "cartpole_continuous":
env = mbrl.env.cartpole_continuous.CartPoleEnv()
term_fn = mbrl.env.termination_fns.cartpole
reward_fn = mbrl.env.reward_fns.cartpole
elif cfg.overrides.env == "cartpole_pets_version":
env = mbrl.env.mujoco_envs.CartPoleEnv()
term_fn = mbrl.env.termination_fns.no_termination
reward_fn = mbrl.env.reward_fns.cartpole_pets
elif cfg.overrides.env == "pets_halfcheetah":
env = mbrl.env.mujoco_envs.HalfCheetahEnv()
term_fn = mbrl.env.termination_fns.no_termination
reward_fn = getattr(mbrl.env.reward_fns, "halfcheetah", None)
elif cfg.overrides.env == "pets_reacher":
env = mbrl.env.mujoco_envs.Reacher3DEnv()
term_fn = mbrl.env.termination_fns.no_termination
reward_fn = None
elif cfg.overrides.env == "pets_pusher":
env = mbrl.env.mujoco_envs.PusherEnv()
term_fn = mbrl.env.termination_fns.no_termination
reward_fn = mbrl.env.reward_fns.pusher
elif cfg.overrides.env == "ant_truncated_obs":
env = mbrl.env.mujoco_envs.AntTruncatedObsEnv()
term_fn = mbrl.env.termination_fns.ant
reward_fn = None
elif cfg.overrides.env == "humanoid_truncated_obs":
env = mbrl.env.mujoco_envs.HumanoidTruncatedObsEnv()
term_fn = mbrl.env.termination_fns.ant
reward_fn = None
else:
raise ValueError("Invalid environment string.")
env = gym.wrappers.TimeLimit(
env, max_episode_steps=cfg.overrides.get("trial_length", 1000)
)
env, reward_fn = _handle_learned_rewards_and_seed(cfg, env, reward_fn)
return env, term_fn, reward_fn
class Freeze(ABC):
"""Abstract base class for freezing various gym backends"""
def __enter__(self, env):
raise NotImplementedError
def __exit__(self, env):
raise NotImplementedError
class EnvHandler(ABC):
"""Abstract base class for handling various gym backends
Subclasses of EnvHandler should define an associated Freeze subclass
and override self.freeze with that subclass
"""
freeze = Freeze
@staticmethod
@abstractmethod
def is_correct_env_type(env: gym.wrappers.TimeLimit) -> bool:
"""Checks that the env being handled is of the correct type"""
raise NotImplementedError
@staticmethod
def make_env(
cfg: Union[Dict, omegaconf.ListConfig, omegaconf.DictConfig],
) -> Tuple[gym.Env, mbrl.types.TermFnType, Optional[mbrl.types.RewardFnType]]:
"""Creates an environment from a given OmegaConf configuration object.
This method expects the configuration, ``cfg``,
to have the following attributes (some are optional):
- If ``cfg.overrides.env_cfg`` is present, this method
instantiates the environment using `hydra.utils.instantiate(env_cfg)`.
Otherwise, it expects attribute ``cfg.overrides.env``, which should be a
string description of the environment where valid options are:
- "dmcontrol___<domain>--<task>": a Deep-Mind Control suite environment
            with the indicated domain and task (e.g., "dmcontrol___cheetah--run").
- "gym___<env_name>": a Gym environment (e.g., "gym___HalfCheetah-v2").
- "cartpole_continuous": a continuous version of gym's Cartpole environment.
- "pets_halfcheetah": the implementation of HalfCheetah used in Chua et al.,
PETS paper.
- "ant_truncated_obs": the implementation of Ant environment used in Janner et al.,
MBPO paper.
- "humanoid_truncated_obs": the implementation of Humanoid environment used in
Janner et al., MBPO paper.
- ``cfg.overrides.term_fn``: (only for dmcontrol and gym environments) a string
indicating the environment's termination function to use when simulating the
environment with the model. It should correspond to the name of a function in
:mod:`mbrl.env.termination_fns`.
- ``cfg.overrides.reward_fn``: (only for dmcontrol and gym environments)
a string indicating the environment's reward function to use when simulating the
environment with the model. If not present, it will try to use
``cfg.overrides.term_fn``.
If that's not present either, it will return a ``None`` reward function.
If provided, it should correspond to the name of a function in
:mod:`mbrl.env.reward_fns`.
- ``cfg.overrides.learned_rewards``: (optional) if present indicates that
the reward function will be learned, in which case the method will return
a ``None`` reward function.
        - ``cfg.overrides.trial_length``: (optional) if present, indicates the maximum length
of trials. Defaults to 1000.
Args:
cfg (omegaconf.DictConf): the configuration to use.
Returns:
(tuple of env, termination function, reward function): returns the new environment,
the termination function to use, and the reward function to use (or ``None`` if
``cfg.learned_rewards == True``).
"""
# Handle the case where cfg is a dict
cfg = omegaconf.OmegaConf.create(cfg)
env_cfg = cfg.overrides.get("env_cfg", None)
if env_cfg is None:
return _legacy_make_env(cfg)
env = hydra.utils.instantiate(cfg.overrides.env_cfg)
env = gym.wrappers.TimeLimit(
env, max_episode_steps=cfg.overrides.get("trial_length", 1000)
)
term_fn, reward_fn = _get_term_and_reward_fn(cfg)
env, reward_fn = _handle_learned_rewards_and_seed(cfg, env, reward_fn)
return env, term_fn, reward_fn
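    # Rough sketch of a matching configuration (field names follow the
    # docstring above; the exact values are illustrative only):
    #
    #   cfg = omegaconf.OmegaConf.create({
    #       "seed": 0,
    #       "overrides": {
    #           "env": "gym___HalfCheetah-v2",
    #           "term_fn": "no_termination",
    #           "learned_rewards": True,
    #           "trial_length": 1000,
    #       },
    #   })
    #   env, term_fn, reward_fn = EnvHandler.make_env(cfg)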
@staticmethod
@abstractmethod
def make_env_from_str(env_name: str) -> gym.Env:
"""Creates a new environment from its string description.
Args:
env_name (str): the string description of the environment.
Returns:
(gym.Env): the created environment.
"""
raise NotImplementedError
@staticmethod
@abstractmethod
def get_current_state(env: gym.wrappers.TimeLimit) -> Tuple:
"""Returns the internal state of the environment.
        Returns a tuple with information that can be passed to :func:`set_env_state` to manually
set the environment (or a copy of it) to the same state it had when this function was
called.
Args:
env (:class:`gym.wrappers.TimeLimit`): the environment.
"""
raise NotImplementedError
@staticmethod
@abstractmethod
def set_env_state(state: Tuple, env: gym.wrappers.TimeLimit):
"""Sets the state of the environment.
Assumes ``state`` was generated using :func:`get_current_state`.
Args:
state (tuple): see :func:`get_current_state` for a description.
env (:class:`gym.wrappers.TimeLimit`): the environment.
"""
raise NotImplementedError
def rollout_env(
self,
env: gym.wrappers.TimeLimit,
initial_obs: np.ndarray,
lookahead: int,
agent: Optional[mbrl.planning.Agent] = None,
plan: Optional[np.ndarray] = None,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Runs the environment for some number of steps then returns it to its original state.
Works with mujoco gym and dm_control environments
(with `dmc2gym <https://github.com/denisyarats/dmc2gym>`_).
Args:
env (:class:`gym.wrappers.TimeLimit`): the environment.
initial_obs (np.ndarray): the latest observation returned by the environment (only
needed when ``agent is not None``, to get the first action).
lookahead (int): the number of steps to run. If ``plan is not None``,
it is overridden by `len(plan)`.
agent (:class:`mbrl.planning.Agent`, optional): if given, an agent to obtain actions.
plan (sequence of np.ndarray, optional): if given, a sequence of actions to execute.
Takes precedence over ``agent`` when both are given.
Returns:
(tuple of np.ndarray): the observations, rewards, and actions observed, respectively.
"""
actions = []
real_obses = []
rewards = []
with self.freeze(cast(gym.wrappers.TimeLimit, env)): # type: ignore
current_obs = initial_obs.copy()
real_obses.append(current_obs)
if plan is not None:
lookahead = len(plan)
for i in range(lookahead):
a = plan[i] if plan is not None else agent.act(current_obs)
if isinstance(a, torch.Tensor):
a = a.numpy()
next_obs, reward, done, _ = env.step(a)
actions.append(a)
real_obses.append(next_obs)
rewards.append(reward)
if done:
break
current_obs = next_obs
return np.stack(real_obses), np.stack(rewards), np.stack(actions)
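    # Typical call pattern (sketch; `handler` is a concrete EnvHandler subclass
    # and `agent` any mbrl.planning.Agent):
    #
    #   obs = env.reset()
    #   obses, rewards, actions = handler.rollout_env(env, obs, lookahead=10, agent=agent)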
|
sdk/python/tests/compiler/testdata/recur_cond.py | shrivs3/kfp-tekton | 102 | 11077718 | # Copyright 2021 kubeflow.org
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from kfp import dsl, components
from kfp_tekton.tekton import CEL_ConditionOp
from kfp_tekton.compiler import TektonCompiler
class Coder:
def empty(self):
return ""
TektonCompiler._get_unique_id_code = Coder.empty
print_op = components.load_component_from_text("""
name: print-iter
description: print msg
inputs:
- {name: msg, type: String}
outputs:
- {name: stdout, type: String}
implementation:
container:
image: alpine:3.6
command:
- sh
- -c
args:
- |
echo $0 > $1
- {inputValue: msg}
- {outputPath: stdout}
""")
@dsl.graph_component
def recur(i: int):
decr_i = CEL_ConditionOp(f"{i} - 1").output
print_op(f"Iter: {decr_i}")
with dsl.Condition(decr_i != 0):
recur(decr_i)
@dsl.pipeline("recur-and-condition")
def recur_and_condition(iter_num: int = 42):
recur(iter_num)
if __name__ == '__main__':
from kfp_tekton.compiler import TektonCompiler as Compiler
Compiler().compile(recur_and_condition, __file__.replace('.py', '.yaml'))
|
kAFL-Fuzzer/fuzzer/technique/redqueen/hash_fix.py | SafeBreach-Labs/hAFL2 | 102 | 11077741 | # Copyright (C) 2017-2019 <NAME>, <NAME>, <NAME>
# Copyright (C) 2019-2020 Intel Corporation
#
# SPDX-License-Identifier: AGPL-3.0-or-later
"""
Redqueen Checksum Fixer
"""
import traceback
from array import array
from common.debug import log_redq
from . import parser  # needed for parser.read_file / parser.RedqueenInfo below
from .parser import RedqueenRunInfo
from .cmp import Cmp
MAX_NUMBER_PERMUTATIONS = 100000
class HashFixer:
def __init__(self, qemu, rq_state):
self.qemu = qemu
self.addrs = rq_state.get_candidate_hash_addrs()
self.redqueen_state = rq_state
self.blacklisted_addrs = set()
def get_broken_cmps(self, data):
broken_cmps = []
res, run_info = self.get_cmps(data)
for addr in res:
for cmp in res[addr]:
if not cmp.was_true_in(run_info) and not cmp.addr in self.blacklisted_addrs:
broken_cmps.append(cmp)
return broken_cmps, run_info
def get_cmps(self, data):
# log_redq("runnning on %s"%repr("".join( map(chr, data) )) )
self.qemu.set_payload(data)
# self.qemu.send_enable_patches()
log_redq("hashfix run in rq mode")
self.qemu.send_rq_set_whitelist_instrumentation()
self.qemu.send_enable_redqueen()
self.qemu.send_payload(timeout_detection=True, apply_patches=True)
log_redq("hashfix run in non rq mode")
self.qemu.send_disable_redqueen()
self.qemu.send_payload(timeout_detection=True, apply_patches=True)
log_redq("hashfix done running, now parsing")
res = self.parse_redqueen_results(data)
log_redq("hashfix done parsing")
return res
def mark_unfixable(self, cmp):
log_redq("Unfixable cmp at: %x" % cmp.addr)
self.blacklisted_addrs.add(cmp.addr)
self.redqueen_state.blacklist_hash_addr(cmp.addr)
def get_shape(self, redqueen_results):
res = {}
for addr in redqueen_results:
res[addr] = len(redqueen_results[addr])
return res
def try_fix_data(self, data):
self.qemu.send_payload(timeout_detection=True, apply_patches=False)
self.qemu.send_payload(timeout_detection=True, apply_patches=True)
log_redq("PATCHES %s\n" % repr(list(map(hex, self.redqueen_state.get_candidate_hash_addrs()))))
log_redq("BLACKLIST %s\n" % repr(list(map(hex, self.redqueen_state.get_blacklisted_hash_addrs()))))
self.redqueen_state.update_redqueen_patches(self.qemu.redqueen_workdir)
self.redqueen_state.update_redqueen_whitelist(self.qemu.redqueen_workdir,
self.redqueen_state.get_candidate_hash_addrs())
fixed_data = array('B', data)
orig_cmps, _ = self.get_cmps(fixed_data)
shape = self.get_shape(orig_cmps)
log_redq("shape of hashes: ")
for addr in shape:
log_redq("\t%x: %d" % (addr, shape[addr]))
if len(shape) == 0:
return fixed_data
num_iters = min(len(orig_cmps) ** 2 + 1, len(orig_cmps) * 3 + 1)
num_cmps = sum(shape.values()) + 1
if num_iters < num_cmps:
num_iters = num_cmps
log_redq("try fixing for %d iters" % num_iters)
for i in range(num_iters):
broken_checks, run_info = self.get_broken_cmps(fixed_data)
log_redq("got %d broken checks\n" % len(broken_checks))
if not broken_checks:
return fixed_data
            cmp = broken_checks.pop(-1)
if not self.try_fix_cmp(shape, fixed_data, run_info, cmp):
log_redq("cmp at %x unfixable:" % cmp.addr)
self.mark_unfixable(cmp)
broken_checks, run_info = self.get_broken_cmps(fixed_data)
for cmp in broken_checks:
self.mark_unfixable(cmp)
return False
def parse_redqueen_results(self, data):
res = {}
rq_res = parser.read_file(self.qemu.redqueen_workdir.redqueen())
data_string = "".join(map(chr, data))
run_info = RedqueenRunInfo(1, False, rq_res, data_string)
for line in run_info.hook_info.splitlines():
addr, type, size, is_imm, lhs, rhs = parser.RedqueenInfo.parse_line(line)
assert (type == "CMP")
res[addr] = res.get(addr, [])
cmp = Cmp(addr, type, size, is_imm)
cmp.index = len(res[addr])
res[addr].append(cmp)
cmp.add_result(run_info, lhs, rhs)
return res, run_info
@staticmethod
def replace_data(data, offset, repl):
for o in range(len(repl)):
data[offset + o] = repl[o]
def try_fix_cmp_with(self, shape, fixed_data, cmp, offsets, lhs, rhs, enc):
log_redq("Trying mutation %s" % (repr((offsets, lhs, rhs, enc))))
if list(map(len, lhs)) != list(map(len, rhs)):
return False
self.redqueen_state.update_redqueen_whitelist(self.qemu.redqueen_workdir,
self.redqueen_state.get_candidate_hash_addrs())
try:
if self.try_fix_cmp_offset(shape, fixed_data, cmp, offsets, rhs):
log_redq("Mutation fixed it")
return True
log_redq("Mutation didn't Fix it")
return False
except Exception as e:
log_redq("fixing hash failed %s" % traceback.format_exc())
raise e
def try_fix_cmp(self, shape, fixed_data, run_info, cmp):
known_offsets = self.redqueen_state.get_candidate_file_offsets(cmp.addr)
log_redq("known offsets for: %x = %s" % (cmp.addr, known_offsets))
mutations = [x for x in cmp.calc_mutations(run_info, 1)]
for (offsets, lhs, rhs, enc) in cmp.calc_mutations(run_info, 1):
if offsets in known_offsets:
if self.try_fix_cmp_with(shape, fixed_data, cmp, offsets, lhs, rhs, enc):
return True
for (offsets, lhs, rhs, enc) in cmp.calc_mutations(run_info, 1):
if not offsets in known_offsets:
if self.try_fix_cmp_with(shape, fixed_data, cmp, offsets, lhs, rhs, enc):
return True
return False
def does_data_fix_cmp(self, shape, data, cmp):
res, run_info = self.get_cmps(data)
return shape == self.get_shape(res) and res[cmp.addr][cmp.index].was_true_in(run_info)
def try_fix_cmp_offset(self, shape, data, cmp, offsets, repls):
# try:
backup = {}
for i, repl in zip(offsets, repls):
backup[i] = data[i:i + len(repl)]
HashFixer.replace_data(data, i, array('B', repl))
if self.does_data_fix_cmp(shape, data, cmp):
log_redq("found candidate offset %x %s" % (cmp.addr, repr(offsets)))
self.redqueen_state.add_candidate_file_offset(cmp.addr, tuple(offsets))
return True
for i in offsets:
HashFixer.replace_data(data, i, backup[i])
return False
# except Exception as e:
# log_redq("failed to fix %s with %s"%(cmp.addr,(offset_tuples,repls)) )
# raise e
|
datasets/nsmc/nsmc.py | WojciechKusa/datasets | 10,608 | 11077745 | # coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Naver movie review corpus for binary sentiment classification"""
import csv
import datasets
_CITATION = """\
@InProceedings{Park:2016,
title = "Naver Sentiment Movie Corpus",
author = "<NAME>",
year = "2016",
howpublished = {\\url{https://github.com/e9t/nsmc}}
}
"""
_DESCRIPTION = """\
This is a movie review dataset in the Korean language. Reviews were scraped from Naver movies. The dataset construction is based on the method noted in Large movie review dataset from Maas et al., 2011.
"""
_HOMEPAGE = "https://github.com/e9t/nsmc/"
_LICENSE = "CC0 1.0 Universal (CC0 1.0)"
_URL = "https://raw.githubusercontent.com/e9t/nsmc/master/"
_URLs = {
"train": _URL + "ratings_train.txt",
"test": _URL + "ratings_test.txt",
}
# TODO: Name of the dataset usually match the script name with CamelCase instead of snake_case
class NSMC(datasets.GeneratorBasedBuilder):
"""Korean Naver movie review dataset."""
VERSION = datasets.Version("1.1.0")
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"id": datasets.Value("string"),
"document": datasets.Value("string"),
"label": datasets.ClassLabel(names=["negative", "positive"]),
}
),
supervised_keys=None,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
downloaded_files = dl_manager.download_and_extract(_URLs)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"filepath": downloaded_files["train"],
"split": "train",
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"filepath": downloaded_files["test"],
"split": "test",
},
),
]
def _generate_examples(self, filepath, split):
with open(filepath, encoding="utf-8") as f:
next(f)
reader = csv.reader(f, delimiter="\t")
for id_, row in enumerate(reader):
yield id_, {
"id": row[0],
"document": row[1],
"label": int(row[2]),
}
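# Once this script is available to the `datasets` library (e.g. on the Hub under
# the public "nsmc" id), loading it is a one-liner; a sketch:
#
#   import datasets
#   ds = datasets.load_dataset("nsmc")
#   print(ds["train"][0])  # {'id': ..., 'document': ..., 'label': 0 or 1}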
|
Algorithm.Python/ShortableProviderOrdersRejectedRegressionAlgorithm.py | BlackBoxAM/Lean | 6,580 | 11077779 | ### QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
### Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
###
### Licensed under the Apache License, Version 2.0 (the "License");
### you may not use this file except in compliance with the License.
### You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
###
### Unless required by applicable law or agreed to in writing, software
### distributed under the License is distributed on an "AS IS" BASIS,
### WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
### See the License for the specific language governing permissions and
### limitations under the License.
from AlgorithmImports import *
class RegressionTestShortableBrokerageModel(DefaultBrokerageModel):
def __init__(self):
self.ShortableProvider = LocalDiskShortableProvider(SecurityType.Equity, "testbrokerage", Market.USA)
### <summary>
### Tests that orders are denied if they exceed the max shortable quantity.
### </summary>
class ShortableProviderOrdersRejectedRegressionAlgorithm(QCAlgorithm):
def Initialize(self):
self.ordersAllowed = []
self.ordersDenied = []
self.initialize = False
self.invalidatedAllowedOrder = False
self.invalidatedNewOrderWithPortfolioHoldings = False
self.SetStartDate(2013, 10, 4)
self.SetEndDate(2013, 10, 11)
self.SetCash(10000000)
self.spy = self.AddEquity("SPY", Resolution.Minute).Symbol
self.aig = self.AddEquity("AIG", Resolution.Minute).Symbol
self.SetBrokerageModel(RegressionTestShortableBrokerageModel())
def OnData(self, data):
if not self.initialize:
self.HandleOrder(self.LimitOrder(self.spy, -1001, 10000)) # Should be canceled, exceeds the max shortable quantity
self.HandleOrder(self.LimitOrder(self.spy, -1000, 10000)) # Allowed, orders at or below 1000 should be accepted
self.HandleOrder(self.LimitOrder(self.spy, -10, 0.01)) # Should be canceled, the total quantity we would be short would exceed the max shortable quantity.
self.initialize = True
return
if not self.invalidatedAllowedOrder:
if len(self.ordersAllowed) != 1:
raise Exception(f"Expected 1 successful order, found: {len(self.ordersAllowed)}")
if len(self.ordersDenied) != 2:
raise Exception(f"Expected 2 failed orders, found: {len(self.ordersDenied)}")
allowedOrder = self.ordersAllowed[0]
orderUpdate = UpdateOrderFields()
orderUpdate.LimitPrice = 0.01
orderUpdate.Quantity = -1001
orderUpdate.Tag = "Testing updating and exceeding maximum quantity"
response = allowedOrder.Update(orderUpdate)
if response.ErrorCode != OrderResponseErrorCode.ExceedsShortableQuantity:
raise Exception(f"Expected order to fail due to exceeded shortable quantity, found: {response.ErrorCode}")
cancelResponse = allowedOrder.Cancel()
if cancelResponse.IsError:
raise Exception("Expected to be able to cancel open order after bad qty update")
self.invalidatedAllowedOrder = True
self.ordersDenied.clear()
self.ordersAllowed.clear()
return
if not self.invalidatedNewOrderWithPortfolioHoldings:
self.HandleOrder(self.MarketOrder(self.spy, -1000)) # Should succeed, no holdings and no open orders to stop this
spyShares = self.Portfolio[self.spy].Quantity
if spyShares != -1000:
raise Exception(f"Expected -1000 shares in portfolio, found: {spyShares}")
self.HandleOrder(self.LimitOrder(self.spy, -1, 0.01)) # Should fail, portfolio holdings are at the max shortable quantity.
if len(self.ordersDenied) != 1:
raise Exception(f"Expected limit order to fail due to existing holdings, but found {len(self.ordersDenied)} failures")
self.ordersAllowed.clear()
self.ordersDenied.clear()
self.HandleOrder(self.MarketOrder(self.aig, -1001))
if len(self.ordersAllowed) != 1:
            raise Exception(f"Expected market order of -1001 AIG to not fail")
self.invalidatedNewOrderWithPortfolioHoldings = True
def HandleOrder(self, orderTicket):
if orderTicket.SubmitRequest.Status == OrderRequestStatus.Error:
self.ordersDenied.append(orderTicket)
return
self.ordersAllowed.append(orderTicket)
|
tools/code-animation/main.py | lautalom/QuanticDev | 141 | 11077794 | from manim import *
"""
$ manim code.py/js -p
-p: preview: open video file after rendering
"""
def highlight_line(code: Code, line_from, line_to):
if not line_to:
line_to = line_from
lines = range(line_from, line_to)
return [ApplyMethod(code.code[line_no].set_opacity, 1 if line_no in lines else .3) for line_no in range(len(code.code))]
def create_code(code_src: str, ext: str = 'py', scale_factor: float = .7, insert_line_no=False):
file = f'code.{ext}'
with open(file, 'w') as f:
f.write(code_src.replace("""
""", """
""")) # hack: not to get newlines trimmed by LaTex renderer, we replace '\n' with '\n '
code = Code(file, scale_factor=scale_factor, insert_line_no=insert_line_no)
with open(file, 'w') as f:
f.write('')
return code
# https://docs.manim.community/en/stable/reference.html
class AnimatedCode(Scene):
config['pixel_height'] = 2160
config['pixel_width'] = 3840
def animation_template(self):
title = Tex('Title')
title.to_corner(UP + LEFT)
        self.play(Write(title))
self.clear()
code = create_code("""xxxxxx""")
self.play(ShowCreation(code, run_time=10, rate_func=linear))
self.wait(5)
self.play(*highlight_line(code, 2, 6))
self.wait(5)
self.clear()
code2 = create_code("""xxxxxx""")
self.play(Transform(code, code2, run_time=2, rate_func=linear))
self.wait(5)
def construct(self):
code = create_code("""db.connect('postgresql://localhost/testdb')
db.initialize()
user = db.create_new_user('<NAME>', '<EMAIL>')
user.give_permission('create_blog_post')
post = user.new_blog_post('Test Blog Post Title', 'Lorem ipsum dolor sit amet...')
post.upload_thumbnail('./test_thumbnail.jpg')
post.publish()""")
self.play(ShowCreation(code, run_time=10, rate_func=linear))
self.wait(5)
self.clear()
code2 = create_code("""db.connect('postgresql://localhost/testdb') \\
.initialize() \\
.create_new_user('<NAME>', '<EMAIL>') \\
.give_permission('create_blog_post') \\
.new_blog_post('Test Blog Post Title', 'Lorem ipsum dolor sit amet...') \\
.upload_thumbnail('./test_thumbnail.jpg') \\
.publish()""")
self.play(Transform(code, code2, run_time=2, rate_func=linear))
self.wait(5)
self.clear()
code = create_code("""db.connect('postgresql://localhost/testdb')
db.initialize()""")
self.play(ShowCreation(code, run_time=5))
self.wait(5)
self.clear()
code2 = create_code("""db.connect('postgresql://localhost/testdb').initialize()""")
self.play(Transform(code, code2, run_time=2, rate_func=linear))
self.wait(5)
self.clear()
code3 = create_code("""db.connect('postgresql://localhost/testdb') \\
.initialize() \\
.close()""")
self.play(Transform(code2, code3, run_time=2, rate_func=linear))
self.wait(5)
self.clear()
code = create_code("""class DB:
def connect(self, url):
pass
def initialize(self):
pass
def close(self):
pass""")
self.play(ShowCreation(code, run_time=10, rate_func=linear))
self.wait(5)
self.clear()
code2 = create_code("""class DB:
def connect(self, url):
# todo
return self
def initialize(self):
# todo
return self
def close(self):
# todo
return self""")
self.play(Transform(code, code2, run_time=2, rate_func=linear))
self.wait(5)
self.clear()
code = create_code("""db.connect('postgresql://localhost/testdb')
log.info('Initializing the database.')
db.initialize()""")
self.play(ShowCreation(code, run_time=10, rate_func=linear))
self.wait(5)
self.clear()
code2 = create_code("""db.connect('postgresql://localhost/testdb') \\
.log_info('Initializing the database.') \\
.initialize()""")
self.play(Transform(code, code2, run_time=2, rate_func=linear))
self.wait(5)
self.clear()
code = create_code("""subscribers
.filter(p => p.liked_the_video === true)
.sort(p => p.name)
.pat_on_the_back()""", ext='js')
self.play(ShowCreation(code, run_time=10, rate_func=linear))
self.wait(5)
|
test/test_custom_loss.py | ignatovmg/mhcflurry | 113 | 11077824 | import logging
logging.getLogger('tensorflow').disabled = True
logging.getLogger('matplotlib').disabled = True
from nose.tools import eq_, assert_less, assert_greater, assert_almost_equal
import numpy
numpy.random.seed(0)
import tensorflow.keras.backend as K
from mhcflurry.custom_loss import CUSTOM_LOSSES, MultiallelicMassSpecLoss
from mhcflurry.testing_utils import cleanup, startup
teardown = cleanup
setup = startup
def evaluate_loss(loss, y_true, y_pred):
y_true = numpy.array(y_true)
y_pred = numpy.array(y_pred)
if y_pred.ndim == 1:
y_pred = y_pred.reshape((len(y_pred), 1))
if y_true.ndim == 1:
y_true = y_true.reshape((len(y_true), 1))
assert y_true.ndim == 2
assert y_pred.ndim == 2
assert K.backend() == "tensorflow"
import tensorflow.compat.v1 as v1
v1.disable_eager_execution()
session = v1.keras.backend.get_session()
y_true_var = K.constant(y_true, name="y_true")
y_pred_var = K.constant(y_pred, name="y_pred")
result = loss(y_true_var, y_pred_var)
return result.eval(session=session)
def test_mse_with_inequalities(loss_obj=CUSTOM_LOSSES['mse_with_inequalities']):
y_values = [0.0, 0.5, 0.8, 1.0]
adjusted_y = loss_obj.encode_y(y_values)
print(adjusted_y)
loss0 = evaluate_loss(loss_obj.loss, adjusted_y, y_values)
print(loss0)
eq_(loss0, 0.0)
adjusted_y = loss_obj.encode_y(y_values, [">", ">", ">", ">"])
loss0 = evaluate_loss(loss_obj.loss, adjusted_y, y_values)
eq_(loss0, 0.0)
adjusted_y = loss_obj.encode_y(y_values, ["<", "<", "<", "<"])
loss0 = evaluate_loss(loss_obj.loss, adjusted_y, y_values)
eq_(loss0, 0.0)
adjusted_y = loss_obj.encode_y(y_values, ["=", "<", "=", ">"])
loss0 = evaluate_loss(loss_obj.loss, adjusted_y, y_values)
eq_(loss0, 0.0)
adjusted_y = loss_obj.encode_y(y_values, ["=", "<", "=", ">"])
loss0 = evaluate_loss(loss_obj.loss, adjusted_y, [0.0, 0.4, 0.8, 1.0])
eq_(loss0, 0.0)
adjusted_y = loss_obj.encode_y(y_values, [">", "<", ">", ">"])
loss0 = evaluate_loss(loss_obj.loss, adjusted_y, [0.1, 0.4, 0.9, 1.0])
eq_(loss0, 0.0)
adjusted_y = loss_obj.encode_y(y_values, [">", "<", ">", ">"])
loss0 = evaluate_loss(loss_obj.loss, adjusted_y, [0.1, 0.6, 0.9, 1.0])
assert_greater(loss0, 0.0)
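    # The next two cases combine an exact ('=') residual with a violated '<'
    # constraint: (0.1 - 0.0)**2 + (0.6 - 0.5)**2 = 0.02, averaged over 4 outputs,
    # and switching index 2 to '=' adds (0.9 - 0.8)**2 for a total of 0.03 / 4.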
adjusted_y = loss_obj.encode_y(y_values, ["=", "<", ">", ">"])
loss0 = evaluate_loss(loss_obj.loss, adjusted_y, [0.1, 0.6, 0.9, 1.0])
assert_almost_equal(loss0, 0.02 / 4)
adjusted_y = loss_obj.encode_y(y_values, ["=", "<", "=", ">"])
loss0 = evaluate_loss(loss_obj.loss, adjusted_y, [0.1, 0.6, 0.9, 1.0])
assert_almost_equal(loss0, 0.03 / 4)
def test_mse_with_inequalities_and_multiple_outputs():
loss_obj = CUSTOM_LOSSES['mse_with_inequalities_and_multiple_outputs']
test_mse_with_inequalities(loss_obj)
y_values = [0.0, 0.5, 0.8, 1.0]
adjusted_y = loss_obj.encode_y(
y_values, output_indices=[0, 1, 1, 1])
loss0 = evaluate_loss(
loss_obj.loss,
adjusted_y,
[
[0.0, 1000],
[2000, 0.5],
[3000, 0.8],
[4000, 1.0],
])
assert_almost_equal(loss0, 0.0)
y_values = [0.0, 0.5, 0.8, 1.0]
adjusted_y = loss_obj.encode_y(
y_values, output_indices=[0, 1, 1, 0])
loss0 = evaluate_loss(
loss_obj.loss,
adjusted_y,
[
[0.1, 1000],
[2000, 0.6],
[3000, 0.8],
[1.0, 4000],
])
assert_almost_equal(loss0, 0.02 / 4)
y_values = [0.0, 0.5, 0.8, 1.0]
adjusted_y = loss_obj.encode_y(
y_values, output_indices=[0, 1, 1, 0], inequalities=["=", ">", "<", "<"])
loss0 = evaluate_loss(
loss_obj.loss,
adjusted_y,
[
[0.1, 1000],
[2000, 0.6],
[3000, 0.8],
[1.0, 4000],
])
assert_almost_equal(loss0, 0.01 / 4)
y_values = [0.0, 0.5, 0.8, 1.0]
adjusted_y = loss_obj.encode_y(
y_values, output_indices=[0, 1, 1, 0], inequalities=["=", "<", "<", "<"])
loss0 = evaluate_loss(
loss_obj.loss,
adjusted_y,
[
[0.1, 1000],
[2000, 0.6],
[3000, 0.8],
[1.0, 4000],
])
assert_almost_equal(loss0, 0.02 / 4)
def test_multiallelic_mass_spec_loss():
for delta in [0.0, 0.3]:
print("delta", delta)
# Hit labels
y_true = [
1.0,
0.0,
1.0,
-1.0, # ignored
1.0,
0.0,
1.0,
]
y_true = numpy.array(y_true)
y_pred = [
[0.3, 0.7, 0.5],
[0.2, 0.4, 0.6],
[0.1, 0.5, 0.3],
[0.9, 0.1, 0.2],
[0.1, 0.7, 0.1],
[0.8, 0.2, 0.4],
[0.1, 0.2, 0.4],
]
y_pred = numpy.array(y_pred)
# reference implementation 1
def smooth_max(x, alpha):
x = numpy.array(x)
alpha = numpy.array([alpha])
return (x * numpy.exp(x * alpha)).sum() / (
numpy.exp(x * alpha)).sum()
contributions = []
for i in range(len(y_true)):
if y_true[i] == 1.0:
for j in range(len(y_true)):
if y_true[j] == 0.0:
tightest_i = max(y_pred[i])
for k in range(y_pred.shape[1]):
contribution = max(
0, y_pred[j, k] - tightest_i + delta)**2
contributions.append(contribution)
contributions = numpy.array(contributions)
expected1 = contributions.sum() / len(contributions)
# reference implementation 2: numpy
pos = numpy.array([
max(y_pred[i])
for i in range(len(y_pred))
if y_true[i] == 1.0
])
neg = y_pred[(y_true == 0.0).astype(bool)]
term = neg.reshape((-1, 1)) - pos + delta
expected2 = (
numpy.maximum(0, term)**2).sum() / (
len(pos) * neg.shape[0] * neg.shape[1])
numpy.testing.assert_almost_equal(expected1, expected2)
computed = evaluate_loss(
MultiallelicMassSpecLoss(delta=delta).loss,
y_true,
y_pred.reshape(y_pred.shape))
numpy.testing.assert_almost_equal(computed, expected1, 4)
|
atlas/foundations_events/src/test/consumers/jobs/queued/test_run_data_keys.py | DeepLearnI/atlas | 296 | 11077831 | <reponame>DeepLearnI/atlas<filename>atlas/foundations_events/src/test/consumers/jobs/queued/test_run_data_keys.py
import unittest
from mock import Mock
from foundations_events.consumers.jobs.queued.run_data_keys import RunDataKeys
class TestRunDataKeys(unittest.TestCase):
def setUp(self):
self._redis = Mock()
self._consumer = RunDataKeys(self._redis)
def test_call_saved_run_data_keys(self):
input_data = {'project_name': 'here be dragons',
'job_parameters': {'number_of_neurons': 3434}
}
self._consumer.call(input_data, None, None)
self._redis.sadd.assert_called_with('projects:here be dragons:job_parameter_names', 'number_of_neurons')
def test_call_saved_run_data_keys_different_keys(self):
input_data = {'project_name': 'here be dragons',
'job_parameters': {'hidden_layers': 7777}
}
self._consumer.call(input_data, None, None)
self._redis.sadd.assert_called_with('projects:here be dragons:job_parameter_names', 'hidden_layers')
def test_call_saved_run_data_keys_multiple_keys(self):
input_data = {'project_name': 'here be dragons',
'job_parameters': {'shown_layers': 7777, 'neurons': 33}
}
self._consumer.call(input_data, None, None)
self._redis.sadd.assert_any_call('projects:here be dragons:job_parameter_names', 'shown_layers')
self._redis.sadd.assert_any_call('projects:here be dragons:job_parameter_names', 'neurons')
def test_call_saved_run_data_keys_different_project_name(self):
input_data = {'project_name': 'here be sheep',
'job_parameters': {'hidden_layers': 7777}
}
self._consumer.call(input_data, None, None)
self._redis.sadd.assert_called_with('projects:here be sheep:job_parameter_names', 'hidden_layers')
|
packages/pytea/pylib/collections.py | Sehun0819/pytea | 241 | 11077865 | <filename>packages/pytea/pylib/collections.py<gh_stars>100-1000
import LibCall
def namedtuple(typename, field_names, **kwargs):
names = field_names
if isinstance(field_names, str):
names = []
LibCall.builtins.namedtuple_pushField(names, field_names)
rev_names = dict()
for i, name in enumerate(names):
rev_names[name] = i
class __TempTuple(tuple):
def __init__(self, *args, **kwargs):
for i, value in enumerate(args):
LibCall.builtins.setIndice(self, i, value)
LibCall.builtins.setAttr(self, names[i], value)
for key, value in kwargs.items():
LibCall.builtins.setIndice(self, rev_names[key], value)
LibCall.builtins.setAttr(self, key, value)
def __len__(self):
return len(names)
__TempTuple.__name__ = typename
return __TempTuple
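# Editor's sketch of the intended call pattern; note it only executes inside the
# PyTea interpreter, where the LibCall builtins used above are provided.
def _example_namedtuple_usage():
    Point = namedtuple("Point", ["x", "y"])
    p = Point(1, 2)
    return p.x, p[1]  # attribute and index access resolve to the same slots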
|
slybot/slybot/starturls/__init__.py | hackrush01/portia | 6,390 | 11077881 | <reponame>hackrush01/portia
from collections import OrderedDict as ODict
from itertools import chain, product
from scrapy.utils.spider import arg_to_iter
import six
from six.moves.urllib_parse import urlparse
from .feed_generator import FeedGenerator
from .fragment_generator import FragmentGenerator
from .generated_url import GeneratedUrl
from .generator import IdentityGenerator, UrlGenerator
class StartUrlCollection(object):
def __init__(self, start_urls, generators=None):
self.generators = generators or []
self.start_urls = [self._from_type(url) for url in start_urls]
def __iter__(self):
generated = (self._generate_urls(url) for url in self.start_urls)
for url in chain(*(arg_to_iter(g) for g in generated)):
yield url
def uniq(self):
return list(ODict([(s.key, s.spec) for s in self.start_urls]).values())
@property
def allowed_domains(self):
domains = [start_url.allowed_domains for start_url in self.start_urls]
return list(set(chain(*domains)))
def normalize(self):
return [start_url.normalized for start_url in self.start_urls]
def _generate_urls(self, start_url):
generator = self.generators[start_url.generator_type]
return generator(start_url.generator_value)
def _from_type(self, start_url):
if isinstance(start_url, six.string_types):
return StringUrl(start_url)
if start_url.get('paths') or start_url.get('template'):
return GeneratedUrl(start_url)
return StartUrl(start_url, self.generators)
class StartUrl(object):
def __init__(self, spec, generators):
self.spec = spec
self.generators = generators
self.generator_type = spec['type']
self.generator_value = self.spec if self._has_fragments else self.spec['url']
@property
def key(self):
fragments = self.spec.get('fragments', [])
fragment_values = [fragment['value'] for fragment in fragments]
return self.spec['url'] + ''.join(fragment_values)
@property
def allowed_domains(self):
if self._has_fragments:
return self._find_fragment_domains()
return [self.spec['url']]
@property
def normalized(self):
return self.spec
def _find_fragment_domains(self):
generator = self.generators[self.generator_type]
fragment_lists = list(generator.process_fragments(self.spec))
while len(fragment_lists) > 0:
fragment_list = fragment_lists.pop(0)
if all(self._has_domain(fragment) for fragment in fragment_list):
return fragment_list
if len(fragment_lists) == 0:
return []
augmented_first_fragments = product(fragment_list, fragment_lists[0])
fragment_lists[0] = self._join_fragments(augmented_first_fragments)
return []
def _join_fragments(self, fragments):
return [''.join([f, g]) for (f, g) in fragments]
def _has_domain(self, url):
parsed_url = urlparse(url)
methods = ['path', 'params', 'query', 'fragment']
return any(getattr(parsed_url, method) != '' for method in methods)
@property
def _has_fragments(self):
return self.spec.get('fragments')
class StringUrl(object):
def __init__(self, spec):
self.key = spec
self.spec = spec
self.generator_value = spec
self.generator_type = 'start_urls'
@property
def allowed_domains(self):
return [self.spec]
@property
def normalized(self):
return {'url': self.spec, 'type': 'url'}
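# Editor's sketch: plain string start URLs need no generators, so normalization
# and de-duplication can be exercised directly (the URLs below are hypothetical).
def _example_normalize_string_urls():
    collection = StartUrlCollection(['http://example.com/a', 'http://example.com/a'])
    return collection.normalize(), collection.uniq()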
|
applications/tensorflow/cnns/training/Models/resnet.py | payoto/graphcore_examples | 260 | 11077888 | <gh_stars>100-1000
# Copyright (c) 2019 Graphcore Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from . import resnet_base as rb
from .model_base import ModelBase
from functools import partial
class ResNet(rb.ResNetBase):
def __init__(self, opts, is_training=True):
if opts['dataset'] == 'imagenet':
definitions = {**rb.RESNETS_Imagenet, **rb.RESNETS_Bottleneck_Imagenet}
else:
definitions = {**rb.RESNETS_Cifar, **rb.RESNETS_Bottleneck_Cifar}
definition = definitions[opts["model_size"]]
super().__init__(opts, definition, is_training)
def Model(opts, training, image):
return ResNet(opts, training)(image)
def staged_model(opts):
splits = opts['pipeline_splits']
x = ResNet(opts, True)
if splits is None or (
len(splits) != opts['shards'] - 1 and opts['shards'] > 1):
possible_splits = [
s.keywords['name'] for s in x._build_function_list()
if 'relu' in s.keywords['name']
]
raise ValueError(
"--pipeline-splits not specified or wrong number of splits. Need {} of {}".format(
opts['shards'] - 1, possible_splits))
splits.append(None)
stages = [partial(x.first_stage, first_split_name=splits[0])]
for i in range(len(splits) - 1):
stages.append(
partial(x.later_stage,
prev_split_name=splits[i],
end_split_name=splits[i + 1]))
return stages
def add_arguments(parser):
group = parser.add_argument_group('ResNet')
rb.add_resnet_base_arguments(group)
return parser
def set_defaults(opts):
opts['summary_str'] += "ResNet-{model_size}\n"
if opts["dataset"] == "imagenet":
opts["shortcut_type"] = "B"
elif "cifar" in opts["dataset"]:
opts["shortcut_type"] = "A"
# set ImageNet specific defaults
if opts["dataset"] == "imagenet":
if opts.get("weight_decay") is None:
# value taken from tf_official_resnet - may not be appropriate for
# small batch sizes
wd_default = 0 if opts.get("optimiser") == "LARS" else 1e-4
opts["weight_decay"] = wd_default
if opts.get("lars_weight_decay") is None:
opts["lars_weight_decay"] = 1e-4
if not opts.get("base_learning_rate_exponent"):
if opts["optimiser"] == "SGD":
opts["base_learning_rate_exponent"] = -8
elif opts["optimiser"] == "momentum":
opts["base_learning_rate_exponent"] = -11
if not opts.get("epochs") and not opts.get("iterations"):
opts["epochs"] = 100
if not opts.get("learning_rate_schedule"):
opts["learning_rate_schedule"] = [0.3, 0.6, 0.8, 0.9]
if not opts.get("learning_rate_decay"):
opts["learning_rate_decay"] = [1.0, 0.1, 0.01, 0.001, 1e-4]
if not (
opts.get("group_norm") is True or opts.get("batch_norm") is True
):
# set group norm as default for ImageNet
opts["group_norm"] = True
if opts.get("group_norm"):
if not opts.get("groups"):
opts["groups"] = 32
if not opts.get("model_size"):
opts["model_size"] = 18
if not opts.get("micro_batch_size"):
opts["micro_batch_size"] = 4
if opts.get("warmup") is None:
# warmup on by default for ImageNet
opts["warmup"] = True
# exclude beta and gamma from weight decay calculation
opts["wd_exclude"] = ["beta", "gamma"]
# set CIFAR specific defaults
elif "cifar" in opts["dataset"]:
if opts.get("weight_decay") is None:
# based on sweep with CIFAR-10
wd_default = 0 if opts.get("optimiser") == "LARS" else 1e-6
opts["weight_decay"] = wd_default
if opts.get("lars_weight_decay") is None:
opts["lars_weight_decay"] = 1e-6
if not opts.get("base_learning_rate_exponent"):
opts["base_learning_rate_exponent"] = -6
if not opts.get("epochs") and not opts.get("iterations"):
opts["epochs"] = 160
if not opts.get("learning_rate_schedule"):
opts["learning_rate_schedule"] = [0.5, 0.75]
if not opts.get("learning_rate_decay"):
opts["learning_rate_decay"] = [1.0, 0.1, 0.01]
if not (
opts.get("group_norm") is True or opts.get("batch_norm") is True
):
# set batch norm as default for CIFAR
opts["batch_norm"] = True
if opts.get("group_norm"):
if not opts.get("groups"):
opts["groups"] = 16
if not opts.get("model_size"):
opts["model_size"] = 20
if not opts.get("micro_batch_size"):
opts["micro_batch_size"] = 32
if not opts["BN_decay"]:
opts["BN_decay"] = 0.97
opts["name"] = "RN{}".format(opts["model_size"])
opts["name"] += "_bs{}".format(opts["micro_batch_size"])
if opts.get("replicas") > 1:
opts["name"] += "x{}r".format(opts["replicas"])
if opts["pipeline"]:
opts["name"] += "x{}p".format(opts["gradient_accumulation_count"])
elif opts.get("gradient_accumulation_count") > 1:
opts["name"] += "x{}a".format(opts["gradient_accumulation_count"])
if not (opts["batch_norm"] or opts["group_norm"]):
opts["name"] += "_noBN"
opts["summary_str"] += " No Batch Norm\n"
elif opts["group_norm"]:
opts["name"] += "_GN{}".format(opts["groups"])
opts["summary_str"] += " Group Norm\n" " {groups} groups\n"
else:
opts["name"] += "_BN"
opts["summary_str"] += " Batch Norm\n"
if (
opts["BN_decay"] and opts["BN_decay"] != 0.97
): # defined and not default
opts["summary_str"] += " Decay: {}\n".format(opts["BN_decay"])
opts["name"] += "_{}{}".format(
opts["precision"], "_noSR" if opts["no_stochastic_rounding"] else ""
)
|
factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/streams/models.py | kaka-lin/azure-intelligent-edge-patterns | 176 | 11077891 | """App models.
"""
import logging
import threading
import time
import cv2
from configs.general_configs import PRINT_THREAD
from ..cameras.utils import normalize_rtsp, verify_rtsp
from .exceptions import StreamOpenRTSPError
logger = logging.getLogger(__name__)
# Stream
KEEP_ALIVE_THRESHOLD = 10 # Seconds
# Stream Manager
STREAM_GC_TIME_THRESHOLD = 5 # Seconds
class Stream:
"""Stream Class"""
def __init__(self, rtsp, camera_id, part_id=None):
self.rtsp = normalize_rtsp(rtsp=rtsp)
self.camera_id = camera_id
self.part_id = part_id
self.last_active = time.time()
self.status = "init"
self.cur_img_index = 0
self.last_get_img_index = 1
self.id = id(self)
# test rtsp
if not verify_rtsp(self.rtsp):
raise StreamOpenRTSPError
self.cap = cv2.VideoCapture(self.rtsp)
self.last_img = self.cap.read()[1]
def update_keep_alive(self):
"""update_keep_alive."""
self.last_active = time.time()
def gen(self):
"""generator for stream."""
self.status = "running"
logger.info("Start streaming with %s.", self.rtsp)
while self.status == "running" and (
self.last_active + KEEP_ALIVE_THRESHOLD > time.time()
):
if not self.cap.isOpened():
raise StreamOpenRTSPError
has_img, img = self.cap.read()
# Need to add the video flag FIXME
if not has_img:
self.cap = cv2.VideoCapture(self.rtsp)
time.sleep(1)
continue
img = cv2.resize(img, None, fx=0.5, fy=0.5)
self.last_active = time.time()
self.last_img = img.copy()
self.cur_img_index = (self.cur_img_index + 1) % 10000
yield (
b"--frame\r\n"
b"Content-Type: image/jpeg\r\n\r\n"
+ cv2.imencode(".jpg", img)[1].tobytes()
+ b"\r\n"
)
self.cap.release()
logger.info("%s cap released.", self)
def get_frame(self):
"""get_frame."""
logger.info("%s get frame.", self)
# b, img = self.cap.read()
time_begin = time.time()
while True:
if time.time() - time_begin > 5:
break
if self.last_get_img_index == self.cur_img_index:
time.sleep(0.01)
else:
break
self.last_get_img_index = self.cur_img_index
img = self.last_img.copy()
# if b: return cv2.imencode('.jpg', img)[1].tobytes()
# else : return None
return cv2.imencode(".jpg", img)[1].tobytes()
def close(self):
"""close.
close the stream.
"""
self.status = "stopped"
logger.info("%s stopped.", self)
def __str__(self):
return f"<Stream id:{self.id} rtsp:{self.rtsp}>"
def __repr__(self):
return f"<Stream id:{self.id} rtsp:{self.rtsp}>"
class StreamManager:
"""StreamManager"""
def __init__(self):
self.streams = []
self.mutex = threading.Lock()
self.gc()
def add(self, stream: Stream):
"""add stream"""
self.mutex.acquire()
self.streams.append(stream)
self.mutex.release()
def get_stream_by_id(self, stream_id):
"""get_stream_by_id"""
self.mutex.acquire()
for i in range(len(self.streams)):
stream = self.streams[i]
if stream.id == stream_id:
self.mutex.release()
return stream
self.mutex.release()
return None
def gc(self):
"""Garbage collector
IMPORTANT, autoreloader will not reload threading,
please restart the server if you modify the thread
"""
def _gc(self):
while True:
self.mutex.acquire()
if PRINT_THREAD:
logger.info("streams: %s", self.streams)
to_delete = []
for stream in self.streams:
if stream.last_active + STREAM_GC_TIME_THRESHOLD < time.time():
# stop the inactive stream
                        # (the ones whose users didn't click disconnect)
logger.info("stream %s inactive", stream)
logger.info("Time now %s", time.time())
logger.info("Stream alive through %s", stream.last_active)
stream.close()
# collect the stream, to delete later
to_delete.append(stream)
for stream in to_delete:
self.streams.remove(stream)
self.mutex.release()
time.sleep(3)
threading.Thread(target=_gc, args=(self,), daemon=True).start()
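# Editor's sketch of the expected lifecycle (requires a reachable RTSP source,
# otherwise Stream raises StreamOpenRTSPError): register a stream, look it up
# by id when serving the feed, and let the gc thread reap it once inactive.
def _example_stream_manager_usage(rtsp_url):
    manager = StreamManager()
    stream = Stream(rtsp=rtsp_url, camera_id=1)
    manager.add(stream)
    return manager.get_stream_by_id(stream.id)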
|
manila/policies/share_snapshot.py | openstack/manila | 159 | 11077940 | <reponame>openstack/manila
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import versionutils
from oslo_policy import policy
from manila.policies import base
BASE_POLICY_NAME = 'share_snapshot:%s'
DEPRECATED_REASON = """
The share snapshot API now supports system scope and default roles.
"""
deprecated_snapshot_get = policy.DeprecatedRule(
name=BASE_POLICY_NAME % 'get_snapshot',
check_str=base.RULE_DEFAULT,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.WALLABY
)
deprecated_snapshot_get_all = policy.DeprecatedRule(
name=BASE_POLICY_NAME % 'get_all_snapshots',
check_str=base.RULE_DEFAULT,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.WALLABY
)
deprecated_snapshot_force_delete = policy.DeprecatedRule(
name=BASE_POLICY_NAME % 'force_delete',
check_str=base.RULE_ADMIN_API,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.WALLABY
)
deprecated_snapshot_manage = policy.DeprecatedRule(
name=BASE_POLICY_NAME % 'manage_snapshot',
check_str=base.RULE_ADMIN_API,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.WALLABY
)
deprecated_snapshot_unmanage = policy.DeprecatedRule(
name=BASE_POLICY_NAME % 'unmanage_snapshot',
check_str=base.RULE_ADMIN_API,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.WALLABY
)
deprecated_snapshot_reset_status = policy.DeprecatedRule(
name=BASE_POLICY_NAME % 'reset_status',
check_str=base.RULE_ADMIN_API,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.WALLABY
)
deprecated_snapshot_access_list = policy.DeprecatedRule(
name=BASE_POLICY_NAME % 'access_list',
check_str=base.RULE_DEFAULT,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.WALLABY
)
deprecated_snapshot_allow_access = policy.DeprecatedRule(
name=BASE_POLICY_NAME % 'allow_access',
check_str=base.RULE_DEFAULT,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.WALLABY
)
deprecated_snapshot_deny_access = policy.DeprecatedRule(
name=BASE_POLICY_NAME % 'deny_access',
check_str=base.RULE_DEFAULT,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.WALLABY
)
share_snapshot_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'get_snapshot',
check_str=base.SYSTEM_OR_PROJECT_READER,
scope_types=['system', 'project'],
description="Get share snapshot.",
operations=[
{
'method': 'GET',
'path': '/snapshots/{snapshot_id}'
}
],
deprecated_rule=deprecated_snapshot_get
),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'get_all_snapshots',
check_str=base.SYSTEM_OR_PROJECT_READER,
scope_types=['system', 'project'],
description="Get all share snapshots.",
operations=[
{
'method': 'GET',
'path': '/snapshots'
},
{
'method': 'GET',
'path': '/snapshots/detail'
},
{
'method': 'GET',
'path': '/snapshots?{query}'
},
{
'method': 'GET',
'path': '/snapshots/detail?{query}'
}
],
deprecated_rule=deprecated_snapshot_get_all
),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'force_delete',
check_str=base.SYSTEM_ADMIN_OR_PROJECT_ADMIN,
scope_types=['system', 'project'],
description="Force Delete a share snapshot.",
operations=[
{
'method': 'DELETE',
'path': '/snapshots/{snapshot_id}'
}
],
deprecated_rule=deprecated_snapshot_force_delete
),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'manage_snapshot',
check_str=base.SYSTEM_ADMIN,
scope_types=['system'],
description="Manage share snapshot.",
operations=[
{
'method': 'POST',
'path': '/snapshots/manage'
}
],
deprecated_rule=deprecated_snapshot_manage
),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'unmanage_snapshot',
check_str=base.SYSTEM_ADMIN,
scope_types=['system'],
description="Unmanage share snapshot.",
operations=[
{
'method': 'POST',
'path': '/snapshots/{snapshot_id}/action'
}
],
deprecated_rule=deprecated_snapshot_unmanage
),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'reset_status',
check_str=base.SYSTEM_ADMIN_OR_PROJECT_ADMIN,
scope_types=['system', 'project'],
description="Reset status.",
operations=[
{
'method': 'POST',
'path': '/snapshots/{snapshot_id}/action',
}
],
deprecated_rule=deprecated_snapshot_reset_status
),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'access_list',
check_str=base.SYSTEM_OR_PROJECT_READER,
scope_types=['system', 'project'],
description="List access rules of a share snapshot.",
operations=[
{
'method': 'GET',
'path': '/snapshots/{snapshot_id}/access-list'
}
],
deprecated_rule=deprecated_snapshot_access_list
),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'allow_access',
check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
scope_types=['system', 'project'],
description="Allow access to a share snapshot.",
operations=[
{
'method': 'POST',
'path': '/snapshots/{snapshot_id}/action'
}
],
deprecated_rule=deprecated_snapshot_allow_access
),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'deny_access',
check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
scope_types=['system', 'project'],
description="Deny access to a share snapshot.",
operations=[
{
'method': 'POST',
'path': '/snapshots/{snapshot_id}/action'
}
],
deprecated_rule=deprecated_snapshot_deny_access
),
]
def list_rules():
return share_snapshot_policies
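# Editor's sketch: outside manila's own policy plumbing, these defaults could be
# registered on a bare oslo.policy enforcer roughly as follows (`conf` is an
# oslo.config ConfigOpts instance assumed to be configured elsewhere).
def _example_register_defaults(conf):
    from oslo_policy import policy as oslo_policy
    enforcer = oslo_policy.Enforcer(conf)
    enforcer.register_defaults(list_rules())
    return enforcer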
|
008-osdev-01/build.py | gynvael/stream | 152 | 11077995 | <filename>008-osdev-01/build.py
import os
cmds_to_run = [
"nasm stage1.asm",
"nasm stage2.asm"
]
files_to_img = [
"stage1",
"stage2"
]
for cmd in cmds_to_run:
os.system(cmd)
buf = []
for fn in files_to_img:
with open(fn, "rb") as f:
buf.append(f.read())
# XXX: does not pad the stages to the sector size
with open("floppy.bin", "wb") as f:
    f.write(b"".join(buf))
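# Editor's sketch addressing the XXX above: each stage is usually padded with
# zero bytes so the next one starts on a 512-byte sector boundary; applying this
# to every element of buf before joining would fix the alignment.
def pad_to_sector(data, sector_size=512):
    remainder = len(data) % sector_size
    if remainder:
        data += b"\x00" * (sector_size - remainder)
    return data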
|
koalixcrm/crm/factories/factory_customer_group.py | Cataldir/koalixcrm | 290 | 11077997 | # -*- coding: utf-8 -*-
import factory
from koalixcrm.crm.models import CustomerGroup
class StandardCustomerGroupFactory(factory.django.DjangoModelFactory):
class Meta:
model = CustomerGroup
django_get_or_create = ('name',)
name = factory.Sequence(lambda n: "Customer Group #%s" % n)
class AdvancedCustomerGroupFactory(factory.django.DjangoModelFactory):
class Meta:
model = CustomerGroup
name = factory.Sequence(lambda n: "Customer Group #%s" % n)
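# Editor's sketch: with a configured Django test database these factories are
# used like any other factory_boy factory; the explicit name override is optional.
def _example_create_customer_group():
    return StandardCustomerGroupFactory(name="Customer Group #42")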
|
pgcontents/utils/sync.py | freedom079215/pgcontents | 138 | 11078029 | """
Utilities for synchronizing directories.
"""
from __future__ import (
print_function,
unicode_literals,
)
from ..checkpoints import PostgresCheckpoints
from ..crypto import FallbackCrypto
from ..query import (
list_users,
reencrypt_user_content,
)
def create_user(db_url, user):
"""
Create a user.
"""
PostgresCheckpoints(
db_url=db_url,
user_id=user,
create_user_on_startup=True,
)
def _separate_dirs_files(models):
"""
Split an iterable of models into a list of file paths and a list of
directory paths.
"""
dirs = []
files = []
for model in models:
if model['type'] == 'directory':
dirs.append(model['path'])
else:
files.append(model['path'])
return dirs, files
def walk(mgr):
"""
Like os.walk, but written in terms of the ContentsAPI.
Takes a ContentsManager and returns a generator of tuples of the form:
(directory name, [subdirectories], [files in directory])
"""
return walk_dirs(mgr, [''])
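# Editor's sketch: because walk() mirrors os.walk, a contents tree can be dumped
# the same way (mgr is assumed to be any Jupyter ContentsManager instance).
def print_tree(mgr):
    for directory, subdirs, files in walk(mgr):
        print(directory or '/', subdirs, files)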
def walk_dirs(mgr, dirs):
"""
Recursive helper for walk.
"""
for directory in dirs:
children = mgr.get(
directory,
content=True,
type='directory',
)['content']
dirs, files = map(sorted, _separate_dirs_files(children))
yield directory, dirs, files
if dirs:
for entry in walk_dirs(mgr, dirs):
yield entry
def walk_files(mgr):
"""
Iterate over all files visible to ``mgr``.
"""
    for dir_, subdirs, files in walk(mgr):
for file_ in files:
yield file_
def walk_files_with_content(mgr):
"""
Iterate over the contents of all files visible to ``mgr``.
"""
for _, _, files in walk(mgr):
for f in files:
yield mgr.get(f, content=True)
def all_user_ids(engine):
"""
Get a list of user_ids from an engine.
"""
with engine.begin() as db:
return [row[0] for row in list_users(db)]
def reencrypt_all_users(engine,
old_crypto_factory,
new_crypto_factory,
logger):
"""
Re-encrypt data for all users.
This function is idempotent, meaning that it should be possible to apply
the same re-encryption process multiple times without having any effect on
the database. Idempotency is achieved by first attempting to decrypt with
the old crypto and falling back to the new crypto on failure.
An important consequence of this strategy is that **decrypting** a database
is not supported with this function, because ``NoEncryption.decrypt``
always succeeds. To decrypt an already-encrypted database, use
``unencrypt_all_users`` instead.
It is, however, possible to perform an initial encryption of a database by
passing a function returning a ``NoEncryption`` as ``old_crypto_factory``.
Parameters
----------
engine : SQLAlchemy.engine
Engine encapsulating database connections.
old_crypto_factory : function[str -> Any]
A function from user_id to an object providing the interface required
by PostgresContentsManager.crypto. Results of this will be used for
decryption of existing database content.
new_crypto_factory : function[str -> Any]
A function from user_id to an object providing the interface required
by PostgresContentsManager.crypto. Results of this will be used for
re-encryption of database content.
This **must not** return instances of ``NoEncryption``. Use
``unencrypt_all_users`` if you want to unencrypt a database.
logger : logging.Logger, optional
        A logger to use during re-encryption.
See Also
--------
reencrypt_user
unencrypt_all_users
"""
logger.info("Beginning re-encryption for all users.")
for user_id in all_user_ids(engine):
reencrypt_single_user(
engine,
user_id,
old_crypto=old_crypto_factory(user_id),
new_crypto=new_crypto_factory(user_id),
logger=logger,
)
logger.info("Finished re-encryption for all users.")
def reencrypt_single_user(engine, user_id, old_crypto, new_crypto, logger):
"""
Re-encrypt all files and checkpoints for a single user.
"""
# Use FallbackCrypto so that we're re-entrant if we halt partway through.
crypto = FallbackCrypto([new_crypto, old_crypto])
reencrypt_user_content(
engine=engine,
user_id=user_id,
old_decrypt_func=crypto.decrypt,
new_encrypt_func=crypto.encrypt,
logger=logger,
)
def unencrypt_all_users(engine, old_crypto_factory, logger):
"""
Unencrypt data for all users.
Parameters
----------
engine : SQLAlchemy.engine
Engine encapsulating database connections.
old_crypto_factory : function[str -> Any]
A function from user_id to an object providing the interface required
by PostgresContentsManager.crypto. Results of this will be used for
decryption of existing database content.
logger : logging.Logger, optional
        A logger to use during unencryption.
"""
logger.info("Beginning re-encryption for all users.")
for user_id in all_user_ids(engine):
unencrypt_single_user(
engine=engine,
user_id=user_id,
old_crypto=old_crypto_factory(user_id),
logger=logger,
)
logger.info("Finished re-encryption for all users.")
def unencrypt_single_user(engine, user_id, old_crypto, logger):
"""
Unencrypt all files and checkpoints for a single user.
"""
reencrypt_user_content(
engine=engine,
user_id=user_id,
old_decrypt_func=old_crypto.decrypt,
new_encrypt_func=lambda s: s,
logger=logger,
)
|
sdk/netapp/azure-mgmt-netapp/azure/mgmt/netapp/models/_models_py3.py | praveenkuttappan/azure-sdk-for-python | 2,728 | 11078043 | <filename>sdk/netapp/azure-mgmt-netapp/azure/mgmt/netapp/models/_models_py3.py
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Any, Dict, List, Optional, Union
import msrest.serialization
from ._net_app_management_client_enums import *
class AccountEncryption(msrest.serialization.Model):
"""Encryption settings.
:param key_source: Encryption Key Source. Possible values are: 'Microsoft.NetApp'.
:type key_source: str
"""
_attribute_map = {
'key_source': {'key': 'keySource', 'type': 'str'},
}
def __init__(
self,
*,
key_source: Optional[str] = None,
**kwargs
):
super(AccountEncryption, self).__init__(**kwargs)
self.key_source = key_source
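# Editor's sketch: these generated msrest models are constructed with plain
# keyword arguments, e.g. selecting the documented 'Microsoft.NetApp' key source.
def _example_account_encryption():
    return AccountEncryption(key_source='Microsoft.NetApp')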
class ActiveDirectory(msrest.serialization.Model):
"""Active Directory.
Variables are only populated by the server, and will be ignored when sending a request.
:param active_directory_id: Id of the Active Directory.
:type active_directory_id: str
:param username: Username of Active Directory domain administrator.
:type username: str
:param password: Plain text password of Active Directory domain administrator, value is masked
in the response.
:type password: str
:param domain: Name of the Active Directory domain.
:type domain: str
:param dns: Comma separated list of DNS server IP addresses (IPv4 only) for the Active
Directory domain.
:type dns: str
:ivar status: Status of the Active Directory. Possible values include: "Created", "InUse",
"Deleted", "Error", "Updating".
:vartype status: str or ~azure.mgmt.netapp.models.ActiveDirectoryStatus
:ivar status_details: Any details in regards to the Status of the Active Directory.
:vartype status_details: str
:param smb_server_name: NetBIOS name of the SMB server. This name will be registered as a
computer account in the AD and used to mount volumes.
:type smb_server_name: str
:param organizational_unit: The Organizational Unit (OU) within the Windows Active Directory.
:type organizational_unit: str
:param site: The Active Directory site the service will limit Domain Controller discovery to.
:type site: str
:param backup_operators: Users to be added to the Built-in Backup Operator active directory
group. A list of unique usernames without domain specifier.
:type backup_operators: list[str]
:param administrators: Users to be added to the Built-in Administrators active directory group.
A list of unique usernames without domain specifier.
:type administrators: list[str]
:param kdc_ip: kdc server IP addresses for the active directory machine. This optional
parameter is used only while creating kerberos volume.
:type kdc_ip: str
:param ad_name: Name of the active directory machine. This optional parameter is used only
while creating kerberos volume.
:type ad_name: str
:param server_root_ca_certificate: When LDAP over SSL/TLS is enabled, the LDAP client is
required to have base64 encoded Active Directory Certificate Service's self-signed root CA
certificate, this optional parameter is used only for dual protocol with LDAP user-mapping
volumes.
:type server_root_ca_certificate: str
:param aes_encryption: If enabled, AES encryption will be enabled for SMB communication.
:type aes_encryption: bool
:param ldap_signing: Specifies whether or not the LDAP traffic needs to be signed.
:type ldap_signing: bool
:param security_operators: Domain Users in the Active directory to be given SeSecurityPrivilege
privilege (Needed for SMB Continuously available shares for SQL). A list of unique usernames
without domain specifier.
:type security_operators: list[str]
:param ldap_over_tls: Specifies whether or not the LDAP traffic needs to be secured via TLS.
:type ldap_over_tls: bool
:param allow_local_nfs_users_with_ldap: If enabled, NFS client local users can also (in
addition to LDAP users) access the NFS volumes.
:type allow_local_nfs_users_with_ldap: bool
"""
_validation = {
'dns': {'pattern': r'^(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)((, ?)(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?))*$'},
'status': {'readonly': True},
'status_details': {'readonly': True},
'kdc_ip': {'pattern': r'^(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)((, ?)(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?))*$'},
'ad_name': {'max_length': 64, 'min_length': 1},
'server_root_ca_certificate': {'max_length': 10240, 'min_length': 1},
}
_attribute_map = {
'active_directory_id': {'key': 'activeDirectoryId', 'type': 'str'},
'username': {'key': 'username', 'type': 'str'},
'password': {'key': 'password', 'type': 'str'},
'domain': {'key': 'domain', 'type': 'str'},
'dns': {'key': 'dns', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'status_details': {'key': 'statusDetails', 'type': 'str'},
'smb_server_name': {'key': 'smbServerName', 'type': 'str'},
'organizational_unit': {'key': 'organizationalUnit', 'type': 'str'},
'site': {'key': 'site', 'type': 'str'},
'backup_operators': {'key': 'backupOperators', 'type': '[str]'},
'administrators': {'key': 'administrators', 'type': '[str]'},
'kdc_ip': {'key': 'kdcIP', 'type': 'str'},
'ad_name': {'key': 'adName', 'type': 'str'},
'server_root_ca_certificate': {'key': 'serverRootCACertificate', 'type': 'str'},
'aes_encryption': {'key': 'aesEncryption', 'type': 'bool'},
'ldap_signing': {'key': 'ldapSigning', 'type': 'bool'},
'security_operators': {'key': 'securityOperators', 'type': '[str]'},
'ldap_over_tls': {'key': 'ldapOverTLS', 'type': 'bool'},
'allow_local_nfs_users_with_ldap': {'key': 'allowLocalNfsUsersWithLdap', 'type': 'bool'},
}
def __init__(
self,
*,
active_directory_id: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
domain: Optional[str] = None,
dns: Optional[str] = None,
smb_server_name: Optional[str] = None,
organizational_unit: Optional[str] = "CN=Computers",
site: Optional[str] = None,
backup_operators: Optional[List[str]] = None,
administrators: Optional[List[str]] = None,
kdc_ip: Optional[str] = None,
ad_name: Optional[str] = None,
server_root_ca_certificate: Optional[str] = None,
aes_encryption: Optional[bool] = None,
ldap_signing: Optional[bool] = None,
security_operators: Optional[List[str]] = None,
ldap_over_tls: Optional[bool] = None,
allow_local_nfs_users_with_ldap: Optional[bool] = None,
**kwargs
):
super(ActiveDirectory, self).__init__(**kwargs)
self.active_directory_id = active_directory_id
self.username = username
self.password = password
self.domain = domain
self.dns = dns
self.status = None
self.status_details = None
self.smb_server_name = smb_server_name
self.organizational_unit = organizational_unit
self.site = site
self.backup_operators = backup_operators
self.administrators = administrators
self.kdc_ip = kdc_ip
self.ad_name = ad_name
self.server_root_ca_certificate = server_root_ca_certificate
self.aes_encryption = aes_encryption
self.ldap_signing = ldap_signing
self.security_operators = security_operators
self.ldap_over_tls = ldap_over_tls
self.allow_local_nfs_users_with_ldap = allow_local_nfs_users_with_ldap
class AuthorizeRequest(msrest.serialization.Model):
"""Authorize request.
:param remote_volume_resource_id: Resource id of the remote volume.
:type remote_volume_resource_id: str
"""
_attribute_map = {
'remote_volume_resource_id': {'key': 'remoteVolumeResourceId', 'type': 'str'},
}
def __init__(
self,
*,
remote_volume_resource_id: Optional[str] = None,
**kwargs
):
super(AuthorizeRequest, self).__init__(**kwargs)
self.remote_volume_resource_id = remote_volume_resource_id
class Backup(msrest.serialization.Model):
"""Backup of a Volume.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param location: Required. Resource location.
:type location: str
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:ivar backup_id: UUID v4 used to identify the Backup.
:vartype backup_id: str
:ivar creation_date: The creation date of the backup.
:vartype creation_date: ~datetime.datetime
:ivar provisioning_state: Azure lifecycle management.
:vartype provisioning_state: str
:ivar size: Size of backup.
:vartype size: long
:param label: Label for backup.
:type label: str
:ivar backup_type: Type of backup Manual or Scheduled. Possible values include: "Manual",
"Scheduled".
:vartype backup_type: str or ~azure.mgmt.netapp.models.BackupType
:ivar failure_reason: Failure reason.
:vartype failure_reason: str
:ivar volume_name: Volume name.
:vartype volume_name: str
:param use_existing_snapshot: Manual backup an already existing snapshot. This will always be
false for scheduled backups and true/false for manual backups.
:type use_existing_snapshot: bool
"""
_validation = {
'location': {'required': True},
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'backup_id': {'readonly': True, 'max_length': 36, 'min_length': 36, 'pattern': r'^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$'},
'creation_date': {'readonly': True},
'provisioning_state': {'readonly': True},
'size': {'readonly': True},
'backup_type': {'readonly': True},
'failure_reason': {'readonly': True},
'volume_name': {'readonly': True},
}
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'backup_id': {'key': 'properties.backupId', 'type': 'str'},
'creation_date': {'key': 'properties.creationDate', 'type': 'iso-8601'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'size': {'key': 'properties.size', 'type': 'long'},
'label': {'key': 'properties.label', 'type': 'str'},
'backup_type': {'key': 'properties.backupType', 'type': 'str'},
'failure_reason': {'key': 'properties.failureReason', 'type': 'str'},
'volume_name': {'key': 'properties.volumeName', 'type': 'str'},
'use_existing_snapshot': {'key': 'properties.useExistingSnapshot', 'type': 'bool'},
}
def __init__(
self,
*,
location: str,
label: Optional[str] = None,
use_existing_snapshot: Optional[bool] = False,
**kwargs
):
super(Backup, self).__init__(**kwargs)
self.location = location
self.id = None
self.name = None
self.type = None
self.backup_id = None
self.creation_date = None
self.provisioning_state = None
self.size = None
self.label = label
self.backup_type = None
self.failure_reason = None
self.volume_name = None
self.use_existing_snapshot = use_existing_snapshot
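# Editor's sketch: only `location` is required when building a Backup request
# body; the read-only fields above are filled in by the service ("eastus" and
# the label below are placeholder values).
def _example_backup_body():
    return Backup(location="eastus", label="nightly", use_existing_snapshot=False)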
class BackupPatch(msrest.serialization.Model):
"""Backup patch.
Variables are only populated by the server, and will be ignored when sending a request.
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:ivar backup_id: UUID v4 used to identify the Backup.
:vartype backup_id: str
:ivar creation_date: The creation date of the backup.
:vartype creation_date: ~datetime.datetime
:ivar provisioning_state: Azure lifecycle management.
:vartype provisioning_state: str
:ivar size: Size of backup.
:vartype size: long
:param label: Label for backup.
:type label: str
:ivar backup_type: Type of backup Manual or Scheduled. Possible values include: "Manual",
"Scheduled".
:vartype backup_type: str or ~azure.mgmt.netapp.models.BackupType
:ivar failure_reason: Failure reason.
:vartype failure_reason: str
:ivar volume_name: Volume name.
:vartype volume_name: str
:param use_existing_snapshot: Manual backup an already existing snapshot. This will always be
false for scheduled backups and true/false for manual backups.
:type use_existing_snapshot: bool
"""
_validation = {
'backup_id': {'readonly': True, 'max_length': 36, 'min_length': 36, 'pattern': r'^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$'},
'creation_date': {'readonly': True},
'provisioning_state': {'readonly': True},
'size': {'readonly': True},
'backup_type': {'readonly': True},
'failure_reason': {'readonly': True},
'volume_name': {'readonly': True},
}
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
'backup_id': {'key': 'properties.backupId', 'type': 'str'},
'creation_date': {'key': 'properties.creationDate', 'type': 'iso-8601'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'size': {'key': 'properties.size', 'type': 'long'},
'label': {'key': 'properties.label', 'type': 'str'},
'backup_type': {'key': 'properties.backupType', 'type': 'str'},
'failure_reason': {'key': 'properties.failureReason', 'type': 'str'},
'volume_name': {'key': 'properties.volumeName', 'type': 'str'},
'use_existing_snapshot': {'key': 'properties.useExistingSnapshot', 'type': 'bool'},
}
def __init__(
self,
*,
tags: Optional[Dict[str, str]] = None,
label: Optional[str] = None,
use_existing_snapshot: Optional[bool] = False,
**kwargs
):
super(BackupPatch, self).__init__(**kwargs)
self.tags = tags
self.backup_id = None
self.creation_date = None
self.provisioning_state = None
self.size = None
self.label = label
self.backup_type = None
self.failure_reason = None
self.volume_name = None
self.use_existing_snapshot = use_existing_snapshot
class BackupPoliciesList(msrest.serialization.Model):
"""List of Backup Policies.
:param value: A list of backup policies.
:type value: list[~azure.mgmt.netapp.models.BackupPolicy]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[BackupPolicy]'},
}
def __init__(
self,
*,
value: Optional[List["BackupPolicy"]] = None,
**kwargs
):
super(BackupPoliciesList, self).__init__(**kwargs)
self.value = value
class BackupPolicy(msrest.serialization.Model):
"""Backup policy information.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param location: Required. Resource location.
:type location: str
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar etag: A unique read-only string that changes whenever the resource is updated.
:vartype etag: str
:ivar type: Resource type.
:vartype type: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:ivar name_properties_name: Name of backup policy.
:vartype name_properties_name: str
:ivar backup_policy_id: Backup Policy Resource ID.
:vartype backup_policy_id: str
:ivar provisioning_state: Azure lifecycle management.
:vartype provisioning_state: str
:param daily_backups_to_keep: Daily backups count to keep.
:type daily_backups_to_keep: int
:param weekly_backups_to_keep: Weekly backups count to keep.
:type weekly_backups_to_keep: int
:param monthly_backups_to_keep: Monthly backups count to keep.
:type monthly_backups_to_keep: int
:ivar volumes_assigned: Volumes using current backup policy.
:vartype volumes_assigned: int
:param enabled: The property to decide policy is enabled or not.
:type enabled: bool
:ivar volume_backups: A list of volumes assigned to this policy.
:vartype volume_backups: list[~azure.mgmt.netapp.models.VolumeBackups]
"""
_validation = {
'location': {'required': True},
'id': {'readonly': True},
'name': {'readonly': True},
'etag': {'readonly': True},
'type': {'readonly': True},
'name_properties_name': {'readonly': True},
'backup_policy_id': {'readonly': True},
'provisioning_state': {'readonly': True},
'volumes_assigned': {'readonly': True},
'volume_backups': {'readonly': True},
}
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'name_properties_name': {'key': 'properties.name', 'type': 'str'},
'backup_policy_id': {'key': 'properties.backupPolicyId', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'daily_backups_to_keep': {'key': 'properties.dailyBackupsToKeep', 'type': 'int'},
'weekly_backups_to_keep': {'key': 'properties.weeklyBackupsToKeep', 'type': 'int'},
'monthly_backups_to_keep': {'key': 'properties.monthlyBackupsToKeep', 'type': 'int'},
'volumes_assigned': {'key': 'properties.volumesAssigned', 'type': 'int'},
'enabled': {'key': 'properties.enabled', 'type': 'bool'},
'volume_backups': {'key': 'properties.volumeBackups', 'type': '[VolumeBackups]'},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
daily_backups_to_keep: Optional[int] = None,
weekly_backups_to_keep: Optional[int] = None,
monthly_backups_to_keep: Optional[int] = None,
enabled: Optional[bool] = None,
**kwargs
):
super(BackupPolicy, self).__init__(**kwargs)
self.location = location
self.id = None
self.name = None
self.etag = None
self.type = None
self.tags = tags
self.name_properties_name = None
self.backup_policy_id = None
self.provisioning_state = None
self.daily_backups_to_keep = daily_backups_to_keep
self.weekly_backups_to_keep = weekly_backups_to_keep
self.monthly_backups_to_keep = monthly_backups_to_keep
self.volumes_assigned = None
self.enabled = enabled
self.volume_backups = None
class BackupPolicyDetails(msrest.serialization.Model):
"""Backup policy properties.
Variables are only populated by the server, and will be ignored when sending a request.
:param location: Resource location.
:type location: str
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:ivar name_properties_name: Name of backup policy.
:vartype name_properties_name: str
:ivar backup_policy_id: Backup Policy Resource ID.
:vartype backup_policy_id: str
:ivar provisioning_state: Azure lifecycle management.
:vartype provisioning_state: str
:param daily_backups_to_keep: Daily backups count to keep.
:type daily_backups_to_keep: int
:param weekly_backups_to_keep: Weekly backups count to keep.
:type weekly_backups_to_keep: int
:param monthly_backups_to_keep: Monthly backups count to keep.
:type monthly_backups_to_keep: int
:ivar volumes_assigned: Volumes using current backup policy.
:vartype volumes_assigned: int
:param enabled: The property to decide policy is enabled or not.
:type enabled: bool
:ivar volume_backups: A list of volumes assigned to this policy.
:vartype volume_backups: list[~azure.mgmt.netapp.models.VolumeBackups]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'name_properties_name': {'readonly': True},
'backup_policy_id': {'readonly': True},
'provisioning_state': {'readonly': True},
'volumes_assigned': {'readonly': True},
'volume_backups': {'readonly': True},
}
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'name_properties_name': {'key': 'properties.name', 'type': 'str'},
'backup_policy_id': {'key': 'properties.backupPolicyId', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'daily_backups_to_keep': {'key': 'properties.dailyBackupsToKeep', 'type': 'int'},
'weekly_backups_to_keep': {'key': 'properties.weeklyBackupsToKeep', 'type': 'int'},
'monthly_backups_to_keep': {'key': 'properties.monthlyBackupsToKeep', 'type': 'int'},
'volumes_assigned': {'key': 'properties.volumesAssigned', 'type': 'int'},
'enabled': {'key': 'properties.enabled', 'type': 'bool'},
'volume_backups': {'key': 'properties.volumeBackups', 'type': '[VolumeBackups]'},
}
def __init__(
self,
*,
location: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
daily_backups_to_keep: Optional[int] = None,
weekly_backups_to_keep: Optional[int] = None,
monthly_backups_to_keep: Optional[int] = None,
enabled: Optional[bool] = None,
**kwargs
):
super(BackupPolicyDetails, self).__init__(**kwargs)
self.location = location
self.id = None
self.name = None
self.type = None
self.tags = tags
self.name_properties_name = None
self.backup_policy_id = None
self.provisioning_state = None
self.daily_backups_to_keep = daily_backups_to_keep
self.weekly_backups_to_keep = weekly_backups_to_keep
self.monthly_backups_to_keep = monthly_backups_to_keep
self.volumes_assigned = None
self.enabled = enabled
self.volume_backups = None
class BackupPolicyPatch(msrest.serialization.Model):
"""Backup policy Details for create and update.
Variables are only populated by the server, and will be ignored when sending a request.
:param location: Resource location.
:type location: str
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:ivar name_properties_name: Name of backup policy.
:vartype name_properties_name: str
:ivar backup_policy_id: Backup Policy Resource ID.
:vartype backup_policy_id: str
:ivar provisioning_state: Azure lifecycle management.
:vartype provisioning_state: str
:param daily_backups_to_keep: Daily backups count to keep.
:type daily_backups_to_keep: int
:param weekly_backups_to_keep: Weekly backups count to keep.
:type weekly_backups_to_keep: int
:param monthly_backups_to_keep: Monthly backups count to keep.
:type monthly_backups_to_keep: int
:ivar volumes_assigned: Volumes using current backup policy.
:vartype volumes_assigned: int
:param enabled: The property to decide policy is enabled or not.
:type enabled: bool
:ivar volume_backups: A list of volumes assigned to this policy.
:vartype volume_backups: list[~azure.mgmt.netapp.models.VolumeBackups]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'name_properties_name': {'readonly': True},
'backup_policy_id': {'readonly': True},
'provisioning_state': {'readonly': True},
'volumes_assigned': {'readonly': True},
'volume_backups': {'readonly': True},
}
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'name_properties_name': {'key': 'properties.name', 'type': 'str'},
'backup_policy_id': {'key': 'properties.backupPolicyId', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'daily_backups_to_keep': {'key': 'properties.dailyBackupsToKeep', 'type': 'int'},
'weekly_backups_to_keep': {'key': 'properties.weeklyBackupsToKeep', 'type': 'int'},
'monthly_backups_to_keep': {'key': 'properties.monthlyBackupsToKeep', 'type': 'int'},
'volumes_assigned': {'key': 'properties.volumesAssigned', 'type': 'int'},
'enabled': {'key': 'properties.enabled', 'type': 'bool'},
'volume_backups': {'key': 'properties.volumeBackups', 'type': '[VolumeBackups]'},
}
def __init__(
self,
*,
location: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
daily_backups_to_keep: Optional[int] = None,
weekly_backups_to_keep: Optional[int] = None,
monthly_backups_to_keep: Optional[int] = None,
enabled: Optional[bool] = None,
**kwargs
):
super(BackupPolicyPatch, self).__init__(**kwargs)
self.location = location
self.id = None
self.name = None
self.type = None
self.tags = tags
self.name_properties_name = None
self.backup_policy_id = None
self.provisioning_state = None
self.daily_backups_to_keep = daily_backups_to_keep
self.weekly_backups_to_keep = weekly_backups_to_keep
self.monthly_backups_to_keep = monthly_backups_to_keep
self.volumes_assigned = None
self.enabled = enabled
self.volume_backups = None
class BackupsList(msrest.serialization.Model):
"""List of Backups.
:param value: A list of Backups.
:type value: list[~azure.mgmt.netapp.models.Backup]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[Backup]'},
}
def __init__(
self,
*,
value: Optional[List["Backup"]] = None,
**kwargs
):
super(BackupsList, self).__init__(**kwargs)
self.value = value
class BackupStatus(msrest.serialization.Model):
"""Backup status.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar healthy: Backup health status.
:vartype healthy: bool
:ivar relationship_status: Status of the backup mirror relationship. Possible values include:
"Idle", "Transferring".
:vartype relationship_status: str or ~azure.mgmt.netapp.models.RelationshipStatus
:ivar mirror_state: The status of the backup. Possible values include: "Uninitialized",
"Mirrored", "Broken".
:vartype mirror_state: str or ~azure.mgmt.netapp.models.MirrorState
:ivar unhealthy_reason: Reason for the unhealthy backup relationship.
:vartype unhealthy_reason: str
:ivar error_message: Displays error message if the backup is in an error state.
:vartype error_message: str
:ivar last_transfer_size: Displays the last transfer size.
:vartype last_transfer_size: long
:ivar last_transfer_type: Displays the last transfer type.
:vartype last_transfer_type: str
:ivar total_transfer_bytes: Displays the total bytes transferred.
:vartype total_transfer_bytes: long
"""
_validation = {
'healthy': {'readonly': True},
'relationship_status': {'readonly': True},
'mirror_state': {'readonly': True},
'unhealthy_reason': {'readonly': True},
'error_message': {'readonly': True},
'last_transfer_size': {'readonly': True},
'last_transfer_type': {'readonly': True},
'total_transfer_bytes': {'readonly': True},
}
_attribute_map = {
'healthy': {'key': 'healthy', 'type': 'bool'},
'relationship_status': {'key': 'relationshipStatus', 'type': 'str'},
'mirror_state': {'key': 'mirrorState', 'type': 'str'},
'unhealthy_reason': {'key': 'unhealthyReason', 'type': 'str'},
'error_message': {'key': 'errorMessage', 'type': 'str'},
'last_transfer_size': {'key': 'lastTransferSize', 'type': 'long'},
'last_transfer_type': {'key': 'lastTransferType', 'type': 'str'},
'total_transfer_bytes': {'key': 'totalTransferBytes', 'type': 'long'},
}
def __init__(
self,
**kwargs
):
super(BackupStatus, self).__init__(**kwargs)
self.healthy = None
self.relationship_status = None
self.mirror_state = None
self.unhealthy_reason = None
self.error_message = None
self.last_transfer_size = None
self.last_transfer_type = None
self.total_transfer_bytes = None
class BreakReplicationRequest(msrest.serialization.Model):
"""Break replication request.
:param force_break_replication: If replication is in status transferring and you want to force
break the replication, set to true.
:type force_break_replication: bool
"""
_attribute_map = {
'force_break_replication': {'key': 'forceBreakReplication', 'type': 'bool'},
}
def __init__(
self,
*,
force_break_replication: Optional[bool] = None,
**kwargs
):
super(BreakReplicationRequest, self).__init__(**kwargs)
self.force_break_replication = force_break_replication
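# --- Illustrative usage sketch (editor's addition, not generated code) ---
# Shows the flat wire shape of a BreakReplicationRequest; assuming serialize()
# behaves like the usual msrest helper, it should yield
# {"forceBreakReplication": True}.
def _example_break_replication_request():
    request = BreakReplicationRequest(force_break_replication=True)
    return request.serialize()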
class CapacityPool(msrest.serialization.Model):
"""Capacity pool resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param location: Required. Resource location.
:type location: str
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar etag: A unique read-only string that changes whenever the resource is updated.
:vartype etag: str
:ivar type: Resource type.
:vartype type: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:ivar pool_id: UUID v4 used to identify the Pool.
:vartype pool_id: str
:param size: Required. Provisioned size of the pool (in bytes). Allowed values are in 4TiB
chunks (value must be a multiple of 4398046511104).
:type size: long
:param service_level: Required. The service level of the file system. Possible values include:
"Standard", "Premium", "Ultra", "StandardZRS". Default value: "Premium".
:type service_level: str or ~azure.mgmt.netapp.models.ServiceLevel
:ivar provisioning_state: Azure lifecycle management.
:vartype provisioning_state: str
:ivar total_throughput_mibps: Total throughput of pool in Mibps.
:vartype total_throughput_mibps: float
:ivar utilized_throughput_mibps: Utilized throughput of pool in Mibps.
:vartype utilized_throughput_mibps: float
:param qos_type: The qos type of the pool. Possible values include: "Auto", "Manual".
:type qos_type: str or ~azure.mgmt.netapp.models.QosType
:param cool_access: If enabled (true) the pool can contain cool Access enabled volumes.
:type cool_access: bool
:param encryption_type: Encryption type of the capacity pool, set encryption type for data at
rest for this pool and all volumes in it. This value can only be set when creating new pool.
Possible values include: "Single", "Double". Default value: "Single".
:type encryption_type: str or ~azure.mgmt.netapp.models.EncryptionType
"""
_validation = {
'location': {'required': True},
'id': {'readonly': True},
'name': {'readonly': True},
'etag': {'readonly': True},
'type': {'readonly': True},
'pool_id': {'readonly': True, 'max_length': 36, 'min_length': 36, 'pattern': r'^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$'},
'size': {'required': True, 'maximum': 549755813888000, 'minimum': 4398046511104},
'service_level': {'required': True},
'provisioning_state': {'readonly': True},
'total_throughput_mibps': {'readonly': True},
'utilized_throughput_mibps': {'readonly': True},
}
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'pool_id': {'key': 'properties.poolId', 'type': 'str'},
'size': {'key': 'properties.size', 'type': 'long'},
'service_level': {'key': 'properties.serviceLevel', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'total_throughput_mibps': {'key': 'properties.totalThroughputMibps', 'type': 'float'},
'utilized_throughput_mibps': {'key': 'properties.utilizedThroughputMibps', 'type': 'float'},
'qos_type': {'key': 'properties.qosType', 'type': 'str'},
'cool_access': {'key': 'properties.coolAccess', 'type': 'bool'},
'encryption_type': {'key': 'properties.encryptionType', 'type': 'str'},
}
def __init__(
self,
*,
location: str,
size: int,
service_level: Union[str, "ServiceLevel"] = "Premium",
tags: Optional[Dict[str, str]] = None,
qos_type: Optional[Union[str, "QosType"]] = None,
cool_access: Optional[bool] = False,
encryption_type: Optional[Union[str, "EncryptionType"]] = "Single",
**kwargs
):
super(CapacityPool, self).__init__(**kwargs)
self.location = location
self.id = None
self.name = None
self.etag = None
self.type = None
self.tags = tags
self.pool_id = None
self.size = size
self.service_level = service_level
self.provisioning_state = None
self.total_throughput_mibps = None
self.utilized_throughput_mibps = None
self.qos_type = qos_type
self.cool_access = cool_access
self.encryption_type = encryption_type
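# --- Illustrative usage sketch (editor's addition, not generated code) ---
# Building the smallest valid CapacityPool: size must be a multiple of
# 4398046511104 (4 TiB) per the validation above. The region name is
# hypothetical; serialize() is assumed to nest the 'properties.*' keys for the
# wire format.
def _example_capacity_pool():
    pool = CapacityPool(
        location="eastus",            # hypothetical region
        size=4398046511104,           # exactly one 4 TiB chunk
        service_level="Premium",
        qos_type="Auto",
    )
    return pool.serialize()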
class CapacityPoolList(msrest.serialization.Model):
"""List of capacity pool resources.
:param value: List of Capacity pools.
:type value: list[~azure.mgmt.netapp.models.CapacityPool]
:param next_link: URL to get the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[CapacityPool]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["CapacityPool"]] = None,
next_link: Optional[str] = None,
**kwargs
):
super(CapacityPoolList, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class CapacityPoolPatch(msrest.serialization.Model):
"""Capacity pool patch resource.
Variables are only populated by the server, and will be ignored when sending a request.
:param location: Resource location.
:type location: str
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param size: Provisioned size of the pool (in bytes). Allowed values are in 4TiB chunks (value
must be a multiple of 4398046511104).
:type size: long
:param qos_type: The qos type of the pool. Possible values include: "Auto", "Manual".
:type qos_type: str or ~azure.mgmt.netapp.models.QosType
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'size': {'maximum': 549755813888000, 'minimum': 4398046511104},
}
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'size': {'key': 'properties.size', 'type': 'long'},
'qos_type': {'key': 'properties.qosType', 'type': 'str'},
}
def __init__(
self,
*,
location: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
size: Optional[int] = 4398046511104,
qos_type: Optional[Union[str, "QosType"]] = None,
**kwargs
):
super(CapacityPoolPatch, self).__init__(**kwargs)
self.location = location
self.id = None
self.name = None
self.type = None
self.tags = tags
self.size = size
self.qos_type = qos_type
class CheckAvailabilityResponse(msrest.serialization.Model):
"""Information regarding availability of a resource.
:param is_available: :code:`true` indicates name is valid and available. :code:`false`
indicates the name is invalid, unavailable, or both.
:type is_available: bool
:param reason: :code:`Invalid` indicates the name provided does not match Azure App Service
naming requirements. :code:`AlreadyExists` indicates that the name is already in use and is
therefore unavailable. Possible values include: "Invalid", "AlreadyExists".
:type reason: str or ~azure.mgmt.netapp.models.InAvailabilityReasonType
:param message: If reason == invalid, provide the user with the reason why the given name is
invalid, and provide the resource naming requirements so that the user can select a valid name.
If reason == AlreadyExists, explain that resource name is already in use, and direct them to
select a different name.
:type message: str
"""
_attribute_map = {
'is_available': {'key': 'isAvailable', 'type': 'bool'},
'reason': {'key': 'reason', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
*,
is_available: Optional[bool] = None,
reason: Optional[Union[str, "InAvailabilityReasonType"]] = None,
message: Optional[str] = None,
**kwargs
):
super(CheckAvailabilityResponse, self).__init__(**kwargs)
self.is_available = is_available
self.reason = reason
self.message = message
class CloudErrorBody(msrest.serialization.Model):
"""An error response from the service.
:param code: An identifier for the error. Codes are invariant and are intended to be consumed
programmatically.
:type code: str
:param message: A message describing the error, intended to be suitable for display in a user
interface.
:type message: str
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
*,
code: Optional[str] = None,
message: Optional[str] = None,
**kwargs
):
super(CloudErrorBody, self).__init__(**kwargs)
self.code = code
self.message = message
class DailySchedule(msrest.serialization.Model):
"""Daily Schedule properties.
:param snapshots_to_keep: Daily snapshot count to keep.
:type snapshots_to_keep: int
:param hour: Indicates which hour in UTC timezone a snapshot should be taken.
:type hour: int
:param minute: Indicates which minute a snapshot should be taken.
:type minute: int
:param used_bytes: Resource size in bytes, current storage usage for the volume in bytes.
:type used_bytes: long
"""
_attribute_map = {
'snapshots_to_keep': {'key': 'snapshotsToKeep', 'type': 'int'},
'hour': {'key': 'hour', 'type': 'int'},
'minute': {'key': 'minute', 'type': 'int'},
'used_bytes': {'key': 'usedBytes', 'type': 'long'},
}
def __init__(
self,
*,
snapshots_to_keep: Optional[int] = None,
hour: Optional[int] = None,
minute: Optional[int] = None,
used_bytes: Optional[int] = None,
**kwargs
):
super(DailySchedule, self).__init__(**kwargs)
self.snapshots_to_keep = snapshots_to_keep
self.hour = hour
self.minute = minute
self.used_bytes = used_bytes
class Dimension(msrest.serialization.Model):
"""Dimension of blobs, possibly be blob type or access tier.
:param name: Display name of dimension.
:type name: str
:param display_name: Display name of dimension.
:type display_name: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
display_name: Optional[str] = None,
**kwargs
):
super(Dimension, self).__init__(**kwargs)
self.name = name
self.display_name = display_name
class ExportPolicyRule(msrest.serialization.Model):
"""Volume Export Policy Rule.
:param rule_index: Order index.
:type rule_index: int
:param unix_read_only: Read only access.
:type unix_read_only: bool
:param unix_read_write: Read and write access.
:type unix_read_write: bool
:param kerberos5_read_only: Kerberos5 Read only access. To be used with swagger version
2020-05-01 or later.
:type kerberos5_read_only: bool
:param kerberos5_read_write: Kerberos5 Read and write access. To be used with swagger version
2020-05-01 or later.
:type kerberos5_read_write: bool
:param kerberos5_i_read_only: Kerberos5i Read only access. To be used with swagger version
2020-05-01 or later.
:type kerberos5_i_read_only: bool
:param kerberos5_i_read_write: Kerberos5i Read and write access. To be used with swagger version
2020-05-01 or later.
:type kerberos5_i_read_write: bool
:param kerberos5_p_read_only: Kerberos5p Read only access. To be used with swagger version
2020-05-01 or later.
:type kerberos5_p_read_only: bool
:param kerberos5_p_read_write: Kerberos5p Read and write access. To be used with swagger version
2020-05-01 or later.
:type kerberos5_p_read_write: bool
:param cifs: Allows CIFS protocol.
:type cifs: bool
:param nfsv3: Allows NFSv3 protocol. Enable only for NFSv3 type volumes.
:type nfsv3: bool
:param nfsv41: Allows NFSv4.1 protocol. Enable only for NFSv4.1 type volumes.
:type nfsv41: bool
:param allowed_clients: Client ingress specification as comma separated string with IPv4 CIDRs,
IPv4 host addresses and host names.
:type allowed_clients: str
:param has_root_access: Has root access to volume.
:type has_root_access: bool
:param chown_mode: This parameter specifies who is authorized to change the ownership of a
file. restricted - Only root user can change the ownership of the file. unrestricted - Non-root
users can change ownership of files that they own. Possible values include: "Restricted",
"Unrestricted". Default value: "Restricted".
:type chown_mode: str or ~azure.mgmt.netapp.models.ChownMode
"""
_attribute_map = {
'rule_index': {'key': 'ruleIndex', 'type': 'int'},
'unix_read_only': {'key': 'unixReadOnly', 'type': 'bool'},
'unix_read_write': {'key': 'unixReadWrite', 'type': 'bool'},
'kerberos5_read_only': {'key': 'kerberos5ReadOnly', 'type': 'bool'},
'kerberos5_read_write': {'key': 'kerberos5ReadWrite', 'type': 'bool'},
'kerberos5_i_read_only': {'key': 'kerberos5iReadOnly', 'type': 'bool'},
'kerberos5_i_read_write': {'key': 'kerberos5iReadWrite', 'type': 'bool'},
'kerberos5_p_read_only': {'key': 'kerberos5pReadOnly', 'type': 'bool'},
'kerberos5_p_read_write': {'key': 'kerberos5pReadWrite', 'type': 'bool'},
'cifs': {'key': 'cifs', 'type': 'bool'},
'nfsv3': {'key': 'nfsv3', 'type': 'bool'},
'nfsv41': {'key': 'nfsv41', 'type': 'bool'},
'allowed_clients': {'key': 'allowedClients', 'type': 'str'},
'has_root_access': {'key': 'hasRootAccess', 'type': 'bool'},
'chown_mode': {'key': 'chownMode', 'type': 'str'},
}
def __init__(
self,
*,
rule_index: Optional[int] = None,
unix_read_only: Optional[bool] = None,
unix_read_write: Optional[bool] = None,
kerberos5_read_only: Optional[bool] = False,
kerberos5_read_write: Optional[bool] = False,
kerberos5_i_read_only: Optional[bool] = False,
kerberos5_i_read_write: Optional[bool] = False,
kerberos5_p_read_only: Optional[bool] = False,
kerberos5_p_read_write: Optional[bool] = False,
cifs: Optional[bool] = None,
nfsv3: Optional[bool] = None,
nfsv41: Optional[bool] = None,
allowed_clients: Optional[str] = None,
has_root_access: Optional[bool] = True,
chown_mode: Optional[Union[str, "ChownMode"]] = "Restricted",
**kwargs
):
super(ExportPolicyRule, self).__init__(**kwargs)
self.rule_index = rule_index
self.unix_read_only = unix_read_only
self.unix_read_write = unix_read_write
self.kerberos5_read_only = kerberos5_read_only
self.kerberos5_read_write = kerberos5_read_write
self.kerberos5_i_read_only = kerberos5_i_read_only
self.kerberos5_i_read_write = kerberos5_i_read_write
self.kerberos5_p_read_only = kerberos5_p_read_only
self.kerberos5_p_read_write = kerberos5_p_read_write
self.cifs = cifs
self.nfsv3 = nfsv3
self.nfsv41 = nfsv41
self.allowed_clients = allowed_clients
self.has_root_access = has_root_access
self.chown_mode = chown_mode
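# --- Illustrative usage sketch (editor's addition, not generated code) ---
# A typical NFSv3 export rule granting read/write access to a hypothetical
# client CIDR. The rule index and client range are examples only; the Kerberos
# flags keep their documented defaults (False).
def _example_export_policy_rule():
    return ExportPolicyRule(
        rule_index=1,
        unix_read_write=True,
        nfsv3=True,
        cifs=False,
        allowed_clients="10.0.0.0/24",   # hypothetical client CIDR
        has_root_access=True,
    )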
class FilePathAvailabilityRequest(msrest.serialization.Model):
"""File path availability request content - availability is based on the name and the subnetId.
All required parameters must be populated in order to send to Azure.
:param name: Required. File path to verify.
:type name: str
:param subnet_id: Required. The Azure Resource URI for a delegated subnet. Must have the
delegation Microsoft.NetApp/volumes.
:type subnet_id: str
"""
_validation = {
'name': {'required': True},
'subnet_id': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'subnet_id': {'key': 'subnetId', 'type': 'str'},
}
def __init__(
self,
*,
name: str,
subnet_id: str,
**kwargs
):
super(FilePathAvailabilityRequest, self).__init__(**kwargs)
self.name = name
self.subnet_id = subnet_id
class HourlySchedule(msrest.serialization.Model):
"""Hourly Schedule properties.
:param snapshots_to_keep: Hourly snapshot count to keep.
:type snapshots_to_keep: int
:param minute: Indicates which minute a snapshot should be taken.
:type minute: int
:param used_bytes: Resource size in bytes, current storage usage for the volume in bytes.
:type used_bytes: long
"""
_attribute_map = {
'snapshots_to_keep': {'key': 'snapshotsToKeep', 'type': 'int'},
'minute': {'key': 'minute', 'type': 'int'},
'used_bytes': {'key': 'usedBytes', 'type': 'long'},
}
def __init__(
self,
*,
snapshots_to_keep: Optional[int] = None,
minute: Optional[int] = None,
used_bytes: Optional[int] = None,
**kwargs
):
super(HourlySchedule, self).__init__(**kwargs)
self.snapshots_to_keep = snapshots_to_keep
self.minute = minute
self.used_bytes = used_bytes
class LogSpecification(msrest.serialization.Model):
"""Log Definition of a single resource metric.
:param name:
:type name: str
:param display_name:
:type display_name: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
display_name: Optional[str] = None,
**kwargs
):
super(LogSpecification, self).__init__(**kwargs)
self.name = name
self.display_name = display_name
class MetricSpecification(msrest.serialization.Model):
"""Metric specification of operation.
:param name: Name of metric specification.
:type name: str
:param display_name: Display name of metric specification.
:type display_name: str
:param display_description: Display description of metric specification.
:type display_description: str
:param unit: Unit could be Bytes or Count.
:type unit: str
:param supported_aggregation_types: Supported metric aggregation types.
:type supported_aggregation_types: list[str or ~azure.mgmt.netapp.models.MetricAggregationType]
:param supported_time_grain_types: The supported time grain types for the metrics.
:type supported_time_grain_types: list[str]
:param internal_metric_name: The internal metric name.
:type internal_metric_name: str
:param enable_regional_mdm_account: Whether or not the service is using regional MDM accounts.
:type enable_regional_mdm_account: bool
:param source_mdm_account: The source MDM account.
:type source_mdm_account: str
:param source_mdm_namespace: The source MDM namespace.
:type source_mdm_namespace: str
:param dimensions: Dimensions of blobs, including blob type and access tier.
:type dimensions: list[~azure.mgmt.netapp.models.Dimension]
:param aggregation_type: Aggregation type could be Average.
:type aggregation_type: str
:param fill_gap_with_zero: The property to decide whether to fill gaps with zero or not.
:type fill_gap_with_zero: bool
:param category: The category this metric specification belongs to, could be Capacity.
:type category: str
:param resource_id_dimension_name_override: Account Resource Id.
:type resource_id_dimension_name_override: str
:param is_internal: Whether the metric is internal.
:type is_internal: bool
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'display_description': {'key': 'displayDescription', 'type': 'str'},
'unit': {'key': 'unit', 'type': 'str'},
'supported_aggregation_types': {'key': 'supportedAggregationTypes', 'type': '[str]'},
'supported_time_grain_types': {'key': 'supportedTimeGrainTypes', 'type': '[str]'},
'internal_metric_name': {'key': 'internalMetricName', 'type': 'str'},
'enable_regional_mdm_account': {'key': 'enableRegionalMdmAccount', 'type': 'bool'},
'source_mdm_account': {'key': 'sourceMdmAccount', 'type': 'str'},
'source_mdm_namespace': {'key': 'sourceMdmNamespace', 'type': 'str'},
'dimensions': {'key': 'dimensions', 'type': '[Dimension]'},
'aggregation_type': {'key': 'aggregationType', 'type': 'str'},
'fill_gap_with_zero': {'key': 'fillGapWithZero', 'type': 'bool'},
'category': {'key': 'category', 'type': 'str'},
'resource_id_dimension_name_override': {'key': 'resourceIdDimensionNameOverride', 'type': 'str'},
'is_internal': {'key': 'isInternal', 'type': 'bool'},
}
def __init__(
self,
*,
name: Optional[str] = None,
display_name: Optional[str] = None,
display_description: Optional[str] = None,
unit: Optional[str] = None,
supported_aggregation_types: Optional[List[Union[str, "MetricAggregationType"]]] = None,
supported_time_grain_types: Optional[List[str]] = None,
internal_metric_name: Optional[str] = None,
enable_regional_mdm_account: Optional[bool] = None,
source_mdm_account: Optional[str] = None,
source_mdm_namespace: Optional[str] = None,
dimensions: Optional[List["Dimension"]] = None,
aggregation_type: Optional[str] = None,
fill_gap_with_zero: Optional[bool] = None,
category: Optional[str] = None,
resource_id_dimension_name_override: Optional[str] = None,
is_internal: Optional[bool] = None,
**kwargs
):
super(MetricSpecification, self).__init__(**kwargs)
self.name = name
self.display_name = display_name
self.display_description = display_description
self.unit = unit
self.supported_aggregation_types = supported_aggregation_types
self.supported_time_grain_types = supported_time_grain_types
self.internal_metric_name = internal_metric_name
self.enable_regional_mdm_account = enable_regional_mdm_account
self.source_mdm_account = source_mdm_account
self.source_mdm_namespace = source_mdm_namespace
self.dimensions = dimensions
self.aggregation_type = aggregation_type
self.fill_gap_with_zero = fill_gap_with_zero
self.category = category
self.resource_id_dimension_name_override = resource_id_dimension_name_override
self.is_internal = is_internal
class MonthlySchedule(msrest.serialization.Model):
"""Monthly Schedule properties.
:param snapshots_to_keep: Monthly snapshot count to keep.
:type snapshots_to_keep: int
:param days_of_month: Indicates which days of the month a snapshot should be taken, as a
comma-delimited string.
:type days_of_month: str
:param hour: Indicates which hour in UTC timezone a snapshot should be taken.
:type hour: int
:param minute: Indicates which minute a snapshot should be taken.
:type minute: int
:param used_bytes: Resource size in bytes, current storage usage for the volume in bytes.
:type used_bytes: long
"""
_attribute_map = {
'snapshots_to_keep': {'key': 'snapshotsToKeep', 'type': 'int'},
'days_of_month': {'key': 'daysOfMonth', 'type': 'str'},
'hour': {'key': 'hour', 'type': 'int'},
'minute': {'key': 'minute', 'type': 'int'},
'used_bytes': {'key': 'usedBytes', 'type': 'long'},
}
def __init__(
self,
*,
snapshots_to_keep: Optional[int] = None,
days_of_month: Optional[str] = None,
hour: Optional[int] = None,
minute: Optional[int] = None,
used_bytes: Optional[int] = None,
**kwargs
):
super(MonthlySchedule, self).__init__(**kwargs)
self.snapshots_to_keep = snapshots_to_keep
self.days_of_month = days_of_month
self.hour = hour
self.minute = minute
self.used_bytes = used_bytes
class MountTarget(msrest.serialization.Model):
"""Mount Target.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param location: Required. Resource location.
:type location: str
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:ivar mount_target_id: UUID v4 used to identify the MountTarget.
:vartype mount_target_id: str
:param file_system_id: Required. UUID v4 used to identify the MountTarget.
:type file_system_id: str
:ivar ip_address: The mount target's IPv4 address.
:vartype ip_address: str
:param smb_server_fqdn: The SMB server's Fully Qualified Domain Name, FQDN.
:type smb_server_fqdn: str
"""
_validation = {
'location': {'required': True},
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'mount_target_id': {'readonly': True, 'max_length': 36, 'min_length': 36, 'pattern': r'^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$'},
'file_system_id': {'required': True, 'max_length': 36, 'min_length': 36, 'pattern': r'^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$'},
'ip_address': {'readonly': True},
}
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'mount_target_id': {'key': 'properties.mountTargetId', 'type': 'str'},
'file_system_id': {'key': 'properties.fileSystemId', 'type': 'str'},
'ip_address': {'key': 'properties.ipAddress', 'type': 'str'},
'smb_server_fqdn': {'key': 'properties.smbServerFqdn', 'type': 'str'},
}
def __init__(
self,
*,
location: str,
file_system_id: str,
tags: Optional[Dict[str, str]] = None,
smb_server_fqdn: Optional[str] = None,
**kwargs
):
super(MountTarget, self).__init__(**kwargs)
self.location = location
self.id = None
self.name = None
self.type = None
self.tags = tags
self.mount_target_id = None
self.file_system_id = file_system_id
self.ip_address = None
self.smb_server_fqdn = smb_server_fqdn
class MountTargetProperties(msrest.serialization.Model):
"""Mount target properties.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar mount_target_id: UUID v4 used to identify the MountTarget.
:vartype mount_target_id: str
:param file_system_id: Required. UUID v4 used to identify the MountTarget.
:type file_system_id: str
:ivar ip_address: The mount target's IPv4 address.
:vartype ip_address: str
:param smb_server_fqdn: The SMB server's Fully Qualified Domain Name, FQDN.
:type smb_server_fqdn: str
"""
_validation = {
'mount_target_id': {'readonly': True, 'max_length': 36, 'min_length': 36, 'pattern': r'^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$'},
'file_system_id': {'required': True, 'max_length': 36, 'min_length': 36, 'pattern': r'^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$'},
'ip_address': {'readonly': True},
}
_attribute_map = {
'mount_target_id': {'key': 'mountTargetId', 'type': 'str'},
'file_system_id': {'key': 'fileSystemId', 'type': 'str'},
'ip_address': {'key': 'ipAddress', 'type': 'str'},
'smb_server_fqdn': {'key': 'smbServerFqdn', 'type': 'str'},
}
def __init__(
self,
*,
file_system_id: str,
smb_server_fqdn: Optional[str] = None,
**kwargs
):
super(MountTargetProperties, self).__init__(**kwargs)
self.mount_target_id = None
self.file_system_id = file_system_id
self.ip_address = None
self.smb_server_fqdn = smb_server_fqdn
class NetAppAccount(msrest.serialization.Model):
"""NetApp account resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param location: Required. Resource location.
:type location: str
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar etag: A unique read-only string that changes whenever the resource is updated.
:vartype etag: str
:ivar type: Resource type.
:vartype type: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:ivar system_data: The system metadata relating to this resource.
:vartype system_data: ~azure.mgmt.netapp.models.SystemData
:ivar provisioning_state: Azure lifecycle management.
:vartype provisioning_state: str
:param active_directories: Active Directories.
:type active_directories: list[~azure.mgmt.netapp.models.ActiveDirectory]
:param encryption: Encryption settings.
:type encryption: ~azure.mgmt.netapp.models.AccountEncryption
"""
_validation = {
'location': {'required': True},
'id': {'readonly': True},
'name': {'readonly': True},
'etag': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'active_directories': {'key': 'properties.activeDirectories', 'type': '[ActiveDirectory]'},
'encryption': {'key': 'properties.encryption', 'type': 'AccountEncryption'},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
active_directories: Optional[List["ActiveDirectory"]] = None,
encryption: Optional["AccountEncryption"] = None,
**kwargs
):
super(NetAppAccount, self).__init__(**kwargs)
self.location = location
self.id = None
self.name = None
self.etag = None
self.type = None
self.tags = tags
self.system_data = None
self.provisioning_state = None
self.active_directories = active_directories
self.encryption = encryption
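# --- Illustrative usage sketch (editor's addition, not generated code) ---
# Minimal NetApp account body with only the required location plus tags.
# Active directories and encryption are optional and omitted here; as_dict()
# is assumed to be the usual msrest helper giving a plain-dict view.
def _example_netapp_account():
    account = NetAppAccount(
        location="westeurope",                          # hypothetical region
        tags={"env": "dev", "owner": "storage-team"},   # hypothetical tags
    )
    return account.as_dict()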
class NetAppAccountList(msrest.serialization.Model):
"""List of NetApp account resources.
:param value: Multiple NetApp accounts.
:type value: list[~azure.mgmt.netapp.models.NetAppAccount]
:param next_link: URL to get the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[NetAppAccount]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["NetAppAccount"]] = None,
next_link: Optional[str] = None,
**kwargs
):
super(NetAppAccountList, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class NetAppAccountPatch(msrest.serialization.Model):
"""NetApp account patch resource.
Variables are only populated by the server, and will be ignored when sending a request.
:param location: Resource location.
:type location: str
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:ivar provisioning_state: Azure lifecycle management.
:vartype provisioning_state: str
:param active_directories: Active Directories.
:type active_directories: list[~azure.mgmt.netapp.models.ActiveDirectory]
:param encryption: Encryption settings.
:type encryption: ~azure.mgmt.netapp.models.AccountEncryption
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'active_directories': {'key': 'properties.activeDirectories', 'type': '[ActiveDirectory]'},
'encryption': {'key': 'properties.encryption', 'type': 'AccountEncryption'},
}
def __init__(
self,
*,
location: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
active_directories: Optional[List["ActiveDirectory"]] = None,
encryption: Optional["AccountEncryption"] = None,
**kwargs
):
super(NetAppAccountPatch, self).__init__(**kwargs)
self.location = location
self.id = None
self.name = None
self.type = None
self.tags = tags
self.provisioning_state = None
self.active_directories = active_directories
self.encryption = encryption
class Operation(msrest.serialization.Model):
"""Microsoft.NetApp REST API operation definition.
:param name: Operation name: {provider}/{resource}/{operation}.
:type name: str
:param display: Display metadata associated with the operation.
:type display: ~azure.mgmt.netapp.models.OperationDisplay
:param origin: The origin of operations.
:type origin: str
:param service_specification: One property of operation, include metric specifications.
:type service_specification: ~azure.mgmt.netapp.models.ServiceSpecification
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display': {'key': 'display', 'type': 'OperationDisplay'},
'origin': {'key': 'origin', 'type': 'str'},
'service_specification': {'key': 'properties.serviceSpecification', 'type': 'ServiceSpecification'},
}
def __init__(
self,
*,
name: Optional[str] = None,
display: Optional["OperationDisplay"] = None,
origin: Optional[str] = None,
service_specification: Optional["ServiceSpecification"] = None,
**kwargs
):
super(Operation, self).__init__(**kwargs)
self.name = name
self.display = display
self.origin = origin
self.service_specification = service_specification
class OperationDisplay(msrest.serialization.Model):
"""Display metadata associated with the operation.
:param provider: Service provider: Microsoft NetApp.
:type provider: str
:param resource: Resource on which the operation is performed etc.
:type resource: str
:param operation: Type of operation: get, read, delete, etc.
:type operation: str
:param description: Operation description.
:type description: str
"""
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
*,
provider: Optional[str] = None,
resource: Optional[str] = None,
operation: Optional[str] = None,
description: Optional[str] = None,
**kwargs
):
super(OperationDisplay, self).__init__(**kwargs)
self.provider = provider
self.resource = resource
self.operation = operation
self.description = description
class OperationListResult(msrest.serialization.Model):
"""Result of the request to list Cloud Volume operations. It contains a list of operations and a URL link to get the next set of results.
:param value: List of Storage operations supported by the Storage resource provider.
:type value: list[~azure.mgmt.netapp.models.Operation]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[Operation]'},
}
def __init__(
self,
*,
value: Optional[List["Operation"]] = None,
**kwargs
):
super(OperationListResult, self).__init__(**kwargs)
self.value = value
class PoolChangeRequest(msrest.serialization.Model):
"""Pool change request.
All required parameters must be populated in order to send to Azure.
:param new_pool_resource_id: Required. Resource id of the pool to move volume to.
:type new_pool_resource_id: str
"""
_validation = {
'new_pool_resource_id': {'required': True},
}
_attribute_map = {
'new_pool_resource_id': {'key': 'newPoolResourceId', 'type': 'str'},
}
def __init__(
self,
*,
new_pool_resource_id: str,
**kwargs
):
super(PoolChangeRequest, self).__init__(**kwargs)
self.new_pool_resource_id = new_pool_resource_id
class Resource(msrest.serialization.Model):
"""Common fields that are returned in the response for all Azure Resource Manager resources.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
class ProxyResource(Resource):
"""The resource model definition for a Azure Resource Manager proxy resource. It will not have tags and a location.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ProxyResource, self).__init__(**kwargs)
class QuotaAvailabilityRequest(msrest.serialization.Model):
"""Quota availability request content.
All required parameters must be populated in order to send to Azure.
:param name: Required. Name of the resource to verify.
:type name: str
:param type: Required. Resource type used for verification. Possible values include:
"Microsoft.NetApp/netAppAccounts", "Microsoft.NetApp/netAppAccounts/capacityPools",
"Microsoft.NetApp/netAppAccounts/capacityPools/volumes",
"Microsoft.NetApp/netAppAccounts/capacityPools/volumes/snapshots".
:type type: str or ~azure.mgmt.netapp.models.CheckQuotaNameResourceTypes
:param resource_group: Required. Resource group name.
:type resource_group: str
"""
_validation = {
'name': {'required': True},
'type': {'required': True},
'resource_group': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'resource_group': {'key': 'resourceGroup', 'type': 'str'},
}
def __init__(
self,
*,
name: str,
type: Union[str, "CheckQuotaNameResourceTypes"],
resource_group: str,
**kwargs
):
super(QuotaAvailabilityRequest, self).__init__(**kwargs)
self.name = name
self.type = type
self.resource_group = resource_group
class ReplicationObject(msrest.serialization.Model):
"""Replication properties.
All required parameters must be populated in order to send to Azure.
:param replication_id: Id.
:type replication_id: str
:param endpoint_type: Indicates whether the local volume is the source or destination for the
Volume Replication. Possible values include: "src", "dst".
:type endpoint_type: str or ~azure.mgmt.netapp.models.EndpointType
:param replication_schedule: Schedule. Possible values include: "_10minutely", "hourly",
"daily".
:type replication_schedule: str or ~azure.mgmt.netapp.models.ReplicationSchedule
:param remote_volume_resource_id: Required. The resource ID of the remote volume.
:type remote_volume_resource_id: str
:param remote_volume_region: The remote region for the other end of the Volume Replication.
:type remote_volume_region: str
"""
_validation = {
'remote_volume_resource_id': {'required': True},
}
_attribute_map = {
'replication_id': {'key': 'replicationId', 'type': 'str'},
'endpoint_type': {'key': 'endpointType', 'type': 'str'},
'replication_schedule': {'key': 'replicationSchedule', 'type': 'str'},
'remote_volume_resource_id': {'key': 'remoteVolumeResourceId', 'type': 'str'},
'remote_volume_region': {'key': 'remoteVolumeRegion', 'type': 'str'},
}
def __init__(
self,
*,
remote_volume_resource_id: str,
replication_id: Optional[str] = None,
endpoint_type: Optional[Union[str, "EndpointType"]] = None,
replication_schedule: Optional[Union[str, "ReplicationSchedule"]] = None,
remote_volume_region: Optional[str] = None,
**kwargs
):
super(ReplicationObject, self).__init__(**kwargs)
self.replication_id = replication_id
self.endpoint_type = endpoint_type
self.replication_schedule = replication_schedule
self.remote_volume_resource_id = remote_volume_resource_id
self.remote_volume_region = remote_volume_region
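# --- Illustrative usage sketch (editor's addition, not generated code) ---
# Replication settings as seen from the destination ("dst") volume. The remote
# volume resource ID below is a hypothetical ARM ID; only that field is
# required by the validation above.
def _example_replication_object():
    return ReplicationObject(
        remote_volume_resource_id=(
            "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg"
            "/providers/Microsoft.NetApp/netAppAccounts/acct/capacityPools/pool"
            "/volumes/source-vol"
        ),
        endpoint_type="dst",
        replication_schedule="hourly",
        remote_volume_region="eastus2",   # hypothetical region
    )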
class ReplicationStatus(msrest.serialization.Model):
"""Replication status.
:param healthy: Replication health check.
:type healthy: bool
:param relationship_status: Status of the mirror relationship. Possible values include: "Idle",
"Transferring".
:type relationship_status: str or ~azure.mgmt.netapp.models.RelationshipStatus
:param mirror_state: The status of the replication. Possible values include: "Uninitialized",
"Mirrored", "Broken".
:type mirror_state: str or ~azure.mgmt.netapp.models.MirrorState
:param total_progress: The progress of the replication.
:type total_progress: str
:param error_message: Displays error message if the replication is in an error state.
:type error_message: str
"""
_attribute_map = {
'healthy': {'key': 'healthy', 'type': 'bool'},
'relationship_status': {'key': 'relationshipStatus', 'type': 'str'},
'mirror_state': {'key': 'mirrorState', 'type': 'str'},
'total_progress': {'key': 'totalProgress', 'type': 'str'},
'error_message': {'key': 'errorMessage', 'type': 'str'},
}
def __init__(
self,
*,
healthy: Optional[bool] = None,
relationship_status: Optional[Union[str, "RelationshipStatus"]] = None,
mirror_state: Optional[Union[str, "MirrorState"]] = None,
total_progress: Optional[str] = None,
error_message: Optional[str] = None,
**kwargs
):
super(ReplicationStatus, self).__init__(**kwargs)
self.healthy = healthy
self.relationship_status = relationship_status
self.mirror_state = mirror_state
self.total_progress = total_progress
self.error_message = error_message
class ResourceIdentity(msrest.serialization.Model):
"""Identity for the resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar principal_id: Object id of the identity resource.
:vartype principal_id: str
:ivar tenant_id: The tenant id of the resource.
:vartype tenant_id: str
:param type: Type of Identity. Supported values are: 'None', 'SystemAssigned'.
:type type: str
"""
_validation = {
'principal_id': {'readonly': True},
'tenant_id': {'readonly': True},
}
_attribute_map = {
'principal_id': {'key': 'principalId', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
*,
type: Optional[str] = None,
**kwargs
):
super(ResourceIdentity, self).__init__(**kwargs)
self.principal_id = None
self.tenant_id = None
self.type = type
class ResourceNameAvailabilityRequest(msrest.serialization.Model):
"""Resource name availability request content.
All required parameters must be populated in order to send to Azure.
:param name: Required. Resource name to verify.
:type name: str
:param type: Required. Resource type used for verification. Possible values include:
"Microsoft.NetApp/netAppAccounts", "Microsoft.NetApp/netAppAccounts/capacityPools",
"Microsoft.NetApp/netAppAccounts/capacityPools/volumes",
"Microsoft.NetApp/netAppAccounts/capacityPools/volumes/snapshots".
:type type: str or ~azure.mgmt.netapp.models.CheckNameResourceTypes
:param resource_group: Required. Resource group name.
:type resource_group: str
"""
_validation = {
'name': {'required': True},
'type': {'required': True},
'resource_group': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'resource_group': {'key': 'resourceGroup', 'type': 'str'},
}
def __init__(
self,
*,
name: str,
type: Union[str, "CheckNameResourceTypes"],
resource_group: str,
**kwargs
):
super(ResourceNameAvailabilityRequest, self).__init__(**kwargs)
self.name = name
self.type = type
self.resource_group = resource_group
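# --- Illustrative usage sketch (editor's addition, not generated code) ---
# Checking whether a volume name is free. The names below are hypothetical;
# serialize() should produce {"name": ..., "type": ..., "resourceGroup": ...}
# matching the _attribute_map keys above.
def _example_name_availability_request():
    request = ResourceNameAvailabilityRequest(
        name="account1/pool1/vol1",
        type="Microsoft.NetApp/netAppAccounts/capacityPools/volumes",
        resource_group="my-resource-group",
    )
    return request.serialize()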
class RestoreStatus(msrest.serialization.Model):
"""Restore status.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar healthy: Restore health status.
:vartype healthy: bool
:ivar relationship_status: Status of the restore SnapMirror relationship. Possible values
include: "Idle", "Transferring".
:vartype relationship_status: str or ~azure.mgmt.netapp.models.RelationshipStatus
:ivar mirror_state: The status of the restore. Possible values include: "Uninitialized",
"Mirrored", "Broken".
:vartype mirror_state: str or ~azure.mgmt.netapp.models.MirrorState
:ivar unhealthy_reason: Reason for the unhealthy restore relationship.
:vartype unhealthy_reason: str
:ivar error_message: Displays error message if the restore is in an error state.
:vartype error_message: str
:ivar total_transfer_bytes: Displays the total bytes transferred.
:vartype total_transfer_bytes: long
"""
_validation = {
'healthy': {'readonly': True},
'relationship_status': {'readonly': True},
'mirror_state': {'readonly': True},
'unhealthy_reason': {'readonly': True},
'error_message': {'readonly': True},
'total_transfer_bytes': {'readonly': True},
}
_attribute_map = {
'healthy': {'key': 'healthy', 'type': 'bool'},
'relationship_status': {'key': 'relationshipStatus', 'type': 'str'},
'mirror_state': {'key': 'mirrorState', 'type': 'str'},
'unhealthy_reason': {'key': 'unhealthyReason', 'type': 'str'},
'error_message': {'key': 'errorMessage', 'type': 'str'},
'total_transfer_bytes': {'key': 'totalTransferBytes', 'type': 'long'},
}
def __init__(
self,
**kwargs
):
super(RestoreStatus, self).__init__(**kwargs)
self.healthy = None
self.relationship_status = None
self.mirror_state = None
self.unhealthy_reason = None
self.error_message = None
self.total_transfer_bytes = None
class ServiceSpecification(msrest.serialization.Model):
"""One property of operation, include metric specifications.
:param metric_specifications: Metric specifications of operation.
:type metric_specifications: list[~azure.mgmt.netapp.models.MetricSpecification]
:param log_specifications:
:type log_specifications: list[~azure.mgmt.netapp.models.LogSpecification]
"""
_attribute_map = {
'metric_specifications': {'key': 'metricSpecifications', 'type': '[MetricSpecification]'},
'log_specifications': {'key': 'logSpecifications', 'type': '[LogSpecification]'},
}
def __init__(
self,
*,
metric_specifications: Optional[List["MetricSpecification"]] = None,
log_specifications: Optional[List["LogSpecification"]] = None,
**kwargs
):
super(ServiceSpecification, self).__init__(**kwargs)
self.metric_specifications = metric_specifications
self.log_specifications = log_specifications
class Snapshot(msrest.serialization.Model):
"""Snapshot of a Volume.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param location: Required. Resource location.
:type location: str
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:ivar snapshot_id: UUID v4 used to identify the Snapshot.
:vartype snapshot_id: str
:ivar created: The creation date of the snapshot.
:vartype created: ~datetime.datetime
:ivar provisioning_state: Azure lifecycle management.
:vartype provisioning_state: str
"""
_validation = {
'location': {'required': True},
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'snapshot_id': {'readonly': True, 'max_length': 36, 'min_length': 36, 'pattern': r'^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$'},
'created': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'snapshot_id': {'key': 'properties.snapshotId', 'type': 'str'},
'created': {'key': 'properties.created', 'type': 'iso-8601'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
}
def __init__(
self,
*,
location: str,
**kwargs
):
super(Snapshot, self).__init__(**kwargs)
self.location = location
self.id = None
self.name = None
self.type = None
self.snapshot_id = None
self.created = None
self.provisioning_state = None
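# --- Illustrative usage sketch (editor's addition, not generated code) ---
# Read-only fields such as snapshot_id and created are populated when a
# response is deserialized; Snapshot.deserialize() is assumed to be the
# standard msrest helper that understands the flattened 'properties.*' keys.
# The payload below is a hypothetical REST response body.
def _example_snapshot_from_response():
    payload = {
        "location": "eastus",
        "properties": {"snapshotId": "11111111-2222-3333-4444-555555555555"},
    }
    return Snapshot.deserialize(payload)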
class SnapshotPoliciesList(msrest.serialization.Model):
"""List of Snapshot Policies.
:param value: A list of snapshot policies.
:type value: list[~azure.mgmt.netapp.models.SnapshotPolicy]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[SnapshotPolicy]'},
}
def __init__(
self,
*,
value: Optional[List["SnapshotPolicy"]] = None,
**kwargs
):
super(SnapshotPoliciesList, self).__init__(**kwargs)
self.value = value
class SnapshotPolicy(msrest.serialization.Model):
"""Snapshot policy information.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param location: Required. Resource location.
:type location: str
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar etag: A unique read-only string that changes whenever the resource is updated.
:vartype etag: str
:ivar type: Resource type.
:vartype type: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param hourly_schedule: Schedule for hourly snapshots.
:type hourly_schedule: ~azure.mgmt.netapp.models.HourlySchedule
:param daily_schedule: Schedule for daily snapshots.
:type daily_schedule: ~azure.mgmt.netapp.models.DailySchedule
:param weekly_schedule: Schedule for weekly snapshots.
:type weekly_schedule: ~azure.mgmt.netapp.models.WeeklySchedule
:param monthly_schedule: Schedule for monthly snapshots.
:type monthly_schedule: ~azure.mgmt.netapp.models.MonthlySchedule
:param enabled: The property to decide whether the policy is enabled or not.
:type enabled: bool
:ivar provisioning_state: Azure lifecycle management.
:vartype provisioning_state: str
"""
_validation = {
'location': {'required': True},
'id': {'readonly': True},
'name': {'readonly': True},
'etag': {'readonly': True},
'type': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'hourly_schedule': {'key': 'properties.hourlySchedule', 'type': 'HourlySchedule'},
'daily_schedule': {'key': 'properties.dailySchedule', 'type': 'DailySchedule'},
'weekly_schedule': {'key': 'properties.weeklySchedule', 'type': 'WeeklySchedule'},
'monthly_schedule': {'key': 'properties.monthlySchedule', 'type': 'MonthlySchedule'},
'enabled': {'key': 'properties.enabled', 'type': 'bool'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
hourly_schedule: Optional["HourlySchedule"] = None,
daily_schedule: Optional["DailySchedule"] = None,
weekly_schedule: Optional["WeeklySchedule"] = None,
monthly_schedule: Optional["MonthlySchedule"] = None,
enabled: Optional[bool] = None,
**kwargs
):
super(SnapshotPolicy, self).__init__(**kwargs)
self.location = location
self.id = None
self.name = None
self.etag = None
self.type = None
self.tags = tags
self.hourly_schedule = hourly_schedule
self.daily_schedule = daily_schedule
self.weekly_schedule = weekly_schedule
self.monthly_schedule = monthly_schedule
self.enabled = enabled
self.provisioning_state = None
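# --- Illustrative usage sketch (editor's addition, not generated code) ---
# Composing a snapshot policy from the schedule models defined earlier in this
# module. The region, retention counts, and times are hypothetical.
def _example_snapshot_policy():
    return SnapshotPolicy(
        location="northeurope",
        hourly_schedule=HourlySchedule(snapshots_to_keep=4, minute=0),
        daily_schedule=DailySchedule(snapshots_to_keep=7, hour=1, minute=30),
        enabled=True,
    )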
class SnapshotPolicyDetails(msrest.serialization.Model):
"""Snapshot policy properties.
Variables are only populated by the server, and will be ignored when sending a request.
:param location: Resource location.
:type location: str
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param hourly_schedule: Schedule for hourly snapshots.
:type hourly_schedule: ~azure.mgmt.netapp.models.HourlySchedule
:param daily_schedule: Schedule for daily snapshots.
:type daily_schedule: ~azure.mgmt.netapp.models.DailySchedule
:param weekly_schedule: Schedule for weekly snapshots.
:type weekly_schedule: ~azure.mgmt.netapp.models.WeeklySchedule
:param monthly_schedule: Schedule for monthly snapshots.
:type monthly_schedule: ~azure.mgmt.netapp.models.MonthlySchedule
:param enabled: The property to decide whether the policy is enabled or not.
:type enabled: bool
:ivar provisioning_state: Azure lifecycle management.
:vartype provisioning_state: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'hourly_schedule': {'key': 'properties.hourlySchedule', 'type': 'HourlySchedule'},
'daily_schedule': {'key': 'properties.dailySchedule', 'type': 'DailySchedule'},
'weekly_schedule': {'key': 'properties.weeklySchedule', 'type': 'WeeklySchedule'},
'monthly_schedule': {'key': 'properties.monthlySchedule', 'type': 'MonthlySchedule'},
'enabled': {'key': 'properties.enabled', 'type': 'bool'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
}
def __init__(
self,
*,
location: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
hourly_schedule: Optional["HourlySchedule"] = None,
daily_schedule: Optional["DailySchedule"] = None,
weekly_schedule: Optional["WeeklySchedule"] = None,
monthly_schedule: Optional["MonthlySchedule"] = None,
enabled: Optional[bool] = None,
**kwargs
):
super(SnapshotPolicyDetails, self).__init__(**kwargs)
self.location = location
self.id = None
self.name = None
self.type = None
self.tags = tags
self.hourly_schedule = hourly_schedule
self.daily_schedule = daily_schedule
self.weekly_schedule = weekly_schedule
self.monthly_schedule = monthly_schedule
self.enabled = enabled
self.provisioning_state = None
class SnapshotPolicyPatch(msrest.serialization.Model):
"""Snapshot policy Details for create and update.
Variables are only populated by the server, and will be ignored when sending a request.
:param location: Resource location.
:type location: str
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param hourly_schedule: Schedule for hourly snapshots.
:type hourly_schedule: ~azure.mgmt.netapp.models.HourlySchedule
:param daily_schedule: Schedule for daily snapshots.
:type daily_schedule: ~azure.mgmt.netapp.models.DailySchedule
:param weekly_schedule: Schedule for weekly snapshots.
:type weekly_schedule: ~azure.mgmt.netapp.models.WeeklySchedule
:param monthly_schedule: Schedule for monthly snapshots.
:type monthly_schedule: ~azure.mgmt.netapp.models.MonthlySchedule
:param enabled: The property to decide whether the policy is enabled or not.
:type enabled: bool
:ivar provisioning_state: Azure lifecycle management.
:vartype provisioning_state: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'hourly_schedule': {'key': 'properties.hourlySchedule', 'type': 'HourlySchedule'},
'daily_schedule': {'key': 'properties.dailySchedule', 'type': 'DailySchedule'},
'weekly_schedule': {'key': 'properties.weeklySchedule', 'type': 'WeeklySchedule'},
'monthly_schedule': {'key': 'properties.monthlySchedule', 'type': 'MonthlySchedule'},
'enabled': {'key': 'properties.enabled', 'type': 'bool'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
}
def __init__(
self,
*,
location: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
hourly_schedule: Optional["HourlySchedule"] = None,
daily_schedule: Optional["DailySchedule"] = None,
weekly_schedule: Optional["WeeklySchedule"] = None,
monthly_schedule: Optional["MonthlySchedule"] = None,
enabled: Optional[bool] = None,
**kwargs
):
super(SnapshotPolicyPatch, self).__init__(**kwargs)
self.location = location
self.id = None
self.name = None
self.type = None
self.tags = tags
self.hourly_schedule = hourly_schedule
self.daily_schedule = daily_schedule
self.weekly_schedule = weekly_schedule
self.monthly_schedule = monthly_schedule
self.enabled = enabled
self.provisioning_state = None
class SnapshotPolicyVolumeList(msrest.serialization.Model):
"""Volumes associated with snapshot policy.
:param value: List of volumes.
:type value: list[any]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[object]'},
}
def __init__(
self,
*,
value: Optional[List[Any]] = None,
**kwargs
):
super(SnapshotPolicyVolumeList, self).__init__(**kwargs)
self.value = value
class SnapshotsList(msrest.serialization.Model):
"""List of Snapshots.
:param value: A list of Snapshots.
:type value: list[~azure.mgmt.netapp.models.Snapshot]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[Snapshot]'},
}
def __init__(
self,
*,
value: Optional[List["Snapshot"]] = None,
**kwargs
):
super(SnapshotsList, self).__init__(**kwargs)
self.value = value
class SubscriptionQuotaItem(ProxyResource):
"""Information regarding Subscription Quota Item.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: The system meta data relating to this resource.
:vartype system_data: ~azure.mgmt.netapp.models.SystemData
:ivar name_properties_name: Quota Item name.
:vartype name_properties_name: str
:ivar current: The current quota value.
:vartype current: int
:ivar default: The default quota value.
:vartype default: int
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'name_properties_name': {'readonly': True},
'current': {'readonly': True},
'default': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'name_properties_name': {'key': 'properties.name', 'type': 'str'},
'current': {'key': 'properties.current', 'type': 'int'},
'default': {'key': 'properties.default', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(SubscriptionQuotaItem, self).__init__(**kwargs)
self.system_data = None
self.name_properties_name = None
self.current = None
self.default = None
class SubscriptionQuotaItemList(msrest.serialization.Model):
"""List of Subscription Quota Items.
:param value: A list of SubscriptionQuotaItems.
:type value: list[~azure.mgmt.netapp.models.SubscriptionQuotaItem]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[SubscriptionQuotaItem]'},
}
def __init__(
self,
*,
value: Optional[List["SubscriptionQuotaItem"]] = None,
**kwargs
):
super(SubscriptionQuotaItemList, self).__init__(**kwargs)
self.value = value
class SystemData(msrest.serialization.Model):
"""Metadata pertaining to creation and last modification of the resource.
:param created_by: The identity that created the resource.
:type created_by: str
:param created_by_type: The type of identity that created the resource. Possible values
include: "User", "Application", "ManagedIdentity", "Key".
:type created_by_type: str or ~azure.mgmt.netapp.models.CreatedByType
:param created_at: The timestamp of resource creation (UTC).
:type created_at: ~datetime.datetime
:param last_modified_by: The identity that last modified the resource.
:type last_modified_by: str
:param last_modified_by_type: The type of identity that last modified the resource. Possible
values include: "User", "Application", "ManagedIdentity", "Key".
:type last_modified_by_type: str or ~azure.mgmt.netapp.models.CreatedByType
:param last_modified_at: The timestamp of resource last modification (UTC).
:type last_modified_at: ~datetime.datetime
"""
_attribute_map = {
'created_by': {'key': 'createdBy', 'type': 'str'},
'created_by_type': {'key': 'createdByType', 'type': 'str'},
'created_at': {'key': 'createdAt', 'type': 'iso-8601'},
'last_modified_by': {'key': 'lastModifiedBy', 'type': 'str'},
'last_modified_by_type': {'key': 'lastModifiedByType', 'type': 'str'},
'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'},
}
def __init__(
self,
*,
created_by: Optional[str] = None,
created_by_type: Optional[Union[str, "CreatedByType"]] = None,
created_at: Optional[datetime.datetime] = None,
last_modified_by: Optional[str] = None,
last_modified_by_type: Optional[Union[str, "CreatedByType"]] = None,
last_modified_at: Optional[datetime.datetime] = None,
**kwargs
):
super(SystemData, self).__init__(**kwargs)
self.created_by = created_by
self.created_by_type = created_by_type
self.created_at = created_at
self.last_modified_by = last_modified_by
self.last_modified_by_type = last_modified_by_type
self.last_modified_at = last_modified_at
class Vault(msrest.serialization.Model):
"""Vault information.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param location: Required. Resource location.
:type location: str
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param vault_name: Vault Name.
:type vault_name: str
"""
_validation = {
'location': {'required': True},
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'vault_name': {'key': 'properties.vaultName', 'type': 'str'},
}
def __init__(
self,
*,
location: str,
vault_name: Optional[str] = None,
**kwargs
):
super(Vault, self).__init__(**kwargs)
self.location = location
self.id = None
self.name = None
self.type = None
self.vault_name = vault_name
class VaultList(msrest.serialization.Model):
"""List of Vaults.
:param value: A list of vaults.
:type value: list[~azure.mgmt.netapp.models.Vault]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[Vault]'},
}
def __init__(
self,
*,
value: Optional[List["Vault"]] = None,
**kwargs
):
super(VaultList, self).__init__(**kwargs)
self.value = value
class Volume(msrest.serialization.Model):
"""Volume resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param location: Required. Resource location.
:type location: str
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar etag: A unique read-only string that changes whenever the resource is updated.
:vartype etag: str
:ivar type: Resource type.
:vartype type: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:ivar file_system_id: Unique FileSystem Identifier.
:vartype file_system_id: str
:param creation_token: Required. A unique file path for the volume. Used when creating mount
targets.
:type creation_token: str
:param service_level: The service level of the file system. Possible values include:
"Standard", "Premium", "Ultra", "StandardZRS". Default value: "Premium".
:type service_level: str or ~azure.mgmt.netapp.models.ServiceLevel
:param usage_threshold: Required. Maximum storage quota allowed for a file system in bytes.
     This is a soft quota used for alerting only. Minimum size is 100 GiB. Upper limit is 100 TiB.
Specified in bytes.
:type usage_threshold: long
:param export_policy: Set of export policy rules.
:type export_policy: ~azure.mgmt.netapp.models.VolumePropertiesExportPolicy
:param protocol_types: Set of protocol types, default NFSv3, CIFS for SMB protocol.
:type protocol_types: list[str]
:ivar provisioning_state: Azure lifecycle management.
:vartype provisioning_state: str
:param snapshot_id: UUID v4 or resource identifier used to identify the Snapshot.
:type snapshot_id: str
:param backup_id: UUID v4 or resource identifier used to identify the Backup.
:type backup_id: str
:ivar baremetal_tenant_id: Unique Baremetal Tenant Identifier.
:vartype baremetal_tenant_id: str
:param subnet_id: Required. The Azure Resource URI for a delegated subnet. Must have the
delegation Microsoft.NetApp/volumes.
:type subnet_id: str
:param network_features: Basic network, or Standard features available to the volume. Possible
values include: "Basic", "Standard". Default value: "Basic".
:type network_features: str or ~azure.mgmt.netapp.models.NetworkFeatures
    :ivar network_sibling_set_id: Network Sibling Set ID for the group of volumes sharing
networking resources.
:vartype network_sibling_set_id: str
:ivar storage_to_network_proximity: Provides storage to network proximity information for the
volume. Possible values include: "Default", "T1", "T2".
:vartype storage_to_network_proximity: str or
~azure.mgmt.netapp.models.VolumeStorageToNetworkProximity
:ivar mount_targets: List of mount targets.
:vartype mount_targets: list[~azure.mgmt.netapp.models.MountTargetProperties]
    :param volume_type: What type of volume this is. For destination volumes in Cross Region
Replication, set type to DataProtection.
:type volume_type: str
:param data_protection: DataProtection type volumes include an object containing details of the
replication.
:type data_protection: ~azure.mgmt.netapp.models.VolumePropertiesDataProtection
:param is_restoring: Restoring.
:type is_restoring: bool
:param snapshot_directory_visible: If enabled (true) the volume will contain a read-only
     snapshot directory which provides access to each of the volume's snapshots (defaults to true).
:type snapshot_directory_visible: bool
    :param kerberos_enabled: Describes whether a volume is KerberosEnabled. To be used with
     swagger version 2020-05-01 or later.
:type kerberos_enabled: bool
:param security_style: The security style of volume, default unix, defaults to ntfs for dual
protocol or CIFS protocol. Possible values include: "ntfs", "unix". Default value: "unix".
:type security_style: str or ~azure.mgmt.netapp.models.SecurityStyle
:param smb_encryption: Enables encryption for in-flight smb3 data. Only applicable for
SMB/DualProtocol volume. To be used with swagger version 2020-08-01 or later.
:type smb_encryption: bool
:param smb_continuously_available: Enables continuously available share property for smb
volume. Only applicable for SMB volume.
:type smb_continuously_available: bool
:param throughput_mibps: Maximum throughput in Mibps that can be achieved by this volume.
:type throughput_mibps: float
:param encryption_key_source: Encryption Key Source. Possible values are: 'Microsoft.NetApp'.
:type encryption_key_source: str
:param ldap_enabled: Specifies whether LDAP is enabled or not for a given NFS volume.
:type ldap_enabled: bool
    :param cool_access: Specifies whether Cool Access (tiering) is enabled for the volume.
:type cool_access: bool
:param coolness_period: Specifies the number of days after which data that is not accessed by
clients will be tiered.
:type coolness_period: int
    :param unix_permissions: UNIX permissions for the NFS volume, accepted in octal 4-digit
     format. The first digit selects the set-user-ID (4), set-group-ID (2) and sticky (1)
     attributes. The second digit selects permissions for the owner of the file: read (4),
     write (2) and execute (1). The third digit selects permissions for other users in the same
     group; the fourth for users not in the group. For example, 0755 gives read/write/execute
     permissions to the owner and read/execute to group and other users.
:type unix_permissions: str
:ivar clone_progress: When a volume is being restored from another volume's snapshot, will show
the percentage completion of this cloning process. When this value is empty/null there is no
cloning process currently happening on this volume. This value will update every 5 minutes
during cloning.
:vartype clone_progress: int
:param avs_data_store: Specifies whether the volume is enabled for Azure VMware Solution (AVS)
datastore purpose. Possible values include: "Enabled", "Disabled". Default value: "Disabled".
:type avs_data_store: str or ~azure.mgmt.netapp.models.AvsDataStore
:param is_default_quota_enabled: Specifies if default quota is enabled for the volume.
:type is_default_quota_enabled: bool
:param default_user_quota_in_ki_bs: Default user quota for volume in KiBs. If
     isDefaultQuotaEnabled is set, the minimum value of 4 KiBs applies.
:type default_user_quota_in_ki_bs: long
:param default_group_quota_in_ki_bs: Default group quota for volume in KiBs. If
isDefaultQuotaEnabled is set, the minimum value of 4 KiBs applies.
:type default_group_quota_in_ki_bs: long
"""
_validation = {
'location': {'required': True},
'id': {'readonly': True},
'name': {'readonly': True},
'etag': {'readonly': True},
'type': {'readonly': True},
'file_system_id': {'readonly': True, 'max_length': 36, 'min_length': 36, 'pattern': r'^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$'},
'creation_token': {'required': True, 'max_length': 80, 'min_length': 1, 'pattern': r'^[a-zA-Z][a-zA-Z0-9\-]{0,79}$'},
'usage_threshold': {'required': True, 'maximum': 109951162777600, 'minimum': 107374182400},
'provisioning_state': {'readonly': True},
'snapshot_id': {'max_length': 36, 'min_length': 36, 'pattern': r'^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}|(\\?([^\/]*[\/])*)([^\/]+)$'},
'backup_id': {'max_length': 36, 'min_length': 36, 'pattern': r'^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}|(\\?([^\/]*[\/])*)([^\/]+)$'},
'baremetal_tenant_id': {'readonly': True},
'subnet_id': {'required': True},
'network_sibling_set_id': {'readonly': True, 'max_length': 36, 'min_length': 36, 'pattern': r'^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$'},
'storage_to_network_proximity': {'readonly': True},
'mount_targets': {'readonly': True},
'throughput_mibps': {'maximum': 4500, 'minimum': 0},
'coolness_period': {'maximum': 63, 'minimum': 7},
'unix_permissions': {'max_length': 4, 'min_length': 4},
'clone_progress': {'readonly': True},
}
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'file_system_id': {'key': 'properties.fileSystemId', 'type': 'str'},
'creation_token': {'key': 'properties.creationToken', 'type': 'str'},
'service_level': {'key': 'properties.serviceLevel', 'type': 'str'},
'usage_threshold': {'key': 'properties.usageThreshold', 'type': 'long'},
'export_policy': {'key': 'properties.exportPolicy', 'type': 'VolumePropertiesExportPolicy'},
'protocol_types': {'key': 'properties.protocolTypes', 'type': '[str]'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'snapshot_id': {'key': 'properties.snapshotId', 'type': 'str'},
'backup_id': {'key': 'properties.backupId', 'type': 'str'},
'baremetal_tenant_id': {'key': 'properties.baremetalTenantId', 'type': 'str'},
'subnet_id': {'key': 'properties.subnetId', 'type': 'str'},
'network_features': {'key': 'properties.networkFeatures', 'type': 'str'},
'network_sibling_set_id': {'key': 'properties.networkSiblingSetId', 'type': 'str'},
'storage_to_network_proximity': {'key': 'properties.storageToNetworkProximity', 'type': 'str'},
'mount_targets': {'key': 'properties.mountTargets', 'type': '[MountTargetProperties]'},
'volume_type': {'key': 'properties.volumeType', 'type': 'str'},
'data_protection': {'key': 'properties.dataProtection', 'type': 'VolumePropertiesDataProtection'},
'is_restoring': {'key': 'properties.isRestoring', 'type': 'bool'},
'snapshot_directory_visible': {'key': 'properties.snapshotDirectoryVisible', 'type': 'bool'},
'kerberos_enabled': {'key': 'properties.kerberosEnabled', 'type': 'bool'},
'security_style': {'key': 'properties.securityStyle', 'type': 'str'},
'smb_encryption': {'key': 'properties.smbEncryption', 'type': 'bool'},
'smb_continuously_available': {'key': 'properties.smbContinuouslyAvailable', 'type': 'bool'},
'throughput_mibps': {'key': 'properties.throughputMibps', 'type': 'float'},
'encryption_key_source': {'key': 'properties.encryptionKeySource', 'type': 'str'},
'ldap_enabled': {'key': 'properties.ldapEnabled', 'type': 'bool'},
'cool_access': {'key': 'properties.coolAccess', 'type': 'bool'},
'coolness_period': {'key': 'properties.coolnessPeriod', 'type': 'int'},
'unix_permissions': {'key': 'properties.unixPermissions', 'type': 'str'},
'clone_progress': {'key': 'properties.cloneProgress', 'type': 'int'},
'avs_data_store': {'key': 'properties.avsDataStore', 'type': 'str'},
'is_default_quota_enabled': {'key': 'properties.isDefaultQuotaEnabled', 'type': 'bool'},
'default_user_quota_in_ki_bs': {'key': 'properties.defaultUserQuotaInKiBs', 'type': 'long'},
'default_group_quota_in_ki_bs': {'key': 'properties.defaultGroupQuotaInKiBs', 'type': 'long'},
}
def __init__(
self,
*,
location: str,
creation_token: str,
usage_threshold: int = 107374182400,
subnet_id: str,
tags: Optional[Dict[str, str]] = None,
service_level: Optional[Union[str, "ServiceLevel"]] = "Premium",
export_policy: Optional["VolumePropertiesExportPolicy"] = None,
protocol_types: Optional[List[str]] = None,
snapshot_id: Optional[str] = None,
backup_id: Optional[str] = None,
network_features: Optional[Union[str, "NetworkFeatures"]] = "Basic",
volume_type: Optional[str] = None,
data_protection: Optional["VolumePropertiesDataProtection"] = None,
is_restoring: Optional[bool] = None,
snapshot_directory_visible: Optional[bool] = True,
kerberos_enabled: Optional[bool] = False,
security_style: Optional[Union[str, "SecurityStyle"]] = "unix",
smb_encryption: Optional[bool] = False,
smb_continuously_available: Optional[bool] = False,
throughput_mibps: Optional[float] = 0,
encryption_key_source: Optional[str] = None,
ldap_enabled: Optional[bool] = False,
cool_access: Optional[bool] = False,
coolness_period: Optional[int] = None,
unix_permissions: Optional[str] = "0770",
avs_data_store: Optional[Union[str, "AvsDataStore"]] = "Disabled",
is_default_quota_enabled: Optional[bool] = False,
default_user_quota_in_ki_bs: Optional[int] = 0,
default_group_quota_in_ki_bs: Optional[int] = 0,
**kwargs
):
super(Volume, self).__init__(**kwargs)
self.location = location
self.id = None
self.name = None
self.etag = None
self.type = None
self.tags = tags
self.file_system_id = None
self.creation_token = creation_token
self.service_level = service_level
self.usage_threshold = usage_threshold
self.export_policy = export_policy
self.protocol_types = protocol_types
self.provisioning_state = None
self.snapshot_id = snapshot_id
self.backup_id = backup_id
self.baremetal_tenant_id = None
self.subnet_id = subnet_id
self.network_features = network_features
self.network_sibling_set_id = None
self.storage_to_network_proximity = None
self.mount_targets = None
self.volume_type = volume_type
self.data_protection = data_protection
self.is_restoring = is_restoring
self.snapshot_directory_visible = snapshot_directory_visible
self.kerberos_enabled = kerberos_enabled
self.security_style = security_style
self.smb_encryption = smb_encryption
self.smb_continuously_available = smb_continuously_available
self.throughput_mibps = throughput_mibps
self.encryption_key_source = encryption_key_source
self.ldap_enabled = ldap_enabled
self.cool_access = cool_access
self.coolness_period = coolness_period
self.unix_permissions = unix_permissions
self.clone_progress = None
self.avs_data_store = avs_data_store
self.is_default_quota_enabled = is_default_quota_enabled
self.default_user_quota_in_ki_bs = default_user_quota_in_ki_bs
self.default_group_quota_in_ki_bs = default_group_quota_in_ki_bs
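# Illustrative sketch (not part of the generated SDK code): constructing a Volume with only its
# required fields. The location and subnet resource ID below are hypothetical placeholders;
# usage_threshold is left at its default of 107374182400 bytes (100 GiB), the documented minimum.
def _example_minimal_volume():  # documentation-only sketch, never called by the SDK
    return Volume(
        location="eastus",
        creation_token="my-volume",
        subnet_id=(
            "/subscriptions/<subscription>/resourceGroups/<rg>/providers/"
            "Microsoft.Network/virtualNetworks/<vnet>/subnets/<delegated-subnet>"
        ),
    )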
class VolumeBackupProperties(msrest.serialization.Model):
"""Volume Backup Properties.
:param backup_policy_id: Backup Policy Resource ID.
:type backup_policy_id: str
:param policy_enforced: Policy Enforced.
:type policy_enforced: bool
:param vault_id: Vault Resource ID.
:type vault_id: str
:param backup_enabled: Backup Enabled.
:type backup_enabled: bool
"""
_attribute_map = {
'backup_policy_id': {'key': 'backupPolicyId', 'type': 'str'},
'policy_enforced': {'key': 'policyEnforced', 'type': 'bool'},
'vault_id': {'key': 'vaultId', 'type': 'str'},
'backup_enabled': {'key': 'backupEnabled', 'type': 'bool'},
}
def __init__(
self,
*,
backup_policy_id: Optional[str] = None,
policy_enforced: Optional[bool] = None,
vault_id: Optional[str] = None,
backup_enabled: Optional[bool] = None,
**kwargs
):
super(VolumeBackupProperties, self).__init__(**kwargs)
self.backup_policy_id = backup_policy_id
self.policy_enforced = policy_enforced
self.vault_id = vault_id
self.backup_enabled = backup_enabled
class VolumeBackups(msrest.serialization.Model):
"""Volume details using the backup policy.
:param volume_name: Volume name.
:type volume_name: str
:param backups_count: Total count of backups for volume.
:type backups_count: int
:param policy_enabled: Policy enabled.
:type policy_enabled: bool
"""
_attribute_map = {
'volume_name': {'key': 'volumeName', 'type': 'str'},
'backups_count': {'key': 'backupsCount', 'type': 'int'},
'policy_enabled': {'key': 'policyEnabled', 'type': 'bool'},
}
def __init__(
self,
*,
volume_name: Optional[str] = None,
backups_count: Optional[int] = None,
policy_enabled: Optional[bool] = None,
**kwargs
):
super(VolumeBackups, self).__init__(**kwargs)
self.volume_name = volume_name
self.backups_count = backups_count
self.policy_enabled = policy_enabled
class VolumeList(msrest.serialization.Model):
"""List of volume resources.
:param value: List of volumes.
:type value: list[~azure.mgmt.netapp.models.Volume]
:param next_link: URL to get the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[Volume]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["Volume"]] = None,
next_link: Optional[str] = None,
**kwargs
):
super(VolumeList, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class VolumePatch(msrest.serialization.Model):
"""Volume patch resource.
Variables are only populated by the server, and will be ignored when sending a request.
:param location: Resource location.
:type location: str
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param service_level: The service level of the file system. Possible values include:
"Standard", "Premium", "Ultra", "StandardZRS". Default value: "Premium".
:type service_level: str or ~azure.mgmt.netapp.models.ServiceLevel
:param usage_threshold: Maximum storage quota allowed for a file system in bytes. This is a
     soft quota used for alerting only. Minimum size is 100 GiB. Upper limit is 100 TiB. Specified in
bytes.
:type usage_threshold: long
:param export_policy: Set of export policy rules.
:type export_policy: ~azure.mgmt.netapp.models.VolumePatchPropertiesExportPolicy
:param throughput_mibps: Maximum throughput in Mibps that can be achieved by this volume.
:type throughput_mibps: float
:param data_protection: DataProtection type volumes include an object containing details of the
replication.
:type data_protection: ~azure.mgmt.netapp.models.VolumePatchPropertiesDataProtection
:param is_default_quota_enabled: Specifies if default quota is enabled for the volume.
:type is_default_quota_enabled: bool
:param default_user_quota_in_ki_bs: Default user quota for volume in KiBs. If
     isDefaultQuotaEnabled is set, the minimum value of 4 KiBs applies.
:type default_user_quota_in_ki_bs: long
:param default_group_quota_in_ki_bs: Default group quota for volume in KiBs. If
isDefaultQuotaEnabled is set, the minimum value of 4 KiBs applies.
:type default_group_quota_in_ki_bs: long
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'usage_threshold': {'maximum': 109951162777600, 'minimum': 107374182400},
'throughput_mibps': {'maximum': 4500, 'minimum': 1},
}
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'service_level': {'key': 'properties.serviceLevel', 'type': 'str'},
'usage_threshold': {'key': 'properties.usageThreshold', 'type': 'long'},
'export_policy': {'key': 'properties.exportPolicy', 'type': 'VolumePatchPropertiesExportPolicy'},
'throughput_mibps': {'key': 'properties.throughputMibps', 'type': 'float'},
'data_protection': {'key': 'properties.dataProtection', 'type': 'VolumePatchPropertiesDataProtection'},
'is_default_quota_enabled': {'key': 'properties.isDefaultQuotaEnabled', 'type': 'bool'},
'default_user_quota_in_ki_bs': {'key': 'properties.defaultUserQuotaInKiBs', 'type': 'long'},
'default_group_quota_in_ki_bs': {'key': 'properties.defaultGroupQuotaInKiBs', 'type': 'long'},
}
def __init__(
self,
*,
location: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
service_level: Optional[Union[str, "ServiceLevel"]] = "Premium",
usage_threshold: Optional[int] = 107374182400,
export_policy: Optional["VolumePatchPropertiesExportPolicy"] = None,
throughput_mibps: Optional[float] = None,
data_protection: Optional["VolumePatchPropertiesDataProtection"] = None,
is_default_quota_enabled: Optional[bool] = False,
default_user_quota_in_ki_bs: Optional[int] = 0,
default_group_quota_in_ki_bs: Optional[int] = 0,
**kwargs
):
super(VolumePatch, self).__init__(**kwargs)
self.location = location
self.id = None
self.name = None
self.type = None
self.tags = tags
self.service_level = service_level
self.usage_threshold = usage_threshold
self.export_policy = export_policy
self.throughput_mibps = throughput_mibps
self.data_protection = data_protection
self.is_default_quota_enabled = is_default_quota_enabled
self.default_user_quota_in_ki_bs = default_user_quota_in_ki_bs
self.default_group_quota_in_ki_bs = default_group_quota_in_ki_bs
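# Illustrative sketch (not part of the generated SDK code): a VolumePatch that only resizes the
# soft quota to 200 GiB and leaves every other property untouched. The size is a hypothetical
# example value inside the documented 100 GiB to 100 TiB range.
def _example_volume_resize_patch():  # documentation-only sketch, never called by the SDK
    return VolumePatch(usage_threshold=200 * 1024 ** 3)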
class VolumePatchPropertiesDataProtection(msrest.serialization.Model):
"""DataProtection type volumes include an object containing details of the replication.
:param backup: Backup Properties.
:type backup: ~azure.mgmt.netapp.models.VolumeBackupProperties
:param snapshot: Snapshot properties.
:type snapshot: ~azure.mgmt.netapp.models.VolumeSnapshotProperties
"""
_attribute_map = {
'backup': {'key': 'backup', 'type': 'VolumeBackupProperties'},
'snapshot': {'key': 'snapshot', 'type': 'VolumeSnapshotProperties'},
}
def __init__(
self,
*,
backup: Optional["VolumeBackupProperties"] = None,
snapshot: Optional["VolumeSnapshotProperties"] = None,
**kwargs
):
super(VolumePatchPropertiesDataProtection, self).__init__(**kwargs)
self.backup = backup
self.snapshot = snapshot
class VolumePatchPropertiesExportPolicy(msrest.serialization.Model):
"""Set of export policy rules.
:param rules: Export policy rule.
:type rules: list[~azure.mgmt.netapp.models.ExportPolicyRule]
"""
_attribute_map = {
'rules': {'key': 'rules', 'type': '[ExportPolicyRule]'},
}
def __init__(
self,
*,
rules: Optional[List["ExportPolicyRule"]] = None,
**kwargs
):
super(VolumePatchPropertiesExportPolicy, self).__init__(**kwargs)
self.rules = rules
class VolumePropertiesDataProtection(msrest.serialization.Model):
"""DataProtection type volumes include an object containing details of the replication.
:param backup: Backup Properties.
:type backup: ~azure.mgmt.netapp.models.VolumeBackupProperties
:param replication: Replication properties.
:type replication: ~azure.mgmt.netapp.models.ReplicationObject
:param snapshot: Snapshot properties.
:type snapshot: ~azure.mgmt.netapp.models.VolumeSnapshotProperties
"""
_attribute_map = {
'backup': {'key': 'backup', 'type': 'VolumeBackupProperties'},
'replication': {'key': 'replication', 'type': 'ReplicationObject'},
'snapshot': {'key': 'snapshot', 'type': 'VolumeSnapshotProperties'},
}
def __init__(
self,
*,
backup: Optional["VolumeBackupProperties"] = None,
replication: Optional["ReplicationObject"] = None,
snapshot: Optional["VolumeSnapshotProperties"] = None,
**kwargs
):
super(VolumePropertiesDataProtection, self).__init__(**kwargs)
self.backup = backup
self.replication = replication
self.snapshot = snapshot
class VolumePropertiesExportPolicy(msrest.serialization.Model):
"""Set of export policy rules.
:param rules: Export policy rule.
:type rules: list[~azure.mgmt.netapp.models.ExportPolicyRule]
"""
_attribute_map = {
'rules': {'key': 'rules', 'type': '[ExportPolicyRule]'},
}
def __init__(
self,
*,
rules: Optional[List["ExportPolicyRule"]] = None,
**kwargs
):
super(VolumePropertiesExportPolicy, self).__init__(**kwargs)
self.rules = rules
class VolumeRevert(msrest.serialization.Model):
"""revert a volume to the snapshot.
:param snapshot_id: Resource id of the snapshot.
:type snapshot_id: str
"""
_attribute_map = {
'snapshot_id': {'key': 'snapshotId', 'type': 'str'},
}
def __init__(
self,
*,
snapshot_id: Optional[str] = None,
**kwargs
):
super(VolumeRevert, self).__init__(**kwargs)
self.snapshot_id = snapshot_id
class VolumeSnapshotProperties(msrest.serialization.Model):
"""Volume Snapshot Properties.
:param snapshot_policy_id: Snapshot Policy ResourceId.
:type snapshot_policy_id: str
"""
_attribute_map = {
'snapshot_policy_id': {'key': 'snapshotPolicyId', 'type': 'str'},
}
def __init__(
self,
*,
snapshot_policy_id: Optional[str] = None,
**kwargs
):
super(VolumeSnapshotProperties, self).__init__(**kwargs)
self.snapshot_policy_id = snapshot_policy_id
class WeeklySchedule(msrest.serialization.Model):
"""Weekly Schedule properties, make a snapshot every week at a specific day or days.
:param snapshots_to_keep: Weekly snapshot count to keep.
:type snapshots_to_keep: int
    :param day: Indicates on which weekdays the snapshot should be taken; accepts a
     comma-separated list of weekday names in English.
    :type day: str
    :param hour: Indicates at which hour in UTC timezone the snapshot should be taken.
    :type hour: int
    :param minute: Indicates at which minute the snapshot should be taken.
:type minute: int
:param used_bytes: Resource size in bytes, current storage usage for the volume in bytes.
:type used_bytes: long
"""
_attribute_map = {
'snapshots_to_keep': {'key': 'snapshotsToKeep', 'type': 'int'},
'day': {'key': 'day', 'type': 'str'},
'hour': {'key': 'hour', 'type': 'int'},
'minute': {'key': 'minute', 'type': 'int'},
'used_bytes': {'key': 'usedBytes', 'type': 'long'},
}
def __init__(
self,
*,
snapshots_to_keep: Optional[int] = None,
day: Optional[str] = None,
hour: Optional[int] = None,
minute: Optional[int] = None,
used_bytes: Optional[int] = None,
**kwargs
):
super(WeeklySchedule, self).__init__(**kwargs)
self.snapshots_to_keep = snapshots_to_keep
self.day = day
self.hour = hour
self.minute = minute
self.used_bytes = used_bytes
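# Illustrative sketch (not part of the generated SDK code): a WeeklySchedule that keeps four
# snapshots, taken every Monday at 03:00 UTC. The field values are hypothetical example inputs
# based on the parameter descriptions above.
def _example_weekly_schedule():  # documentation-only sketch, never called by the SDK
    return WeeklySchedule(snapshots_to_keep=4, day="Monday", hour=3, minute=0)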
|
toolkit/plugins/software/odex2jar.py | PleXone2019/IoT-Implant-Toolkit | 147 | 11078052 | <reponame>PleXone2019/IoT-Implant-Toolkit<filename>toolkit/plugins/software/odex2jar.py
#!/usr/bin/python3
'''
odex2jar class definition
not finished yet
'''
import os
from toolkit.core.basic import Plugin
class Odex2Jar(Plugin):
'''
inherit from class Plugin
'''
def __init__(self):
super().__init__(name = "odex2jar",
description = "odex to jar for Android",
classname = "Odex2Jar",
author = "<NAME>",
ref = "https://github.com/arthastang/IoT-Implant-Toolkit",
category = "Software Analysis",
                         usage = 'Run "run odex2jar --input [odex folder]" to convert an odex file to a jar file. Run "run odex2jar help" to see more parameters.')
self.argparser.add_argument("--input", help="input odex file")
self.argparser.add_argument("--output", default="./outputs/new.jar", help="output java file")
def execute(self):
#print("Run plugin with parameter {}".format(str(self.args)))
os.system("java -jar oat2dex.jar -o outputs/ odex {}".format(self.args.input))
dexname = self.args.input.replace("odex", "dex")
os.system("tookit/tools/dex-tools-2.1/d2j-dex2jar.sh {} -o {}".format(dexname, self.args.output))
|
experiments/bayesopt/run_cmaes_surrogate.py | lebrice/RoBO | 455 | 11078057 | import os
import sys
import cma
import json
import numpy as np
from robo.initial_design import init_random_uniform
from hpolib.benchmarks.ml.surrogate_svm import SurrogateSVM
from hpolib.benchmarks.ml.surrogate_cnn import SurrogateCNN
from hpolib.benchmarks.ml.surrogate_fcnet import SurrogateFCNet
run_id = int(sys.argv[1])
benchmark = sys.argv[2]
n_iters = 50
output_path = "./experiments/RoBO/surrogates/"
if benchmark == "svm_mnist":
b = SurrogateSVM(path="/ihome/kleinaa/devel/git/HPOlib/surrogates/")
elif benchmark == "cnn_cifar10":
b = SurrogateCNN(path="/ihome/kleinaa/devel/git/HPOlib/surrogates/")
elif benchmark == "fcnet_mnist":
b = SurrogateFCNet(path="/ihome/kleinaa/devel/git/HPOlib/surrogates/")
info = b.get_meta_information()
X = []
y = []
def wrapper(x):
X.append(x.tolist())
y_ = b.objective_function(x)['function_value']
y.append(y_)
return y_
# Dimension and bounds of the function
bounds = np.array(info['bounds'])
dimensions = len(bounds)
lower = bounds[:, 0]
upper = bounds[:, 1]
start_point = init_random_uniform(lower, upper, 1)[0]
# Evolution Strategy
es = cma.CMAEvolutionStrategy(start_point, 0.6, {'bounds': [lower, upper],
"maxfevals": n_iters})
es.optimize(wrapper, n_iters)
X = X[:n_iters]
y = y[:n_iters]
fvals = np.array(y)
incs = []
incumbent_val = []
curr_inc_val = sys.float_info.max
inc = None
for i, f in enumerate(fvals):
if curr_inc_val > f:
curr_inc_val = f
inc = X[i]
incumbent_val.append(curr_inc_val)
incs.append(inc)
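# Sanity-check sketch (added for illustration, not part of the original experiment logic): the
# incumbent trajectory built above is simply the running minimum of the observed function values.
assert np.allclose(incumbent_val, np.minimum.accumulate(fvals))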
# Offline Evaluation
test_error = []
runtime = []
cum_cost = 0
results = dict()
for i, inc in enumerate(incs):
    # evaluate the incumbent on the test objective; use a separate name so that the
    # observed objective values collected in the y list above are not overwritten
    y_test = b.objective_function_test(np.array(inc))["function_value"]
    test_error.append(y_test)
# Compute the time it would have taken to evaluate this configuration
c = b.objective_function(np.array(X[i]))["cost"]
cum_cost += c
runtime.append(cum_cost)
# Estimate the runtime as the optimization overhead + estimated cost
results["runtime"] = runtime
results["test_error"] = test_error
results["method"] = "cmaes"
results["benchmark"] = benchmark
results["run_id"] = run_id
results["incumbents"] = incs
results["incumbent_values"] = incumbent_val
results["X"] = X
results["y"] = y
print(test_error)
p = os.path.join(output_path, benchmark, "cmaes")
os.makedirs(p, exist_ok=True)
fh = open(os.path.join(p, '%s_run_%d.json' % (benchmark, run_id)), 'w')
json.dump(results, fh)
|
tests/caliban/platform/gke/test_util.py | Anon-Artist/caliban | 425 | 11078060 | <filename>tests/caliban/platform/gke/test_util.py<gh_stars>100-1000
#!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""unit tests for gke utilities"""
import random
import unittest
from typing import List, Optional, Dict
from unittest import mock
import uuid
import json
import os
import tempfile
import yaml
import hypothesis.strategies as st
from hypothesis import given, settings
from kubernetes.client.api_client import ApiClient
from kubernetes.client import V1Job
import pytest
import argparse
import google
import caliban.platform.cloud.types as ct
import caliban.platform.gke.constants as k
import caliban.platform.gke.util as util
from caliban.platform.gke.types import NodeImage, OpStatus
from caliban.platform.gke.util import trap
# ----------------------------------------------------------------------------
def everything():
"""hypothesis utility to generate, well, everything"""
return st.from_type(type).flatmap(st.from_type)
# ----------------------------------------------------------------------------
def everything_except(excluded_types):
"""hypothesis utility to generate everything but the types in excluded_types"""
return everything().filter(lambda x: not isinstance(x, tuple(excluded_types)))
# ----------------------------------------------------------------------------
class UtilTestSuite(unittest.TestCase):
"""tests for caliban.platform.gke.util"""
# --------------------------------------------------------------------------
@given(
st.lists(st.integers(min_value=0, max_value=4),
min_size=len(ct.GPU),
max_size=len(ct.GPU)), st.sampled_from(ct.GPU),
st.integers(min_value=1, max_value=8))
def test_validate_gpu_spec_against_limits(
self,
limits: List[int],
gpu_type: ct.GPU,
count: int,
):
"""tests gpu validation against limits"""
gpu_list = [g for g in ct.GPU]
gpu_limits = dict([
(gpu_list[i], limits[i]) for i in range(len(limits)) if limits[i]
])
spec = ct.GPUSpec(gpu_type, count)
valid = util.validate_gpu_spec_against_limits(spec, gpu_limits, 'test')
if spec.gpu not in gpu_limits:
self.assertFalse(valid)
else:
self.assertTrue(valid == (spec.count <= gpu_limits[spec.gpu]))
return
# --------------------------------------------------------------------------
def test_validate_gpu_spec_against_limits_deterministic(self):
'''deterministic test to make sure we get full coverage'''
# gpu not supported
cfg = {
'gpu_spec': ct.GPUSpec(ct.GPU.K80, 1),
'gpu_limits': {
ct.GPU.P100: 1
},
'limit_type': 'zone',
}
assert not util.validate_gpu_spec_against_limits(**cfg)
# request above limit
cfg = {
'gpu_spec': ct.GPUSpec(ct.GPU.K80, 2),
'gpu_limits': {
ct.GPU.P100: 1,
ct.GPU.K80: 1,
},
'limit_type': 'zone',
}
assert not util.validate_gpu_spec_against_limits(**cfg)
# valid request
cfg = {
'gpu_spec': ct.GPUSpec(ct.GPU.K80, 1),
'gpu_limits': {
ct.GPU.P100: 1,
ct.GPU.K80: 1,
},
'limit_type': 'zone',
}
assert util.validate_gpu_spec_against_limits(**cfg)
# --------------------------------------------------------------------------
def test_nvidia_daemonset_url(self):
"""tests nvidia driver daemonset url generation"""
VALID_NODE_IMAGES = [NodeImage.COS, NodeImage.UBUNTU]
for n in NodeImage:
url = util.nvidia_daemonset_url(n)
if n in VALID_NODE_IMAGES:
self.assertIsNotNone(url)
else:
self.assertIsNone(url)
return
# --------------------------------------------------------------------------
@mock.patch('caliban.platform.gke.util.input', create=True)
@given(st.lists(st.from_regex('^[^yYnN]+$'), min_size=0, max_size=8))
def test_user_verify(
self,
mocked_input,
invalid_strings,
):
"""tests user verify method"""
# verify different defaults
for default in [True, False]:
# default input
mocked_input.side_effect = ['']
self.assertEqual(util.user_verify('test default', default=default),
default)
# upper/lower true input
for x in ['y', 'Y']:
mocked_input.side_effect = invalid_strings + [x]
self.assertTrue(util.user_verify('y input', default=default))
# upper/lower false input
for x in ['n', 'N']:
mocked_input.side_effect = invalid_strings + [x]
self.assertFalse(util.user_verify('n input', default=default))
return
# --------------------------------------------------------------------------
@given(everything())
def test_trap(self, return_val):
"""tests trap decorator"""
def _raises():
raise Exception('exception!')
# make sure the test function works..testing the tester
with self.assertRaises(Exception) as e:
_raises()
valid_return = '42'
def _no_raise():
return valid_return
# ibid
self.assertEqual(valid_return, _no_raise())
print('testing return_val = {}'.format(return_val))
@trap(return_val)
def _test_raises():
_raises()
@trap(return_val)
def _test_no_raise():
return _no_raise()
# test for types where we can't test equality
try:
if return_val != return_val:
return
except:
return
if valid_return == return_val:
return
self.assertEqual(return_val, _test_raises())
self.assertEqual(valid_return, _test_no_raise())
return
# --------------------------------------------------------------------------
@given(
st.lists(st.sampled_from(list(OpStatus)), min_size=1, max_size=8),
st.sets(st.sampled_from(list(OpStatus)),
min_size=1,
max_size=len(OpStatus)),
st.sets(st.from_regex('\A_[a-zA-Z0-9]+\Z'), min_size=1, max_size=4),
)
@settings(deadline=1000) # in ms
def test_wait_for_operation(self, results, conds, invalid_cond):
"""tests wait_for_operation method"""
class mock_api:
def projects(self):
return self
def locations(self):
return self
def operations(self):
return self
def get(self, name):
return self
def _raises():
raise Exception('exception')
def _return_results():
for r in results:
yield {'status': r.value}
_raises()
api = mock_api()
api.execute = _raises
# we run without the wait spinner here as it causes the tests
# to take about a factor of 100 longer
# empty condition list
self.assertIsNone(util.wait_for_operation(api, 'name', [], spinner=False))
# exception
self.assertIsNone(
util.wait_for_operation(api, 'name', list(conds), 0, spinner=False))
# normal operation
rsp_generator = _return_results()
api.execute = lambda: next(rsp_generator)
expected_response = None
for r in results:
if r in conds:
expected_response = r.value
break
if expected_response is not None:
self.assertEqual({'status': expected_response},
util.wait_for_operation(api,
'name',
list(conds),
0,
spinner=True))
else:
self.assertIsNone(
util.wait_for_operation(api, 'name', list(conds), 0, spinner=False))
return
# --------------------------------------------------------------------------
@given(
st.sets(
st.tuples(st.integers(min_value=1, max_value=32),
st.sampled_from(ct.TPU))),
st.sets(st.from_regex('\A_[a-z0-9]+-[0-9]+\Z')),
)
def test_get_zone_tpu_types(self, tpu_types, invalid_types):
"""tests get_zone_tpu_types"""
tpus = ['{}-{}'.format(x[1].name.lower(), x[0]) for x in tpu_types]
invalid_types = list(invalid_types)
responses = tpus + invalid_types
random.shuffle(responses)
class mock_api:
def projects(self):
return self
def locations(self):
return self
def acceleratorTypes(self):
return self
def list(self, parent):
return self
def _raises():
raise Exception('exception')
def _response():
return {'acceleratorTypes': [{'type': x} for x in responses]}
def _invalid_response():
return {'foo': 'bar'}
api = mock_api()
# exception handling
api.execute = _raises
self.assertIsNone(util.get_zone_tpu_types(api, 'p', 'z'))
# invalid response
api.execute = _invalid_response
self.assertIsNone(util.get_zone_tpu_types(api, 'p', 'z'))
# normal mode
api.execute = _response
self.assertEqual(
sorted(tpus),
sorted([
'{}-{}'.format(x.name.lower(), x.count)
for x in util.get_zone_tpu_types(api, 'p', 'z')
]))
return
# --------------------------------------------------------------------------
@given(st.text())
def test_sanitize_job_name(self, job_name):
"""test job name sanitizer"""
def valid(x):
return k.DNS_1123_RE.match(x) is not None
sanitized = util.sanitize_job_name(job_name)
if valid(job_name):
self.assertEqual(job_name, sanitized)
else:
self.assertTrue(valid(sanitized))
# idempotency check
self.assertEqual(sanitized, util.sanitize_job_name(sanitized))
# ensure coverage, first char must be alnum, last must be alnum
x = '_' + sanitized + '-'
assert valid(util.sanitize_job_name(x))
return
# --------------------------------------------------------------------------
@given(
st.lists(st.integers(min_value=0, max_value=32),
min_size=len(ct.GPU),
max_size=len(ct.GPU)),
st.sets(
st.tuples(st.from_regex('\A[a-z0-9]+\Z'),
st.integers(min_value=1, max_value=32))),
)
def test_get_zone_gpu_types(self, gpu_counts, invalid_types):
"""tests get_zone_gpu_types"""
gpu_types = ['nvidia-tesla-{}'.format(x.name.lower()) for x in ct.GPU]
gpus = [{
'name': gpu_types[i],
'maximumCardsPerInstance': c
} for i, c in enumerate(gpu_counts) if c > 0]
invalid = [{
'name': x[0],
'maximumCardsPerInstance': x[1]
} for x in invalid_types]
class mock_api:
def acceleratorTypes(self):
return self
def list(self, project, zone):
return self
def _raises():
raise Exception('exception')
def _response():
return {'items': gpus + invalid}
def _invalid_response():
return {'foo': 'bar'}
api = mock_api()
# exception handling
api.execute = _raises
self.assertIsNone(util.get_zone_gpu_types(api, 'p', 'z'))
# invalid response
api.execute = _invalid_response
self.assertIsNone(util.get_zone_gpu_types(api, 'p', 'z'))
# normal execution
api.execute = _response
self.assertEqual(
sorted([
'{}-{}'.format(x["name"], x["maximumCardsPerInstance"])
for x in gpus
]),
sorted([
'nvidia-tesla-{}-{}'.format(x.gpu.name.lower(), x.count)
for x in util.get_zone_gpu_types(api, 'p', 'z')
]))
return
# --------------------------------------------------------------------------
def test_get_region_quotas(self):
"""tests get region quotas"""
class mock_api:
def regions(self):
return self
def get(self, project, region):
return self
def _raises():
raise Exception('exception')
def _normal():
return {
'quotas': [{
'limit': 4,
'metric': 'CPUS',
'usage': 1
}, {
'limit': 1024,
'metric': 'NVIDIA_K80_GPUS',
'usage': 0
}]
}
def _invalid():
return {'foo': 'bar'}
api = mock_api()
# exception handling
api.execute = _raises
self.assertIsNone(util.get_region_quotas(api, 'p', 'r'))
# invalid return
api.execute = _invalid
self.assertEqual([], util.get_region_quotas(api, 'p', 'r'))
# normal execution
api.execute = _normal
self.assertEqual(_normal()['quotas'], util.get_region_quotas(api, 'p', 'r'))
return
# --------------------------------------------------------------------------
def test_generate_resource_limits(self):
"""tests generation of resource limits"""
class mock_api:
def regions(self):
return self
def get(self, project, region):
return self
def _raises():
raise Exception('exception')
def _normal():
return {
'quotas': [{
'limit': 4,
'metric': 'CPUS',
'usage': 1
}, {
'limit': 1024,
'metric': 'NVIDIA_K80_GPUS',
'usage': 0
}]
}
def _invalid():
return {'foo': 'bar'}
api = mock_api()
# exception handling
api.execute = _raises
self.assertIsNone(util.generate_resource_limits(api, 'p', 'r'))
# invalid return
api.execute = _invalid
self.assertEqual([], util.generate_resource_limits(api, 'p', 'r'))
# normal execution
api.execute = _normal
quotas = _normal()['quotas']
expected = ([{
'resourceType': 'cpu',
'maximum': str(quotas[0]['limit'])
}] + [{
'resourceType': 'memory',
'maximum': str(quotas[0]['limit'] * k.MAX_GB_PER_CPU)
}] + [{
'resourceType': 'nvidia-tesla-k80',
'maximum': str(quotas[1]['limit'])
}])
self.assertEqual(expected, util.generate_resource_limits(api, 'p', 'r'))
return
# --------------------------------------------------------------------------
@given(st.lists(st.from_regex('[a-zA-Z0-9]+')),
st.from_regex('_[a-zA-Z0-9]+'))
def test_get_gke_cluster(self, names, invalid):
"""test getting gke cluster"""
class mock_cluster:
def __init__(self, name):
self.name = name
return
class mock_cluster_list:
def __init__(self):
self.clusters = [mock_cluster(x) for x in names]
return
class mock_api:
      throws = False
def list_clusters(self, project_id, zone):
if self.throws:
raise Exception('exception')
return mock_cluster_list()
api = mock_api()
api.throws = True
# exception handling
self.assertIsNone(util.get_gke_cluster(api, 'foo', 'p'))
api.throws = False
# single cluster
if len(names) > 0:
cname = names[random.randint(0, len(names) - 1)]
self.assertEqual(cname, util.get_gke_cluster(api, cname, 'p').name)
# name not in name list
self.assertIsNone(util.get_gke_cluster(api, invalid, 'p'))
return
# --------------------------------------------------------------------------
def _validate_nonnull_dict(self, d: dict, ref: dict):
"""helper method for testing nonnull_dict, nonnull_list"""
for k, v in d.items():
self.assertIsNotNone(v)
self.assertTrue(k in ref)
self.assertEqual(type(v), type(ref[k]))
if trap(True)(lambda z: z != z)(v):
continue
elif type(v) == dict:
self._validate_nonnull_dict(v, ref[k])
elif type(v) == list:
self._validate_nonnull_list(v, ref[k])
else:
self.assertEqual(v, ref[k])
# --------------------------------------------------------------------------
def _validate_nonnull_list(self, lst: list, ref: list):
"""helper method for testing nonnull_dict, nonnull_list"""
ref = [x for x in ref if x is not None]
self.assertEqual(len(lst), len(ref))
for i, x in enumerate(lst):
self.assertIsNotNone(x)
self.assertEqual(type(x), type(ref[i]))
if trap(True)(lambda z: z != z)(x):
continue
elif type(x) == list:
self._validate_nonnull_list(x, ref[i])
elif type(x) == dict:
self._validate_nonnull_dict(x, ref[i])
else:
self.assertEqual(x, ref[i])
# --------------------------------------------------------------------------
@given(st.dictionaries(
keys=st.from_regex('\A[a-z]+\Z'),
values=everything(),
))
def test_nonnull_dict(self, input_dict):
input_dict[str(uuid.uuid1())] = {'x': None, 'y': 7} # ensure coverage
input_dict[str(uuid.uuid1())] = [1, 2, None, 3]
self._validate_nonnull_dict(util.nonnull_dict(input_dict), input_dict)
return
# --------------------------------------------------------------------------
@given(st.lists(everything()))
def test_nonnull_list(self, input_list):
input_list.append({'x': None, 'y': 7}) # ensure coverage
input_list.append([1, 2, None, 3]) # ensure coverage
self._validate_nonnull_list(util.nonnull_list(input_list), input_list)
return
# --------------------------------------------------------------------------
@given(
st.sampled_from(
list(set.union(ct.US_REGIONS, ct.EURO_REGIONS, ct.ASIA_REGIONS))),
st.sets(st.from_regex('\A[a-z]\Z')))
def test_get_zones_in_region(self, region, zone_ids):
'''test get_zones_in_region'''
class mock_api:
def regions(self):
return self
def get(self, project, region):
return self
def _raises():
raise Exception('exception')
url = 'https://www.googleapis.com/compute/v1/projects/foo/zones/'
zones = ['{}-{}'.format(region, x) for x in zone_ids]
def _normal():
return {'zones': ['{}{}'.format(url, x) for x in zones]}
def _invalid():
return {'foo': 'bar'}
api = mock_api()
# exception handling
api.execute = _raises
self.assertIsNone(util.get_zones_in_region(api, 'p', region))
# invalid return
api.execute = _invalid
self.assertIsNone(util.get_zones_in_region(api, 'p', region))
# normal execution
api.execute = _normal
self.assertEqual(zones, util.get_zones_in_region(api, 'p', region))
# ----------------------------------------------------------------------------
def test_dashboard_cluster_url():
cfg = {
'cluster_id': 'foo',
'zone': 'us-central1-a',
'project_id': 'bar',
}
url = util.dashboard_cluster_url(**cfg)
assert url is not None
assert url == (f'{k.DASHBOARD_CLUSTER_URL}/{cfg["zone"]}/{cfg["cluster_id"]}'
f'?project={cfg["project_id"]}')
# ----------------------------------------------------------------------------
def test_get_tpu_drivers(monkeypatch):
class MockApi():
def __init__(self,
drivers: Optional[Dict[str, Dict[str, List[str]]]] = None):
self._drivers = drivers
def projects(self):
return self
def locations(self):
return self
def tensorflowVersions(self):
return self
def list(self, parent):
return self
def execute(self):
return self._drivers
# test no response behavior
cfg = {'tpu_api': MockApi(), 'project_id': 'foo', 'zone': 'us-central1-a'}
assert util.get_tpu_drivers(**cfg) is None
# test valid response
drivers = ['foo', 'bar']
cfg['tpu_api'] = MockApi(
drivers={'tensorflowVersions': [{
'version': x
} for x in drivers]})
assert util.get_tpu_drivers(**cfg) == drivers
# ----------------------------------------------------------------------------
def test_resource_limits_from_quotas():
# valid, all quota > 0
counts = {'cpu': 1, 'nvidia-tesla-p100': 2, 'memory': k.MAX_GB_PER_CPU}
quotas = [('CPUS', counts['cpu']),
('NVIDIA_P100_GPUS', counts['nvidia-tesla-p100']), ('bogus', 5)]
cfg = {'quotas': [{'metric': x[0], 'limit': x[1]} for x in quotas]}
q = util.resource_limits_from_quotas(**cfg)
assert len(q) == len(counts)
for d in q:
assert counts[d['resourceType']] == int(d['maximum'])
# valid, gpu quota == 0
counts = {'cpu': 1, 'nvidia-tesla-p100': 0, 'memory': k.MAX_GB_PER_CPU}
quotas = [('CPUS', counts['cpu']),
('NVIDIA_P100_GPUS', counts['nvidia-tesla-p100'])]
cfg = {'quotas': [{'metric': x[0], 'limit': x[1]} for x in quotas]}
q = util.resource_limits_from_quotas(**cfg)
assert len(q) == len(counts) - 1
for d in q:
assert d['resourceType'] != 'nvidia-tesla-p100'
assert counts[d['resourceType']] == int(d['maximum'])
# ----------------------------------------------------------------------------
def test_job_to_dict():
j = V1Job(api_version='abc', kind='foo')
d = util.job_to_dict(j)
assert d is not None
assert isinstance(d, dict)
assert d == ApiClient().sanitize_for_serialization(j)
# ----------------------------------------------------------------------------
def test_job_str():
j = V1Job(api_version='abc', kind='foo')
s = util.job_str(j)
assert s is not None
assert isinstance(s, str)
# ----------------------------------------------------------------------------
def test_validate_job_filename():
for x in k.VALID_JOB_FILE_EXT:
fname = str(uuid.uuid1()) + f'.{x}'
s = util.validate_job_filename(fname)
assert s == fname
with pytest.raises(argparse.ArgumentTypeError):
fname = str(uuid.uuid1()) + '.' + str(uuid.uuid1())
util.validate_job_filename(fname)
# ----------------------------------------------------------------------------
def test_export_job():
with tempfile.TemporaryDirectory() as tmpdir:
j = V1Job(api_version='abc', kind='foo')
nnd = util.nonnull_dict(util.job_to_dict(j))
fname = os.path.join(tmpdir, 'foo.json')
assert util.export_job(j, fname)
assert os.path.exists(fname)
with open(fname, 'r') as f:
x = json.load(f)
assert x == nnd
fname = os.path.join(tmpdir, 'foo.yaml')
assert util.export_job(j, fname)
assert os.path.exists(fname)
with open(fname, 'r') as f:
      x = yaml.safe_load(f)
assert x == nnd
fname = os.path.join(tmpdir, 'foo.xyz')
assert not util.export_job(j, fname)
# ----------------------------------------------------------------------------
def test_application_default_credentials_path(monkeypatch):
adc = 'foo'
# monkeypatch can't set things in underscore-prefixed modules, so we
# cheat a bit here
monkeypatch.setattr(util, 'get_application_default_credentials_path',
lambda: adc)
assert util.application_default_credentials_path() == adc
# ----------------------------------------------------------------------------
def test_default_credentials(monkeypatch):
class MockCreds():
def refresh(self, req):
pass
creds = MockCreds()
project_id = 'project-foo'
def mock_default(scopes):
return (creds, project_id)
monkeypatch.setattr(google.auth, 'default', mock_default)
monkeypatch.setattr(google.auth.transport.requests, 'Request', lambda: None)
cd = util.default_credentials()
assert cd.credentials == creds
assert cd.project_id == project_id
# ----------------------------------------------------------------------------
def test_credentials_from_file(monkeypatch):
class MockCreds():
def refresh(self, req):
pass
creds = MockCreds()
project_id = 'foo-project'
def mock_from_service_account_file(f, scopes):
return creds
def mock_load_credentials_from_file(f):
return (creds, project_id)
monkeypatch.setattr(google.auth.transport.requests, 'Request', lambda: None)
monkeypatch.setattr(google.oauth2.service_account.Credentials,
'from_service_account_file',
mock_from_service_account_file)
# ugh, I feel dirty, but monkeypatching google.auth._default.load_credentials_from_file
# doesn't work
monkeypatch.setattr(util, 'load_credentials_from_file',
mock_load_credentials_from_file)
# test service account file
creds_type = util._SERVICE_ACCOUNT_TYPE
with tempfile.TemporaryDirectory() as tmpdir:
creds_dict = {'type': creds_type, 'project_id': project_id}
creds_file = os.path.join(tmpdir, 'creds.json')
with open(creds_file, 'w') as f:
json.dump(creds_dict, f)
cd = util.credentials_from_file(creds_file)
assert cd.credentials == creds
assert cd.project_id == project_id
# test authorized user file
creds_type = util._AUTHORIZED_USER_TYPE
with tempfile.TemporaryDirectory() as tmpdir:
creds_dict = {'type': creds_type, 'project_id': project_id}
creds_file = os.path.join(tmpdir, 'creds.json')
with open(creds_file, 'w') as f:
json.dump(creds_dict, f)
cd = util.credentials_from_file(creds_file)
assert cd.credentials == creds
assert cd.project_id == project_id
# test invalid file
creds_type = str(uuid.uuid1())
with tempfile.TemporaryDirectory() as tmpdir:
creds_dict = {'type': creds_type, 'project_id': project_id}
creds_file = os.path.join(tmpdir, 'creds.json')
with open(creds_file, 'w') as f:
json.dump(creds_dict, f)
cd = util.credentials_from_file(creds_file)
assert cd.credentials is None
assert cd.project_id is None
# ----------------------------------------------------------------------------
def test_credentials(monkeypatch):
class MockCreds():
def refresh(self, req):
pass
creds = MockCreds()
project_id = 'project-foo'
def mock_default(scopes):
return (creds, project_id)
def mock_from_service_account_file(f, scopes):
return creds
monkeypatch.setattr(google.auth, 'default', mock_default)
monkeypatch.setattr(google.auth.transport.requests, 'Request', lambda: None)
monkeypatch.setattr(google.oauth2.service_account.Credentials,
'from_service_account_file',
mock_from_service_account_file)
# test default creds
cd = util.credentials()
assert cd.credentials == creds
assert cd.project_id == project_id
# test creds file
creds_type = util._SERVICE_ACCOUNT_TYPE
with tempfile.TemporaryDirectory() as tmpdir:
creds_dict = {'type': creds_type, 'project_id': project_id}
creds_file = os.path.join(tmpdir, 'creds.json')
with open(creds_file, 'w') as f:
json.dump(creds_dict, f)
cd = util.credentials(creds_file)
assert cd.credentials == creds
assert cd.project_id == project_id
# ----------------------------------------------------------------------------
def test_parse_job_file():
# test invalid file extension
with tempfile.TemporaryDirectory() as tmpdir:
cfg = {'foo': 1, 'bar': '2'}
fname = os.path.join(tmpdir, f'job.{str(uuid.uuid1())}')
with open(fname, 'w') as f:
json.dump(cfg, f)
assert os.path.exists(fname)
d = util.parse_job_file(fname)
assert d is None
# test missing file
d = util.parse_job_file(f'{str(uuid.uuid1())}.json')
assert d is None
# test json file
with tempfile.TemporaryDirectory() as tmpdir:
cfg = {'foo': 1, 'bar': '2'}
        fname = os.path.join(tmpdir, 'job.json')
with open(fname, 'w') as f:
json.dump(cfg, f)
assert os.path.exists(fname)
d = util.parse_job_file(fname)
assert d == cfg
# test yaml file
with tempfile.TemporaryDirectory() as tmpdir:
cfg = {'foo': 1, 'bar': '2'}
        fname = os.path.join(tmpdir, 'job.yaml')
with open(fname, 'w') as f:
yaml.dump(cfg, f)
assert os.path.exists(fname)
d = util.parse_job_file(fname)
assert d == cfg
# test bad formatting
with tempfile.TemporaryDirectory() as tmpdir:
        fname = os.path.join(tmpdir, 'job.json')
with open(fname, 'w') as f:
f.write('this is invalid json')
assert os.path.exists(fname)
d = util.parse_job_file(fname)
assert d is None
|
armi/utils/densityTools.py | celikten/armi | 162 | 11078061 | # Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple, List, Dict
from armi.nucDirectory import nucDir, nuclideBases, elements
from armi.utils import units
from armi import runLog
def getNDensFromMasses(rho, massFracs, normalize=False):
"""
Convert density (g/cc) and massFracs vector into a number densities vector (#/bn-cm).
Parameters
----------
rho : float
density in (g/cc)
    massFracs : dict
        vector of mass fractions -- normalized to 1 -- keyed by their nuclide
        name
    normalize : bool, optional
        if True, normalize the mass fractions to sum to 1 before converting
Returns
-------
numberDensities : dict
vector of number densities (#/bn-cm) keyed by their nuclide name
"""
if normalize:
massFracs = normalizeNuclideList(massFracs, normalization=normalize)
numberDensities = {}
rho = rho * units.MOLES_PER_CC_TO_ATOMS_PER_BARN_CM
for nucName, massFrac in massFracs.items():
atomicWeight = nuclideBases.byName[nucName].weight
numberDensities[nucName] = massFrac * rho / atomicWeight
return numberDensities
def getMassFractions(numberDensities):
"""
Convert number densities (#/bn-cm) into mass fractions.
Parameters
----------
numberDensities : dict
number densities (#/bn-cm) keyed by their nuclide name
Returns
-------
massFracs : dict
mass fractions -- normalized to 1 -- keyed by their nuclide
name
"""
nucMassFracs = {}
totalWeight = 0.0
for nucName, numDensity in numberDensities.items():
weightI = numDensity * nucDir.getAtomicWeight(nucName)
nucMassFracs[nucName] = weightI # will be normalized at end
totalWeight += weightI
if totalWeight != 0:
for nucName in numberDensities:
nucMassFracs[nucName] /= totalWeight
else:
for nucName in numberDensities:
nucMassFracs[nucName] = 0.0
return nucMassFracs
def calculateMassDensity(numberDensities):
"""
Calculates the mass density.
Parameters
----------
numberDensities : dict
vector of number densities (atom/bn-cm) indexed by nuclides names
Returns
-------
rho : float
density in (g/cc)
"""
rho = 0
for nucName, nDensity in numberDensities.items():
atomicWeight = nuclideBases.byName[nucName].weight
rho += nDensity * atomicWeight / units.MOLES_PER_CC_TO_ATOMS_PER_BARN_CM
return rho
def calculateNumberDensity(nucName, mass, volume):
"""
Calculates the number density.
Parameters
----------
    mass : float
        mass of the nuclide (g)
    volume : float
        volume (cm3)
    nucName : str
        armi nuclide name -- e.g. 'U235'
Returns
-------
number density : float
number density (#/bn-cm)
See Also
--------
armi.reactor.blocks.Block.setMass
"""
A = nucDir.getAtomicWeight(nucName)
try:
return units.MOLES_PER_CC_TO_ATOMS_PER_BARN_CM * mass / (volume * A)
except ZeroDivisionError:
if mass == 0 and volume == 0:
return 0
raise ValueError(
"Could not calculate number density with input.\n"
"mass : {}\nvolume : {}\natomic weight : {}\n".format(mass, volume, A)
)
def getMassInGrams(nucName, volume, numberDensity=None):
"""
    Gets mass of a nuclide given a known volume and known number density.
Parameters
----------
nucName : str
name of nuclide -- e.g. 'U235'
volume : float
volume in (cm3)
numberDensity : float
number density in (at/bn-cm)
Returns
-------
mass : float
mass of nuclide (g)
"""
if not numberDensity:
return 0.0
A = nucDir.getAtomicWeight(nucName)
return numberDensity * volume * A / units.MOLES_PER_CC_TO_ATOMS_PER_BARN_CM
def formatMaterialCard(
densities,
matNum=0,
minDens=1e-15,
sigFigs=8,
mcnp6Compatible=False,
mcnpLibrary=None,
):
"""
Formats nuclides and densities into a MCNP material card.
Parameters
----------
densities : dict
number densities indexed by nuclideBase
matNum : int
mcnp material number
minDens : float
minimum density
sigFigs : int
significant figures for the material card
Returns
-------
mCard : list
list of material card strings
"""
if all(
isinstance(nuc, (nuclideBases.LumpNuclideBase, nuclideBases.DummyNuclideBase))
for nuc in densities
):
return [] # no valid nuclides to write
mCard = ["m{matNum}\n".format(matNum=matNum)]
for nuc, dens in sorted(densities.items()):
# skip LFPs and Dummies.
if isinstance(nuc, (nuclideBases.LumpNuclideBase)):
runLog.important(
"The material card returned will ignore LFPs.", single=True
)
continue
elif isinstance(nuc, nuclideBases.DummyNuclideBase):
runLog.info("Omitting dummy nuclides such as {}".format(nuc), single=True)
continue
mcnpNucName = nuc.getMcnpId()
newEntry = (" {nucName:5d} {ndens:." + str(sigFigs) + "e}\n").format(
nucName=int(mcnpNucName), ndens=max(dens, minDens)
) # 0 dens is invalid
mCard.append(newEntry)
if mcnp6Compatible:
mCard.append(" nlib={lib}c\n".format(lib=mcnpLibrary))
return mCard
def filterNuclideList(nuclideVector, nuclides):
"""
Filter out nuclides not in the nuclide vector.
Parameters
----------
nuclideVector : dict
dictionary of values indexed by nuclide identifiers -- e.g. nucNames or nuclideBases
nuclides : list
list of nuclide identifiers
Returns
-------
nuclideVector : dict
dictionary of values indexed by nuclide identifiers -- e.g. nucNames or nuclideBases
"""
if not isinstance(list(nuclideVector.keys())[0], nuclides[0].__class__):
raise ValueError(
"nuclide vector is indexed by {} where as the nuclides list is {}".format(
nuclideVector.keys()[0].__class__, nuclides[0].__class__
)
)
for nucName in list(nuclideVector.keys()):
if nucName not in nuclides:
del nuclideVector[nucName]
return nuclideVector
def normalizeNuclideList(nuclideVector, normalization=1.0):
"""
normalize the nuclide vector.
Parameters
----------
nuclideVector : dict
dictionary of values -- e.g. floats, ints -- indexed by nuclide identifiers -- e.g. nucNames or nuclideBases
normalization : float
Returns
-------
nuclideVector : dict
dictionary of values indexed by nuclide identifiers -- e.g. nucNames or nuclideBases
"""
normalizationFactor = sum(nuclideVector.values()) / normalization
for nucName, mFrac in nuclideVector.items():
nuclideVector[nucName] = mFrac / normalizationFactor
return nuclideVector
def expandElementalMassFracsToNuclides(
massFracs: dict,
elementExpansionPairs: Tuple[elements.Element, List[nuclideBases.NuclideBase]],
):
"""
Expand elemental mass fractions to natural nuclides.
Modifies the input ``massFracs`` in place to contain nuclides.
Notes
-----
This indirectly updates number densities through mass fractions.
Parameters
----------
massFracs : dict(str, float)
dictionary of nuclide or element names with mass fractions.
Elements will be expanded in place using natural isotopics.
elementExpansionPairs : (Element, [NuclideBase]) pairs
        element objects to expand (from nuclideBase.element) and list
of NuclideBases to expand into (or None for all natural)
"""
# expand elements
for element, isotopicSubset in elementExpansionPairs:
massFrac = massFracs.pop(element.symbol, None)
if massFrac is None:
continue
expandedNucs = expandElementalNuclideMassFracs(
element, massFrac, isotopicSubset
)
massFracs.update(expandedNucs)
total = sum(expandedNucs.values())
if massFrac > 0.0 and abs(total - massFrac) / massFrac > 1e-6:
raise ValueError(
"Mass fractions not normalized properly {}!".format((total, massFrac))
)
def expandElementalNuclideMassFracs(
element: elements.Element,
massFrac: dict,
isotopicSubset: List[nuclideBases.NuclideBase] = None,
):
"""
Return a dictionary of nuclide names to isotopic mass fractions.
If an isotopic subset is passed in, the mass fractions get scaled up
s.t. the total mass fraction remains constant.
Parameters
----------
element : Element
The element to expand to natural isotopics
massFrac : float
Mass fraction of the initial element
isotopicSubset : list of NuclideBases
Natural isotopes to include in the expansion. Useful e.g. for
excluding O18 from an expansion of Oxygen.
"""
elementNucBases = element.getNaturalIsotopics()
if isotopicSubset:
expandedNucBases = [nb for nb in elementNucBases if nb in isotopicSubset]
else:
expandedNucBases = elementNucBases
elementalWeightGperMole = sum(nb.weight * nb.abundance for nb in expandedNucBases)
if not any(expandedNucBases):
raise ValueError(
"Cannot expand element `{}` into isotopes: `{}`"
"".format(element, expandedNucBases)
)
expanded = {}
for nb in expandedNucBases:
expanded[nb.name] = (
massFrac * nb.abundance * nb.weight / elementalWeightGperMole
)
return expanded
def getChemicals(nuclideInventory):
"""
Groups the inventories of nuclides by their elements.
Parameters
----------
nuclideInventory : dict
nuclide inventories indexed by nuc -- either nucNames or nuclideBases
Returns
-------
chemicals : dict
inventory of elements indexed by element symbol -- e.g. 'U' or 'PU'
"""
chemicals = {}
for nuc, N in nuclideInventory.items():
nb = nuc if isinstance(nuc, nuclideBases.INuclide) else nuclideBases.byName[nuc]
if nb.element.symbol in chemicals:
chemicals[nb.element.symbol] += N
else:
chemicals[nb.element.symbol] = N
return chemicals
def applyIsotopicsMix(
material, enrichedMassFracs: Dict[str, float], fertileMassFracs: Dict[str, float]
):
"""
Update material heavy metal mass fractions based on its enrichment and two nuclide feeds.
This will remix the heavy metal in a Material object based on the object's
``class1_wt_frac`` parameter and the input nuclide information.
This can be used for inputting mixtures of two external custom isotopic feeds
as well as for fabricating assemblies from two closed-cycle collections
of material.
See Also
--------
armi.materials.material.FuelMaterial
Parameters
----------
material : material.Material
The object to modify. Must have a ``class1_wt_frac`` param set
enrichedMassFracs : dict
Nuclide names and weight fractions of the class 1 nuclides
fertileMassFracs : dict
Nuclide names and weight fractions of the class 2 nuclides
"""
total = sum(material.p.massFrac.values())
hm = 0.0
for nucName, massFrac in material.p.massFrac.items():
nb = nuclideBases.byName[nucName]
if nb.isHeavyMetal():
hm += massFrac
hmFrac = hm / total
hmEnrich = material.p.class1_wt_frac
for nucName in (
set(enrichedMassFracs.keys())
.union(set(fertileMassFracs.keys()))
.union(set(material.p.massFrac.keys()))
):
nb = nuclideBases.byName[nucName]
if nb.isHeavyMetal():
material.p.massFrac[nucName] = hmFrac * (
hmEnrich * enrichedMassFracs.get(nucName, 0.0)
+ (1 - hmEnrich) * fertileMassFracs.get(nucName, 0.0)
)
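if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): round-trip a simple
    # mass-fraction vector through number densities. The 10.5 g/cc density and the
    # U235/U238/O16 split are illustrative assumptions, and this assumes the ARMI
    # nuclide directory is already initialized on import (newer ARMI versions may
    # require calling armi.configure() first).
    exampleMassFracs = {"U235": 0.04, "U238": 0.84, "O16": 0.12}
    exampleNDens = getNDensFromMasses(10.5, exampleMassFracs)
    print("number densities (atoms/bn-cm):", exampleNDens)
    print("recovered density (g/cc):", calculateMassDensity(exampleNDens))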
|
tests/server/scheduler/test_create.py | catherine917/cloudburst | 174 | 11078108 | # Copyright 2019 U.C. Berkeley RISE Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import unittest
from anna.lattices import LWWPairLattice, SingleKeyCausalLattice
from cloudburst.server.scheduler.create import (
create_dag,
create_function,
delete_dag
)
from cloudburst.server.scheduler.policy.default_policy import (
DefaultCloudburstSchedulerPolicy
)
from cloudburst.server.scheduler.utils import get_pin_address, get_unpin_address
import cloudburst.server.utils as sutils
from cloudburst.shared.proto.cloudburst_pb2 import (
Dag,
Function,
GenericResponse,
NORMAL, MULTI, # Cloudburst's consistency modes
DAG_ALREADY_EXISTS, NO_RESOURCES, NO_SUCH_DAG # Cloudburst's error modes
)
from cloudburst.shared.proto.internal_pb2 import PinFunction
from cloudburst.shared.serializer import Serializer
from tests.mock import kvs_client, zmq_utils
from tests.server.utils import create_linear_dag
serializer = Serializer()
class TestSchedulerCreate(unittest.TestCase):
'''
This test suite ensures that the scheduler correctly creates individual
functions by storing them in the KVS and also correctly creates DAGs by
sending the correct pin messages to executors and updating the server-side
metadata when appropriate.
'''
def setUp(self):
self.pusher_cache = zmq_utils.MockPusherCache()
self.socket = zmq_utils.MockZmqSocket()
self.pin_socket = zmq_utils.MockZmqSocket()
self.kvs_client = kvs_client.MockAnnaClient()
self.ip = '127.0.0.1'
self.policy = DefaultCloudburstSchedulerPolicy(self.pin_socket,
self.pusher_cache,
self.kvs_client, self.ip,
policy='random',
random_threshold=0)
'''
INDIVIDUAL FUNCTION CREATION TESTS
'''
def test_create_function(self):
'''
This test creates a new function and checks that it is persisted in
the KVS in the expected format.
'''
# Create a new function message and add it to our socket.
        def func(_, x): return x + 1
function = Function()
function.name = 'function'
function.body = serializer.dump(func)
self.socket.inbox.append(function.SerializeToString())
# Call the function creation script.
create_function(self.socket, self.kvs_client, consistency=NORMAL)
# Check that the function was created correctly.
kvs_name = sutils.get_func_kvs_name(function.name)
result = self.kvs_client.get(kvs_name)
created = result[kvs_name]
self.assertTrue(created is not None)
self.assertEqual(type(created), LWWPairLattice)
created = serializer.load_lattice(created)
self.assertEqual(func('', 1), created('', 1))
def test_create_causal_function(self):
'''
This test creates a new function and checks that it is persisted in
the KVS in the expected format in causal mode.
'''
# Create a new function message and add it to our socket.
        def func(_, x): return x + 1
function = Function()
function.name = 'function'
function.body = serializer.dump(func)
self.socket.inbox.append(function.SerializeToString())
# Call the function creation script.
create_function(self.socket, self.kvs_client, consistency=MULTI)
# Check that the function was created correctly.
kvs_name = sutils.get_func_kvs_name(function.name)
result = self.kvs_client.get(kvs_name)
created = result[kvs_name]
self.assertTrue(created is not None)
self.assertEqual(type(created), SingleKeyCausalLattice)
self.assertEqual(created.vector_clock, sutils.DEFAULT_VC)
self.assertEqual(len(created.reveal()), 1)
created = serializer.load_lattice(created)[0]
self.assertEqual(func('', 1), created('', 1))
'''
DAG CREATION TESTS
'''
def test_create_dag(self):
'''
This test creates a new DAG, checking that the correct pin messages are
sent to executors and that it is persisted in the KVS correctly. It
also checks that the server metadata was updated as expected.
'''
# Create a simple two-function DAG and add it to the inbound socket.
source = 'source'
sink = 'sink'
dag_name = 'dag'
dag = create_linear_dag([None, None], [source, sink], self.kvs_client,
dag_name)
self.socket.inbox.append(dag.SerializeToString())
# Add relevant metadata to the policy engine.
address_set = {(self.ip, 1), (self.ip, 2)}
self.policy.unpinned_cpu_executors.update(address_set)
# Prepopulate the pin_accept socket with sufficient success messages.
self.pin_socket.inbox.append(sutils.ok_resp)
self.pin_socket.inbox.append(sutils.ok_resp)
# Call the DAG creation method.
dags = {}
call_frequency = {}
create_dag(self.socket, self.pusher_cache, self.kvs_client, dags,
self.policy, call_frequency)
# Test that the correct metadata was created.
self.assertTrue(dag_name in dags)
created, dag_source = dags[dag_name]
self.assertEqual(created, dag)
self.assertEqual(len(dag_source), 1)
self.assertEqual(list(dag_source)[0], source)
self.assertTrue(source in call_frequency)
self.assertTrue(sink in call_frequency)
self.assertEqual(call_frequency[source], 0)
self.assertEqual(call_frequency[sink], 0)
# Test that the DAG is stored in the KVS correctly.
result = self.kvs_client.get(dag_name)[dag_name]
created = Dag()
created.ParseFromString(result.reveal())
self.assertEqual(created, dag)
# Test that the correct response was returned to the user.
        self.assertEqual(len(self.socket.outbox), 1)
response = GenericResponse()
response.ParseFromString(self.socket.outbox.pop())
self.assertTrue(response.success)
# Test that the correct pin messages were sent.
self.assertEqual(len(self.pusher_cache.socket.outbox), 2)
messages = self.pusher_cache.socket.outbox
function_set = {source, sink}
for message in messages:
pin_msg = PinFunction()
pin_msg.ParseFromString(message)
self.assertEqual(pin_msg.response_address, self.ip)
self.assertTrue(pin_msg.name in function_set)
function_set.discard(pin_msg.name)
self.assertEqual(len(function_set), 0)
for address in address_set:
self.assertTrue(get_pin_address(*address) in
self.pusher_cache.addresses)
# Test that the policy engine has the correct metadata stored.
self.assertEqual(len(self.policy.unpinned_cpu_executors), 0)
self.assertEqual(len(self.policy.pending_dags), 0)
self.assertTrue(source in self.policy.function_locations)
self.assertTrue(sink in self.policy.function_locations)
self.assertEqual(len(self.policy.function_locations[source]), 1)
self.assertEqual(len(self.policy.function_locations[sink]), 1)
def test_create_dag_already_exists(self):
'''
This test attempts to create a DAG that already exists and makes sure
that the server correctly rejects the request.
'''
# Create a simple two-function DAG and add it to the inbound socket.
source = 'source'
sink = 'sink'
dag_name = 'dag'
dag = create_linear_dag([None, None], [source, sink], self.kvs_client,
dag_name)
self.socket.inbox.append(dag.SerializeToString())
# Add this to the existing server metadata.
dags = {dag.name: (dag, {source})}
# Add relevant metadata to the policy engine.
address_set = {(self.ip, 1), (self.ip, 2)}
self.policy.unpinned_cpu_executors.update(address_set)
# Attempt to create the DAG.
call_frequency = {}
create_dag(self.socket, self.pusher_cache, self.kvs_client, dags,
self.policy, call_frequency)
# Check that an error was returned to the user.
self.assertEqual(len(self.socket.outbox), 1)
response = GenericResponse()
response.ParseFromString(self.socket.outbox[0])
self.assertFalse(response.success)
self.assertEqual(response.error, DAG_ALREADY_EXISTS)
# Check that no additional metadata was created or sent.
self.assertEqual(len(self.pusher_cache.socket.outbox), 0)
self.assertEqual(len(self.policy.unpinned_cpu_executors), 2)
self.assertEqual(len(self.policy.function_locations), 0)
self.assertEqual(len(self.policy.pending_dags), 0)
def test_create_dag_insufficient_resources(self):
'''
This test attempts to create a DAG even though there are not enough
free executors in the system. It checks that a pin message is attempted
to be sent, we run out of resources, and then the request is rejected.
We check that the metadata is properly restored back to its original
state.
'''
# Create a simple two-function DAG and add it to the inbound socket.
source = 'source'
sink = 'sink'
dag_name = 'dag'
dag = create_linear_dag([None, None], [source, sink], self.kvs_client,
dag_name)
self.socket.inbox.append(dag.SerializeToString())
# Add relevant metadata to the policy engine, but set the number of
# executors to fewer than needed.
address_set = {(self.ip, 1)}
self.policy.unpinned_cpu_executors.update(address_set)
# Prepopulate the pin_accept socket with sufficient success messages.
self.pin_socket.inbox.append(sutils.ok_resp)
# Attempt to create the DAG.
dags = {}
call_frequency = {}
create_dag(self.socket, self.pusher_cache, self.kvs_client, dags,
self.policy, call_frequency)
# Check that an error was returned to the user.
self.assertEqual(len(self.socket.outbox), 1)
response = GenericResponse()
response.ParseFromString(self.socket.outbox[0])
self.assertFalse(response.success)
self.assertEqual(response.error, NO_RESOURCES)
# Test that the correct pin messages were sent.
self.assertEqual(len(self.pusher_cache.socket.outbox), 2)
messages = self.pusher_cache.socket.outbox
# Checks for the pin message.
pin_msg = PinFunction()
pin_msg.ParseFromString(messages[0])
self.assertEqual(pin_msg.response_address, self.ip)
self.assertEqual(pin_msg.name, source)
# Checks for the unpin message.
self.assertEqual(messages[1], source)
        address = random.sample(list(address_set), 1)[0]
addresses = self.pusher_cache.addresses
self.assertEqual(get_pin_address(*address), addresses[0])
self.assertEqual(get_unpin_address(*address), addresses[1])
# Check that no additional messages were sent.
self.assertEqual(len(self.policy.unpinned_cpu_executors), 0)
self.assertEqual(len(self.policy.function_locations), 0)
self.assertEqual(len(self.policy.pending_dags), 0)
# Check that no additional metadata was created or sent.
self.assertEqual(len(call_frequency), 0)
self.assertEqual(len(dags), 0)
def test_delete_dag(self):
'''
We attempt to delete a DAG that has already been created and check to
ensure that the correct unpin messages are sent to executors and that
the metadata is updated appropriately.
'''
        # Create a simple two-function DAG and add it to the system metadata.
source = 'source'
sink = 'sink'
dag_name = 'dag'
dag = create_linear_dag([None, None], [source, sink], self.kvs_client,
dag_name)
dags = {}
call_frequency = {}
dags[dag.name] = (dag, {source})
call_frequency[source] = 100
call_frequency[sink] = 100
# Add the correct metadata to the policy engine.
source_location = (self.ip, 1)
sink_location = (self.ip, 2)
self.policy.function_locations[source] = {source_location}
self.policy.function_locations[sink] = {sink_location}
self.socket.inbox.append(dag.name)
# Attempt to delete the DAG.
delete_dag(self.socket, dags, self.policy, call_frequency)
# Check that the correct unpin messages were sent.
messages = self.pusher_cache.socket.outbox
self.assertEqual(len(messages), 2)
self.assertEqual(messages[0], source)
self.assertEqual(messages[1], sink)
addresses = self.pusher_cache.addresses
self.assertEqual(len(addresses), 2)
self.assertEqual(addresses[0], get_unpin_address(*source_location))
self.assertEqual(addresses[1], get_unpin_address(*sink_location))
# Check that the server metadata was updated correctly.
self.assertEqual(len(dags), 0)
self.assertEqual(len(call_frequency), 0)
# Check that the correct message was sent to the user.
self.assertEqual(len(self.socket.outbox), 1)
response = GenericResponse()
response.ParseFromString(self.socket.outbox.pop())
self.assertTrue(response.success)
def test_delete_nonexistent_dag(self):
'''
This test attempts to delete a nonexistent DAG and ensures that no
metadata is affected by the failed operation.
'''
# Make a request to delete an unknown DAG.
self.socket.inbox.append('dag')
delete_dag(self.socket, {}, self.policy, {})
# Ensure that an error response is sent to the user.
self.assertEqual(len(self.socket.outbox), 1)
response = GenericResponse()
response.ParseFromString(self.socket.outbox[0])
self.assertFalse(response.success)
self.assertEqual(response.error, NO_SUCH_DAG)
# Check that no additional messages were sent and no metadata changed.
self.assertEqual(len(self.pusher_cache.socket.outbox), 0)
self.assertEqual(len(self.policy.function_locations), 0)
self.assertEqual(len(self.policy.unpinned_cpu_executors), 0)
def test_create_gpu_dag_no_resources(self):
# Create a simple two-function DAG and add it to the inbound socket.
dag_name = 'dag'
dag = create_linear_dag([None], ['fn'], self.kvs_client,
dag_name)
dag.functions[0].gpu = True
self.socket.inbox.append(dag.SerializeToString())
dags = {}
call_frequency = {}
create_dag(self.socket, self.pusher_cache, self.kvs_client, dags,
self.policy, call_frequency)
# Check that an error was returned to the user.
self.assertEqual(len(self.socket.outbox), 1)
response = GenericResponse()
response.ParseFromString(self.socket.outbox[0])
self.assertFalse(response.success)
self.assertEqual(response.error, NO_RESOURCES)
# Test that the correct pin messages were sent.
self.assertEqual(len(self.pusher_cache.socket.outbox), 0)
# Check that no additional messages were sent.
self.assertEqual(len(self.policy.unpinned_cpu_executors), 0)
self.assertEqual(len(self.policy.function_locations), 0)
self.assertEqual(len(self.policy.pending_dags), 0)
# Check that no additional metadata was created or sent.
self.assertEqual(len(call_frequency), 0)
self.assertEqual(len(dags), 0)
def test_create_gpu_dag(self):
# Create a simple two-function DAG and add it to the inbound socket.
dag_name = 'dag'
fn = 'fn'
dag = create_linear_dag([None], [fn], self.kvs_client,
dag_name)
dag.functions[0].gpu = True
self.socket.inbox.append(dag.SerializeToString())
dags = {}
call_frequency = {}
address_set = {(self.ip, 1)}
self.policy.unpinned_gpu_executors.update(address_set)
self.pin_socket.inbox.append(sutils.ok_resp)
create_dag(self.socket, self.pusher_cache, self.kvs_client, dags,
self.policy, call_frequency)
# Test that the correct metadata was created.
self.assertTrue(dag_name in dags)
created, dag_source = dags[dag_name]
self.assertEqual(created, dag)
self.assertEqual(len(dag_source), 1)
self.assertEqual(list(dag_source)[0], fn)
self.assertTrue(fn in call_frequency)
self.assertEqual(call_frequency[fn], 0)
# Test that the DAG is stored in the KVS correctly.
result = self.kvs_client.get(dag_name)[dag_name]
created = Dag()
created.ParseFromString(result.reveal())
self.assertEqual(created, dag)
# Test that the correct response was returned to the user.
        self.assertEqual(len(self.socket.outbox), 1)
response = GenericResponse()
response.ParseFromString(self.socket.outbox.pop())
self.assertTrue(response.success)
# Test that the correct pin messages were sent.
self.assertEqual(len(self.pusher_cache.socket.outbox), 1)
messages = self.pusher_cache.socket.outbox
function_set = {fn}
for message in messages:
pin_msg = PinFunction()
pin_msg.ParseFromString(message)
self.assertEqual(pin_msg.response_address, self.ip)
self.assertTrue(pin_msg.name in function_set)
function_set.discard(pin_msg.name)
self.assertEqual(len(function_set), 0)
for address in address_set:
self.assertTrue(get_pin_address(*address) in
self.pusher_cache.addresses)
# Test that the policy engine has the correct metadata stored.
self.assertEqual(len(self.policy.unpinned_cpu_executors), 0)
self.assertEqual(len(self.policy.pending_dags), 0)
self.assertTrue(fn in self.policy.function_locations)
self.assertEqual(len(self.policy.function_locations[fn]), 1)
|
tests/test_provider_davidji99_ultradns.py | mjuenema/python-terrascript | 507 | 11078128 | # tests/test_provider_davidji99_ultradns.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:29:41 UTC)
def test_provider_import():
import terrascript.provider.davidji99.ultradns
def test_resource_import():
from terrascript.resource.davidji99.ultradns import ultradns_dirpool
from terrascript.resource.davidji99.ultradns import ultradns_probe_http
from terrascript.resource.davidji99.ultradns import ultradns_probe_ping
from terrascript.resource.davidji99.ultradns import ultradns_rdpool
from terrascript.resource.davidji99.ultradns import ultradns_record
from terrascript.resource.davidji99.ultradns import ultradns_tcpool
# TODO: Shortcut imports without namespace for official and supported providers.
# TODO: This has to be moved into a required_providers block.
# def test_version_source():
#
# import terrascript.provider.davidji99.ultradns
#
# t = terrascript.provider.davidji99.ultradns.ultradns()
# s = str(t)
#
# assert 'https://github.com/davidji99/terraform-provider-ultradns' in s
# assert '2.1.0' in s
|
src/sage/features/join_feature.py | UCD4IDS/sage | 1,742 | 11078152 | r"""
Join features
"""
from . import Feature, FeatureTestResult
class JoinFeature(Feature):
r"""
Join of several :class:`~sage.features.Feature` instances.
EXAMPLES::
sage: from sage.features import Executable
sage: from sage.features.join_feature import JoinFeature
sage: F = JoinFeature("shell-boolean",
....: (Executable('shell-true', 'true'),
....: Executable('shell-false', 'false')))
sage: F.is_present()
FeatureTestResult('shell-boolean', True)
sage: F = JoinFeature("asdfghjkl",
....: (Executable('shell-true', 'true'),
....: Executable('xxyyyy', 'xxyyyy-does-not-exist')))
sage: F.is_present()
FeatureTestResult('xxyyyy', False)
"""
def __init__(self, name, features, spkg=None, url=None, description=None):
"""
TESTS:
The empty join feature is present::
sage: from sage.features.join_feature import JoinFeature
sage: JoinFeature("empty", ()).is_present()
FeatureTestResult('empty', True)
"""
if spkg is None:
spkgs = set(f.spkg for f in features if f.spkg)
if len(spkgs) > 1:
raise ValueError('given features have more than one spkg; provide spkg argument')
elif len(spkgs) == 1:
spkg = next(iter(spkgs))
if url is None:
urls = set(f.url for f in features if f.url)
if len(urls) > 1:
raise ValueError('given features have more than one url; provide url argument')
elif len(urls) == 1:
url = next(iter(urls))
super().__init__(name, spkg=spkg, url=url, description=description)
self._features = features
def _is_present(self):
r"""
Test for the presence of the join feature.
EXAMPLES::
sage: from sage.features.latte import Latte
sage: Latte()._is_present() # optional - latte_int
FeatureTestResult('latte_int', True)
"""
for f in self._features:
test = f._is_present()
if not test:
return test
return FeatureTestResult(self, True)
def is_functional(self):
r"""
Test whether the join feature is functional.
EXAMPLES::
sage: from sage.features.latte import Latte
sage: Latte().is_functional() # optional - latte_int
FeatureTestResult('latte_int', True)
"""
for f in self._features:
test = f.is_functional()
if not test:
return test
return FeatureTestResult(self, True)
|
tests/compat.py | azmeuk/webtest | 239 | 11078175 | import unittest # noqa
try:
unicode()
except NameError:
b = bytes
def u(value):
if isinstance(value, bytes):
return value.decode('utf-8')
return value
else:
def b(value):
return str(value)
def u(value):
if isinstance(value, unicode):
return value
return unicode(value, 'utf-8')
|
bindings/python/cntk/train/__init__.py | shyamalschandra/CNTK | 17,702 | 11078180 | # Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
"""
Utilities for training.
"""
from .trainer import *
from .training_session import *
from .distributed import *
|
tests/torch/nn/parallel/tensor_parallel/_parallel_1d/_utils.py | lipovsek/oslo | 249 | 11078183 | import torch
import torch.distributed as dist
from oslo.torch.distributed import ParallelMode
def split_1d(parallel_context, tensor, world_size, dim):
tensor = tensor.chunk(world_size, dim=dim)[
parallel_context.get_local_rank(ParallelMode.TENSOR_1D)
]
return tensor
def gather_1d(parallel_context, tensor, world_size, dim):
tensor_list = [torch.zeros_like(tensor) for _ in range(world_size)]
dist.all_gather(
tensor_list,
tensor.contiguous(),
parallel_context.get_group(ParallelMode.TENSOR_1D),
)
tensor = torch.cat(tensor_list, dim=dim)
return tensor
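# Hedged usage note (not part of the original helpers): both split_1d and gather_1d
# assume that torch.distributed and the ParallelContext's TENSOR_1D process group
# have already been initialized by the surrounding test harness before they are called.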
|
tests/resources/_kktpm/generate.py | jarreguit/pymoo | 762 | 11078184 | import numpy as np
from pymoo.factory import get_problem
from pymoo.optimize import minimize
def generate_test_data():
for str_problem in ["osy"]:
problem = get_problem(str_problem)
X = []
# define a callback function that prints the X and F value of the best individual
def my_callback(algorithm):
pop = algorithm.pop
_X = pop.get("X")[np.random.permutation(len(pop))[:10]]
X.append(_X)
minimize(problem,
method='nsga2',
method_args={'pop_size': 100},
termination=('n_gen', 100),
callback=my_callback,
pf=problem.pareto_front(),
disp=True,
seed=1)
np.savetxt("%s.x" % str_problem, np.concatenate(X, axis=0), delimiter=",")
generate_test_data()
|
spyne/util/tdict.py | edustaff/spyne | 786 | 11078212 | #
# spyne - Copyright (C) Spyne contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
"""The typed dict module"""
from itertools import chain
class tdict(dict):
def __init__(self, kt=None, vt=None, data=None):
"""This is a typed dict implementation that optionally enforces given
types on contained values on assignment."""
self._kt = kt
self._vt = vt
if kt is None and vt is None:
self.check = self._check_noop
elif kt is None:
self.check = self._check_v
elif vt is None:
self.check = self._check_k
else:
self.check = self._check_kv
if data is not None:
self.update(data)
def _check_noop(self, *_):
pass
def _check_k(self, key, _):
if not isinstance(key, self._kt):
raise TypeError(repr(key))
def _check_v(self, _, value):
if not isinstance(value, self._vt):
raise TypeError(repr(value))
def _check_kv(self, key, value):
if not isinstance(key, self._kt):
raise TypeError(repr(key))
if not isinstance(value, self._vt):
raise TypeError(repr(value))
def __setitem__(self, key, value):
self.check(key, value)
super(tdict, self).__setitem__(key, value)
    def update(self, E=None, **F):
        if E is None:
            it = F.items()
        else:
            try:
                it = chain(E.items(), F.items())
            except AttributeError:
                # E is an iterable of (key, value) pairs rather than a mapping.
                it = chain(E, F.items())
        for k, v in it:
            self[k] = v
    def setdefault(self, k, d=None):
        if self._kt is not None:
            self._check_k(k, d)
        if self._vt is not None:
            self._check_v(k, d)
        return super(tdict, self).setdefault(k, d)
@classmethod
def fromkeys(cls, S, v=None):
kt = vt = None
if len(S) > 0:
kt, = set((type(s) for s in S))
if v is not None:
vt = type(v)
retval = tdict(kt, vt)
for s in S:
retval[s] = v
return retval
    def repr(self):
        return "tdict(kt=%s, vt=%s, data=%s)" % \
            (self._kt, self._vt, super(tdict, self).__repr__())
    # Expose the same formatting through the standard protocol as well.
    __repr__ = repr
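if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): a tdict that enforces
    # str keys and int values, rejecting a mistyped assignment. All names and
    # values below are illustrative.
    scores = tdict(str, int, {"alice": 3})
    scores["bob"] = 5
    scores.update({"carol": 7})
    try:
        scores["dave"] = "seven"  # wrong value type, raises TypeError
    except TypeError as error:
        print("rejected:", error)
    print(scores)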
|
tests/components/abode/test_switch.py | andersop91/core | 22,481 | 11078230 | """Tests for the Abode switch device."""
from unittest.mock import patch
from homeassistant.components.abode import (
DOMAIN as ABODE_DOMAIN,
SERVICE_TRIGGER_AUTOMATION,
)
from homeassistant.components.switch import DOMAIN as SWITCH_DOMAIN
from homeassistant.const import (
ATTR_ENTITY_ID,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
STATE_ON,
)
from homeassistant.helpers import entity_registry as er
from .common import setup_platform
AUTOMATION_ID = "switch.test_automation"
AUTOMATION_UID = "47fae27488f74f55b964a81a066c3a01"
DEVICE_ID = "switch.test_switch"
DEVICE_UID = "0012a4d3614cb7e2b8c9abea31d2fb2a"
async def test_entity_registry(hass):
"""Tests that the devices are registered in the entity registry."""
await setup_platform(hass, SWITCH_DOMAIN)
entity_registry = er.async_get(hass)
entry = entity_registry.async_get(AUTOMATION_ID)
assert entry.unique_id == AUTOMATION_UID
entry = entity_registry.async_get(DEVICE_ID)
assert entry.unique_id == DEVICE_UID
async def test_attributes(hass):
"""Test the switch attributes are correct."""
await setup_platform(hass, SWITCH_DOMAIN)
state = hass.states.get(DEVICE_ID)
assert state.state == STATE_OFF
async def test_switch_on(hass):
"""Test the switch can be turned on."""
await setup_platform(hass, SWITCH_DOMAIN)
with patch("abodepy.AbodeSwitch.switch_on") as mock_switch_on:
assert await hass.services.async_call(
SWITCH_DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: DEVICE_ID}, blocking=True
)
await hass.async_block_till_done()
mock_switch_on.assert_called_once()
async def test_switch_off(hass):
"""Test the switch can be turned off."""
await setup_platform(hass, SWITCH_DOMAIN)
with patch("abodepy.AbodeSwitch.switch_off") as mock_switch_off:
assert await hass.services.async_call(
SWITCH_DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: DEVICE_ID}, blocking=True
)
await hass.async_block_till_done()
mock_switch_off.assert_called_once()
async def test_automation_attributes(hass):
"""Test the automation attributes are correct."""
await setup_platform(hass, SWITCH_DOMAIN)
state = hass.states.get(AUTOMATION_ID)
# State is set based on "enabled" key in automation JSON.
assert state.state == STATE_ON
async def test_turn_automation_off(hass):
"""Test the automation can be turned off."""
with patch("abodepy.AbodeAutomation.enable") as mock_trigger:
await setup_platform(hass, SWITCH_DOMAIN)
await hass.services.async_call(
SWITCH_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: AUTOMATION_ID},
blocking=True,
)
await hass.async_block_till_done()
mock_trigger.assert_called_once_with(False)
async def test_turn_automation_on(hass):
"""Test the automation can be turned on."""
with patch("abodepy.AbodeAutomation.enable") as mock_trigger:
await setup_platform(hass, SWITCH_DOMAIN)
await hass.services.async_call(
SWITCH_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: AUTOMATION_ID},
blocking=True,
)
await hass.async_block_till_done()
mock_trigger.assert_called_once_with(True)
async def test_trigger_automation(hass, requests_mock):
"""Test the trigger automation service."""
await setup_platform(hass, SWITCH_DOMAIN)
with patch("abodepy.AbodeAutomation.trigger") as mock:
await hass.services.async_call(
ABODE_DOMAIN,
SERVICE_TRIGGER_AUTOMATION,
{ATTR_ENTITY_ID: AUTOMATION_ID},
blocking=True,
)
await hass.async_block_till_done()
mock.assert_called_once()
|
tests/management/commands/test_list_signals.py | KazakovDenis/django-extensions | 4,057 | 11078253 | # -*- coding: utf-8 -*-
import re
from io import StringIO
from django.test import TestCase
from django.core.management import call_command
class ListSignalsTests(TestCase):
"""Tests for list_signals command."""
def setUp(self):
self.out = StringIO()
def test_should_print_all_signals(self):
expected_result = '''django.contrib.sites.models.Site (site)
pre_delete
django.contrib.sites.models.clear_site_cache #
pre_save
django.contrib.sites.models.clear_site_cache #
tests.testapp.models.HasOwnerModel (has owner model)
pre_save
tests.testapp.models.dummy_handler #
'''
call_command('list_signals', stdout=self.out)
# Strip line numbers to make the test less brittle
        out = re.sub(r'(?<=#)\d+', '', self.out.getvalue(), flags=re.M)
self.assertIn(expected_result, out)
|
simulation_research/traffic/point_process_model.py | deepneuralmachine/google-research | 23,901 | 11078266 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Point process model for the traffic flow fitting and generation.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
import numpy as np
import scipy.interpolate
# TODO(albertyuchen): Find better basis creation tools.
def create_bspline_basis(knots, spline_order, dt=0.02):
"""Create B-spline basis."""
# The repeated boundary knots are appended as it is required for Cox de Boor
# recursive algorithm. See https://math.stackexchange.com/questions/2817170/
# what-is-the-purpose-of-having-repeated-knots-in-a-b-spline and the link
# https://en.wikipedia.org/wiki/De_Boor%27s_algorithm.
knots = list(knots)
knots = [knots[0]] * spline_order + knots + [knots[-1]] * spline_order
num_basis = len(knots) - spline_order - 1
# Query token is in format: [knots, basis coefficients, spline order]
# See https://docs.scipy.org/doc/scipy/reference/generated/
# scipy.interpolate.splev.html
query_token = [0, 0, spline_order]
query_token[0] = np.array(knots)
time_line = np.linspace(knots[0], knots[-1], int(np.round(knots[-1]/dt)) + 1)
# Add column for the constent term.
basis_matrix = np.zeros((len(time_line), num_basis + 1))
basis_matrix[:, -1] = np.ones(len(time_line)) # Constant term.
for basis_index in range(num_basis):
basis_coefficients = np.zeros(num_basis)
basis_coefficients[basis_index] = 1.0
query_token[1] = basis_coefficients.tolist()
base = scipy.interpolate.splev(time_line, query_token)
basis_matrix[:, basis_index] = base
return basis_matrix, time_line
class PointProcessModel(object):
"""Generates random traffic using inhomogeneous Poisson models."""
# def __init__(self):
@classmethod
def generator(cls, rates, time_step_size):
"""Generate events according to the rates.
If we know the underlying event rate of a point process, the number of
events in a certain time interval follows Poisson distribution with
parameter lambda. The lambda is the integral of the event rate in the time
interval. See reference:
Args:
rates:
time_step_size:
Returns:
Number of events for each bin w.r.t. the `rates`.
"""
num_events = np.zeros(len(rates))
for t, rate in enumerate(rates):
num_events[t] = np.random.poisson(time_step_size * rate, 1)
return num_events
@classmethod
def fit_homo_poisson(cls, events, time_step_size):
"""Fit the homogeneous Poisson model.
For homogeneous Poisson process, the maximum likelihood estimator for the
event rate is the mean of the data.
Args:
      events: An array of numbers of events in each time bin.
time_step_size: Bin width. We assume the bin sizes are equal.
Returns:
Event rate.
"""
return np.mean(events) / time_step_size
  def time_bin_data(self, timestamps, time_step_size):
    """Bin the events timestamps into time bins.
    This function discretizes the timestamps into different time bins, so that
    the model can be fitted using generalized linear models in this class.
    Args:
      timestamps: A list of observed event timestamps.
      time_step_size: The time step size.
    Returns:
      events: Time binned events count.
    """
    # Implementation follows the histogram sketch left in the original stub:
    # bins span [0, T], where T is the last timestamp rounded up to a whole
    # number of time steps.
    timestamps = np.asarray(timestamps)
    num_bins = max(int(np.ceil(timestamps.max() / time_step_size)), 1)
    bin_edges = np.arange(num_bins + 1) * time_step_size
    events, _ = np.histogram(timestamps, bins=bin_edges)
    return events
@classmethod
def fit_inhomo_poisson(cls,
events,
time_step_size,
spline_order=3,
num_knots=5):
"""Fits the inhomogeneous Poisson model.
Args:
events: A sequence of number of events.
time_step_size: Time step size.
spline_order: The order of the spline.
num_knots: Number of knots inbetween the two ends. The knots distribute
uniformly.
Returns:
Estimated event rates.
"""
# Creates knots between [0, 1]
knots = np.linspace(0, 1, num_knots + 2) # +2 to includes two ends.
    # The number of sampled points is the same as those in the basis.
dt = 1 / (len(events) - 1)
xx, _ = create_bspline_basis(knots, spline_order, dt)
yy = events
beta = np.zeros(xx.shape[1])
max_iterations = 300
for _ in range(max_iterations):
      # Poisson GLM negative log likelihood: sum_i(-y_i * eta_i + exp(eta_i)).
      negative_log_likelihood = -yy.T @ (xx @ beta) + np.sum(np.exp(xx @ beta))
logging.info('Negative log likelihood: %s', negative_log_likelihood)
gradient = -xx.T @ yy + xx.T @ np.exp(xx @ beta)
# TODO(albertyuchen): Apply backtracking line search.
# The method is described: https://www.stat.cmu.edu/~ryantibs/convexopt/
# lectures/grad-descent.pdf.
beta -= gradient * 0.001
# TODO(albertyuchen): Apply Newton method here by multiplying the Hessian.
# The Newton's method requires careful backtracking line search.
# hessian = xx.T @ (np.exp(xx @ beta).reshape(-1, 1) * xx)
# beta -= hessian @ gradient * 0.0001
# TODO(albertyuchen): Add convergence condition.
# if |NLL(t) - NLL(t + 1)| < delta
return np.exp(xx @ beta) / time_step_size
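if __name__ == "__main__":
  # Hedged usage sketch (not part of the original module): build a B-spline basis,
  # simulate events from a constant rate, and recover that rate with the
  # homogeneous-Poisson MLE. All numbers below are illustrative assumptions.
  basis, timeline = create_bspline_basis(np.linspace(0, 1, 7), spline_order=3)
  print('Basis matrix shape:', basis.shape)
  dt = 0.02
  true_rate = 5.0
  simulated_events = PointProcessModel.generator(np.full(500, true_rate), dt)
  print('Estimated rate:', PointProcessModel.fit_homo_poisson(simulated_events, dt))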
|
src/ecdsa/eddsa.py | 592767809/python-ecdsa | 130 | 11078277 | """Implementation of Edwards Digital Signature Algorithm."""
import hashlib
from ._sha3 import shake_256
from . import ellipticcurve
from ._compat import (
remove_whitespace,
bit_length,
bytes_to_int,
int_to_bytes,
compat26_str,
)
# edwards25519, defined in RFC7748
_p = 2 ** 255 - 19
_a = -1
_d = int(
remove_whitespace(
"370957059346694393431380835087545651895421138798432190163887855330"
"85940283555"
)
)
_h = 8
_Gx = int(
remove_whitespace(
"151122213495354007725011514095885315114540126930418572060461132"
"83949847762202"
)
)
_Gy = int(
remove_whitespace(
"463168356949264781694283940034751631413079938662562256157830336"
"03165251855960"
)
)
_r = 2 ** 252 + 0x14DEF9DEA2F79CD65812631A5CF5D3ED
def _sha512(data):
return hashlib.new("sha512", compat26_str(data)).digest()
curve_ed25519 = ellipticcurve.CurveEdTw(_p, _a, _d, _h, _sha512)
generator_ed25519 = ellipticcurve.PointEdwards(
curve_ed25519, _Gx, _Gy, 1, _Gx * _Gy % _p, _r, generator=True
)
# edwards448, defined in RFC7748
_p = 2 ** 448 - 2 ** 224 - 1
_a = 1
_d = -39081 % _p
_h = 4
_Gx = int(
remove_whitespace(
"224580040295924300187604334099896036246789641632564134246125461"
"686950415467406032909029192869357953282578032075146446173674602635"
"247710"
)
)
_Gy = int(
remove_whitespace(
"298819210078481492676017930443930673437544040154080242095928241"
"372331506189835876003536878655418784733982303233503462500531545062"
"832660"
)
)
_r = 2 ** 446 - 0x8335DC163BB124B65129C96FDE933D8D723A70AADC873D6D54A7BB0D
def _shake256(data):
return shake_256(data, 114)
curve_ed448 = ellipticcurve.CurveEdTw(_p, _a, _d, _h, _shake256)
generator_ed448 = ellipticcurve.PointEdwards(
curve_ed448, _Gx, _Gy, 1, _Gx * _Gy % _p, _r, generator=True
)
class PublicKey(object):
"""Public key for the Edwards Digital Signature Algorithm."""
def __init__(self, generator, public_key, public_point=None):
self.generator = generator
self.curve = generator.curve()
self.__encoded = public_key
# plus one for the sign bit and round up
self.baselen = (bit_length(self.curve.p()) + 1 + 7) // 8
if len(public_key) != self.baselen:
raise ValueError(
"Incorrect size of the public key, expected: {0} bytes".format(
self.baselen
)
)
if public_point:
self.__point = public_point
else:
self.__point = ellipticcurve.PointEdwards.from_bytes(
self.curve, public_key
)
def __eq__(self, other):
if isinstance(other, PublicKey):
return (
self.curve == other.curve and self.__encoded == other.__encoded
)
return NotImplemented
def __ne__(self, other):
return not self == other
@property
def point(self):
return self.__point
@point.setter
def point(self, other):
if self.__point != other:
raise ValueError("Can't change the coordinates of the point")
self.__point = other
def public_point(self):
return self.__point
def public_key(self):
return self.__encoded
def verify(self, data, signature):
"""Verify a Pure EdDSA signature over data."""
data = compat26_str(data)
if len(signature) != 2 * self.baselen:
raise ValueError(
"Invalid signature length, expected: {0} bytes".format(
2 * self.baselen
)
)
R = ellipticcurve.PointEdwards.from_bytes(
self.curve, signature[: self.baselen]
)
S = bytes_to_int(signature[self.baselen :], "little")
if S >= self.generator.order():
raise ValueError("Invalid signature")
dom = bytearray()
if self.curve == curve_ed448:
dom = bytearray(b"SigEd448" + b"\x00\x00")
k = bytes_to_int(
self.curve.hash_func(dom + R.to_bytes() + self.__encoded + data),
"little",
)
if self.generator * S != self.__point * k + R:
raise ValueError("Invalid signature")
return True
class PrivateKey(object):
"""Private key for the Edwards Digital Signature Algorithm."""
def __init__(self, generator, private_key):
self.generator = generator
self.curve = generator.curve()
# plus one for the sign bit and round up
self.baselen = (bit_length(self.curve.p()) + 1 + 7) // 8
if len(private_key) != self.baselen:
raise ValueError(
"Incorrect size of private key, expected: {0} bytes".format(
self.baselen
)
)
self.__private_key = bytes(private_key)
self.__h = bytearray(self.curve.hash_func(private_key))
self.__public_key = None
a = self.__h[: self.baselen]
a = self._key_prune(a)
scalar = bytes_to_int(a, "little")
self.__s = scalar
@property
def private_key(self):
return self.__private_key
def __eq__(self, other):
if isinstance(other, PrivateKey):
return (
self.curve == other.curve
and self.__private_key == other.__private_key
)
return NotImplemented
def __ne__(self, other):
return not self == other
def _key_prune(self, key):
# make sure the key is not in a small subgroup
h = self.curve.cofactor()
if h == 4:
h_log = 2
elif h == 8:
h_log = 3
else:
raise ValueError("Only cofactor 4 and 8 curves supported")
key[0] &= ~((1 << h_log) - 1)
# ensure the highest bit is set but no higher
l = bit_length(self.curve.p())
if l % 8 == 0:
key[-1] = 0
key[-2] |= 0x80
else:
key[-1] = key[-1] & (1 << (l % 8)) - 1 | 1 << (l % 8) - 1
return key
def public_key(self):
"""Generate the public key based on the included private key"""
if self.__public_key:
return self.__public_key
public_point = self.generator * self.__s
self.__public_key = PublicKey(
self.generator, public_point.to_bytes(), public_point
)
return self.__public_key
def sign(self, data):
"""Perform a Pure EdDSA signature over data."""
data = compat26_str(data)
A = self.public_key().public_key()
prefix = self.__h[self.baselen :]
dom = bytearray()
if self.curve == curve_ed448:
dom = bytearray(b"SigEd448" + b"\x00\x00")
r = bytes_to_int(self.curve.hash_func(dom + prefix + data), "little")
R = (self.generator * r).to_bytes()
k = bytes_to_int(self.curve.hash_func(dom + R + A + data), "little")
k %= self.generator.order()
S = (r + k * self.__s) % self.generator.order()
return R + int_to_bytes(S, self.baselen, "little")
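if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): sign and verify a short
    # message with Ed25519 using the classes above. The 32-byte seed is a randomly
    # generated illustrative value, not a published test vector.
    import os
    signing_key = PrivateKey(generator_ed25519, os.urandom(32))
    verifying_key = signing_key.public_key()
    signature = signing_key.sign(b"hello world")
    print("signature valid:", verifying_key.verify(b"hello world", signature))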
|
boto3_type_annotations/boto3_type_annotations/snowball/client.py | cowboygneox/boto3_type_annotations | 119 | 11078289 | from typing import Optional
from botocore.client import BaseClient
from botocore.waiter import Waiter
from typing import Union
from typing import Dict
from botocore.paginate import Paginator
class Client(BaseClient):
def can_paginate(self, operation_name: str = None):
pass
def cancel_cluster(self, ClusterId: str) -> Dict:
pass
def cancel_job(self, JobId: str) -> Dict:
pass
def create_address(self, Address: Dict) -> Dict:
pass
def create_cluster(self, JobType: str, Resources: Dict, AddressId: str, RoleARN: str, ShippingOption: str, Description: str = None, KmsKeyARN: str = None, SnowballType: str = None, Notification: Dict = None, ForwardingAddressId: str = None) -> Dict:
pass
def create_job(self, JobType: str = None, Resources: Dict = None, Description: str = None, AddressId: str = None, KmsKeyARN: str = None, RoleARN: str = None, SnowballCapacityPreference: str = None, ShippingOption: str = None, Notification: Dict = None, ClusterId: str = None, SnowballType: str = None, ForwardingAddressId: str = None) -> Dict:
pass
def describe_address(self, AddressId: str) -> Dict:
pass
def describe_addresses(self, MaxResults: int = None, NextToken: str = None) -> Dict:
pass
def describe_cluster(self, ClusterId: str) -> Dict:
pass
def describe_job(self, JobId: str) -> Dict:
pass
def generate_presigned_url(self, ClientMethod: str = None, Params: Dict = None, ExpiresIn: int = None, HttpMethod: str = None):
pass
def get_job_manifest(self, JobId: str) -> Dict:
pass
def get_job_unlock_code(self, JobId: str) -> Dict:
pass
def get_paginator(self, operation_name: str = None) -> Paginator:
pass
def get_snowball_usage(self) -> Dict:
pass
def get_waiter(self, waiter_name: str = None) -> Waiter:
pass
def list_cluster_jobs(self, ClusterId: str, MaxResults: int = None, NextToken: str = None) -> Dict:
pass
def list_clusters(self, MaxResults: int = None, NextToken: str = None) -> Dict:
pass
def list_compatible_images(self, MaxResults: int = None, NextToken: str = None) -> Dict:
pass
def list_jobs(self, MaxResults: int = None, NextToken: str = None) -> Dict:
pass
def update_cluster(self, ClusterId: str, RoleARN: str = None, Description: str = None, Resources: Dict = None, AddressId: str = None, ShippingOption: str = None, Notification: Dict = None, ForwardingAddressId: str = None) -> Dict:
pass
def update_job(self, JobId: str, RoleARN: str = None, Notification: Dict = None, Resources: Dict = None, AddressId: str = None, ShippingOption: str = None, Description: str = None, SnowballCapacityPreference: str = None, ForwardingAddressId: str = None) -> Dict:
pass
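# Hedged usage note (not part of the original stubs): these annotations only describe
# the runtime client, which would be created with boto3 itself, e.g.
# boto3.client("snowball").list_jobs(MaxResults=10); the stub methods are never called.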
|
sparse_operation_kit/unit_test/test_scripts/tf2/test_all_gather_dispatcher.py | xjqbest/HugeCTR | 130 | 11078290 | """
Copyright (c) 2021, NVIDIA CORPORATION.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from single_worker_base import SingleWorkerbase
import numpy as np
import tensorflow as tf
from tensorflow.python.distribute.values import PerReplica
import sys, os
sys.path.append(os.path.abspath(os.path.join(
os.path.dirname(os.path.abspath(__file__)), r"../../python/")))
import sok_unit_test_lib
class TestAllGatherDispatcher_single(SingleWorkerbase):
def __init__(self):
self.global_batch_size = 65536
super(TestAllGatherDispatcher_single, self).__init__(global_batch_size=self.global_batch_size)
def call(self):
rows_num_per_sample = 26
max_nnz = 3
all_inputs = np.random.randint(low=1, high=100, size=[self.global_batch_size * rows_num_per_sample, max_nnz])
all_mask = np.random.randint(low=0, high=2, size=[self.global_batch_size * rows_num_per_sample, max_nnz])
all_inputs *= all_mask
print("[INFO] original dense all inputs:\n", all_inputs)
all_valid_indices = tf.where(all_inputs != 0)
all_valid_values = tf.gather_nd(all_inputs, all_valid_indices)
all_inputs_sparse_tensor = tf.sparse.SparseTensor(values=all_valid_values, indices=all_valid_indices, dense_shape=all_inputs.shape)
print("[INFO] original inputs sparse tensor:\n", all_inputs_sparse_tensor)
sparse_tensors = tf.sparse.split(sp_input=all_inputs_sparse_tensor, num_split=8, axis=0)
sparse_tensors = PerReplica(sparse_tensors)
print("[INFO] to each replica sparse tensors:\n", sparse_tensors)
target_values = all_inputs_sparse_tensor.values
# target_indices = tf.concat([tf.transpose(sparse_tensor.indices, perm=[1, 0])[0]
# for sparse_tensor in sparse_tensors.values],
# axis=0)
target_indices = tf.transpose(all_inputs_sparse_tensor.indices, perm=[1, 0])[0]
target_num_elements = tf.concat([tf.shape(sparse_tensor.indices, out_type=tf.int64)[0]
for sparse_tensor in sparse_tensors.values],
axis=0)
target_total_valid_num = tf.size(target_values, out_type=tf.int64)
print("[INFO] target_values: \n", target_values)
print("[INFO] target_indcies: \n", target_indices)
print("[INFO] target_num_elements: \n", target_num_elements)
print("[INFO] target_total_valid_num: \n", target_total_valid_num)
@tf.function
def _step(sparse_tensor):
if not isinstance(sparse_tensor, tf.sparse.SparseTensor):
raise RuntimeError("sparse_tensor must be a tf.sparse.SparseTensor")
values = sparse_tensor.values # [num_of_valids,]
indices = sparse_tensor.indices
row_indices = tf.transpose(indices, perm=[1, 0])[0] # [num_of_valids]
replica_ctx = tf.distribute.get_replica_context()
values_out, indices_out, num_elements, total_valid_num = sok_unit_test_lib.all_gather_dispatcher(
replica_ctx.replica_id_in_sync_group,
replica_ctx.num_replicas_in_sync,
values,
row_indices,
global_batch_size=self.global_batch_size,
rows_num_per_sample=rows_num_per_sample,
max_nnz=max_nnz)
return values_out, indices_out, num_elements, total_valid_num
values_out, indices_out, num_elements, total_valid_num = self.strategy.run(_step, args=(sparse_tensors,))
print("[INFO]: after all gather dispatcher, values = \n", values_out)
print("[INFO]: after all gather dispatcher, indices = \n", indices_out)
print("[INFO]: after all gather dispatcher, num_elements = \n", num_elements)
print("[INFO]: after all gather dispatcher, total_valid_num = \n", total_valid_num)
for i in range(len(values_out.values)):
tf.debugging.assert_equal(target_values, values_out.values[i][:target_values.shape[0]],
message="values %d not meet target." %i)
tf.debugging.assert_equal(target_indices, indices_out.values[i][:target_indices.shape[0]],
message="indcies %d not meet target." %i)
tf.debugging.assert_equal(target_num_elements, num_elements.values[i][:target_num_elements.shape[0]],
message="num_elements %d not meet target." %i)
tf.debugging.assert_equal(target_total_valid_num, total_valid_num.values[i],
message="total_valid_num %d not meet target." %i)
if __name__ == "__main__":
TestAllGatherDispatcher_single()() |
torchdistill/losses/__init__.py | AhmedHussKhalifa/torchdistill | 576 | 11078298 | <filename>torchdistill/losses/__init__.py
from torchdistill.losses import custom, single
|
OnlineDB/SiStripESSources/python/test_FedCablingFromConfigDb_cfg.py | ckamtsikis/cmssw | 852 | 11078302 | <filename>OnlineDB/SiStripESSources/python/test_FedCablingFromConfigDb_cfg.py
import FWCore.ParameterSet.Config as cms
process = cms.Process("testFedCablingFromConfigDb")
process.load("DQM.SiStripCommon.MessageLogger_cfi")
process.load("OnlineDB.SiStripESSources.FedCablingFromConfigDb_cff")
process.SiStripConfigDb.UsingDb = True
process.SiStripConfigDb.ConfDb = ''
process.SiStripConfigDb.Partitions.PrimaryPartition.PartitionName = ''
process.SiStripConfigDb.Partitions.PrimaryPartition.RunNumber = 0
process.FedCablingFromConfigDb.CablingSource = 'UNDEFINED'
process.load("IORawData.SiStripInputSources.EmptySource_cff")
process.maxEvents.input = 2
process.test = cms.EDAnalyzer("test_FedCablingBuilder")
process.p = cms.Path(process.test)
|
py/mazebase/items/MazeItem.py | fakeNetflix/facebook-repo-MazeBase | 263 | 11078314 | <reponame>fakeNetflix/facebook-repo-MazeBase<gh_stars>100-1000
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import abc
import six
from copy import deepcopy
from mazebase.utils.mazeutils import populate_kwargs
@six.add_metaclass(abc.ABCMeta)
class MazeItem(object):
'''
Maze items should not share state
PRIO determines priority of visibility when viewing the object,
and has no effect on the game. A higher priority object is always
diplayed first
'''
__properties = dict(
location=(0, 0),
visible=True,
passable=True,
)
def __init__(self, **kwargs):
populate_kwargs(self, self.__class__.__properties, kwargs)
self.game = None
self.PRIO = 0
def _get_display_symbol(self):
'''
-> (text, foreground, background, attributes)
text: should be 3 characters
foreground: see termcolor.py
background: see termcolor.py
attributes: see termcolor.py
'''
return (None, None, None, None)
def clone(self):
return deepcopy(self)
def featurize(self):
''' Return a list of the features for this item '''
return [type(self).__name__]
@classmethod
def all_features(cls):
'''
All possible features for this item.
Must implement if subclass implements featurize()
'''
return [cls.__name__]
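# Illustrative sketch only, not part of the original package: a hypothetical
# "WaterTile" item showing how PRIO, _get_display_symbol() and the default
# featurize()/all_features() contract fit together. The name, colors and PRIO
# value are arbitrary choices made for this example.
class WaterTile(MazeItem):
    def __init__(self, **kwargs):
        super(WaterTile, self).__init__(**kwargs)
        self.PRIO = -100  # low priority: drawn beneath anything standing on it
    def _get_display_symbol(self):
        # three characters of text, then foreground, background, attributes (termcolor)
        return (u'   ', None, 'on_blue', None)
    # featurize() is inherited, so instances are featurized as ['WaterTile'],
    # which matches WaterTile.all_features()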
|
custom_components/xiaomi_miot_raw/deps/xiaomi_cloud_new.py | easy8646/xiaomi_miot_raw | 1,294 | 11078323 | """
The base logic was taken from project https://github.com/squachen/micloud
I had to rewrite the code to work asynchronously and handle timeouts for
requests to the cloud.
MIT License
Copyright (c) 2020 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import asyncio
import base64
import hashlib
import hmac
import json
import locale
import logging
import os
import random
import string
import time
from aiohttp import ClientSession, ClientConnectorError
_LOGGER = logging.getLogger(__name__)
SERVERS = ['cn', 'de', 'i2', 'ru', 'sg', 'us']
UA = "Android-7.1.1-1.0.0-ONEPLUS A3010-136-%s APP/xiaomi.smarthome APPV/62830"
class MiCloud:
auth = None
svr = None
_fail_count = 0
def __init__(self, session: ClientSession):
self.session = session
self.device_id = get_random_string(16)
async def login(self, username: str, password: str):
try:
payload = await self._login_step1()
if isinstance(payload, Exception):
return (-2, payload)
data = await self._login_step2(username, password, payload)
if isinstance(data, Exception):
return (-2, data)
if 'notificationUrl' in data:
return (-1, data['notificationUrl'])
elif not data['location']:
return (-1, None)
token = await self._login_step3(data['location'])
if isinstance(token, Exception):
return (-2, token)
self.auth = {
'user_id': data['userId'],
'ssecurity': data['ssecurity'],
'service_token': token
}
return (0, None)
except Exception as e:
# There should be no exception here?
_LOGGER.exception(f"Can't login to MiCloud: {e}")
raise e from None
def login_by_credientals(self, userId, serviceToken, ssecurity):
self.auth = {
'user_id': userId,
'ssecurity': ssecurity,
'service_token': serviceToken
}
return True
async def _login_step1(self):
_LOGGER.debug(f"Logging in to Xiaomi Cloud (1/3)...")
try:
r = await self.session.get(
'https://account.xiaomi.com/pass/serviceLogin',
cookies={'sdkVersion': '3.8.6', 'deviceId': self.device_id},
headers={'User-Agent': UA % self.device_id},
params={'sid': 'xiaomiio', '_json': 'true'})
raw = await r.read()
resp: dict = json.loads(raw[11:])
return {k: v for k, v in resp.items()
if k in ('sid', 'qs', 'callback', '_sign')}
except ClientConnectorError as ex:
return ex
async def _login_step2(self, username: str, password: str, payload: dict):
_LOGGER.debug(f"Logging in to Xiaomi Cloud (2/3)...")
payload['user'] = username
payload['hash'] = hashlib.md5(password.encode()).hexdigest().upper()
try:
r = await self.session.post(
'https://account.xiaomi.com/pass/serviceLoginAuth2',
cookies={'sdkVersion': '3.8.6', 'deviceId': self.device_id},
data=payload,
headers={'User-Agent': UA % self.device_id},
params={'_json': 'true'})
raw = await r.read()
resp = json.loads(raw[11:])
return resp
except ClientConnectorError as ex:
return ex
async def _login_step3(self, location):
_LOGGER.debug(f"Logging in to Xiaomi Cloud (3/3)...")
try:
r = await self.session.get(location, headers={'User-Agent': UA})
service_token = r.cookies['serviceToken'].value
return service_token
except ClientConnectorError as ex:
return ex
async def get_total_devices(self, servers: list):
total = []
for server in servers:
devices = await self.get_devices(server)
if devices is None:
return None
total += devices
return total
async def get_devices(self, server: str):
assert server in SERVERS, "Wrong server: " + server
baseurl = 'https://api.io.mi.com/app' if server == 'cn' \
else f"https://{server}.api.io.mi.com/app"
url = '/home/device_list'
data = '{"getVirtualModel":false,"getHuamiDevices":0}'
nonce = gen_nonce()
signed_nonce = gen_signed_nonce(self.auth['ssecurity'], nonce)
signature = gen_signature(url, signed_nonce, nonce, data)
try:
loc = locale.getdefaultlocale()[0] or "en_US"
except Exception:
loc = "en_US"
try:
r = await self.session.post(baseurl + url, cookies={
'userId': self.auth['user_id'],
'serviceToken': self.auth['service_token'],
'locale': loc
}, headers={
'User-Agent': UA,
'x-xiaomi-protocal-flag-cli': 'PROTOCAL-HTTP2'
}, data={
'signature': signature,
'_nonce': nonce,
'data': data
}, timeout=5)
resp = await r.json(content_type=None)
assert resp['code'] == 0, resp
return resp['result']['list']
except asyncio.TimeoutError:
_LOGGER.error("Timeout while loading MiCloud device list")
except ClientConnectorError:
_LOGGER.error("Failed loading MiCloud device list")
except:
_LOGGER.exception(f"Can't load devices list")
return None
async def request_miot_api(self, api, data = None, server: str = None):
server = server or self.svr or 'cn'
api_base = 'https://api.io.mi.com/app' if server == 'cn' \
else f"https://{server}.api.io.mi.com/app"
url = api_base+api
nonce = gen_nonce()
signed_nonce = gen_signed_nonce(self.auth['ssecurity'], nonce)
signature = gen_signature(api, signed_nonce, nonce, data)
headers = {
'content-type': "application/x-www-form-urlencoded",
'x-xiaomi-protocal-flag-cli': "PROTOCAL-HTTP2",
'connection': "Keep-Alive",
'accept-encoding': "gzip",
'cache-control': "no-cache",
}
try:
r = await self.session.post(url, cookies={
'userId': self.auth['user_id'],
'serviceToken': self.auth['service_token'],
}, headers={
'User-Agent': UA,
'x-xiaomi-protocal-flag-cli': 'PROTOCAL-HTTP2'
}, data={
'signature': signature,
'_nonce': nonce,
'data': data
}, timeout=5)
self._fail_count = 0
resp = await r.json(content_type=None)
if resp.get('message') == 'auth err':
_LOGGER.error("小米账号登录信息失效")
return None
elif resp.get('code') != 0:
_LOGGER.error(f"Response of {api} from cloud: {resp}")
return resp
else:
                # Note: a successful response here only means the HTTP request succeeded;
                # whether the device was actually controlled depends on the code inside result
_LOGGER.info(f"Response of {api} from cloud: {resp}")
return resp
except (asyncio.TimeoutError, ClientConnectorError) as ex:
if self._fail_count < 3 and api == "/miotspec/prop/get":
self._fail_count += 1
_LOGGER.info(f"Error while requesting MIoT api {api} : {ex} ({self._fail_count})")
else:
_LOGGER.error(f"Error while requesting MIoT api {api} : {ex}")
except:
_LOGGER.exception(f"Can't request MIoT api")
async def request_rpc(self, did, method, params: str = "", server: str = None):
data = json.dumps({
"id": 1,
"method": method,
"params": params,
}, separators=(',', ':'))
return await self.request_miot_api(f'/home/rpc/{did}', data, server)
async def get_props(self, params: str = "", server: str = None, *, use_rpc = False):
if not use_rpc:
return await self.request_miot_api('/miotspec/prop/get', params, server)
else:
p = json.loads(params).get('params')
if p:
_LOGGER.warn(p)
if 'did' in p[0]:
did = p[0]['did']
return await self.request_rpc(did, "get_properties", p, server)
_LOGGER.error("Need did!")
return None
async def set_props(self, params: str = "", server: str = None, *, use_rpc = False):
if not use_rpc:
return await self.request_miot_api('/miotspec/prop/set', params, server)
else:
p = json.loads(params).get('params')
if p:
if 'did' in p[0]:
did = p[0]['did']
return await self.request_rpc(did, "set_properties", p, server)
_LOGGER.error("Need did!")
return None
async def call_action(self, params: str = "", server: str = None, *, use_rpc = False):
return await self.request_miot_api('/miotspec/action', params, server)
async def get_user_device_data(self, did: str, key, type_, server: str = None, *, limit=5):
data = {
"uid": self.auth['user_id'],
"did": did,
"time_end": 9999999999,
"time_start": 0,
"limit": limit,
"key": key,
"type": type_,
}
params = json.dumps(data, separators=(',', ':'))
return await self.request_miot_api('/user/get_user_device_data', params, server)
def get_random_string(length: int):
seq = string.ascii_uppercase + string.digits
return ''.join((random.choice(seq) for _ in range(length)))
def gen_nonce() -> str:
"""Time based nonce."""
nonce = os.urandom(8) + int(time.time() / 60).to_bytes(4, 'big')
return base64.b64encode(nonce).decode()
def gen_signed_nonce(ssecret: str, nonce: str) -> str:
"""Nonce signed with ssecret."""
m = hashlib.sha256()
m.update(base64.b64decode(ssecret))
m.update(base64.b64decode(nonce))
return base64.b64encode(m.digest()).decode()
def gen_signature(url: str, signed_nonce: str, nonce: str, data: str) -> str:
"""Request signature based on url, signed_nonce, nonce and data."""
sign = '&'.join([url, signed_nonce, nonce, 'data=' + data])
signature = hmac.new(key=base64.b64decode(signed_nonce),
msg=sign.encode(),
digestmod=hashlib.sha256).digest()
return base64.b64encode(signature).decode()
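# Illustrative sketch, not part of the original integration: how the three
# helpers above combine into the signed form of a single cloud request
# (mirroring get_devices/request_miot_api). 'ssecurity', the api path and the
# payload below are placeholders, not real credentials.
def _example_signed_form(ssecurity, api='/home/device_list',
                         data='{"getVirtualModel":false,"getHuamiDevices":0}'):
    nonce = gen_nonce()
    signed_nonce = gen_signed_nonce(ssecurity, nonce)
    signature = gen_signature(api, signed_nonce, nonce, data)
    # this dict is what gets POSTed as form data to the api_base + api URL
    return {'signature': signature, '_nonce': nonce, 'data': data}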
|
tests/test_dask_image/test_ndfilters/test_cupy_ndfilters.py | anlavandier/dask-image | 144 | 11078351 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import dask.array as da
import numpy as np
import pytest
import dask_image.ndfilters
cupy = pytest.importorskip("cupy", minversion="7.7.0")
@pytest.fixture
def array():
s = (10, 10)
a = da.from_array(cupy.arange(int(np.prod(s)),
dtype=cupy.float32).reshape(s), chunks=5)
return a
@pytest.mark.cupy
@pytest.mark.parametrize("func", [
dask_image.ndfilters.convolve,
dask_image.ndfilters.correlate,
])
def test_cupy_conv(array, func):
"""Test convolve & correlate filters with cupy input arrays."""
weights = cupy.ones(array.ndim * (3,), dtype=cupy.float32)
result = func(array, weights)
result.compute()
@pytest.mark.cupy
@pytest.mark.parametrize("func", [
dask_image.ndfilters.laplace,
])
def test_cupy_diff(array, func):
result = func(array)
result.compute()
@pytest.mark.cupy
@pytest.mark.parametrize("func", [
dask_image.ndfilters.prewitt,
dask_image.ndfilters.sobel,
])
def test_cupy_edge(array, func):
result = func(array)
result.compute()
@pytest.mark.cupy
@pytest.mark.parametrize("func", [
dask_image.ndfilters.gaussian_filter,
dask_image.ndfilters.gaussian_gradient_magnitude,
dask_image.ndfilters.gaussian_laplace,
])
def test_cupy_gaussian(array, func):
sigma = 1
result = func(array, sigma)
result.compute()
@pytest.mark.parametrize(
"size, footprint",
[
(1, None),
((1, 1), None),
(None, np.ones((1, 1))),
]
)
def test_cupy_generic(array, size, footprint):
my_sum = cupy.ReductionKernel(
'T x', 'T out', 'x', 'a + b', 'out = a', '0', 'my_sum')
result = dask_image.ndfilters.generic_filter(array, my_sum, size=size,
footprint=footprint)
result.compute()
@pytest.mark.cupy
@pytest.mark.parametrize("func, extra_arg, size", [
(dask_image.ndfilters.minimum_filter, None, 3),
(dask_image.ndfilters.median_filter, None, 3),
(dask_image.ndfilters.maximum_filter, None, 3),
(dask_image.ndfilters.rank_filter, 5, 3),
(dask_image.ndfilters.percentile_filter, 50, 3),
])
def test_cupy_order(array, func, extra_arg, size):
if extra_arg is not None:
result = func(array, extra_arg, size=size)
else:
result = func(array, size=size)
result.compute()
@pytest.mark.cupy
@pytest.mark.parametrize("func", [
dask_image.ndfilters.uniform_filter,
])
def test_cupy_smooth(array, func):
result = func(array)
result.compute()
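# Illustrative note, not an original test: the whole module is skipped when
# CuPy is unavailable (pytest.importorskip above) and most tests carry the
# 'cupy' marker, so on a GPU machine they would typically be selected with
# something like:
#
#   pytest -m cupy tests/test_dask_image/test_ndfilters/test_cupy_ndfilters.py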
|
tool/msdf-atlas-gen/json-atlas-splitter.py | caogtaa/CCCustomVertexFomat | 123 | 11078358 | <filename>tool/msdf-atlas-gen/json-atlas-splitter.py
'''
Author: GT<<EMAIL>>
Date: 2020-12-24 11:14:35
LastEditors: GT<<EMAIL>>
LastEditTime: 2021-08-28 17:10:57
'''
import os
import shutil
import cv2
import codecs
import json
import numpy as np
import copy
import fire
from enum import IntEnum
from os import mkdir, path, walk
cur_dir = os.path.dirname(os.path.abspath(__file__))
root_dir = os.path.join(cur_dir, os.pardir, os.pardir)
class Splitter(object):
'''break atlas into standalone files'''
def list_png_files(self, dir, output):
root, sub_dirs, file_names = next(walk(dir))
output.extend([os.path.join(root, x) for x in file_names if os.path.splitext(x)[1] == '.png'])
for sub_dir in sub_dirs:
self.list_png_files(os.path.join(root, sub_dir), output)
def dilate_dir(self, dir):
# files = []
# for (dir_path, dir_names, file_names) in walk(dir):
# files.extend(file_names)
file_paths = []
self.list_png_files(dir, file_paths)
print('\n'.join(file_paths))
for file_path in file_paths:
self.dilate(file_path, file_path)
def dilate(self, input_path, output_path):
print("[DILATOR] Dilating %s" % input_path)
# to support Unicode path, do not use cv2.imread directly
origin_data = cv2.imdecode(
np.fromfile(input_path, dtype=np.uint8),
cv2.IMREAD_UNCHANGED)
# origin_data = cv2.imread(input_path, -1)
row, col, channel = origin_data.shape
if channel == 3:
# do not process if image has no alpha channel
print("[DILATOR] Texture %s has no alpha channel, ignore" % input_path)
return
result = copy.deepcopy(origin_data)
for i in range(0, row):
for k in range(0, col):
color = origin_data[i][k]
if color[3] >= 3:
# alpha already larger than threshold
continue
# pick colors from 8 directions, ignore colors whose alpha < 30
                # doing it this way, repeatedly dilating the same image will not keep growing the border
r = 0
g = 0
b = 0
count = 0
for x in range(-1, 2):
i_x = i + x
if i_x < 0 or i_x >= row:
continue
for y in range(-1, 2):
if x == 0 and y == 0:
# ignore center point
continue
k_y = k + y
if k_y < 0 or k_y >= col:
continue
c2 = origin_data[i_x][k_y]
if c2[3] < 30:
continue
# this color is considerable
count += 1
r += c2[0]
g += c2[1]
b += c2[2]
# end 8 directions check
if count > 0:
# use average color from neighbors
# alpha = 3
out = result[i][k]
out[0] = r / count
out[1] = g / count
out[2] = b / count
out[3] = 3
                    # test code (blue outline) (BGR mode)
# out[0] = 255
# out[1] = 0
# out[2] = 0
# out[3] = 128
old_size = os.path.getsize(input_path)
# to support Unicode path, do not use cv2.imwrite directly
# cv2.imwrite(output_path, result, [cv2.IMWRITE_PNG_COMPRESSION, 9])
is_success, result_buf = cv2.imencode(".png", result, [cv2.IMWRITE_PNG_COMPRESSION, 9])
if is_success:
result_buf.tofile(output_path)
print("[DILATOR] Dilate '%s'->'%s' finished" % (input_path, output_path))
new_size = os.path.getsize(output_path)
print("[DILATOR] After dialtion file size %sK -> %sK" % (old_size / 1000, new_size / 1000))
else:
print("[DIALTOR] Encode image %s failed" % input_path)
def for_8_dirs(self, img_data, pred, dir_cb):
row, col, channel = img_data.shape
        # iterate over the image and mark the r channel of the mask edges red
for i in range(0, row):
for k in range(0, col):
color_c = img_data[i][k]
if not pred(color_c):
continue
for x in range(-1, 2):
i_x = i + x
if i_x < 0 or i_x >= row:
continue
for y in range(-1, 2):
if x == 0 and y == 0:
# ignore center point
continue
k_y = k + y
if k_y < 0 or k_y >= col:
continue
color_dir = img_data[i_x][k_y]
dir_cb(color_c, color_dir)
def sdf_bfs(self, img_data):
row, col, channel = img_data.shape
visited = np.zeros(shape=(row, col), dtype=np.uint8)
qu = np.zeros(shape=(row * col, 2), dtype=np.uint16) # qu[k] = (x, y)
qlen = 0
for i in range(0, row):
for k in range(0, col):
color_c = img_data[i][k]
if color_c[2] > 0:
# has red component, record it
visited[i][k] = 1
qu[qlen][0] = i
qu[qlen][1] = k
qlen += 1
index = 0 # iterator
while index < qlen:
pos = qu[index]
i = pos[0]
k = pos[1]
for x in range(-1, 2):
i_x = i + x
if i_x < 0 or i_x >= row:
continue
for y in range(-1, 2):
if x == 0 and y == 0:
# ignore center point
continue
k_y = k + y
if k_y < 0 or k_y >= col:
continue
if img_data[i_x][k_y][3] <= 3:
# transparent pixel, means not inside mask
continue
if visited[i_x][k_y] == 1:
# already visited
continue
r_v = img_data[i][k][2]
if r_v == 255:
img_data[i_x][k_y][2] = 255
else:
img_data[i_x][k_y][2] = r_v + 1
visited[i_x][k_y] = 1
qu[qlen][0] = i_x
qu[qlen][1] = k_y
qlen += 1
index += 1
# write back to img_data
vvv = True
def sdf(self, input_path, output_path):
print("[DILATOR] Calculate SDF %s" % input_path)
# to support Unicode path, do not use cv2.imread directly
origin_data = cv2.imdecode(
np.fromfile(input_path, dtype=np.uint8),
cv2.IMREAD_UNCHANGED)
# origin_data = cv2.imread(input_path, -1)
row, col, channel = origin_data.shape
if channel == 3:
# do not process if image has no alpha channel
print("[DILATOR] Texture %s has no alpha channel, ignore" % input_path)
return
result = copy.deepcopy(origin_data)
def dye_red_edge(c0, c1):
if c1[3] > 3:
                c1[2] = 128  # 128 is the base value
        # dye the edges of the image
self.for_8_dirs(result, lambda c: c[3] <= 3, dye_red_edge)
self.sdf_bfs(result)
# def spread_red(c0, c1):
# if c1[3] > 3 and c1[2] == 0:
# c1[2] = c0[2] + 1
# for k in range(0, 20):
        #     # spread red outward from pixels whose r channel > 0
# self.for_8_dirs(result, lambda c: c[2] > 0, spread_red)
is_success, result_buf = cv2.imencode(".png", result, [cv2.IMWRITE_PNG_COMPRESSION, 9])
if is_success:
result_buf.tofile(output_path)
else:
print("[DIALTOR] Calculate SDF %s failed" % input_path)
def split(self, json_path, atlas_path, out_dir):
if not os.path.exists(out_dir):
os.makedirs(out_dir)
with open(json_path, "r") as input:
content = input.read()
atlas_data = cv2.imdecode(
np.fromfile(atlas_path, dtype=np.uint8),
cv2.IMREAD_UNCHANGED)
# origin_data = cv2.imread(input_path, -1)
row, col, channel = atlas_data.shape
atlas_info = json.loads(content)
glyphs = atlas_info["glyphs"]
for c in glyphs:
code = c["unicode"]
bounds = c["atlasBounds"]
self.pick_glyph_from_atlas(atlas_data, row, bounds, path.join(out_dir, "%s.png" % code))
def pick_glyph_from_atlas(self, atlas_data, height, bounds, out_path):
# todo reverse Y
l = int(bounds["left"])
r = int(bounds["right"])
t = int(height-bounds["top"])
b = int(height-bounds["bottom"])
glyph_data = atlas_data[t:b+1,l:r+1]
is_success, result_buf = cv2.imencode(".png", glyph_data, [cv2.IMWRITE_PNG_COMPRESSION, 0])
if is_success:
result_buf.tofile(out_path)
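    # Illustrative note, not part of the original tool: split() assumes the
    # msdf-atlas-gen JSON layout sketched below, i.e. a top-level "glyphs" list
    # whose entries carry a "unicode" code point and pixel-space "atlasBounds".
    # The numbers are made up for the example.
    #
    #   {
    #     "glyphs": [
    #       {"unicode": 65,
    #        "atlasBounds": {"left": 1.5, "bottom": 2.5, "right": 30.5, "top": 40.5}}
    #     ]
    #   }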
if __name__ == "__main__":
# fire.Fire(Splitter)
Splitter().split(
path.join(cur_dir, "msdf.json"),
path.join(cur_dir, "msdf.png"),
path.join(cur_dir, "out"))
|
tests/tmp.py | Pandinosaurus/d6tpipe | 184 | 11078378 | <reponame>Pandinosaurus/d6tpipe<filename>tests/tmp.py<gh_stars>100-1000
import d6tpipe
api = d6tpipe.APIClient(profile='utest-local')
api = d6tpipe.APIClient(profile='utest-d6tdev')
api = d6tpipe.APIClient(profile='utest-dev', filecfg='~/d6tpipe/cfg-utest-dev.json')
api2 = d6tpipe.APIClient(profile='utest-dev2', filecfg='~/d6tpipe/cfg-utest-dev.json')
|
rman_translators/rman_nurbs_translator.py | N500/RenderManForBlender | 432 | 11078382 | <reponame>N500/RenderManForBlender
from .rman_translator import RmanTranslator
from ..rman_sg_nodes.rman_sg_nurbs import RmanSgNurbs
from ..rfb_utils import object_utils
from ..rfb_utils import string_utils
from ..rfb_utils import property_utils
import bpy
import math
'''
Code reference from: https://blender.stackexchange.com/questions/34145/calculate-points-on-a-nurbs-curve-without-converting-to-mesh
'''
def macro_knotsu(nu):
return nu.order_u + nu.point_count_u + (nu.order_u - 1 if nu.use_cyclic_u else 0)
def macro_segmentsu(nu):
return nu.point_count_u if nu.use_cyclic_u else nu.point_count_u - 1
def makeknots(nu):
knots = [0.0] * (4 + macro_knotsu(nu))
flag = nu.use_endpoint_u + (nu.use_bezier_u << 1)
if nu.use_cyclic_u:
calcknots(knots, nu.point_count_u, nu.order_u, 0)
makecyclicknots(knots, nu.point_count_u, nu.order_u)
else:
calcknots(knots, nu.point_count_u, nu.order_u, flag)
return knots
def calcknots(knots, pnts, order, flag):
pnts_order = pnts + order
if flag == 1:
k = 0.0
for a in range(1, pnts_order + 1):
knots[a - 1] = k
if a >= order and a <= pnts:
k += 1.0
elif flag == 2:
if order == 4:
k = 0.34
for a in range(pnts_order):
knots[a] = math.floor(k)
k += (1.0 / 3.0)
elif order == 3:
k = 0.6
for a in range(pnts_order):
if a >= order and a <= pnts:
k += 0.5
knots[a] = math.floor(k)
else:
for a in range(pnts_order):
knots[a] = a
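# Worked example (illustrative only): for an endpoint-clamped spline with
# pnts=4 control points and order=4, flag is 1, so the branch above fills the
# first pnts+order entries with [0, 0, 0, 0, 1, 1, 1, 1], a clamped knot
# vector with no interior knots, i.e. a single Bezier-like segment.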
def makecyclicknots(knots, pnts, order):
order2 = order - 1
if order > 2:
b = pnts + order2
for a in range(1, order2):
if knots[b] != knots[b - a]:
break
if a == order2:
knots[pnts + order - 2] += 1.0
b = order
c = pnts + order + order2
for a in range(pnts + order2, c):
knots[a] = knots[a - 1] + (knots[b] - knots[b - 1])
b -= 1
def basisNurb(t, order, pnts, knots, basis, start, end):
i1 = i2 = 0
orderpluspnts = order + pnts
opp2 = orderpluspnts - 1
# this is for float inaccuracy
if t < knots[0]:
t = knots[0]
elif t > knots[opp2]:
t = knots[opp2]
# this part is order '1'
o2 = order + 1
for i in range(opp2):
if knots[i] != knots[i + 1] and t >= knots[i] and t <= knots[i + 1]:
basis[i] = 1.0
i1 = i - o2
if i1 < 0:
i1 = 0
i2 = i
i += 1
while i < opp2:
basis[i] = 0.0
i += 1
break
else:
basis[i] = 0.0
basis[i] = 0.0
# this is order 2, 3, ...
for j in range(2, order + 1):
if i2 + j >= orderpluspnts:
i2 = opp2 - j
for i in range(i1, i2 + 1):
if basis[i] != 0.0:
d = ((t - knots[i]) * basis[i]) / (knots[i + j - 1] - knots[i])
else:
d = 0.0
if basis[i + 1] != 0.0:
e = ((knots[i + j] - t) * basis[i + 1]) / (knots[i + j] - knots[i + 1])
else:
e = 0.0
basis[i] = d + e
start = 1000
end = 0
for i in range(i1, i2 + 1):
if basis[i] > 0.0:
end = i
if start == 1000:
start = i
return start, end
def nurb_make_curve(nu, resolu, stride):
EPS = 1e-6
coord_index = istart = iend = 0
coord_array = [0.0] * (3 * nu.resolution_u * macro_segmentsu(nu))
sum_array = [0] * nu.point_count_u
basisu = [0.0] * macro_knotsu(nu)
knots = makeknots(nu)
resolu = resolu * macro_segmentsu(nu)
ustart = knots[nu.order_u - 1]
uend = knots[nu.point_count_u + nu.order_u - 1] if nu.use_cyclic_u else \
knots[nu.point_count_u]
ustep = (uend - ustart) / (resolu - (0 if nu.use_cyclic_u else 1))
cycl = nu.order_u - 1 if nu.use_cyclic_u else 0
u = ustart
while resolu:
resolu -= 1
istart, iend = basisNurb(u, nu.order_u, nu.point_count_u + cycl, knots, basisu, istart, iend)
#/* calc sum */
sumdiv = 0.0
sum_index = 0
pt_index = istart - 1
for i in range(istart, iend + 1):
if i >= nu.point_count_u:
pt_index = i - nu.point_count_u
else:
pt_index += 1
sum_array[sum_index] = basisu[i] * nu.points[pt_index].co[3]
sumdiv += sum_array[sum_index]
sum_index += 1
if (sumdiv != 0.0) and (sumdiv < 1.0 - EPS or sumdiv > 1.0 + EPS):
sum_index = 0
for i in range(istart, iend + 1):
sum_array[sum_index] /= sumdiv
sum_index += 1
coord_array[coord_index: coord_index + 3] = (0.0, 0.0, 0.0)
sum_index = 0
pt_index = istart - 1
for i in range(istart, iend + 1):
if i >= nu.point_count_u:
pt_index = i - nu.point_count_u
else:
pt_index += 1
if sum_array[sum_index] != 0.0:
for j in range(3):
coord_array[coord_index + j] += sum_array[sum_index] * nu.points[pt_index].co[j]
sum_index += 1
coord_index += stride
u += ustep
return coord_array
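def _example_eval_spline(nu, stride=3):
    # Illustrative helper sketch, not used by the translator below: evaluate a
    # Blender NURBS spline (object.data.splines[i], assumed to be of type
    # 'NURBS') with nurb_make_curve() and regroup the flat coordinate list
    # into (x, y, z) tuples.
    coords = nurb_make_curve(nu, nu.resolution_u, stride)
    return [tuple(coords[i:i + stride]) for i in range(0, len(coords), stride)]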
class RmanNurbsTranslator(RmanTranslator):
def __init__(self, rman_scene):
super().__init__(rman_scene)
self.bl_type = 'SURFACE'
def export(self, ob, db_name):
sg_node = self.rman_scene.sg_scene.CreateNuPatch(db_name)
rman_sg_nurbs = RmanSgNurbs(self.rman_scene, sg_node, db_name)
return rman_sg_nurbs
def export_deform_sample(self, rman_sg_nurbs, ob, time_sample):
pass
def update(self, ob, rman_sg_nurbs):
spline = ob.data.splines[0]
nu = spline.point_count_u
nv = spline.point_count_v
uorder = spline.order_u
vorder = spline.order_v
if uorder == 0 or vorder == 0:
return
P = []
for pt in spline.points:
P.append(pt.co)
'''
# we currently don't support use_cyclic_u and use_cuclic_v options
pnts_order = spline.point_count_u + spline.order_u + (spline.order_u - 1 if spline.use_cyclic_u else 0)
uknots = [0.0] * pnts_order
if spline.use_cyclic_u:
calcknots(uknots, spline.point_count_u, spline.order_u, 0)
makecyclicknots(uknots, spline.point_count_u, spline.order_u)
else:
flag = spline.use_endpoint_u + (spline.use_bezier_u << 1)
calcknots(uknots, spline.point_count_u, spline.order_u, flag)
pnts_order = spline.point_count_v + spline.order_v + (spline.order_v - 1 if spline.use_cyclic_v else 0)
vknots = [0.0] * pnts_order
if spline.use_cyclic_v:
calcknots(vknots, spline.point_count_v, spline.order_v, 0)
makecyclicknots(vknots, spline.point_count_v, spline.order_v)
else:
flag = spline.use_endpoint_v + (spline.use_bezier_v << 1)
calcknots(vknots, spline.point_count_v, spline.order_v, flag)
'''
pnts_order = spline.point_count_u + spline.order_u
uknots = [0.0] * pnts_order
flag = spline.use_endpoint_u + (spline.use_bezier_u << 1)
calcknots(uknots, spline.point_count_u, spline.order_u, flag)
pnts_order = spline.point_count_v + spline.order_v
vknots = [0.0] * pnts_order
flag = spline.use_endpoint_v + (spline.use_bezier_v << 1)
calcknots(vknots, spline.point_count_v, spline.order_v, flag)
rman_sg_nurbs.sg_node.Define(nu, uorder, nv, vorder)
primvar = rman_sg_nurbs.sg_node.GetPrimVars()
primvar.SetHpointDetail(self.rman_scene.rman.Tokens.Rix.k_Pw, P, "vertex")
primvar.SetFloatArray(self.rman_scene.rman.Tokens.Rix.k_Ri_uknot, uknots, len(uknots))
primvar.SetFloatArray(self.rman_scene.rman.Tokens.Rix.k_Ri_vknot, vknots, len(vknots))
rman_sg_nurbs.sg_node.SetPrimVars(primvar)
|
pyshtools/shclasses/shgradient.py | mjc87/SHTOOLS | 251 | 11078388 | <reponame>mjc87/SHTOOLS
"""
Class for grids of the two components of the horizontal gradient.
"""
import matplotlib as _mpl
import matplotlib.pyplot as _plt
import copy as _copy
import xarray as _xr
from .shgrid import SHGrid as _SHGrid
class SHGradient(object):
"""
Class for grids of the two components of the horizontal gradient of a
scalar function. The class is initialized from a class instance of
SHCoeffs using the method gradient().
Attributes:
theta : SHGrid class instance of the theta component of the
horizontal gradient.
phi : SHGrid class instance of the phi component of the
horizontal gradient.
units : The units of the gridded data.
lmax : The maximum spherical harmonic degree resolvable by the
grids.
lmax_calc : The maximum spherical harmonic degree of the function
used in creating the grids.
nlat, nlon : The number of latitude and longitude bands in the grids.
n : The number of samples in latitude.
sampling : The longitudinal sampling for Driscoll and Healy grids.
Either 1 for equally sampled grids (nlat=nlon) or 2 for
equally spaced grids in degrees.
extend : True if the grid contains the redundant column for 360 E
and the unnecessary row for 90 S.
Methods:
plot() : Plot the two components of the horizontal gradient.
plot_theta() : Plot the theta component of the horizontal gradient.
plot_phi() : Plot the phi component of the horizontal gradient.
to_xarray() : Return an xarray DataSet of all gridded data.
copy() : Return a copy of the class instance.
    info() : Print a summary of the data stored in the SHGradient
instance.
"""
def __init__(self, theta, phi, lmax, lmax_calc, units=None,
pot_units=None, epoch=None):
"""
Initialize the SHGradient class.
"""
self.theta = _SHGrid.from_array(theta, grid='DH', units=units)
self.phi = _SHGrid.from_array(phi, grid='DH', units=units)
self.grid = self.theta.grid
self.sampling = self.theta.sampling
self.nlat = self.theta.nlat
self.nlon = self.theta.nlon
self.n = self.theta.n
self.extend = self.theta.extend
self.lmax = lmax
self.lmax_calc = lmax_calc
        self.units = units
        self.epoch = epoch
def copy(self):
"""
Return a deep copy of the class instance.
Usage
-----
copy = x.copy()
"""
return _copy.deepcopy(self)
def info(self):
"""
Print a summary of the data stored in the SHGradient class instance.
Usage
-----
x.info()
"""
print(repr(self))
def __repr__(self):
str = ('grid = {:s}\n'
'nlat = {:d}\n'
'nlon = {:d}\n'
'n = {:d}\n'
'sampling = {:d}\n'
'extend = {}\n'
'lmax = {:d}\n'
'lmax_calc = {:d}\n'
'units = {:s}'
.format(self.grid, self.nlat, self.nlon, self.n, self.sampling,
self.extend, self.lmax, self.lmax_calc,
repr(self.units)))
return str
def plot_theta(self, projection=None, tick_interval=[30, 30],
minor_tick_interval=[None, None], xlabel=None, ylabel=None,
title=None, titlesize=None, colorbar='right',
cmap='viridis', cmap_limits=None, cmap_reverse=False,
cb_triangles='neither', cb_label='$\\theta$ component',
cb_tick_interval=None, grid=False, axes_labelsize=None,
tick_labelsize=None, show=True, ax=None, cb_offset=None,
cb_minor_tick_interval=None, ticks='WSen', cb_ylabel=None,
fname=None, cb_width=None):
"""
Plot the theta component of the horizontal gradient.
Usage
-----
x.plot_theta([projection, tick_interval, minor_tick_interval, ticks,
xlabel, ylabel, title, colorbar, cmap, cmap_limits,
cmap_reverse, cb_triangles, cb_label, cb_ylabel,
cb_tick_interval, cb_minor_tick_interval, cb_offset,
cb_width, grid, titlesize, axes_labelsize,
tick_labelsize, ax, show, fname])
Parameters
----------
projection : Cartopy projection class, optional, default = None
The Cartopy projection class used to project the gridded data,
for Driscoll and Healy sampled grids only.
tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
minor_tick_interval : list or tuple, optional, default = [None, None]
Intervals to use when plotting the minor x and y ticks. If set to
None, minor ticks will not be plotted.
ticks : str, optional, default = 'WSen'
Specify which axes should have ticks drawn and annotated. Capital
letters plot the ticks and annotations, whereas small letters plot
only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
and north boundaries of the plot.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
title : str or list, optional, default = None
The title of the plot.
colorbar : str, optional, default = 'right'
Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
cmap : str, optional, default = 'viridis'
The color map to use when plotting the data and colorbar.
cmap_limits : list, optional, default = [self.min(), self.max()]
Set the lower and upper limits of the data used by the colormap,
and optionally an interval for each color band. If the interval is
specified, the number of discrete colors will be
(cmap_limits[1]-cmap_limits[0])/cmap_limits[2].
cmap_reverse : bool, optional, default = False
Set to True to reverse the sense of the color progression in the
color table.
cb_triangles : str, optional, default = 'neither'
Add triangles to the edges of the colorbar for minimum and maximum
values. Can be 'neither', 'both', 'min', or 'max'.
cb_label : str, optional, default = '$\\theta$ component'
Text label for the colorbar.
cb_ylabel : str, optional, default = None
Text label for the y axis of the colorbar
cb_tick_interval : float, optional, default = None
Colorbar major tick and annotation interval.
cb_minor_tick_interval : float, optional, default = None
Colorbar minor tick interval.
cb_offset : float or int, optional, default = None
Offset of the colorbar from the map edge in points. If None,
the offset will be calculated automatically.
cb_width : float, optional, default = None
Width of the colorbar in percent with respect to the width of the
respective image axis. Defaults are 2.5 and 5 for vertical and
horizontal colorbars, respectively.
grid : bool, optional, default = False
If True, plot major grid lines.
titlesize : int, optional, default = None
The font size of the title.
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if ax is not specified, save the image to the
specified file.
"""
return self.theta.plot(projection=projection,
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, title=title,
titlesize=titlesize, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_offset=cb_offset,
cb_triangles=cb_triangles, cb_label=cb_label,
cb_tick_interval=cb_tick_interval, grid=grid,
axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks,
cb_minor_tick_interval=cb_minor_tick_interval,
tick_labelsize=tick_labelsize, ax=ax,
cb_width=cb_width,
show=show, fname=fname)
def plot_phi(self, projection=None, tick_interval=[30, 30],
minor_tick_interval=[None, None], xlabel=None, ylabel=None,
title=None, titlesize=None, colorbar='right',
cmap='viridis', cmap_limits=None, cmap_reverse=False,
cb_triangles='neither', cb_label='$\\phi$ component',
cb_tick_interval=None, grid=False, axes_labelsize=None,
tick_labelsize=None, show=True, ax=None, cb_offset=None,
cb_minor_tick_interval=None, ticks='WSen', cb_ylabel=None,
cb_width=None, fname=None):
"""
Plot the phi component of the horizontal gradient.
Usage
-----
x.plot_phi([projection, tick_interval, minor_tick_interval, ticks,
xlabel, ylabel, title, colorbar, cmap, cmap_limits,
cmap_reverse, cb_triangles, cb_label, cb_ylabel,
cb_tick_interval, cb_minor_tick_interval, cb_offset,
cb_width, grid, titlesize, axes_labelsize, tick_labelsize,
ax, show, fname])
Parameters
----------
projection : Cartopy projection class, optional, default = None
The Cartopy projection class used to project the gridded data,
for Driscoll and Healy sampled grids only.
tick_interval : list or tuple, optional, default = [30, 30]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
minor_tick_interval : list or tuple, optional, default = [None, None]
Intervals to use when plotting the minor x and y ticks. If set to
None, minor ticks will not be plotted.
ticks : str, optional, default = 'WSen'
Specify which axes should have ticks drawn and annotated. Capital
letters plot the ticks and annotations, whereas small letters plot
only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
and north boundaries of the plot.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
title : str or list, optional, default = None
The title of the plot.
colorbar : str, optional, default = 'right'
Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
cmap : str, optional, default = 'viridis'
The color map to use when plotting the data and colorbar.
cmap_limits : list, optional, default = [self.min(), self.max()]
Set the lower and upper limits of the data used by the colormap,
and optionally an interval for each color band. If the interval is
specified, the number of discrete colors will be
(cmap_limits[1]-cmap_limits[0])/cmap_limits[2].
cmap_reverse : bool, optional, default = False
Set to True to reverse the sense of the color progression in the
color table.
cb_triangles : str, optional, default = 'neither'
Add triangles to the edges of the colorbar for minimum and maximum
values. Can be 'neither', 'both', 'min', or 'max'.
cb_label : str, optional, default = '$\\phi$ component'
Text label for the colorbar.
cb_ylabel : str, optional, default = None
Text label for the y axis of the colorbar
cb_tick_interval : float, optional, default = None
Colorbar major tick and annotation interval.
cb_minor_tick_interval : float, optional, default = None
Colorbar minor tick interval.
cb_offset : float or int, optional, default = None
Offset of the colorbar from the map edge in points. If None,
the offset will be calculated automatically.
cb_width : float, optional, default = None
Width of the colorbar in percent with respect to the width of the
respective image axis. Defaults are 2.5 and 5 for vertical and
horizontal colorbars, respectively.
grid : bool, optional, default = False
If True, plot major grid lines.
titlesize : int, optional, default = None
The font size of the title.
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
ax : matplotlib axes object, optional, default = None
A single matplotlib axes object where the plot will appear.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if ax is not specified, save the image to the
specified file.
"""
return self.phi.plot(projection=projection,
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, title=title,
titlesize=titlesize, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_offset=cb_offset,
cb_triangles=cb_triangles, cb_label=cb_label,
cb_tick_interval=cb_tick_interval, grid=grid,
axes_labelsize=axes_labelsize,
cb_ylabel=cb_ylabel, ticks=ticks,
cb_width=cb_width,
cb_minor_tick_interval=cb_minor_tick_interval,
tick_labelsize=tick_labelsize, ax=ax,
show=show, fname=fname)
def plot(self, projection=None, tick_interval=[60, 30],
minor_tick_interval=[None, None], xlabel='Longitude',
ylabel='Latitude', colorbar='bottom', cmap='viridis',
cmap_limits=None, cmap_reverse=False, cb_triangles='neither',
cb_tick_interval=None, grid=False, axes_labelsize=9,
tick_labelsize=8, show=True, cb_offset=None,
cb_minor_tick_interval=None, ticks='WSen', cb_ylabel=None,
cb_width=None, fname=None):
"""
Plot the two vector components of the horizontal gradient.
Usage
-----
x.plot([projection, tick_interval, minor_tick_interval, ticks, xlabel,
ylabel, colorbar, cmap, cmap_limits, cmap_reverse,
cb_triangles, cb_ylabel, cb_tick_interval,
cb_minor_tick_interval, cb_offset, cb_width, grid,
axes_labelsize, tick_labelsize, show, fname])
Parameters
----------
projection : Cartopy projection class, optional, default = None
The Cartopy projection class used to project the gridded data,
for Driscoll and Healy sampled grids only.
tick_interval : list or tuple, optional, default = [60, 30]
Intervals to use when plotting the x and y ticks. If set to None,
ticks will not be plotted.
minor_tick_interval : list or tuple, optional, default = [None, None]
Intervals to use when plotting the minor x and y ticks. If set to
None, minor ticks will not be plotted.
ticks : str, optional, default = 'WSen'
Specify which axes should have ticks drawn and annotated. Capital
letters plot the ticks and annotations, whereas small letters plot
only the ticks. 'W', 'S', 'E', and 'N' denote the west, south, east
and north boundaries of the plot.
xlabel : str, optional, default = 'longitude'
Label for the longitude axis.
ylabel : str, optional, default = 'latitude'
Label for the latitude axis.
colorbar : str, optional, default = 'bottom'
Plot a colorbar along the 'top', 'right', 'bottom', or 'left' axis.
cmap : str, optional, default = 'viridis'
The color map to use when plotting the data and colorbar.
cmap_limits : list, optional, default = [self.min(), self.max()]
Set the lower and upper limits of the data used by the colormap,
and optionally an interval for each color band. If the interval is
specified, the number of discrete colors will be
(cmap_limits[1]-cmap_limits[0])/cmap_limits[2].
cmap_reverse : bool, optional, default = False
Set to True to reverse the sense of the color progression in the
color table.
cb_triangles : str, optional, default = 'neither'
Add triangles to the edges of the colorbar for minimum and maximum
values. Can be 'neither', 'both', 'min', or 'max'.
cb_ylabel : str, optional, default = None
Text label for the y axis of the colorbar
cb_tick_interval : float, optional, default = None
Colorbar major tick and annotation interval.
cb_minor_tick_interval : float, optional, default = None
Colorbar minor tick interval.
cb_offset : float or int, optional, default = None
Offset of the colorbar from the map edge in points. If None,
the offset will be calculated automatically.
cb_width : float, optional, default = None
Width of the colorbar in percent with respect to the width of the
respective image axis. Defaults are 2.5 and 5 for vertical and
horizontal colorbars, respectively.
grid : bool, optional, default = False
If True, plot major grid lines.
axes_labelsize : int, optional, default = None
The font size for the x and y axes labels.
tick_labelsize : int, optional, default = None
The font size for the x and y tick labels.
show : bool, optional, default = True
If True, plot the image to the screen.
fname : str, optional, default = None
If present, and if ax is not specified, save the image to the
specified file.
"""
if colorbar is not None:
if colorbar in set(['bottom', 'top']):
scale = 0.4
else:
scale = 0.25
else:
scale = 0.3
figsize = (_mpl.rcParams['figure.figsize'][0],
_mpl.rcParams['figure.figsize'][0] * scale)
fig, ax = _plt.subplots(1, 2, figsize=figsize)
self.plot_theta(projection=projection, ax=ax.flat[0],
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, title=None,
titlesize=None, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_triangles=cb_triangles,
cb_tick_interval=cb_tick_interval,
grid=grid, axes_labelsize=axes_labelsize,
tick_labelsize=tick_labelsize, cb_offset=cb_offset,
cb_ylabel=cb_ylabel, ticks=ticks,
cb_minor_tick_interval=cb_minor_tick_interval,
cb_width=cb_width, show=show, fname=None)
self.plot_phi(projection=projection, ax=ax.flat[1],
tick_interval=tick_interval,
minor_tick_interval=minor_tick_interval,
xlabel=xlabel, ylabel=ylabel, title=None,
titlesize=None, colorbar=colorbar,
cmap=cmap, cmap_limits=cmap_limits,
cmap_reverse=cmap_reverse, cb_triangles=cb_triangles,
cb_tick_interval=cb_tick_interval,
grid=grid, axes_labelsize=axes_labelsize,
tick_labelsize=tick_labelsize, cb_offset=cb_offset,
cb_ylabel=cb_ylabel, ticks=ticks,
cb_minor_tick_interval=cb_minor_tick_interval,
cb_width=cb_width, show=show, fname=None)
fig.tight_layout(pad=0.5)
if fname is not None:
fig.savefig(fname)
return fig, ax
def to_xarray(self, title='', description='',
comment='pyshtools grid'):
"""
Return the horizontal gradient gridded data as an xarray DataSet.
Usage
-----
x.to_xarray([title, description, comment])
Parameters
----------
title : str, optional, default = ''
Title of the dataset.
description : str, optional, default = ''
Description of the dataset ('Remark' in gmt grd files).
comment : str, optional, default = 'pyshtools grid'
Additional information about how the data were generated.
"""
attrs = {'title': title,
'description': description,
'comment': comment,
'nlat': self.nlat,
'nlon': self.nlon,
'lmax': self.lmax,
'grid': self.grid,
'lmax_calc': self.lmax_calc,
'sampling': self.sampling,
'n': self.n,
'extend': repr(self.extend)
}
if self.epoch is not None:
attrs['epoch'] = self.epoch
_theta = self.theta.to_xarray(title='gradient (theta)',
long_name='theta component',
units=repr(self.units))
_phi = self.phi.to_xarray(title='gradient (phi)',
                                  long_name='phi component',
units=repr(self.units))
return _xr.Dataset({'theta': _theta, 'phi': _phi}, attrs=attrs)
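# Illustrative usage sketch, not part of the class: as the class docstring
# states, instances come from SHCoeffs.gradient(). The power spectrum below is
# an arbitrary example and pyshtools is assumed to be importable as pysh.
#
#   import numpy as np
#   import pyshtools as pysh
#   power = 1. / (1. + np.arange(31))**2   # arbitrary example power spectrum
#   clm = pysh.SHCoeffs.from_random(power)
#   grad = clm.gradient()                  # returns an SHGradient instance
#   fig, ax = grad.plot()                  # theta and phi components side by side
#   ds = grad.to_xarray()                  # xarray Dataset with 'theta' and 'phi'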
|
third_party/spider/baselines/nl2code/nn/utils/__init__.py | chenyangh/tensor2struct-public | 341 | 11078397 | <filename>third_party/spider/baselines/nl2code/nn/utils/__init__.py
__author__ = 'yinpengcheng'
|
alipay/aop/api/domain/AlipayMarketingActivityOrderRefundModel.py | antopen/alipay-sdk-python-all | 213 | 11078402 | <gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.RefundActivityInfo import RefundActivityInfo
class AlipayMarketingActivityOrderRefundModel(object):
def __init__(self):
self._buyer_id = None
self._order_no = None
self._out_biz_no = None
self._refund_activity_info_list = None
self._refund_type = None
@property
def buyer_id(self):
return self._buyer_id
@buyer_id.setter
def buyer_id(self, value):
self._buyer_id = value
@property
def order_no(self):
return self._order_no
@order_no.setter
def order_no(self, value):
self._order_no = value
@property
def out_biz_no(self):
return self._out_biz_no
@out_biz_no.setter
def out_biz_no(self, value):
self._out_biz_no = value
@property
def refund_activity_info_list(self):
return self._refund_activity_info_list
@refund_activity_info_list.setter
def refund_activity_info_list(self, value):
if isinstance(value, list):
self._refund_activity_info_list = list()
for i in value:
if isinstance(i, RefundActivityInfo):
self._refund_activity_info_list.append(i)
else:
self._refund_activity_info_list.append(RefundActivityInfo.from_alipay_dict(i))
@property
def refund_type(self):
return self._refund_type
@refund_type.setter
def refund_type(self, value):
self._refund_type = value
def to_alipay_dict(self):
params = dict()
if self.buyer_id:
if hasattr(self.buyer_id, 'to_alipay_dict'):
params['buyer_id'] = self.buyer_id.to_alipay_dict()
else:
params['buyer_id'] = self.buyer_id
if self.order_no:
if hasattr(self.order_no, 'to_alipay_dict'):
params['order_no'] = self.order_no.to_alipay_dict()
else:
params['order_no'] = self.order_no
if self.out_biz_no:
if hasattr(self.out_biz_no, 'to_alipay_dict'):
params['out_biz_no'] = self.out_biz_no.to_alipay_dict()
else:
params['out_biz_no'] = self.out_biz_no
if self.refund_activity_info_list:
if isinstance(self.refund_activity_info_list, list):
for i in range(0, len(self.refund_activity_info_list)):
element = self.refund_activity_info_list[i]
if hasattr(element, 'to_alipay_dict'):
self.refund_activity_info_list[i] = element.to_alipay_dict()
if hasattr(self.refund_activity_info_list, 'to_alipay_dict'):
params['refund_activity_info_list'] = self.refund_activity_info_list.to_alipay_dict()
else:
params['refund_activity_info_list'] = self.refund_activity_info_list
if self.refund_type:
if hasattr(self.refund_type, 'to_alipay_dict'):
params['refund_type'] = self.refund_type.to_alipay_dict()
else:
params['refund_type'] = self.refund_type
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayMarketingActivityOrderRefundModel()
if 'buyer_id' in d:
o.buyer_id = d['buyer_id']
if 'order_no' in d:
o.order_no = d['order_no']
if 'out_biz_no' in d:
o.out_biz_no = d['out_biz_no']
if 'refund_activity_info_list' in d:
o.refund_activity_info_list = d['refund_activity_info_list']
if 'refund_type' in d:
o.refund_type = d['refund_type']
return o
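    # Illustrative round-trip sketch, not part of the generated SDK; all field
    # values below are made up.
    #
    #   model = AlipayMarketingActivityOrderRefundModel()
    #   model.order_no = '2021123100000001'
    #   model.out_biz_no = 'refund-0001'
    #   model.refund_type = 'ALL'
    #   params = model.to_alipay_dict()
    #   restored = AlipayMarketingActivityOrderRefundModel.from_alipay_dict(params)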
|
DeepHash/model/dch/__init__.py | imhmhm/DeepHash | 506 | 11078409 | <reponame>imhmhm/DeepHash
from .util import Dataset
from .dch import DCH
def train(train_img, database_img, query_img, config):
model = DCH(config)
img_database = Dataset(database_img, config.output_dim)
img_query = Dataset(query_img, config.output_dim)
img_train = Dataset(train_img, config.output_dim)
model.train(img_train)
return model.save_file
def validation(database_img, query_img, config):
model = DCH(config)
img_database = Dataset(database_img, config.output_dim)
img_query = Dataset(query_img, config.output_dim)
return model.validation(img_query, img_database, config.R)
|
compiler_opt/rl/trainer_test.py | google/ml-compiler-opt | 130 | 11078414 | # coding=utf-8
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for compiler_opt.rl.trainer."""
from absl.testing.absltest import mock
import tensorflow as tf
from tf_agents.agents.behavioral_cloning import behavioral_cloning_agent
from tf_agents.networks import q_rnn_network
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import time_step
from tf_agents.trajectories import trajectory
from compiler_opt.rl import trainer
def _create_test_data(batch_size, sequence_length):
test_trajectory = trajectory.Trajectory(
step_type=tf.fill([batch_size, sequence_length], 1),
observation={
'inlining_default': tf.fill(
[batch_size, sequence_length], tf.constant(10, dtype=tf.int64))
},
action=tf.fill(
[batch_size, sequence_length], tf.constant(1, dtype=tf.int64)),
policy_info=(),
next_step_type=tf.fill([batch_size, sequence_length], 1),
reward=tf.fill([batch_size, sequence_length], 2.0),
discount=tf.fill([batch_size, sequence_length], 1.0),
)
def test_data_iterator():
while True:
yield test_trajectory
return test_data_iterator()
class TrainerTest(tf.test.TestCase):
def setUp(self):
observation_spec = {
'inlining_default':
tf.TensorSpec(dtype=tf.int64, shape=(), name='inlining_default')
}
self._time_step_spec = time_step.time_step_spec(observation_spec)
self._action_spec = tensor_spec.BoundedTensorSpec(
dtype=tf.int64,
shape=(),
minimum=0,
maximum=1,
name='inlining_decision')
self._network = q_rnn_network.QRnnNetwork(
input_tensor_spec=self._time_step_spec.observation,
action_spec=self._action_spec,
lstm_size=(40,),
preprocessing_layers={
'inlining_default': tf.keras.layers.Lambda(lambda x: x)
})
super(TrainerTest, self).setUp()
def test_trainer_initialization(self):
test_agent = behavioral_cloning_agent.BehavioralCloningAgent(
self._time_step_spec,
self._action_spec,
self._network,
tf.compat.v1.train.AdamOptimizer(),
num_outer_dims=2)
test_trainer = trainer.Trainer(
root_dir=self.get_temp_dir(), agent=test_agent)
self.assertEqual(0, test_trainer._global_step.numpy())
def test_training(self):
test_agent = behavioral_cloning_agent.BehavioralCloningAgent(
self._time_step_spec,
self._action_spec,
self._network,
tf.compat.v1.train.AdamOptimizer(),
num_outer_dims=2)
test_trainer = trainer.Trainer(
root_dir=self.get_temp_dir(), agent=test_agent)
self.assertEqual(0, test_trainer._global_step.numpy())
dataset_iter = _create_test_data(batch_size=3, sequence_length=3)
monitor_dict = {'default': {'test': 1}}
with mock.patch.object(
tf.summary, 'scalar', autospec=True) as mock_scalar_summary:
test_trainer.train(dataset_iter, monitor_dict, num_iterations=10)
self.assertEqual(
10,
sum(1 for c in mock_scalar_summary.mock_calls
if c[2]['name'] == 'test'))
self.assertEqual(10, test_trainer._global_step.numpy())
def test_training_with_multiple_times(self):
test_agent = behavioral_cloning_agent.BehavioralCloningAgent(
self._time_step_spec,
self._action_spec,
self._network,
tf.compat.v1.train.AdamOptimizer(),
num_outer_dims=2)
test_trainer = trainer.Trainer(
root_dir=self.get_temp_dir(), agent=test_agent)
self.assertEqual(0, test_trainer._global_step.numpy())
dataset_iter = _create_test_data(batch_size=3, sequence_length=3)
monitor_dict = {'default': {'test': 1}}
test_trainer.train(dataset_iter, monitor_dict, num_iterations=10)
self.assertEqual(10, test_trainer._global_step.numpy())
dataset_iter = _create_test_data(batch_size=6, sequence_length=4)
test_trainer.train(dataset_iter, monitor_dict, num_iterations=10)
self.assertEqual(20, test_trainer._global_step.numpy())
def test_inference(self):
test_agent = behavioral_cloning_agent.BehavioralCloningAgent(
self._time_step_spec,
self._action_spec,
self._network,
tf.compat.v1.train.AdamOptimizer(),
num_outer_dims=2)
test_trainer = trainer.Trainer(
root_dir=self.get_temp_dir(), agent=test_agent)
inference_batch_size = 1
random_time_step = tensor_spec.sample_spec_nest(
self._time_step_spec, outer_dims=(inference_batch_size,))
initial_policy_state = test_trainer._agent.policy.get_initial_state(
inference_batch_size)
action_outputs = test_trainer._agent.policy.action(random_time_step,
initial_policy_state)
self.assertAllEqual([inference_batch_size], action_outputs.action.shape)
action_outputs = test_trainer._agent.policy.action(random_time_step,
action_outputs.state)
self.assertAllEqual([inference_batch_size], action_outputs.action.shape)
if __name__ == '__main__':
tf.test.main()
|
agentnet/resolver/probabilistic.py | mraihan19/AgentNet | 337 | 11078454 | <gh_stars>100-1000
import theano.tensor as T
import theano.tensor.shared_randomstreams as random_streams
from .base import BaseResolver
class ProbabilisticResolver(BaseResolver):
"""
instance, that:
- determines which action should be taken given policy
- samples actions with probabilities given by input layer
"""
def __init__(self, incoming, assume_normalized=False, seed=1234, output_dtype='int32',
name='ProbabilisticResolver'):
"""
:param incoming: a lasagne layer that outputs action probability vectors
WARNING! We assume that incoming probabilities are all nonnegative even if assume_normalized=False.
:type incoming: lasagne.layers.Layer
:param assume_normalized: if set to True, the incoming layer is assumed to
return outputs that add up to 1 (e.g. softmax output) along last axis
:type assume_normalized: bool
:param seed: - random seed
:type seed: int
:action_dtype: type of action (usually (u)int 32 or 64)
:type action_dtype: string or dtype
:param name: layer name (using lasagne conventions)
:type name: string
"""
        # store settings and create a random stream used for sampling actions
self.assume_normalized = assume_normalized
self.rng = random_streams.RandomStreams(seed)
super(ProbabilisticResolver, self).__init__(incoming, name=name,output_dtype=output_dtype)
def get_output_for(self, policy, greedy=False, **kwargs):
"""
        picks actions stochastically, with probabilities given by the policy
        :param policy: probabilities for all actions (e.g. a2c actor policy or standardized Q-values)
        :type policy: tensor of float[batch_id, action_id]
        :returns: ids of the picked actions
        :rtype: vector of int[batch_id]
"""
if greedy:
# greedy branch
chosen_action_ids = T.argmax(policy, axis=-1).astype(self.output_dtype)
else:
# probabilistic branch
batch_size, n_actions = policy.shape
if self.assume_normalized:
probas = policy
else:
probas = policy / T.sum(policy, axis=1, keepdims=True)
# p1, p1+p2, p1+p2+p3, ... 1
cum_probas = T.cumsum(probas, axis=1)
batch_randomness = self.rng.uniform(low=0., high=1., size=[batch_size, 1])
            # idea: to compute the chosen action we count how many cumulative probabilities are
            # less than the random number drawn from [0, 1].
            # we deliberately exclude the LAST cumulative probability because by definition it
            # equals 1 (so it is never less than random[0, 1]); however, due to inaccurate
            # float32 arithmetic it can end up slightly below 1, which would make the algorithm
            # pick action id = n_actions and raise an IndexError
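            # worked example (illustrative numbers, not from the library): with
            # probas = [0.2, 0.5, 0.3] the cumulative sums are [0.2, 0.7, 1.0]; for a
            # random draw of 0.6 only 0.2 is below it, so the sum yields action id 1,
            # whose probability interval (0.2, 0.7] indeed contains the draw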
chosen_action_ids = T.sum((batch_randomness > cum_probas[:, :-1]), axis=1, dtype=self.output_dtype)
return chosen_action_ids
|
tests/commands/conftest.py | MitchellTesla/datasets | 3,395 | 11078458 | import pytest
DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"
DATASET_LOADING_SCRIPT_CODE = """
import json
import os
import datasets
REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"O",
"B-PER",
"I-PER",
"B-ORG",
"I-ORG",
"B-LOC",
"I-LOC",
]
)
),
"langs": datasets.Sequence(datasets.Value("string")),
"spans": datasets.Sequence(datasets.Value("string")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
]
def _generate_examples(self, filepath):
with open(filepath, "r", encoding="utf-8") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
"""
@pytest.fixture
def dataset_loading_script_name():
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def dataset_loading_script_code():
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
script_name = dataset_loading_script_name
script_dir = tmp_path / "datasets" / script_name
script_dir.mkdir(parents=True)
script_path = script_dir / f"{script_name}.py"
with open(script_path, "w") as f:
f.write(dataset_loading_script_code)
return str(script_dir)
|
nameko/web/websocket.py | mohamedmehdigara/nameko | 3,425 | 11078509 | <reponame>mohamedmehdigara/nameko<gh_stars>1000+
import json
import uuid
from collections import namedtuple
from functools import partial
from logging import getLogger
import six
import werkzeug
from eventlet.event import Event
from eventlet.websocket import WebSocketWSGI
from packaging import version
from werkzeug.routing import Rule
from nameko.exceptions import (
ConnectionNotFound, MalformedRequest, MethodNotFound, serialize
)
from nameko.extensions import (
DependencyProvider, Entrypoint, ProviderCollector, SharedExtension
)
from nameko.web.server import WebServer
# in version 2.0.0, werkzeug started correctly identifying incoming websocket
# requests, and only matching them to rules that are marked as being websocket targets.
# see https://github.com/pallets/werkzeug/issues/2052.
# all versions of werkzeug throw a 400 Bad Request error if no rules match, so we need
# to make the explicit identification of a rule as a websocket target conditional
# on the version of werkzeug.
IDENTIFY_WEBSOCKET_RULES = version.parse(werkzeug.__version__) >= version.parse("2.0.0")
_log = getLogger(__name__)
SocketInfo = namedtuple('SocketInfo', ['socket', 'data'])
class Connection(object):
def __init__(self, socket_id, context_data):
self.socket_id = socket_id
self.context_data = context_data
self.subscriptions = set()
class WebSocketServer(SharedExtension, ProviderCollector):
wsgi_server = WebServer()
def __init__(self):
super(WebSocketServer, self).__init__()
self.sockets = {}
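    # Illustrative wire format, inferred from the (de)serialization code below rather
    # than any external spec: clients send frames like
    #   {"method": "some_rpc", "data": {"arg": 1}, "correlation_id": 7}
    # and receive either a result frame
    #   {"type": "result", "success": true, "data": ..., "correlation_id": 7}
    # or event frames such as
    #   {"type": "event", "event": "connected", "data": {"socket_id": "..."}}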
def deserialize_ws_frame(self, payload):
try:
data = json.loads(payload)
return (
data['method'],
data.get('data') or {},
data.get('correlation_id'),
)
except Exception:
raise MalformedRequest('Invalid JSON data')
def serialize_for_ws(self, payload):
return six.text_type(json.dumps(payload))
def serialize_event(self, event, data):
return self.serialize_for_ws({
'type': 'event',
'event': event,
'data': data,
})
def get_url_rule(self):
return Rule('/ws', methods=['GET'], websocket=IDENTIFY_WEBSOCKET_RULES)
def handle_request(self, request):
context_data = self.wsgi_server.context_data_from_headers(request)
return self.websocket_mainloop(context_data)
def websocket_mainloop(self, initial_context_data):
def handler(ws):
socket_id, context_data = self.add_websocket(
ws, initial_context_data)
try:
ws.send(self.serialize_event(
'connected', {'socket_id': socket_id})
)
while 1:
raw_req = ws.wait()
if raw_req is None:
break
ws.send(self.handle_websocket_request(
socket_id, context_data, raw_req))
finally:
self.remove_socket(socket_id)
return WebSocketWSGI(handler)
def handle_websocket_request(self, socket_id, context_data, raw_req):
correlation_id = None
try:
method, data, correlation_id = self.deserialize_ws_frame(
raw_req)
provider = self.get_provider_for_method(method)
result = provider.handle_message(socket_id, data, context_data)
response = {
'type': 'result',
'success': True,
'data': result,
'correlation_id': correlation_id,
}
except Exception as exc:
error = serialize(exc)
response = {
'type': 'result',
'success': False,
'error': error,
'correlation_id': correlation_id,
}
return self.serialize_for_ws(response)
def get_provider_for_method(self, method):
for provider in self._providers:
if (
isinstance(provider, WebSocketRpc) and
provider.method_name == method
):
return provider
raise MethodNotFound()
def setup(self):
self.wsgi_server.register_provider(self)
def stop(self):
self.wsgi_server.unregister_provider(self)
super(WebSocketServer, self).stop()
def add_websocket(self, ws, initial_context_data=None):
socket_id = str(uuid.uuid4())
context_data = dict(initial_context_data or ())
self.sockets[socket_id] = SocketInfo(ws, context_data)
return socket_id, context_data
def remove_socket(self, socket_id):
self.sockets.pop(socket_id, None)
for provider in self._providers:
if isinstance(provider, WebSocketHubProvider):
provider.cleanup_websocket(socket_id)
class WebSocketHubProvider(DependencyProvider):
hub = None
server = WebSocketServer()
def setup(self):
self.hub = WebSocketHub(self.server)
self.server.register_provider(self)
def stop(self):
self.server.unregister_provider(self)
super(WebSocketHubProvider, self).stop()
def get_dependency(self, worker_ctx):
return self.hub
def cleanup_websocket(self, socket_id):
con = self.hub.connections.pop(socket_id, None)
if con is not None:
for channel in con.subscriptions:
subs = self.hub.subscriptions.get(channel)
if subs:
subs.discard(socket_id)
class WebSocketHub(object):
def __init__(self, server):
self._server = server
self.connections = {}
self.subscriptions = {}
def _get_connection(self, socket_id, create=True):
rv = self.connections.get(socket_id)
if rv is not None:
return rv
rv = self._server.sockets.get(socket_id)
if rv is None:
if not create:
return None
raise ConnectionNotFound(socket_id)
if not create:
return None
_, context_data = rv
self.connections[socket_id] = rv = Connection(socket_id, context_data)
return rv
def get_subscriptions(self, socket_id):
"""Returns a list of all the subscriptions of a socket."""
con = self._get_connection(socket_id, create=False)
if con is None:
return []
return sorted(con.subscriptions)
def subscribe(self, socket_id, channel):
"""Subscribes a socket to a channel."""
con = self._get_connection(socket_id)
self.subscriptions.setdefault(channel, set()).add(socket_id)
con.subscriptions.add(channel)
def unsubscribe(self, socket_id, channel):
"""Unsubscribes a socket from a channel."""
con = self._get_connection(socket_id, create=False)
if con is not None:
con.subscriptions.discard(channel)
try:
self.subscriptions[channel].discard(socket_id)
except KeyError:
pass
def broadcast(self, channel, event, data):
"""Broadcasts an event to all sockets listening on a channel."""
payload = self._server.serialize_event(event, data)
for socket_id in self.subscriptions.get(channel, ()):
rv = self._server.sockets.get(socket_id)
if rv is not None:
rv.socket.send(payload)
def unicast(self, socket_id, event, data):
"""Sends an event to a single socket. Returns `True` if that
worked or `False` if not.
"""
payload = self._server.serialize_event(event, data)
rv = self._server.sockets.get(socket_id)
if rv is not None:
rv.socket.send(payload)
return True
return False
class WebSocketRpc(Entrypoint):
server = WebSocketServer()
def setup(self):
self.server.register_provider(self)
def stop(self):
self.server.unregister_provider(self)
super(WebSocketRpc, self).stop()
def handle_message(self, socket_id, data, context_data):
self.check_signature((socket_id,), data)
event = Event()
self.container.spawn_worker(self, (socket_id,), data,
context_data=context_data,
handle_result=partial(
self.handle_result, event))
return event.wait()
def handle_result(self, event, worker_ctx, result, exc_info):
event.send(result, exc_info)
return result, exc_info
rpc = WebSocketRpc.decorator
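# Example usage sketch (illustrative only; the EchoService below is hypothetical):
# a service that exposes a websocket RPC method and pushes events through the hub.
#
#     from nameko.web.websocket import WebSocketHubProvider, rpc
#
#     class EchoService:
#         name = "echo"
#         hub = WebSocketHubProvider()
#
#         @rpc
#         def subscribe(self, socket_id, channel):
#             self.hub.subscribe(socket_id, channel)
#             self.hub.unicast(socket_id, "subscribed", {"channel": channel})
#             return "ok"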
|
tests/test_lambda_runtime_client.py | keshayad/aws-lambda-python-runtime-interface-client | 136 | 11078517 | <gh_stars>100-1000
"""
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
"""
import http
import http.client
import unittest.mock
from unittest.mock import MagicMock, patch
from awslambdaric import __version__
from awslambdaric.lambda_runtime_client import (
LambdaRuntimeClient,
LambdaRuntimeClientError,
InvocationRequest,
_user_agent,
)
class TestInvocationRequest(unittest.TestCase):
def test_constructor(self):
invocation_request = InvocationRequest(
invoke_id="Lambda-Runtime-Aws-Request-Id",
x_amzn_trace_id="Lambda-Runtime-Trace-Id",
invoked_function_arn="Lambda-Runtime-Invoked-Function-Arn",
deadline_time_in_ms="Lambda-Runtime-Deadline-Ms",
client_context="Lambda-Runtime-Client-Context",
cognito_identity="Lambda-Runtime-Cognito-Identity",
content_type="Content-Type",
event_body="response_body",
)
equal_invocation_request = InvocationRequest(
invoke_id="Lambda-Runtime-Aws-Request-Id",
x_amzn_trace_id="Lambda-Runtime-Trace-Id",
invoked_function_arn="Lambda-Runtime-Invoked-Function-Arn",
deadline_time_in_ms="Lambda-Runtime-Deadline-Ms",
client_context="Lambda-Runtime-Client-Context",
cognito_identity="Lambda-Runtime-Cognito-Identity",
content_type="Content-Type",
event_body="response_body",
)
different_invocation_request = InvocationRequest(
invoke_id="Lambda-Runtime-Aws-Request-Id",
x_amzn_trace_id="Lambda-Runtime-Trace-Id",
invoked_function_arn="Lambda-Runtime-Invoked-Function-Arn",
deadline_time_in_ms="Lambda-Runtime-Deadline-Ms",
client_context="Lambda-Runtime-Client-Context",
cognito_identity="Lambda-Runtime-Cognito-Identity",
content_type="Content-Type",
event_body="another_response_body",
)
self.assertTrue(invocation_request == invocation_request)
self.assertTrue(invocation_request == equal_invocation_request)
self.assertFalse(invocation_request == different_invocation_request)
class TestLambdaRuntime(unittest.TestCase):
@patch("awslambdaric.lambda_runtime_client.runtime_client")
def test_wait_next_invocation(self, mock_runtime_client):
response_body = b"{}"
        headers = {
"Lambda-Runtime-Aws-Request-Id": "RID1234",
"Lambda-Runtime-Trace-Id": "TID1234",
"Lambda-Runtime-Invoked-Function-Arn": "FARN1234",
"Lambda-Runtime-Deadline-Ms": 12,
"Lambda-Runtime-Client-Context": "client_context",
"Lambda-Runtime-Cognito-Identity": "cognito_identity",
"Content-Type": "application/json",
}
        mock_runtime_client.next.return_value = response_body, headers
runtime_client = LambdaRuntimeClient("localhost:1234")
event_request = runtime_client.wait_next_invocation()
self.assertIsNotNone(event_request)
self.assertEqual(event_request.invoke_id, "RID1234")
self.assertEqual(event_request.x_amzn_trace_id, "TID1234")
self.assertEqual(event_request.invoked_function_arn, "FARN1234")
self.assertEqual(event_request.deadline_time_in_ms, 12)
self.assertEqual(event_request.client_context, "client_context")
self.assertEqual(event_request.cognito_identity, "cognito_identity")
self.assertEqual(event_request.content_type, "application/json")
self.assertEqual(event_request.event_body, response_body)
@patch("http.client.HTTPConnection", autospec=http.client.HTTPConnection)
def test_post_init_error(self, MockHTTPConnection):
mock_conn = MockHTTPConnection.return_value
mock_response = MagicMock(autospec=http.client.HTTPResponse)
mock_conn.getresponse.return_value = mock_response
mock_response.read.return_value = b""
mock_response.code = http.HTTPStatus.ACCEPTED
runtime_client = LambdaRuntimeClient("localhost:1234")
runtime_client.post_init_error("error_data")
MockHTTPConnection.assert_called_with("localhost:1234")
mock_conn.request.assert_called_once_with(
"POST", "/2018-06-01/runtime/init/error", "error_data"
)
mock_response.read.assert_called_once()
@patch("http.client.HTTPConnection", autospec=http.client.HTTPConnection)
def test_post_init_error_non_accepted_status_code(self, MockHTTPConnection):
mock_conn = MockHTTPConnection.return_value
mock_response = MagicMock(autospec=http.client.HTTPResponse)
mock_conn.getresponse.return_value = mock_response
mock_response.read.return_value = b""
mock_response.code = http.HTTPStatus.IM_USED
runtime_client = LambdaRuntimeClient("localhost:1234")
with self.assertRaises(LambdaRuntimeClientError) as cm:
runtime_client.post_init_error("error_data")
returned_exception = cm.exception
self.assertEqual(returned_exception.endpoint, "/2018-06-01/runtime/init/error")
self.assertEqual(returned_exception.response_code, http.HTTPStatus.IM_USED)
@patch("awslambdaric.lambda_runtime_client.runtime_client")
def test_post_invocation_result(self, mock_runtime_client):
runtime_client = LambdaRuntimeClient("localhost:1234")
response_data = "data"
invoke_id = "1234"
runtime_client.post_invocation_result(invoke_id, response_data)
mock_runtime_client.post_invocation_result.assert_called_once_with(
invoke_id, response_data.encode("utf-8"), "application/json"
)
@patch("awslambdaric.lambda_runtime_client.runtime_client")
def test_post_invocation_result_binary_data(self, mock_runtime_client):
runtime_client = LambdaRuntimeClient("localhost:1234")
response_data = b"binary_data"
invoke_id = "1234"
content_type = "application/octet-stream"
runtime_client.post_invocation_result(invoke_id, response_data, content_type)
mock_runtime_client.post_invocation_result.assert_called_once_with(
invoke_id, response_data, content_type
)
@patch("awslambdaric.lambda_runtime_client.runtime_client")
def test_post_invocation_result_failure(self, mock_runtime_client):
runtime_client = LambdaRuntimeClient("localhost:1234")
response_data = "data"
invoke_id = "1234"
mock_runtime_client.post_invocation_result.side_effect = RuntimeError(
"Failed to post invocation response"
)
with self.assertRaisesRegex(RuntimeError, "Failed to post invocation response"):
runtime_client.post_invocation_result(invoke_id, response_data)
@patch("awslambdaric.lambda_runtime_client.runtime_client")
def test_post_invocation_error(self, mock_runtime_client):
runtime_client = LambdaRuntimeClient("localhost:1234")
error_data = "data"
invoke_id = "1234"
xray_fault = "xray_fault"
runtime_client.post_invocation_error(invoke_id, error_data, xray_fault)
mock_runtime_client.post_error.assert_called_once_with(
invoke_id, error_data, xray_fault
)
@patch("awslambdaric.lambda_runtime_client.runtime_client")
def test_post_invocation_error_with_large_xray_cause(self, mock_runtime_client):
runtime_client = LambdaRuntimeClient("localhost:1234")
error_data = "data"
invoke_id = "1234"
large_xray_fault = ("a" * int(1024 * 1024))[:-1]
runtime_client.post_invocation_error(invoke_id, error_data, large_xray_fault)
mock_runtime_client.post_error.assert_called_once_with(
invoke_id, error_data, large_xray_fault
)
@patch("awslambdaric.lambda_runtime_client.runtime_client")
def test_post_invocation_error_with_too_large_xray_cause(self, mock_runtime_client):
runtime_client = LambdaRuntimeClient("localhost:1234")
error_data = "data"
invoke_id = "1234"
too_large_xray_fault = "a" * int(1024 * 1024)
runtime_client.post_invocation_error(
invoke_id, error_data, too_large_xray_fault
)
mock_runtime_client.post_error.assert_called_once_with(
invoke_id, error_data, ""
)
def test_connection_refused(self):
with self.assertRaises(ConnectionRefusedError):
runtime_client = LambdaRuntimeClient("127.0.0.1:1")
runtime_client.post_init_error("error")
def test_invalid_addr(self):
with self.assertRaises(OSError):
runtime_client = LambdaRuntimeClient("::::")
runtime_client.post_init_error("error")
def test_lambdaric_version(self):
self.assertTrue(_user_agent().endswith(__version__))
class TestLambdaRuntimeClientError(unittest.TestCase):
def test_constructor(self):
expected_endpoint = ""
expected_response_code = ""
expected_response_body = ""
lambda_runtime_client_error = LambdaRuntimeClientError(
expected_endpoint, expected_response_code, expected_response_body
)
self.assertIsInstance(lambda_runtime_client_error, Exception)
self.assertEqual(lambda_runtime_client_error.endpoint, expected_endpoint)
if __name__ == "__main__":
unittest.main()
|
tools/convert_policy_yaml_to_heat_template.py | openstack/tripleo-heat-templates | 135 | 11078521 | <reponame>openstack/tripleo-heat-templates
#!/usr/bin/env python3
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import os
import ruamel.yaml
from ruamel.yaml import YAML
# Not all policy variables across services in THT are consistent. This mapping
# associates each service name with the right THT variable.
_SERVICE_MAP = {
'barbican': 'BarbicanPolicies',
'cinder': 'CinderApiPolicies',
'designate': 'DesignateApiPolicies',
'glance': 'GlanceApiPolicies',
'ironic': 'IronicApiPolicies',
'keystone': 'KeystonePolicies',
'manila': 'ManilaApiPolicies',
'neutron': 'NeutronApiPolicies',
'nova': 'NovaApiPolicies',
'octavia': 'OctaviaApiPolicies',
'placement': 'PlacementPolicies'
}
_SCALAR = ruamel.yaml.scalarstring.DoubleQuotedScalarString
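# Illustrative example (hypothetical policy rule; the shape follows from the loop below):
# given nova.yaml containing
#     "os_compute_api:servers:create": "rule:project_member_api"
# the generated environment gains a parameter_defaults entry roughly like
#     NovaApiPolicies:
#       nova-os_compute_api_servers_create:
#         key: "os_compute_api:servers:create"
#         value: "rule:project_member_api"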
parser = argparse.ArgumentParser()
parser.add_argument(
'-d', '--policy-dir', required=True,
help=(
'Directory containing policy.yaml files for OpenStack services. '
'This script expects files to be named $SERVICE.yaml. For example '
'nova.yaml for nova\'s policies.'
)
)
args = parser.parse_args()
heat_template = {'parameter_defaults': {'EnforceSecureRbac': False}}
for filename in os.listdir(args.policy_dir):
service = filename.split('.')[0]
tht_var_name = _SERVICE_MAP.get(service)
filepath = os.path.join(args.policy_dir, filename)
with open(filepath, 'r') as f:
safe_handler = YAML(typ='safe')
# A lot of policy files have duplicate keys, which violates YAML. Allow
# duplicate keys for the time being.
safe_handler.allow_duplicate_keys = True
policies = safe_handler.load(f)
template = {}
for name, value in policies.items():
rule = name.replace(':', '_')
key = service + '-' + rule
template[key] = {'key': _SCALAR(name), 'value': _SCALAR(value)}
heat_template['parameter_defaults'][tht_var_name] = template
print(
ruamel.yaml.dump(
heat_template, Dumper=ruamel.yaml.RoundTripDumper, width=500
)
)
|
bin/test_bsopt.py | hstenzel/yugabyte-db | 3,702 | 11078553 | <filename>bin/test_bsopt.py
#!/usr/bin/env python3
"""
A test for running tablet bootstrap with different combinations of options, and measuring time.
"""
import os
import threading
import time
import urllib.request
import json
import sys
import subprocess
# YugaByte directories
YUGABYTE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
YBSAMPLEAPPS_DIR = os.path.normpath(os.path.join(YUGABYTE_DIR, '..', 'yb-sample-apps'))
# Number of tablets to use
NUM_TABLETS = 2
# Time to run the CQL test for (seconds)
CQL_TEST_TIME_SEC = 10 * 60
# Number of trials, both for the optimization on and off
NUM_TRIALS = 15
def test_cluster(opt_on):
# Start the CQL stress test
args = [
"java", "-jar", YBSAMPLEAPPS_DIR + "/target/yb-sample-apps.jar", "--workload",
"CassandraBatchKeyValue",
"--nodes", "127.0.0.1:9042", "--num_threads_read", "1", "--num_threads_write", "1",
"--num_unique_keys", "100000", "--nouuid", "--value_size", "1024", "--batch_size", "64"]
proc = subprocess.Popen(args)
# After time is up, kill the test
timer = threading.Timer(CQL_TEST_TIME_SEC, lambda p: p.kill(), (proc,))
timer.start()
proc.wait()
timer.cancel()
# Use yb-admin to flush all writes to RocksDB
subprocess.check_call(
YUGABYTE_DIR +
"/build/latest/bin/yb-admin -master_addresses 127.0.0.1 flush_table " +
"ybdemo_keyspace cassandrakeyvalue 60",
shell=True)
# Restart the cluster
subprocess.check_call([YUGABYTE_DIR + "/bin/yb-ctl", "stop"])
subprocess.check_call(
YUGABYTE_DIR + "/bin/yb-ctl start --tserver_flags \"skip_flushed_entries=" +
str(opt_on).lower() + "\"",
shell=True)
time.sleep(10)
# Get the bootstrap time
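    # Based on the parsing below, the /metrics endpoint is expected to return JSON
    # shaped roughly like:
    #   [{"type": "server",
    #     "metrics": [{"name": "ts_bootstrap_time", "total_count": 2, "total_sum": 1234},
    #                 ...]},
    #    ...]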
for i in range(0, 10):
        metrics = json.loads(urllib.request.urlopen("http://127.0.0.1:9000/metrics").read())
for group in metrics:
if group["type"] == "server":
smetrics = group["metrics"]
for metric in smetrics:
if metric["name"] == "ts_bootstrap_time":
print(metric)
sys.stdout.flush()
if metric["total_count"] == NUM_TABLETS:
return metric["total_sum"]
else:
time.sleep(10)
# Metric not present (SHOULD NEVER REACH HERE)
return -1
# Destroy and remake the cluster from scratch
def remake_cluster():
subprocess.check_call(YUGABYTE_DIR + "/bin/yb-ctl stop", shell=True)
subprocess.check_call(YUGABYTE_DIR + "/bin/yb-ctl destroy", shell=True)
subprocess.check_call(
YUGABYTE_DIR + "/bin/yb-ctl --num_shards_per_tserver " + str(NUM_TABLETS) + " create",
shell=True)
time.sleep(5)
if __name__ == '__main__':
# Run our trials
for i in range(NUM_TRIALS):
# Trial with optimization
remake_cluster()
print("OPTIMIZE_YES: %d" % test_cluster(True))
sys.stdout.flush()
# Trial without optimization
remake_cluster()
print("OPTIMIZE_NO: %d" % test_cluster(False))
sys.stdout.flush()
|
mayan/apps/appearance/tests/mixins.py | nattangwiwat/Mayan-EDMS-recitation | 336 | 11078560 | from ..models import Theme
from .literals import TEST_THEME_LABEL, TEST_THEME_LABEL_EDITED
class ThemeTestMixin:
def _create_test_theme(self):
self.test_theme = Theme.objects.create(
label=TEST_THEME_LABEL
)
def _edit_test_theme(self):
self.test_theme.label = TEST_THEME_LABEL_EDITED
self.test_theme.save()
class ThemeViewTestMixin:
def _request_test_theme_create_view(self):
pk_list = list(Theme.objects.values_list('pk', flat=True))
response = self.post(
viewname='appearance:theme_create', data={
'label': TEST_THEME_LABEL,
}
)
self.test_theme = Theme.objects.exclude(pk__in=pk_list).first()
return response
def _request_test_theme_delete_view(self):
return self.post(
viewname='appearance:theme_delete', kwargs={
'theme_id': self.test_theme.pk
}
)
def _request_test_theme_edit_view(self):
return self.post(
viewname='appearance:theme_edit', kwargs={
'theme_id': self.test_theme.pk
}, data={
'label': TEST_THEME_LABEL_EDITED,
}
)
def _request_test_theme_list_view(self):
return self.get(viewname='appearance:theme_list')
|
sharppy/databases/sars_cal.py | skovic/SHARPpy | 163 | 11078568 | import sharppy.databases.sars as sars
import sharppy.sharptab as tab
from sharppy.io.spc_decoder import SPCDecoder
import numpy as np
from datetime import datetime
import os
def get_profile(fname, sars_type):
# Create a convective profile object
# fname - filename/SARS sounding string to load in
    # sars_type - which SARS database (hail/supercell) to look in for the raw file
# Load in the data
try:
sars_fname = sars.getSounding(fname[0].decode('utf-8'), sars_type)
except:
print("Unable to find data file for:", fname[0])
return None
dec = SPCDecoder(sars_fname)
profs = dec.getProfiles()
prof = profs._profs[''][0]
dates = profs._dates
prof.strictQC = True
try:
new_prof = tab.profile.ConvectiveProfile.copy(prof)
except Exception as e:
print("There was a problem with the generation of the ConvectiveProfile:", str(e))
return None
return new_prof
def calc_inputs(new_prof, sars_type):
# Grab the input values for SARS that were generated by SHARPpy in the ConvectiveProfile object
# new_prof - the ConvectiveProfile object
# sars_type - type of SARS (hail/supercell)
sfc_6km_shear = tab.utils.KTS2MS( tab.utils.mag( new_prof.sfc_6km_shear[0], new_prof.sfc_6km_shear[1]) )
sfc_3km_shear = tab.utils.KTS2MS( tab.utils.mag( new_prof.sfc_3km_shear[0], new_prof.sfc_3km_shear[1]) )
sfc_9km_shear = tab.utils.KTS2MS( tab.utils.mag( new_prof.sfc_9km_shear[0], new_prof.sfc_9km_shear[1]) )
h500t = tab.interp.temp(new_prof, 500.)
lapse_rate = tab.params.lapse_rate( new_prof, 700., 500., pres=True )
right_srh3km = new_prof.right_srh3km[0]
right_srh1km = new_prof.right_srh1km[0]
left_srh3km = new_prof.left_srh3km[0]
left_srh1km = new_prof.left_srh1km[0]
mucape = new_prof.mupcl.bplus
mlcape = new_prof.mlpcl.bplus
mllcl = new_prof.mlpcl.lclhght
mumr = tab.thermo.mixratio(new_prof.mupcl.pres, new_prof.mupcl.dwpc)
#self.ship = params.ship(self)
if sars_type == 'supercell':
data = [mlcape, mllcl, h500t, lapse_rate, tab.utils.MS2KTS(sfc_6km_shear), right_srh1km, tab.utils.MS2KTS(sfc_3km_shear), tab.utils.MS2KTS(sfc_9km_shear),
right_srh3km]
else:
data = [ mumr, mucape, h500t, lapse_rate, sfc_6km_shear,
sfc_9km_shear, sfc_3km_shear, right_srh3km ]
return np.round(np.asarray(data),1)
def check_supercell_cal(use_db=True):
    # Used to check the SARS supercell calibration
database_fn = os.path.join( os.path.dirname( __file__ ), 'sars_supercell.txt' )
supercell_db = np.loadtxt(database_fn, skiprows=1, dtype=bytes, comments="%%%%")
hits = 0
fa = 0
cn = 0
miss = 0
match = 0
for f in supercell_db:
mlcape = float(f[3])
mllcl = float(f[5])
h5temp = float(f[9])
lr = float(f[11])
shr = float(f[7])
srh = float(f[6])
srh3 = float(f[14])
shr3 = float(f[12])
shr9 = float(f[13])
if use_db is True:
out = sars.supercell('sars_supercell.txt', mlcape, mllcl, h5temp, lr, shr, srh, shr3, shr9, srh3)
else:
new_prof = get_profile(f, 'supercell')
if new_prof is None:
continue
out = new_prof.right_supercell_matches
m = int(f[0].decode('utf-8') in out[0])
if m == 0:
data = calc_inputs(new_prof, 'supercell')
#print("C:", data)
#print("T:", [mlcape, mllcl, h5temp, lr, shr, srh, shr3, shr9, srh3])
match += m
#print(f[0], match)
if out[-1] >= .5 and int(f[1]) > 0:
hits += 1
elif out[-1] >= .5 and int(f[1]) == 0:
fa += 1
elif out[-1] < .5 and int(f[1]) == 0:
cn += 1
elif out[-1] < .5 and int(f[1]) > 0:
miss += 1
print("--- SARS SUPERCELL CALIBRATION ---")
print_stats(hits, cn, miss, fa, match)
return {'hits': hits, 'cn': cn, 'miss':miss, 'fa': fa, 'match':match}
def print_stats(hits, cn, miss, fa, matches):
# Print out the verification stats
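    # hits/miss/fa/cn form the usual 2x2 contingency table; e.g. 40 hits, 10 misses,
    # 5 false alarms and 45 correct nulls (illustrative numbers) give POD=0.800,
    # FAR=0.111, CSI=0.727 and ACCURACY=0.850 under the formulas printed below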
print("TOTAL SNDGS:", hits + cn + miss + fa )
print("SELF MATCHES:", matches)
print("HIT:", hits)
print("MISS:", miss)
print("FALSE ALARM:", fa)
print("CORRECT NULL:", cn)
print("ACCURACY: %.3f" % (float(hits+cn)/float(hits+cn+miss+fa)))
print("BIAS: %.3f" % (float(hits+fa)/float(hits+miss)))
print("POD: %.3f" % (float(hits)/float(hits+miss)))
print("FAR: %.3f" % (float(fa)/float(fa+hits)))
print("CSI: %.3f" % (float(hits)/float(hits + miss + fa)))
print("TSS: %.3f" % (float(hits)/float(hits+miss) - float(fa)/float(fa+cn)))
print()
def calc_verification(vals):
stats = {}
stats['num'] = vals['hits'] + vals['cn'] + vals['miss'] + vals['fa']
for key in vals.keys():
stats[key] = vals[key]
hits = stats['hits']
miss = stats['miss']
fa = stats['fa']
cn = stats['cn']
stats["ACCURACY"] = float(hits+cn)/float(hits+cn+miss+fa)
stats["BIAS"] = float(hits+fa)/float(hits+miss)
stats["POD"] = float(hits)/float(hits+miss)
stats["FAR"] = float(fa)/float(fa+hits)
stats["CSI"] = float(hits)/float(hits + miss + fa)
stats["TSS"] = float(hits)/float(hits+miss) - float(fa)/float(fa+cn)
return stats
def check_hail_cal(use_db=True):
# Check the calibration of the SARS hail
database_fn = os.path.join( os.path.dirname( __file__ ), 'sars_hail.txt' )
hail_db = np.loadtxt(database_fn, skiprows=1, dtype=bytes)
hits = 0
cn = 0
miss = 0
fa = 0
match = 0
for f in hail_db:
mumr = float(f[4])
mucape = float(f[3])
lr = float(f[7])
h5_temp = float(f[5])
shr = float(f[10])
shr9 = float(f[11])
shr3 = float(f[9])
srh = float(f[12])
if use_db is True:
out = sars.hail('sars_hail.txt', mumr, mucape, h5_temp, lr, shr, shr9, shr3, srh)
else:
new_prof = get_profile(f, 'hail')
if new_prof is None:
continue
out = new_prof.right_matches
m = int(f[0].decode('utf-8') in out[0])
if m == 0:
data = calc_inputs(new_prof, 'hail')
#print("C:", data)
#print("T:", [mumr, mucape, h5_temp, lr, shr, shr9, shr3, srh])
match += m
#print(f[0], match)
if out[-1] >= .5 and float(f[2]) >= 2:
hits += 1
elif out[-1] >= .5 and float(f[2]) < 2:
fa += 1
elif out[-1] < .5 and float(f[2]) < 2:
cn += 1
elif out[-1] < .5 and float(f[2]) >= 2:
miss += 1
print("--- SARS HAIL CALIBRATION ---")
print_stats(hits, cn, miss, fa, match)
return {'hits': hits, 'cn': cn, 'miss':miss, 'fa': fa, 'match':match}
#check_db = False
#check_supercell_cal(check_db)
#check_hail_cal(check_db)
|
virtual/lib/python3.8/site-packages/registration/views.py | erastus-1/GramApp | 925 | 11078572 | """
Views which allow users to create and activate accounts.
"""
from django.conf import settings
from django.shortcuts import redirect
from django.utils.decorators import method_decorator
from django.utils.module_loading import import_string
from django.views.decorators.debug import sensitive_post_parameters
from django.views.generic.base import TemplateView
from django.views.generic.edit import FormView
from registration.forms import ResendActivationForm
REGISTRATION_FORM_PATH = getattr(settings, 'REGISTRATION_FORM',
'registration.forms.RegistrationForm')
REGISTRATION_FORM = import_string(REGISTRATION_FORM_PATH)
ACCOUNT_AUTHENTICATED_REGISTRATION_REDIRECTS = getattr(
settings, 'ACCOUNT_AUTHENTICATED_REGISTRATION_REDIRECTS', True)
class RegistrationView(FormView):
"""
Base class for user registration views.
"""
disallowed_url = 'registration_disallowed'
form_class = REGISTRATION_FORM
http_method_names = ['get', 'post', 'head', 'options', 'trace']
success_url = None
template_name = 'registration/registration_form.html'
@method_decorator(sensitive_post_parameters('password1', 'password2'))
def dispatch(self, request, *args, **kwargs):
"""
        Check that user signup is allowed and whether the user is already logged in
        before bothering to dispatch or do any other processing.
"""
if ACCOUNT_AUTHENTICATED_REGISTRATION_REDIRECTS:
if self.request.user.is_authenticated:
if settings.LOGIN_REDIRECT_URL is not None:
return redirect(settings.LOGIN_REDIRECT_URL)
else:
raise Exception((
'You must set a URL with LOGIN_REDIRECT_URL in '
'settings.py or set '
'ACCOUNT_AUTHENTICATED_REGISTRATION_REDIRECTS=False'))
if not self.registration_allowed():
return redirect(self.disallowed_url)
return super().dispatch(request, *args, **kwargs)
def form_valid(self, form):
new_user = self.register(form)
success_url = self.get_success_url(new_user)
# success_url may be a simple string, or a tuple providing the
# full argument set for redirect(). Attempting to unpack it
# tells us which one it is.
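        # e.g. a plain "/accounts/complete/" or a ("view-name", (), {}) tuple would
        # both be accepted here (hypothetical values, shown only to illustrate shape)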
try:
to, args, kwargs = success_url
except ValueError:
return redirect(success_url)
else:
return redirect(to, *args, **kwargs)
def registration_allowed(self):
"""
Override this to enable/disable user registration, either
globally or on a per-request basis.
"""
return True
def register(self, form):
"""
Implement user-registration logic here.
"""
raise NotImplementedError
def get_success_url(self, user=None):
"""
Use the new user when constructing success_url.
"""
return super().get_success_url()
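# Minimal subclass sketch (illustrative only; the shipped registration backends
# provide concrete register() implementations with activation handling):
#
#     class SimpleRegistrationView(RegistrationView):
#         success_url = "/accounts/registered/"
#
#         def register(self, form):
#             return form.save()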
class ActivationView(TemplateView):
"""
Base class for user activation views.
"""
http_method_names = ['get']
template_name = 'registration/activate.html'
def get(self, request, *args, **kwargs):
activated_user = self.activate(*args, **kwargs)
if activated_user:
success_url = self.get_success_url(activated_user)
try:
to, args, kwargs = success_url
except ValueError:
return redirect(success_url)
else:
return redirect(to, *args, **kwargs)
return super().get(request, *args, **kwargs)
def activate(self, *args, **kwargs):
"""
Implement account-activation logic here.
"""
raise NotImplementedError
def get_success_url(self, user):
raise NotImplementedError
class ResendActivationView(FormView):
"""
Base class for resending activation views.
"""
form_class = ResendActivationForm
template_name = 'registration/resend_activation_form.html'
def form_valid(self, form):
"""
Regardless if resend_activation is successful, display the same
confirmation template.
"""
self.resend_activation(form)
return self.render_form_submitted_template(form)
def resend_activation(self, form):
"""
Implement resend activation key logic here.
"""
raise NotImplementedError
def render_form_submitted_template(self, form):
"""
Implement rendering of confirmation template here.
"""
raise NotImplementedError
class ApprovalView(TemplateView):
http_method_names = ['get']
template_name = 'registration/admin_approve.html'
def get(self, request, *args, **kwargs):
approved_user = self.approve(*args, **kwargs)
if approved_user:
success_url = self.get_success_url(approved_user)
try:
to, args, kwargs = success_url
except ValueError:
return redirect(success_url)
else:
return redirect(to, *args, **kwargs)
return super().get(request, *args, **kwargs)
def approve(self, *args, **kwargs):
"""
Implement admin-approval logic here.
"""
raise NotImplementedError
def get_success_url(self, user):
raise NotImplementedError
|
ykdl/extractors/bilibili/vc.py | Fearyncess/ykdl | 1,153 | 11078576 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from ykdl.extractor import VideoExtractor
from ykdl.videoinfo import VideoInfo
from ykdl.util.html import get_content
from ykdl.util.match import match1
import json
class BiliVC(VideoExtractor):
name = u'哔哩哔哩 小视频 (Bili VC)'
def prepare(self):
info = VideoInfo(self.name)
self.vid = match1(self.url, 'video/(\d+)')
api_url = 'https://api.vc.bilibili.com/clip/v1/video/detail?video_id={}'.format(self.vid)
video_data = json.loads(get_content(api_url))
info.title = video_data['data']['item']['description']
info.artist = video_data['data']['user']['name']
info.stream_types.append('current')
info.streams['current'] = {'container': 'mp4', 'src' : [video_data['data']['item']['video_playurl']], 'size': int(video_data['data']['item']['video_size'])}
return info
site = BiliVC()
|
mpi4jax/_src/collective_ops/send.py | Thenerdstation/mpi4jax | 122 | 11078597 | import numpy as _np
from mpi4py import MPI as _MPI
from jax import core
from jax.core import Primitive
from jax.interpreters import xla
from jax.lax import create_token
from jax.lib import xla_client
from ..utils import (
HashableMPIType,
default_primitive_impl,
to_dtype_handle,
to_mpi_handle,
unpack_hashable,
wrap_as_hashable,
xla_constant_intc,
xla_constant_uintptr,
)
from ..decorators import translation_rule_cpu, translation_rule_gpu
from ..validation import enforce_types
from ..comm import get_default_comm
from ..jax_compat import Tracer, Token
# The Jax primitive
mpi_send_p = Primitive("send_mpi") # Create the primitive
mpi_send_impl = default_primitive_impl(mpi_send_p)
# This function applies the primitive to an AST
@enforce_types(
dest=_np.integer,
tag=_np.integer,
comm=(type(None), _MPI.Intracomm, HashableMPIType),
token=(type(None), Token, Tracer),
)
def send(x, dest, *, tag=0, comm=None, token=None):
"""Perform a send operation.
Arguments:
x: Array or scalar input to send.
dest (int): Rank of the destination MPI process.
tag (int): Tag of this message.
comm (mpi4py.MPI.Comm): The MPI communicator to use (defaults to
a clone of :obj:`COMM_WORLD`).
token (Token): XLA token to use to ensure correct execution order.
If not given, a new token is generated.
Returns:
Token: A new, modified token, that depends on this operation.
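    Example:
        A minimal sketch (illustrative; assumes an MPI job with at least two
        ranks and a matching ``recv`` on the destination rank)::
            from mpi4py import MPI
            import jax.numpy as jnp
            import mpi4jax
            if MPI.COMM_WORLD.Get_rank() == 0:
                token = mpi4jax.send(jnp.ones(3), dest=1, tag=0)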
"""
if token is None:
token = create_token(x)
if comm is None:
comm = get_default_comm()
comm = wrap_as_hashable(comm)
return mpi_send_p.bind(x, token, dest=dest, tag=tag, comm=comm)
# This function compiles the operation
@translation_rule_cpu
def mpi_send_xla_encode_cpu(c, x, token, dest, tag, comm):
comm = unpack_hashable(comm)
x_shape = c.GetShape(x)
dtype = x_shape.element_type()
dims = x_shape.dimensions()
# compute total number of elements in array
nitems = _np.prod(dims, dtype=int)
dtype_handle = to_dtype_handle(dtype)
# ensure void** out type
sh = xla_client.Shape.tuple_shape([xla_client.Shape.token_shape()])
out = xla_client.ops.CustomCall(
c,
b"mpi_send",
operands=(
xla_constant_intc(c, nitems),
x,
xla_client.ops.Constant(c, _np.intc(dest)),
xla_constant_intc(c, tag),
xla_constant_uintptr(c, to_mpi_handle(comm)),
xla_constant_uintptr(c, dtype_handle),
token,
),
shape=sh,
has_side_effect=True,
)
return xla_client.ops.GetTupleElement(out, 0)
@translation_rule_gpu
def mpi_send_xla_encode_gpu(c, x, token, dest, tag, comm):
from ..xla_bridge.mpi_xla_bridge_gpu import build_send_descriptor
comm = unpack_hashable(comm)
x_shape = c.GetShape(x)
dtype = x_shape.element_type()
dims = x_shape.dimensions()
# compute total number of elements in array
nitems = _np.prod(dims, dtype=int)
dtype_handle = to_dtype_handle(dtype)
# ensure void** out type
sh = xla_client.Shape.tuple_shape([xla_client.Shape.token_shape()])
descriptor = build_send_descriptor(
nitems,
dest,
tag,
to_mpi_handle(comm),
dtype_handle,
)
out = xla_client.ops.CustomCall(
c,
b"mpi_send",
operands=(
x,
token,
),
shape=sh,
opaque=descriptor,
has_side_effect=True,
)
return xla_client.ops.GetTupleElement(out, 0)
# This function evaluates only the shapes during AST construction
def mpi_send_abstract_eval(xs, token, dest, tag, comm):
return core.abstract_token
mpi_send_p.def_impl(mpi_send_impl)
mpi_send_p.def_abstract_eval(mpi_send_abstract_eval)
# assign to the primitive the correct encoder
xla.backend_specific_translations["cpu"][mpi_send_p] = mpi_send_xla_encode_cpu
xla.backend_specific_translations["gpu"][mpi_send_p] = mpi_send_xla_encode_gpu
|
robot-server/tests/protocols/__init__.py | anuwrag/opentrons | 235 | 11078605 | <gh_stars>100-1000
"""Tests for the robot_server.protocols module."""
|
RecoMuon/MuonSeedGenerator/python/ptSeedParameterization_38T_cfi.py | ckamtsikis/cmssw | 852 | 11078625 | import FWCore.ParameterSet.Config as cms
ptSeedParameterization = cms.PSet(
SMB_21 = cms.vdouble(0.918425, -0.141199, 0.0, 0.254515, -0.111848, 0.0),
SMB_20 = cms.vdouble(0.861314, -0.16233, 0.0, 0.248879, -0.113879, 0.0),
SMB_22 = cms.vdouble(1.308565, -0.701634, 0.0, -0.302861, 0.675785, 0.0),
OL_2213 = cms.vdouble(0.563218, -0.493991, 0.0, 0.943776, -0.591751, 0.0),
    # Single CSC Segments
# CSCDet_Id p1 p2 p3 ep1 ep2 ep3
#------------------------------------------------------------------------ #
SME_11 = cms.vdouble(2.39479, -0.888663, 0.0, -4.604546, 3.623464, 0.0),
SME_13 = cms.vdouble(0.398851, 0.028176, 0.0, 0.567015, 2.623232, 0.0),
SME_12 = cms.vdouble(-0.277294, 0.7616, 0.0, -0.243326, 1.446792, 0.0),
SME_32 = cms.vdouble(-0.021912, -0.008995, 0.0, -49.779764, 30.780972, 0.0),
SME_31 = cms.vdouble(-0.588188, 0.316961, 0.0, -95.261732, 45.444051, 0.0),
SME_42 = cms.vdouble(-0.021912, -0.008995, 0.0, -49.779764, 30.780972, 0.0),
# OL Parameters
# Det_Layers p1 p2 p3 ep1 ep2 ep3
#------------------------------------------------------------------------ #
OL_1213 = cms.vdouble(0.960544, -0.75644, 0.0, 0.1636, 0.114178, 0.0),
DT_13 = cms.vdouble(0.298842, 0.076531, -0.14293, 0.219923, -0.145026, 0.155638),
# DT Parameters
# Det_Stations p1 p2 p3 ep1 ep2 ep3
#------------------------------------------------------------------------ #
DT_12 = cms.vdouble(0.176182, 0.058535, -0.090549, 0.202363, -0.203126, 0.222219),
DT_14 = cms.vdouble(0.388423, 0.068698, -0.145925, 0.159515, 0.124299, -0.133269),
OL_1232 = cms.vdouble(0.162626, 0.000843, 0.0, 0.396271, 0.002791, 0.0),
CSC_23 = cms.vdouble(-0.095236, 0.122061, -0.029852, -11.396689, 15.933598, -4.267065),
CSC_24 = cms.vdouble(-0.049769, 0.063087, -0.011029, -13.765978, 16.296143, -4.241835),
CSC_03 = cms.vdouble(0.498992, -0.086235, -0.025772, 2.761006, -2.667607, 0.72802),
SMB_31 = cms.vdouble(0.398661, -0.024853, 0.0, 0.863324, -0.413048, 0.0),
# CSC Parameters
# Det_Stations p1 p2 p3 ep1 ep2 ep3
#------------------------------------------------------------------------ #
CSC_01 = cms.vdouble(0.155906, -0.000406, 0.0, 0.194022, -0.010181, 0.0),
SMB_32 = cms.vdouble(0.421649, -0.111654, 0.0, -0.044613, 1.134858, 0.0),
SMB_30 = cms.vdouble(0.399628, 0.014922, 0.0, 0.665622, 0.358439, 0.0),
OL_2222 = cms.vdouble(0.087587, 0.005729, 0.0, 0.535169, -0.087675, 0.0),
    # Single DT Segments
# DTDet_Id p1 p2 p3 ep1 ep2 ep3
#------------------------------------------------------------------------ #
SMB_10 = cms.vdouble(1.160532, 0.148991, 0.0, 0.182785, -0.093776, 0.0),
SMB_11 = cms.vdouble(1.289468, -0.139653, 0.0, 0.137191, 0.01217, 0.0),
SMB_12 = cms.vdouble(1.923091, -0.913204, 0.0, 0.161556, 0.020215, 0.0),
DT_23 = cms.vdouble(0.120647, 0.034743, -0.070855, 0.302427, -0.21417, 0.261012),
DT_24 = cms.vdouble(0.189527, 0.037328, -0.088523, 0.251936, 0.032411, 0.010984),
SME_21 = cms.vdouble(0.64895, -0.148762, 0.0, -5.07676, 6.284227, 0.0),
SME_22 = cms.vdouble(-0.624708, 0.641043, 0.0, 32.581295, -19.604264, 0.0),
CSC_34 = cms.vdouble(0.144321, -0.142283, 0.035636, 190.260708, -180.888643, 43.430395),
CSC_02 = cms.vdouble(0.600235, -0.205683, 0.001113, 0.655625, -0.682129, 0.253916),
SME_41 = cms.vdouble(-0.187116, 0.076415, 0.0, -58.552583, 27.933864, 0.0),
DT_34 = cms.vdouble(0.049146, -0.003494, -0.010099, 0.672095, 0.36459, -0.304346),
CSC_14 = cms.vdouble(0.952517, -0.532733, 0.084601, 1.615881, -1.630744, 0.514139),
OL_1222 = cms.vdouble(0.215915, 0.002556, 0.0, 0.313596, -0.021465, 0.0),
CSC_13 = cms.vdouble(1.22495, -1.792358, 0.711378, 5.271848, -6.280625, 2.0142),
CSC_12 = cms.vdouble(-0.363549, 0.569552, -0.173186, 7.777069, -10.203618, 3.478874)
)
|
test/slicing/test_convergence.py | HazyResearch/snorkel | 2,906 | 11078631 | import random
import unittest
from typing import List
import numpy as np
import pandas as pd
import pytest
import torch
import torch.nn as nn
from snorkel.analysis import Scorer
from snorkel.classification import (
DictDataLoader,
DictDataset,
MultitaskClassifier,
Operation,
Task,
Trainer,
)
from snorkel.slicing import (
PandasSFApplier,
add_slice_labels,
convert_to_slice_tasks,
slicing_function,
)
from snorkel.types import DataPoint
# Define SFs specifying points inside a circle
@slicing_function()
def f(x: DataPoint) -> int:
# targets ~7% of the data
radius = 0.3
h, k = (-0.15, -0.3) # center
return np.sqrt((x.x1 - h) ** 2 + (x.x2 - k) ** 2) < radius
@slicing_function()
def g(x: DataPoint) -> int:
# targets ~6% of the data
radius = 0.3
h, k = (0.25, 0.0) # center
return np.sqrt((x.x1 - h) ** 2 + (x.x2 - k) ** 2) < radius
@slicing_function()
def h(x: DataPoint) -> int:
# targets ~27% of the data
radius = 0.6
h, k = (0.25, 0.0) # center
return np.sqrt((x.x1 - h) ** 2 + (x.x2 - k) ** 2) < radius
class SlicingConvergenceTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Ensure deterministic runs
random.seed(123)
np.random.seed(123)
torch.manual_seed(123)
# Create raw data
cls.N_TRAIN = 1500
cls.N_VALID = 300
cls.df_train = create_data(cls.N_TRAIN)
cls.df_valid = create_data(cls.N_VALID)
@pytest.mark.complex
def test_convergence(self):
"""Test slicing convergence with 1 slice task that represents ~25% of
the data."""
dataloaders = []
for df, split in [(self.df_train, "train"), (self.df_valid, "valid")]:
dataloader = create_dataloader(df, split)
dataloaders.append(dataloader)
base_task = create_task("task", module_suffixes=["A", "B"])
# Apply SFs
slicing_functions = [h] # high coverage slice
slice_names = [sf.name for sf in slicing_functions]
applier = PandasSFApplier(slicing_functions)
S_train = applier.apply(self.df_train, progress_bar=False)
S_valid = applier.apply(self.df_valid, progress_bar=False)
self.assertEqual(S_train.shape, (self.N_TRAIN,))
self.assertEqual(S_valid.shape, (self.N_VALID,))
self.assertIn("h", S_train.dtype.names)
# Add slice labels
add_slice_labels(dataloaders[0], base_task, S_train)
add_slice_labels(dataloaders[1], base_task, S_valid)
# Convert to slice tasks
tasks = convert_to_slice_tasks(base_task, slice_names)
model = MultitaskClassifier(tasks=tasks)
# Train
trainer = Trainer(lr=0.001, n_epochs=50, progress_bar=False)
trainer.fit(model, dataloaders)
scores = model.score(dataloaders)
# Confirm near perfect scores
self.assertGreater(scores["task/TestData/valid/accuracy"], 0.94)
self.assertGreater(scores["task_slice:h_pred/TestData/valid/accuracy"], 0.94)
self.assertGreater(scores["task_slice:h_ind/TestData/valid/f1"], 0.94)
# Calculate/check train/val loss
train_dataset = dataloaders[0].dataset
train_loss_output = model.calculate_loss(
train_dataset.X_dict, train_dataset.Y_dict
)
train_loss = train_loss_output[0]["task"].item()
self.assertLess(train_loss, 0.1)
val_dataset = dataloaders[1].dataset
val_loss_output = model.calculate_loss(val_dataset.X_dict, val_dataset.Y_dict)
val_loss = val_loss_output[0]["task"].item()
self.assertLess(val_loss, 0.1)
@pytest.mark.complex
def test_performance(self):
"""Test slicing performance with 2 corresponding slice tasks that
represent roughly <10% of the data."""
dataloaders = []
for df, split in [(self.df_train, "train"), (self.df_valid, "valid")]:
dataloader = create_dataloader(df, split)
dataloaders.append(dataloader)
base_task = create_task("task", module_suffixes=["A", "B"])
# Apply SFs
slicing_functions = [f, g] # low-coverage slices
slice_names = [sf.name for sf in slicing_functions]
applier = PandasSFApplier(slicing_functions)
S_train = applier.apply(self.df_train, progress_bar=False)
S_valid = applier.apply(self.df_valid, progress_bar=False)
# Add slice labels
add_slice_labels(dataloaders[0], base_task, S_train)
add_slice_labels(dataloaders[1], base_task, S_valid)
# Convert to slice tasks
tasks = convert_to_slice_tasks(base_task, slice_names)
model = MultitaskClassifier(tasks=tasks)
# Train
# NOTE: Needs more epochs to convergence with more heads
trainer = Trainer(lr=0.001, n_epochs=65, progress_bar=False)
trainer.fit(model, dataloaders)
scores = model.score(dataloaders)
# Confirm reasonably high slice scores
# Check train scores
self.assertGreater(scores["task/TestData/train/f1"], 0.9)
self.assertGreater(scores["task_slice:f_pred/TestData/train/f1"], 0.9)
self.assertGreater(scores["task_slice:f_ind/TestData/train/f1"], 0.9)
self.assertGreater(scores["task_slice:g_pred/TestData/train/f1"], 0.9)
self.assertGreater(scores["task_slice:g_ind/TestData/train/f1"], 0.9)
self.assertGreater(scores["task_slice:base_pred/TestData/train/f1"], 0.9)
self.assertEqual(scores["task_slice:base_ind/TestData/train/f1"], 1.0)
# Check valid scores
self.assertGreater(scores["task/TestData/valid/f1"], 0.9)
self.assertGreater(scores["task_slice:f_pred/TestData/valid/f1"], 0.9)
self.assertGreater(scores["task_slice:f_ind/TestData/valid/f1"], 0.9)
self.assertGreater(scores["task_slice:g_pred/TestData/valid/f1"], 0.9)
self.assertGreater(scores["task_slice:g_ind/TestData/valid/f1"], 0.9)
self.assertGreater(scores["task_slice:base_pred/TestData/valid/f1"], 0.9)
# base_ind is trivial: all labels are positive
self.assertEqual(scores["task_slice:base_ind/TestData/valid/f1"], 1.0)
def create_data(n: int) -> pd.DataFrame:
X = (np.random.random((n, 2)) * 2 - 1).astype(np.float32)
Y = (X[:, 0] < X[:, 1] + 0.25).astype(int)
df = pd.DataFrame({"x1": X[:, 0], "x2": X[:, 1], "y": Y})
return df
def create_dataloader(df: pd.DataFrame, split: str) -> DictDataLoader:
dataset = DictDataset(
name="TestData",
split=split,
X_dict={
"coordinates": torch.stack(
(torch.tensor(df["x1"]), torch.tensor(df["x2"])), dim=1
)
},
Y_dict={"task": torch.tensor(df["y"], dtype=torch.long)},
)
dataloader = DictDataLoader(
dataset=dataset, batch_size=4, shuffle=(dataset.split == "train")
)
return dataloader
def create_task(task_name: str, module_suffixes: List[str]) -> Task:
module1_name = f"linear1{module_suffixes[0]}"
module2_name = f"linear2{module_suffixes[1]}"
module_pool = nn.ModuleDict(
{
module1_name: nn.Sequential(nn.Linear(2, 20), nn.ReLU()),
module2_name: nn.Linear(20, 2),
}
)
op1 = Operation(module_name=module1_name, inputs=[("_input_", "coordinates")])
op2 = Operation(module_name=module2_name, inputs=[op1.name])
op_sequence = [op1, op2]
task = Task(
name=task_name,
module_pool=module_pool,
op_sequence=op_sequence,
scorer=Scorer(metrics=["f1", "accuracy"]),
)
return task
if __name__ == "__main__":
unittest.main()
|
src/ros_callbacks.py | UnofficialJuliaMirrorSnapshots/RobotOS.jl-22415677-39a4-5241-a37a-00beabbbdae8 | 147 | 11078636 | <reponame>UnofficialJuliaMirrorSnapshots/RobotOS.jl-22415677-39a4-5241-a37a-00beabbbdae8<filename>src/ros_callbacks.py
#Python 2/3 compatibility with 3 style code
from __future__ import absolute_import, division, print_function, unicode_literals
__metaclass__ = type
import sys
import ctypes
import threading
try:
import queue
except ImportError:
import Queue as queue
class MessageQueue:
"Queue up received messages and invoke notification to run callback"
def __init__(self, cbptr, notify_handle):
CBType = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_void_p)
self._cb_notify = CBType(cbptr.value)
self._notify_handle = notify_handle
self._queue = queue.Queue()
def storemsg(self, msg):
self._queue.put(msg)
self._cb_notify(self._notify_handle)
def size(self):
return self._queue.qsize()
def get(self):
return self._queue.get()
class ServiceCallback:
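    """Bridges a ROS service request into a Julia callback: srv_cb() stores the
    incoming request, notifies the Julia side via the ctypes callback, then blocks
    on a condition variable until set_response() supplies the reply for ROS."""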
def __init__(self, cbptr, notify_handle):
CBType = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_void_p)
self._cb_notify = CBType(cbptr.value)
self._notify_handle = notify_handle
self._response = None
self._hasresponse = threading.Condition()
def srv_cb(self, srvreq):
"Store received service request and block until Julia callback completes"
self._hasresponse.acquire()
self._request = srvreq
self._cb_notify(self._notify_handle)
#wait for the julia callback
self._hasresponse.wait()
self._hasresponse.release()
return self._response
def get_request(self):
return self._request
def set_response(self, resp):
self._response = resp
self._hasresponse.acquire()
self._hasresponse.notify()
self._hasresponse.release()
|
tests/modifier_test.py | khoih-prog/TinyNeuralNetwork | 241 | 11078650 | import random
import unittest
from distutils.version import LooseVersion
from operator import add, mul, sub, truediv
from unittest.case import SkipTest
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from interval import Interval
from tinynn.graph.modifier import l2_norm
from tinynn.prune.oneshot_pruner import OneShotChannelPruner
def removed_idx_group_check(removed_idx, total_idx_len, removed_idx_len, group, offset=0):
for i in range(group):
remove_group_len = removed_idx_len // group
for j in range(i * remove_group_len, i * remove_group_len + remove_group_len):
idx_group_len = total_idx_len // group
assert removed_idx[j] in Interval(
offset + i * idx_group_len, offset + i * idx_group_len + idx_group_len, upper_closed=False
)
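# Worked example (illustrative numbers): with total_idx_len=8, removed_idx_len=4 and
# group=2, the first two removed indices must fall in [0, 4) and the last two in
# [4, 8), i.e. every channel group loses the same number of channels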
def get_rd_lst(length):
rd_lst = random.sample(range(0, 1000), length)
random.shuffle(rd_lst)
print(rd_lst)
return rd_lst
def get_topk(lst, k, offset=0):
_, idx = torch.topk(torch.tensor(lst), k, largest=False)
return sorted([i + offset for i in idx.tolist()])
def init_conv_by_list(conv, ch_value):
assert conv.weight.shape[0] == len(ch_value)
for i in range(len(ch_value)):
conv.weight.data[i, :] = ch_value[i]
class ModifierTester(unittest.TestCase):
def test_cat_graph(self):
class TestModel(nn.Module):
def __init__(self):
super().__init__()
self.conv0 = nn.Conv2d(3, 8, (3, 3))
self.conv1 = nn.Conv2d(3, 8, (3, 3))
self.conv2 = nn.Conv2d(16, 32, (3, 3))
self.linear = nn.Linear(800, 100)
def forward(self, x):
x0 = self.conv0(x)
x1 = self.conv1(x)
cat0 = torch.cat([x0, x1], dim=1)
conv2 = self.conv2(cat0)
view0 = conv2.view((1, -1))
linear0 = self.linear(view0)
return linear0
def test_func():
model = TestModel()
rd_lst_8 = get_rd_lst(8)
rd_lst_32 = get_rd_lst(32)
init_conv_by_list(model.conv0, rd_lst_8)
init_conv_by_list(model.conv1, rd_lst_8)
init_conv_by_list(model.conv2, rd_lst_32)
importance_conv0 = l2_norm(model.conv0.weight, model.conv0).tolist()
importance_conv1 = l2_norm(model.conv1.weight, model.conv1).tolist()
importance_conv2 = l2_norm(model.conv2.weight, model.conv2).tolist()
pruner = OneShotChannelPruner(model, torch.ones(1, 3, 9, 9), {"sparsity": 0.5, "metrics": "l2_norm"})
pruner.register_mask()
conv0_idxes = get_topk(importance_conv0, 4)
conv1_idxes = get_topk(importance_conv1, 4)
conv2_idxes = get_topk(importance_conv2, 16)
assert model.conv0.masker.ot_remove_idx == conv0_idxes
assert model.conv1.masker.ot_remove_idx == conv1_idxes
assert model.conv2.masker.in_remove_idx == conv0_idxes + [i + 8 for i in conv0_idxes]
assert model.conv2.masker.ot_remove_idx == conv2_idxes
pruner.apply_mask()
assert model.conv1.out_channels == 4
assert model.conv0.out_channels == 4
assert model.conv2.in_channels == 8
assert model.conv2.out_channels == 16
assert model.linear.in_features == 400
assert model.linear.out_features == 100
model(torch.ones(1, 3, 9, 9))
for i in range(100):
test_func()
def test_cat_add_graph(self):
class TestModel(nn.Module):
def __init__(self):
super().__init__()
self.conv0 = nn.Conv2d(3, 8, (3, 3), padding=(1, 1))
self.conv1 = nn.Conv2d(3, 8, (3, 3), padding=(1, 1))
self.conv2 = nn.Conv2d(3, 16, (3, 3), padding=(1, 1))
self.conv3 = nn.Conv2d(16, 32, (3, 3), padding=(1, 1))
def forward(self, x):
x0 = self.conv0(x)
x1 = self.conv1(x)
x2 = self.conv2(x)
cat0 = torch.cat([x0, x1], dim=1)
add0 = torch.add(cat0, x2)
return self.conv3(add0)
def test_func():
model = TestModel()
while True:
ch_8 = get_rd_lst(8)
ch_16 = get_rd_lst(16)
ch_32 = get_rd_lst(32)
init_conv_by_list(model.conv0, ch_8)
init_conv_by_list(model.conv1, ch_8)
init_conv_by_list(model.conv2, ch_16)
init_conv_by_list(model.conv3, ch_32)
importance_conv0 = l2_norm(model.conv0.weight, model.conv0).tolist()
importance_conv1 = l2_norm(model.conv1.weight, model.conv1).tolist()
importance_conv2 = l2_norm(model.conv2.weight, model.conv2).tolist()
importance_conv3 = l2_norm(model.conv3.weight, model.conv3).tolist()
importance_add0 = list(map(add, importance_conv0 + importance_conv1, importance_conv2))
# Duplicate values may lead to multiple possibilities for remove idx
if len(set(importance_add0)) == len(importance_add0):
break
conv0_idxes = get_topk(importance_add0[:8], 4)
conv1_idxes = get_topk(importance_add0[8:], 4)
conv3_idxes = get_topk(importance_conv3, 16)
pruner = OneShotChannelPruner(model, torch.ones(1, 3, 9, 9), {"sparsity": 0.5, "metrics": "l2_norm"})
pruner.register_mask()
assert model.conv0.masker.ot_remove_idx == conv0_idxes
assert model.conv1.masker.ot_remove_idx == conv1_idxes
assert model.conv2.masker.ot_remove_idx == conv0_idxes + [8 + i for i in conv1_idxes]
assert model.conv3.masker.ot_remove_idx == conv3_idxes
pruner.apply_mask()
model(torch.ones(1, 3, 9, 9))
assert model.conv0.out_channels == 4
assert model.conv1.out_channels == 4
assert model.conv2.out_channels == 8
assert model.conv3.in_channels == 8
assert model.conv3.out_channels == 16
for i in range(100):
test_func()
def test_flatten_graph(self):
class TestFlattenModel(nn.Module):
def __init__(self):
super(TestFlattenModel, self).__init__()
self.conv0 = nn.Conv2d(3, 16, (3, 3))
self.conv1 = nn.Conv2d(16, 32, (3, 3))
self.dropout = nn.Dropout()
self.linear1 = nn.Linear(800, 100)
self.linear2 = nn.Linear(100, 10)
def forward(self, x):
conv0 = self.conv0(x)
conv1 = self.conv1(conv0)
flatten0 = torch.flatten(conv1, 1)
dropout0 = self.dropout(flatten0)
linear1 = self.linear1(dropout0)
linear2 = self.linear2(linear1)
return linear2
def test_func():
model = TestFlattenModel()
ch_32 = get_rd_lst(32)
init_conv_by_list(model.conv1, ch_32)
importance_conv1 = l2_norm(model.conv1.weight, model.conv1).tolist()
conv1_idxes = get_topk(importance_conv1, 24)
pruner = OneShotChannelPruner(model, torch.ones(1, 3, 9, 9), {"sparsity": 0.75, "metrics": "l2_norm"})
pruner.register_mask()
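# conv1 yields a 5x5 feature map for the 9x9 input (two 3x3 convs), so each of its
# 32 channels owns 25 of linear1's 800 input features; build the removed feature
# indices per pruned channel accordingly.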
linear1_idxes = np.array([i for i in range(800)])
linear1_idxes = linear1_idxes.reshape([32, 25])
linear1_idxes = linear1_idxes[conv1_idxes, :]
linear1_idxes = linear1_idxes.reshape([600]).tolist()
assert model.conv1.masker.ot_remove_idx == conv1_idxes
assert model.linear1.masker.in_remove_idx == linear1_idxes
pruner.apply_mask()
model(torch.ones(1, 3, 9, 9))
assert model.conv0.out_channels == 4
assert model.conv1.out_channels == 8
assert model.linear1.in_features == 200
assert model.linear1.out_features == 25
assert model.linear2.in_features == 25
assert model.linear2.out_features == 10
for i in range(10):
test_func()
def test_loop_cat_graph(self):
class TestModel(nn.Module):
def __init__(self):
super(TestModel, self).__init__()
self.conv1 = nn.Conv2d(3, 16, (3, 3))
self.conv2 = nn.Conv2d(64, 128, (3, 3))
self.relu1 = torch.nn.modules.activation.ReLU(inplace=True)
self.relu2 = torch.nn.modules.activation.ReLU(inplace=True)
self.relu3 = torch.nn.modules.activation.ReLU(inplace=True)
self.relu4 = torch.nn.modules.activation.ReLU(inplace=True)
def forward(self, x):
conv1 = self.conv1(x)
relu1 = self.relu1(conv1)
relu2 = self.relu2(conv1)
relu3 = self.relu3(conv1)
relu4 = self.relu4(conv1)
z = torch.cat([relu1, relu2, relu3, relu4], dim=1)
return self.conv2(z)
def test_func():
model = TestModel()
ch_16 = get_rd_lst(16)
init_conv_by_list(model.conv1, ch_16)
importance_conv1 = l2_norm(model.conv1.weight, model.conv1).tolist()
conv1_idxes = get_topk(importance_conv1, 8)
pruner = OneShotChannelPruner(model, torch.randn((1, 3, 9, 9)), {"sparsity": 0.5, "metrics": "l2_norm"})
pruner.register_mask()
assert model.conv1.masker.ot_remove_idx == conv1_idxes
assert model.conv2.masker.in_remove_idx == [j + i * 16 for i in range(4) for j in conv1_idxes]
pruner.apply_mask()
model(torch.ones(1, 3, 9, 9))
assert model.conv1.out_channels == 8
assert model.conv2.in_channels == 32
assert model.conv2.out_channels == 64
for i in range(10):
test_func()
def test_group_cat_graph(self):
class TestModel(nn.Module):
def __init__(self):
super(TestModel, self).__init__()
self.conv1 = nn.Conv2d(3, 16, (3, 3))
self.conv2 = nn.Conv2d(3, 32, (3, 3))
self.conv3 = nn.Conv2d(48, 64, (3, 3), groups=4)
def forward(self, x):
conv1 = self.conv1(x)
conv2 = self.conv2(x)
cat0 = torch.cat([conv1, conv2], dim=1)
return self.conv3(cat0)
def test_func():
model = TestModel()
ch_16 = get_rd_lst(16)
ch_32 = get_rd_lst(32)
init_conv_by_list(model.conv1, ch_16)
init_conv_by_list(model.conv2, ch_32)
importance_conv1 = l2_norm(model.conv1.weight, model.conv1).tolist()
importance_conv2 = l2_norm(model.conv2.weight, model.conv2).tolist()
conv1_idxes_g1 = get_topk(importance_conv1[:12], 6)
conv1_idxes_g2 = get_topk(importance_conv1[12:], 2, offset=12)
conv1_idxes = conv1_idxes_g1 + conv1_idxes_g2
conv2_idxes_g1 = get_topk(importance_conv2[:8], 4)
conv2_idxes_g2 = get_topk(importance_conv2[8:20], 6, offset=8)
conv2_idxes_g3 = get_topk(importance_conv2[20:], 6, offset=20)
conv2_idxes = conv2_idxes_g1 + conv2_idxes_g2 + conv2_idxes_g3
pruner = OneShotChannelPruner(model, torch.ones(1, 3, 9, 9), {"sparsity": 0.5, "metrics": "l2_norm"})
pruner.register_mask()
assert model.conv1.masker.ot_remove_idx == conv1_idxes
assert model.conv2.masker.ot_remove_idx == conv2_idxes
assert model.conv3.masker.in_remove_idx == conv1_idxes + [i + 16 for i in conv2_idxes]
pruner.apply_mask()
model(torch.ones(1, 3, 9, 9))
assert model.conv1.out_channels == 8
assert model.conv2.out_channels == 16
assert model.conv3.in_channels == 24
assert model.conv3.out_channels == 32
for i in range(10):
test_func()
def test_nonaligned_cat_graph(self):
class TestModel(nn.Module):
def __init__(self):
super(TestModel, self).__init__()
self.conv1 = nn.Conv2d(3, 8, (3, 3))
self.conv2 = nn.Conv2d(3, 4, (3, 3))
self.conv3 = nn.Conv2d(3, 4, (3, 3))
self.conv4 = nn.Conv2d(16, 64, (3, 3))
def forward(self, x):
conv1 = self.conv1(x)
conv2 = self.conv2(x)
conv3 = self.conv3(x)
cat0 = torch.cat([conv1, conv2, conv3], dim=1)
return self.conv4(cat0)
model = TestModel()
pruner = OneShotChannelPruner(model, torch.ones(1, 3, 9, 9), {"sparsity": 0.25, "metrics": "l2_norm"})
pruner.prune()
model(torch.ones(1, 3, 9, 9))
assert model.conv1.out_channels == 6
assert model.conv2.out_channels == 3
assert model.conv3.out_channels == 3
assert model.conv4.in_channels == 12
assert model.conv4.out_channels == 48
def test_group_graph(self):
class TestModel(nn.Module):
def __init__(self):
super(TestModel, self).__init__()
self.conv0 = nn.Conv2d(3, 16, (3, 3))
self.conv1 = nn.Conv2d(16, 32, (3, 3), groups=8)
self.conv2 = nn.Conv2d(32, 32, (3, 3))
def forward(self, x):
conv0 = self.conv0(x)
conv1 = self.conv1(conv0)
return self.conv2(conv1)
def test_func():
model = TestModel()
ch_16 = get_rd_lst(16)
ch_32 = get_rd_lst(32)
init_conv_by_list(model.conv0, ch_16)
init_conv_by_list(model.conv1, ch_32)
importance_conv0 = l2_norm(model.conv0.weight, model.conv0).tolist()
importance_conv1 = l2_norm(model.conv1.weight, model.conv1).tolist()
conv0_idxes = []
conv1_idxes = []
for i in range(8):
conv0_idxes += get_topk(importance_conv0[i * 2 : (i + 1) * 2], 1, offset=i * 2)
conv1_idxes += get_topk(importance_conv1[i * 4 : (i + 1) * 4], 2, offset=i * 4)
pruner = OneShotChannelPruner(model, torch.ones(1, 3, 9, 9), {"sparsity": 0.5, "metrics": "l2_norm"})
pruner.register_mask()
removed_idx_group_check(model.conv0.masker.ot_remove_idx, 16, 8, 8)
removed_idx_group_check(model.conv1.masker.in_remove_idx, 16, 8, 8)
removed_idx_group_check(model.conv1.masker.ot_remove_idx, 32, 16, 8)
removed_idx_group_check(model.conv2.masker.in_remove_idx, 32, 16, 8)
assert model.conv0.masker.ot_remove_idx == conv0_idxes
assert model.conv1.masker.ot_remove_idx == conv1_idxes
pruner.apply_mask()
model(torch.ones(1, 3, 9, 9))
assert model.conv0.out_channels == 8
assert model.conv1.in_channels == 8
assert model.conv1.out_channels == 16
assert model.conv2.out_channels == 16
for i in range(10):
test_func()
def test_multi_group_graph(self):
class TestModel(nn.Module):
def __init__(self):
super(TestModel, self).__init__()
self.conv0 = nn.Conv2d(3, 16, (3, 3))
self.conv1 = nn.Conv2d(16, 32, (3, 3), groups=4)
self.conv2 = nn.Conv2d(16, 32, (3, 3), groups=8)
def forward(self, x):
conv0 = self.conv0(x)
conv1 = self.conv1(conv0)
conv2 = self.conv2(conv0)
return conv1, conv2
def test_func():
model = TestModel()
ch_16 = get_rd_lst(16)
ch_32 = get_rd_lst(32)
init_conv_by_list(model.conv0, ch_16)
init_conv_by_list(model.conv1, ch_32)
init_conv_by_list(model.conv2, ch_32)
conv0_idxes = []
conv1_idxes = []
conv2_idxes = []
importance_conv0 = l2_norm(model.conv0.weight, model.conv0).tolist()
importance_conv1 = l2_norm(model.conv1.weight, model.conv1).tolist()
importance_conv2 = l2_norm(model.conv2.weight, model.conv2).tolist()
for i in range(4):
conv1_idxes += get_topk(importance_conv1[i * 8 : (i + 1) * 8], 4, offset=i * 8)
for i in range(8):
conv0_idxes += get_topk(importance_conv0[i * 2 : (i + 1) * 2], 1, offset=i * 2)
conv2_idxes += get_topk(importance_conv2[i * 4 : (i + 1) * 4], 2, offset=i * 4)
pruner = OneShotChannelPruner(model, torch.ones(1, 3, 9, 9), {"sparsity": 0.5, "metrics": "l2_norm"})
pruner.register_mask()
removed_idx_group_check(model.conv0.masker.ot_remove_idx, 16, 8, 8)
removed_idx_group_check(model.conv1.masker.in_remove_idx, 16, 8, 8)
removed_idx_group_check(model.conv2.masker.in_remove_idx, 16, 8, 8)
removed_idx_group_check(model.conv1.masker.ot_remove_idx, 32, 16, 4)
removed_idx_group_check(model.conv2.masker.ot_remove_idx, 32, 16, 8)
assert model.conv0.masker.ot_remove_idx == conv0_idxes
assert model.conv1.masker.ot_remove_idx == conv1_idxes
assert model.conv2.masker.ot_remove_idx == conv2_idxes
pruner.apply_mask()
model(torch.ones(1, 3, 9, 9))
assert model.conv0.out_channels == 8
assert model.conv1.in_channels == 8
assert model.conv1.out_channels == 16
assert model.conv2.out_channels == 16
for i in range(10):
test_func()
def test_add_cat_group_graph(self):
class TestModel(nn.Module):
def __init__(self):
super(TestModel, self).__init__()
self.conv0 = nn.Conv2d(3, 16, (3, 3))
self.conv1 = nn.Conv2d(16, 32, (3, 3), groups=2)
self.conv2 = nn.Conv2d(16, 32, (3, 3), groups=4)
self.conv3 = nn.Conv2d(3, 16, (3, 3))
self.conv4 = nn.Conv2d(16, 32, (3, 3), groups=8)
self.conv5 = nn.Conv2d(64, 64, (3, 3))
def forward(self, x):
conv0 = self.conv0(x)
conv1 = self.conv1(conv0)
conv2 = self.conv2(conv0)
add1 = conv1.__add__(conv2)
conv3 = self.conv3(x)
conv4 = self.conv4(conv3)
cat0 = torch.cat([add1, conv4], dim=1)
return self.conv5(cat0)
def test_func():
while True:
model = TestModel()
ch_conv0 = get_rd_lst(16)
ch_conv1 = get_rd_lst(32)
ch_conv2 = get_rd_lst(32)
ch_conv4 = get_rd_lst(32)
init_conv_by_list(model.conv0, ch_conv0)
init_conv_by_list(model.conv1, ch_conv1)
init_conv_by_list(model.conv2, ch_conv2)
init_conv_by_list(model.conv4, ch_conv4)
importance_conv0 = l2_norm(model.conv0.weight, model.conv0).tolist()
importance_conv1 = l2_norm(model.conv1.weight, model.conv1).tolist()
importance_conv2 = l2_norm(model.conv2.weight, model.conv2).tolist()
importance_conv4 = l2_norm(model.conv4.weight, model.conv4).tolist()
importance_conv12 = list(map(add, importance_conv1, importance_conv2))
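# As above, resample the weights until the summed importance values are all unique,
# otherwise the expected remove indices would be ambiguous.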
if len(importance_conv12) == len(set(importance_conv12)):
break
conv0_idxes = []
conv4_idxes = []
conv12_idxes = []
for i in range(4):
conv0_idxes += get_topk(importance_conv0[i * 4 : (i + 1) * 4], 2, i * 4)
conv12_idxes += get_topk(importance_conv12[i * 8 : (i + 1) * 8], 4, i * 8)
for i in range(8):
conv4_idxes += get_topk(importance_conv4[i * 4 : (i + 1) * 4], 2, i * 4)
pruner = OneShotChannelPruner(model, torch.ones(1, 3, 9, 9), {"sparsity": 0.5, "metrics": "l2_norm"})
pruner.register_mask()
assert model.conv0.masker.ot_remove_idx == conv0_idxes
assert model.conv1.masker.ot_remove_idx == conv12_idxes
assert model.conv2.masker.ot_remove_idx == conv12_idxes
assert model.conv5.masker.in_remove_idx == conv12_idxes + [i + 32 for i in conv4_idxes]
removed_idx_group_check(model.conv0.masker.ot_remove_idx, 16, 8, 4)
removed_idx_group_check(model.conv1.masker.in_remove_idx, 16, 8, 4)
removed_idx_group_check(model.conv2.masker.in_remove_idx, 16, 8, 4)
removed_idx_group_check(model.conv1.masker.ot_remove_idx, 32, 16, 4)
removed_idx_group_check(model.conv2.masker.ot_remove_idx, 32, 16, 4)
removed_idx_group_check(model.conv4.masker.in_remove_idx, 16, 8, 8)
removed_idx_group_check(model.conv4.masker.ot_remove_idx, 32, 16, 8)
removed_idx_group_check(model.conv5.masker.in_remove_idx[:16], 32, 16, 4)
removed_idx_group_check(model.conv5.masker.in_remove_idx[16:], 32, 16, 8, offset=32)
pruner.apply_mask()
model(torch.ones(1, 3, 9, 9))
for i in range(50):
test_func()
def test_multi_cat_graph(self):
class TestModel(nn.Module):
def __init__(self):
super(TestModel, self).__init__()
self.conv0 = nn.Conv2d(3, 8, (3, 3))
self.conv1 = nn.Conv2d(8, 16, (3, 3))
self.conv2 = nn.Conv2d(8, 16, (3, 3), groups=4)
self.conv3 = nn.Conv2d(8, 16, (3, 3))
self.conv4 = nn.Conv2d(32, 64, (3, 3))
self.conv5 = nn.Conv2d(32, 64, (3, 3))
def forward(self, x):
conv0 = self.conv0(x)
relu0 = F.relu(conv0)
x1 = self.conv1(relu0)
x2 = self.conv2(relu0)
x3 = self.conv3(relu0)
cat0 = torch.cat([x1, x2], dim=1)
cat1 = torch.cat([x2, x3], dim=1)
cat0 = self.conv4(cat0)
cat1 = self.conv5(cat1)
return cat0, cat1
def test_func():
model = TestModel()
conv0_ch = get_rd_lst(8)
conv1_ch = get_rd_lst(16)
conv2_ch = get_rd_lst(16)
conv3_ch = get_rd_lst(16)
init_conv_by_list(model.conv0, conv0_ch)
init_conv_by_list(model.conv1, conv1_ch)
init_conv_by_list(model.conv2, conv2_ch)
init_conv_by_list(model.conv3, conv3_ch)
importance_conv0 = l2_norm(model.conv0.weight, model.conv0)
importance_conv1 = l2_norm(model.conv1.weight, model.conv1)
importance_conv2 = l2_norm(model.conv2.weight, model.conv2)
importance_conv3 = l2_norm(model.conv3.weight, model.conv3)
conv0_idxes = []
conv2_idxes = []
conv1_idxes = get_topk(importance_conv1, 8)
conv3_idxes = get_topk(importance_conv3, 8)
for i in range(4):
conv0_idxes += get_topk(importance_conv0[i * 2 : (i + 1) * 2], 1, offset=i * 2)
conv2_idxes += get_topk(importance_conv2[i * 4 : (i + 1) * 4], 2, offset=i * 4)
pruner = OneShotChannelPruner(model, torch.ones(1, 3, 9, 9), {"sparsity": 0.5, "metrics": "l2_norm"})
pruner.register_mask()
assert model.conv0.masker.ot_remove_idx == conv0_idxes
assert model.conv1.masker.ot_remove_idx == conv1_idxes
assert model.conv2.masker.ot_remove_idx == conv2_idxes
assert model.conv3.masker.ot_remove_idx == conv3_idxes
removed_idx_group_check(model.conv0.masker.ot_remove_idx, 8, 4, 4)
removed_idx_group_check(model.conv1.masker.in_remove_idx, 8, 4, 4)
removed_idx_group_check(model.conv2.masker.in_remove_idx, 8, 4, 4)
removed_idx_group_check(model.conv3.masker.in_remove_idx, 8, 4, 4)
removed_idx_group_check(model.conv2.masker.ot_remove_idx, 16, 8, 4)
removed_idx_group_check(model.conv4.masker.in_remove_idx[8:], 16, 8, 4, offset=16)
removed_idx_group_check(model.conv5.masker.in_remove_idx[:8], 16, 8, 4)
pruner.apply_mask()
model(torch.ones(1, 3, 9, 9))
for i in range(50):
test_func()
def test_split_group_graph(self):
class TestModel(nn.Module):
def __init__(self):
super(TestModel, self).__init__()
self.conv1 = nn.Conv2d(3, 32, (3, 3))
self.conv2 = nn.Conv2d(3, 16, (3, 3))
self.conv3 = nn.Conv2d(3, 16, (3, 3))
self.conv4 = nn.Conv2d(16, 32, (3, 3), groups=2)
self.conv5 = nn.Conv2d(16, 32, (3, 3), groups=4)
def forward(self, x):
conv1 = self.conv1(x)
conv2 = self.conv2(x)
conv3 = self.conv3(x)
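# conv1's 32 channels are split into two 16-channel halves below; each half is added
# to a different branch, so its pruning choice is tied to that branch's conv.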
size = conv1.shape[1] // 2
sp1, sp2 = torch.split(conv1, size, 1)
add0 = conv2 + sp1
add1 = sp2 + conv3
return self.conv4(add0), self.conv5(add1)
def test_func():
model = TestModel()
ch_conv1 = get_rd_lst(32)
ch_conv2 = get_rd_lst(16)
ch_conv3 = get_rd_lst(16)
init_conv_by_list(model.conv1, ch_conv1)
init_conv_by_list(model.conv2, ch_conv2)
init_conv_by_list(model.conv3, ch_conv3)
importance_conv1 = l2_norm(model.conv1.weight, model.conv1)
importance_conv2 = l2_norm(model.conv2.weight, model.conv2)
importance_conv3 = l2_norm(model.conv3.weight, model.conv3)
importance_conv12 = list(map(add, importance_conv1[:16], importance_conv2))
importance_conv13 = list(map(add, importance_conv1[16:], importance_conv3))
conv2_idxes = []
conv3_idxes = []
for i in range(2):
conv2_idxes += get_topk(importance_conv12[i * 8 : (i + 1) * 8], 4, offset=i * 8)
for i in range(4):
conv3_idxes += get_topk(importance_conv13[i * 4 : (i + 1) * 4], 2, offset=i * 4)
conv1_idxes = conv2_idxes + [i + 16 for i in conv3_idxes]
pruner = OneShotChannelPruner(model, torch.ones(1, 3, 9, 9), {"sparsity": 0.5, "metrics": "l2_norm"})
pruner.register_mask()
assert model.conv1.masker.ot_remove_idx == conv1_idxes
assert model.conv2.masker.ot_remove_idx == conv2_idxes
assert model.conv3.masker.ot_remove_idx == conv3_idxes
assert model.conv4.masker.in_remove_idx == conv2_idxes
assert model.conv5.masker.in_remove_idx == conv3_idxes
removed_idx_group_check(model.conv1.masker.ot_remove_idx[:8], 16, 8, 2)
removed_idx_group_check(model.conv1.masker.ot_remove_idx[8:], 16, 8, 4, offset=16)
removed_idx_group_check(model.conv2.masker.ot_remove_idx, 16, 8, 2)
removed_idx_group_check(model.conv3.masker.ot_remove_idx, 16, 8, 4)
removed_idx_group_check(model.conv4.masker.in_remove_idx, 16, 8, 2)
removed_idx_group_check(model.conv4.masker.ot_remove_idx, 32, 16, 2)
removed_idx_group_check(model.conv5.masker.in_remove_idx, 16, 8, 4)
removed_idx_group_check(model.conv5.masker.ot_remove_idx, 32, 16, 4)
pruner.apply_mask()
model(torch.ones(1, 3, 9, 9))
for i in range(50):
test_func()
def group_element_wise_graph(self, op):
class TestModel(nn.Module):
def __init__(self):
super(TestModel, self).__init__()
self.conv0 = nn.Conv2d(16, 16, (1, 1), groups=8)
self.conv1 = nn.Conv2d(16, 16, (1, 1))
self.conv2 = nn.Conv2d(16, 16, (1, 1), groups=4)
def forward(self, x):
conv0 = self.conv0(x)
conv1 = self.conv1(x)
add0 = op(conv0, conv1)
conv2 = self.conv2(add0)
return conv2
def test_func():
model = TestModel()
ch_conv0 = get_rd_lst(16)
ch_conv1 = get_rd_lst(16)
ch_conv2 = get_rd_lst(16)
init_conv_by_list(model.conv0, ch_conv0)
init_conv_by_list(model.conv1, ch_conv1)
init_conv_by_list(model.conv2, ch_conv2)
importance_conv0 = l2_norm(model.conv0.weight, model.conv0)
importance_conv1 = l2_norm(model.conv1.weight, model.conv1)
importance_conv2 = l2_norm(model.conv2.weight, model.conv2)
importance_conv01 = list(map(add, importance_conv0, importance_conv1))
idxes_conv0 = []
idxes_conv2 = []
for i in range(8):
idxes_conv0 += get_topk(importance_conv01[i * 2 : (i + 1) * 2], 1, offset=i * 2)
for i in range(4):
idxes_conv2 += get_topk(importance_conv2[i * 4 : (i + 1) * 4], 2, offset=i * 4)
pruner = OneShotChannelPruner(model, torch.ones(16, 16, 9, 9), {"sparsity": 0.5, "metrics": "l2_norm"})
pruner.register_mask()
assert model.conv0.masker.ot_remove_idx == idxes_conv0
assert model.conv1.masker.ot_remove_idx == idxes_conv0
assert model.conv2.masker.ot_remove_idx == idxes_conv2
removed_idx_group_check(model.conv0.masker.ot_remove_idx, 16, 8, 8)
removed_idx_group_check(model.conv1.masker.ot_remove_idx, 16, 8, 8)
removed_idx_group_check(model.conv2.masker.ot_remove_idx, 16, 8, 4)
pruner.apply_mask()
model(torch.ones(16, 16, 9, 9))
for i in range(50):
test_func()
def test_group_element_wise_graph(self):
self.group_element_wise_graph(add)
self.group_element_wise_graph(mul)
self.group_element_wise_graph(sub)
self.group_element_wise_graph(truediv)
def group_element_wise_split_graph(self, op):
class TestModel(nn.Module):
def __init__(self):
super(TestModel, self).__init__()
self.conv0 = nn.Conv2d(3, 32, (1, 1))
self.conv1 = nn.Conv2d(16, 16, (1, 1), groups=8)
self.conv2 = nn.Conv2d(16, 16, (1, 1), groups=2)
def forward(self, x):
conv0 = self.conv0(x)
sp0, sp1 = torch.split(conv0, conv0.shape[1] // 2, 1)
conv1 = self.conv1(sp0)
add0 = op(conv1, sp1)
conv2 = self.conv2(add0)
return conv2
def test_func():
model = TestModel()
ch_conv0 = get_rd_lst(32)
ch_conv1 = get_rd_lst(16)
init_conv_by_list(model.conv0, ch_conv0)
init_conv_by_list(model.conv1, ch_conv1)
importance_conv0 = l2_norm(model.conv0.weight, model.conv0)
importance_conv1 = l2_norm(model.conv1.weight, model.conv1)
importance_conv01 = list(map(add, importance_conv0[16:], importance_conv1))
idxes_conv0 = []
idxes_conv1 = []
for i in range(8):
idxes_conv0 += get_topk(importance_conv0[i * 2 : (i + 1) * 2], 1, offset=i * 2)
idxes_conv1 += get_topk(importance_conv01[i * 2 : (i + 1) * 2], 1, offset=i * 2)
idxes_conv0 += [i + 16 for i in idxes_conv1]
pruner = OneShotChannelPruner(model, torch.ones(1, 3, 9, 9), {"sparsity": 0.5, "metrics": "l2_norm"})
pruner.register_mask()
assert model.conv0.masker.ot_remove_idx == idxes_conv0
assert model.conv1.masker.ot_remove_idx == idxes_conv1
removed_idx_group_check(model.conv0.masker.ot_remove_idx, 32, 16, 8)
removed_idx_group_check(model.conv1.masker.in_remove_idx, 16, 8, 8)
removed_idx_group_check(model.conv1.masker.ot_remove_idx, 16, 8, 8)
removed_idx_group_check(model.conv2.masker.in_remove_idx, 16, 8, 8)
removed_idx_group_check(model.conv2.masker.ot_remove_idx, 16, 8, 2)
pruner.apply_mask()
model(torch.ones(1, 3, 9, 9))
for i in range(50):
test_func()
def test_group_element_wise_split_graph(self):
self.group_element_wise_split_graph(add)
self.group_element_wise_split_graph(sub)
self.group_element_wise_split_graph(mul)
self.group_element_wise_split_graph(truediv)
def test_res_2_net_block(self):
class TestModel(nn.Module):
def __init__(self):
super(TestModel, self).__init__()
self.conv1 = nn.Conv2d(3, 64, (1, 1))
self.conv2 = nn.Conv2d(16, 16, (1, 1), groups=8)
self.conv3 = nn.Conv2d(16, 16, (1, 1), groups=2)
self.conv4 = nn.Conv2d(16, 16, (1, 1))
self.conv5 = nn.Conv2d(64, 64, (1, 1))
def forward(self, x):
conv1 = self.conv1(x)
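# Res2Net-style block: conv1's 64 channels are split into four 16-channel slices that
# are progressively mixed and re-concatenated before conv5.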
size0 = conv1.shape[1] // 4
split0 = torch.split(conv1, size0, 1)
conv2 = self.conv2(split0[0])
add0 = conv2 + split0[1]
conv3 = self.conv3(add0)
add3 = conv3 + split0[2]
conv4 = self.conv4(add3)
cat0 = torch.cat([conv2, conv3, conv4, split0[3]], 1)
return self.conv5(cat0)
model = TestModel()
pruner = OneShotChannelPruner(model, torch.ones(1, 3, 9, 9), {"sparsity": 0.5, "metrics": "l2_norm"})
pruner.register_mask()
removed_idx_group_check(model.conv1.masker.ot_remove_idx[:16], 32, 16, 8)
removed_idx_group_check(model.conv1.masker.ot_remove_idx[16:24], 16, 8, 2, offset=32)
pruner.apply_mask()
model(torch.ones(1, 3, 9, 9))
def test_conv1d_block(self):
class TestModel(nn.Module):
def __init__(self):
super(TestModel, self).__init__()
self.conv1 = nn.Conv1d(3, 16, (3,))
self.conv2 = nn.Conv1d(16, 32, (3,))
def forward(self, x):
conv1 = self.conv1(x)
return self.conv2(conv1)
model = TestModel()
pruner = OneShotChannelPruner(model, torch.ones(1, 3, 9), {"sparsity": 0.25, "metrics": "l2_norm"})
pruner.prune()
model(torch.ones(1, 3, 9))
assert model.conv1.out_channels == 12
assert model.conv2.out_channels == 24
def test_loop_conv_block(self):
class TestModel(nn.Module):
def __init__(self):
super(TestModel, self).__init__()
self.conv0 = nn.Conv2d(3, 16, (3, 3))
self.conv1 = nn.Conv2d(16, 32, (3, 3))
def forward(self, x):
conv0 = self.conv0(x)
return self.conv1(conv0 + conv0 + conv0)
def test_func():
model = TestModel()
ch_conv0 = get_rd_lst(16)
init_conv_by_list(model.conv0, ch_conv0)
importance_conv0 = l2_norm(model.conv0.weight, model.conv0)
idxes_conv0 = get_topk(importance_conv0, 8)
pruner = OneShotChannelPruner(model, torch.ones(1, 3, 9, 9), {"sparsity": 0.5, "metrics": "l2_norm"})
pruner.register_mask()
assert model.conv0.masker.ot_remove_idx == idxes_conv0
pruner.apply_mask()
model(torch.ones(1, 3, 9, 9))
for i in range(10):
test_func()
def test_multi_dim_fc(self):
class TestModel(nn.Module):
def __init__(self):
super(TestModel, self).__init__()
self.fc0 = nn.Linear(8, 32)
self.fc1 = nn.Linear(32, 32)
def forward(self, x):
fc0 = self.fc0(x)
fc1 = self.fc1(fc0)
return fc1
model = TestModel()
pruner = OneShotChannelPruner(model, torch.rand((16, 16, 8)), {"sparsity": 0.5, "metrics": "l2_norm"})
pruner.prune()
model(torch.rand((16, 8)))
assert model.fc0.out_features == 16
assert model.fc1.in_features == 16
assert model.fc1.out_features == 32
def test_rnn(self):
rnn_in_size = 28
rnn_hidden_size = 128
fc_out_channel = 10
class TestModel(nn.Module):
def __init__(self, *args, **kwargs):
super(TestModel, self).__init__()
assert 'cell_type' in kwargs
cell_type = kwargs.pop('cell_type')
assert cell_type in (nn.RNN, nn.GRU, nn.LSTM)
bidirectional = kwargs.get('bidirectional', False)
num_directions = 2 if bidirectional else 1
fc_in_channel = rnn_hidden_size * num_directions
if 'proj_size' in kwargs:
fc_in_channel = kwargs['proj_size'] * num_directions
self.rnn = cell_type(rnn_in_size, rnn_hidden_size, *args, **kwargs)
self.fc = nn.Linear(fc_in_channel, fc_out_channel)
def forward(self, x):
rnn, _ = self.rnn(x)
fc = self.fc(rnn)
return fc
for cell_type in (nn.RNN, nn.GRU, nn.LSTM):
for num_layers in (1, 2):
for bidirectional in (False, True):
for batch_first in (False, True):
for proj_size in (0, 120):
if cell_type != nn.LSTM and proj_size > 0:
continue
kwargs = {
'num_layers': num_layers,
'bidirectional': bidirectional,
'batch_first': batch_first,
'cell_type': cell_type,
}
if proj_size > 0:
if LooseVersion(torch.__version__) >= LooseVersion('1.8.0'):
kwargs.update({'proj_size': proj_size})
else:
continue
filtered_args = {k: v for k, v in kwargs.items() if k != 'cell_type'}
print(f'\nTesting {cell_type.__name__} with {filtered_args}')
model = TestModel(**kwargs)
pruner = OneShotChannelPruner(
model, torch.rand((3, 3, rnn_in_size)), {"sparsity": 0.5, "metrics": "l2_norm"}
)
pruner.prune()
model(torch.rand((3, 3, rnn_in_size)))
assert model.rnn.hidden_size == 64
def test_lstm_proj_add_fc(self):
if LooseVersion(torch.__version__) < LooseVersion('1.8.0'):
raise SkipTest("LSTM with projection is not supported in PyTorch < 1.8")
rnn_in_size = 28
rnn_hidden_size = 128
fc_out_channel = 10
proj_size = 120
class TestModel(nn.Module):
def __init__(self, *args, **kwargs):
super(TestModel, self).__init__()
bidirectional = kwargs.get('bidirectional', False)
num_directions = 2 if bidirectional else 1
fc_in_channel = proj_size * num_directions
self.rnn = nn.LSTM(rnn_in_size, rnn_hidden_size, proj_size=proj_size, *args, **kwargs)
self.fc0 = nn.Linear(rnn_in_size, fc_in_channel)
self.fc1 = nn.Linear(fc_in_channel, fc_out_channel)
def forward(self, x):
rnn, _ = self.rnn(x)
fc0 = self.fc0(x) + rnn
fc1 = self.fc1(fc0)
return fc1
for num_layers in (1, 2):
for bidirectional in (False, True):
kwargs = {
'num_layers': num_layers,
'bidirectional': bidirectional,
}
print(f'\nTesting with {kwargs}')
model = TestModel(**kwargs)
model(torch.rand((3, 3, rnn_in_size)))
pruner = OneShotChannelPruner(
model, torch.rand((3, 3, rnn_in_size)), {"sparsity": 0.5, "metrics": "l2_norm"}
)
pruner.prune()
model(torch.rand((3, 3, rnn_in_size)))
if __name__ == '__main__':
unittest.main()
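# --- Editor's note: minimal usage sketch, not part of the original test suite. ---
# It reuses the names imported by the tests above (torch, nn, OneShotChannelPruner)
# and shows the one-step prune() flow; the tests above also drive the same process
# piecewise via register_mask()/apply_mask(). Expected channel counts assume
# sparsity=0.5, as in those tests.
def example_prune_once():
    class TinyModel(nn.Module):
        def __init__(self):
            super().__init__()
            self.conv0 = nn.Conv2d(3, 16, (3, 3))
            self.conv1 = nn.Conv2d(16, 32, (3, 3))

        def forward(self, x):
            return self.conv1(self.conv0(x))

    model = TinyModel()
    pruner = OneShotChannelPruner(model, torch.ones(1, 3, 9, 9), {"sparsity": 0.5, "metrics": "l2_norm"})
    pruner.prune()
    assert model.conv0.out_channels == 8
    assert model.conv1.out_channels == 16
    return model(torch.ones(1, 3, 9, 9))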
|
doc/tutorial.py | alex123012/biotite | 208 | 11078667 |
import os.path
import os
import codeop
import logging
import copy
from importlib.util import module_from_spec, spec_from_loader
from sphinx.util.logging import getLogger
from sphinx.util import status_iterator
import sphinx_gallery.gen_rst as genrst
from sphinx_gallery.gen_gallery import DEFAULT_GALLERY_CONF
import sphinx_gallery.scrapers as scrapers
import sphinx_gallery.py_source_parser as parser
import biotite
def create_tutorial(src_dir, target_dir):
logger = getLogger('sphinx-gallery')
logger.info("generating tutorial...", color="white")
with open(os.path.join(src_dir, "scripts"), "r") as file:
scripts = [line.strip() for line in file.read().splitlines()
if line.strip() != "" and line.strip()[0] != "#"]
iterator = status_iterator(
scripts, "generating tutorial...", length=len(scripts)
)
for script in iterator:
_create_tutorial_section(script, src_dir, target_dir)
# Create index
# String for enumeration of tutorial pages
include_string = "\n\n".join(
[f".. include:: {os.path.splitext(script)[0]}.rst"
for script in scripts]
)
index_content = \
f"""
========
Tutorial
========
.. contents::
:depth: 3
{include_string}
"""
with open(os.path.join(target_dir, f"index.rst"), "w") as f:
f.write(index_content)
def _create_tutorial_section(fname, src_dir, target_dir):
if not os.path.exists(target_dir):
os.makedirs(target_dir)
src_file = os.path.normpath(os.path.join(src_dir, fname))
# Check if the same tutorial script has been already run
md5_file = os.path.join(target_dir, f"{fname}.md5")
if _md5sum_is_current(src_file, md5_file):
return
file_conf, script_blocks = parser.split_code_and_text_blocks(src_file)
# Remove *.py suffix
base_image_name = os.path.splitext(fname)[0]
# Locate file in tutorial target directory
abs_base_image_name = os.path.join(
os.getcwd(), "tutorial", "target", base_image_name
)
image_path_template = abs_base_image_name + "_{0:02}.png"
fake_main = module_from_spec(spec_from_loader('__main__', None))
script_vars = {
"execute_script": True,
"image_path_iterator": scrapers.ImagePathIterator(image_path_template),
"src_file": src_file,
"memory_delta": [],
"fake_main": fake_main
}
tutorial_globals = fake_main.__dict__
tutorial_globals.update({
"__doc__": "",
})
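# Minimal sphinx-gallery configuration needed by execute_code_block(); figures are
# collected with the matplotlib scraper and any example error aborts the build.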
gallery_conf = copy.deepcopy(DEFAULT_GALLERY_CONF)
gallery_conf.update({
"abort_on_example_error": True,
"src_dir": os.getcwd(),
"execute_script": True,
"inspect_global_variables": False,
"call_memory": (lambda func: (0., func())),
"image_scrapers": (scrapers.matplotlib_scraper,),
})
compiler = codeop.Compile()
content_rst = ""
for block_label, block_content, line_no in script_blocks:
if block_label == "code":
# Run code and save output images
code_output = genrst.execute_code_block(
compiler=compiler,
block=(block_label, block_content, line_no),
example_globals=tutorial_globals,
script_vars=script_vars,
gallery_conf=gallery_conf
)
content_rst += genrst.codestr2rst(
block_content, lineno=None
) + "\n"
content_rst += code_output
else:
content_rst += block_content + "\n\n"
with open(os.path.join(target_dir, f"{base_image_name}.rst"), "w") as file:
file.write(content_rst)
# Write checksum of file to avoid unnecessary rerun
with open(md5_file, "w") as file:
file.write(genrst.get_md5sum(src_file))
def _md5sum_is_current(src_file, md5_file):
if not os.path.exists(md5_file):
return False
src_md5 = genrst.get_md5sum(src_file)
with open(md5_file, "r") as file:
ref_md5 = file.read()
return src_md5 == ref_md5
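# --- Editor's note: illustrative sketch, not part of the original file. ---
# One way this module could be driven (e.g. from a build script or Sphinx conf.py);
# the directory names are assumptions, not biotite's actual configuration.
if __name__ == "__main__":
    create_tutorial("tutorial/src", "tutorial/target")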
|
raw_packet/Scripts/DHCPv4/dhcpv4_server.py | Vladimir-Ivanov-Git/raw_packet | 146 | 11078671 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# region Description
"""
dhcpv4_server.py: DHCPv4 server (dhcpv4_server)
Author: <NAME>
License: MIT
Copyright 2020, Raw-packet Project
"""
# endregion
# region Import
from raw_packet.Utils.base import Base
from raw_packet.Servers.dhcpv4_server import DHCPv4Server
from argparse import ArgumentParser, RawDescriptionHelpFormatter
# endregion
# region Authorship information
__author__ = '<NAME>'
__copyright__ = 'Copyright 2020, Raw-packet Project'
__credits__ = ['']
__license__ = 'MIT'
__version__ = '0.2.1'
__maintainer__ = '<NAME>'
__email__ = '<EMAIL>'
__status__ = 'Development'
__script_name__ = 'DHCPv4 server (dhcpv4_server)'
# endregion
# region Main function
def main():
# region Init Raw-packet classes
base: Base = Base(admin_only=True, available_platforms=['Linux', 'Darwin', 'Windows'])
# endregion
# region Parse script arguments
parser: ArgumentParser = ArgumentParser(description=base.get_banner(__script_name__),
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('-i', '--interface', help='Set the interface name used to send reply packets')
parser.add_argument('-f', '--first_offer_ip', type=str, help='Set first client ip for offering', default=None)
parser.add_argument('-l', '--last_offer_ip', type=str, help='Set last client ip for offering', default=None)
parser.add_argument('-m', '--target_mac', type=str, help='Set target MAC address', default=None)
parser.add_argument('-t', '--target_ip', type=str, help='Set client IP address with MAC in --target_mac',
default=None)
parser.add_argument('--netmask', type=str, help='Set network mask', default=None)
parser.add_argument('--dhcp_mac', type=str, help='Set DHCP server MAC address, if not set use your MAC address',
default=None)
parser.add_argument('--dhcp_ip', type=str, help='Set DHCP server IP address, if not set use your IP address',
default=None)
parser.add_argument('--router', type=str, help='Set router IP address, if not set use your ip address',
default=None)
parser.add_argument('--dns', type=str, help='Set DNS server IP address, if not set use your ip address',
default=None)
parser.add_argument('--tftp', type=str, help='Set TFTP server IP address', default=None)
parser.add_argument('--wins', type=str, help='Set WINS server IP address', default=None)
parser.add_argument('--domain', type=str, help='Set domain name for search, default=local', default='local')
parser.add_argument('--lease_time', type=int, help='Set lease time, default=172800', default=172800)
parser.add_argument('--discover', action='store_true', help='Send DHCP discover packets in the background thread')
parser.add_argument('-O', '--shellshock_option_code', type=int,
help='Set dhcp option code for inject shellshock payload, default=114', default=114)
parser.add_argument('-c', '--shellshock_command', type=str, help='Set shellshock command in DHCP client')
parser.add_argument('-b', '--bind_shell', action='store_true', help='Use awk bind tcp shell in DHCP client')
parser.add_argument('-p', '--bind_port', type=int, help='Set the port to listen on for the bind shell (default=1234)',
default=1234)
parser.add_argument('-N', '--nc_reverse_shell', action='store_true',
help='Use nc reverse tcp shell in DHCP client')
parser.add_argument('-E', '--nce_reverse_shell', action='store_true',
help='Use nc -e reverse tcp shell in DHCP client')
parser.add_argument('-R', '--bash_reverse_shell', action='store_true',
help='Use bash reverse tcp shell in DHCP client')
parser.add_argument('-e', '--reverse_port', type=int, help='Set the port to listen on for the reverse shell (default=443)',
default=443)
parser.add_argument('-n', '--without_network', action='store_true', help='Do not add network configuration to the payload')
parser.add_argument('-B', '--without_base64', action='store_true', help='Do not use base64 encoding in the payload')
parser.add_argument('--ip_path', type=str,
help='Set path to "ip" in shellshock payload, default = /bin/', default='/bin/')
parser.add_argument('--iface_name', type=str,
help='Set iface name in shellshock payload, default = eth0', default='eth0')
parser.add_argument('--broadcast_response', action='store_true', help='Send broadcast response')
parser.add_argument('--dnsop', action='store_true', help='Do not send DHCP OFFER packets')
parser.add_argument('--exit', action='store_true', help='Exit on success MiTM attack')
parser.add_argument('--apple', action='store_true', help='Add delay before send DHCP ACK')
parser.add_argument('-q', '--quiet', action='store_true', help='Minimal output')
args = parser.parse_args()
# endregion
# region Print banner if argument quit is not set
if not args.quiet:
base.print_banner(__script_name__)
# endregion
# region Get your network settings
current_network_interface: str = \
base.network_interface_selection(interface_name=args.interface,
message='Please select a network interface for ' +
__script_name__ + ' from table: ')
# endregion
try:
dhcpv4_server: DHCPv4Server = DHCPv4Server(network_interface=current_network_interface)
dhcpv4_server.start(target_mac_address=args.target_mac,
target_ipv4_address=args.target_ip,
first_offer_ipv4_address=args.first_offer_ip,
last_offer_ipv4_address=args.last_offer_ip,
dhcp_server_mac_address=args.dhcp_mac,
dhcp_server_ipv4_address=args.dhcp_ip,
dns_server_ipv4_address=args.dns,
tftp_server_ipv4_address=args.tftp,
wins_server_ipv4_address=args.wins,
router_ipv4_address=args.router,
domain_search=args.domain,
ipv4_network_mask=args.netmask,
lease_time=args.lease_time,
shellshock_option_code=args.shellshock_option_code,
send_dhcp_discover_packets=args.discover,
send_dhcp_offer_packets=not args.dnsop,
send_broadcast_dhcp_response=args.broadcast_response,
exit_on_success=args.exit,
apple=args.apple,
quiet=args.quiet)
except KeyboardInterrupt:
base.print_info('Exit')
exit(0)
except AssertionError as Error:
base.print_error(Error.args[0])
exit(1)
# endregion
# region Call Main function
if __name__ == "__main__":
main()
# endregion
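# --- Editor's note: example invocation, not part of the original script. ---
# All flags are defined in the argument parser above; the interface name and address
# range are placeholders:
#   sudo python3 dhcpv4_server.py -i eth0 -f 192.168.0.100 -l 192.168.0.200 --quiet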
|
examples/create_boxplots.py | sevagh/sigsep-mus-eval | 116 | 11078677 | import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import museval
comparisons = museval.MethodStore()
comparisons.add_sisec18()
agg_df = comparisons.agg_frames_scores()
sns.set()
sns.set_context("notebook")
metrics = ['SDR']
selected_targets = ['vocals', 'drums', 'bass', 'other']
oracles = [
'IBM1', 'IBM2', 'IRM1', 'IRM2', 'MWF', 'IMSK'
]
# Convert to Pandas Dataframes
agg_df['oracle'] = agg_df.method.isin(oracles)
agg_df = agg_df[agg_df.target.isin(selected_targets)].dropna()
# Get sorting keys (sorted by median of SDR:vocals)
df_sort_by = agg_df[
(agg_df.metric == "SDR") &
(agg_df.target == "vocals")
]
methods_by_sdr = df_sort_by.score.groupby(
df_sort_by.method
).median().sort_values().index.tolist()
# df = df[df.target == "vocals"]
g = sns.FacetGrid(
agg_df,
row="target",
col="metric",
row_order=selected_targets,
col_order=metrics,
size=4,
sharex=False,
aspect=3
)
g = (g.map(
sns.boxplot,
"score",
"method",
"oracle",
orient='h',
order=methods_by_sdr[::-1],
hue_order=[True, False],
showfliers=False,
notch=True
))
g.fig.tight_layout()
plt.subplots_adjust(hspace=0.2, wspace=0.1)
g.fig.savefig(
"boxplot.pdf",
bbox_inches='tight',
)
|
tests/tensorflow/test_models/__init__.py | sarthakpati/nncf | 310 | 11078711 | """
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from tests.tensorflow.test_models.densenet import DenseNet121
from tests.tensorflow.test_models.mobilenet_v2_slim import HubMobileNetV2
from tests.tensorflow.test_models.inception_resnet_v2 import InceptionResNetV2
from tests.tensorflow.test_models.inception_v3 import InceptionV3
from tests.tensorflow.test_models.mobilenet import MobileNet
from tests.tensorflow.test_models.mobilenet_v2 import MobileNetV2
from tests.tensorflow.test_models.nasnet import NASNetLarge
from tests.tensorflow.test_models.nasnet import NASNetMobile
from tests.tensorflow.test_models.resnet import ResNet50
from tests.tensorflow.test_models.resnet_v2 import ResNet50V2
from tests.tensorflow.test_models.vgg16 import VGG16
from tests.tensorflow.test_models.xception import Xception
from tests.tensorflow.test_models.retinanet import RetinaNet
from tests.tensorflow.test_models.sequential_model import SequentialModel
from tests.tensorflow.test_models.sequential_model import SequentialModelNoInput
from tests.tensorflow.test_models.mobilenet_v3 import MobileNetV3Small
from tests.tensorflow.test_models.mobilenet_v3 import MobileNetV3Large
from tests.tensorflow.test_models.shared_layers_model import SharedLayersModel
from tests.tensorflow.test_models.maskrcnn import MaskRCNN
from tests.tensorflow.test_models.yolo_v4 import YOLOv4
|
src/genie/libs/parser/junos/tests/ShowOspfDatabase/cli/equal/golden_output_expected.py | balmasea/genieparser | 204 | 11078720 |
expected_output = {
"ospf-database-information": {
"ospf-area-header": {"ospf-area": "0.0.0.8"},
"ospf-database": [
{
"advertising-router": "10.36.3.3",
"age": "61",
"checksum": "0xa127",
"lsa-id": "10.36.3.3",
"lsa-length": "2496",
"lsa-type": "Router",
"options": "0x22",
"sequence-number": "0x80004d2d",
},
{
"advertising-router": "10.100.5.5",
"age": "1469",
"checksum": "0xa1c",
"lsa-id": "10.100.5.5",
"lsa-length": "60",
"lsa-type": "Router",
"options": "0x22",
"sequence-number": "0x800019d7",
},
{
"advertising-router": "10.19.198.239",
"age": "622",
"checksum": "0x95bf",
"lsa-id": "10.19.198.239",
"lsa-length": "96",
"lsa-type": "Router",
"options": "0x22",
"sequence-number": "0x80000442",
},
{
"advertising-router": "10.34.2.250",
"age": "736",
"checksum": "0x26f6",
"lsa-id": "10.34.2.250",
"lsa-length": "144",
"lsa-type": "Router",
"options": "0x22",
"sequence-number": "0x8000205a",
},
{
"advertising-router": "10.34.2.251",
"age": "567",
"checksum": "0x1022",
"lsa-id": "10.34.2.251",
"lsa-length": "108",
"lsa-type": "Router",
"options": "0x22",
"sequence-number": "0x80001dde",
},
{
"advertising-router": "10.169.196.241",
"age": "35",
"checksum": "0x1055",
"lsa-id": "10.169.196.241",
"lsa-length": "144",
"lsa-type": "Router",
"options": "0x22",
"sequence-number": "0x800004a4",
},
{
"advertising-router": "10.169.14.240",
"age": "2732",
"checksum": "0x3a76",
"lsa-id": "10.169.14.240",
"lsa-length": "144",
"lsa-type": "Router",
"options": "0x22",
"sequence-number": "0x80001bc1",
},
{
"advertising-router": "10.169.14.241",
"age": "1468",
"checksum": "0x81fa",
"lsa-id": "10.169.14.241",
"lsa-length": "120",
"lsa-type": "Router",
"options": "0x22",
"sequence-number": "0x80001f67",
},
{
"advertising-router": "10.189.5.252",
"age": "1608",
"checksum": "0x1e2",
"lsa-id": "10.189.5.252",
"lsa-length": "120",
"lsa-type": "Router",
"options": "0x22",
"our-entry": True,
"sequence-number": "0x80001b9e",
},
{
"advertising-router": "10.189.5.253",
"age": "1689",
"checksum": "0xe230",
"lsa-id": "10.189.5.253",
"lsa-length": "108",
"lsa-type": "Router",
"options": "0x22",
"sequence-number": "0x80001b04",
},
{
"advertising-router": "192.168.36.119",
"age": "928",
"checksum": "0xc6a6",
"lsa-id": "192.168.36.119",
"lsa-length": "48",
"lsa-type": "Router",
"options": "0x22",
"sequence-number": "0x800019de",
},
{
"advertising-router": "192.168.36.120",
"age": "500",
"checksum": "0x2747",
"lsa-id": "192.168.36.120",
"lsa-length": "48",
"lsa-type": "Router",
"options": "0x22",
"sequence-number": "0x800019ea",
},
{
"advertising-router": "10.169.14.240",
"age": "485",
"checksum": "0xbb30",
"lsa-id": "192.168.36.49",
"lsa-length": "32",
"lsa-type": "Network",
"options": "0x22",
"sequence-number": "0x80000499",
},
{
"advertising-router": "10.169.14.240",
"age": "2292",
"checksum": "0x5f86",
"lsa-id": "192.168.36.57",
"lsa-length": "32",
"lsa-type": "Network",
"options": "0x22",
"sequence-number": "0x80000498",
},
{
"advertising-router": "10.100.5.5",
"age": "1469",
"checksum": "0xc57f",
"lsa-id": "10.1.0.0",
"lsa-length": "28",
"lsa-type": "OpaqArea",
"options": "0x20",
"sequence-number": "0x800019ac",
},
{
"advertising-router": "10.19.198.239",
"age": "622",
"checksum": "0x4e06",
"lsa-id": "10.1.0.0",
"lsa-length": "28",
"lsa-type": "OpaqArea",
"options": "0x20",
"sequence-number": "0x8000028c",
},
{
"advertising-router": "10.169.196.241",
"age": "521",
"checksum": "0xe9d4",
"lsa-id": "10.1.0.0",
"lsa-length": "28",
"lsa-type": "OpaqArea",
"options": "0x20",
"sequence-number": "0x80000fdd",
},
{
"advertising-router": "10.34.2.250",
"age": "1888",
"checksum": "0x902f",
"lsa-id": "10.1.0.1",
"lsa-length": "28",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x800019e5",
},
{
"advertising-router": "10.34.2.251",
"age": "1664",
"checksum": "0xd00b",
"lsa-id": "10.1.0.1",
"lsa-length": "28",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x800019c7",
},
{
"advertising-router": "10.169.14.240",
"age": "334",
"checksum": "0xde66",
"lsa-id": "10.1.0.1",
"lsa-length": "28",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x80001987",
},
{
"advertising-router": "10.169.14.241",
"age": "1907",
"checksum": "0x8014",
"lsa-id": "10.1.0.1",
"lsa-length": "28",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x80001e31",
},
{
"advertising-router": "10.189.5.252",
"age": "231",
"checksum": "0xd49a",
"lsa-id": "10.1.0.1",
"lsa-length": "28",
"lsa-type": "OpaqArea",
"options": "0x22",
"our-entry": True,
"sequence-number": "0x80001a15",
},
{
"advertising-router": "10.189.5.253",
"age": "901",
"checksum": "0xe48e",
"lsa-id": "10.1.0.1",
"lsa-length": "28",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x80001a0f",
},
{
"advertising-router": "10.34.2.250",
"age": "2119",
"checksum": "0x47bd",
"lsa-id": "10.1.0.3",
"lsa-length": "136",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x800013d3",
},
{
"advertising-router": "10.34.2.251",
"age": "1445",
"checksum": "0x5fc3",
"lsa-id": "10.1.0.3",
"lsa-length": "136",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x800013b5",
},
{
"advertising-router": "10.169.14.240",
"age": "1690",
"checksum": "0x75dc",
"lsa-id": "10.1.0.3",
"lsa-length": "136",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x8000063d",
},
{
"advertising-router": "10.169.14.241",
"age": "951",
"checksum": "0x1721",
"lsa-id": "10.1.0.3",
"lsa-length": "136",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x80000c51",
},
{
"advertising-router": "10.189.5.252",
"age": "2678",
"checksum": "0x97cc",
"lsa-id": "10.1.0.3",
"lsa-length": "136",
"lsa-type": "OpaqArea",
"options": "0x22",
"our-entry": True,
"sequence-number": "0x80000321",
},
{
"advertising-router": "10.189.5.253",
"age": "2500",
"checksum": "0x71f1",
"lsa-id": "10.1.0.3",
"lsa-length": "136",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x80000322",
},
{
"advertising-router": "10.34.2.250",
"age": "1427",
"checksum": "0x1e4",
"lsa-id": "10.1.0.4",
"lsa-length": "136",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x8000029e",
},
{
"advertising-router": "10.34.2.251",
"age": "1226",
"checksum": "0x29c0",
"lsa-id": "10.1.0.4",
"lsa-length": "136",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x80000299",
},
{
"advertising-router": "10.169.14.240",
"age": "1238",
"checksum": "0xb606",
"lsa-id": "10.1.0.4",
"lsa-length": "136",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x800003f8",
},
{
"advertising-router": "10.169.14.241",
"age": "2127",
"checksum": "0x694d",
"lsa-id": "10.1.0.4",
"lsa-length": "136",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x800013fe",
},
{
"advertising-router": "10.189.5.252",
"age": "2411",
"checksum": "0xb804",
"lsa-id": "10.1.0.4",
"lsa-length": "136",
"lsa-type": "OpaqArea",
"options": "0x22",
"our-entry": True,
"sequence-number": "0x800013e8",
},
{
"advertising-router": "10.189.5.253",
"age": "2772",
"checksum": "0x4ecf",
"lsa-id": "10.1.0.4",
"lsa-length": "136",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x80000f9b",
},
{
"advertising-router": "10.34.2.250",
"age": "289",
"checksum": "0x5e9d",
"lsa-id": "10.1.0.5",
"lsa-length": "136",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x800001b5",
},
{
"advertising-router": "10.34.2.251",
"age": "276",
"checksum": "0xd817",
"lsa-id": "10.1.0.5",
"lsa-length": "136",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x800001b5",
},
{
"advertising-router": "10.169.14.240",
"age": "33",
"checksum": "0xdd1f",
"lsa-id": "10.1.0.5",
"lsa-length": "136",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x80000289",
},
{
"advertising-router": "10.169.14.241",
"age": "1687",
"checksum": "0x21a9",
"lsa-id": "10.1.0.5",
"lsa-length": "136",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x80000298",
},
{
"advertising-router": "10.189.5.252",
"age": "1312",
"checksum": "0x79b5",
"lsa-id": "10.1.0.5",
"lsa-length": "136",
"lsa-type": "OpaqArea",
"options": "0x22",
"our-entry": True,
"sequence-number": "0x800001bb",
},
{
"advertising-router": "10.189.5.253",
"age": "1147",
"checksum": "0x5ec3",
"lsa-id": "10.1.0.5",
"lsa-length": "136",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x800001bb",
},
{
"advertising-router": "10.100.5.5",
"age": "1469",
"checksum": "0x629a",
"lsa-id": "10.1.0.6",
"lsa-length": "168",
"lsa-type": "OpaqArea",
"options": "0x20",
"sequence-number": "0x800019be",
},
{
"advertising-router": "10.19.198.239",
"age": "622",
"checksum": "0xcffa",
"lsa-id": "10.1.0.10",
"lsa-length": "132",
"lsa-type": "OpaqArea",
"options": "0x20",
"sequence-number": "0x8000025d",
},
{
"advertising-router": "10.169.196.241",
"age": "521",
"checksum": "0x771b",
"lsa-id": "10.1.0.10",
"lsa-length": "132",
"lsa-type": "OpaqArea",
"options": "0x20",
"sequence-number": "0x8000025d",
},
{
"advertising-router": "10.19.198.239",
"age": "622",
"checksum": "0xecd3",
"lsa-id": "10.1.0.11",
"lsa-length": "132",
"lsa-type": "OpaqArea",
"options": "0x20",
"sequence-number": "0x8000025d",
},
{
"advertising-router": "10.169.196.241",
"age": "521",
"checksum": "0xa14f",
"lsa-id": "10.1.0.11",
"lsa-length": "132",
"lsa-type": "OpaqArea",
"options": "0x20",
"sequence-number": "0x8000025d",
},
{
"advertising-router": "10.19.198.239",
"age": "622",
"checksum": "0x87f8",
"lsa-id": "10.1.0.12",
"lsa-length": "80",
"lsa-type": "OpaqArea",
"options": "0x20",
"sequence-number": "0x80000163",
},
{
"advertising-router": "10.169.196.241",
"age": "35",
"checksum": "0x8150",
"lsa-id": "10.1.8.69",
"lsa-length": "80",
"lsa-type": "OpaqArea",
"options": "0x20",
"sequence-number": "0x8000003b",
},
{
"advertising-router": "10.169.196.241",
"age": "521",
"checksum": "0x8a2d",
"lsa-id": "10.1.8.70",
"lsa-length": "80",
"lsa-type": "OpaqArea",
"options": "0x20",
"sequence-number": "0x80000151",
},
{
"advertising-router": "10.100.5.5",
"age": "1469",
"checksum": "0x810a",
"lsa-id": "10.16.0.0",
"lsa-length": "52",
"lsa-type": "OpaqArea",
"options": "0x20",
"sequence-number": "0x800019ac",
},
{
"advertising-router": "10.19.198.239",
"age": "622",
"checksum": "0x8e0f",
"lsa-id": "10.16.0.0",
"lsa-length": "76",
"lsa-type": "OpaqArea",
"options": "0x20",
"sequence-number": "0x8000028c",
},
{
"advertising-router": "10.34.2.250",
"age": "2810",
"checksum": "0xbd3d",
"lsa-id": "10.16.0.0",
"lsa-length": "44",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x80001a15",
},
{
"advertising-router": "10.34.2.251",
"age": "2103",
"checksum": "0x1b10",
"lsa-id": "10.16.0.0",
"lsa-length": "44",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x800019e4",
},
{
"advertising-router": "10.169.196.241",
"age": "521",
"checksum": "0x2db9",
"lsa-id": "10.16.0.0",
"lsa-length": "76",
"lsa-type": "OpaqArea",
"options": "0x20",
"sequence-number": "0x800003a6",
},
{
"advertising-router": "10.169.14.240",
"age": "2142",
"checksum": "0x15f1",
"lsa-id": "10.16.0.0",
"lsa-length": "44",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x8000199d",
},
{
"advertising-router": "10.169.14.241",
"age": "48",
"checksum": "0xb2a7",
"lsa-id": "10.16.0.0",
"lsa-length": "44",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x80001e44",
},
{
"advertising-router": "10.189.5.252",
"age": "771",
"checksum": "0xe5ef",
"lsa-id": "10.16.0.0",
"lsa-length": "44",
"lsa-type": "OpaqArea",
"options": "0x22",
"our-entry": True,
"sequence-number": "0x80001a2a",
},
{
"advertising-router": "10.189.5.253",
"age": "410",
"checksum": "0xf1eb",
"lsa-id": "10.16.0.0",
"lsa-length": "44",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x80001a21",
},
{
"advertising-router": "10.19.198.239",
"age": "622",
"checksum": "0xcdab",
"lsa-id": "10.49.0.0",
"lsa-length": "44",
"lsa-type": "OpaqArea",
"options": "0x20",
"sequence-number": "0x8000028c",
},
{
"advertising-router": "10.169.196.241",
"age": "521",
"checksum": "0x69c9",
"lsa-id": "10.49.0.0",
"lsa-length": "44",
"lsa-type": "OpaqArea",
"options": "0x20",
"sequence-number": "0x800003a4",
},
{
"advertising-router": "10.100.5.5",
"age": "1469",
"checksum": "0x6c5a",
"lsa-id": "10.49.0.1",
"lsa-length": "44",
"lsa-type": "OpaqArea",
"options": "0x20",
"sequence-number": "0x800019ac",
},
{
"advertising-router": "10.34.2.250",
"age": "736",
"checksum": "0x7fa7",
"lsa-id": "10.49.0.1",
"lsa-length": "44",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x80001fa9",
},
{
"advertising-router": "10.34.2.251",
"age": "567",
"checksum": "0x6ce",
"lsa-id": "10.49.0.1",
"lsa-length": "44",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x80001cfb",
},
{
"advertising-router": "10.169.14.240",
"age": "2732",
"checksum": "0x99aa",
"lsa-id": "10.49.0.1",
"lsa-length": "44",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x80001bc1",
},
{
"advertising-router": "10.169.14.241",
"age": "1468",
"checksum": "0x6433",
"lsa-id": "10.49.0.1",
"lsa-length": "44",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x80001f67",
},
{
"advertising-router": "10.189.5.252",
"age": "1608",
"checksum": "0x8c7f",
"lsa-id": "10.49.0.1",
"lsa-length": "44",
"lsa-type": "OpaqArea",
"options": "0x22",
"our-entry": True,
"sequence-number": "0x80001b9e",
},
{
"advertising-router": "10.189.5.253",
"age": "1689",
"checksum": "0xe3bf",
"lsa-id": "10.49.0.1",
"lsa-length": "44",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x80001b04",
},
{
"advertising-router": "10.34.2.250",
"age": "76",
"checksum": "0x39a3",
"lsa-id": "10.64.0.1",
"lsa-length": "60",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x800004f9",
},
{
"advertising-router": "10.169.14.241",
"age": "725",
"checksum": "0x7002",
"lsa-id": "10.64.0.1",
"lsa-length": "60",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x80000311",
},
{
"advertising-router": "10.189.5.253",
"age": "2230",
"checksum": "0x6915",
"lsa-id": "10.64.0.1",
"lsa-length": "60",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x8000030a",
},
{
"advertising-router": "10.169.14.241",
"age": "499",
"checksum": "0x7271",
"lsa-id": "10.64.0.2",
"lsa-length": "60",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x80000305",
},
{
"advertising-router": "10.169.14.241",
"age": "274",
"checksum": "0x7248",
"lsa-id": "10.64.0.3",
"lsa-length": "60",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x8000029a",
},
{
"advertising-router": "10.189.5.253",
"age": "656",
"checksum": "0x34eb",
"lsa-id": "10.64.0.3",
"lsa-length": "60",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x800002db",
},
{
"advertising-router": "10.189.5.253",
"age": "1960",
"checksum": "0x31be",
"lsa-id": "10.64.0.4",
"lsa-length": "60",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x800001bb",
},
{
"advertising-router": "10.100.5.5",
"age": "1469",
"checksum": "0x4de2",
"lsa-id": "10.64.0.6",
"lsa-length": "56",
"lsa-type": "OpaqArea",
"options": "0x20",
"sequence-number": "0x800019bf",
},
{
"advertising-router": "10.34.2.250",
"age": "2580",
"checksum": "0xb9a6",
"lsa-id": "10.64.0.7",
"lsa-length": "48",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x8000046b",
},
{
"advertising-router": "10.34.2.251",
"age": "1006",
"checksum": "0x6a96",
"lsa-id": "10.64.0.7",
"lsa-length": "60",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x800004de",
},
{
"advertising-router": "10.19.198.239",
"age": "622",
"checksum": "0xb34a",
"lsa-id": "10.64.0.17",
"lsa-length": "104",
"lsa-type": "OpaqArea",
"options": "0x20",
"sequence-number": "0x8000025d",
},
{
"advertising-router": "10.169.196.241",
"age": "521",
"checksum": "0x3e3c",
"lsa-id": "10.64.0.17",
"lsa-length": "104",
"lsa-type": "OpaqArea",
"options": "0x20",
"sequence-number": "0x8000025d",
},
{
"advertising-router": "10.19.198.239",
"age": "622",
"checksum": "0xb938",
"lsa-id": "10.64.0.18",
"lsa-length": "104",
"lsa-type": "OpaqArea",
"options": "0x20",
"sequence-number": "0x8000025d",
},
{
"advertising-router": "10.169.196.241",
"age": "521",
"checksum": "0x6fdb",
"lsa-id": "10.64.0.18",
"lsa-length": "104",
"lsa-type": "OpaqArea",
"options": "0x20",
"sequence-number": "0x8000025d",
},
{
"advertising-router": "10.34.2.251",
"age": "2542",
"checksum": "0xe909",
"lsa-id": "10.64.0.31",
"lsa-length": "60",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x8000029a",
},
{
"advertising-router": "10.34.2.251",
"age": "787",
"checksum": "0xe396",
"lsa-id": "10.64.0.32",
"lsa-length": "60",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x800001b5",
},
{
"advertising-router": "10.34.2.250",
"age": "1658",
"checksum": "0xffb8",
"lsa-id": "10.64.0.37",
"lsa-length": "60",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x8000029b",
},
{
"advertising-router": "10.34.2.250",
"age": "966",
"checksum": "0x71b3",
"lsa-id": "10.64.0.38",
"lsa-length": "60",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x800001b5",
},
{
"advertising-router": "10.189.5.252",
"age": "501",
"checksum": "0x7efa",
"lsa-id": "10.64.0.52",
"lsa-length": "60",
"lsa-type": "OpaqArea",
"options": "0x22",
"our-entry": True,
"sequence-number": "0x80000308",
},
{
"advertising-router": "10.189.5.252",
"age": "1042",
"checksum": "0x1839",
"lsa-id": "10.64.0.54",
"lsa-length": "60",
"lsa-type": "OpaqArea",
"options": "0x22",
"our-entry": True,
"sequence-number": "0x800002dc",
},
{
"advertising-router": "10.189.5.252",
"age": "1876",
"checksum": "0x92eb",
"lsa-id": "10.64.0.55",
"lsa-length": "60",
"lsa-type": "OpaqArea",
"options": "0x22",
"our-entry": True,
"sequence-number": "0x800001bb",
},
{
"advertising-router": "10.169.14.240",
"age": "1087",
"checksum": "0x7544",
"lsa-id": "10.64.0.57",
"lsa-length": "60",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x80000303",
},
{
"advertising-router": "10.169.14.240",
"age": "1389",
"checksum": "0x6d12",
"lsa-id": "10.64.0.59",
"lsa-length": "60",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x800002f4",
},
{
"advertising-router": "10.169.14.240",
"age": "937",
"checksum": "0x4f1a",
"lsa-id": "10.64.0.60",
"lsa-length": "60",
"lsa-type": "OpaqArea",
"options": "0x22",
"sequence-number": "0x8000028b",
},
{
"advertising-router": "10.169.196.241",
"age": "35",
"checksum": "0xdcd1",
"lsa-id": "10.64.8.74",
"lsa-length": "92",
"lsa-type": "OpaqArea",
"options": "0x20",
"sequence-number": "0x80000030",
},
{
"advertising-router": "10.169.196.241",
"age": "521",
"checksum": "0xd4b0",
"lsa-id": "10.64.8.75",
"lsa-length": "92",
"lsa-type": "OpaqArea",
"options": "0x20",
"sequence-number": "0x80000151",
},
{
"advertising-router": "10.34.2.251",
"age": "2323",
"checksum": "0x6715",
"lsa-id": "0.0.0.0",
"lsa-length": "36",
"lsa-type": "Extern",
"options": "0x22",
"sequence-number": "0x800019e3",
},
{
"advertising-router": "10.169.14.240",
"age": "1991",
"checksum": "0x9fcc",
"lsa-id": "0.0.0.0",
"lsa-length": "36",
"lsa-type": "Extern",
"options": "0x22",
"sequence-number": "0x8000039e",
},
{
"advertising-router": "192.168.36.119",
"age": "928",
"checksum": "0x3bc3",
"lsa-id": "10.1.0.0",
"lsa-length": "36",
"lsa-type": "Extern",
"options": "0x20",
"sequence-number": "0x800019b0",
},
{
"advertising-router": "192.168.36.120",
"age": "500",
"checksum": "0x33c9",
"lsa-id": "10.1.0.0",
"lsa-length": "36",
"lsa-type": "Extern",
"options": "0x20",
"sequence-number": "0x800019b1",
},
{
"advertising-router": "10.169.14.240",
"age": "1841",
"checksum": "0xf161",
"lsa-id": "10.174.132.237",
"lsa-length": "36",
"lsa-type": "Extern",
"options": "0x22",
"sequence-number": "0x8000039e",
},
{
"advertising-router": "10.169.14.240",
"age": "2443",
"checksum": "0x473e",
"lsa-id": "10.34.2.250",
"lsa-length": "36",
"lsa-type": "Extern",
"options": "0x22",
"sequence-number": "0x80000288",
},
{
"advertising-router": "10.169.14.241",
"age": "2346",
"checksum": "0x2153",
"lsa-id": "10.34.2.250",
"lsa-length": "36",
"lsa-type": "Extern",
"options": "0x22",
"sequence-number": "0x80000298",
},
{
"advertising-router": "10.169.14.240",
"age": "184",
"checksum": "0x3b48",
"lsa-id": "10.34.2.251",
"lsa-length": "36",
"lsa-type": "Extern",
"options": "0x22",
"sequence-number": "0x80000289",
},
{
"advertising-router": "10.169.14.241",
"age": "1176",
"checksum": "0x175c",
"lsa-id": "10.34.2.251",
"lsa-length": "36",
"lsa-type": "Extern",
"options": "0x22",
"sequence-number": "0x80000298",
},
{
"advertising-router": "10.34.2.250",
"age": "1197",
"checksum": "0xf88e",
"lsa-id": "10.169.14.240",
"lsa-length": "36",
"lsa-type": "Extern",
"options": "0x22",
"sequence-number": "0x8000029a",
},
{
"advertising-router": "10.34.2.251",
"age": "1884",
"checksum": "0x190c",
"lsa-id": "10.169.14.240",
"lsa-length": "36",
"lsa-type": "Extern",
"options": "0x22",
"sequence-number": "0x800019e4",
},
{
"advertising-router": "10.189.5.252",
"age": "2143",
"checksum": "0xc3fb",
"lsa-id": "10.169.14.240",
"lsa-length": "36",
"lsa-type": "Extern",
"options": "0x22",
"our-entry": True,
"sequence-number": "0x80001a3a",
},
{
"advertising-router": "10.34.2.250",
"age": "2349",
"checksum": "0xb341",
"lsa-id": "10.169.14.241",
"lsa-length": "36",
"lsa-type": "Extern",
"options": "0x22",
"sequence-number": "0x80001a14",
},
{
"advertising-router": "10.34.2.251",
"age": "50",
"checksum": "0xea9b",
"lsa-id": "10.169.14.241",
"lsa-length": "36",
"lsa-type": "Extern",
"options": "0x22",
"sequence-number": "0x80000299",
},
{
"advertising-router": "10.189.5.253",
"age": "164",
"checksum": "0xeb68",
"lsa-id": "10.169.14.241",
"lsa-length": "36",
"lsa-type": "Extern",
"options": "0x22",
"sequence-number": "0x80000fae",
},
{
"advertising-router": "10.169.14.240",
"age": "1539",
"checksum": "0xc372",
"lsa-id": "10.189.5.252",
"lsa-length": "36",
"lsa-type": "Extern",
"options": "0x22",
"sequence-number": "0x800019b0",
},
{
"advertising-router": "10.169.14.241",
"age": "2566",
"checksum": "0x6d4",
"lsa-id": "10.189.5.253",
"lsa-length": "36",
"lsa-type": "Extern",
"options": "0x22",
"sequence-number": "0x8000140f",
},
{
"advertising-router": "10.169.14.240",
"age": "786",
"checksum": "0xfb51",
"lsa-id": "192.168.100.0",
"lsa-length": "36",
"lsa-type": "Extern",
"options": "0x22",
"sequence-number": "0x800002da",
},
{
"advertising-router": "10.169.14.240",
"age": "636",
"checksum": "0x19b8",
"lsa-id": "192.168.100.252",
"lsa-length": "36",
"lsa-type": "Extern",
"options": "0x22",
"sequence-number": "0x800002d9",
},
],
}
}
|
tests/float/sys_getsizeof_float.py | iotctl/pycopy | 663 | 11078730 | <filename>tests/float/sys_getsizeof_float.py
# test sys.getsizeof() function
import sys
try:
sys.getsizeof
except AttributeError:
print("SKIP")
raise SystemExit
print(sys.getsizeof(1.0) >= 2)
|
src/nginx/config/api/base.py | sixninetynine/nginx-config-builder | 149 | 11078736 | <filename>src/nginx/config/api/base.py
class Base(object):
""" This is the base class for all blocks and options. """
_indent_level = 0
_indent_char = ' '
_indent = 4
_parent = None
def _get_indent(self):
return self._indent_char * self._indent * self._indent_level
def _render(self, name):
return '\n{indent}{name}'.format(
name=name,
indent=self._get_indent()
)
def __str__(self):
return str(self.__repr__())
@property
def parent(self):
return self._parent
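# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# A minimal subclass showing how Base._render and the indentation attributes are meant
# to combine; the KeyValueOption name and its fields are assumptions for this example.
class KeyValueOption(Base):
    def __init__(self, name, value, indent_level=0):
        self.name = name
        self.value = value
        self._indent_level = indent_level
    def __repr__(self):
        # _render prefixes a newline plus the indentation for the current nesting level
        return self._render('{0} {1};'.format(self.name, self.value))
# str(KeyValueOption('worker_processes', 4, indent_level=1)) == '\n    worker_processes 4;'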
|
backend/chat/schemas.py | restato/bunnybook | 131 | 11078739 | import datetime as dt
from typing import Optional
from uuid import UUID
from common.schemas import BaseSchema
class ChatMessageRead(BaseSchema):
id: UUID
created_at: dt.datetime
from_profile_id: UUID
chat_group_id: UUID
content: str
class ConversationRead(BaseSchema):
from_profile_id: UUID
from_profile_username: str
content: str
created_at: dt.datetime
chat_group_id: UUID
chat_group_name: str
read_at: Optional[dt.datetime]
class IsTypingWsMessage(BaseSchema):
profile_id: UUID
username: str
chat_group_id: UUID
class PrivateChatRead(BaseSchema):
chat_group_id: UUID
profile_id: UUID
username: str
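# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# Assuming BaseSchema is a pydantic model (which the type annotations suggest), a chat
# message payload could be built and serialized roughly like this:
def _example_chat_message() -> ChatMessageRead:
    from uuid import uuid4
    return ChatMessageRead(
        id=uuid4(),
        created_at=dt.datetime.utcnow(),
        from_profile_id=uuid4(),
        chat_group_id=uuid4(),
        content="hello",
    )
# _example_chat_message().json() would give the serialized payload (pydantic-style; assumed).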
|
copier/subproject.py | Ket3r/copier | 438 | 11078755 | <gh_stars>100-1000
"""Objects to interact with subprojects.
A *subproject* is a project that gets rendered and/or updated with Copier.
"""
from pathlib import Path
from typing import Optional
import yaml
from plumbum.cmd import git
from plumbum.machines import local
from pydantic.dataclasses import dataclass
from .template import Template
from .types import AbsolutePath, AnyByStrDict, VCSTypes
from .vcs import is_in_git_repo
try:
from functools import cached_property
except ImportError:
from backports.cached_property import cached_property
@dataclass
class Subproject:
"""Object that represents the subproject and its current state.
Attributes:
local_abspath:
Absolute path on local disk pointing to the subproject root folder.
answers_relpath:
Relative path to [the answers file][the-copier-answersyml-file].
"""
local_abspath: AbsolutePath
answers_relpath: Path = Path(".copier-answers.yml")
def is_dirty(self) -> bool:
"""Indicates if the local template root is dirty.
Only applicable for VCS-tracked templates.
"""
if self.vcs == "git":
with local.cwd(self.local_abspath):
return bool(git("status", "--porcelain").strip())
return False
@property
def _raw_answers(self) -> AnyByStrDict:
"""The last answers, loaded raw as yaml."""
try:
return yaml.safe_load(
(self.local_abspath / self.answers_relpath).read_text()
)
except OSError:
return {}
@cached_property
def last_answers(self) -> AnyByStrDict:
"""Last answers, excluding private ones (except _src_path and _commit)."""
return {
key: value
for key, value in self._raw_answers.items()
if key in {"_src_path", "_commit"} or not key.startswith("_")
}
@cached_property
def template(self) -> Optional[Template]:
"""Template, as it was used the last time."""
last_url = self.last_answers.get("_src_path")
last_ref = self.last_answers.get("_commit")
if last_url:
return Template(url=last_url, ref=last_ref)
@cached_property
def vcs(self) -> Optional[VCSTypes]:
"""VCS type of the subproject."""
if is_in_git_repo(self.local_abspath):
return "git"
|
scripts/convertChromsBigWig.py | gartician/deepTools | 351 | 11078761 | <reponame>gartician/deepTools
#!/usr/bin/env python
import sys
import pyBigWig
import requests
import re
import argparse
from argparse import RawTextHelpFormatter
import itertools
def parse_arguments(defaults):
parser = argparse.ArgumentParser(description='Convert chromosome names for bigwig files between ensembl, gencode and UCSC naming schemes\n' +
"Per default it writes to the same location as original file, however with a modified filename:\n" +
"eg. test.bw --> test.[toFormat]_chroms.bw\n" +
"Change this with the -o option!\n\n" +
"Mapping tables are taken from https://github.com/dpryan79/ChromosomeMappings\n\n" +
"Provided mapping options need to exactly match an existing file\n" +
"[GENOME]_[FROM_FORMAT]2[TO_FORMAT].txt in this repo!",
usage='$ convertChroms BIGWIG', formatter_class=RawTextHelpFormatter)
parser.add_argument('bw_in_filename',
metavar='BIGWIG',
help='bigwig file that will be converted')
parser.add_argument('--genome', '-g',
action='store',
dest='genome',
help='Genome version of original bigwig \n' +
'(GRCm37|GRCm38|GRCh37|GRCh38|BDGP6|dm3|GRCz10|GRCz11|\n' +
'JGI_4.2|MEDAKA1|R64-1-1|WBcel235|Zv9|galGal4|rn5|rn6)\n' +
'(default: %(default)s)',
default=defaults["genome"])
parser.add_argument('--fromFormat', '-f',
action='store',
dest='from_format',
help='Chr naming format of original bigwig (ensembl|gencode|UCSC) (default: %(default)s)',
default=defaults["fromFormat"])
parser.add_argument('--toFormat', '-t',
action='store',
dest='to_format',
help='Chr naming format of converted bigwig (ensembl|gencode|UCSC) (default: %(default)s)',
default=defaults["toFormat"])
parser.add_argument('--outFileName', '-o',
action='store',
dest='bw_out_filename',
                        help='File name of the converted bigwig (default: [BIGWIG].[TO_FORMAT]_chroms.bw, written next to the input)',
default=defaults["bw_out_filename"])
parser.add_argument('--baseURL', '-u',
action='store',
dest='base_url',
help='base url where the mapping tables can be found (default: %(default)s)\n' +
'Local files can be given with \'file://[BASE_DIR]/\'',
default=defaults["base_url"])
parser.add_argument('--verbose', '-v',
action='store_true',
dest='verbose',
help='Be more verbose where possible (default: %(default)s)',
default=defaults["verbose"])
return parser
def get_chromosome_mapping(genome="GRCm38", from_format="ensembl", to_format="UCSC", verbose=True, base_url='https://raw.githubusercontent.com/dpryan79/ChromosomeMappings/master/'):
"""
    creates a dict with chromosome name mappings according to the provided conversion formats.
    The default base URL accesses a GitHub repo with the conversion files,
    but you can also give e.g. a path to a local directory.
"""
mapping_file = genome + '_' + from_format + '2' + to_format + '.txt'
if re.match('^file:[/]+.*', base_url):
base_url = re.sub("file:[/]*(/.*)", "\\1", base_url)
if verbose:
print("load mapping table (" + mapping_file + ') from ' + base_url)
tab = None
if re.match('^https?://.*', base_url):
try:
r = requests.get(base_url + '/' + mapping_file)
r.raise_for_status()
except requests.exceptions.RequestException as e:
print("\n", e, "\n\nPlease provide correct name (GENOME, FROM_FORMAT, TO_FORMAT) for a mapping table!\n")
sys.exit(1)
tab = r.text
elif re.match('^[/]+.*', base_url):
try:
tab = open(base_url + '/' + mapping_file).read()
except IOError as e:
print("\n", e, "\n\nPlease provide a correct name (GENOME, FROM_FORMAT, TO_FORMAT) for a mapping table!\n")
sys.exit(1)
else:
print("\nPlease provide a correct BASE_URL for a mapping table!\n")
sys.exit(1)
mapping_table = {}
for ent in tab.split("\n"):
if len(ent) == 0:
continue
pair = ent.split("\t")
if (len(pair[1]) <= 0):
# if (verbose):
# print("skip chrom \'" + pair[0] + "\' - cannot be mapped to "+to_format)
continue
mapping_table[pair[0]] = pair[1]
return mapping_table
def convert_bigwig(mapping_table, bw_in_filename, bw_out_filename, verbose=False):
"""
    convert chromosome names of a bigwig file according to the given mapping_table;
    it checks which chromosome names can be mapped correctly, all other chromosomes are skipped
"""
bw = pyBigWig.open(bw_in_filename)
curr_chroms = bw.chroms()
final_mapping_table = {}
new_chroms = {}
for c in curr_chroms:
if c not in mapping_table:
if (verbose):
print("skip original chrom \'" + c + "\' - cannot be found in mapping table! Right GENOME & FROM_FORMAT?")
continue
final_mapping_table[c] = mapping_table[c]
new_chroms[mapping_table[c]] = curr_chroms[c]
if (len(new_chroms) <= 0):
print("No chromosomes found for mapping! Wrong 'FROM_FORMAT'?")
sys.exit(1)
bw_out = pyBigWig.open(bw_out_filename, "w")
bw_out.addHeader(list(new_chroms.items()))
for c in final_mapping_table:
c_int = bw.intervals(c)
c_map = final_mapping_table[c]
if verbose:
print("convert chromosome: ", c, " --> ", c_map)
bw_out.addEntries(list(itertools.repeat(c_map, len(c_int))), [x[0] for x in c_int], ends=[x[1] for x in c_int], values=[x[2] for x in c_int])
bw_out.close()
bw.close()
if (verbose):
print("\nbigwig conversion finished!\n")
def main(args=None):
defaults = {
'genome': 'GRCm38',
'fromFormat': 'ensembl',
'toFormat': 'UCSC',
'verbose': False,
'bw_out_filename': None,
'base_url': 'https://raw.githubusercontent.com/dpryan79/ChromosomeMappings/master/'
}
args = parse_arguments(defaults).parse_args(args)
bw_out_filename = args.bw_out_filename
if args.bw_out_filename is None:
bw_out_filename = re.sub(r"(.[^\.]+)$", ".%s\\1" % (args.to_format + "_chroms"), args.bw_in_filename)
print("\noutput_file: " + bw_out_filename)
mapping_table = get_chromosome_mapping(genome=args.genome, from_format=args.from_format, to_format=args.to_format, verbose=args.verbose, base_url=args.base_url)
convert_bigwig(mapping_table, args.bw_in_filename, bw_out_filename, args.verbose)
if __name__ == "__main__":
args = None
if len(sys.argv) == 1:
args = ["--help"]
main(args)
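# --- Illustrative invocations (editor's addition, not part of the original script) ---
# Convert ensembl-style chromosome names in a GRCm38 bigwig to UCSC names; with the
# defaults this writes test.UCSC_chroms.bw next to the input (file names are placeholders):
#   $ python convertChromsBigWig.py test.bw -g GRCm38 -f ensembl -t UCSC
# The same conversion using a local checkout of the mapping tables instead of GitHub:
#   $ python convertChromsBigWig.py test.bw -u file:///path/to/ChromosomeMappings/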
|
srl_zoo/models/autoencoders.py | anonymous-authors-2018/robotics-repo | 134 | 11078792 | from __future__ import print_function, division, absolute_import
from .models import *
class LinearAutoEncoder(BaseModelAutoEncoder):
"""
:param input_dim: (int)
:param state_dim: (int)
"""
def __init__(self, input_dim, state_dim=3):
super(LinearAutoEncoder, self).__init__()
self.encoder = nn.Sequential(
nn.Linear(input_dim, state_dim),
)
self.decoder = nn.Sequential(
nn.Linear(state_dim, input_dim),
)
def encode(self, x):
"""
:param x: (th.Tensor)
:return: (th.Tensor)
"""
# Flatten input
x = x.view(x.size(0), -1)
return self.encoder(x)
def decode(self, x):
"""
:param x: (th.Tensor)
:return: (th.Tensor)
"""
return self.decoder(x)
class DenseAutoEncoder(BaseModelAutoEncoder):
"""
Dense autoencoder network
Known issue: it reconstructs the image but omits the robot arm
:param input_dim: (int)
:param state_dim: (int)
"""
def __init__(self, input_dim, state_dim=3):
super(DenseAutoEncoder, self).__init__()
self.encoder = nn.Sequential(
nn.Linear(input_dim, 50),
nn.Tanh(),
nn.Linear(50, 50),
nn.Tanh(),
nn.Linear(50, state_dim),
)
self.decoder = nn.Sequential(
nn.Linear(state_dim, 50),
nn.Tanh(),
nn.Linear(50, 50),
nn.Tanh(),
nn.Linear(50, input_dim),
)
def encode(self, x):
"""
:param x: (th.Tensor)
:return: (th.Tensor)
"""
# Flatten input
x = x.view(x.size(0), -1)
return self.encoder(x)
def decode(self, x):
"""
:param x: (th.Tensor)
:return: (th.Tensor)
"""
return self.decoder(x)
class CNNAutoEncoder(BaseModelAutoEncoder):
"""
Custom convolutional autoencoder network
Input dim (same as ResNet): 3x224x224
:param state_dim: (int)
"""
def __init__(self, state_dim=3):
super(CNNAutoEncoder, self).__init__()
self.encoder_fc = nn.Sequential(
nn.Linear(6 * 6 * 64, state_dim)
)
self.decoder_fc = nn.Sequential(
nn.Linear(state_dim, 6 * 6 * 64)
)
def encode(self, x):
"""
:param x: (th.Tensor)
:return: (th.Tensor)
"""
encoded = self.encoder_conv(x)
encoded = encoded.view(encoded.size(0), -1)
return self.encoder_fc(encoded)
def decode(self, x):
"""
:param x: (th.Tensor)
:return: (th.Tensor)
"""
decoded = self.decoder_fc(x)
decoded = decoded.view(x.size(0), 64, 6, 6)
return self.decoder_conv(decoded)
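# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# Meant to be called from code that imports this module (the relative import above means
# the file is not run directly); the input shape mirrors the CNNAutoEncoder docstring and
# is an assumption for this example.
def _example_usage():
    import torch as th
    model = DenseAutoEncoder(input_dim=3 * 224 * 224, state_dim=3)
    observations = th.rand(4, 3, 224, 224)     # a batch of RGB frames
    states = model.encode(observations)         # -> (4, 3) latent states
    reconstructions = model.decode(states)      # -> (4, 3 * 224 * 224) flattened frames
    return states, reconstructions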
|
scripts/bpf/gen.py | 5cs/spdk | 2,107 | 11078811 | <filename>scripts/bpf/gen.py
#!/usr/bin/env python3
from argparse import ArgumentParser
import os
import re
import subprocess
import sys
class TraceProcess:
def __init__(self, pid):
self._path = os.readlink(f'/proc/{pid}/exe')
self._pid = pid
self._probes = self._init_probes()
def _init_probes(self):
lines = subprocess.check_output(['bpftrace', '-l', '-p', str(self._pid)], text=True)
probes = {}
for line in lines.split('\n'):
parts = line.split(':')
if len(parts) < 3:
continue
ptype, path, function = parts[0], parts[1], parts[-1]
probes[(ptype, function)] = path
return probes
def fixup(self, script):
pregs = [re.compile(r'({}):__EXE__:(\w+)'.format(ptype)) for ptype in ['usdt', 'uprobe']]
with open(script, 'r') as file:
lines = file.readlines()
result = ''
for line in lines:
for regex in pregs:
match = regex.match(line)
if match is not None:
ptype, function = match.groups()
path = self._probes.get((ptype, function), self._path)
line = line.replace('__EXE__', path)
break
result += line.replace('__EXE__', self._path).replace('__PID__', str(self._pid))
return result
if __name__ == '__main__':
parser = ArgumentParser(description='bpftrace script generator replacing special ' +
'variables in the scripts with appropriate values')
parser.add_argument('-p', '--pid', type=int, required=True, help='PID of a traced process')
parser.add_argument('scripts', metavar='SCRIPTS', type=str, nargs='+',
help='bpftrace scripts to process')
args = parser.parse_args(sys.argv[1:])
proc = TraceProcess(args.pid)
for script in args.scripts:
print(proc.fixup(script))
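# --- Illustrative invocation (editor's addition, not part of the original script) ---
# Resolve the usdt/uprobe __EXE__ and __PID__ placeholders in a bpftrace script against a
# running process, then run the generated script (PID and file names are placeholders):
#   $ ./gen.py -p 12345 trace.bt > trace.resolved.bt && bpftrace trace.resolved.bt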
|
examples/pysolver.py | tenglongcong/amgcl | 504 | 11078820 | <gh_stars>100-1000
#!/usr/bin/env python
import sys, argparse
import numpy as np
import pyamgcl as amg
from scipy.io import mmread, mmwrite
from time import time
from make_poisson import *
class timeit:
profile = {}
def __init__(self, desc):
self.desc = desc
self.tic = time()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
toc = time()
timeit.profile[self.desc] = timeit.profile.get(self.desc, 0.0) + (toc - self.tic)
@staticmethod
def report():
print('\n---------------------------------')
total = sum(timeit.profile.values())
for k,v in sorted(timeit.profile.items()):
print('{0:>22}: {1:>8.3f}s ({2:>5.2f}%)'.format(k, v, 100 * v / total))
print('---------------------------------')
print('{0:>22}: {1:>8.3f}s'.format('Total', total))
#----------------------------------------------------------------------------
parser = argparse.ArgumentParser(sys.argv[0])
parser.add_argument('-A,--matrix', dest='A', help='System matrix in MatrixMarket format')
parser.add_argument('-f,--rhs', dest='f', help='RHS in MatrixMarket format')
parser.add_argument('-n,--size', dest='n', type=int, default=64, help='The size of the Poisson problem to solve when no system matrix is given')
parser.add_argument('-o,--out', dest='x', help='Output file name')
parser.add_argument('-p,--precond', dest='p', help='preconditioner parameters: key1=val1 key2=val2', nargs='+', default=[])
parser.add_argument('-s,--solver', dest='s', help='solver parameters: key1=val1 key2=val2', nargs='+', default=[])
args = parser.parse_args(sys.argv[1:])
#----------------------------------------------------------------------------
if args.A:
with timeit('Read problem'):
A = mmread(args.A)
f = mmread(args.f).flatten() if args.f else np.ones(A.shape[0])
else:
with timeit('Generate problem'):
A,f = make_poisson_3d(args.n)
# Parse parameters
p_prm = {p[0]: p[1] for p in map(lambda s: s.split('='), args.p)}
s_prm = {p[0]: p[1] for p in map(lambda s: s.split('='), args.s)}
# Create solver/preconditioner pair
with timeit('Setup solver'):
S = amg.solver(amg.amgcl(A, p_prm), s_prm)
print(S)
# Solve the system for the RHS
with timeit('Solve the problem'):
x = S(f)
error = np.linalg.norm(f - A * x) / np.linalg.norm(f)
print("{0.iters}: {0.error:.6e} / {1:.6e}".format(S, error))
# Save the solution
if args.x:
with timeit('Save the result'):
mmwrite(args.x, x.reshape((-1,1)))
timeit.report()
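# --- Illustrative invocation (editor's addition, not part of the original example) ---
# Running the script without arguments generates a 64^3 3D Poisson problem, solves it with
# the default AMG-preconditioned solver, and prints the iteration count, residual error,
# and timing profile:
#   $ python pysolver.py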
|
chrome/test/functional/media/media_stat_perf.py | nagineni/chromium-crosswalk | 231 | 11078827 | <gh_stars>100-1000
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""CPU, Memory, and FPS performance test for <video>.
Calculates decoded fps, dropped fps, CPU, and memory statistics while playing an
HTML5 media element. The test compares the results of playing media files at
different video resolutions.
"""
import logging
import os
import psutil
import pyauto_media
import pyauto
import pyauto_utils
# HTML test path; relative to src/chrome/test/data.
_TEST_HTML_PATH = os.path.join('media', 'html', 'media_stat_perf.html')
# Path under data path for test files.
_TEST_MEDIA_PATH_CROWD = os.path.join('pyauto_private', 'media', 'crowd')
# Path under data path for test files.
_TEST_MEDIA_PATH_TULIP = os.path.join('media', 'avperf', 'tulip')
# The media files used for testing.
_TEST_VIDEOS = [os.path.join(_TEST_MEDIA_PATH_CROWD, name) for name in [
'crowd2160.webm', 'crowd1080.webm']]
_TEST_VIDEOS.extend([os.path.join(_TEST_MEDIA_PATH_TULIP, name) for name in [
'tulip2.webm', 'tulip2.wav', 'tulip2.ogv', 'tulip2.ogg', 'tulip2.mp4',
'tulip2.mp3', 'tulip2.m4a']])
class MediaStatsPerfTest(pyauto.PyUITest):
"""PyAuto test container. See file doc string for more information."""
def _GetChromeRendererProcess(self):
"""Returns the Chrome renderer process."""
renderer_id = self.GetBrowserInfo()['windows'][0]['tabs'][1]['renderer_pid']
if not renderer_id:
self.fail('Can not find the tab renderer process.')
return psutil.Process(renderer_id)
def testMediaPerformance(self):
"""Launches HTML test which plays each video and records statistics."""
for file_name in _TEST_VIDEOS:
# Append a tab and delete it at the end of the test to free its memory.
self.AppendTab(pyauto.GURL(self.GetFileURLForDataPath(_TEST_HTML_PATH)))
file_url = self.GetFileURLForDataPath(file_name)
logging.debug('Running perf test for %s.', file_url)
renderer_process = self._GetChromeRendererProcess()
# Call to set a starting time to record CPU usage by the renderer.
renderer_process.get_cpu_percent()
self.assertTrue(
self.CallJavascriptFunc('startTest', [file_url], tab_index=1))
cpu_usage = renderer_process.get_cpu_percent()
      mem_usage_kb = renderer_process.get_memory_info()[0] / 1024  # resident memory in KB
file_name = os.path.basename(file_name)
pyauto_utils.PrintPerfResult('cpu', file_name, cpu_usage, '%')
      pyauto_utils.PrintPerfResult('memory', file_name, mem_usage_kb, 'KB')
decoded_fps = [
float(value) for value in
self.GetDOMValue("decodedFPS.join(',')", tab_index=1).split(',')]
dropped_frames = self.GetDOMValue('droppedFrames', tab_index=1)
dropped_fps = [
float(value) for value in
self.GetDOMValue("droppedFPS.join(',')", tab_index=1).split(',')]
pyauto_utils.PrintPerfResult('fps', file_name, decoded_fps, 'fps')
pyauto_utils.PrintPerfResult('dropped_fps', file_name, dropped_fps, 'fps')
pyauto_utils.PrintPerfResult('dropped_frames', file_name, dropped_frames,
'frames')
self.CloseTab(tab_index=1)
if __name__ == '__main__':
pyauto_media.Main()
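# --- Illustrative invocation (editor's addition, not part of the original test) ---
# As a PyAuto test the file is directly executable; inside a Chromium checkout with the
# pyauto test environment available (an assumption here) it would be run as:
#   $ python media_stat_perf.py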
|