ext (string, 9 distinct values) | sha (string, 40 chars) | content (string, 3 chars to 1.04M chars)
---|---|---
py | b404ca972ee32f1417ba60efb542b613167b8334 | from itertools import combinations
class MotionSimulator:
def main(self, coord):
coord = [list(map(lambda x: int(x.strip()[2:]), x[1:-1].split(','))) for x in coord.strip().split("\n")]
velocity = [[0, 0, 0] for _ in range(len(coord))]
for _ in range(1000):
# update velocity
for i, j in combinations(range(len(coord)), 2):
for k in range(3):
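                    # sign of the coordinate difference: 0 if equal, +1 if moon j is ahead, -1 if behind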
step = (coord[j][k] - coord[i][k]) and (1, -1)[(coord[j][k] - coord[i][k]) < 0]
velocity[i][k] += step
velocity[j][k] -= step
# update position
coord = [[a+b for a, b in zip(x, velocity[i])] for i, x in enumerate(coord)]
# compute total energy
return sum([sum(map(abs, coord[i])) * sum(map(abs, velocity[i])) for i in range(len(coord))])
if __name__ == "__main__":
with open("input.txt", "r") as f:
coord = f.read()
print(MotionSimulator().main(coord))
|
py | b404caa143c3b60337599528cec11a3959503c87 | from django.conf import settings
def event_processor(event, hint):
# skip specified exceptions
if event.get('exception', {}).get('values', [{}])[-1].get('type') in [
# 'Http404', 'NotAuthenticated', 'AuthenticationFailed', 'NotFound', 'XMLSyntaxError',
# 'FileUpload.DoesNotExist',
'Forbidden', 'KeyboardInterrupt',
'LabelStudioErrorSentryIgnored', 'LabelStudioAPIExceptionSentryIgnored',
'LabelStudioValidationErrorSentryIgnored'
]:
return None
# special flag inside of logger.error(..., extra={'sentry_skip': True}) to skip error message
if event.get('extra', {}).get('sentry_skip', False):
return None
# skip transactions by urls
if event.get("transaction") in [
'/static/{path}', '/dm/{path}', '/react-app/{path}', '/label-studio-frontend/{path}', '/favicon.ico',
'/health'
]:
return None
return event # to return all other events
def init_sentry(release_name, release_version):
if settings.SENTRY_DSN:
import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration
if settings.SENTRY_REDIS_ENABLED:
from sentry_sdk.integrations.redis import RedisIntegration
from sentry_sdk.integrations.rq import RqIntegration
advanced = [RedisIntegration(), RqIntegration()]
else:
advanced = []
# define the event processor, this runs before before_send if enabled
sentry_sdk.scope.add_global_event_processor(event_processor)
sentry_sdk.init(
dsn=settings.SENTRY_DSN,
integrations=[DjangoIntegration()] + advanced,
traces_sample_rate=settings.SENTRY_RATE,
send_default_pii=True,
environment=settings.SENTRY_ENVIRONMENT,
release=release_name + '@' + str(release_version)
)
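# Usage sketch (hypothetical values): init_sentry() is expected to be called once at
# startup, e.g. init_sentry(release_name='label-studio', release_version='1.25.0');
# it is a no-op when settings.SENTRY_DSN is not configured.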
|
py | b404cb35139ea1710f07c71e26691a1de80581ff | # pylint:disable-msg=C0103
"""ho ho ho"""
__revision__ = 'toto'
import sys
e = 1
e2 = 'yo'
e3 = None
try:
    raise e  # deliberately raising a non-exception (an int)
except Exception as ex:
    print(ex)
_, _, tb = sys.exc_info()
raise e2
def func():
"""bla bla bla"""
raise e3
def reraise():
"""reraise a catched exception instance"""
try:
raise Exception()
    except Exception as exc:
        print(exc)
raise exc
raise e3
|
py | b404cbec002e896c982f450200d0a0ab229c5ebd | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START gae_python37_app]
from datetime import datetime
from flask import Flask
from flask import render_template
from flask import request
from flask import redirect
from flask import url_for
from scraper import RedditScraper
import random
import numpy as np
import matplotlib.pyplot as plt
import base64
# If `entrypoint` is not defined in app.yaml, App Engine will look for an app
# called `app` in `main.py`.
app = Flask(__name__)
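# Note: `reddit_scraper` is only instantiated in the __main__ block at the bottom of
# this file, so the routes that call it assume the app is launched locally via
# `python main.py` rather than through a separate WSGI entrypoint.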
@app.route('/update_time')
def update_time():
with open('time.txt', 'w') as outfile:
outfile.write(str(datetime.now()))
return 'success'
@app.route('/time')
def time():
with open('time.txt') as infile:
time = infile.read()
return time
@app.route('/', methods=['GET','POST'])
def index():
"""Return a friendly HTTP greeting."""
# has_user_submitted = ping storage DB to see if called for this week, need to send user?
# has_user_submitted = False
# if has_user_submitted:
# return redirect(url_for('already_submitted'))
image_url = reddit_scraper.get_top_meme()
# return reddit_scraper.get_top_meme()
return render_template('index.html', image_url=image_url)
@app.route('/submitted', methods=['GET','POST'])
def submitted():
if request.method == 'POST':
comment = request.form['comment']
print(comment)
return render_template('submitted.html')
@app.route('/voting', methods=['GET','POST'])
def voting():
# has_user_submitted = ping storage DB to see if called for this week, need to send user?
# has_user_submitted = False
# if has_user_submitted:
# return redirect(url_for('already_submitted'))
comments = ['1','2','3','4','5','6'] # connect with sherif
image_url = reddit_scraper.get_top_meme()
return render_template('voting.html', comments=comments, image_url=image_url)
@app.route('/voted', methods=['GET','POST'])
def voted():
# has_user_submitted = ping storage DB to see if called for this week, need to send user?
# has_user_submitted = False
# if has_user_submitted:
# return redirect(url_for('already_submitted'))
print(request.form)
if request.method == 'POST':
best = request.form['best']
comments = []
for i in range(1, 7):
comment = request.form.get(f'bots{i}', None)
if comment is not None:
comments.append(comment)
print(comments)
return render_template('voted.html')
@app.route('/results')
def results():
count = 500
xScale = np.linspace(0, 100, count)
yScale = np.random.randn(count)
graph_file = 'score_history.png'
plt.clf()
plt.scatter(xScale,yScale)
plt.savefig(graph_file)
with open(graph_file, 'rb') as infile:
string = base64.b64encode(infile.read()).decode("utf-8")
# graphJSON = json.dumps(data, cls=plotly.utils.PlotlyJSONEncoder)
return render_template('results.html', plot=string)
if __name__ == '__main__':
# This is used when running locally only. When deploying to Google App
# Engine, a webserver process such as Gunicorn will serve the app. This
# can be configured by adding an `entrypoint` to app.yaml.
reddit_scraper = RedditScraper()
app.run(host='127.0.0.1', port=8080, debug=True)
# [END gae_python37_app]
|
py | b404ccc7a1281179293365aaa6b5bdafdb400d9c | # -*- coding: utf-8 -*-
#
# MINST Mlops 2022 documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"MINST Mlops 2022"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "0.1"
# The full version, including alpha/beta/rc tags.
release = "0.1"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "default"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "minist-mlops-2022doc"
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
(
"index",
"minist-mlops-2022.tex",
u"MINST Mlops 2022 Documentation",
u"Abdulstar Kousa",
"manual",
),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
"index",
"minist-mlops-2022",
u"MINST Mlops 2022 Documentation",
[u"Abdulstar Kousa"],
1,
)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
"index",
"minist-mlops-2022",
u"MINST Mlops 2022 Documentation",
u"Abdulstar Kousa",
"MINST Mlops 2022",
"MINST Mlops 2022",
"Miscellaneous",
),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
|
py | b404cd68207a057bf79b2e1c4fb1bedd0a6fc7ee | import pytest
from arraystack import Stack
def test_lifo():
stack = Stack()
stack.push(5)
stack.push(6)
assert stack.pop() == 6
assert stack.pop() == 5
def test_push_pop_empty():
stack = Stack()
assert stack.empty()
stack.push(5)
assert stack.pop() == 5
assert stack.empty()
def test_pop():
stack = Stack()
with pytest.raises(TypeError) as e_info:
stack.pop()
def test_capacity():
# Create a stack that can hold a single element only.
stack = Stack(1)
assert stack.empty()
stack.push(5)
# Attempting to push an additional item should fail.
with pytest.raises(TypeError) as e_info:
stack.push(6)
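# A minimal sketch of the Stack API these tests assume (illustrative only; the real
# implementation lives in the `arraystack` module, which is not shown here): optional
# bounded capacity, empty()/push()/pop(), and TypeError on popping an empty stack or
# pushing past capacity.
class _SketchStack:
    def __init__(self, capacity=None):
        self._items = []
        self._capacity = capacity

    def empty(self):
        return len(self._items) == 0

    def push(self, item):
        if self._capacity is not None and len(self._items) >= self._capacity:
            raise TypeError("stack is full")
        self._items.append(item)

    def pop(self):
        if not self._items:
            raise TypeError("pop from an empty stack")
        return self._items.pop()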
|
py | b404ce22a92fbc12abd03ad49a1f9cbd76cdcb00 | from unittest.mock import MagicMock
import pytest
import trio
from elro.device import create_device_from_data, WindowSensor, AlarmSensor, DeviceType
from elro.command import Command
@pytest.fixture
def update_data():
data = {"data": {"cmdId": Command.DEVICE_STATUS_UPDATE.value,
"device_name": DeviceType.DOOR_WINDOW_SENSOR.value,
"device_ID": "vader",
"device_status": " 2AAA "}}
return data
@pytest.fixture
def device(update_data):
device = create_device_from_data(update_data)
return device
@pytest.fixture
def alarm_device():
data = {"data": {"cmdId": Command.DEVICE_STATUS_UPDATE.value,
"device_name": DeviceType.CO_ALARM.value,
"device_ID": "vader",
"device_status": " 2AAA "}}
device = create_device_from_data(data)
return device
def test_factory_creates_the_right_type_for_window_sensor():
data = {"data": {"cmdId": Command.DEVICE_STATUS_UPDATE.value,
"device_name": DeviceType.DOOR_WINDOW_SENSOR.value,
"device_ID": "vader",
"device_status": " 2AAA "}}
device = create_device_from_data(data)
assert isinstance(device, WindowSensor)
def test_factory_creates_the_right_type_for_water_sensor():
data = {"data": {"cmdId": Command.DEVICE_STATUS_UPDATE.value,
"device_name": DeviceType.WATER_ALARM.value,
"device_ID": "vader",
"device_status": " 2AAA "}}
device = create_device_from_data(data)
assert isinstance(device, AlarmSensor)
def test_factory_creates_the_right_type_for_co_sensor():
data = {"data": {"cmdId": Command.DEVICE_STATUS_UPDATE.value,
"device_name": DeviceType.CO_ALARM.value,
"device_ID": "vader",
"device_status": " 2AAA "}}
device = create_device_from_data(data)
assert isinstance(device, AlarmSensor)
def test_factory_creates_the_right_type_for_heat_sensor():
data = {"data": {"cmdId": Command.DEVICE_STATUS_UPDATE.value,
"device_name": DeviceType.HEAT_ALARM.value,
"device_ID": "vader",
"device_status": " 2AAA "}}
device = create_device_from_data(data)
assert isinstance(device, AlarmSensor)
def test_factory_creates_the_right_type_for_fire_sensor():
data = {"data": {"cmdId": Command.DEVICE_STATUS_UPDATE.value,
"device_name": DeviceType.FIRE_ALARM.value,
"device_ID": "vader",
"device_status": " 2AAA "}}
device = create_device_from_data(data)
assert isinstance(device, AlarmSensor)
def test_calling_update_fires_updated_event(device, update_data):
event_set = MagicMock()
device.updated.set = event_set
device.update(update_data)
event_set.assert_called_once_with()
assert isinstance(device.updated, trio.Event)
assert device.updated.is_set() is False
def test_setting_device_state_fires_updated_event(device, update_data):
event_set = MagicMock()
device.updated.set = event_set
device.device_state = "anakin"
event_set.assert_called_once_with()
def test_setting_battery_level_fires_updated_event(device, update_data):
event_set = MagicMock()
device.updated.set = event_set
device.battery_level = 42
event_set.assert_called_once_with()
def test_update_window_sensor_to_open_sets_correct_state(device, update_data):
device.device_state = "leia"
update_data['data']['device_status'] = ' 2A55 '
device.update_specifics(update_data)
assert device.device_state == "Open"
def test_update_window_sensor_to_closed_sets_correct_state(device, update_data):
device.device_state = "leia"
update_data['data']['device_status'] = ' 2AAA '
device.update_specifics(update_data)
assert device.device_state == "Closed"
def test_update_alarm_sensor_to_normal_sets_correct_state(alarm_device, update_data):
alarm_device.device_state = 'leia'
update_data['data']['device_status'] = ' 2AAA '
alarm_device.update_specifics(update_data)
assert alarm_device.device_state == "Normal"
def test_update_alarm_sensor_to_alarm_sets_correct_state(alarm_device, update_data):
alarm_device.device_state = 'leia'
update_data['data']['device_status'] = ' 2ABB '
alarm_device.update_specifics(update_data)
assert alarm_device.device_state == "Test Alarm"
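# Status-code convention implied by the fixtures above (inferred from these tests only,
# not from the elro library documentation): the trailing hex digits of `device_status`
# encode the state, e.g. "AA" -> Closed/Normal, "55" -> Open (window sensors) and
# "BB" -> Test Alarm (alarm sensors).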
|
py | b404ceb6052fe9fdeab1e86ffd0a2468765685a9 | import requests, html
print(html.escape("<body>"))
html = requests.get("https://example.com/") ## bad: overwrites imported package name
print(html)
|
py | b404cf01493c765a9aacf1fdf448368308fd0010 | import asyncio
import logging
import sys
from motor import motor_asyncio
from pymongo import MongoClient
from pymongo.errors import ServerSelectionTimeoutError
from KilluaRobot import MONGO_DB_URI
from KilluaRobot.configure import get_int_key, get_str_key

log = logging.getLogger(__name__)
MONGO_PORT = get_int_key("MONGO_PORT")  # the config key name, not the literal port, belongs here
MONGO_DB_URI = get_str_key("MONGO_DB_URI")
MONGO_DB = "KilluaRobot"
client = MongoClient(MONGO_DB_URI, MONGO_PORT)[MONGO_DB]
motor = motor_asyncio.AsyncIOMotorClient(MONGO_DB_URI, MONGO_PORT)
db = motor[MONGO_DB]
db = client["emiexrobot"]
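# `motor` is the asyncio (non-blocking) client, used below to verify connectivity at
# startup; `client` is the synchronous pymongo handle.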
try:
asyncio.get_event_loop().run_until_complete(motor.server_info())
except ServerSelectionTimeoutError:
sys.exit(log.critical("Can't connect to mongodb! Exiting..."))
|
py | b404d038cdc48398b2cc077e434cc22532fad758 | from opentrons import types
metadata = {
'protocolName': 'Testosaur',
'author': 'Opentrons <[email protected]>',
'description': 'A variant on "Dinosaur" for testing',
'source': 'Opentrons Repository'
}
def run(ctx):
ctx.home()
tr = ctx.load_labware('opentrons_96_tiprack_300ul', 1)
right = ctx.load_instrument('p300_single', types.Mount.RIGHT, [tr])
lw = ctx.load_labware('corning_96_wellplate_360ul_flat', 2)
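    # Simple transfer: pick up a tip, move 10 uL from the plate's first well to its
    # second well, then park the used tip in the last position of the tip rack.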
right.pick_up_tip()
right.aspirate(10, lw.wells()[0].bottom())
right.dispense(10, lw.wells()[1].bottom())
right.drop_tip(tr.wells()[-1].top())
|
py | b404d0fd412c40111722693e60fc92b6a3c7a52b | #//
#//------------------------------------------------------------------------------
#// Copyright 2007-2011 Mentor Graphics Corporation
#// Copyright 2007-2011 Cadence Design Systems, Inc.
#// Copyright 2010-2011 Synopsys, Inc.
#// Copyright 2013 NVIDIA Corporation
#// Copyright 2019-2020 Tuomas Poikela (tpoikela)
#// All Rights Reserved Worldwide
#//
#// Licensed under the Apache License, Version 2.0 (the
#// "License"); you may not use this file except in
#// compliance with the License. You may obtain a copy of
#// the License at
#//
#// http://www.apache.org/licenses/LICENSE-2.0
#//
#// Unless required by applicable law or agreed to in
#// writing, software distributed under the License is
#// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
#// CONDITIONS OF ANY KIND, either express or implied. See
#// the License for the specific language governing
#// permissions and limitations under the License.
#//------------------------------------------------------------------------------
from typing import List, Dict
import cocotb
from cocotb.triggers import Timer
from .uvm_report_object import UVMReportObject
from .uvm_object_globals import (UVM_NONE, UVM_MEDIUM, UVM_PHASE_DONE, UVM_INFO,
UVM_CHECK_FIELDS, UVM_PHASE_SCHEDULE, UVM_WARNING, UVM_ERROR, UVM_FATAL)
from .uvm_common_phases import UVMBuildPhase
from .uvm_domain import UVMDomain
from .uvm_debug import uvm_debug
from .uvm_object import UVMObject
from .uvm_queue import UVMQueue
from .uvm_pool import UVMEventPool
from .sv import sv, uvm_split_string
from ..macros import uvm_info, uvm_fatal, uvm_warning, uvm_error
from .uvm_recorder import UVMRecorder
from .uvm_globals import (uvm_is_match, uvm_string_to_action,
uvm_string_to_severity, uvm_zero_delay)
from .uvm_factory import UVMObjectWrapper
from .uvm_links import (UVMRelatedLink, UVMParentChildLink)
INV_WARN1 = ("+uvm_set_action requires 4 arguments, but %0d given for command "
+ "+uvm_set_action=%s, Usage: +uvm_set_action=<comp>,<id>,<severity>,<action[|action]>")
INV_WARN2 = ("Bad severity argument \"%s\" given to command +uvm_set_action=%s,"
+ "Usage: +uvm_set_action=<comp>,<id>,<severity>,<action[|action]>")
INV_WARN3 = ("Bad action argument \"%s\" given to command +uvm_set_action=%s, "
+ "Usage: +uvm_set_action=<comp>,<id>,<severity>,<action[|action]>")
INV_WARN4 = ("+uvm_set_severity requires 4 arguments, but %0d given for command "
+ "+uvm_set_severity=%s, Usage: +uvm_set_severity=<comp>,<id>,<orig_severity>,<new_severity>")
INV_WARN5 = ("Bad severity argument \"%s\" given to command +uvm_set_severity=%s,"
+ " Usage: +uvm_set_severity=<comp>,<id>,<orig_severity>,<new_severity>")
INV_WARN6 = ("Bad severity argument \"%s\" given to command +uvm_set_severity=%s, "
+ "Usage: +uvm_set_severity=<comp>,<id>,<orig_severity>,<new_severity>")
class VerbositySetting:
"""
The verbosity settings may have a specific phase to start at.
We will do this work in the phase_started callback.
:ivar UVMComponent comp: Component related to the settings
:ivar UVMPhase phase: Phase in which settings apply.
:ivar int id: ID for verbosity setting
:ivar int offset: Time offset for verbosity setting
:ivar int verbosity: Verbosity level of the setting.
"""
def __init__(self):
self.comp = ""
self.phase = ""
self.id = ""
self.offset = 0
self.verbosity = UVM_MEDIUM
def convert2string(self):
return "Comp: {}, phase: {}, ID: {}, offset: {}, verb: {}".format(
self.comp, self.phase, self.id, self.offset, self.verbosity)
CLONE_ERR = ("Attempting to clone '%s'. Clone cannot be called on a uvm_component. "
+ " The clone target variable will be set to None.")
class uvm_cmdline_parsed_arg_t:
"""
Class used to return arguments and associated data parsed from cmd line
arguments. These are usually plusargs given with +ARG_NAME=ARG_VALUE
:ivar str arg: Argument name
:ivar list args: Values of given argument.
:ivar int used: Logs how many times arg is used.
"""
def __init__(self):
self.arg = ""
self.args = []
self.used = 0
class UVMComponent(UVMReportObject):
"""
Base class for defining UVM components
The uvm_component class is the root base class for UVM components. In
addition to the features inherited from `UVMObject` and `UVMReportObject`,
uvm_component provides the following interfaces:
Hierarchy:
provides methods for searching and traversing the component hierarchy.
Phasing
defines a phased test flow that all components follow, with a
group of standard phase methods and an API for custom phases and
multiple independent phasing domains to mirror DUT behavior e.g. power
Reporting
provides a convenience interface to the `UVMReportHandler`. All
messages, warnings, and errors are processed through this interface.
Transaction recording
provides methods for recording the transactions
produced or consumed by the component to a transaction database (vendor
specific).
Factory
provides a convenience interface to the `UVMFactory`. The factory
is used to create new components and other objects based on type-wide and
instance-specific configuration.
The `UVMComponent` is automatically seeded during construction using UVM
seeding, if enabled. All other objects must be manually reseeded, if
appropriate. See `UVMObject.reseed` for more information.
Most local methods within the class are prefixed with m_, indicating they are
not user-level methods.
:cvar bool print_config_matches: Setting this static variable causes
`UVMConfigDb.get` to print info about
matching configuration settings as they are being applied.
:ivar UVMTrDatabase tr_database: Specifies the `UVMTrDatabase` object to use
for `begin_tr` and other methods in the <Recording Interface>.
Default is `UVMCoreService.get_default_tr_database`.
"""
print_config_matches = False
m_time_settings: List[VerbositySetting] = []
def __init__(self, name, parent):
"""
Creates a new component with the given leaf instance `name` and handle
to its `parent`. If the component is a top-level component (i.e. it is
created in a static module or interface), `parent` should be `None`.
The component will be inserted as a child of the `parent` object, if any.
If `parent` already has a child by the given `name`, an error is produced.
If `parent` is `None`, then the component will become a child of the
implicit top-level component, `uvm_top`.
        All classes derived from UVMComponent must call super().__init__(name, parent).
Args:
name (str): Name of the component.
parent (UVMComponent): Parent component.
"""
super().__init__(name)
self.m_children: Dict[str, 'UVMComponent'] = {} # uvm_component[string]
#// Variable: print_enabled
#//
#// This bit determines if this component should automatically be printed as a
#// child of its parent object.
#//
#// By default, all children are printed. However, this bit allows a parent
#// component to disable the printing of specific children.
self.print_enabled = True
self.m_current_phase = None # the most recently executed phase
self.m_parent = None
self.m_children_by_handle: Dict['UVMComponent', 'UVMComponent'] = {}
self.m_children_ordered: List['UVMComponent'] = []
self.m_build_done = False
self.m_phasing_active = 0
self.recording_detail = UVM_NONE
self.m_name = ""
self.m_verbosity_settings = []
self.m_main_stream = None
# functors to override uvm_root defaults
self.m_phase_imps = {} # uvm_phase[uvm_phase]
self.child_ptr = -1
self.m_tr_h = {} # uvm_recorder m_tr_h[uvm_transaction]
self.m_run_process = None
self.tr_database = None # uvm_tr_database
self.m_domain = None
self.m_phase_process = None # process
self.event_pool: UVMEventPool = UVMEventPool("evt_pool")
self.m_streams = {} # uvm_tr_stream [string][string]
# If uvm_top, reset name to "" so it doesn't show in full paths then return
if parent is None and name == "__top__":
self.set_name("")
#UVMReportObject.set_name(self, name)
#self.m_name = self.get_name()
return
from .uvm_coreservice import UVMCoreService
cs = UVMCoreService.get()
top = cs.get_root()
# Check that we're not in or past end_of_elaboration
common = UVMDomain.get_common_domain()
bld = common.find(UVMBuildPhase.get())
if bld is None:
uvm_fatal("COMP/INTERNAL", "attempt to find build phase object failed")
if bld.get_state() == UVM_PHASE_DONE:
parent_name = top.get_full_name()
if parent is not None:
parent_name = parent.get_full_name()
uvm_fatal("ILLCRT", ("It is illegal to create a component ('" +
name + "' under '" + parent_name + "') after the build phase has ended."))
if name == "":
name = "COMP_" + str(UVMObject.m_inst_count)
if parent == self:
uvm_fatal("THISPARENT",
"cannot set the parent of a component to itself")
if parent is None:
parent = top
if self.uvm_report_enabled(UVM_MEDIUM+1, UVM_INFO, "NEWCOMP"):
nname = parent.get_full_name()
if parent == top:
nname = "uvm_top"
uvm_info("NEWCOMP", "Creating " + nname + "." + name, UVM_MEDIUM+1)
if parent.has_child(name) and self != parent.get_child(name):
if parent == top:
error_str = ("Name '" + name + "' is not unique to other top-level "
+ "instances. If parent is a module, build a unique name by combining the "
+ "the module name and component name: $sformatf('%m.%s','"
+ name + "').")
uvm_fatal("CLDEXT", error_str)
else:
uvm_fatal("CLDEXT",
sv.sformatf("Cannot set '%s' as a child of '%s', %s",
name, parent.get_full_name(), "which already has a child by that name."))
return
self.m_parent = parent
self.set_name(name)
if self.m_parent.m_add_child(self) is False:
self.m_parent = None
self.m_domain = parent.m_domain # by default, inherit domains from parents
# Now that inst name is established, reseed (if use_uvm_seeding is set)
self.reseed()
# Do local configuration settings
arr = []
from .uvm_config_db import UVMConfigDb
if UVMConfigDb.get(self, "", "recording_detail", arr):
self.recording_detail = arr[0]
self.m_rh.set_name(self.get_full_name())
self.set_report_verbosity_level(parent.get_report_verbosity_level())
self.m_set_cl_msg_args()
"""
Group: Hierarchy Interface
These methods provide user access to information about the component
hierarchy, i.e., topology.
"""
def get_parent(self):
"""
Function: get_parent
Returns a handle to this component's parent, or `None` if it has no parent.
Returns:
UVMComponent: Parent of this component.
"""
return self.m_parent
def get_full_name(self) -> str:
"""
Returns the full hierarchical name of this object. The default
implementation concatenates the hierarchical name of the parent, if any,
with the leaf name of this object, as given by `UVMObject.get_name`.
Returns:
str: Full hierarchical name of this component.
"""
if self.m_name == "":
return self.get_name()
else:
return self.m_name
def get_children(self, children):
"""
This function populates the end of the `children` array with the
list of this component's children.
.. code-block:: python
array = []
my_comp.get_children(array)
for comp in array:
do_something(comp)
Args:
children (list): List into which child components are appended
"""
for name in self.m_children:
children.append(self.m_children[name])
def get_child(self, name):
"""
Args:
name (str): Name of desired child
Returns:
UVMComponent: Child component matching name.
"""
if name in self.m_children:
return self.m_children[name]
def get_next_child(self):
"""
Returns:
UVMComponent: Next child component.
"""
self.child_ptr += 1
if (self.child_ptr < len(self.m_children_ordered)):
return self.m_children_ordered[self.child_ptr]
return None
def get_first_child(self):
"""
These methods are used to iterate through this component's children, if
any. For example, given a component with an object handle, `comp`, the
following code calls `UVMObject.print` for each child:
.. code-block:: python
name = ""
child = comp.get_first_child()
while child is not None:
child.print()
child = comp.get_next_child()
Returns:
UVMComponent: First child component.
"""
self.child_ptr = 0
if len(self.m_children_ordered) > 0:
return self.m_children_ordered[0]
return None
def get_num_children(self):
"""
Returns:
int: The number of this component's children.
"""
return len(self.m_children_ordered)
def has_child(self, name):
"""
Args:
name (str): Desired child component name.
Returns:
bool: True if this component has a child with the given `name`,
False otherwise.
"""
return name in self.m_children
def set_name(self, name):
"""
Renames this component to `name` and recalculates all descendants'
full names. This is an internal function for now.
Args:
name (str): New name
"""
if self.m_name != "":
uvm_error("INVSTNM", ("It is illegal to change the name of a component."
+ "The component name will not be changed to \"{}\"".format(name)))
return
super().set_name(name)
self.m_set_full_name()
def lookup(self, name):
"""
Looks for a component with the given hierarchical `name` relative to this
component. If the given `name` is preceded with a '.' (dot), then the search
begins relative to the top level (absolute lookup). The handle of the
matching component is returned, else `None`. The name must not contain
wildcards.
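        A hypothetical usage (the component names are illustrative only):

        .. code-block:: python

            agent = env.lookup("agent0")            # relative to 'env'
            drv = env.lookup("agent0.driver")       # nested child
            root_cmp = env.lookup(".uvm_test_top")  # absolute, from the top level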
Args:
name:
Returns:
UVMComponent: Matching component, or None is no match is found.
"""
leaf = ""
remainder = ""
comp = None
from .uvm_coreservice import UVMCoreService
cs = UVMCoreService.get()
top = cs.get_root()
comp = self
[leaf, remainder] = self.m_extract_name(name, leaf, remainder)
if leaf == "":
comp = top # absolute lookup
[leaf, remainder] = self.m_extract_name(remainder, leaf, remainder)
if comp.has_child(leaf) is False:
uvm_warning("Lookup Error",
sv.sformatf("Cannot find child %0s from comp %s",leaf,
comp.get_name()))
return None
if remainder != "":
return comp.m_children[leaf].lookup(remainder)
return comp.m_children[leaf]
def get_depth(self) -> int:
"""
Returns the component's depth from the root level. uvm_top has a
depth of 0. The test and any other top level components have a depth
of 1, and so on.
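        For example, a component whose full name is "uvm_test_top.env.agent0"
        (an illustrative name containing two dots) reports a depth of 3.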
Returns:
int: The component's depth from the root level
"""
if self.m_name == "":
return 0
get_depth = 1
for i in range(len(self.m_name)):
if (self.m_name[i] == "."):
get_depth += 1
return get_depth
#----------------------------------------------------------------------------
# Group: Phasing Interface
#----------------------------------------------------------------------------
#
# These methods implement an interface which allows all components to step
# through a standard schedule of phases, or a customized schedule, and
# also an API to allow independent phase domains which can jump like state
# machines to reflect behavior e.g. power domains on the DUT in different
# portions of the testbench. The phase tasks and functions are the phase
# name with the _phase suffix. For example, the build phase function is
# <build_phase>.
#
# All processes associated with a task-based phase are killed when the phase
# ends. See <uvm_task_phase> for more details.
#----------------------------------------------------------------------------
def build_phase(self, phase):
"""
The `UVMBuildPhase` phase implementation method.
Any override should call super().build_phase(phase) to execute the automatic
configuration of fields registered in the component by calling
`apply_config_settings`.
To turn off automatic configuration for a component,
do not call super().build_phase(phase).
This method should never be called directly.
Args:
phase (UVMPhase):
"""
self.m_build_done = True
self.build()
def build(self):
"""
For backward compatibility the base `build_phase` method calls `build`.
"""
self.m_build_done = True
self.apply_config_settings(UVMComponent.print_config_matches)
if self.m_phasing_active == 0:
uvm_warning("UVM_DEPRECATED",
"build()/build_phase() has been called explicitly")
def connect_phase(self, phase):
"""
The `UVMConnectPhase` phase implementation method.
This method should never be called directly.
Args:
phase (UVMPhase):
"""
pass
# For backward compatibility the base connect_phase method calls connect.
# extern virtual function void connect()
def end_of_elaboration_phase(self, phase):
"""
The `UVMEndOfElaborationPhase` phase implementation method.
This method should never be called directly.
Args:
phase (UVMPhase):
"""
pass
# For backward compatibility the base <end_of_elaboration_phase> method calls <end_of_elaboration>.
# extern virtual function void end_of_elaboration()
def start_of_simulation_phase(self, phase):
"""
The `UVMStartOfSimulationPhase` phase implementation method.
This method should never be called directly.
Args:
phase (UVMPhase):
"""
self.start_of_simulation()
return
def start_of_simulation(self):
"""
For backward compatibility the base `start_of_simulation_phase` method calls `start_of_simulation`.
extern virtual function void start_of_simulation()
"""
return
async def run_phase(self, phase):
"""
Task: run_phase
The `UVMRunPhase` phase implementation method.
This task returning or not does not indicate the end
or persistence of this phase.
Thus the phase will automatically
end once all objections are dropped using ~phase.drop_objection()~.
Any processes forked by this task continue to run
after the task returns,
but they will be killed once the phase ends.
The run_phase task should never be called directly.
"""
uvm_debug(self, 'run_phase', self.get_name() + ' yielding self.run()')
# self.m_run_process = cocotb.fork(self.run())
# yield self.m_run_process
await self.run()
# For backward compatibility the base <run_phase> method calls <run>.
# extern virtual task run()
async def run(self):
uvm_debug(self, 'run', self.get_name() + ' yield Timer(0) in self.run()')
await uvm_zero_delay()
async def pre_reset_phase(self, phase):
"""
Task: pre_reset_phase
The `uvm_pre_reset_phase` phase implementation method.
This task returning or not does not indicate the end
or persistence of this phase.
It is necessary to raise an objection
using ~phase.raise_objection()~ to cause the phase to persist.
Once all components have dropped their respective objection
using ~phase.drop_objection()~, or if no components raises an
objection, the phase is ended.
Any processes forked by this task continue to run
after the task returns,
but they will be killed once the phase ends.
This method should not be called directly.
Args:
phase:
"""
await uvm_zero_delay()
async def reset_phase(self, phase):
"""
Task: reset_phase
The `uvm_reset_phase` phase implementation method.
This task returning or not does not indicate the end
or persistence of this phase.
It is necessary to raise an objection
using ~phase.raise_objection()~ to cause the phase to persist.
Once all components have dropped their respective objection
using ~phase.drop_objection()~, or if no components raises an
objection, the phase is ended.
Any processes forked by this task continue to run
after the task returns,
but they will be killed once the phase ends.
This method should not be called directly.
Args:
phase:
"""
await uvm_zero_delay()
async def post_reset_phase(self, phase):
"""
Task: post_reset_phase
The `uvm_post_reset_phase` phase implementation method.
This task returning or not does not indicate the end
or persistence of this phase.
It is necessary to raise an objection
using ~phase.raise_objection()~ to cause the phase to persist.
Once all components have dropped their respective objection
using ~phase.drop_objection()~, or if no components raises an
objection, the phase is ended.
Any processes forked by this task continue to run
after the task returns,
but they will be killed once the phase ends.
This method should not be called directly.
Args:
phase:
"""
await uvm_zero_delay()
async def pre_configure_phase(self, phase):
"""
Task: pre_configure_phase
The `uvm_pre_configure_phase` phase implementation method.
This task returning or not does not indicate the end
or persistence of this phase.
It is necessary to raise an objection
using ~phase.raise_objection()~ to cause the phase to persist.
Once all components have dropped their respective objection
using ~phase.drop_objection()~, or if no components raises an
objection, the phase is ended.
Any processes forked by this task continue to run
after the task returns,
but they will be killed once the phase ends.
This method should not be called directly.
Args:
phase:
"""
await uvm_zero_delay()
async def configure_phase(self, phase):
"""
Task: configure_phase
The `uvm_configure_phase` phase implementation method.
This task returning or not does not indicate the end
or persistence of this phase.
It is necessary to raise an objection
using ~phase.raise_objection()~ to cause the phase to persist.
Once all components have dropped their respective objection
using ~phase.drop_objection()~, or if no components raises an
objection, the phase is ended.
Any processes forked by this task continue to run
after the task returns,
but they will be killed once the phase ends.
This method should not be called directly.
Args:
phase:
"""
await uvm_zero_delay()
async def post_configure_phase(self, phase):
"""
Task: post_configure_phase
The `uvm_post_configure_phase` phase implementation method.
This task returning or not does not indicate the end
or persistence of this phase.
It is necessary to raise an objection
using ~phase.raise_objection()~ to cause the phase to persist.
Once all components have dropped their respective objection
using ~phase.drop_objection()~, or if no components raises an
objection, the phase is ended.
Any processes forked by this task continue to run
after the task returns,
but they will be killed once the phase ends.
This method should not be called directly.
Args:
phase:
"""
await uvm_zero_delay()
async def pre_main_phase(self, phase):
"""
Task: pre_main_phase
The `uvm_pre_main_phase` phase implementation method.
This task returning or not does not indicate the end
or persistence of this phase.
It is necessary to raise an objection
using ~phase.raise_objection()~ to cause the phase to persist.
Once all components have dropped their respective objection
using ~phase.drop_objection()~, or if no components raises an
objection, the phase is ended.
Any processes forked by this task continue to run
after the task returns,
but they will be killed once the phase ends.
This method should not be called directly.
Args:
phase:
"""
await uvm_zero_delay()
async def main_phase(self, phase):
"""
Task: main_phase
The `uvm_main_phase` phase implementation method.
This task returning or not does not indicate the end
or persistence of this phase.
It is necessary to raise an objection
using ~phase.raise_objection()~ to cause the phase to persist.
Once all components have dropped their respective objection
using ~phase.drop_objection()~, or if no components raises an
objection, the phase is ended.
Any processes forked by this task continue to run
after the task returns,
but they will be killed once the phase ends.
This method should not be called directly.
Args:
phase:
"""
await uvm_zero_delay()
async def post_main_phase(self, phase):
"""
Task: post_main_phase
The `uvm_post_main_phase` phase implementation method.
This task returning or not does not indicate the end
or persistence of this phase.
It is necessary to raise an objection
using ~phase.raise_objection()~ to cause the phase to persist.
Once all components have dropped their respective objection
using ~phase.drop_objection()~, or if no components raises an
objection, the phase is ended.
Any processes forked by this task continue to run
after the task returns,
but they will be killed once the phase ends.
This method should not be called directly.
Args:
phase:
"""
await uvm_zero_delay()
async def pre_shutdown_phase(self, phase):
"""
Task: pre_shutdown_phase
The `uvm_pre_shutdown_phase` phase implementation method.
This task returning or not does not indicate the end
or persistence of this phase.
It is necessary to raise an objection
using ~phase.raise_objection()~ to cause the phase to persist.
Once all components have dropped their respective objection
using ~phase.drop_objection()~, or if no components raises an
objection, the phase is ended.
Any processes forked by this task continue to run
after the task returns,
but they will be killed once the phase ends.
This method should not be called directly.
Args:
phase:
"""
await uvm_zero_delay()
async def shutdown_phase(self, phase):
"""
Task: shutdown_phase
The `uvm_shutdown_phase` phase implementation method.
This task returning or not does not indicate the end
or persistence of this phase.
It is necessary to raise an objection
using ~phase.raise_objection()~ to cause the phase to persist.
Once all components have dropped their respective objection
using ~phase.drop_objection()~, or if no components raises an
objection, the phase is ended.
Any processes forked by this task continue to run
after the task returns,
but they will be killed once the phase ends.
This method should not be called directly.
Args:
phase:
"""
await uvm_zero_delay()
async def post_shutdown_phase(self, phase):
"""
Task: post_shutdown_phase
The `uvm_post_shutdown_phase` phase implementation method.
This task returning or not does not indicate the end
or persistence of this phase.
It is necessary to raise an objection
using ~phase.raise_objection()~ to cause the phase to persist.
Once all components have dropped their respective objection
using ~phase.drop_objection()~, or if no components raises an
objection, the phase is ended.
Any processes forked by this task continue to run
after the task returns,
but they will be killed once the phase ends.
This method should not be called directly.
Args:
phase:
"""
await uvm_zero_delay()
def extract_phase(self, phase):
"""
The `UVMExtractPhase` phase implementation method.
This method should never be called directly.
Args:
phase:
"""
pass
#// For backward compatibility the base extract_phase method calls extract.
#extern virtual function void extract()
def check_phase(self, phase):
"""
The `UVMCheckPhase` phase implementation method.
This method should never be called directly.
Args:
phase:
"""
pass
#// For backward compatibility the base check_phase method calls check.
#extern virtual function void check()
def report_phase(self, phase):
"""
The `UVMReportPhase` phase implementation method.
This method should never be called directly.
Args:
phase:
"""
pass
def final_phase(self, phase):
"""
The `UVMFinalPhase` phase implementation method.
This method should never be called directly.
Args:
phase:
"""
# self.m_rh._close_files()
pass
def phase_started(self, phase):
"""
Invoked at the start of each phase. The `phase` argument specifies
the phase being started. Any threads spawned in this callback are
not affected when the phase ends.
Args:
phase:
"""
pass
def phase_ended(self, phase):
"""
Invoked at the end of each phase. The `phase` argument specifies
the phase that is ending. Any threads spawned in this callback are
not affected when the phase ends.
Args:
phase:
"""
pass
def phase_ready_to_end(self, phase):
"""
Function: phase_ready_to_end
Invoked when all objections to ending the given `phase` and all
sibling phases have been dropped, thus indicating that `phase` is
ready to begin a clean exit. Sibling phases are any phases that
have a common successor phase in the schedule plus any phases that
sync'd to the current phase. Components needing to consume delta
cycles or advance time to perform a clean exit from the phase
may raise the phase's objection.
.. code-block:: python
phase.raise_objection(self, "Reason")
It is the responsibility of this component to drop the objection
once it is ready for this phase to end (and processes killed).
If no objection to the given `phase` or sibling phases are raised,
then phase_ended() is called after a delta cycle. If any objection
is raised, then when all objections to ending the given `phase`
and siblings are dropped, another iteration of phase_ready_to_end
is called. To prevent endless iterations due to coding error,
after 20 iterations, phase_ended() is called regardless of whether
previous iteration had any objections raised.
"""
pass
#//--------------------------------------------------------------------
#// phase / schedule / domain API
#//--------------------------------------------------------------------
def set_domain(self, domain, hier=True):
"""
Apply a phase domain to this component and, if `hier` is set,
recursively to all its children.
Calls the virtual `define_domain` method, which derived components can
override to augment or replace the domain definition of its base class.
Assigns this component [tree] to a domain. adds required schedules into graph
        If called from build, `hier` won't recurse into all children (which don't exist yet)
If we have components inherit their parent's domain by default, then `hier`
isn't needed and we need a way to prevent children from inheriting this component's domain
Args:
domain:
hier:
"""
# build and store the custom domain
self.m_domain = domain
self.define_domain(domain)
if hier is True:
for c in self.m_children:
self.m_children[c].set_domain(domain)
def get_domain(self):
"""
Return handle to the phase domain set on this component
Returns:
"""
return self.m_domain
def define_domain(self, domain):
"""
Builds custom phase schedules into the provided `domain` handle.
This method is called by `set_domain`, which integrators use to specify
this component belongs in a domain apart from the default 'uvm' domain.
Custom component base classes requiring a custom phasing schedule can
augment or replace the domain definition they inherit by overriding
        their `define_domain`. To augment, overrides would call super().define_domain().
        To replace, overrides would not call super().define_domain().
The default implementation adds a copy of the `uvm` phasing schedule to
the given `domain`, if one doesn't already exist, and only if the domain
is currently empty.
Calling `set_domain`
with the default `uvm` domain (i.e. <uvm_domain::get_uvm_domain> ) on
        a component with no `define_domain` override effectively reverts
        that component to using the default `uvm` domain. This may be useful
        if a branch of the testbench hierarchy defines a custom domain, but
        some child sub-branch should remain in the default `uvm` domain: first
        call `set_domain` with a new domain instance handle with `hier` set.
Then, in the sub-branch, call `set_domain` with the default `uvm` domain handle,
obtained via <uvm_domain::get_uvm_domain>.
Alternatively, the integrator may define the graph in a new domain externally,
then call `set_domain` to apply it to a component.
Args:
domain:
"""
from .uvm_phase import UVMPhase
from .uvm_common_phases import UVMRunPhase
schedule = None # uvm_phase
# //schedule = domain.find(uvm_domain::get_uvm_schedule())
schedule = domain.find_by_name("uvm_sched")
if schedule is None:
# uvm_domain common
schedule = UVMPhase("uvm_sched", UVM_PHASE_SCHEDULE)
UVMDomain.add_uvm_phases(schedule)
domain.add(schedule)
common = UVMDomain.get_common_domain()
if common.find(domain,0) is None:
common.add(domain, with_phase=UVMRunPhase.get())
def set_phase_imp(self, phase, imp, hier=1):
"""
Override the default implementation for a phase on this component (tree) with a
custom one, which must be created as a singleton object extending the default
one and implementing required behavior in exec and traverse methods
The `hier` specifies whether to apply the custom functor to the whole tree or
just this component.
Args:
phase:
imp:
hier:
"""
self.m_phase_imps[phase] = imp
if hier:
for c in self.m_children:
self.m_children[c].set_phase_imp(phase,imp,hier)
#// Task: suspend
#//
#// Suspend this component.
#//
#// This method must be implemented by the user to suspend the
#// component according to the protocol and functionality it implements.
#// A suspended component can be subsequently resumed using <resume()>.
#extern virtual task suspend ()
#// Task: resume
#//
#// Resume this component.
#//
#// This method must be implemented by the user to resume a component
#// that was previously suspended using <suspend()>.
#// Some component may start in the suspended state and
#// may need to be explicitly resumed.
#extern virtual task resume ()
def resolve_bindings(self) -> None:
"""
Processes all port, export, and imp connections. Checks whether each port's
min and max connection requirements are met.
It is called just before the end_of_elaboration phase.
Users should not call directly.
"""
return
#extern function string massage_scope(string scope)
def massage_scope(self, scope: str) -> str:
# uvm_top
if scope == "":
return "^$"
if scope == "*":
return self.get_full_name() + ".*"
# absolute path to the top-level test
        if scope == "uvm_test_top":
return "uvm_test_top"
# absolute path to uvm_root
        if scope[0] == ".":
return self.get_full_name() + scope
return self.get_full_name() + "." + scope
#//----------------------------------------------------------------------------
#// Group: Configuration Interface
#//----------------------------------------------------------------------------
#//
#// Components can be designed to be user-configurable in terms of its
#// topology (the type and number of children it has), mode of operation, and
#// run-time parameters (knobs). The configuration interface accommodates
#// this common need, allowing component composition and state to be modified
#// without having to derive new classes or new class hierarchies for
#// every configuration scenario.
#//
#//----------------------------------------------------------------------------
def check_config_usage(self, recurse=1) -> None:
"""
Check all configuration settings in a components configuration table
to determine if the setting has been used, overridden or not used.
When `recurse` is 1 (default), configuration for this and all child
components are recursively checked. This function is automatically
called in the check phase, but can be manually called at any time.
To get all configuration information prior to the run phase, do something
like this in your top object:
.. code-block:: python
def start_of_simulation_phase(self, phase):
self.check_config_usage()
Args:
recurse:
"""
from .uvm_resource import UVMResourcePool
rp = UVMResourcePool.get()
rq = rp.find_unused_resources()
if len(rq) == 0:
return
uvm_info("CFGNRD"," ::: The following resources have at least one write and no reads :::",UVM_INFO)
rp.print_resources(rq, 1)
def apply_config_settings(self, verbose=0):
"""
Searches for all config settings matching this component's instance path.
For each match, the appropriate set_*_local method is called using the
matching config setting's field_name and value. Provided the set_*_local
method is implemented, the component property associated with the
field_name is assigned the given value.
This function is called by <uvm_component::build_phase>.
The apply_config_settings method determines all the configuration
settings targeting this component and calls the appropriate set_*_local
method to set each one. To work, you must override one or more set_*_local
methods to accommodate setting of your component's specific properties.
Any properties registered with the optional `uvm_*_field macros do not
require special handling by the set_*_local methods; the macros provide
the set_*_local functionality for you.
If you do not want apply_config_settings to be called for a component,
then the build_phase() method should be overloaded and you should not call
super.build_phase(phase). Likewise, apply_config_settings can be overloaded to
customize automated configuration.
When the `verbose` bit is set, all overrides are printed as they are
applied. If the component's `print_config_matches` property is set, then
apply_config_settings is automatically called with `verbose` = 1.
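        A minimal sketch (the names below are illustrative, not taken from this file):
        a test publishes a value and the component picks it up during build:

        .. code-block:: python

            # somewhere above the component in the hierarchy
            UVMConfigDb.set(None, "env.agent0", "recording_detail", 1)
            # inside the component, super().build_phase(phase) ends up calling
            # apply_config_settings(), which routes the value through set_int_local()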
Args:
verbose (bool): If true, prints more verbose information
"""
from .uvm_resource import UVMResourcePool
rp = UVMResourcePool.get() # uvm_resource_pool
rq = UVMQueue() # uvm_queue#(uvm_resource_base) rq
r = None # uvm_resource_base r
name = ""
search_name = ""
i = 0
j = 0
# populate an internal 'field_array' with list of
# fields declared with `uvm_field macros (checking
# that there aren't any duplicates along the way)
self._m_uvm_field_automation(None, UVM_CHECK_FIELDS, "")
T_cont = UVMObject._m_uvm_status_container
# if no declared fields, nothing to do.
if len(T_cont.field_array) == 0:
return
if verbose:
uvm_info("CFGAPL","applying configuration settings", UVM_NONE)
# // The following is VERY expensive. Needs refactoring. Should
# // get config only for the specific field names in 'field_array'.
# // That's because the resource pool is organized first by field name.
# // Can further optimize by encoding the value for each 'field_array'
# // entry to indicate string, uvm_bitstream_t, or object. That way,
# // we call 'get' for specific fields of specific types rather than
# // the search-and-cast approach here.
rq = rp.lookup_scope(self.get_full_name())
rq = UVMResourcePool.sort_by_precedence(rq)
# // rq is in precedence order now, so we have to go through in reverse
# // order to do the settings.
# for(int i=rq.size()-1; i>=0; --i):
for i in range(len(rq)):
r = rq[i]
name = r.get_name()
            # // does name have brackets [] in it?
            j = 0
            while j < len(name):
if (name[j] == "[" or name[j] == "."):
break
j += 1
# // If it does have brackets then we'll use the name
# // up to the brackets to search __m_uvm_status_container.field_array
if j < len(name):
search_name = name[0:j]
else:
search_name = name
if search_name not in T_cont.field_array and search_name != "recording_detail":
continue
if verbose:
uvm_info("CFGAPL",sv.sformatf("applying configuration to field %s", name),UVM_NONE)
val = r.read(self)
if isinstance(val, int):
self.set_int_local(name, val)
elif isinstance(val, UVMObject):
self.set_object_local(name, val, 0)
elif isinstance(val, UVMObjectWrapper):
self.set_object_local(name, val.obj, val.clone)
elif isinstance(val, str):
self.set_string_local(name, val)
elif verbose:
uvm_info("CFGAPL", sv.sformatf("field %s has an unsupported type", name), UVM_NONE)
T_cont.field_array.clear()
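    # Hedged usage sketch (not part of the original class; names such as
    # "num_items" are made up): a derived component can pick up a plain
    # integer config field by overriding the matching set_*_local hook:
    #
    #   class MyComp(UVMComponent):
    #       def set_int_local(self, field_name, value, recurse=1):
    #           super().set_int_local(field_name, value, recurse)
    #           if field_name == "num_items":
    #               self.num_items = value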
def print_config_settings(self, field="", comp=None, recurse=False):
"""
Function: print_config_settings
Called without arguments, print_config_settings prints all configuration
information for this component, as set by previous calls to <uvm_config_db::set()>.
        The settings are printed in the order of their precedence.
If `field` is specified and non-empty, then only configuration settings
matching that field, if any, are printed. The field may not contain
wildcards.
If `comp` is specified and non-`None`, then the configuration for that
component is printed.
If `recurse` is set, then configuration information for all `comp`'s
children and below are printed as well.
This function has been deprecated. Use print_config instead.
Args:
field (str): Print all config related to given field,
comp (UVMComponent): If given, print config only for that component.
recurse (bool): If true, recurse to all children
"""
        if not getattr(UVMComponent, 'have_been_warned', False):
            uvm_warning("deprecated",
                "uvm_component::print_config_settings has been deprecated. Use print_config() instead")
            UVMComponent.have_been_warned = True
self.print_config(recurse, 1)
def print_config(self, recurse=False, audit=False) -> None:
"""
Function: print_config
Print_config prints all configuration information for this
component, as set by previous calls to `UVMConfigDb.set` and exports to
        the resource pool. The settings are printed in the order of
their precedence.
If `recurse` is set, then configuration information for all
children and below are printed as well.
        If `audit` is set, then the audit trail for each resource is printed
along with the resource name and value
Args:
recurse (bool): If true, recurse to child components
audit (bool): If true, print audit trail for each resource.
"""
from .uvm_resource import UVMResourcePool
rp = UVMResourcePool.get()
uvm_info("CFGPRT","visible resources:", UVM_INFO)
rp.print_resources(rp.lookup_scope(self.get_full_name()), audit)
if recurse:
for key in self.m_children:
c = self.m_children[key]
c.print_config(recurse, audit)
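    # Hedged usage sketch (illustrative only): dump the visible configuration
    # for the whole testbench, with audit trails, before the run phase:
    #
    #   def start_of_simulation_phase(self, phase):
    #       self.print_config(recurse=True, audit=True)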
def print_config_with_audit(self, recurse=0):
"""
Function: print_config_with_audit
Operates the same as print_config except that the audit bit is
forced to 1. This interface makes user code a bit more readable as
it avoids multiple arbitrary bit settings in the argument list.
If `recurse` is set, then configuration information for all
children and below are printed as well.
"""
self.print_config(recurse, 1)
"""
Group: Objection Interface
These methods provide object level hooks into the `UVMObjection`
mechanism.
"""
def raised(self, objection, source_obj, description, count):
"""
Function: raised
The `raised` callback is called when this or a descendant of this component
instance raises the specified `objection`. The `source_obj` is the object
that originally raised the objection.
The `description` is optionally provided by the `source_obj` to give a
reason for raising the objection. The `count` indicates the number of
objections raised by the `source_obj`.
"""
pass
def dropped(self, objection, source_obj, description, count):
"""
The `dropped` callback is called when this or a descendant of this component
instance drops the specified `objection`. The `source_obj` is the object
that originally dropped the objection.
The `description` is optionally provided by the `source_obj` to give a
reason for dropping the objection. The `count` indicates the number of
objections dropped by the `source_obj`.
Args:
objection (UVMObjection):
source_obj (UVMObject):
description (str):
count (int):
"""
pass
async def all_dropped(self, objection, source_obj, description, count):
"""
        The `all_dropped` callback is called when all objections have been
dropped by this component and all its descendants. The `source_obj` is the
object that dropped the last objection.
The `description` is optionally provided by the `source_obj` to give a
reason for raising the objection. The `count` indicates the number of
objections dropped by the `source_obj`.
Args:
objection:
source_obj:
description:
count:
"""
pass
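    # Hedged usage sketch (the message id and verbosity below are illustrative,
    # not defined by this class): an environment can observe objection traffic
    # by overriding these hooks in a derived component:
    #
    #   def raised(self, objection, source_obj, description, count):
    #       uvm_info("OBJ_RAISED", source_obj.get_full_name() + " raised " +
    #                str(count) + " objection(s): " + description, UVM_LOW)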
#//----------------------------------------------------------------------------
#// Group: Factory Interface
#//----------------------------------------------------------------------------
#//
#// The factory interface provides convenient access to a portion of UVM's
#// <uvm_factory> interface. For creating new objects and components, the
#// preferred method of accessing the factory is via the object or component
#// wrapper (see <uvm_component_registry #(T,Tname)> and
#// <uvm_object_registry #(T,Tname)>). The wrapper also provides functions
#// for setting type and instance overrides.
#//
#//----------------------------------------------------------------------------
#// Function: create_component
#//
#// A convenience function for <uvm_factory::create_component_by_name>,
#// this method calls upon the factory to create a new child component
#// whose type corresponds to the preregistered type name, `requested_type_name`,
#// and instance name, `name`. This method is equivalent to:
#//
#//| factory.create_component_by_name(requested_type_name,
#//| get_full_name(), name, this)
#//
#// If the factory determines that a type or instance override exists, the type
#// of the component created may be different than the requested type. See
#// <set_type_override> and <set_inst_override>. See also <uvm_factory> for
#// details on factory operation.
def create_component(self, requested_type_name, name):
factory = _get_factory()
return factory.create_component_by_name(requested_type_name,
self.get_full_name(), name, self)
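    # Hedged usage sketch (type and instance names are invented): a parent
    # typically asks the factory for children from its build_phase:
    #
    #   self.drv = self.create_component("my_driver", "drv")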
#// Function: create_object
#//
#// A convenience function for <uvm_factory::create_object_by_name>,
#// this method calls upon the factory to create a new object
#// whose type corresponds to the preregistered type name,
#// `requested_type_name`, and instance name, `name`. This method is
#// equivalent to:
#//
#//| factory.create_object_by_name(requested_type_name,
#//| get_full_name(), name)
#//
#// If the factory determines that a type or instance override exists, the
#// type of the object created may be different than the requested type. See
#// <uvm_factory> for details on factory operation.
def create_object(self, requested_type_name, name=""):
factory = _get_factory()
return factory.create_object_by_name(requested_type_name,
self.get_full_name(), name)
#// Function: set_type_override_by_type
#//
#// A convenience function for `UVMFactory.set_type_override_by_type`, this
#// method registers a factory override for components and objects created at
#// this level of hierarchy or below. This method is equivalent to:
#//
#//| factory.set_type_override_by_type(original_type, override_type,replace)
#//
#// The `relative_inst_path` is relative to this component and may include
#// wildcards. The `original_type` represents the type that is being overridden.
#// In subsequent calls to `UVMFactory.create_object_by_type` or
#// `UVMFactory.create_component_by_type`, if the requested_type matches the
#// `original_type` and the instance paths match, the factory will produce
#// the `override_type`.
#//
#// The original and override type arguments are lightweight proxies to the
#// types they represent. See <set_inst_override_by_type> for information
#// on usage.
#extern static function void set_type_override_by_type
# (uvm_object_wrapper original_type,
# uvm_object_wrapper override_type,
# bit replace=1)
@classmethod
def set_type_override_by_type(cls, original_type, override_type, replace=1):
factory = _get_factory()
factory.set_type_override_by_type(original_type, override_type, replace)
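    # Hedged usage sketch (assumes my_driver/my_err_driver are registered with
    # the factory and expose get_type() via the usual registry machinery):
    #
    #   UVMComponent.set_type_override_by_type(my_driver.get_type(),
    #                                          my_err_driver.get_type())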
#// Function: set_inst_override_by_type
#//
#// A convenience function for <uvm_factory::set_inst_override_by_type>, this
#// method registers a factory override for components and objects created at
#// this level of hierarchy or below. In typical usage, this method is
#// equivalent to:
#//
#//| factory.set_inst_override_by_type( original_type,
#//| override_type,
#//| {get_full_name(),".",
#//| relative_inst_path})
#//
#// The `relative_inst_path` is relative to this component and may include
#// wildcards. The `original_type` represents the type that is being overridden.
#// In subsequent calls to `UVMFactory.create_object_by_type` or
#// `UVMFactory.create_component_by_type`, if the requested_type matches the
#// `original_type` and the instance paths match, the factory will produce the
#// `override_type`.
#//
#// The original and override types are lightweight proxies to the types they
#// represent. They can be obtained by calling ~type::get_type()~, if
#// implemented by `type`, or by directly calling ~type::type_id::get()~, where
#// `type` is the user type and `type_id` is the name of the typedef to
#// <uvm_object_registry #(T,Tname)> or <uvm_component_registry #(T,Tname)>.
#//
#// If you are employing the `uvm_*_utils macros, the typedef and the get_type
#// method will be implemented for you. For details on the utils macros
#// refer to <Utility and Field Macros for Components and Objects>.
#//
#// The following example shows `uvm_*_utils usage:
#//
#//| class comp extends uvm_component
#//| `uvm_component_utils(comp)
#//| ...
#//| endclass
#//|
#//| class mycomp extends uvm_component
#//| `uvm_component_utils(mycomp)
#//| ...
#//| endclass
#//|
#//| class block extends uvm_component
#//| `uvm_component_utils(block)
#//| comp c_inst
#//| virtual function void build_phase(uvm_phase phase)
#//| set_inst_override_by_type("c_inst",comp::get_type(),
#//| mycomp::get_type())
#//| endfunction
#//| ...
#//| endclass
#extern function void set_inst_override_by_type(string relative_inst_path,
# uvm_object_wrapper original_type,
# uvm_object_wrapper override_type)
#function void uvm_component::set_inst_override_by_type (string relative_inst_path,
# uvm_object_wrapper original_type,
# uvm_object_wrapper override_type)
# string full_inst_path
# uvm_coreservice_t cs = uvm_coreservice_t::get()
# uvm_factory factory=cs.get_factory()
#
# if (relative_inst_path == "")
# full_inst_path = get_full_name()
# else
# full_inst_path = {get_full_name(), ".", relative_inst_path}
#
# factory.set_inst_override_by_type(original_type, override_type, full_inst_path)
#
#endfunction
#// Function: set_type_override
#//
#// A convenience function for <uvm_factory::set_type_override_by_name>,
#// this method configures the factory to create an object of type
#// `override_type_name` whenever the factory is asked to produce a type
#// represented by `original_type_name`. This method is equivalent to:
#//
#//| factory.set_type_override_by_name(original_type_name,
#//| override_type_name, replace)
#//
#// The `original_type_name` typically refers to a preregistered type in the
#// factory. It may, however, be any arbitrary string. Subsequent calls to
#// create_component or create_object with the same string and matching
#// instance path will produce the type represented by override_type_name.
#// The `override_type_name` must refer to a preregistered type in the factory.
@classmethod
def set_type_override(cls, original_type_name, override_type_name,
replace=1):
factory = _get_factory()
factory.set_type_override_by_name(original_type_name,override_type_name, replace)
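    # Hedged usage sketch (the string type names are invented): make the
    # factory substitute an error-injecting driver for every "my_driver":
    #
    #   self.set_type_override("my_driver", "my_err_driver")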
#// Function: set_inst_override
#//
#// A convenience function for <uvm_factory::set_inst_override_by_name>, this
#// method registers a factory override for components created at this level
#// of hierarchy or below. In typical usage, this method is equivalent to:
#//
#//| factory.set_inst_override_by_name(original_type_name,
#//| override_type_name,
#//| {get_full_name(),".",
#//| relative_inst_path}
#//| )
#//
#// The `relative_inst_path` is relative to this component and may include
#// wildcards. The `original_type_name` typically refers to a preregistered type
#// in the factory. It may, however, be any arbitrary string. Subsequent calls
#// to create_component or create_object with the same string and matching
#// instance path will produce the type represented by `override_type_name`.
#// The `override_type_name` must refer to a preregistered type in the factory.
#extern function void set_inst_override(string relative_inst_path,
# string original_type_name,
# string override_type_name)
#// Function: print_override_info
#//
#// This factory debug method performs the same lookup process as create_object
#// and create_component, but instead of creating an object, it prints
#// information about what type of object would be created given the
#// provided arguments.
def print_override_info(self, requested_type_name, name=""):
factory = _get_factory()
factory.debug_create_by_name(requested_type_name, self.get_full_name(), name)
#//----------------------------------------------------------------------------
#// Group: Hierarchical Reporting Interface
#//----------------------------------------------------------------------------
#//
#// This interface provides versions of the set_report_* methods in the
#// <uvm_report_object> base class that are applied recursively to this
#// component and all its children.
#//
#// When a report is issued and its associated action has the LOG bit set, the
#// report will be sent to its associated FILE descriptor.
#//----------------------------------------------------------------------------
#// Function: set_report_id_verbosity_hier
def set_report_id_verbosity_hier(self, id, verbosity):
self.set_report_id_verbosity(id, verbosity)
for c in self.m_children:
self.m_children[c].set_report_id_verbosity_hier(id, verbosity)
#// Function: set_report_severity_id_verbosity_hier
#//
#// These methods recursively associate the specified verbosity with reports of
#// the given `severity`, `id`, or ~severity-id~ pair. A verbosity associated
#// with a particular severity-id pair takes precedence over a verbosity
#// associated with id, which takes precedence over a verbosity associated
#// with a severity.
#//
#// For a list of severities and their default verbosities, refer to
#// `UVMReportHandler`.
def set_report_severity_id_verbosity_hier(self, severity, id, verbosity):
self.set_report_severity_id_verbosity(severity, id, verbosity)
for c in self.m_children:
self.m_children[c].set_report_severity_id_verbosity_hier(severity, id, verbosity)
def set_report_severity_action_hier(self, severity, action):
"""
Args:
severity:
action:
"""
self.set_report_severity_action(severity, action)
for c in self.m_children:
self.m_children[c].set_report_severity_action_hier(severity, action)
def set_report_severity_id_action_hier(self, severity, id, action):
self.set_report_severity_id_action(severity, id, action)
for c in self.m_children:
self.m_children[c].set_report_severity_id_action_hier(severity, id, action)
def set_report_id_action_hier(self, id, action):
"""
These methods recursively associate the specified action with reports of
the given `severity`, `id`, or ~severity-id~ pair. An action associated
with a particular severity-id pair takes precedence over an action
associated with id, which takes precedence over an action associated
with a severity.
For a list of severities and their default actions, refer to
`UVMReportHandler`.
Args:
id (str): Message ID to use ('' = all)
action:
"""
self.set_report_id_action(id, action)
for c in self.m_children:
self.m_children[c].set_report_id_action_hier(id, action)
def set_report_default_file_hier(self, file):
"""
Sets default report file hierarchically. All `UVM_LOG` actions are
written into this file, unless more specific file is set.
Args:
file:
"""
self.set_report_default_file(file)
for c in self.m_children:
self.m_children[c].set_report_default_file_hier(file)
#// Function: set_report_severity_file_hier
def set_report_severity_file_hier(self, severity, file):
self.set_report_severity_file(severity, file)
for c in self.m_children:
self.m_children[c].set_report_severity_file_hier(severity, file)
#// Function: set_report_id_file_hier
def set_report_id_file_hier(self, id, file):
self.set_report_id_file(id, file)
for c in self.m_children:
self.m_children[c].set_report_id_file_hier(id, file)
#// Function: set_report_severity_id_file_hier
#//
#// These methods recursively associate the specified FILE descriptor with
#// reports of the given `severity`, `id`, or ~severity-id~ pair. A FILE
#// associated with a particular severity-id pair takes precedence over a FILE
#// associated with id, which take precedence over an a FILE associated with a
#// severity, which takes precedence over the default FILE descriptor.
#//
#// For a list of severities and other information related to the report
#// mechanism, refer to `UVMReportHandler`.
def set_report_severity_id_file_hier(self, severity, id, file):
self.set_report_severity_id_file(severity, id, file)
for c in self.m_children:
self.m_children[c].set_report_severity_id_file_hier(severity, id, file)
def set_report_verbosity_level_hier(self, verbosity):
"""
This method recursively sets the maximum verbosity level for reports for
this component and all those below it. Any report from this component
subtree whose verbosity exceeds this maximum will be ignored.
See `UVMReportHandler` for a list of predefined message verbosity levels
and their meaning.
Args:
verbosity:
"""
self.set_report_verbosity_level(verbosity)
for c in self.m_children:
self.m_children[c].set_report_verbosity_level_hier(verbosity)
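    # Hedged usage sketch (the component path is invented): a test can quiet
    # down one noisy agent subtree while leaving the rest of the env alone:
    #
    #   self.env.agent1.set_report_verbosity_level_hier(UVM_LOW)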
def pre_abort(self):
"""
This callback is executed when the message system is executing a
`UVM_EXIT` action. The exit action causes an immediate termination of
the simulation, but the pre_abort callback hook gives components an
opportunity to provide additional information to the user before
        the termination happens. For example, a test may want to execute
        the report function of a particular component even when an error
        condition has forced a premature termination. To do that, you would
        write a function like:
.. code-block:: python
def pre_abort(self):
self.report()
The pre_abort() callback hooks are called in a bottom-up fashion.
"""
pass
def m_do_pre_abort(self):
for child in self.m_children:
self.m_children[child].m_do_pre_abort()
self.pre_abort()
#//----------------------------------------------------------------------------
#// Group: Recording Interface
#//----------------------------------------------------------------------------
#// These methods comprise the component-based transaction recording
#// interface. The methods can be used to record the transactions that
#// this component "sees", i.e. produces or consumes.
#//
#// The API and implementation are subject to change once a vendor-independent
#// use-model is determined.
#//----------------------------------------------------------------------------
def accept_tr(self, tr, accept_time=0):
"""
This function marks the acceptance of a transaction, `tr`, by this
component. Specifically, it performs the following actions:
- Calls the `tr`'s <uvm_transaction::accept_tr> method, passing to it the
`accept_time` argument.
        - Calls this component's <do_accept_tr> method to allow for any post-accept
action in derived classes.
- Triggers the component's internal accept_tr event. Any processes waiting
on this event will resume in the next delta cycle.
Args:
tr:
accept_time:
"""
e = None
tr.accept_tr(accept_time)
self.do_accept_tr(tr)
e = self.event_pool.get("accept_tr")
if e is not None:
e.trigger()
def do_accept_tr(self, tr):
"""
The `accept_tr` method calls this function to accommodate any user-defined
post-accept action. Implementations should call super.do_accept_tr to
ensure correct operation.
#extern virtual protected function void do_accept_tr (uvm_transaction tr)
Args:
tr:
"""
return
def begin_tr(self, tr, stream_name="main", label="", desc="", begin_time=0,
parent_handle=0):
"""
This function marks the start of a transaction, `tr`, by this component.
Specifically, it performs the following actions:
- Calls `tr`'s <uvm_transaction::begin_tr> method, passing to it the
`begin_time` argument. The `begin_time` should be greater than or equal
to the accept time. By default, when `begin_time` = 0, the current
simulation time is used.
If recording is enabled (recording_detail != UVM_OFF), then a new
database-transaction is started on the component's transaction stream
given by the stream argument. No transaction properties are recorded at
this time.
- Calls the component's <do_begin_tr> method to allow for any post-begin
action in derived classes.
- Triggers the component's internal begin_tr event. Any processes waiting
on this event will resume in the next delta cycle.
A handle to the transaction is returned. The meaning of this handle, as
well as the interpretation of the arguments `stream_name`, `label`, and
`desc` are vendor specific.
Args:
tr:
stream_name:
label:
desc:
begin_time:
parent_handle:
Returns:
"""
return self.m_begin_tr(tr, parent_handle, stream_name, label, desc, begin_time)
def begin_child_tr(self, tr, parent_handle=0, stream_name="main", label="", desc="",
begin_time=0):
"""
This function marks the start of a child transaction, `tr`, by this
component. Its operation is identical to that of <begin_tr>, except that
an association is made between this transaction and the provided parent
transaction. This association is vendor-specific.
Args:
tr:
parent_handle:
stream_name:
label:
desc:
begin_time:
Returns:
"""
return self.m_begin_tr(tr, parent_handle, stream_name, label, desc, begin_time)
def do_begin_tr(self, tr, stream_name, tr_handle):
"""
The <begin_tr> and <begin_child_tr> methods call this function to
accommodate any user-defined post-begin action. Implementations should call
super.do_begin_tr to ensure correct operation.
Args:
tr:
stream_name:
tr_handle:
"""
return
def end_tr(self, tr, end_time=0, free_handle=1):
"""
Function: end_tr
This function marks the end of a transaction, `tr`, by this component.
Specifically, it performs the following actions:
- Calls `tr`'s <uvm_transaction::end_tr> method, passing to it the
`end_time` argument. The `end_time` must at least be greater than the
begin time. By default, when `end_time` = 0, the current simulation time
is used.
The transaction's properties are recorded to the database-transaction on
which it was started, and then the transaction is ended. Only those
properties handled by the transaction's do_record method (and optional
`uvm_*_field macros) are recorded.
- Calls the component's <do_end_tr> method to accommodate any post-end
action in derived classes.
- Triggers the component's internal end_tr event. Any processes waiting on
this event will resume in the next delta cycle.
The `free_handle` bit indicates that this transaction is no longer needed.
The implementation of free_handle is vendor-specific.
Args:
tr:
end_time:
free_handle:
"""
e = None # uvm_event#(uvm_object) e
recorder = None # uvm_recorder recorder
# db: uvm_tr_database = self.m_get_tr_database()
if tr is None:
return
tr.end_tr(end_time, free_handle)
if self.recording_detail != UVM_NONE:
if tr in self.m_tr_h:
recorder = self.m_tr_h[tr]
self.do_end_tr(tr, recorder.get_handle()) # callback
del self.m_tr_h[tr]
tr.record(recorder)
recorder.close(end_time)
if free_handle:
recorder.free()
else:
self.do_end_tr(tr, 0) # callback
e = self.event_pool.get("end_tr")
if e is not None:
e.trigger()
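    # Hedged usage sketch (driver, stream and item names are invented): a
    # component that wants its traffic recorded brackets each item with
    # begin_tr/end_tr around the time it is driven:
    #
    #   handle = self.begin_tr(item, "drv_stream")
    #   # ... drive the item on the bus ...
    #   self.end_tr(item)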
def do_end_tr(self, tr, tr_handle):
"""
The <end_tr> method calls this function to accommodate any user-defined
post-end action. Implementations should call super.do_end_tr to ensure
correct operation.
Args:
tr:
tr_handle:
"""
return
#// Function: record_error_tr
#//
#// This function marks an error transaction by a component. Properties of the
#// given uvm_object, `info`, as implemented in its <uvm_object::do_record> method,
#// are recorded to the transaction database.
#//
#// An `error_time` of 0 indicates to use the current simulation time. The
#// `keep_active` bit determines if the handle should remain active. If 0,
#// then a zero-length error transaction is recorded. A handle to the
#// database-transaction is returned.
#//
#// Interpretation of this handle, as well as the strings `stream_name`,
#// `label`, and `desc`, are vendor-specific.
#extern function integer record_error_tr (string stream_name="main",
# uvm_object info=None,
# string label="error_tr",
# string desc="",
# time error_time=0,
# bit keep_active=0)
def record_error_tr(self, stream_name="main", info=None, label="error_tr", desc="",
error_time=0, keep_active=0):
recorder = None # uvm_recorder
etype = ""
handle = 0
stream = None # uvm_tr_stream
db = self.m_get_tr_database() # uvm_tr_database
if keep_active:
etype = "Event, Link"
else:
etype = "Event"
if error_time == 0:
error_time = sv.realtime()
if (stream_name == "" or stream_name == "main"):
if self.m_main_stream is None:
self.m_main_stream = self.tr_database.open_stream("main", self.get_full_name(), "TVM")
stream = self.m_main_stream
else:
stream = self.get_tr_stream(stream_name)
handle = 0
if stream is not None:
recorder = stream.open_recorder(label, error_time, etype)
if recorder is not None:
if label != "":
recorder.record_string("label", label)
if desc != "":
recorder.record_string("desc", desc)
if info is not None:
info.record(recorder)
recorder.close(error_time)
if keep_active == 0:
recorder.free()
else:
handle = recorder.get_handle()
return handle
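    # Hedged usage sketch (the stream/label strings and bad_item are invented):
    # record a zero-length error event carrying the offending object:
    #
    #   self.record_error_tr("errors", info=bad_item, label="crc_error")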
#// Function: record_event_tr
#//
#// This function marks an event transaction by a component.
#//
#// An `event_time` of 0 indicates to use the current simulation time.
#//
#// A handle to the transaction is returned. The `keep_active` bit determines
#// if the handle may be used for other vendor-specific purposes.
#//
#// The strings for `stream_name`, `label`, and `desc` are vendor-specific
#// identifiers for the transaction.
#extern function integer record_event_tr (string stream_name="main",
# uvm_object info=None,
# string label="event_tr",
# string desc="",
# time event_time=0,
# bit keep_active=0)
def record_event_tr(self, stream_name="main", info=None, label="event_tr",
desc="", event_time=0,keep_active=0):
recorder = None # uvm_recorder
etype = ""
handle = 0
stream = None # uvm_tr_stream
db = self.m_get_tr_database() # uvm_tr_database
if keep_active:
etype = "Event, Link"
else:
etype = "Event"
if event_time == 0:
event_time = sv.realtime()
if (stream_name == "" or stream_name=="main"):
if self.m_main_stream is None:
self.m_main_stream = self.tr_database.open_stream("main", self.get_full_name(), "TVM")
stream = self.m_main_stream
else:
stream = self.get_tr_stream(stream_name)
handle = 0
if stream is not None:
recorder = stream.open_recorder(label, event_time, etype)
if recorder is not None:
if label != "":
recorder.record_string("label", label)
if desc != "":
recorder.record_string("desc", desc)
if info is not None:
info.record(recorder)
recorder.close(event_time)
if keep_active == 0:
recorder.free()
else:
handle = recorder.get_handle()
return handle
def get_tr_stream(self, name, stream_type_name=""):
"""
Streams which are retrieved via this method will be stored internally,
such that later calls to `get_tr_stream` will return the same stream
reference.
The stream can be removed from the internal storage via a call
to `free_tr_stream`.
Args:
name (str):Name for the stream
stream_type_name: Type name for the stream (Default = "")
Returns:
UVMTrStream: Stream with this component's full name as scope.
"""
db = self.m_get_tr_database() # uvm_tr_database
if name not in self.m_streams:
self.m_streams[name] = {}
if stream_type_name not in self.m_streams[name]:
self.m_streams[name][stream_type_name] = db.open_stream(name,
self.get_full_name(), stream_type_name)
return self.m_streams[name][stream_type_name]
#// Function: free_tr_stream
#// Frees the internal references associated with `stream`.
#//
#// The next call to <get_tr_stream> will result in a newly created
#// <uvm_tr_stream>. If the current stream is open (or closed),
#// then it will be freed.
#extern virtual function void free_tr_stream(uvm_tr_stream stream)
def free_tr_stream(self, stream):
# Check the None case...
if stream is None:
return
str_name = stream.get_name()
str_type_name = stream.get_stream_type_name()
# Then make sure this name/type_name combo exists
if (str_name not in self.m_streams or str_type_name not in
self.m_streams[str_name]):
return
# Then make sure this name/type_name combo is THIS stream
if self.m_streams[str_name][str_type_name] != stream:
return
# Then delete it from the arrays
del self.m_streams[str_name][str_type_name]
if len(self.m_streams[str_name]) == 0:
del self.m_streams[str_name]
# Finally, free the stream if necessary
if stream.is_open() or stream.is_closed():
stream.free()
def m_get_tr_database(self):
"""
Returns:
"""
if self.tr_database is None:
from .uvm_coreservice import UVMCoreService
cs = UVMCoreService.get()
self.tr_database = cs.get_default_tr_database()
return self.tr_database
def set_int_local(self, field_name, value, recurse=1):
"""
Args:
field_name:
value:
recurse:
"""
# call the super function to get child recursion and any registered fields
super().set_int_local(field_name, value, recurse)
# set the local properties
if uvm_is_match(field_name, "recording_detail"):
self.recording_detail = value
def m_set_full_name(self):
"""
m_set_full_name
---------------
"""
#if self.m_parent is None:
# uvm_fatal("Should not be called with uvm_root")
from .uvm_root import UVMRoot
#top = UVMRoot.get()
# top = None
top = []
if sv.cast(top, self.m_parent, UVMRoot) or self.m_parent is None:
self.m_name = self.get_name()
else:
self.m_name = self.m_parent.get_full_name() + "." + self.get_name()
for c in self.m_children:
tmp = self.m_children[c]
tmp.m_set_full_name()
def do_resolve_bindings(self):
"""
do_resolve_bindings
-------------------
"""
for s in self.m_children:
self.m_children[s].do_resolve_bindings()
self.resolve_bindings()
#extern function void do_flush()
def do_flush(self):
for c in self.m_children:
self.m_children[c].do_flush()
self.flush()
#extern virtual function void flush ()
def flush(self):
pass
def m_extract_name(self, name, leaf, remainder):
"""
Args:
name:
leaf:
remainder:
Returns:
"""
_len = len(name)
i = 0
for i in range(len(name)):
if name[i] == ".":
break
if i == _len - 1:
leaf = name
remainder = ""
return [leaf, remainder]
leaf = name[0:i]
remainder = name[i + 1: _len]
return [leaf, remainder]
#endfunction
def create(self, name=""):
"""
Overridden to disable component creation using this method.
Args:
name (str): Name of the component.
Returns:
None - Create cannot be called on `UVMComponent`
"""
uvm_error("ILLCRT",
"create cannot be called on a uvm_component. Use create_component instead.")
return None
def clone(self):
"""
        Components cannot be cloned. This override reports a `uvm_error`
        whenever cloning is attempted.
Returns:
None - Components cannot be cloned.
"""
uvm_error("ILLCLN", sv.sformatf(CLONE_ERR, self.get_full_name()))
return None
def m_begin_tr(self, tr, parent_handle=0, stream_name="main", label="",
desc="", begin_time=0):
"""
Args:
tr (UVMTransaction):
parent_handle:
stream_name (str):
label (str):
desc (str):
begin_time (int):
Returns:
"""
e = None # uvm_event#(uvm_object) e
name = ""
kind = ""
db = None # uvm_tr_database db
handle = 0
link_handle = 0
stream = None # uvm_tr_stream
# uvm_recorder
recorder = None
parent_recorder = None
link_recorder = None
if tr is None:
return 0
db = self.m_get_tr_database()
if parent_handle != 0:
parent_recorder = UVMRecorder.get_recorder_from_handle(parent_handle)
if parent_recorder is None:
seq = [] # uvm_sequence_item
from ..seq.uvm_sequence_item import UVMSequenceItem
if (sv.cast(seq,tr, UVMSequenceItem)):
seq = seq[0]
parent_seq = seq.get_parent_sequence()
if (parent_seq is not None):
parent_recorder = parent_seq.m_tr_recorder
if parent_recorder is not None:
link_handle = tr.begin_child_tr(begin_time, parent_recorder.get_handle())
else:
link_handle = tr.begin_tr(begin_time)
if link_handle != 0:
link_recorder = UVMRecorder.get_recorder_from_handle(link_handle)
if tr.get_name() != "":
name = tr.get_name()
else:
name = tr.get_type_name()
# TODO needed for recording only
if self.recording_detail != UVM_NONE:
if (stream_name == "") or (stream_name == "main"):
if self.m_main_stream is None:
self.m_main_stream = db.open_stream("main", self.get_full_name(), "TVM")
stream = self.m_main_stream
else:
stream = self.get_tr_stream(stream_name)
if stream is not None:
kind = "Begin_End, Link"
if parent_recorder is None:
kind = "Begin_No_Parent, Link"
recorder = stream.open_recorder(name, begin_time, kind)
if recorder is not None:
if label != "":
recorder.record_string("label", label)
if desc != "":
recorder.record_string("desc", desc)
if parent_recorder is not None:
self.tr_database.establish_link(UVMParentChildLink.get_link(parent_recorder,
recorder))
if link_recorder is not None:
self.tr_database.establish_link(UVMRelatedLink.get_link(recorder,
link_recorder))
self.m_tr_h[tr] = recorder
handle = 0
if recorder is not None:
handle = recorder.get_handle()
self.do_begin_tr(tr, stream_name, handle)
e = self.event_pool.get("begin_tr")
if e is not None:
e.trigger(tr)
return handle
#type_name = "uvm_component"
#def get_type_name(self):
# return UVMComponent.type_name
#extern function void do_print(uvm_printer printer)
def m_set_cl_msg_args(self):
"""
Internal methods for setting up command line messaging stuff
#extern function void m_set_cl_msg_args
"""
self.m_set_cl_verb()
self.m_set_cl_action()
self.m_set_cl_sev()
first_m_set_cl_verb = 1
def m_set_cl_verb(self):
"""
#extern function void m_set_cl_verb
"""
# // _ALL_ can be used for ids
# // +uvm_set_verbosity=<comp>,<id>,<verbosity>,<phase|time>,<offset>
# // +uvm_set_verbosity=uvm_test_top.env0.agent1.*,_ALL_,UVM_FULL,time,800
from .uvm_coreservice import UVMCoreService
cs = UVMCoreService.get()
from .uvm_cmdline_processor import UVMCmdlineProcessor
values = [] # static string values[$]
args = [] # string args[$]
clp = UVMCmdlineProcessor.get_inst()
cs = UVMCoreService.get()
top = cs.get_root()
if len(values) == 0:
clp.get_arg_values("+uvm_set_verbosity=", values)
for i in range(len(values)):
setting = VerbositySetting()
args.clear()
uvm_split_string(values[i], ",", args)
            # Warning is already issued in uvm_root, so just don't keep it
            len_match = len(args) not in [4, 5]  # True when the arg count is wrong
            if not len_match:
                setting.verbosity = clp.m_convert_verb(args[2])
            if UVMComponent.first_m_set_cl_verb and (len_match or setting.verbosity == -1):
del values[i]
else:
setting.comp = args[0]
setting.id = args[1]
setting.verbosity = clp.m_convert_verb(args[2])
setting.phase = args[3]
setting.offset = 0
if len(args) == 5:
setting.offset = int(args[4])
if ((setting.phase == "time") and (self == top)):
UVMComponent.m_time_settings.append(setting)
if uvm_is_match(setting.comp, self.get_full_name()):
if((setting.phase == "" or setting.phase == "build" or
setting.phase == "time") and setting.offset == 0):
if setting.id == "_ALL_":
self.set_report_verbosity_level(setting.verbosity)
else:
self.set_report_id_verbosity(setting.id, setting.verbosity)
else:
if setting.phase != "time":
self.m_verbosity_settings.append(setting)
if self == top:
cocotb.fork(self.m_fork_time_settings(top))
UVMComponent.first_m_set_cl_verb = 0
async def m_fork_time_settings(self, top):
m_time_settings = UVMComponent.m_time_settings
#fork begin
last_time = 0
if len(m_time_settings) > 0:
m_time_settings.sort(key=lambda item: item.offset)
for i in range(len(m_time_settings)):
comps = []
top.find_all(m_time_settings[i].comp, comps)
duration = m_time_settings[i].offset - last_time
last_time = m_time_settings[i].offset
cocotb.fork(self.m_set_comp_settings(i, comps.copy(), duration))
#end join_none // fork begin
async def m_set_comp_settings(self, i, comps, dur):
m_time_settings = UVMComponent.m_time_settings
await Timer(dur)
if m_time_settings[i].id == "_ALL_":
for comp in comps:
comp.set_report_verbosity_level(m_time_settings[i].verbosity)
else:
for comp in comps:
comp.set_report_id_verbosity(m_time_settings[i].id, m_time_settings[i].verbosity)
initialized_m_set_cl_action = 0
#extern function void m_set_cl_action
def m_set_cl_action(self):
# _ALL_ can be used for ids or severities
# +uvm_set_action=<comp>,<id>,<severity>,<action[|action]>
# +uvm_set_action=uvm_test_top.env0.*,_ALL_,UVM_ERROR,UVM_NO_ACTION
sev = 0
action = 0
if not UVMComponent.initialized_m_set_cl_action:
from .uvm_cmdline_processor import UVMCmdlineProcessor
values = []
UVMCmdlineProcessor.uvm_cmdline_proc.get_arg_values("+uvm_set_action=", values)
for idx in range(len(values)):
t = uvm_cmdline_parsed_arg_t()
args = []
uvm_split_string(values[idx], ",", args)
                if len(args) != 4:
                    uvm_warning("INVLCMDARGS", sv.sformatf(INV_WARN1, len(args), values[idx]))
                    continue
if((args[2] != "_ALL_") and not uvm_string_to_severity(args[2], sev)):
uvm_warning("INVLCMDARGS", sv.sformatf(INV_WARN2, args[2], values[idx]))
continue
if not uvm_string_to_action(args[3], action):
uvm_warning("INVLCMDARGS", sv.sformatf(INV_WARN3, args[3], values[idx]))
continue
t.args = args
t.arg = values[idx]
UVMComponent.m_uvm_applied_cl_action.append(t)
UVMComponent.initialized_m_set_cl_action = 1
for i in range(len(UVMComponent.m_uvm_applied_cl_action)):
args = UVMComponent.m_uvm_applied_cl_action[i].args
if not uvm_is_match(args[0], self.get_full_name()):
continue
sev = uvm_string_to_severity(args[2], sev)
action = uvm_string_to_action(args[3], action)
UVMComponent.m_uvm_applied_cl_action[i].used += 1
if args[1] == "_ALL_":
if args[2] == "_ALL_":
self.set_report_severity_action(UVM_INFO, action)
self.set_report_severity_action(UVM_WARNING, action)
self.set_report_severity_action(UVM_ERROR, action)
self.set_report_severity_action(UVM_FATAL, action)
else:
self.set_report_severity_action(sev, action)
else:
if args[2] == "_ALL_":
self.set_report_id_action(args[1], action)
else:
self.set_report_severity_id_action(sev, args[1], action)
initialized_m_set_cl_sev = 0
#extern function void m_set_cl_sev
def m_set_cl_sev(self):
# // _ALL_ can be used for ids or severities
# // +uvm_set_severity=<comp>,<id>,<orig_severity>,<new_severity>
# // +uvm_set_severity=uvm_test_top.env0.*,BAD_CRC,UVM_ERROR,UVM_WARNING
orig_sev = 0
sev = 0
if not UVMComponent.initialized_m_set_cl_sev:
values = []
from .uvm_cmdline_processor import UVMCmdlineProcessor
UVMCmdlineProcessor.uvm_cmdline_proc.get_arg_values("+uvm_set_severity=",values)
for idx in range(len(values)):
t = uvm_cmdline_parsed_arg_t()
args = [] # string[$]
uvm_split_string(values[idx], ",", args)
if len(args) != 4:
uvm_warning("INVLCMDARGS", sv.sformatf(INV_WARN4, len(args), values[idx]))
continue
if args[2] != "_ALL_" and not uvm_string_to_severity(args[2], orig_sev):
uvm_warning("INVLCMDARGS", sv.sformatf(INV_WARN5, args[2], values[idx]))
continue
if uvm_string_to_severity(args[3], sev) == -1:
uvm_warning("INVLCMDARGS", sv.sformatf(INV_WARN6, args[3], values[idx]))
continue
t.args = args
t.arg = values[idx]
UVMComponent.m_uvm_applied_cl_sev.append(t)
UVMComponent.initialized_m_set_cl_sev = 1
m_uvm_applied_cl_sev = UVMComponent.m_uvm_applied_cl_sev
for i in range(len(m_uvm_applied_cl_sev)):
args = m_uvm_applied_cl_sev[i].args
if not uvm_is_match(args[0], self.get_full_name()):
continue
orig_sev = uvm_string_to_severity(args[2], orig_sev)
sev = uvm_string_to_severity(args[3], sev)
m_uvm_applied_cl_sev[i].used += 1
if (args[1] == "_ALL_" and args[2] == "_ALL_"):
self.set_report_severity_override(UVM_INFO,sev)
self.set_report_severity_override(UVM_WARNING,sev)
self.set_report_severity_override(UVM_ERROR,sev)
self.set_report_severity_override(UVM_FATAL,sev)
elif (args[1] == "_ALL_"):
self.set_report_severity_override(orig_sev,sev)
elif (args[2] == "_ALL_"):
self.set_report_severity_id_override(UVM_INFO,args[1],sev)
self.set_report_severity_id_override(UVM_WARNING,args[1],sev)
self.set_report_severity_id_override(UVM_ERROR,args[1],sev)
self.set_report_severity_id_override(UVM_FATAL,args[1],sev)
else:
self.set_report_severity_id_override(orig_sev,args[1],sev)
m_uvm_applied_cl_action: List[uvm_cmdline_parsed_arg_t] = []
m_uvm_applied_cl_sev: List[uvm_cmdline_parsed_arg_t] = []
def m_add_child(self, child):
if child.get_name() in self.m_children:
old_child = self.m_children[child.get_name()]
uvm_warning("BDCLD",
"A child with name {} (type={}) exists"
.format(child.get_name(), old_child.get_type_name()))
return False
self.m_children[child.get_name()] = child
self.m_children_ordered.append(child)
self.m_children_by_handle[child] = child
return True
def has_first_child(self):
return len(self.m_children_ordered) > 0
def has_next_child(self):
return len(self.m_children_ordered) > (self.child_ptr + 1)
def m_apply_verbosity_settings(self, phase):
"""
#extern function void m_apply_verbosity_settings(uvm_phase phase)
Args:
phase:
"""
        # Iterate over indices in reverse so applied settings can be removed safely
        for i in range(len(self.m_verbosity_settings) - 1, -1, -1):
            setting = self.m_verbosity_settings[i]
            if phase.get_name() == setting.phase:
                if setting.offset == 0:
                    if setting.id == "_ALL_":
                        self.set_report_verbosity_level(setting.verbosity)
                    else:
                        self.set_report_id_verbosity(setting.id, setting.verbosity)
                else:
                    #process p = process::self()
                    #string p_rand = p.get_randstate()
                    #fork begin
                    #setting.offset
                    if setting.id == "_ALL_":
                        self.set_report_verbosity_level(setting.verbosity)
                    else:
                        self.set_report_id_verbosity(setting.id, setting.verbosity)
                    #end join_none
                    #p.set_randstate(p_rand)
                # Remove after use
                del self.m_verbosity_settings[i]
def kill(self):
"""
Kill internal run process of this component.
"""
if self.m_run_process is not None:
if hasattr(self.m_run_process, 'kill'):
self.m_run_process.kill()
else:
print("No kill() available")
#//------------------------------------------------------------------------------
#//
#// Factory Methods
#//
#//------------------------------------------------------------------------------
#
#
#// set_inst_override
#// -----------------
#
#function void uvm_component::set_inst_override (string relative_inst_path,
# string original_type_name,
# string override_type_name)
# full_inst_path = ""
# factory= _get_factory()
#
# if (relative_inst_path == "")
# full_inst_path = get_full_name()
# else
# full_inst_path = {get_full_name(), ".", relative_inst_path}
#
# factory.set_inst_override_by_name(
# original_type_name,
# override_type_name,
# full_inst_path)
#endfunction
#
#
#
#
#//------------------------------------------------------------------------------
#//
#// Phase interface
#//
#//------------------------------------------------------------------------------
#
#
#// phase methods
#//--------------
#// these are prototypes for the methods to be implemented in user components
#// build_phase() has a default implementation, the others have an empty default
#
#// these phase methods are common to all components in UVM. For backward
#// compatibility, they call the old style name (without the _phse)
#
#function void uvm_component::connect_phase(uvm_phase phase)
# connect()
# return
#endfunction
#function void uvm_component::end_of_elaboration_phase(uvm_phase phase)
# end_of_elaboration()
# return
#endfunction
#function void uvm_component::extract_phase(uvm_phase phase)
# extract()
# return
#endfunction
#function void uvm_component::check_phase(uvm_phase phase)
# check()
# return
#endfunction
#function void uvm_component::report_phase(uvm_phase phase)
# report()
# return
#endfunction
#
#
#// These are the old style phase names. In order for runtime phase names
#// to not conflict with user names, the _phase postfix was added.
#
#function void uvm_component::connect(); return; endfunction
#function void uvm_component::end_of_elaboration(); return; endfunction
#function void uvm_component::extract(); return; endfunction
#function void uvm_component::check(); return; endfunction
#function void uvm_component::report(); return; endfunction
#function void uvm_component::final_phase(uvm_phase phase); return; endfunction
#
#// these runtime phase methods are only called if a set_domain() is done
#
#//------------------------------
#// phase / schedule / domain API
#//------------------------------
#// methods for VIP creators and integrators to use to set up schedule domains
#// - a schedule is a named, organized group of phases for a component base type
#// - a domain is a named instance of a schedule in the master phasing schedule
#
#
#// suspend
#// -------
#
#task uvm_component::suspend()
# `uvm_warning("COMP/SPND/UNIMP", "suspend() not implemented")
#endtask
#
#
#// resume
#// ------
#
#task uvm_component::resume()
# `uvm_warning("COMP/RSUM/UNIMP", "resume() not implemented")
#endtask
#
#
#
#
#
#
#//------------------------------------------------------------------------------
#//
#// Configuration interface
#//
#//------------------------------------------------------------------------------
#
#
#// Undocumented struct for storing clone bit along w/
#// object on set_config_object(...) calls
#class uvm_config_object_wrapper
# uvm_object obj
# bit clone
#endclass : uvm_config_object_wrapper
#
#
#// do_print (override)
#// --------
#
#function void uvm_component::do_print(uvm_printer printer)
# string v
# super.do_print(printer)
#
# // It is printed only if its value is other than the default (UVM_NONE)
# if(uvm_verbosity'(recording_detail) != UVM_NONE)
# case (recording_detail)
# UVM_LOW : printer.print_generic("recording_detail", "uvm_verbosity",
# $bits(recording_detail), "UVM_LOW")
# UVM_MEDIUM : printer.print_generic("recording_detail", "uvm_verbosity",
# $bits(recording_detail), "UVM_MEDIUM")
# UVM_HIGH : printer.print_generic("recording_detail", "uvm_verbosity",
# $bits(recording_detail), "UVM_HIGH")
# UVM_FULL : printer.print_generic("recording_detail", "uvm_verbosity",
# $bits(recording_detail), "UVM_FULL")
# default : printer.print_field_int("recording_detail", recording_detail,
# $bits(recording_detail), UVM_DEC, , "integral")
# endcase
#
#endfunction
def _get_factory():
from .uvm_coreservice import UVMCoreService
cs = UVMCoreService.get()
factory = cs.get_factory()
return factory
|
py | b404d173bff2499b29045598a31749a9a7369fb8 | import csv
import requests
import json
fires = []
with open('fire.csv') as f:
reader = csv.DictReader(f)
counter = 0
    out = open('processed.csv', 'a')  # output handle; avoid shadowing the input file 'f'
for row in reader:
if(row["STAT_CAUSE_DESCR"] != "Miscellaneous" and row["STAT_CAUSE_DESCR"] != "Missing/Undefined" and row["DISCOVERY_DOY"] != "" and row["FIRE_SIZE"] != "" and row["LATITUDE"] != "" and row["LONGITUDE"] != ""):
if counter > 0:
fire = []
fire.append(row["STAT_CAUSE_DESCR"])
fire.append(row["DISCOVERY_DOY"])
fire.append(row["CONT_DOY"])
fire.append(row["FIRE_SIZE"])
fire.append(row["LATITUDE"])
fire.append(row["LONGITUDE"])
fire.append(row["NWCG_REPORTING_AGENCY"])
fire.append(row["FIRE_YEAR"])
fire.append(row["OWNER_DESCR"])
# url = 'http://data.fcc.gov/api/block/find?format=json&latitude=' + row["LATITUDE"] + '&longitude=' + row["LONGITUDE"] + '&showall=true'
# headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'}
# result = requests.get(url, headers=headers)
# parsed = (result.content.decode())
# data = json.loads(parsed)
# fire.append(data["State"]["name"])
# fire.append(data["County"]["name"])
fires.append(fire)
                line = ",".join(str(v) for v in fire) + "\n"
                out.write(line)
#print(fire)
counter+=1
if(counter%100 == 0):
print(counter)
|
py | b404d1d16006d1f5e7a8c88cec8284c33e6327c8 | #!/usr/bin/python
# coding=utf-8
################################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from test import run_only
from mock import MagicMock
from mock import patch
from diamond.collector import Collector
from mongodb import MongoDBCollector
################################################################################
def run_only_if_pymongo_is_available(func):
try:
import pymongo
pymongo # workaround for pyflakes issue #13
except ImportError:
pymongo = None
pred = lambda: pymongo is not None
return run_only(func, pred)
class TestMongoDBCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('MongoDBCollector', {
'host': 'localhost:27017',
})
self.collector = MongoDBCollector(config, None)
self.connection = MagicMock()
def test_import(self):
self.assertTrue(MongoDBCollector)
@run_only_if_pymongo_is_available
@patch('pymongo.Connection')
@patch.object(Collector, 'publish')
def test_should_publish_nested_keys_for_server_stats(self,
publish_mock,
connector_mock):
data = {'more_keys': {'nested_key': 1}, 'key': 2, 'string': 'str'}
self._annotate_connection(connector_mock, data)
self.collector.collect()
self.connection.db.command.assert_called_once_with('serverStatus')
self.assertPublishedMany(publish_mock, {
'more_keys.nested_key': 1,
'key': 2
})
@run_only_if_pymongo_is_available
@patch('pymongo.Connection')
@patch.object(Collector, 'publish')
def test_should_publish_nested_keys_for_db_stats(self,
publish_mock,
connector_mock):
data = {'db_keys': {'db_nested_key': 1}, 'dbkey': 2, 'dbstring': 'str'}
self._annotate_connection(connector_mock, data)
self.collector.collect()
self.connection['db1'].command.assert_called_once_with('dbStats')
metrics = {
'db_keys.db_nested_key': 1,
'dbkey': 2
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
@run_only_if_pymongo_is_available
@patch('pymongo.Connection')
@patch.object(Collector, 'publish')
def test_should_publish_stats_with_long_type(self,
publish_mock,
connector_mock):
data = {'more_keys': long(1), 'key': 2, 'string': 'str'}
self._annotate_connection(connector_mock, data)
self.collector.collect()
self.connection.db.command.assert_called_once_with('serverStatus')
self.assertPublishedMany(publish_mock, {
'more_keys': 1,
'key': 2
})
def _annotate_connection(self, connector_mock, data):
connector_mock.return_value = self.connection
self.connection.db.command.return_value = data
self.connection.database_names.return_value = ['db1']
################################################################################
if __name__ == "__main__":
unittest.main()
|
py | b404d2074ae4a382ff39e7b36729b2fb88b839d9 | from ipaddress import IPv6Address, IPv4Address
class SocksReply:
rep = 0
def __init__(self, bind_addr=IPv4Address("0.0.0.0"), bind_port=0):
self.ver = 5
self.rsv = 0
if isinstance(bind_addr, IPv4Address):
self.atyp = 1
elif isinstance(bind_addr, IPv6Address):
self.atyp = 4
else:
self.atyp = 3
self.bind_addr = bind_addr
self.bind_port = bind_port
    def __bytes__(self):
        # RFC 1928 reply layout: VER | REP | RSV | ATYP | BND.ADDR | BND.PORT
        if self.atyp == 4:  # IPv6 address, 16 octets
            addr = int(self.bind_addr).to_bytes(16, byteorder='big')
        elif self.atyp == 3:  # domain name: one length octet followed by the name
            name = str(self.bind_addr).encode()
            addr = len(name).to_bytes(1, byteorder='big') + name
        else:  # IPv4 address, 4 octets
            addr = int(self.bind_addr).to_bytes(4, byteorder='big')
        return (self.ver.to_bytes(1, byteorder='big') +
                self.rep.to_bytes(1, byteorder='big') +
                self.rsv.to_bytes(1, byteorder='big') +
                self.atyp.to_bytes(1, byteorder='big') + addr +
                self.bind_port.to_bytes(2, byteorder='big'))
class Success(SocksReply):
pass
class Failure(SocksReply):
rep = 1
class ConnectionNotAllowed(SocksReply):
rep = 2
class NetworkUnreachable(SocksReply):
rep = 3
class HostUnreachable(SocksReply):
rep = 4
class ConnectionRefused(SocksReply):
rep = 5
class TTLExpired(SocksReply):
rep = 6
class CommandNotSupported(SocksReply):
rep = 7
class AddressTypeNotSupported(SocksReply):
rep = 8
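if __name__ == "__main__":
    # Hedged example, not part of the original module: serialize a successful
    # reply bound to 127.0.0.1:1080 and show the raw wire bytes.
    reply = Success(IPv4Address("127.0.0.1"), 1080)
    print(bytes(reply).hex())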
|
py | b404d2955fd0a7b565b9b46312e63e9d936d96cf | import numpyro.distributions as dist
from jax import numpy as np
from jax import random
from ramsey.covariance_functions import exponentiated_quadratic
# pylint: disable=too-many-locals,invalid-name
def sample_from_sinus_function(key, batch_size=10, num_observations=100):
x = np.linspace(-np.pi, np.pi, num_observations).reshape(
(num_observations, 1)
)
ys = []
fs = []
for _ in range(batch_size):
key, sample_key1, sample_key2, sample_key3 = random.split(key, 4)
a = 2 * random.uniform(sample_key1) - 1
b = random.uniform(sample_key2) - 0.5
f = a * np.sin(x - b)
y = f + random.normal(sample_key3, shape=(num_observations, 1)) * 0.10
fs.append(f.reshape((1, num_observations, 1)))
ys.append(y.reshape((1, num_observations, 1)))
x = np.tile(x, [batch_size, 1, 1])
y = np.vstack(np.array(ys))
f = np.vstack(np.array(fs))
return (x, y), f
# pylint: disable=too-many-locals,invalid-name
def sample_from_gaussian_process(
key, batch_size=10, num_observations=100, num_dim=1
):
x = random.normal(key, shape=(num_observations * num_dim,)).reshape(
(num_observations, num_dim)
)
ys = []
fs = []
for _ in range(batch_size):
key, sample_key1, sample_key2, sample_key3, sample_key4 = random.split(
key, 5
)
rho = dist.InverseGamma(1, 1).sample(sample_key1)
sigma = dist.InverseGamma(5, 5).sample(sample_key2)
K = exponentiated_quadratic(x, x, rho, sigma)
f = random.multivariate_normal(
sample_key3,
mean=np.zeros(num_observations),
cov=K + np.diag(np.ones(num_observations)) * 1e-5,
)
y = random.multivariate_normal(
sample_key4, mean=f, cov=np.eye(num_observations) * 0.05
)
fs.append(f.reshape((1, num_observations, 1)))
ys.append(y.reshape((1, num_observations, 1)))
x = np.tile(x, [batch_size, 1, 1])
y = np.vstack(np.array(ys))
f = np.vstack(np.array(fs))
return (x, y), f
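if __name__ == "__main__":
    # Hedged example, not part of the original module: draw a tiny batch from
    # each sampler and print the resulting array shapes.
    key = random.PRNGKey(0)
    (x, y), f = sample_from_sinus_function(key, batch_size=2, num_observations=16)
    print(x.shape, y.shape, f.shape)
    (x, y), f = sample_from_gaussian_process(key, batch_size=2, num_observations=16)
    print(x.shape, y.shape, f.shape)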
|
py | b404d32a3bff7eb8a7178f0c4a9422d1a27cddd1 | # -*- coding: utf-8 -*-
# Copyright 2019 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The test service unit tests."""
from __future__ import print_function
import contextlib
import os
import shutil
import mock
from chromite.api.gen.chromiumos import common_pb2
from chromite.cbuildbot import commands
from chromite.cbuildbot import goma_util
from chromite.lib import build_target_lib
from chromite.lib import chroot_lib
from chromite.lib import constants
from chromite.lib import cros_build_lib
from chromite.lib import cros_test_lib
from chromite.lib import failures_lib
from chromite.lib import image_lib
from chromite.lib import moblab_vm
from chromite.lib import osutils
from chromite.lib import portage_util
from chromite.lib import sysroot_lib
from chromite.lib.parser import package_info
from chromite.service import test
class PartialDict:
"""Used as key value matcher in a mocked call."""
def __init__(self, key, value):
self.key = key
self.value = value
def __eq__(self, other):
return other[self.key] == self.value
class BuildTargetUnitTestResultTest(cros_test_lib.TestCase):
"""BuildTargetUnitTestResult tests."""
def testSuccess(self):
"""Test success case."""
result = test.BuildTargetUnitTestResult(0, None)
self.assertTrue(result.success)
def testPackageFailure(self):
"""Test packages failed."""
# Supposed to be CPVs, but not actually necessary at the moment.
packages = ['a', 'b']
# Should have a non-zero return code when packages fail.
result = test.BuildTargetUnitTestResult(1, packages)
self.assertFalse(result.success)
# Make sure failed packages alone are enough.
result = test.BuildTargetUnitTestResult(0, packages)
self.assertFalse(result.success)
def testScriptFailure(self):
"""Test non-package failure."""
# Should have a non-zero return code when packages fail.
result = test.BuildTargetUnitTestResult(1, None)
self.assertFalse(result.success)
class BuildTargetUnitTestTest(cros_test_lib.RunCommandTempDirTestCase):
"""BuildTargetUnitTest tests."""
def setUp(self):
self.board = 'board'
self.build_target = build_target_lib.BuildTarget(self.board)
self.chroot = chroot_lib.Chroot(path=self.tempdir)
# Make the chroot's tmp directory, used for the parallel emerge status file.
tempdir = os.path.join(self.tempdir, 'tmp')
osutils.SafeMakedirs(tempdir)
def testSuccess(self):
"""Test simple success case."""
result = test.BuildTargetUnitTest(self.build_target, self.chroot)
self.assertCommandContains(['cros_run_unit_tests', '--board', self.board])
self.assertTrue(result.success)
def testPackages(self):
"""Test the packages argument."""
packages = ['foo/bar', 'cat/pkg']
test.BuildTargetUnitTest(self.build_target, self.chroot, packages=packages)
self.assertCommandContains(['--packages', 'foo/bar cat/pkg'])
def testBlocklist(self):
"""Test the blocklist argument."""
blocklist = ['foo/bar', 'cat/pkg']
test.BuildTargetUnitTest(self.build_target, self.chroot,
blocklist=blocklist)
self.assertCommandContains(['--blacklist_packages', 'foo/bar cat/pkg'])
def testTestablePackagesOptional(self):
"""Test the testable packages optional argument."""
test.BuildTargetUnitTest(
self.build_target, self.chroot, testable_packages_optional=True)
self.assertCommandContains(['--no-testable-packages-ok'])
def testFailure(self):
"""Test non-zero return code and failed package handling."""
packages = ['foo/bar', 'cat/pkg']
cpvs = [package_info.SplitCPV(p, strict=False) for p in packages]
self.PatchObject(portage_util, 'ParseDieHookStatusFile',
return_value=cpvs)
expected_rc = 1
self.rc.SetDefaultCmdResult(returncode=expected_rc)
result = test.BuildTargetUnitTest(self.build_target, self.chroot)
self.assertFalse(result.success)
self.assertEqual(expected_rc, result.return_code)
self.assertCountEqual(cpvs, result.failed_cpvs)
def testCodeCoverage(self):
"""Test adding use flags for coverage when requested."""
result = test.BuildTargetUnitTest(self.build_target, self.chroot,
code_coverage=True)
self.assertCommandContains(['cros_run_unit_tests', '--board', self.board],
extra_env=PartialDict('USE', 'coverage'))
self.assertTrue(result.success)
def testCodeCoverageExistingFlags(self):
"""Test adding use flags for coverage when existing flags."""
chroot = chroot_lib.Chroot(path=self.tempdir, env={'USE': 'foo bar'})
result = test.BuildTargetUnitTest(self.build_target, chroot,
code_coverage=True)
self.assertCommandContains(['cros_run_unit_tests', '--board', self.board],
extra_env=PartialDict('USE', 'foo bar coverage'))
self.assertTrue(result.success)
def testCodeCoverageExistingCoverageFlag(self):
"""Test adding use flags for coverage when already has coverage flag."""
chroot = chroot_lib.Chroot(path=self.tempdir, env={'USE': 'coverage bar'})
result = test.BuildTargetUnitTest(self.build_target, chroot,
code_coverage=True)
self.assertCommandContains(['cros_run_unit_tests', '--board', self.board],
extra_env=PartialDict('USE', 'coverage bar'))
self.assertTrue(result.success)
class BuildTargetUnitTestTarballTest(cros_test_lib.MockTempDirTestCase):
"""BuildTargetUnitTestTarball tests."""
def setUp(self):
self.chroot = chroot_lib.Chroot(
path=os.path.join(self.tempdir, 'chroot/path'))
self.sysroot = sysroot_lib.Sysroot('/sysroot/path')
test_dir = os.path.join(
self.chroot.full_path(self.sysroot.path, constants.UNITTEST_PKG_PATH))
osutils.SafeMakedirs(test_dir)
self.result_path = os.path.join(self.tempdir, 'result')
def testSuccess(self):
"""Test success handling."""
result = cros_build_lib.CommandResult(returncode=0)
self.PatchObject(cros_build_lib, 'CreateTarball', return_value=result)
self.PatchObject(os.path, 'exists', return_value=True)
path = test.BuildTargetUnitTestTarball(self.chroot, self.sysroot,
self.result_path)
self.assertStartsWith(path, self.result_path)
def testNotExists(self):
"""Test creating the tarball for a path that doesn't exist."""
path = test.BuildTargetUnitTestTarball(
self.chroot, sysroot_lib.Sysroot('/invalid/sysroot'), self.result_path)
self.assertIsNone(path)
def testFailure(self):
"""Test failure creating tarball."""
result = cros_build_lib.CommandResult(returncode=1)
self.PatchObject(cros_build_lib, 'CreateTarball', return_value=result)
path = test.BuildTargetUnitTestTarball(self.chroot, self.sysroot,
self.result_path)
self.assertIsNone(path)
class DebugInfoTestTest(cros_test_lib.RunCommandTestCase):
"""DebugInfoTest tests."""
def testSuccess(self):
"""Test command success."""
self.assertTrue(test.DebugInfoTest('/sysroot/path'))
self.assertCommandContains(['debug_info_test',
'/sysroot/path/usr/lib/debug'])
def testFailure(self):
"""Test command failure."""
self.rc.SetDefaultCmdResult(returncode=1)
self.assertFalse(test.DebugInfoTest('/sysroot/path'))
class MoblabVmTestCase(cros_test_lib.RunCommandTempDirTestCase):
"""Tests for the SetupBoardRunConfig class."""
def MockDirectory(self, path):
"""Create an empty directory.
Args:
path (str): Relative path for the directory.
Returns:
str: Path to the directory.
"""
path = os.path.join(self.tempdir, path)
osutils.SafeMakedirs(path)
return path
def setUp(self):
self.builder = 'moblab-generic-vm/R12-3.4.5-67-890'
self.image_dir = self.MockDirectory('files/image')
self.payload_dir = self.MockDirectory('files/payload')
self.results_dir = self.MockDirectory('results')
self.vms = moblab_vm.MoblabVm(self.tempdir)
self.chroot = chroot_lib.Chroot(path=self.tempdir)
class CreateMoblabVmTest(MoblabVmTestCase):
"""Unit tests for CreateMoblabVm."""
def setUp(self):
self.mock_vm_create = self.PatchObject(moblab_vm.MoblabVm, 'Create')
def testBasic(self):
vms = test.CreateMoblabVm(self.tempdir, self.chroot.path, self.image_dir)
self.assertEqual(vms.workspace, self.tempdir)
self.assertEqual(vms.chroot, self.chroot.path)
self.assertEqual(
self.mock_vm_create.call_args_list,
[mock.call(self.image_dir, dut_image_dir=self.image_dir,
create_vm_images=False)])
class PrepareMoblabVmImageCacheTest(MoblabVmTestCase):
"""Unit tests for PrepareMoblabVmImageCache."""
def setUp(self):
@contextlib.contextmanager
def MountedMoblabDiskContextMock(*_args, **_kwargs):
yield self.tempdir
self.PatchObject(moblab_vm.MoblabVm, 'MountedMoblabDiskContext',
MountedMoblabDiskContextMock)
self.payload_file_name = 'payload.bin'
self.payload_file = os.path.join(self.payload_dir, self.payload_file_name)
self.payload_file_content = 'A Lannister always pays his debts.'
osutils.WriteFile(os.path.join(self.payload_dir, self.payload_file_name),
self.payload_file_content)
def testBasic(self):
"""PrepareMoblabVmImageCache loads all payloads into the vm."""
image_cache_dir = test.PrepareMoblabVmImageCache(self.vms, self.builder,
[self.payload_dir])
expected_cache_dir = 'static/prefetched/moblab-generic-vm/R12-3.4.5-67-890'
self.assertEqual(image_cache_dir,
os.path.join('/mnt/moblab/', expected_cache_dir))
copied_payload_file = os.path.join(self.tempdir, expected_cache_dir,
self.payload_file_name)
self.assertExists(copied_payload_file)
self.assertEqual(osutils.ReadFile(copied_payload_file),
self.payload_file_content)
class RunMoblabVmTestTest(MoblabVmTestCase):
"""Unit tests for RunMoblabVmTestTest."""
def setUp(self):
self.image_cache_dir = '/mnt/moblab/whatever'
self.PatchObject(moblab_vm.MoblabVm, 'Start')
self.PatchObject(moblab_vm.MoblabVm, 'Stop')
def testBasic(self):
"""RunMoblabVmTest calls test_that with correct args."""
test.RunMoblabVmTest(self.chroot, self.vms, self.builder,
self.image_cache_dir, self.results_dir)
self.assertCommandContains([
'test_that', '--no-quickmerge',
'--results_dir', self.results_dir,
'-b', 'moblab-generic-vm',
'moblab_DummyServerNoSspSuite',
'--args',
'services_init_timeout_m=10 '
'target_build="%s" '
'test_timeout_hint_m=90 '
'clear_devserver_cache=False '
'image_storage_server="%s"' % (self.builder,
self.image_cache_dir + '/'),
], enter_chroot=True, chroot_args=self.chroot.get_enter_args())
class SimpleChromeWorkflowTestTest(cros_test_lib.MockTempDirTestCase):
"""Unit tests for SimpleChromeWorkflowTest."""
def setUp(self):
self.chrome_root = '/path/to/chrome/root'
self.sysroot_path = '/chroot/path/sysroot/path'
self.build_target = 'board'
self.goma_mock = self.PatchObject(goma_util, 'Goma')
self.chrome_sdk_run_mock = self.PatchObject(commands.ChromeSDK, 'Run')
# SimpleChromeTest workflow creates directories based on objects that are
# mocked for this test, so patch osutils.WriteFile
self.write_mock = self.PatchObject(osutils, 'WriteFile')
self.PatchObject(cros_build_lib, 'CmdToStr', return_value='CmdToStr value')
self.PatchObject(shutil, 'copy2')
def testSimpleChromeWorkflowTest(self):
goma_test_dir = os.path.join(self.tempdir, 'goma_test_dir')
goma_test_json_string = os.path.join(self.tempdir, 'goma_json_string.txt')
chromeos_goma_dir = os.path.join(self.tempdir, 'chromeos_goma_dir')
goma_config = common_pb2.GomaConfig(goma_dir=goma_test_dir,
goma_client_json=goma_test_json_string)
osutils.SafeMakedirs(goma_test_dir)
osutils.SafeMakedirs(chromeos_goma_dir)
osutils.Touch(goma_test_json_string)
goma = goma_util.Goma(
goma_config.goma_dir,
goma_config.goma_client_json,
stage_name='BuildApiTestSimpleChrome',
chromeos_goma_dir=chromeos_goma_dir)
mock_goma_log_dir = os.path.join(self.tempdir, 'goma_log_dir')
osutils.SafeMakedirs(mock_goma_log_dir)
goma.goma_log_dir = mock_goma_log_dir
    # For this test, we avoid running test._VerifySDKEnvironment because the use
    # of other mocks prevents creating the SDK dir that _VerifySDKEnvironment
    # checks for.
self.PatchObject(test, '_VerifySDKEnvironment')
self.PatchObject(os.path, 'exists', return_value=True)
ninja_cmd = self.PatchObject(commands.ChromeSDK, 'GetNinjaCommand',
return_value='ninja command')
test.SimpleChromeWorkflowTest(self.sysroot_path, self.build_target,
self.chrome_root, goma)
# Verify ninja_cmd calls.
ninja_calls = [mock.call(), mock.call(debug=False)]
ninja_cmd.assert_has_calls(ninja_calls)
# Verify calls with args to chrome_sdk_run made by service/test.py.
gn_dir = os.path.join(self.chrome_root, 'buildtools/linux64/gn')
board_out_dir = os.path.join(self.chrome_root, 'out_board/Release')
self.chrome_sdk_run_mock.assert_any_call(['gclient', 'runhooks'])
self.chrome_sdk_run_mock.assert_any_call(['true'])
self.chrome_sdk_run_mock.assert_any_call(
['bash', '-c', ('%s gen "%s" --args="$GN_ARGS"'
% (gn_dir, board_out_dir))])
self.chrome_sdk_run_mock.assert_any_call(
['env', '--null'], run_args=mock.ANY)
self.chrome_sdk_run_mock.assert_any_call('ninja command', run_args=mock.ANY)
# Create expected paths from constants so that the tests work inside or
# outside the SDK.
deploy_chrome_path = os.path.join(constants.SOURCE_ROOT,
constants.CHROMITE_BIN_SUBDIR,
'deploy_chrome')
image_dir_symlink = image_lib.GetLatestImageLink(self.build_target)
image_path = os.path.join(image_dir_symlink, constants.VM_IMAGE_BIN)
self.chrome_sdk_run_mock.assert_any_call(
[deploy_chrome_path, '--build-dir', board_out_dir, '--staging-only',
'--staging-dir', mock.ANY])
self.chrome_sdk_run_mock.assert_any_call(
['cros_run_test', '--copy-on-write', '--deploy', '--board=board',
('--image-path=%s' % (image_path)),
'--build-dir=out_board/Release'])
# Verify goma mock was started and stopped.
# TODO(crbug/1065172): Invalid assertions that had previously been mocked.
# self.goma_mock.Start.assert_called_once()
# self.goma_mock.Stop.assert_called_once()
class ValidateMoblabVmTestTest(MoblabVmTestCase):
"""Unit tests for ValidateMoblabVmTest."""
def setUp(self):
self.logs_dir = os.path.join(self.results_dir, 'debug')
osutils.SafeMakedirs(self.logs_dir)
self.logs_file = os.path.join(self.logs_dir, 'test_that.INFO')
def testValidateMoblabVmTestSuccess(self):
"""ValidateMoblabVmTest does not die when tests succeeded."""
osutils.WriteFile(self.logs_file, 'dummy_PassServer [PASSED]')
test.ValidateMoblabVmTest(self.results_dir)
def testValidateMoblabVmTestNoLogs(self):
"""ValidateMoblabVmTest dies when test_that logs not present."""
self.assertRaises(failures_lib.TestFailure,
test.ValidateMoblabVmTest, self.results_dir)
def testValidateMoblabVmTestFailure(self):
"""ValidateMoblabVmTest dies when tests failed."""
osutils.WriteFile(self.logs_file, 'dummy_PassServer [FAILED]')
self.assertRaises(failures_lib.TestFailure,
test.ValidateMoblabVmTest, self.results_dir)
|
py | b404d3b871ab3d026567438c5841fa0e555eb504 | # Copyright 2020 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Machine Translation example.
This script trains a Transformer on a WMT dataset.
"""
import collections
import functools
import os
import time
from absl import app
from absl import flags
from absl import logging
from flax import jax_utils
from flax import nn
from flax import optim
import bleu
import decode
import input_pipeline
import models
from flax.metrics import tensorboard
from flax.training import checkpoints
from flax.training import common_utils
import jax
from jax import random
import jax.nn
import jax.numpy as jnp
import numpy as np
import tensorflow.compat.v2 as tf
FLAGS = flags.FLAGS
flags.DEFINE_string(
'model_dir', default=None,
help='Directory to store model data.')
flags.DEFINE_string(
'data_dir', default=None,
help='Tensorflow datasets directory.')
flags.DEFINE_string(
'vocab_path', default=None,
help='Path to load or store sentencepiece vocab file.')
flags.DEFINE_integer(
'vocab_size', default=32000,
help='Vocabulary size if `vocab_path` is not given.')
flags.DEFINE_string(
'dataset_name', default='wmt17_translate/de-en',
help='Name of TFDS translation dataset to use.')
flags.DEFINE_string(
'eval_dataset_name', default='wmt14_translate/de-en:test',
help='Optional name of TFDS translation dataset to use for evaluation.')
flags.DEFINE_bool(
'reverse_translation', default=False,
help='Reverse the direction of translation.')
flags.DEFINE_integer(
'batch_size', default=256,
help='Per host batch size for training.')
flags.DEFINE_integer(
'beam_size', default=4,
help='Beam size for inference.')
flags.DEFINE_integer(
'eval_frequency', default=1000,
help='Frequency of eval during training, e.g. every 1000 steps.')
flags.DEFINE_integer(
'num_train_steps', default=500000,
help='Number of train steps.')
flags.DEFINE_integer(
'num_eval_steps', default=20,
help='Number of steps to take during evaluation.')
flags.DEFINE_float(
'learning_rate', default=0.0625,
help='Base learning rate.')
flags.DEFINE_integer(
'warmup_steps', default=1000,
help='Linear learning rate warmup.')
flags.DEFINE_float(
'label_smoothing', default=0.1,
help='Cross entropy loss label smoothing.')
flags.DEFINE_float(
'weight_decay', default=0.0,
help='Decay factor for AdamW style weight decay.')
flags.DEFINE_integer(
'max_target_length', default=256,
help='Maximum length cutoff for training examples.')
flags.DEFINE_integer(
'max_eval_target_length', default=256,
help='Maximum length cutoff for eval examples.')
flags.DEFINE_integer(
'max_predict_length', default=256,
help='Maximum length cutoff for predicted tokens.')
flags.DEFINE_bool(
'share_embeddings', default=True,
help='Inputs and targets share embedding.')
flags.DEFINE_bool(
'logits_via_embedding', default=True,
help='Final logit transform uses embedding matrix transpose.')
flags.DEFINE_integer(
'num_layers', default=6,
help='Number of transformer layers.')
flags.DEFINE_integer(
'qkv_dim', default=1024,
help='Size of query/key/value for attention.')
flags.DEFINE_integer(
'emb_dim', default=1024,
help='Size of embeddings.')
flags.DEFINE_integer(
'mlp_dim', default=4096,
help='Size of the MLP.')
flags.DEFINE_integer(
'num_heads', default=16,
help='Number of attention heads.')
flags.DEFINE_float(
'dropout_rate', default=0.1,
help='Dropout rate.')
flags.DEFINE_float(
'attention_dropout_rate', default=0.1,
help='Attention dropout rate.')
flags.DEFINE_integer(
'random_seed', default=0,
help='Integer for PRNG random seed.')
flags.DEFINE_bool(
'save_checkpoints', default=True,
help='Whether to save model checkpoints.')
flags.DEFINE_bool(
'restore_checkpoints', default=True,
help='Whether to restore from existing model checkpoints.')
flags.DEFINE_integer(
'checkpoint_freq', default=10000,
help='Save a checkpoint every these number of steps.')
flags.DEFINE_bool(
'use_bfloat16', default=True,
help=('Use bfloat16 mixed precision training instead of float32.'))
flags.DEFINE_string(
'jax_backend_target', default=None,
help=('TPU grpc target for use with cloud TPUs.'
' e.g. grpc://192.168.0.2:8470'))
def create_learning_rate_scheduler(
factors='constant * linear_warmup * rsqrt_decay',
base_learning_rate=0.5,
warmup_steps=1000,
decay_factor=0.5,
steps_per_decay=20000,
steps_per_cycle=100000):
"""Creates learning rate schedule.
Interprets factors in the factors string which can consist of:
* constant: interpreted as the constant value,
* linear_warmup: interpreted as linear warmup until warmup_steps,
* rsqrt_decay: divide by square root of max(step, warmup_steps)
* rsqrt_normalized_decay: divide by square root of max(step/warmup_steps, 1)
* decay_every: Every k steps decay the learning rate by decay_factor.
* cosine_decay: Cyclic cosine decay, uses steps_per_cycle parameter.
Args:
factors: string, factors separated by '*' that defines the schedule.
base_learning_rate: float, the starting constant for the lr schedule.
warmup_steps: int, how many steps to warm up for in the warmup schedule.
decay_factor: float, the amount to decay the learning rate by.
steps_per_decay: int, how often to decay the learning rate.
steps_per_cycle: int, steps per cycle when using cosine decay.
Returns:
a function learning_rate(step): float -> {'learning_rate': float}, the
step-dependent lr.
"""
factors = [n.strip() for n in factors.split('*')]
def step_fn(step):
"""Step to learning rate function."""
ret = 1.0
for name in factors:
if name == 'constant':
ret *= base_learning_rate
elif name == 'linear_warmup':
ret *= jnp.minimum(1.0, step / warmup_steps)
elif name == 'rsqrt_decay':
ret /= jnp.sqrt(jnp.maximum(step, warmup_steps))
elif name == 'rsqrt_normalized_decay':
ret *= jnp.sqrt(warmup_steps)
ret /= jnp.sqrt(jnp.maximum(step, warmup_steps))
elif name == 'decay_every':
ret *= (decay_factor**(step // steps_per_decay))
elif name == 'cosine_decay':
progress = jnp.maximum(0.0,
(step - warmup_steps) / float(steps_per_cycle))
ret *= jnp.maximum(0.0,
0.5 * (1.0 + jnp.cos(jnp.pi * (progress % 1.0))))
else:
raise ValueError('Unknown factor %s.' % name)
return jnp.asarray(ret, dtype=jnp.float32)
return step_fn
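# Illustrative sketch of how the factor string composes (the numbers here are
# assumptions picked for clarity, not values taken from elsewhere in this file):
#
#   lr_fn = create_learning_rate_scheduler(
#       factors='constant * linear_warmup * rsqrt_decay',
#       base_learning_rate=0.0625, warmup_steps=1000)
#   lr_fn(500)    # 0.0625 * (500 / 1000) / sqrt(1000) while warming up
#   lr_fn(4000)   # 0.0625 * 1.0 / sqrt(4000) once past warmup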
@functools.partial(jax.jit, static_argnums=(1, 2, 3))
def create_model(key, input_shape, target_shape, model_kwargs):
"""Instantiate transformer model and associated autoregressive cache def."""
model_def = models.Transformer.partial(**model_kwargs)
with nn.attention.Cache().mutate() as cache_def:
_, initial_params = model_def.init_by_shape(
key, [(input_shape, jnp.float32), (target_shape, jnp.float32)],
cache=cache_def)
model = nn.Model(model_def, initial_params)
return model, cache_def
def create_optimizer(model, learning_rate, weight_decay):
optimizer_def = optim.Adam(
learning_rate,
beta1=0.9,
beta2=0.98,
eps=1e-9,
weight_decay=weight_decay)
optimizer = optimizer_def.create(model)
return optimizer
def compute_weighted_cross_entropy(logits,
targets,
weights=None,
label_smoothing=0.0):
"""Compute weighted cross entropy and entropy for log probs and targets.
Args:
logits: [batch, length, num_classes] float array.
targets: categorical targets [batch, length] int array.
weights: None or array of shape [batch, length].
label_smoothing: label smoothing constant, used to determine the on and
off values.
Returns:
Tuple of scalar loss and batch normalizing factor.
"""
if logits.ndim != targets.ndim + 1:
raise ValueError('Incorrect shapes. Got shape %s logits and %s targets' %
(str(logits.shape), str(targets.shape)))
vocab_size = logits.shape[-1]
confidence = 1.0 - label_smoothing
low_confidence = (1.0 - confidence) / (vocab_size - 1)
normalizing_constant = -(
confidence * jnp.log(confidence) + (vocab_size - 1) *
low_confidence * jnp.log(low_confidence + 1e-20))
soft_targets = common_utils.onehot(
targets, vocab_size, on_value=confidence, off_value=low_confidence)
loss = -jnp.sum(soft_targets * nn.log_softmax(logits), axis=-1)
loss = loss - normalizing_constant
normalizing_factor = jnp.prod(targets.shape)
if weights is not None:
loss = loss * weights
normalizing_factor = weights.sum()
return loss.sum(), normalizing_factor
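# Worked example of the label smoothing above (assumed numbers, for clarity
# only): with vocab_size=4 and label_smoothing=0.1, confidence is 0.9 and
# low_confidence is 0.1 / 3, so a target id of 2 becomes the soft distribution
# [0.0333, 0.0333, 0.9, 0.0333]. normalizing_constant is the entropy of that
# soft distribution, subtracted so a prediction matching it exactly scores zero.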
def compute_weighted_accuracy(logits, targets, weights=None):
"""Compute weighted accuracy for log probs and targets.
Args:
logits: [batch, length, num_classes] float array.
targets: categorical targets [batch, length] int array.
weights: None or array of shape [batch, length]
Returns:
Tuple of scalar loss and batch normalizing factor.
"""
if logits.ndim != targets.ndim + 1:
raise ValueError('Incorrect shapes. Got shape %s logits and %s targets' %
(str(logits.shape), str(targets.shape)))
loss = jnp.equal(jnp.argmax(logits, axis=-1), targets)
normalizing_factor = jnp.prod(logits.shape[:-1])
if weights is not None:
loss = loss * weights
normalizing_factor = weights.sum()
return loss.sum(), normalizing_factor
def compute_metrics(logits, labels, weights, label_smoothing=0.0):
"""Compute summary metrics."""
loss, weight_sum = compute_weighted_cross_entropy(logits, labels, weights,
label_smoothing)
acc, _ = compute_weighted_accuracy(logits, labels, weights)
metrics = {
'loss': loss,
'accuracy': acc,
'denominator': weight_sum,
}
metrics = jax.lax.psum(metrics, axis_name='batch')
return metrics
def train_step(optimizer,
batch,
learning_rate_fn,
label_smoothing=0.0,
use_bfloat16=False,
dropout_rng=None):
"""Perform a single training step."""
# X_position and X_segmentation are needed only when using 'packed examples'
# where multiple sequences are packed into the same example with this
# metadata.
  # If such features are not present, they are ignored and the example is
  # treated like a normal, unpacked sequence example.
train_keys = ['inputs', 'targets',
'inputs_position', 'targets_position',
'inputs_segmentation', 'targets_segmentation']
(inputs, targets,
inputs_positions, targets_positions,
inputs_segmentation, targets_segmentation) = [
batch.get(k, None) for k in train_keys]
weights = jnp.where(targets > 0, 1, 0).astype(jnp.float32)
# We handle PRNG splitting inside the top pmap to improve efficiency.
dropout_rng, new_dropout_rng = random.split(dropout_rng)
def loss_fn(model):
"""loss function used for training."""
with nn.stochastic(dropout_rng):
logits = model(
inputs,
targets,
use_bfloat16=use_bfloat16,
inputs_positions=inputs_positions,
targets_positions=targets_positions,
inputs_segmentation=inputs_segmentation,
targets_segmentation=targets_segmentation,
train=True,
cache=None)
loss, weight_sum = compute_weighted_cross_entropy(logits, targets, weights,
label_smoothing)
mean_loss = loss / weight_sum
return mean_loss, logits
step = optimizer.state.step
lr = learning_rate_fn(step)
grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
(_, logits), grad = grad_fn(optimizer.target)
grad = jax.lax.pmean(grad, 'batch')
new_optimizer = optimizer.apply_gradient(grad, learning_rate=lr)
metrics = compute_metrics(logits, targets, weights)
metrics['learning_rate'] = lr
return new_optimizer, metrics, new_dropout_rng
def eval_step(model, batch, label_smoothing=0.0, use_bfloat16=False):
"""Calculate evaluation metrics on a batch."""
inputs, targets = batch['inputs'], batch['targets']
weights = jnp.where(targets > 0, 1.0, 0.0)
logits = model(inputs, targets, use_bfloat16=use_bfloat16, train=False,
cache=None)
return compute_metrics(logits, targets, weights, label_smoothing)
def predict_step(inputs, model, cache, eos_id, max_decode_len,
use_bfloat16=False, beam_size=4):
"""Predict translation with fast decoding beam search on a batch."""
batch_size = inputs.shape[0]
# Prepare transformer fast-decoder call for beam search: for beam search, we
# need to set up our decoder model to handle a batch size equal to
# batch_size * beam_size, where each batch item's data is expanded in-place
# rather than tiled.
# i.e. if we denote each batch element subtensor as el[n]:
# [el0, el1, el2] --> beamsize=2 --> [el0,el0,el1,el1,el2,el2]
src_padding_mask = decode.flat_batch_beam_expand(
(inputs > 0)[..., None], beam_size)
tgt_padding_mask = decode.flat_batch_beam_expand(
jnp.ones((batch_size, 1, 1)), beam_size)
encoded_inputs = decode.flat_batch_beam_expand(
model.encode(inputs, use_bfloat16=use_bfloat16,
train=False, cache=None), beam_size)
def tokens_ids_to_logits(flat_ids, flat_cache):
"""Token slice to logits from decoder model."""
# --> [batch * beam, 1, vocab]
with flat_cache.mutate() as new_flat_cache:
flat_logits = model.decode(encoded_inputs,
src_padding_mask,
flat_ids,
cache=new_flat_cache,
shift=False,
train=False,
use_bfloat16=use_bfloat16,
tgt_padding_mask=tgt_padding_mask)
# Remove singleton sequence-length dimension:
# [batch * beam, 1, vocab] --> [batch * beam, vocab]
flat_logits = flat_logits.squeeze(axis=1)
return flat_logits, new_flat_cache
# Using the above-defined single-step decoder function, run a
# beam search over possible sequences given input encoding.
beam_seqs, _ = decode.beam_search(
inputs,
cache,
tokens_ids_to_logits,
beam_size=beam_size,
alpha=0.6,
eos_id=eos_id,
max_decode_len=max_decode_len)
# Beam search returns [n_batch, n_beam, n_length + 1] with beam dimension
# sorted in increasing order of log-probability.
# Return the highest scoring beam sequence, drop first dummy 0 token.
return beam_seqs[:, -1, 1:]
def pad_examples(x, desired_batch_size):
"""Expand batch to desired size by repeating last slice."""
batch_pad = desired_batch_size - x.shape[0]
return np.concatenate([x, np.tile(x[-1], (batch_pad, 1))], axis=0)
def per_host_sum_pmap(in_tree):
"""Execute psum on in_tree's leaves over one device per host."""
host2devices = collections.defaultdict(list)
for d in jax.devices():
host2devices[d.host_id].append(d)
devices = [host2devices[k][0] for k in host2devices]
host_psum = jax.pmap(lambda x: jax.lax.psum(x, 'i'), 'i', devices=devices)
def pre_pmap(xs):
return jax.tree_map(lambda x: jnp.broadcast_to(x, (1,) + x.shape), xs)
def post_pmap(xs):
return jax.tree_map(lambda x: x[0], xs)
return post_pmap(host_psum(pre_pmap(in_tree)))
def tohost(x):
"""Collect batches from all devices to host and flatten batch dimensions."""
n_device, n_batch, *remaining_dims = x.shape
return np.array(x).reshape((n_device * n_batch,) + tuple(remaining_dims))
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
if FLAGS.jax_backend_target:
jax.config.FLAGS.jax_xla_backend = 'tpu_driver'
jax.config.FLAGS.jax_backend_target = FLAGS.jax_backend_target
# This seems to be necessary even when importing TF2?
tf.enable_v2_behavior()
# Number of local devices for this host.
n_devices = jax.local_device_count()
if jax.host_id() == 0:
tf.io.gfile.makedirs(FLAGS.model_dir)
train_summary_writer = tensorboard.SummaryWriter(
os.path.join(FLAGS.model_dir, 'train'))
eval_summary_writer = tensorboard.SummaryWriter(
os.path.join(FLAGS.model_dir, 'eval'))
if FLAGS.batch_size % n_devices:
raise ValueError('Batch size must be divisible by the number of devices')
vocab_path = FLAGS.vocab_path
if vocab_path is None:
vocab_path = os.path.join(FLAGS.model_dir, 'sentencepiece_model')
tf.io.gfile.makedirs(os.path.split(vocab_path)[0])
# Load Dataset
logging.info('Initializing dataset.')
train_ds, eval_ds, predict_ds, encoder = input_pipeline.get_wmt_datasets(
n_devices=n_devices,
dataset_name=FLAGS.dataset_name,
eval_dataset_name=FLAGS.eval_dataset_name,
shard_idx=jax.host_id(),
shard_count=jax.host_count(),
data_dir=FLAGS.data_dir,
vocab_path=vocab_path,
target_vocab_size=FLAGS.vocab_size,
batch_size=FLAGS.batch_size,
max_length=FLAGS.max_target_length,
max_eval_length=FLAGS.max_eval_target_length)
train_iter = iter(train_ds)
vocab_size = int(encoder.vocab_size())
eos_id = decode.EOS_ID # Default Sentencepiece EOS token.
def decode_tokens(toks):
valid_toks = toks[:np.argmax(toks == eos_id) + 1].astype(np.int32)
return encoder.detokenize(valid_toks).numpy().decode('utf-8')
logging.info('Initializing model, optimizer, and step functions.')
# Build Model and Optimizer
transformer_kwargs = {
'vocab_size': vocab_size,
'output_vocab_size': vocab_size,
'emb_dim': FLAGS.emb_dim,
'num_heads': FLAGS.num_heads,
'num_layers': FLAGS.num_layers,
'qkv_dim': FLAGS.qkv_dim,
'mlp_dim': FLAGS.mlp_dim,
'max_len': max(FLAGS.max_target_length, FLAGS.max_eval_target_length),
'share_embeddings': FLAGS.share_embeddings,
'logits_via_embedding': FLAGS.logits_via_embedding,
}
start_step = 0
rng = random.PRNGKey(FLAGS.random_seed)
rng, init_rng = random.split(rng)
input_shape = (FLAGS.batch_size, FLAGS.max_target_length)
target_shape = (FLAGS.batch_size, FLAGS.max_target_length)
model, cache_def = create_model(init_rng,
input_shape,
target_shape,
transformer_kwargs)
optimizer = create_optimizer(model,
FLAGS.learning_rate,
FLAGS.weight_decay)
# We access model only from optimizer below via optimizer.target.
del model
if FLAGS.restore_checkpoints:
# Restore unreplicated optimizer + model state from last checkpoint.
optimizer = checkpoints.restore_checkpoint(FLAGS.model_dir, optimizer)
# Grab last step.
start_step = int(optimizer.state.step)
# Replicate optimizer.
optimizer = jax_utils.replicate(optimizer)
learning_rate_fn = create_learning_rate_scheduler(
base_learning_rate=FLAGS.learning_rate,
warmup_steps=FLAGS.warmup_steps)
p_train_step = jax.pmap(
functools.partial(
train_step,
learning_rate_fn=learning_rate_fn,
label_smoothing=FLAGS.label_smoothing,
use_bfloat16=FLAGS.use_bfloat16),
axis_name='batch')
p_eval_step = jax.pmap(
functools.partial(
eval_step,
label_smoothing=FLAGS.label_smoothing,
use_bfloat16=FLAGS.use_bfloat16),
axis_name='batch')
p_pred_step = jax.pmap(
functools.partial(predict_step, use_bfloat16=FLAGS.use_bfloat16,
beam_size=FLAGS.beam_size),
axis_name='batch',
static_broadcasted_argnums=(3, 4)) # eos token, max_length are constant
# We init the first set of dropout PRNG keys, but update it afterwards inside
# the main pmap'd training update for performance.
dropout_rngs = random.split(rng, n_devices)
logging.info('Starting training loop.')
metrics_all = []
t_loop_start = time.time()
for step, batch in zip(range(start_step, FLAGS.num_train_steps), train_iter):
# Shard data to devices and do a training step.
batch = common_utils.shard(jax.tree_map(lambda x: x._numpy(), batch)) # pylint: disable=protected-access
optimizer, metrics, dropout_rngs = p_train_step(
optimizer, batch, dropout_rng=dropout_rngs)
metrics_all.append(metrics)
# Save a checkpoint on one host after every checkpoint_freq steps.
if (FLAGS.save_checkpoints and step % FLAGS.checkpoint_freq == 0 and
step > 0 and jax.host_id() == 0):
checkpoints.save_checkpoint(FLAGS.model_dir,
jax_utils.unreplicate(optimizer), step)
# Periodic metric handling.
if step % FLAGS.eval_frequency != 0 and step > 0:
continue
logging.info('Gathering training metrics.')
# Training Metrics
metrics_all = common_utils.get_metrics(metrics_all)
lr = metrics_all.pop('learning_rate').mean()
metrics_sums = jax.tree_map(jnp.sum, metrics_all)
denominator = metrics_sums.pop('denominator')
summary = jax.tree_map(lambda x: x / denominator, metrics_sums) # pylint: disable=cell-var-from-loop
summary['learning_rate'] = lr
steps_per_eval = FLAGS.eval_frequency if step != 0 else 1
steps_per_sec = steps_per_eval / (time.time() - t_loop_start)
t_loop_start = time.time()
if jax.host_id() == 0:
train_summary_writer.scalar('steps per second', steps_per_sec, step)
for key, val in summary.items():
train_summary_writer.scalar(key, val, step)
train_summary_writer.flush()
metrics_all = []
logging.info('train in step: %d, loss: %.4f', step, summary['loss'])
# Eval Metrics
logging.info('Gathering evaluation metrics.')
t_eval_start = time.time()
eval_metrics = []
eval_iter = iter(eval_ds)
for _, eval_batch in zip(range(FLAGS.num_eval_steps), eval_iter):
eval_batch = jax.tree_map(lambda x: x._numpy(), eval_batch) # pylint: disable=protected-access
eval_batch = common_utils.shard(eval_batch)
metrics = p_eval_step(optimizer.target, eval_batch)
eval_metrics.append(metrics)
eval_metrics = common_utils.get_metrics(eval_metrics)
eval_metrics_sums = jax.tree_map(jnp.sum, eval_metrics)
eval_denominator = eval_metrics_sums.pop('denominator')
eval_summary = jax.tree_map(
lambda x: x / eval_denominator, # pylint: disable=cell-var-from-loop
eval_metrics_sums)
if jax.host_id() == 0:
for key, val in eval_summary.items():
eval_summary_writer.scalar(key, val, step)
eval_summary_writer.flush()
logging.info('eval in step: %d, loss: %.4f', step, eval_summary['loss'])
logging.info('eval time: %.4f s step %d', time.time()-t_eval_start, step)
# Translation and BLEU Score.
logging.info('Translating evaluation dataset.')
t_inference_start = time.time()
predict_iter = iter(predict_ds)
sources, references, predictions = [], [], []
for _, pred_batch in enumerate(predict_iter):
pred_batch = jax.tree_map(lambda x: x._numpy(), pred_batch) # pylint: disable=protected-access
# Handle final odd-sized batch by padding instead of dropping it.
cur_pred_batch_size = pred_batch['inputs'].shape[0]
if cur_pred_batch_size % n_devices:
padded_size = int(
np.ceil(cur_pred_batch_size / n_devices) * n_devices)
pred_batch = jax.tree_map(
lambda x: pad_examples(x, padded_size), pred_batch) # pylint: disable=cell-var-from-loop
pred_batch = common_utils.shard(pred_batch)
per_device_batchsize = pred_batch['inputs'].shape[1]
cache_dtype = jnp.bfloat16 if FLAGS.use_bfloat16 else jnp.float32
cache = jax_utils.replicate(
cache_def.initialize_cache((per_device_batchsize,
FLAGS.max_predict_length),
dtype=cache_dtype))
predicted = p_pred_step(pred_batch['inputs'],
optimizer.target,
cache,
eos_id,
FLAGS.max_predict_length)
predicted = tohost(predicted)
inputs = tohost(pred_batch['inputs'])
targets = tohost(pred_batch['targets'])
# Iterate through non-padding examples of batch.
for i, s in enumerate(predicted[:cur_pred_batch_size]):
sources.append(decode_tokens(inputs[i]))
references.append(decode_tokens(targets[i]))
predictions.append(decode_tokens(s))
logging.info('Translation: %d predictions %d references %d sources.',
len(predictions), len(references), len(sources))
logging.info('Translation time: %.4f s step %d.',
time.time() - t_inference_start, step)
# Calculate BLEU score for translated eval corpus against reference.
bleu_matches = bleu.bleu_partial(references, predictions)
all_bleu_matches = per_host_sum_pmap(bleu_matches)
bleu_score = bleu.complete_bleu(*all_bleu_matches)
# Save translation samples for tensorboard.
exemplars = ''
for n in np.random.choice(np.arange(len(predictions)), 8):
exemplars += f'{sources[n]}\n\n{references[n]}\n\n{predictions[n]}\n\n'
if jax.host_id() == 0:
eval_summary_writer.scalar('bleu', bleu_score, step)
eval_summary_writer.text('samples', exemplars, step)
eval_summary_writer.flush()
logging.info('Translation BLEU Score %.4f', bleu_score)
if __name__ == '__main__':
app.run(main)
|
py | b404d40eb8e4f8ecbfa43fe3e29accff8a4ef487 | import pyvista
plotter = pyvista.Plotter()
actor = plotter.add_mesh(pyvista.Sphere())
plotter.store_image = True
plotter.show()
zval = plotter.get_image_depth()
|
py | b404d44cbe3c915a08d2385c39be1359cef3d1da | # Copyright (c) FULIUCANSHENG.
# Licensed under the MIT License.
# pretrained infos
pretrained_bart_infos = {
"default-bart": {
"config": "https://huggingface.co/facebook/bart-base/resolve/main/config.json",
"vocab": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"merge": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
},
"bart-base": {
"config": "https://huggingface.co/facebook/bart-base/resolve/main/config.json",
"vocab": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"merge": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"weight": "https://huggingface.co/facebook/bart-base/resolve/main/pytorch_model.bin",
},
"bart-large": {
"config": "https://huggingface.co/facebook/bart-large/resolve/main/config.json",
"vocab": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"merge": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"weight": "https://huggingface.co/facebook/bart-large/resolve/main/pytorch_model.bin",
},
}
import unitorch.cli.models.bart.modeling
import unitorch.cli.models.bart.processing
|
py | b404d45e17c349164ce428ce9820a9751b8b90fc | import torch
import torch.fx.experimental.fx_acc.acc_ops as acc_ops
import torch.nn as nn
from caffe2.torch.fb.fx2trt.tests.test_utils import AccTestCase, InputTensorSpec
from parameterized import parameterized, param
class TestLayerNormConverter(AccTestCase):
@parameterized.expand(
[
param("1d_normalized_shape", [10], [2, 10]),
param("2d_normalized_shape", [5, 10], [2, 5, 10]),
param("4d_input_shape", [5, 10], [2, 8, 5, 10]),
]
)
def test_layer_norm(self, _, normalized_shape, input_shape):
class LayerNorm(nn.Module):
def __init__(self, normalized_shape):
super().__init__()
self.mod = nn.LayerNorm(normalized_shape, eps=1e-02)
def forward(self, x):
return self.mod(x)
inputs = [torch.randn(input_shape)]
self.run_test(
LayerNorm(normalized_shape),
inputs,
expected_ops={acc_ops.layer_norm},
)
@parameterized.expand(
[
param("1d_normalized_shape", [10], (10,)),
param("2d_normalized_shape", [5, 10], (5, 10)),
param("4d_input_shape", [5, 10], (8, 5, 10)),
]
)
def test_layer_norm_with_dynamic_shape(self, _, normalized_shape, input_shape):
class LayerNorm(nn.Module):
def __init__(self, normalized_shape):
super().__init__()
self.mod = nn.LayerNorm(normalized_shape, eps=1e-02)
def forward(self, x):
return self.mod(x)
input_specs = [
InputTensorSpec(
shape=(-1,) + input_shape,
dtype=torch.float32,
shape_ranges=[
((1,) + input_shape, (4,) + input_shape, (10,) + input_shape)
],
),
]
self.run_test_with_dynamic_shape(
LayerNorm(normalized_shape), input_specs, expected_ops={acc_ops.layer_norm}
)
|
py | b404d4dd8b10ed0ca83bef56eb9ec13dbe48a0d8 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com [email protected]
from os.path import abspath, dirname, join
from preggy import expect
from shutil import which
from tests.base import TestCase
from thumbor.config import Config
from thumbor.context import RequestParameters, ServerParameters
from thumbor.engines.gif import Engine
from thumbor.importer import Importer
STORAGE_PATH = abspath(join(dirname(__file__), "../fixtures/images/"))
class GifEngineTestCase(TestCase):
def get_config(self):
return Config(
SECURITY_KEY="ACME-SEC",
ENGINE="thumbor.engines.gif",
IMAGE_METADATA_READ_FORMATS="exif,xmp",
LOADER="thumbor.loaders.file_loader",
FILE_LOADER_ROOT_PATH=STORAGE_PATH,
STORAGE="thumbor.storages.no_storage",
USE_GIFSICLE_ENGINE=True,
RESPECT_ORIENTATION=True,
)
def get_importer(self):
return Importer(self.config)
def get_server(self):
server = ServerParameters(8889, "localhost", "thumbor.conf", None, "info", None)
server.security_key = "ACME-SEC"
server.gifsicle_path = which("gifsicle")
return server
def get_context(self):
        context = super(GifEngineTestCase, self).get_context()
req = RequestParameters(url="/foo/bar.gif")
context.request = req
return context
def test_create_engine(self):
engine = Engine(self.context)
expect(engine).to_be_instance_of(Engine)
def test_load_image(self):
engine = Engine(self.context)
with open(join(STORAGE_PATH, "animated.gif"), "rb") as image_file:
buffer = image_file.read()
image = engine.create_image(buffer)
expect(image.format).to_equal("GIF")
def test_errors_on_gifsicle_should_not_raises_errors_when_output(self):
engine = Engine(self.context)
with open(join(STORAGE_PATH, "SmallFullColourGIF.gif"), "rb") as image_file:
buffer = image_file.read()
engine.load(buffer, ".gif")
result = engine.run_gifsicle("--some-invalid-opt")
expect(result).Not.to_be_null()
def test_is_multiple_should_returns_true_if_gif_has_many_frames(self):
engine = Engine(self.context)
with open(join(STORAGE_PATH, "animated.gif"), "rb") as image_file:
buffer = image_file.read()
engine.load(buffer, ".gif")
expect(engine.is_multiple()).to_be_true()
def test_is_multiple_should_returns_false_if_gif_has_one_frame(self):
engine = Engine(self.context)
with open(join(STORAGE_PATH, "animated-one-frame.gif"), "rb") as image_file:
buffer = image_file.read()
engine.load(buffer, ".gif")
expect(engine.is_multiple()).to_be_false()
def test_convert_to_grayscale_should_update_image(self):
engine = Engine(self.context)
with open(join(STORAGE_PATH, "animated.gif"), "rb") as image_file:
buffer = image_file.read()
engine.load(buffer, ".gif")
buffer = engine.read()
engine.convert_to_grayscale()
expect(buffer).not_to_equal(engine.read())
def test_convert_to_grayscale_should_not_update_image(self):
engine = Engine(self.context)
with open(join(STORAGE_PATH, "animated.gif"), "rb") as image_file:
buffer = image_file.read()
engine.load(buffer, ".gif")
buffer = engine.read()
engine.convert_to_grayscale(False)
expect(buffer).to_equal(engine.read())
|
py | b404d651d864ecfb7ba8e8448f228cc80e183130 | import sqlite3
import pytest
from sonoser.db import get_db
def test_get_close_db(app):
with app.app_context():
db = get_db()
assert db is get_db()
with pytest.raises(sqlite3.ProgrammingError) as e:
db.execute('SELECT 1')
assert 'closed' in str(e.value)
def test_init_db_command(runner, monkeypatch):
class Recorder(object):
called = False
def fake_init_db():
Recorder.called = True
monkeypatch.setattr('sonoser.db.init_db', fake_init_db)
result = runner.invoke(args=['init-db'])
assert 'Initialized' in result.output
assert Recorder.called
|
py | b404d696aff1de25253055404181b8e30cf60b43 | #
# Transmission Line Simulator
#
# Author(s): Jiacong Xu
# Created: Jun-27-2017
# |
py | b404d733b36a1438f20b6256346cf82a74f0f4ce | # -*- coding: utf-8 -*-
import json
from collections import defaultdict
from datetime import datetime
import sqlalchemy
from dnsdb.constant.constant import NORMAL_TO_CNAME
from .models import DnsSerial
from .models import ViewDomainNameState
from .models import ViewIspStatus
from .models import ViewIsps
from .models import ViewMigrateDetail
from .models import ViewMigrateHistory
from .models import ViewRecords
from ..dal import commit_on_success, db
from ..library.exception import DnsdbException, BadParam
from ..library.log import setup, getLogger
setup("dnsdb")
log = getLogger(__name__)
A_RECORDS = ('A', 'AAAA')
class MigrateDal(object):
@staticmethod
@commit_on_success
def increase_serial_num(zone_list):
log.info('increase_serial_num: %s', zone_list)
for zone_name in zone_list:
serials = DnsSerial.query.filter_by(zone_name=zone_name).all()
if len(serials) != 1:
raise BadParam('Zone serial should be unique: %s' % zone_name, msg_ch=u'zone serial记录不存在或者不唯一')
serial = serials[0]
serial.serial_num += 1
@staticmethod
def get_isp_trans():
return {record.name_in_english: record.name_in_chinese
for record in ViewIsps.query}
@staticmethod
def get_all_abnormal_isps(key=None, value=None):
if key is None:
return ViewIspStatus.query.filter_by(closed=False).order_by(ViewIspStatus.id.desc()).all()
result_dict = {}
for status in ViewIspStatus.query.filter_by(closed=False):
k_v = getattr(status, key)
v_v = getattr(status, value)
result_dict.setdefault(k_v, set())
result_dict[k_v].add(v_v)
return result_dict
@staticmethod
def get_view_domain_rooms():
domain_rooms = {}
query = ViewRecords.query.filter(ViewRecords.record_type.in_(A_RECORDS))
for result in query.all():
domain_rooms.setdefault(result.domain_name, set())
domain_rooms[result.domain_name].add(result.property)
return domain_rooms
@staticmethod
def get_history_info(history_id):
return ViewMigrateHistory.query.get(history_id)
@staticmethod
def get_migrated_history():
return ViewMigrateHistory.query.filter_by(state='migrated').all()
@staticmethod
def get_last_few_history(limit):
return ViewMigrateHistory.query.order_by(ViewMigrateHistory.id.desc()).limit(limit).all()
@staticmethod
def add_migrate_history(migrate_rooms, migrate_isps, dst_rooms, state, cur, length, rtx_id):
with db.session.begin(subtransactions=True):
history = ViewMigrateHistory(migrate_rooms, migrate_isps, dst_rooms, state, cur, length,
rtx_id)
db.session.add(history)
return history.id
@staticmethod
def delete_history_by_id(history_id):
session = db.session
with session.begin(subtransactions=True):
history = session.query(ViewMigrateHistory).get(history_id)
if history is None:
log.error('delete_history_by_id error: no ViewMigrateHistory to Delete: %s' % history_id)
return False, 'No ViewMigrateHistory to Delete: %s' % history_id
session.delete(history)
return True, ''
@staticmethod
@commit_on_success
def update_history_total(history_id, length):
history = ViewMigrateHistory.query.filter(
ViewMigrateHistory.id == history_id).with_for_update(of=ViewMigrateHistory).first()
history.all += length
@staticmethod
@commit_on_success
def update_history_count(history_id, count):
history = ViewMigrateHistory.query.filter(
ViewMigrateHistory.id == history_id).with_for_update(of=ViewMigrateHistory).first()
history.cur += count
if history.all == history.cur:
history.state = 'migrated'
history.update_at = datetime.now()
@staticmethod
@commit_on_success
def update_history_by_id(history_id, **kwargs):
count = ViewMigrateHistory.query.filter_by(id=history_id).update(kwargs)
return count
@staticmethod
@commit_on_success
def add_or_update_abnormal_isp(user, isp, room):
count = ViewIspStatus.query.filter_by(isp=isp, room=room, closed=False).update({
'is_health': False
})
if count:
return True, 'update'
db.session.add(ViewIspStatus(isp=isp, room=room, update_user=user))
return True, 'add'
@staticmethod
@commit_on_success
def add_batch_abnormal_isp(user, isp_rooms_dict):
for isp, rooms in isp_rooms_dict.items():
for room in rooms:
MigrateDal.add_or_update_abnormal_isp(user, isp, room)
@staticmethod
def list_migrate_domain_by_isp(to_migrate_dict, dst_rooms):
isp_migrate_domains = {}
        # Get all domain names and the server rooms they map to.
domain_rooms = MigrateDal.get_view_domain_rooms()
        # Domains and server rooms that match the migration conditions.
for isp, migrate_rooms in to_migrate_dict.items():
migrate_domains_list = []
rules = set()
            # Only migrate A records.
q = (ViewDomainNameState.query.
filter_by(isp=isp, state='A').
order_by(ViewDomainNameState.domain_name))
for result in q:
origin_enabled_rooms = set(json.loads(result.enabled_rooms))
actual_migrate_rooms = origin_enabled_rooms & migrate_rooms
if not actual_migrate_rooms:
continue
                # Build the destination room list according to the migration rules.
domain = result.domain_name
after_rooms = [dst_room for dst_room in dst_rooms
if dst_room in domain_rooms[domain] and dst_room not in migrate_rooms]
migrate_domains_list.append({
'domain_name': domain,
'cur': ','.join(sorted(actual_migrate_rooms)),
'after': ','.join(sorted(after_rooms)),
'after_enabled_rooms': after_rooms,
'eisps': isp
})
isp_migrate_domains[isp] = migrate_domains_list
return isp_migrate_domains
@staticmethod
def list_migrate_domain(src_rooms, dst_rooms, to_migrate_isps):
migrated_isp_rooms = MigrateDal.get_all_abnormal_isps(key='isp', value='room')
to_migrate_dict = {isp: set(src_rooms) | migrated_isp_rooms.get(isp, set())
for isp in to_migrate_isps}
isp_migrate_domains = MigrateDal.list_migrate_domain_by_isp(to_migrate_dict, dst_rooms)
domains_cur_after_isps = {}
for isp, domains in isp_migrate_domains.items():
for domain_info in domains:
if not domain_info['cur']:
continue
domain = domain_info['domain_name']
domains_cur_after_isps.setdefault(domain, defaultdict(list))
domains_cur_after_isps[domain][(domain_info['cur'], domain_info['after'])].append(domain_info['eisps'])
domain_isps = []
trans = MigrateDal.get_isp_trans()
for domain, rule_isps in domains_cur_after_isps.items():
for (cur, after), isps in rule_isps.items():
domain_isps.append({
'domain_name': domain,
'cur': cur,
'after': after,
'isps': sorted([trans[eisp] for eisp in isps if eisp])
})
return sorted(domain_isps, key=lambda x: x['domain_name'])
@staticmethod
def create_migrage_history(username):
history_id = MigrateDal.add_migrate_history('', '', '', 'migrating', 1, 1, username)
try:
ing_count = ViewMigrateHistory.query.filter(
ViewMigrateHistory.state.in_(ViewMigrateHistory.check_states)).count()
if ing_count > 1:
raise DnsdbException('migrate running', msg_ch=u'有正在进行的迁移,请稍后重试')
except Exception:
            # Delete the placeholder history record.
MigrateDal.delete_history_by_id(history_id)
raise
return history_id
@staticmethod
@commit_on_success
def migrate_domains(domain_infos, history_id):
        # tb_view_domain_name_state primary key: (domain_name, isp)
for domain_info in domain_infos:
enabled_rooms = domain_info['after_enabled_rooms']
cur_state = (ViewDomainNameState.query.
filter_by(domain_name=domain_info['domain_name'],
isp=domain_info['eisps'])).first()
            # Record the previous state for the migration history.
migrate_state = ViewMigrateDetail(migrate_id=history_id, domain_name=cur_state.domain_name,
before_enabled_server_rooms=cur_state.enabled_rooms,
after_enabled_server_rooms=json.dumps(enabled_rooms),
isp=cur_state.isp, before_state=cur_state.state,
after_state='A')
db.session.add(migrate_state)
cur_state.enabled_rooms = json.dumps(enabled_rooms)
cur_state.state = 'A'
MigrateDal.update_history_count(history_id, len(domain_infos))
@staticmethod
@commit_on_success
def onekey_recover_rooms():
with db.session.begin(subtransactions=True):
(ViewIspStatus.query.filter_by(closed=False).
update({"closed": True,
"is_health": True}))
ViewMigrateHistory.query.filter_by(state='migrated').update({'state': 'recovered'})
q = ViewDomainNameState.query.filter(sqlalchemy.or_(
ViewDomainNameState.origin_enabled_rooms != ViewDomainNameState.enabled_rooms,
ViewDomainNameState.origin_state != ViewDomainNameState.state
))
if q.count() == 0:
raise BadParam('no domain to recover', msg_ch=u'没有可恢复的域名')
for item in q:
item.enabled_rooms = item.origin_enabled_rooms
item.state = item.origin_state
MigrateDal.increase_serial_num(NORMAL_TO_CNAME.values())
@staticmethod
def get_view_migrate_detail(migrate_id, domain, page, page_size):
detail_list = []
migrate_rules = ViewMigrateHistory.query.filter(
ViewMigrateHistory.id == migrate_id).first().dst_rooms
query = ViewMigrateDetail.query.filter(ViewMigrateDetail.migrate_id == migrate_id)
if domain != '':
query = query.filter(ViewMigrateDetail.domain_name.like('%{}%'.format(domain)))
total = query.count()
details = query.order_by(ViewMigrateDetail.domain_name).offset(page_size * (page - 1)).limit(page_size).all()
trans = MigrateDal.get_isp_trans()
for detail in details:
detail_list.append({
'domain_name': detail.domain_name,
'isp': trans[detail.isp],
'before_enabled_server_rooms': ','.join(sorted(json.loads(detail.before_enabled_server_rooms))),
'after_enabled_server_rooms': ','.join(sorted(json.loads(detail.after_enabled_server_rooms)))
})
return {'total': total, 'migrate_rules': ','.join(sorted(json.loads(migrate_rules))), 'detail': detail_list}
|
py | b404d76ffd86275ab57f111331bffc6fc70d6864 | from django.apps import AppConfig
class CardapioConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'cardapio'
|
py | b404d95b1481eabf0c1d28ef7a2df9a424116bb1 | import numpy
class module_template:
def __init__(self):
pass
def module_processor(self,
in_data: numpy.ndarray,
frame_count: int,
time_info: dict,
flag: int) -> (numpy.ndarray, int):
return in_data
|
py | b404da779452215e7957c7aa504474dc6fd09eaa | from functools import wraps, partial, update_wrapper
from flask import current_app, request
from jsonschema.validators import validator_for
class JsonValidationError(Exception):
def __init__(self, message, errors):
self.message = message
self.errors = errors
class JsonSchema(object):
def __init__(self, app=None):
self.app = app
self.config = {}
self.validator_cls = None
if app is not None:
self.init_app(app)
def init_app(self, app):
self.app = app
self.config = app.config.copy()
self.config.setdefault('JSON_SCHEMA_METHODS', ['POST', 'PUT', 'PATCH'])
self.config.setdefault('JSON_SCHEMA_FORMAT_CHECKER', None)
def validate(self, schema, methods=None, format_checker=None):
def wrapper(fn):
@wraps(fn)
def decorated(methods=None, format_checker=None, *args, **kwargs):
validator_kwargs = {
'schema': schema,
'format_checker': format_checker if format_checker else self.config.get('JSON_SCHEMA_FORMAT_CHECKER')
}
# use default methods if not supplied as arguments to decorator
if not methods:
methods = self.config.get('JSON_SCHEMA_METHODS')
# check jsonschema
if request.method in methods:
validator_cls = self.validator_cls if self.validator_cls else validator_for(schema)
validator = validator_cls(**validator_kwargs)
errors = list(validator.iter_errors(request.get_json()))
if errors:
raise JsonValidationError('Error validating against schema', errors)
return fn(*args, **kwargs)
            # The wrapper() closure has access to format_checker and methods, but the
            # decorated function won't when Flask calls it, so we use partial() to bind
            # those arguments in.
            pfunc = partial(decorated, format_checker=format_checker, methods=methods)
            # This is needed because partial() doesn't copy the __name__ attribute to
            # the created partial function, which Flask requires.
update_wrapper(pfunc, decorated)
return pfunc
return wrapper
|
py | b404db16bec750bd2320849bf3341c9f057640b0 | from .apps import Apps
from .dbs import Dbs
from .servers import Servers
from .sysusers import Sysusers
|
py | b404db48027f898543617260b70ec3b75925da0d | from django.shortcuts import render, redirect
from django.views.generic import View
import os
# Create your views here.
class SetupView(View):
def get(self, request):
context = {}
return render(request, 'setup/index.html', context)
class SetupProceedView(View):
def get(self, request):
os.system("python manage.py migrate")
#os.system("python manage.py collectstatic")
return redirect('/sys_console/init_setup/')
|
py | b404db9394afec7677247fb0e1b9290302380d38 | def test_two_runs_no_clean():
"""
>>> report_fixture = getfixture('allure_report_with_params')
>>> allure_report_first_run = report_fixture(cache=False)
>>> allure_report_second_run = report_fixture(cache=False)
>>> assert_that(allure_report_second_run,
... has_only_n_test_cases('test_two_runs_no_clean', 2)
... )
"""
assert True
|
py | b404dc5a5d286d7222b3b34e6a827a840d49e295 | # Copyright (c) 2020 DDN. All rights reserved.
# Use of this source code is governed by a MIT-style
# license that can be found in the LICENSE file.
from textwrap import fill
class ApiException(Exception):
pass
class InvalidApiResource(ApiException):
def __init__(self, name):
self.error_str = "Invalid API Resource: %s" % name
super(InvalidApiResource, self).__init__()
def __str__(self):
return self.error_str
class UnsupportedFormat(ApiException):
pass
class TooManyMatches(ApiException):
"""
Too many matches returned during a fuzzy-id lookup.
"""
def __init__(self, msg=None):
self.msg = msg
super(TooManyMatches, self).__init__()
def __str__(self):
if not self.msg:
return "The query matched more than one record."
else:
return self.msg
class InvalidVolumeNode(ApiException):
def __init__(self, input):
self.input = input
super(InvalidVolumeNode, self).__init__()
def __str__(self):
return "Invalid VolumeNode spec: %s (malformed or bad path?)" % self.input
class BadUserInput(ApiException):
"""
Generic exception for bad user input detected post-argparse.
"""
pass
class BadRequest(ApiException):
"""
Represents a failed TastyPie validation or other 400-level error.
"""
def __init__(self, value):
self.error_dict = value
super(BadRequest, self).__init__()
def __str__(self):
lines = ["The server rejected the request with the following error(s):"]
try:
for field, errors in self.error_dict.items():
try:
lines.extend([" %s: %s" % (field, ", ".join(errors.values()[0]))])
except (AttributeError, IndexError):
if isinstance(errors, basestring):
errors = [errors]
for error in errors:
if error:
lines.extend([" %s: %s" % (field, error)])
except AttributeError:
# Sometimes what comes back is just a string.
lines.append(self.error_dict)
return "\n".join(lines)
class InternalError(ApiException):
"""
HTTP 500
"""
def __init__(self, backtrace):
self.backtrace = backtrace
super(InternalError, self).__init__()
def __str__(self):
return self.backtrace
class NotFound(ApiException):
"""
HTTP 404
"""
pass
class UnauthorizedRequest(ApiException):
"""
HTTP 401
"""
pass
class AuthenticationFailure(ApiException):
"""
HTTP 401 after trying to authenticate.
"""
def __str__(self):
return "Authentication failed. Check username/password."
class ApiConnectionError(ApiException):
def __init__(self, api_url):
self.api_url = api_url
super(ApiConnectionError, self).__init__()
def __str__(self):
return "Failed to connect to %s (is --api_url correct?)" % self.api_url
class InvalidStateChange(ApiException):
def __init__(self, requested_state, available_states):
self.requested_state = requested_state
self.available_states = available_states
def __str__(self):
return "The requested state (%s) is not one of the available states: %s" % (
self.requested_state,
", ".join(self.available_states),
)
class InvalidJobError(ApiException):
def __init__(self, requested_job, available_jobs):
self.requested_job = requested_job
self.available_jobs = available_jobs
def __str__(self):
return "The requested job (%s) is not one of the available jobs: %s" % (
self.requested_job,
", ".join(self.available_jobs),
)
class AbnormalCommandCompletion(Exception):
def __init__(self, command, status):
self.status = status
self.command = command
super(AbnormalCommandCompletion, self).__init__()
def __str__(self):
return "Command completed with abnormal status: %s (%s)" % (self.status, self.command["message"])
class UserConfirmationRequired(ApiException):
skip_argument = "force"
def __str__(self):
return "Confirmation required."
class JobConfirmationRequired(UserConfirmationRequired):
def __init__(self, verb, subject, confirmation):
self.verb = verb
self.subject = subject
self.confirmation = confirmation
def __str__(self):
return "Running %s on %s requires confirmation of the following:\n%s" % (
self.verb,
self.subject,
fill(self.confirmation, initial_indent=" ", subsequent_indent=" "),
)
class StateChangeConfirmationRequired(UserConfirmationRequired):
def __init__(self, report):
self.report = report
@property
def consequences(self):
return sorted([j["description"] for j in self.report["dependency_jobs"] + [self.report["transition_job"]]])
def __str__(self):
return """
This action (%s) has the following consequences:
%s
""" % (
self.report["transition_job"]["description"],
"\n".join([" * %s" % c for c in self.consequences]),
)
class ReformatVolumesConfirmationRequired(UserConfirmationRequired):
skip_argument = "reformat"
def __init__(self, volumes):
self.volumes = volumes
def __str__(self):
return """
One or more of the selected volumes already contains a filesystem, but may
not actually be in use. Please check the following list of volumes and
verify that they are suitable for use as Lustre targets:
%s
""" % "\n".join(
[" %s" % v for v in self.volumes]
)
|
py | b404dd3c523b84869a58a5ff2d9cdd2a4a8f5b0a | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- Project information -----------------------------------------------------
project = 'Speech Emotion Recognition'
copyright = '2019, Hemanth Kumar Veeranki'
author = 'Hemanth Kumar Veeranki'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '1.0'
# -- General configuration ---------------------------------------------------
autosummary_generate = True
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.napoleon',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'm2r',
]
# Napoleon related configs.
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md']
# source_suffix = '.rst'
# The master toctree document.
master_doc = 'main'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'SpeechEmotionRecognitiondoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'SpeechEmotionRecognition.tex', 'Speech Emotion Recognition Documentation',
'Hemanth Kumar Veeranki', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'speechemotionrecognition', 'Speech Emotion Recognition Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'SpeechEmotionRecognition', 'Speech Emotion Recognition Documentation',
author, 'SpeechEmotionRecognition', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
|
py | b404dd5c9eb271d008e15e3f2be350f1280061e2 | import click
import pytest
from catacomb import entry_points
from catacomb.common import constants
from click.testing import CliRunner
# Fixtures.
@pytest.fixture(scope="module", params=[["-h"], ["--help"]])
def help_flag(request):
return request.param
class TestTombHelp(object):
"""Testing various scenarios that should show the help view.
"""
def test_tomb_help_view(self, help_flag):
result = CliRunner().invoke(entry_points.tomb_entry, help_flag)
# No failure.
assert result.exit_code == 0
# Correct help text.
assert "Usage:" in result.output
assert "Options:" in result.output
assert "Commands:" in result.output
def test_add_help_view(self, help_flag):
result = CliRunner().invoke(
entry_points.tomb_entry, ["add", help_flag[0]])
assert result.exit_code == 0
# Correct description present.
assert constants.CMD_ADD_DESC in result.output
assert "Usage:" in result.output
assert "Options:" in result.output
# There aren't any subcommands for this command.
assert "Commands:" not in result.output
def test_clean_help_view(self, help_flag):
result = CliRunner().invoke(
entry_points.tomb_entry, ["clean", help_flag[0]])
assert result.exit_code == 0
# Correct description present.
assert constants.CMD_CLEAN_DESC in result.output
assert "Usage:" in result.output
assert "Options:" in result.output
# There aren't any subcommands for this command.
assert "Commands:" not in result.output
def test_list_help_view(self, help_flag):
result = CliRunner().invoke(
entry_points.tomb_entry, ["list", help_flag[0]])
assert result.exit_code == 0
# Correct description present.
assert constants.CMD_LIST_DESC in result.output
assert "Usage:" in result.output
assert "Options:" in result.output
# There aren't any subcommands for this command.
assert "Commands:" not in result.output
def test_rm_help_view(self, help_flag):
result = CliRunner().invoke(
entry_points.tomb_entry, ["rm", help_flag[0]])
assert result.exit_code == 0
# Correct description present.
assert constants.CMD_RM_DESC in result.output
assert "Usage:" in result.output
assert "Options:" in result.output
# There aren't any subcommands for this command.
assert "Commands:" not in result.output
def test_status_help_view(self, help_flag):
result = CliRunner().invoke(
entry_points.tomb_entry, ["status", help_flag[0]])
assert result.exit_code == 0
# Correct description present.
assert constants.CMD_STATUS_DESC in result.output
assert "Usage:" in result.output
assert "Options:" in result.output
# There aren't any subcommands for this command.
assert "Commands:" not in result.output
def test_use_help_view(self, help_flag):
result = CliRunner().invoke(
entry_points.tomb_entry, ["use", help_flag[0]])
assert result.exit_code == 0
# Correct description present.
assert constants.CMD_USE_DESC in result.output
assert "Usage:" in result.output
assert "Options:" in result.output
# There aren't any subcommands for this command.
assert "Commands:" not in result.output
|
py | b404dda51bd16378e90014756cbc77c1babcd74b | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pipeline.param.base_param import BaseParam
class PearsonParam(BaseParam):
def __init__(self, column_names=None, column_indexes=None, cross_parties=True, need_run=True, use_mix_rand=False):
super().__init__()
self.column_names = column_names
self.column_indexes = column_indexes
self.cross_parties = cross_parties
self.need_run = need_run
self.use_mix_rand = use_mix_rand
if column_names is None:
self.column_names = []
if column_indexes is None:
self.column_indexes = []
def check(self):
if not isinstance(self.use_mix_rand, bool):
raise ValueError(f"use_mix_rand accept bool type only, {type(self.use_mix_rand)} got")
if self.cross_parties and (not self.need_run):
raise ValueError(f"need_run should be True(which is default) when cross_parties is True.")
if not isinstance(self.column_names, list):
raise ValueError(f"type mismatch, column_names with type {type(self.column_names)}")
for name in self.column_names:
if not isinstance(name, str):
raise ValueError(f"type mismatch, column_names with element {name}(type is {type(name)})")
if isinstance(self.column_indexes, list):
for idx in self.column_indexes:
if not isinstance(idx, int):
raise ValueError(f"type mismatch, column_indexes with element {idx}(type is {type(idx)})")
if isinstance(self.column_indexes, int) and self.column_indexes != -1:
raise ValueError(f"column_indexes with type int and value {self.column_indexes}(only -1 allowed)")
if self.need_run:
if isinstance(self.column_indexes, list) and isinstance(self.column_names, list):
if len(self.column_indexes) == 0 and len(self.column_names) == 0:
raise ValueError(f"provide at least one column")
|
py | b404ddd2196524d9438b9a38096a634e33df0f53 | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from typing import Any, Dict, Optional, Tuple
import torch
import torch.nn.functional as F
from torch import nn
import poptorch
from optimum.utils import logging
from transformers import AutoConfig, PreTrainedModel
from transformers.modeling_outputs import ModelOutput
from .ipu_configuration import IPUConfig
logger = logging.get_logger(__name__)
_PRETRAINED_TO_PIPELINED_REGISTRY = {}
def register(transformers_cls=None):
def wrapper(cls):
orig_cls = transformers_cls
if orig_cls is None:
found = False
for base_cls in cls.__bases__:
if base_cls != PipelineMixin:
orig_cls = base_cls
found = True
break
if not found:
raise ValueError(f"Was not able to find original transformers class for {cls}")
_PRETRAINED_TO_PIPELINED_REGISTRY[orig_cls] = cls
return cls
return wrapper
def to_pipelined(model: nn.Module, ipu_config: IPUConfig, force: bool = False):
model_cls = model.__class__
pipelined_cls = _PRETRAINED_TO_PIPELINED_REGISTRY.get(model_cls, None)
if pipelined_cls is not None:
return pipelined_cls.from_transformers(model, ipu_config)
else:
if force:
logger.warning(
f"No pipelined version exists for {model_cls.__name__}, creating it dynamically, it might not work as expected."
)
pipelined_cls = type(f"Pipelined{model_cls.__name__}", (model_cls, PipelineMixin), {})
return pipelined_cls.from_model(model)
else:
raise KeyError(f"{model_cls.__name__} pipelined version not found in registry.")
class PipelineMixin:
@classmethod
def from_transformers(cls, model: PreTrainedModel, ipu_config: IPUConfig):
config = copy.deepcopy(model.config)
pipelined_model = cls(config)
pipelined_model.load_state_dict(model.state_dict())
pipelined_model.ipu_config = copy.deepcopy(ipu_config)
return pipelined_model
@classmethod
def from_pretrained_transformers(cls, model_name_or_path: str, ipu_config: IPUConfig):
config = AutoConfig.from_pretrained(model_name_or_path)
pipelined_model = cls.from_pretrained(model_name_or_path, config=config)
pipelined_model.ipu_config = copy.deepcopy(ipu_config)
return pipelined_model
@classmethod
def from_model(cls, model: nn.Module):
clone = copy.deepcopy(model)
clone.__class__ = cls
# Just needed so that .parallelize() does not throw an error
clone.ipu_config = IPUConfig()
return clone
def _has_ipu_config_check(self):
_ipu_config = getattr(self, "_ipu_config", None)
if _ipu_config is None:
raise AttributeError("No IPUConfig was found, please set the ipu_config attribute")
@property
def ipu_config(self):
self._has_ipu_config_check()
return self._ipu_config
@ipu_config.setter
def ipu_config(self, value: IPUConfig):
if not isinstance(value, IPUConfig):
raise TypeError(f"ipu_config must be an instance of IPUConfig, but {type(value)} was provided")
self._ipu_config = value
def parallelize(self):
"""Transform the model to run in an IPU pipeline."""
self._hooks = []
self._has_ipu_config_check()
return self
def deparallelize(self):
"""
Undo the changes to the model done by `parallelize`.
You should call this before doing `save_pretrained` so that the `model.state_dict` is fully compatible with the
original model.
"""
# Remove hooks
if hasattr(self, "_hooks"):
for h in self._hooks:
h.remove()
# Remove poptorch Blocks
for m in self.modules():
if m is not self:
poptorch.removeBlocks(m)
return self
def num_parameters(self, only_trainable: bool = False, exclude_embeddings: bool = False) -> int:
"""
Get number of (optionally, trainable or non-embeddings) parameters in the module.
Args:
only_trainable (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to return only the number of trainable parameters
exclude_embeddings (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to return only the number of non-embeddings parameters
Returns:
:obj:`int`: The number of parameters.
"""
# TODO: actually overwrite this to handle SerializedEmbedding.
if exclude_embeddings:
embedding_param_names = [
f"{name}.weight" for name, module_type in self.named_modules() if isinstance(module_type, nn.Embedding)
]
non_embedding_parameters = [
parameter for name, parameter in self.named_parameters() if name not in embedding_param_names
]
return sum(p.numel() for p in non_embedding_parameters if p.requires_grad or not only_trainable)
else:
return sum(p.numel() for p in self.parameters() if p.requires_grad or not only_trainable)
def _prepare_encoder_decoder_kwargs_for_generation(
self, inputs_tensor: torch.Tensor, model_kwargs, model_input_name: Optional[str] = None
) -> Dict[str, Any]:
# 2. prepare encoder args and encoder kwargs from model kwargs
irrelevant_prefix = ["decoder_", "cross_attn", "use_cache"]
encoder_kwargs = {
argument: value
for argument, value in model_kwargs.items()
if not any(argument.startswith(p) for p in irrelevant_prefix)
}
model_input_name = model_input_name if model_input_name is not None else self.main_input_name
encoder_kwargs["return_dict"] = True
encoder_kwargs[model_input_name] = inputs_tensor
# 1. get encoder
compiled_encoder = getattr(self, "_compiled_encoder", None)
if compiled_encoder is None:
encoder = self.get_encoder()
# TODO: how to pass the poptorch options?
compiled_encoder = poptorch.inferenceModel(encoder.eval())
compiled_encoder.compile(**encoder_kwargs)
# 3. make sure that encoder returns `ModelOutput`
model_input_name = model_input_name if model_input_name is not None else self.main_input_name
encoder_kwargs["return_dict"] = True
encoder_kwargs[model_input_name] = inputs_tensor
model_kwargs["encoder_outputs"]: "ModelOutput" = compiled_encoder(**encoder_kwargs)
return model_kwargs
def get_layer_ipu(layers_per_ipu):
# List of the IPU Id for each encoder layer
layer_ipu = []
for ipu, n_layers in enumerate(layers_per_ipu):
layer_ipu += [ipu] * n_layers
return layer_ipu
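# Example (illustrative): with layers_per_ipu = [2, 3], get_layer_ipu returns
# [0, 0, 1, 1, 1], i.e. the first two layers are placed on IPU 0 and the next
# three on IPU 1.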
def recomputation_checkpoint(module: nn.Module):
"""Annotates the output of a module to be checkpointed instead of
recomputed"""
def recompute_outputs(module, inputs, outputs):
return tuple(poptorch.recomputationCheckpoint(y) for y in outputs)
return module.register_forward_hook(recompute_outputs)
def outline_attribute(module: nn.Module, value: str):
"""Adds an attribute to a module. This attribute will be used
when comparing operation equivalence in outlining. For example:
layer1 = nn.Linear(...)
layer2 = nn.Linear(...)
layer3 = nn.Linear(...)
layer4 = nn.Linear(...)
outline_attribute(layer1, "A")
outline_attribute(layer2, "A")
outline_attribute(layer3, "B")
The code for layer1 can be reused for layer2.
But it can't be used for layer3 or layer4.
"""
context = poptorch.Attribute(__outline={"layer": value})
def enable(*args):
context.__enter__()
def disable(*args):
context.__exit__(None, None, None)
handles = []
handles.append(module.register_forward_pre_hook(enable))
handles.append(module.register_forward_hook(disable))
return handles
class SerializedEmbedding(nn.Module):
"""
Wrapper for `nn.Embedding` layer that performs the embedding look-up into
smaller serialized steps in order to reduce memory in the embedding gradient
calculation.
Args:
embedding: A `nn.Embedding` to wrap
serialization_factor: The number of serialized embedding look-ups
"""
def __init__(self, embedding: nn.Embedding, serialization_factor: int):
super().__init__()
self.serialization_factor = serialization_factor
self.num_embeddings = embedding.num_embeddings
# Num embeddings should be divisible by the serialization factor
assert self.num_embeddings % self.serialization_factor == 0
self.split_size = self.num_embeddings // self.serialization_factor
self.split_embeddings = nn.ModuleList(
[
nn.Embedding.from_pretrained(
embedding.weight[i * self.split_size : (i + 1) * self.split_size, :].detach(),
freeze=False,
padding_idx=embedding.padding_idx if i == 0 else None,
)
for i in range(self.serialization_factor)
]
)
def deserialize(self):
"""
Deserialize the internal wrapped embedding layer and return it as a
`nn.Embedding` object.
Returns:
`nn.Embedding` layer
"""
return nn.Embedding.from_pretrained(torch.vstack([l.weight for l in self.split_embeddings]), padding_idx=0)
def forward(self, indices):
# iterate through the splits
x_sum = None
for i in range(self.serialization_factor):
# mask out the indices not in this split
split_indices = indices - i * self.split_size
mask = (split_indices >= 0) * (split_indices < self.split_size)
mask = mask.detach()
split_indices *= mask
# do the embedding lookup
x = self.split_embeddings[i](split_indices)
# multiply the output by mask
x *= mask.unsqueeze(-1)
# add to partial
if x_sum is not None:
x_sum += x
else:
x_sum = x
return x_sum
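# Usage sketch (illustrative; the sizes and variable names below are assumptions):
#   base = nn.Embedding(num_embeddings=32000, embedding_dim=768)
#   emb = SerializedEmbedding(base, serialization_factor=4)  # 32000 % 4 == 0
#   hidden = emb(input_ids)    # same lookup result as base(input_ids), lower peak memory in backward
#   plain = emb.deserialize()  # recover an ordinary nn.Embedding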
class SerializedLinear(nn.Linear):
"""
Exactly equivalent to `nn.Linear` layer, but with the matrix multiplication replaced with
a serialized matrix multiplication: `poptorch.serializedMatMul`.
The matrix multiplication is split into separate smaller multiplications, calculated one after the other,
to reduce the memory requirements of the multiplication and its gradient calculation.
Args:
in_features: Size of each input sample
out_features: Size of each output sample
factor: Number of serialized multiplications. Must be a factor of
the dimension to serialize on.
bias: If set to False, the layer will not learn an additive bias.
Default: True
mode: Which dimension of the matmul to serialize on:
for matrix A (m by n) multiplied by matrix B (n by p).
* InputChannels: Split across the input channels (dimension m).
* ReducingDim: Split across the reducing dimension (n).
* OutputChannels: Split across the output channels (dimension p).
* Disabled: Same as an ordinary matrix multiplication.
"""
def __init__(
self, in_features, out_features, factor, bias=False, mode=poptorch.MatMulSerializationMode.OutputChannels
):
super().__init__(in_features, out_features, bias)
self.mode = mode
self.factor = factor
def forward(self, x):
output = poptorch.serializedMatMul(x, self.weight.t(), self.mode, self.factor)
if self.bias is not None:
output += self.bias
return output
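# Usage sketch (illustrative; the sizes below are assumptions):
#   proj = SerializedLinear(in_features=768, out_features=32000, factor=4)
#   logits = proj(hidden_states)  # the matmul runs as 4 serialized steps over the output channels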
class SharedEmbedding(nn.Module):
"""Wrapper around the shared embedding between the encoder and the decoder stacks.
Attributes:
shared: The shared embedding layer.
"""
def __init__(self, shared: nn.Embedding):
super().__init__()
self.shared = shared
def _combine_inputs(self, input_ids: torch.Tensor, decoder_input_ids: torch.Tensor) -> Tuple[int, torch.Tensor]:
idx = input_ids.size(1)
return idx, torch.cat([input_ids, decoder_input_ids], dim=1)
def _separate_inputs(self, idx: int, embeds: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
return embeds[:, :idx, :], embeds[:, idx:, :]
def forward(
self,
input_ids: torch.Tensor,
decoder_input_ids: torch.Tensor,
encoder_embed_scale: Optional[float] = None,
decoder_embed_scale: Optional[float] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
# TODO: use this once the TiedGather pattern issue is solved.
# encoder_inputs_embeds, decoder_inputs_embeds = None, None
# if input_ids is not None and encoder_embed_scale is not None:
# encoder_inputs_embeds = self.shared(input_ids) * encoder_embed_scale
# if decoder_input_ids is not None and decoder_embed_scale is not None:
# decoder_inputs_embeds = self.shared(decoder_input_ids) * decoder_embed_scale
# combined, n1, n2 = self._combine_inputs(input_ids, decoder_input_ids)
# encoder_inputs_embeds, decoder_inputs_embeds = self._separate_inputs(self.shared(combined), n1, n2)
idx, combined = self._combine_inputs(input_ids, decoder_input_ids)
encoder_inputs_embeds, decoder_inputs_embeds = self._separate_inputs(idx, self.shared(combined))
if encoder_embed_scale:
encoder_inputs_embeds = encoder_inputs_embeds * encoder_embed_scale
if decoder_embed_scale:
decoder_inputs_embeds = decoder_inputs_embeds * decoder_embed_scale
return encoder_inputs_embeds, decoder_inputs_embeds
class OnehotGather(nn.Module):
"""
Gathers selected indices from a tensor by transforming the list of indices
into a one-hot matrix and then multiplying the tensor by that matrix.
"""
def __init__(self):
super().__init__()
self._is_half = False
def half(self):
super().half()
# Tracing is always executed in float as there are missing
# implementations of operations in half on the CPU.
# So we cannot query the inputs to know if we are running
# with a model that has had .half() called on it.
# To work around it nn.Module::half is overridden
self._is_half = True
def forward(self, sequence, positions):
"""
Gather the vectors at the specific positions over a batch.
"""
num_classes = int(sequence.shape[1])
one_hot_positions = F.one_hot(positions, num_classes)
if self._is_half:
one_hot_positions = one_hot_positions.half()
else:
one_hot_positions = one_hot_positions.float()
return torch.matmul(one_hot_positions.detach(), sequence)
|
py | b404de4d07ec52e416cdf5725ef5ff3704609823 | ## -*- coding: utf-8 -*-
## (C) 2021 Muthiah Annamalai
import subprocess
import re
try:
from tamilsandhi import check_sandhi
SKIP_SANDHI_CHECKER = False
except ModuleNotFoundError:
SKIP_SANDHI_CHECKER = True
NL = re.compile('\n+')
SPC = re.compile(r'\s+')
class ASpell:
"""
run GNU Aspell or ispell via pipe.
Ref: http://aspell.net/man-html/Through-A-Pipe.html
"""
    PIPE_CMD = re.split(r'\s+', "aspell -l ta --encoding UTF-8 -a --suggest")
def __init__(self, command=PIPE_CMD):
self.command = command
self.text = ""
self.result = {}
def spellcheck(self, text, timeout=60):
self.text = text
pipe = subprocess.Popen(self.command, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
output, _ = pipe.communicate(bytes(text, 'utf-8'), timeout)
ASpell.parse_result(self.result, output.decode('utf-8'))
if SKIP_SANDHI_CHECKER:
return self.result
prev_word = ''
for line in re.split(NL, text):
for word in re.split(SPC, line):
if prev_word != '':
if prev_word not in self.result:
# if a word is in error we don't/can't do a sandhi-check
sandhi_result, _ = check_sandhi([prev_word, word])
if sandhi_result[0] != prev_word:
self.result[prev_word] = [sandhi_result[0], prev_word]
if word.endswith('.'):
prev_word = ''
else:
prev_word = word
return self.result
@staticmethod
def parse_result(result, output):
# Syntax of ASpell pipe
# OK: *
# Suggestions: & original count offset: miss, miss, …
# None: # original offset
for line in re.split('\n+', output):
if not line.startswith('&'): continue
word, suggestions = line.split(':')
word = word.replace('&', '').strip().split(' ')[0]
suggestions = [word.strip() for word in suggestions.split(',')]
result[word] = suggestions
return result
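# Usage sketch (illustrative; assumes the aspell binary and a Tamil dictionary are
# installed, as required by PIPE_CMD above):
#   speller = ASpell()
#   errors = speller.spellcheck(text)
#   # `errors` maps each flagged word to its suggestion list (plus sandhi
#   # corrections for adjacent word pairs when tamilsandhi is available).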
|
py | b404de7d816fb3de513076d9f9133a44110c376d | from rrutil import *
send_gdb('c')
expect_gdb('exited normally')
restart_replay()
expect_gdb('exited normally')
ok()
|
py | b404e044c7d8ad18c4310e3469abb6aa1c58fdc7 |
import numpy as np
#######################################
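# init_matrix: dense (input_size x output_size) weight matrix initialised with the
# requested scheme ('zero', 'sqrt_fan_in', 'glorot_uniform', 'glorot_normal',
# 'alexnet', 'normal' with the given std, or a standard normal otherwise).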
def init_matrix(size, init, std=None):
input_size, output_size = size
if init == 'zero':
weights = np.zeros(shape=(input_size, output_size))
elif init == 'sqrt_fan_in':
sqrt_fan_in = np.sqrt(input_size)
weights = np.random.uniform(low=-1.0/sqrt_fan_in, high=1.0/sqrt_fan_in, size=(input_size, output_size))
elif init == 'glorot_uniform':
limit = np.sqrt(6. / (input_size + output_size))
weights = np.random.uniform(low=-limit, high=limit, size=(input_size, output_size))
elif init == 'glorot_normal':
scale = np.sqrt(2. / (input_size + output_size))
weights = np.random.normal(loc=0.0, scale=scale, size=(input_size, output_size))
elif init == 'alexnet':
weights = np.random.normal(loc=0.0, scale=0.01, size=(input_size, output_size))
elif init == 'normal':
scale = std
weights = np.random.normal(loc=0.0, scale=scale, size=(input_size, output_size))
else:
weights = np.random.normal(loc=0.0, scale=1.0, size=(input_size, output_size))
return weights
#######################################
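# init_filters: (fh x fw x fin x fout) convolution filter bank; 'sqrt_fan_in' is not
# supported here and unrecognised init names trigger an assertion.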
def init_filters(size, init, std=None):
fh, fw, fin, fout = size
if init == 'zero':
weights = np.zeros(shape=(fh, fw, fin, fout))
elif init == 'sqrt_fan_in':
assert (False)
elif init == 'glorot_uniform':
limit = np.sqrt(6. / (fh*fw*fin + fh*fw*fout))
weights = np.random.uniform(low=-limit, high=limit, size=(fh, fw, fin, fout))
elif init == 'glorot_normal':
scale = np.sqrt(2. / (fh*fw*fin + fh*fw*fout))
weights = np.random.normal(loc=0.0, scale=scale, size=(fh, fw, fin, fout))
elif init == 'alexnet':
weights = np.random.normal(loc=0.0, scale=0.01, size=(fh, fw, fin, fout))
elif init == 'normal':
scale = std
weights = np.random.normal(loc=0.0, scale=scale, size=(fh, fw, fin, fout))
else:
assert (False)
return weights
#######################################
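# init_local_filters: filters for a locally connected layer; one (fh*fw*fin x fout)
# block is created for each of the h x w spatial positions.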
def init_local_filters(size, init, std=None):
h, w, fh, fw, fin, fout = size
if init == 'zero':
weights = np.zeros(shape=(h, w, fh*fw*fin, fout))
elif init == 'sqrt_fan_in':
assert (False)
elif init == 'glorot_uniform':
limit = np.sqrt(6. / (fh*fw*fin + fh*fw*fout))
weights = np.random.uniform(low=-limit, high=limit, size=(h, w, fh*fw*fin, fout))
elif init == 'glorot_normal':
scale = np.sqrt(2. / (fh*fw*fin + fh*fw*fout))
weights = np.random.normal(loc=0.0, scale=scale, size=(h, w, fh*fw*fin, fout))
elif init == 'alexnet':
weights = np.random.normal(loc=0.0, scale=0.01, size=(h, w, fh*fw*fin, fout))
elif init == 'normal':
scale = std
weights = np.random.normal(loc=0.0, scale=scale, size=(h, w, fh*fw*fin, fout))
else:
assert (False)
return weights
#######################################
|
py | b404e13923b1b39cf882662be548277c293555d6 | # -*- coding: utf-8 -*-
# Scrapy settings for armus1 project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'armus1'
SPIDER_MODULES = ['armus1.spiders']
NEWSPIDER_MODULE = 'armus1.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = ' Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False  # defaults to True; set to False here
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 0.5
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'armus1.middlewares.Armus1SpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'armus1.middlewares.Armus1DownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'armus1.pipelines.ArmusPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
py | b404e1a9db66da1607470e6044ac4764a157d497 | """
Visualizing AoLP image
with the addition of modulation by DoLP
"""
import cv2
import numpy as np
import os
import polanalyser as pa
def main():
# Read and Demosaicing image
filepath = "dataset/dragon.png"
img_raw = cv2.imread(filepath, 0)
img_demosaiced = pa.demosaicing(img_raw)
# Calculate Stokes vector
radians = np.array([0, np.pi/4, np.pi/2, np.pi*3/4])
img_stokes = pa.calcStokes(img_demosaiced, radians)
# Convert Stokes vector to DoLP and AoLP
img_DoLP = pa.cvtStokesToDoLP(img_stokes) # 0~1
img_AoLP = pa.cvtStokesToAoLP(img_stokes) # 0~pi
name, ext = os.path.splitext(filepath)
# Apply the HSV color map on the AoLP image
img_AoLP_color = pa.applyColorToAoLP(img_AoLP)
cv2.imwrite(name+"_AoLP"+".png", img_AoLP_color)
# Set saturation to DoLP
# As a result, the image is lighter and has color only at the polarized area
img_AoLP_light = pa.applyColorToAoLP(img_AoLP, saturation=img_DoLP)
cv2.imwrite(name+"_AoLP_saturation"+".png", img_AoLP_light)
# Set value to DoLP
# As a result, the image is darker and has color only at the polarized area
img_AoLP_dark = pa.applyColorToAoLP(img_AoLP, value=img_DoLP)
cv2.imwrite(name+"_AoLP_value"+".png", img_AoLP_dark)
if __name__=="__main__":
main()
|
py | b404e1bea05597791500c8b91b03ebea37b65713 | X = 99
def setX(new):
global X
X = new
|
py | b404e460f93573f5a99552c88a071e2b7982448a | import torchvision.models as models
import numpy as np
import foolbox
from cnns.nnlib.datasets.imagenet.imagenet_pytorch import imagenet_min
from cnns.nnlib.datasets.imagenet.imagenet_pytorch import imagenet_max
from cnns.nnlib.datasets.imagenet.imagenet_pytorch import imagenet_mean_array
from cnns.nnlib.datasets.imagenet.imagenet_pytorch import imagenet_std_array
# instantiate the model
resnet18 = models.resnet18(
pretrained=True).cuda().eval() # for CPU, remove cuda()
model = foolbox.models.PyTorchModel(resnet18, bounds=(0, 1), num_classes=1000,
preprocessing=(imagenet_mean_array,
imagenet_std_array))
for index in range(20):
print("\n image index: ", index)
image, label = foolbox.utils.samples("imagenet", index=index,
data_format="channels_first")
image = image / 255 # map from [0,255] to [0,1]
# no batch dimension
image = image[0]
label = label[0]
print("true prediction: ", label)
# Original prediction of the model (without any adversarial changes or noise).
original_predictions = model.predictions(image)
print("original prediction: ", np.argmax(original_predictions))
# Attack the image.
attack = foolbox.attacks.FGSM(model)
adversarial_image = attack(image, label)
adversarial_predictions = model.predictions(adversarial_image)
print("adversarial prediction: ", np.argmax(adversarial_predictions))
# Add uniform noise.
noiser = foolbox.attacks.AdditiveUniformNoiseAttack()
noise = noiser._sample_noise(
epsilon=0.009, image=image,
bounds=(imagenet_min, imagenet_max))
randomized_image = adversarial_image + noise
noise_predictions = model.predictions(randomized_image)
print("uniform noise prediction: ", np.argmax(noise_predictions))
images, labels = foolbox.utils.samples(dataset='imagenet', batchsize=20,
shape=(224, 224),
data_format='channels_last')
print(images.shape)
images = np.transpose(images, (0, 3, 1, 2))
print(images.shape)
images = images / 255
predictions = model.batch_predictions(images)
print("predictions: ", predictions.shape)
print(np.argmax(predictions, axis=1), labels)
|
py | b404e5dead68d183f2897e153ba286fcbd2fbdc2 |
# coding: utf-8
import sys
# In[ ]:
from tadnet import TADNet as SunNet
import torch
import numpy as np
import torch.nn.functional as F
from torch.utils.data import Dataset,DataLoader,TensorDataset
import visdom
import copy
# In[ ]:
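# Processor wraps a pre-trained TADNet: __init__ builds the 9-level dilated network,
# moves it to the GPU and loads the weights from models/net9_20.pth; eval() runs one
# forward pass and returns (predicted_label, softmax_confidence) for a single input.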
class Processor:
def __init__(self):
        self.dilation_depth=9  # how many layers the network has
        self.out_len_redce=2**self.dilation_depth-1  # by how much the output is shortened
self.model = SunNet(self.dilation_depth).cuda()
self.model.load_state_dict(torch.load(r"models/net9_20.pth"))
def eval(self,input_x):
input_x=torch.tensor(input_x)
input_x=input_x.float().unsqueeze(0).to('cuda')
self.model.eval()
#print("input_x",input_x.size())
with torch.no_grad():
output = self.model(input_x)
output=output[-1]
_, predict_label = torch.max(output.data, 0)
output = F.softmax(output,dim=0)
#print(predict_label,output)
return int(predict_label),float(output[predict_label])
# In[ ]:
|
py | b404e6dec42b794c75c6e0da25304224e134eb26 | from datetime import datetime, date
from marqeta.response_models import datetime_object
import json
import re
class Gatewaylog(object):
def __init__(self, json_response):
self.json_response = json_response
def __str__(self):
return json.dumps(self.json_response, default=self.json_serial)
@staticmethod
def json_serial(o):
if isinstance(o, datetime) or isinstance(o, date):
return o.__str__()
@property
def id(self):
return self.json_response.get('id', None)
@property
def traceNumber(self):
return self.json_response.get('traceNumber', None)
@property
def paymentTypeCode(self):
return self.json_response.get('paymentTypeCode', None)
@property
def achTransactionType(self):
return self.json_response.get('achTransactionType', None)
@property
def memo(self):
return self.json_response.get('memo', None)
@property
def gatewayVersion(self):
return self.json_response.get('gatewayVersion', None)
@property
def gatewayResponse(self):
return self.json_response.get('gatewayResponse', None)
@property
def timedOut(self):
return self.json_response.get('timedOut', None)
@property
def deal_Id(self):
return self.json_response.get('deal_Id', None)
@property
def order_Id(self):
return self.json_response.get('order_Id', None)
@property
def request_method(self):
return self.json_response.get('request_method', None)
@property
def response_code(self):
return self.json_response.get('response_code', None)
@property
def response_subcode(self):
return self.json_response.get('response_subcode', None)
@property
def response_reasoncode(self):
return self.json_response.get('response_reasoncode', None)
@property
def response_message(self):
return self.json_response.get('response_message', None)
@property
def status(self):
return self.json_response.get('status', None)
@property
def fraud_avs(self):
return self.json_response.get('fraud_avs', None)
@property
def fraud_auth(self):
return self.json_response.get('fraud_auth', None)
@property
def fraud_cvv(self):
return self.json_response.get('fraud_cvv', None)
@property
def gateway_transactionId(self):
return self.json_response.get('gateway_transactionId', None)
@property
def original_gateway(self):
if 'original_gateway' in self.json_response:
return Gatewaylog(self.json_response['original_gateway'])
@property
def amount(self):
return self.json_response.get('amount', None)
@property
def duplicate(self):
return self.json_response.get('duplicate', None)
@property
def post_date(self):
if 'post_date' in self.json_response:
return datetime_object('post_date', self.json_response)
@property
def response_time(self):
if 'response_time' in self.json_response:
return datetime_object('response_time', self.json_response)
@property
def api_duration(self):
return self.json_response.get('api_duration', None)
@property
def gateway_duration(self):
return self.json_response.get('gateway_duration', None)
@property
def ach_status(self):
return self.json_response.get('ach_status', None)
@property
def created(self):
if 'created' in self.json_response:
return datetime_object('created', self.json_response)
@property
def modified(self):
if 'modified' in self.json_response:
return datetime_object('modified', self.json_response)
def __repr__(self):
return '<Marqeta.response_models.gatewaylog.Gatewaylog>' + self.__str__()
|
py | b404e71abddd9996fc3d7acd5cf58877e26be5ba | # BSD 3-Clause License
#
# Copyright (c) 2021., Redis Labs Modules
# All rights reserved.
#
import datetime
from redis_benchmarks_specification.__common__.env import (
GH_REDIS_SERVER_HOST,
GH_TOKEN,
GH_REDIS_SERVER_PORT,
GH_REDIS_SERVER_AUTH,
GH_REDIS_SERVER_USER,
)
from redisbench_admin.run.common import get_start_time_vars
START_TIME_NOW_UTC, _, _ = get_start_time_vars()
START_TIME_LAST_YEAR_UTC = START_TIME_NOW_UTC - datetime.timedelta(days=7)
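# Note: despite the LAST_YEAR name, this default start time is only 7 days before now.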
def spec_cli_args(parser):
parser.add_argument("--redis_host", type=str, default=GH_REDIS_SERVER_HOST)
parser.add_argument("--branch", type=str, default="unstable")
parser.add_argument("--gh_token", type=str, default=GH_TOKEN)
parser.add_argument("--redis_port", type=int, default=GH_REDIS_SERVER_PORT)
parser.add_argument("--redis_pass", type=str, default=GH_REDIS_SERVER_AUTH)
parser.add_argument("--redis_user", type=str, default=GH_REDIS_SERVER_USER)
parser.add_argument(
"--from-date",
type=lambda s: datetime.datetime.strptime(s, "%Y-%m-%d"),
default=START_TIME_LAST_YEAR_UTC,
)
parser.add_argument(
"--to-date",
type=lambda s: datetime.datetime.strptime(s, "%Y-%m-%d"),
default=START_TIME_NOW_UTC,
)
parser.add_argument("--redis_repo", type=str, default=None)
parser.add_argument("--trigger-unstable-commits", type=bool, default=True)
parser.add_argument(
"--use-tags",
default=False,
action="store_true",
help="Iterate over the git tags.",
)
parser.add_argument(
"--tags-regexp",
type=str,
default=".*",
help="Interpret PATTERN as a regular expression to filter tag names",
)
parser.add_argument(
"--use-branch",
default=False,
action="store_true",
help="Iterate over the git commits.",
)
parser.add_argument(
"--dry-run",
default=False,
action="store_true",
help="Only check how many benchmarks we would trigger. Don't request benchmark runs at the end.",
)
return parser
|
py | b404e7a002e693206a9f3b4f428730ac5ad38157 | # This is a template for the settings.py file used in this project
# pylint: disable=unused-wildcard-import
from .settings_public import *
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ''
if DEBUG:
ALLOWED_HOSTS = ['127.0.0.1']
else:
ALLOWED_HOSTS = ['127.0.0.1']
# Logging configuration
# LOGGING = {
# 'version': 1,
# 'disable_existing_loggers': False,
# 'handlers': {
# 'file': {
# 'level': 'DEBUG',
# 'class': 'logging.FileHandler',
# 'filename': f'/var/log/django/urlshort/debug.log',
# },
# },
# 'loggers': {
# 'django': {
# 'handlers': ['file'],
# 'level': 'DEBUG',
# 'propagate': True,
# },
# },
# }
# Email config
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.example.com'
EMAIL_HOST_USER = '[email protected]'
DEFAULT_FROM_EMAIL = EMAIL_HOST_USER
EMAIL_HOST_PASSWORD = '******'
EMAIL_PORT = 587
EMAIL_USE_SSL = False
EMAIL_ADMIN_USER = '[email protected]'
# VirusTotal
VIRUSTOTAL_API_KEY = ''
# reCAPTChA
RECAPTCHA_PUBLIC_KEY = ''
RECAPTCHA_PRIVATE_KEY = ''
|
py | b404e8f289a55d736ee8aaa0a187b5a8e84eea13 | from band import expose, cleanup, worker, settings as cfg, logger, response
from facebook_business.api import FacebookAdsApi
from facebook_business.adobjects.adaccount import AdAccount
from facebook_business.adobjects.adset import AdSet
my_app_id = cfg.fb_app_id
my_app_secret = cfg.fb_app_secret
my_access_token = cfg.fb_token
auth_url = f"https://graph.facebook.com/oauth/access_token?client_id={cfg.fb_app_id}&client_secret={my_app_secret}&redirect_uri={cfg.redirect}&grant_type=client_credentials"
logger.info(f'auth url: {auth_url}')
FacebookAdsApi.init(my_app_id, my_app_secret, my_access_token)
id = 'act_207528810141959'
account = AdAccount(id)
adsets = account.get_ad_sets(fields=[AdSet.Field.name])
for adset in adsets:
print(adset[AdSet.Field.name])
# my_accounts = list(me.get_ad_accounts())
# print(my_accounts)
# campaigns = my_account
# print(campaigns)
|
py | b404e91736d8bf2560e90aeea32352bbc17c73e1 | # Generated by Django 3.0.6 on 2020-10-14 02:52
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('base', '0035_schedule_patient'),
]
operations = [
migrations.AlterField(
model_name='schedule',
name='duration',
field=models.IntegerField(choices=[(15, '15 min'), (30, '30 min'), (45, '45 min'), (60, '60 min'), (75, '75 min'), (90, '90 min'), (105, '105 min'), (120, '120 min')], default=60),
),
migrations.AlterField(
model_name='schedule',
name='hour',
field=models.IntegerField(choices=[(0, '0 h'), (1, '1 h'), (2, '2 h'), (3, '3 h'), (4, '4 h'), (5, '5 h'), (6, '6 h'), (7, '7 h'), (8, '8 h'), (9, '9 h'), (10, '10 h'), (11, '11 h'), (12, '12 h'), (13, '13 h'), (14, '14 h'), (15, '15 h'), (16, '16 h'), (17, '17 h'), (18, '18 h'), (19, '19 h'), (20, '20 h'), (21, '21 h'), (22, '22 h'), (23, '23 h')], default=8),
),
migrations.AlterField(
model_name='schedule',
name='min',
field=models.IntegerField(choices=[(0, '0 min'), (15, '15 min'), (30, '30 min'), (45, '45 min')], default=0),
),
migrations.AlterField(
model_name='schedule',
name='professional',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='base.Professional'),
),
]
|
py | b404ea09f3940efb5534e0d9df083e4855bc9304 | #!/usr/bin/python
#
# linearize-data.py: Construct a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import print_function, division
import json
import struct
import re
import os
import os.path
import base64
import httplib
import sys
import hashlib
import RPWN_hash
import datetime
import time
from collections import namedtuple
settings = {}
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
def calc_hdr_hash(blk_hdr):
#hash1 = hashlib.sha256()
#hash1.update(blk_hdr)
#hash1_o = hash1.digest()
#hash2 = hashlib.sha256()
#hash2.update(hash1_o)
#hash2_o = hash2.digest()
#return hash2_o
pow_hash = RPWN_hash.getPoWHash(blk_hdr)
return pow_hash
def calc_hash_str(blk_hdr):
hash = calc_hdr_hash(blk_hdr)
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
return hash_str
def get_blk_dt(blk_hdr):
members = struct.unpack("<I", blk_hdr[68:68+4])
nTime = members[0]
dt = datetime.datetime.fromtimestamp(nTime)
dt_ym = datetime.datetime(dt.year, dt.month, 1)
return (dt_ym, nTime)
def get_block_hashes(settings):
blkindex = []
f = open(settings['hashlist'], "r")
for line in f:
line = line.rstrip()
blkindex.append(line)
print("Read " + str(len(blkindex)) + " hashes")
return blkindex
def mkblockmap(blkindex):
blkmap = {}
for height,hash in enumerate(blkindex):
blkmap[hash] = height
return blkmap
# Block header and extent on disk
BlockExtent = namedtuple('BlockExtent', ['fn', 'offset', 'inhdr', 'blkhdr', 'size'])
class BlockDataCopier:
def __init__(self, settings, blkindex, blkmap):
self.settings = settings
self.blkindex = blkindex
self.blkmap = blkmap
self.inFn = 0
self.inF = None
self.outFn = 0
self.outsz = 0
self.outF = None
self.outFname = None
self.blkCountIn = 0
self.blkCountOut = 0
self.lastDate = datetime.datetime(2000, 1, 1)
self.highTS = 1408893517 - 315360000
self.timestampSplit = False
self.fileOutput = True
self.setFileTime = False
self.maxOutSz = settings['max_out_sz']
if 'output' in settings:
self.fileOutput = False
if settings['file_timestamp'] != 0:
self.setFileTime = True
if settings['split_timestamp'] != 0:
self.timestampSplit = True
# Extents and cache for out-of-order blocks
self.blockExtents = {}
self.outOfOrderData = {}
self.outOfOrderSize = 0 # running total size for items in outOfOrderData
def writeBlock(self, inhdr, blk_hdr, rawblock):
blockSizeOnDisk = len(inhdr) + len(blk_hdr) + len(rawblock)
if not self.fileOutput and ((self.outsz + blockSizeOnDisk) > self.maxOutSz):
self.outF.close()
if self.setFileTime:
                os.utime(self.outFname, (int(time.time()), self.highTS))
self.outF = None
self.outFname = None
self.outFn = self.outFn + 1
self.outsz = 0
(blkDate, blkTS) = get_blk_dt(blk_hdr)
if self.timestampSplit and (blkDate > self.lastDate):
print("New month " + blkDate.strftime("%Y-%m") + " @ " + hash_str)
lastDate = blkDate
if outF:
outF.close()
if setFileTime:
os.utime(outFname, (int(time.time()), highTS))
self.outF = None
self.outFname = None
self.outFn = self.outFn + 1
self.outsz = 0
if not self.outF:
if self.fileOutput:
outFname = self.settings['output_file']
else:
outFname = os.path.join(self.settings['output'], "blk%05d.dat" % self.outFn)
print("Output file " + outFname)
self.outF = open(outFname, "wb")
self.outF.write(inhdr)
self.outF.write(blk_hdr)
self.outF.write(rawblock)
self.outsz = self.outsz + len(inhdr) + len(blk_hdr) + len(rawblock)
self.blkCountOut = self.blkCountOut + 1
if blkTS > self.highTS:
self.highTS = blkTS
if (self.blkCountOut % 1000) == 0:
print('%i blocks scanned, %i blocks written (of %i, %.1f%% complete)' %
(self.blkCountIn, self.blkCountOut, len(self.blkindex), 100.0 * self.blkCountOut / len(self.blkindex)))
def inFileName(self, fn):
return os.path.join(self.settings['input'], "blk%05d.dat" % fn)
def fetchBlock(self, extent):
'''Fetch block contents from disk given extents'''
with open(self.inFileName(extent.fn), "rb") as f:
f.seek(extent.offset)
return f.read(extent.size)
def copyOneBlock(self):
'''Find the next block to be written in the input, and copy it to the output.'''
extent = self.blockExtents.pop(self.blkCountOut)
if self.blkCountOut in self.outOfOrderData:
# If the data is cached, use it from memory and remove from the cache
rawblock = self.outOfOrderData.pop(self.blkCountOut)
self.outOfOrderSize -= len(rawblock)
else: # Otherwise look up data on disk
rawblock = self.fetchBlock(extent)
self.writeBlock(extent.inhdr, extent.blkhdr, rawblock)
def run(self):
while self.blkCountOut < len(self.blkindex):
if not self.inF:
fname = self.inFileName(self.inFn)
print("Input file " + fname)
try:
self.inF = open(fname, "rb")
except IOError:
print("Premature end of block data")
return
inhdr = self.inF.read(8)
if (not inhdr or (inhdr[0] == "\0")):
self.inF.close()
self.inF = None
self.inFn = self.inFn + 1
continue
inMagic = inhdr[:4]
if (inMagic != self.settings['netmagic']):
print("Invalid magic: " + inMagic.encode('hex'))
return
inLenLE = inhdr[4:]
su = struct.unpack("<I", inLenLE)
inLen = su[0] - 80 # length without header
blk_hdr = self.inF.read(80)
inExtent = BlockExtent(self.inFn, self.inF.tell(), inhdr, blk_hdr, inLen)
hash_str = calc_hash_str(blk_hdr)
            if hash_str not in self.blkmap:
print("Skipping unknown block " + hash_str)
self.inF.seek(inLen, os.SEEK_CUR)
continue
blkHeight = self.blkmap[hash_str]
self.blkCountIn += 1
if self.blkCountOut == blkHeight:
# If in-order block, just copy
rawblock = self.inF.read(inLen)
self.writeBlock(inhdr, blk_hdr, rawblock)
# See if we can catch up to prior out-of-order blocks
while self.blkCountOut in self.blockExtents:
self.copyOneBlock()
else: # If out-of-order, skip over block data for now
self.blockExtents[blkHeight] = inExtent
if self.outOfOrderSize < self.settings['out_of_order_cache_sz']:
# If there is space in the cache, read the data
# Reading the data in file sequence instead of seeking and fetching it later is preferred,
# but we don't want to fill up memory
self.outOfOrderData[blkHeight] = self.inF.read(inLen)
self.outOfOrderSize += inLen
else: # If no space in cache, seek forward
self.inF.seek(inLen, os.SEEK_CUR)
print("Done (%i blocks written)" % (self.blkCountOut))
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: linearize-data.py CONFIG-FILE")
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'netmagic' not in settings:
settings['netmagic'] = 'cee2caff'
if 'genesis' not in settings:
settings['genesis'] = '00000bafbc94add76cb75e2ec92894837288a481e5c005f6563d91623bf8bc2c'
if 'input' not in settings:
settings['input'] = 'input'
if 'hashlist' not in settings:
settings['hashlist'] = 'hashlist.txt'
if 'file_timestamp' not in settings:
settings['file_timestamp'] = 0
if 'split_timestamp' not in settings:
settings['split_timestamp'] = 0
if 'max_out_sz' not in settings:
settings['max_out_sz'] = 1000L * 1000 * 1000
if 'out_of_order_cache_sz' not in settings:
settings['out_of_order_cache_sz'] = 100 * 1000 * 1000
settings['max_out_sz'] = long(settings['max_out_sz'])
settings['split_timestamp'] = int(settings['split_timestamp'])
settings['file_timestamp'] = int(settings['file_timestamp'])
settings['netmagic'] = settings['netmagic'].decode('hex')
settings['out_of_order_cache_sz'] = int(settings['out_of_order_cache_sz'])
if 'output_file' not in settings and 'output' not in settings:
print("Missing output file / directory")
sys.exit(1)
blkindex = get_block_hashes(settings)
blkmap = mkblockmap(blkindex)
if not settings['genesis'] in blkmap:
print("Genesis block not found in hashlist")
else:
BlockDataCopier(settings, blkindex, blkmap).run()
|
py | b404ea41aaf92ab6ace7344113ec075318ed1e49 | import torch
from torch import nn, Tensor
from typing import Union, Tuple, List, Iterable, Dict
from ..SentenceTransformer import SentenceTransformer
import logging
logger = logging.getLogger(__name__)
class SoftmaxLoss(nn.Module):
"""
This loss was used in our SBERT publication (https://arxiv.org/abs/1908.10084) to train the SentenceTransformer
model on NLI data. It adds a softmax classifier on top of the output of two transformer networks.
:param model: SentenceTransformer model
:param sentence_embedding_dimension: Dimension of your sentence embeddings
:param num_labels: Number of different labels
:param concatenation_sent_rep: Concatenate vectors u,v for the softmax classifier?
:param concatenation_sent_difference: Add abs(u-v) for the softmax classifier?
:param concatenation_sent_multiplication: Add u*v for the softmax classifier?
Example::
from sentence_transformers import SentenceTransformer, SentencesDataset, losses
from sentence_transformers.readers import InputExample
from torch.utils.data import DataLoader
model = SentenceTransformer('distilbert-base-nli-mean-tokens')
train_examples = [InputExample(texts=['First pair, sent A', 'First pair, sent B'], label=0),
InputExample(texts=['Second Pair, sent A', 'Second Pair, sent B'], label=3)]
train_dataset = SentencesDataset(train_examples, model)
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=train_batch_size)
train_loss = losses.SoftmaxLoss(model=model, sentence_embedding_dimension=model.get_sentence_embedding_dimension(), num_labels=train_num_labels)
"""
def __init__(self,
model: SentenceTransformer,
sentence_embedding_dimension: int,
num_labels: int,
concatenation_sent_rep: bool = True,
concatenation_sent_difference: bool = True,
concatenation_sent_multiplication: bool = False):
super(SoftmaxLoss, self).__init__()
self.model = model
self.num_labels = num_labels
self.concatenation_sent_rep = concatenation_sent_rep
self.concatenation_sent_difference = concatenation_sent_difference
self.concatenation_sent_multiplication = concatenation_sent_multiplication
num_vectors_concatenated = 0
if concatenation_sent_rep:
num_vectors_concatenated += 2
if concatenation_sent_difference:
num_vectors_concatenated += 1
if concatenation_sent_multiplication:
num_vectors_concatenated += 1
logger.info("Softmax loss: #Vectors concatenated: {}".format(num_vectors_concatenated))
self.classifier = nn.Linear(num_vectors_concatenated * sentence_embedding_dimension, num_labels)
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
reps = [self.model(sentence_feature)['sentence_embedding'] for sentence_feature in sentence_features]
rep_a, rep_b = reps
vectors_concat = []
if self.concatenation_sent_rep:
vectors_concat.append(rep_a)
vectors_concat.append(rep_b)
if self.concatenation_sent_difference:
vectors_concat.append(torch.abs(rep_a - rep_b))
if self.concatenation_sent_multiplication:
vectors_concat.append(rep_a * rep_b)
features = torch.cat(vectors_concat, 1)
output = self.classifier(features)
loss_fct = nn.CrossEntropyLoss()
if labels is not None:
return loss_fct(output, labels.view(-1))
else:
return reps, output
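# Illustrative sketch, not part of the original file: the two return paths of forward() above.
# With labels the call yields a scalar cross-entropy loss; with labels=None it yields the
# sentence embeddings together with the raw classifier logits.
#
#     loss = train_loss(sentence_features, labels)          # training step
#     reps, logits = train_loss(sentence_features, None)    # inspection / evaluation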
|
py | b404ed5c71c19ecf98ac139d864009726ecec73f | # Crie um programa que leia quanto dinheiro uma pessoa tem na carteira e mostre quantos dólares ela pode comprar
#x = float(input('O valor que você possui: R$'))
#dolar = x / 5.37
#euros = x / 6.44
#libras = x / 7.46
#print('Com R${:.2f} você poderá comprar US${:.2f} de doláres '.format(x, dolar))
#print('Com R${:.2f} você podera comprar {:.2f} Euros'.format(x, euros))
#print('Com R${:.2f} você podera comprar {:.2f} Libras'.format(x, libras))
x = float(input('O valor que você possui: R$'))
dolar = x / 5.37
euros = x / 6.44
libras = x / 7.46
print('Com \033[32m R${:.2F}\033[m você poderá comprar \033[32m US${:.2f}\033[m de doláres'.format(x, dolar))
print('Com \033[32m R${:.2f}\033[m você poderá comprar \033[32m{:.2f}\033[m Euros'.format(x, euros))
print('Com \033[32m R${:.2f}\033[m você poderá comprar \033[32m{:.2f}\033[m Libras'.format(x, libras))
|
py | b404ee730067e978742eee2c515b7539e75a11b1 | from datetime import datetime, timedelta
import unittest
from app import app, db
from app.models import Student, CommunityComment
class UserModelCase(unittest.TestCase):
def setUp(self):
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite://'
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
def test_password_hashing(self):
u = Student(student_full_name='susan')
u.set_password('cat')
self.assertFalse(u.check_password('dog'))
self.assertTrue(u.check_password('cat'))
def test_avatar(self):
u = Student(
student_full_name='john',
student_email='[email protected]'
)
self.assertEqual(
u.avatar_student(128),
('https://www.gravatar.com/avatar/'
'd4c74594d841139328695756648b6bd6'
'?d=identicon&s=128')
)
def test_follow(self):
u1 = Student(
student_full_name='john',
student_email='[email protected]'
)
u2 = Student(
student_full_name='susan',
student_email='[email protected]'
)
db.session.add(u1)
db.session.add(u2)
db.session.commit()
self.assertEqual(u1.followed.all(), [])
self.assertEqual(u1.followers.all(), [])
u1.follow(u2)
db.session.commit()
self.assertTrue(u1.is_following(u2))
self.assertEqual(u1.followed.count(), 1)
self.assertEqual(u1.followed.first().student_full_name, 'susan')
self.assertEqual(u2.followers.count(), 1)
self.assertEqual(u2.followers.first().student_full_name, 'john')
u1.unfollow(u2)
db.session.commit()
self.assertFalse(u1.is_following(u2))
self.assertEqual(u1.followed.count(), 0)
self.assertEqual(u2.followers.count(), 0)
def test_follow_posts(self):
# create four users
u1 = Student(
student_full_name='john',
student_email='[email protected]'
)
u2 = Student(
student_full_name='susan',
student_email='[email protected]'
)
u3 = Student(
student_full_name='mary',
student_email='[email protected]'
)
u4 = Student(
student_full_name='david',
student_email='[email protected]'
)
db.session.add_all([u1, u2, u3, u4])
# create four posts
now = datetime.utcnow()
p1 = CommunityComment(
body="post from john",
author=u1,
timestamp=now + timedelta(seconds=1)
)
p2 = CommunityComment(
body="post from susan",
author=u2,
timestamp=now + timedelta(seconds=4)
)
p3 = CommunityComment(
body="post from mary",
author=u3,
timestamp=now + timedelta(seconds=3)
)
p4 = CommunityComment(
body="post from david",
author=u4,
timestamp=now + timedelta(seconds=2)
)
db.session.add_all([p1, p2, p3, p4])
db.session.commit()
# setup the followers
u1.follow(u2) # john follows susan
u1.follow(u4) # john follows david
u2.follow(u3) # susan follows mary
u3.follow(u4) # mary follows david
db.session.commit()
# check the followed posts of each user
f1 = u1.followed_comments().all()
f2 = u2.followed_comments().all()
f3 = u3.followed_comments().all()
f4 = u4.followed_comments().all()
self.assertEqual(f1, [p2, p4, p1])
self.assertEqual(f2, [p2, p3])
self.assertEqual(f3, [p3, p4])
self.assertEqual(f4, [p4])
if __name__ == '__main__':
unittest.main(verbosity=2)
|
py | b404ef6b2373abc39c94a5df2c0a1bc6c085b2f6 | from django.db import models
from django.utils.translation import ugettext_lazy as _
# Create your models here.
class Location(models.Model):
latitude = models.DecimalField(_("Latitude"),
max_digits=15, decimal_places=10, null=True, blank=True)
longitude = models.DecimalField(_("Longitude"),
max_digits=15, decimal_places=10, null=True, blank=True)
device_id = models.CharField(_("Device Serial Number"), max_length=80, unique=True)
created_at = models.DateField(_("Created At"), auto_now_add=True)
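# auto_now refreshes the timestamp on every save(), unlike auto_now_add which only sets it
# when the row is first created.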
updated_at = models.DateTimeField(_("Updated At"), auto_now=True)
|
py | b404f0322a0c939ec9228ecb18bc49705954df9d | import math
from django.db.models import F, Func, Sum, Count, Case, When, ExpressionWrapper, FloatField
from django.shortcuts import render
from calcs import models
from calcs import forms
# Create your views here.
def show_events(request):
return render(request, 'events.html', {"events": models.Event.objects.all()})
def create_event(request):
return render(request, 'create_event.html', {"form": forms.EventForm()})
def event(request, eventId):
event = models.Event.objects.get(pk=eventId)
meals = list(models.MealsInEvent.objects \
.filter(FkEvent=eventId) \
.annotate(num_components=Count('FkMeal__mealcomponent__id')) \
.values('id', 'FkMeal__Name', 'num_components', 'AttendeeCount') \
)
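# The annotations below walk from each purchasable item back through its meal components and
# the meals scheduled for this event, computing per ingredient:
#   quantity_needed - sum of AmountPerPerson * AttendeeCount over every meal that uses it
#   num_packages    - packages still to buy beyond AlreadyHave, with ROUND(x + 0.49) used to
#                     approximate rounding up to whole packages
#   total_cost      - num_packages * UnitPrice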
ingredient_costs = models.PurchaseableItem.objects.filter(mealcomponent__Meal__event__id=eventId)\
.distinct()\
.annotate(quantity_needed = Sum(F('mealcomponent__AmountPerPerson') * F('mealcomponent__Meal__mealsinevent__AttendeeCount'), output_field=FloatField())) \
.annotate(num_packages = Case(When(AlreadyHave__gt= F('quantity_needed') / F('QuantityProvided'), then=0),
default=Func(0.49 - F('AlreadyHave') + (F('quantity_needed') / F('QuantityProvided')), output_field=FloatField(), function='ROUND'))) \
.annotate(total_cost = ExpressionWrapper(F('num_packages') * F('UnitPrice'), output_field=FloatField())) \
.values('quantity_needed', 'num_packages', 'total_cost', 'ItemName', 'AlreadyHave', 'QuantityProvided', 'PurchaseLink', 'QuantityUnits__Name', 'id')
total_cost = 0
for i in ingredient_costs:
if i['total_cost'] > 0:
total_cost += i['total_cost']
return render(request, 'event.html',
{
"event": event,
"meals": meals,
"total_cost": total_cost,
"ingredient_costs": ingredient_costs,
})
def mealcost(request, mealInEventId):
mie = models.MealsInEvent.objects.select_related('FkMeal').get(pk=mealInEventId)
scaledComponents = []
totalCost = 0
for comp in models.MealComponent.objects.filter(Meal=mie.FkMeal).select_related('Ingredient', 'Ingredient__QuantityUnits'):
total = comp.AmountForGroup + (comp.AmountPerPerson * mie.AttendeeCount)
purchaseIncrement = comp.Ingredient.QuantityProvided
numToPurchase = math.ceil(total / purchaseIncrement)
itemCost = numToPurchase * comp.Ingredient.UnitPrice
totalCost += itemCost
scaledComponents.append({
"Name": comp.Ingredient.ItemName,
"AmountPerPerson": comp.AmountPerPerson,
"AmountForGroup": comp.AmountForGroup,
"Units": comp.Units.Name,
"TotalAmount": total,
"PurchaseIncrement": purchaseIncrement,
"NumberToPurchase": numToPurchase,
"ItemCost": itemCost,
})
return render(request, 'cost.html',
{
"mealInEvent": mie,
"scaledComponents": scaledComponents,
"TotalCost": totalCost,
})
def ingredient(request, ingredientId, eventId):
event = models.Event.objects.select_related('MealsInEvent__AttendeeCount').get(pk=eventId)
ingredient = models.PurchaseableItem.objects.get(pk=ingredientId)
in_meals = models.MealComponent.objects.select_related('Ingredient').select_related('Meal').select_related('Ingredient__QuantityUnits').filter(Ingredient=ingredientId)
# print(in_meals)
return render(request, 'ingredient.html', {"in_meals": in_meals, "ingredient": ingredient, "event": event}) |
py | b404f0464ca91ab00edbd0ca9410bd30d246a07d | import os.path
from parameterized import parameterized
import pytest
from unittest import TestCase
from samtranslator.yaml_helper import yaml_parse
from samtranslator.validator.validator import SamTemplateValidator
from tests.validator.test_validator import TestValidatorBase
BASE_PATH = os.path.dirname(__file__)
INPUT_FOLDER = os.path.join(BASE_PATH, "input", "root")
OUTPUT_FOLDER = os.path.join(BASE_PATH, "output", "root")
class TestValidatorRoot(TestValidatorBase):
@parameterized.expand(
[
"error_awstemplateformatversion_unknown",
"error_empty_template",
"error_resources_empty",
"error_resources_missing",
"error_resources_not_object",
"error_resources",
"error_transform_empty",
],
)
def test_errors(self, template):
self._test_validator_error(os.path.join(INPUT_FOLDER, template), os.path.join(OUTPUT_FOLDER, template))
@parameterized.expand(
[
"success_minimal_template",
],
)
def test_success(self, template):
self._test_validator_success(os.path.join(INPUT_FOLDER, template))
|
py | b404f0af974ea43f82c5e81208a0d6be64d6d9cf | # Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
import importlib
from typing import (
Any, Dict, List, Optional,
)
from gremlin_python.process.graph_traversal import GraphTraversalSource, __
from gremlin_python.process.traversal import (
Order, T, TextP,
)
from pyhocon import ConfigTree
from databuilder import Scoped
from databuilder.clients.neptune_client import NeptuneSessionClient
from databuilder.extractor.base_extractor import Extractor
from databuilder.models.cluster.cluster_constants import CLUSTER_REVERSE_RELATION_TYPE
from databuilder.models.column_usage_model import ColumnUsageModel
from databuilder.models.dashboard.dashboard_chart import DashboardChart
from databuilder.models.dashboard.dashboard_metadata import DashboardMetadata
from databuilder.models.dashboard.dashboard_query import DashboardQuery
from databuilder.models.owner_constants import OWNER_OF_OBJECT_RELATION_TYPE
from databuilder.models.schema.schema_constant import SCHEMA_REVERSE_RELATION_TYPE
from databuilder.models.table_metadata import DescriptionMetadata, TableMetadata
from databuilder.models.timestamp.timestamp_constants import LASTUPDATED_RELATION_TYPE, TIMESTAMP_PROPERTY
from databuilder.models.usage.usage_constants import READ_RELATION_COUNT_PROPERTY, READ_REVERSE_RELATION_TYPE
from databuilder.models.user import User
from databuilder.serializers.neptune_serializer import METADATA_KEY_PROPERTY_NAME
def _table_search_query(graph: GraphTraversalSource, tag_filter: str) -> List[Dict]:
traversal = graph.V().hasLabel(TableMetadata.TABLE_NODE_LABEL)
if tag_filter:
traversal = traversal.has('published_tag', tag_filter)
traversal = traversal.project(
'database',
'cluster',
'schema',
'schema_description',
'name',
'key',
'description',
'last_updated_timestamp',
'column_names',
'column_descriptions',
'total_usage',
'unique_usage',
'tags',
'badges',
'programmatic_descriptions'
)
traversal = traversal.by(
__.out(
TableMetadata.TABLE_SCHEMA_RELATION_TYPE
).out(SCHEMA_REVERSE_RELATION_TYPE).out(CLUSTER_REVERSE_RELATION_TYPE).values('name')
) # database
traversal = traversal.by(
__.out(TableMetadata.TABLE_SCHEMA_RELATION_TYPE).out(SCHEMA_REVERSE_RELATION_TYPE).values('name')
) # cluster
traversal = traversal.by(__.out(TableMetadata.TABLE_SCHEMA_RELATION_TYPE).values('name')) # schema
traversal = traversal.by(__.coalesce(
__.out(TableMetadata.TABLE_SCHEMA_RELATION_TYPE).out(
DescriptionMetadata.DESCRIPTION_RELATION_TYPE
).values('description'),
__.constant('')
)) # schema_description
traversal = traversal.by('name') # name
traversal = traversal.by(T.id) # key
traversal = traversal.by(__.coalesce(
__.out(DescriptionMetadata.DESCRIPTION_RELATION_TYPE).values('description'),
__.constant('')
)) # description
traversal = traversal.by(
__.coalesce(__.out(LASTUPDATED_RELATION_TYPE).values(TIMESTAMP_PROPERTY), __.constant(''))
) # last_updated_timestamp
traversal = traversal.by(__.out(TableMetadata.TABLE_COL_RELATION_TYPE).values('name').fold()) # column_names
traversal = traversal.by(
__.out(TableMetadata.TABLE_COL_RELATION_TYPE).out(
DescriptionMetadata.DESCRIPTION_RELATION_TYPE
).values('description').fold()
) # column_descriptions
traversal = traversal.by(__.coalesce(
__.outE(ColumnUsageModel.TABLE_USER_RELATION_TYPE).values('read_count'),
__.constant(0)).sum()
) # total_usage
traversal = traversal.by(__.outE(ColumnUsageModel.TABLE_USER_RELATION_TYPE).count()) # unique_usage
traversal = traversal.by(
__.inE(TableMetadata.TAG_TABLE_RELATION_TYPE).outV().values(METADATA_KEY_PROPERTY_NAME).fold()
) # tags
traversal = traversal.by(
__.out('HAS_BADGE').values('keys').dedup().fold()
) # badges
traversal = traversal.by(
__.out(DescriptionMetadata.PROGRAMMATIC_DESCRIPTION_NODE_LABEL).values('description').fold()
) # programmatic_descriptions
traversal = traversal.order().by(__.select('name'), Order.asc)
return traversal.toList()
def _user_search_query(graph: GraphTraversalSource, tag_filter: str) -> List[Dict]:
traversal = graph.V().hasLabel(User.USER_NODE_LABEL)
traversal = traversal.has(User.USER_NODE_FULL_NAME)
if tag_filter:
traversal = traversal.has('published_tag', tag_filter)
traversal = traversal.project(
'email',
'first_name',
'last_name',
'full_name',
'github_username',
'team_name',
'employee_type',
'manager_email',
'slack_id',
'is_active',
'role_name',
'total_read',
'total_own',
'total_follow'
)
traversal = traversal.by('email') # email
traversal = traversal.by('first_name') # first_name
traversal = traversal.by('last_name') # last_name
traversal = traversal.by('full_name') # full_name
traversal = traversal.by('github_username') # github_username
traversal = traversal.by('team_name') # team_name
traversal = traversal.by('employee_type') # employee_type
traversal = traversal.by(__.coalesce(
__.out(User.USER_MANAGER_RELATION_TYPE).values('email'),
__.constant(''))
) # manager_email
traversal = traversal.by('slack_id') # slack_id
traversal = traversal.by('is_active') # is_active
traversal = traversal.by('role_name') # role_name
traversal = traversal.by(__.coalesce(
__.outE(ColumnUsageModel.USER_TABLE_RELATION_TYPE).values('read_count'),
__.constant(0)
).sum()) # total_read
traversal = traversal.by(__.outE(OWNER_OF_OBJECT_RELATION_TYPE).fold().count()) # total_own
traversal = traversal.by(__.outE('FOLLOWED_BY').fold().count()) # total_follow
traversal = traversal.order().by(__.select('email'), Order.asc)
return traversal.toList()
def _dashboard_search_query(graph: GraphTraversalSource, tag_filter: str) -> List[Dict]:
traversal = graph.V().hasLabel(DashboardMetadata.DASHBOARD_NODE_LABEL)
traversal = traversal.has('name')
if tag_filter:
traversal = traversal.has('published_tag', tag_filter)
traversal = traversal.project(
'group_name',
'name',
'cluster',
'description',
'group_description',
'group_url',
'url',
'uri',
'last_successful_run_timestamp',
'query_names',
'chart_names',
'total_usage',
'tags',
'badges'
)
traversal = traversal.by(
__.out(DashboardMetadata.DASHBOARD_DASHBOARD_GROUP_RELATION_TYPE).values('name')
) # group_name
traversal = traversal.by('name') # name
traversal = traversal.by(
__.out(
DashboardMetadata.DASHBOARD_DASHBOARD_GROUP_RELATION_TYPE
).out(
DashboardMetadata.DASHBOARD_GROUP_CLUSTER_RELATION_TYPE
).values('name')
) # cluster
traversal = traversal.by(__.coalesce(
__.out(DashboardMetadata.DASHBOARD_DESCRIPTION_RELATION_TYPE).values('description'),
__.constant('')
)) # description
traversal = traversal.by(__.coalesce(
__.out(DashboardMetadata.DASHBOARD_DASHBOARD_GROUP_RELATION_TYPE).out(
DashboardMetadata.DASHBOARD_DESCRIPTION_RELATION_TYPE
).values('description'),
__.constant('')
)) # group_description
traversal = traversal.by(
__.out(DashboardMetadata.DASHBOARD_DASHBOARD_GROUP_RELATION_TYPE).values('dashboard_group_url')
) # group_url
traversal = traversal.by('dashboard_url') # dashboard_url
traversal = traversal.by('key') # uri
traversal = traversal.by(
__.coalesce(
__.out('EXECUTED').has('key', TextP.endingWith('_last_successful_execution')).values('timestamp'),
__.constant('')
)
) # last_successful_run_timestamp
traversal = traversal.by(
__.out(DashboardQuery.DASHBOARD_QUERY_RELATION_TYPE).values('name').dedup().fold()
) # query_names
traversal = traversal.by(
__.out(
DashboardQuery.DASHBOARD_QUERY_RELATION_TYPE
).out(DashboardChart.CHART_RELATION_TYPE).values('name').dedup().fold()
) # chart_names
traversal = traversal.by(__.coalesce(
__.outE(READ_REVERSE_RELATION_TYPE).values(READ_RELATION_COUNT_PROPERTY),
__.constant(0)
).sum()) # total_usage
traversal = traversal.by(
__.out('TAGGED_BY').has('tag_type', 'default').values('keys').dedup().fold()
) # tags
traversal = traversal.by(
__.out('HAS_BADGE').values('keys').dedup().fold()
) # badges
traversal = traversal.order().by(__.select('name'), Order.asc)
dashboards = traversal.toList()
for dashboard in dashboards:
dashboard['product'] = dashboard['uri'].split('_')[0]
return dashboards
class NeptuneSearchDataExtractor(Extractor):
"""
Extractor to fetch data required to support search from Neptune's graph database
"""
QUERY_FUNCTION_CONFIG_KEY = 'query_function'
QUERY_FUNCTION_KWARGS_CONFIG_KEY = 'query_function_kwargs'
ENTITY_TYPE_CONFIG_KEY = 'entity_type'
JOB_PUBLISH_TAG_CONFIG_KEY = 'job_publish_tag'
MODEL_CLASS_CONFIG_KEY = 'model_class'
DEFAULT_QUERY_BY_ENTITY = {
'table': _table_search_query,
'user': _user_search_query,
'dashboard': _dashboard_search_query
}
def init(self, conf: ConfigTree) -> None:
self.conf = conf
self.entity = conf.get_string(NeptuneSearchDataExtractor.ENTITY_TYPE_CONFIG_KEY, default='table').lower()
if NeptuneSearchDataExtractor.QUERY_FUNCTION_CONFIG_KEY in conf:
self.query_function = conf.get(NeptuneSearchDataExtractor.QUERY_FUNCTION_CONFIG_KEY)
else:
self.query_function = NeptuneSearchDataExtractor.DEFAULT_QUERY_BY_ENTITY[self.entity]
self.job_publish_tag = conf.get_string(NeptuneSearchDataExtractor.JOB_PUBLISH_TAG_CONFIG_KEY, '')
self.neptune_client = NeptuneSessionClient()
neptune_client_conf = Scoped.get_scoped_conf(conf, self.neptune_client.get_scope())
self.neptune_client.init(neptune_client_conf)
model_class = conf.get(NeptuneSearchDataExtractor.MODEL_CLASS_CONFIG_KEY, None)
if model_class:
module_name, class_name = model_class.rsplit(".", 1)
mod = importlib.import_module(module_name)
self.model_class = getattr(mod, class_name)
self._extract_iter: Optional[Any] = None
def close(self) -> None:
self.neptune_client.close()
def extract(self) -> Optional[Any]:
if not self._extract_iter:
self._extract_iter = self._get_extract_iter()
try:
return next(self._extract_iter)
except StopIteration:
return None
def _get_extract_iter(self) -> Any:
if not hasattr(self, 'results'):
self.results = self.query_function(self.neptune_client.get_graph(), tag_filter=self.job_publish_tag)
for result in self.results:
if hasattr(self, 'model_class'):
obj = self.model_class(**result)
yield obj
else:
yield result
def get_scope(self) -> str:
return 'extractor.neptune_search_data'
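# Illustrative sketch, not part of the original module: MODEL_CLASS_CONFIG_KEY holds a dotted
# path that init() resolves into a class at runtime. The same pattern in isolation, with a
# stdlib class standing in for a real databuilder model (the path below is a placeholder):
#
#     import importlib
#     dotted_path = "collections.OrderedDict"
#     module_name, class_name = dotted_path.rsplit(".", 1)
#     model_class = getattr(importlib.import_module(module_name), class_name)
#     obj = model_class()  # the extractor instead calls model_class(**query_result) per row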
|
py | b404f11cacc8426426417807b46e00e1b3d4323b | from RLTest import Env
from test_helper_classes import ALLOWED_ERROR, _insert_data, _get_ts_info
def test_simple_dump_restore():
with Env().getClusterConnectionIfNeeded() as r:
r.execute_command('ts.create', 'test_key', 'UNCOMPRESSED')
r.execute_command('ts.add', 'test_key', 1, 1)
dump = r.execute_command('dump', 'test_key')
r.execute_command('del', 'test_key')
r.execute_command('restore', 'test_key', 0, dump)
def test_rdb():
start_ts = 1511885909
samples_count = 1500
data = None
key_name = 'tester{abc}'
with Env().getClusterConnectionIfNeeded() as r:
assert r.execute_command('TS.CREATE', key_name, 'RETENTION', '0', 'CHUNK_SIZE', '360', 'LABELS', 'name',
'brown', 'color', 'pink')
assert r.execute_command('TS.CREATE', '{}_agg_avg_10'.format(key_name))
assert r.execute_command('TS.CREATE', '{}_agg_max_10'.format(key_name))
assert r.execute_command('TS.CREATE', '{}_agg_sum_10'.format(key_name))
assert r.execute_command('TS.CREATE', '{}_agg_stds_10'.format(key_name))
assert r.execute_command('TS.CREATERULE', key_name, '{}_agg_avg_10'.format(key_name), 'AGGREGATION', 'AVG', 10)
assert r.execute_command('TS.CREATERULE', key_name, '{}_agg_max_10'.format(key_name), 'AGGREGATION', 'MAX', 10)
assert r.execute_command('TS.CREATERULE', key_name, '{}_agg_sum_10'.format(key_name), 'AGGREGATION', 'SUM', 10)
assert r.execute_command('TS.CREATERULE', key_name, '{}_agg_stds_10'.format(key_name), 'AGGREGATION', 'STD.S', 10)
_insert_data(r, key_name, start_ts, samples_count, 5)
data = r.execute_command('DUMP', key_name)
avg_data = r.execute_command('DUMP', '{}_agg_avg_10'.format(key_name))
r.execute_command('DEL', key_name, '{}_agg_avg_10'.format(key_name))
r.execute_command('RESTORE', key_name, 0, data)
r.execute_command('RESTORE', '{}_agg_avg_10'.format(key_name), 0, avg_data)
expected_result = [[start_ts + i, b'5'] for i in range(samples_count)]
actual_result = r.execute_command('TS.range', key_name, start_ts, start_ts + samples_count)
assert expected_result == actual_result
actual_result = r.execute_command('TS.range', key_name, start_ts, start_ts + samples_count, 'count', 3)
assert expected_result[:3] == actual_result
assert _get_ts_info(r, key_name).rules == [[bytes('{}_agg_avg_10'.format(key_name), encoding="ascii"), 10, b'AVG'],
[bytes('{}_agg_max_10'.format(key_name), encoding="ascii"), 10, b'MAX'],
[bytes('{}_agg_sum_10'.format(key_name), encoding="ascii"), 10, b'SUM'],
[bytes('{}_agg_stds_10'.format(key_name), encoding="ascii"), 10, b'STD.S']]
assert _get_ts_info(r, '{}_agg_avg_10'.format(key_name)).sourceKey == bytes(key_name, encoding="ascii")
def test_rdb_aggregation_context():
"""
Check that the aggregation context of the rules is saved in rdb. Write data with not a full bucket,
then save it and restore, add more data to the bucket and check the rules results considered the previous data
that was in that bucket in their calculation. Check on avg and min, since all the other rules use the same
context as min.
"""
start_ts = 3
samples_count = 4 # 1 full bucket and another one with 1 value
key_name = 'tester{abc}'
with Env().getClusterConnectionIfNeeded() as r:
assert r.execute_command('TS.CREATE', key_name)
assert r.execute_command('TS.CREATE', '{}_agg_avg_3'.format(key_name))
assert r.execute_command('TS.CREATE', '{}_agg_min_3'.format(key_name))
assert r.execute_command('TS.CREATE', '{}_agg_sum_3'.format(key_name))
assert r.execute_command('TS.CREATE', '{}_agg_std_3'.format(key_name))
assert r.execute_command('TS.CREATERULE', key_name, '{}_agg_avg_3'.format(key_name), 'AGGREGATION', 'AVG', 3)
assert r.execute_command('TS.CREATERULE', key_name, '{}_agg_min_3'.format(key_name), 'AGGREGATION', 'MIN', 3)
assert r.execute_command('TS.CREATERULE', key_name, '{}_agg_sum_3'.format(key_name), 'AGGREGATION', 'SUM', 3)
assert r.execute_command('TS.CREATERULE', key_name, '{}_agg_std_3'.format(key_name), 'AGGREGATION', 'STD.S', 3)
_insert_data(r, key_name, start_ts, samples_count, list(range(samples_count)))
data_tester = r.execute_command('dump', key_name)
data_avg_tester = r.execute_command('dump', '{}_agg_avg_3'.format(key_name))
data_min_tester = r.execute_command('dump', '{}_agg_min_3'.format(key_name))
data_sum_tester = r.execute_command('dump', '{}_agg_sum_3'.format(key_name))
data_std_tester = r.execute_command('dump', '{}_agg_std_3'.format(key_name))
r.execute_command('DEL',
key_name,
'{}_agg_avg_3'.format(key_name),
'{}_agg_min_3'.format(key_name),
'{}_agg_sum_3'.format(key_name),
'{}_agg_std_3'.format(key_name))
r.execute_command('RESTORE', key_name, 0, data_tester)
r.execute_command('RESTORE', '{}_agg_avg_3'.format(key_name), 0, data_avg_tester)
r.execute_command('RESTORE', '{}_agg_min_3'.format(key_name), 0, data_min_tester)
r.execute_command('RESTORE', '{}_agg_sum_3'.format(key_name), 0, data_sum_tester)
r.execute_command('RESTORE', '{}_agg_std_3'.format(key_name), 0, data_std_tester)
assert r.execute_command('TS.ADD', key_name, start_ts + samples_count, samples_count)
assert r.execute_command('TS.ADD', key_name, start_ts + samples_count + 10, 0) # closes the last time_bucket
# if the aggregation context wasn't saved, the results were considering only the new value added
expected_result_avg = [[start_ts, b'1'], [start_ts + 3, b'3.5']]
expected_result_min = [[start_ts, b'0'], [start_ts + 3, b'3']]
expected_result_sum = [[start_ts, b'3'], [start_ts + 3, b'7']]
expected_result_std = [[start_ts, b'1'], [start_ts + 3, b'0.7071']]
actual_result_avg = r.execute_command('TS.range', '{}_agg_avg_3'.format(key_name), start_ts, start_ts + samples_count)
assert actual_result_avg == expected_result_avg
actual_result_min = r.execute_command('TS.range', '{}_agg_min_3'.format(key_name), start_ts, start_ts + samples_count)
assert actual_result_min == expected_result_min
actual_result_sum = r.execute_command('TS.range', '{}_agg_sum_3'.format(key_name), start_ts, start_ts + samples_count)
assert actual_result_sum == expected_result_sum
actual_result_std = r.execute_command('TS.range', '{}_agg_std_3'.format(key_name), start_ts, start_ts + samples_count)
assert actual_result_std[0] == expected_result_std[0]
assert abs(float(actual_result_std[1][1]) - float(expected_result_std[1][1])) < ALLOWED_ERROR
def test_dump_trimmed_series():
with Env().getClusterConnectionIfNeeded() as r:
samples = 120
start_ts = 1589461305983
r.execute_command('ts.create', 'test_key', 'RETENTION', 3000, 'CHUNK_SIZE', 160, 'UNCOMPRESSED')
for i in range(1, samples):
r.execute_command('ts.add', 'test_key', start_ts + i * 1000, i)
assert r.execute_command('ts.range', 'test_key', 0, -1) == \
[[1589461421983, b'116'], [1589461422983, b'117'], [1589461423983, b'118'], [1589461424983, b'119']]
before = r.execute_command('ts.range', 'test_key', '-', '+')
dump = r.execute_command('dump', 'test_key')
r.execute_command('del', 'test_key')
r.execute_command('restore', 'test_key', 0, dump)
assert r.execute_command('ts.range', 'test_key', '-', '+') == before
def test_empty_series():
with Env().getClusterConnectionIfNeeded() as r:
assert r.execute_command('TS.CREATE', 'tester')
agg_list = ['avg', 'sum', 'min', 'max', 'range', 'first', 'last',
'std.p', 'std.s', 'var.p', 'var.s']
for agg in agg_list:
assert [] == r.execute_command('TS.range', 'tester', 0, -1, 'aggregation', agg, 1000)
assert r.execute_command('DUMP', 'tester')
|
py | b404f2060b02d485edcadf3935d35f0e9a9d6d57 | import logging
from celery.task import task
from changesets.models import Changeset
logger = logging.getLogger(__name__)
@task
def execute_changeset(changeset_pk):
try:
changeset = Changeset.objects.get(pk=changeset_pk)
except Changeset.DoesNotExist:
logger.error("No changeset with pk=%s", changeset_pk)
else:
try:
changeset.execute()
except Exception:
# Catch any exception, to make sure it gets logged
logger.exception("Error executing changeset %s", changeset)
|
py | b404f214caa1b5d7050f9e4acfcf338b290e3f54 | import torch
import numpy as np
from torch.distributions import Categorical
from models import PolicyConv
class Agent(object):
def __init__(self):
self.train_device = "cuda"
self.policy = PolicyConv(3, 128).to(self.train_device)
self.prev_obs = None
self.policy.eval()
def replace_policy(self):
self.old_policy.load_state_dict(self.policy.state_dict())
def get_action(self, observation):
x = self.preprocess(observation).to(self.train_device)
dist, value = self.policy.forward(x)
action = torch.argmax(dist.probs)
return action
def reset(self):
self.prev_obs = None
def get_name(self):
return "Some other agent"
def load_model(self):
weights = torch.load("model.mdl")
self.policy.load_state_dict(weights, strict=False)
def preprocess(self, observation):
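# Downsample the frame by 2 in both dimensions, collapse RGB to grayscale with a channel mean,
# stack the previous and current frames as a 2-channel image, and move the channel axis to
# dim 1 before feeding the convolutional policy.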
observation = observation[::2, ::2].mean(axis=-1)
observation = np.expand_dims(observation, axis=-1)
if self.prev_obs is None:
self.prev_obs = observation
stack_ob = np.concatenate((self.prev_obs, observation), axis=-1)
stack_ob = torch.from_numpy(stack_ob).float().unsqueeze(0)
stack_ob = stack_ob.transpose(1, 3)
self.prev_obs = observation
return stack_ob
|
py | b404f2bac8cf0d8123e855f88dc7c3fbc0c3f0d3 |
import django_rq
import redis
def new_account_sound():
r = redis.Redis()
r.publish('sound', 'new_account')
def deposit_sound():
r = redis.Redis()
r.publish('sound', 'deposit')
def withdraw_sound():
r = redis.Redis()
r.publish('sound', 'withdraw')
def logout_sound():
r = redis.Redis()
r.publish('sound', 'logout')
def login_sound():
r = redis.Redis()
r.publish('sound','login')
|
py | b404f2db833604151ba196541211f8793b10d1c2 | # Generated by Django 2.2 on 2020-09-25 14:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Zoo', '0002_auto_20200925_1156'),
]
operations = [
migrations.AlterField(
model_name='cuidador',
name='horas',
field=models.IntegerField(),
),
migrations.AlterField(
model_name='limpiador',
name='horas',
field=models.IntegerField(),
),
migrations.AlterField(
model_name='personal',
name='age',
field=models.IntegerField(),
),
]
|
py | b404f370500492a32d66c7d215ce482aaa8604b3 | from PIL import Image
def print_hello():
print("hello")
def print_str(s):
print(s)
def print_img(img):
print(img)
# r, g, b = img[:, :, 0], img[:, :, 1], img[:, :, 2]
# img = img[::-1] |
py | b404f42c16c1d3f10abd7ae37efeae92e082e0a2 | # ------------------------------------------------------------------------------------------------------
# Copyright (c) Leo Hanisch and contributors. All rights reserved.
# Licensed under the BSD 3-Clause License. See LICENSE.txt in the project root for license information.
# ------------------------------------------------------------------------------------------------------
# pylint: disable-msg=too-many-locals
from typing import Tuple
import numpy as np
from ..util.coordinate import Coordinate
class Wolf(Coordinate):
def step(self, a_parameter, alpha_pos: Tuple[float, float], beta_pos: Tuple[float, float],
delta_pos: Tuple[float, float]) -> None:
"""
Execute a wolf step.
Update the wolf's position and value.
Arguments:
a_parameter {float} -- The 'a' control coefficient of the grey wolf optimizer (conventionally decreased from 2 towards 0 over the iterations)
alpha_pos {Tuple[float, float]} -- The alpha position
beta_pos {Tuple[float, float]} -- The beta position
delta_pos {Tuple[float, float]} -- The delta position
"""
r_1 = np.random.random() # r_1 is a random number in [0,1]
r_2 = np.random.random() # r_2 is a random number in [0,1]
a_1 = 2 * a_parameter * r_1 - a_parameter # Equation (3.3)
c_1 = 2 * r_2 # Equation (3.4)
d_alpha = abs(c_1 * alpha_pos - self._position) # Equation (3.5)-part 1
x_1 = alpha_pos - a_1 * d_alpha # Equation (3.6)-part 1
r_1 = np.random.random()
r_2 = np.random.random()
a_2 = 2 * a_parameter * r_1 - a_parameter # Equation (3.3)
c_2 = 2 * r_2 # Equation (3.4)
d_beta = abs(c_2 * beta_pos - self._position) # Equation (3.5)-part 2
x_2 = beta_pos - a_2 * d_beta # Equation (3.6)-part 2
r_1 = np.random.random()
r_2 = np.random.random()
a_3 = 2 * a_parameter * r_1 - a_parameter # Equation (3.3)
c_3 = 2 * r_2 # Equation (3.4)
d_delta = abs(c_3 * delta_pos - self._position) # Equation (3.5)-part 3
x_3 = delta_pos - a_3 * d_delta # Equation (3.6)-part 3
self._position = (x_1 + x_2 + x_3) / 3 # Equation (3.7)
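# Illustrative sketch, not part of the original module: one numeric pass through equations
# (3.3)-(3.7) with made-up one-dimensional positions, to show how the three leaders pull the
# update. All values below are invented for illustration.
#
#     import numpy as np
#     pos, a_parameter = 5.0, 1.5
#     x_parts = []
#     for leader in (1.0, 2.0, 3.0):  # stand-ins for the alpha, beta and delta positions
#         a_i = 2 * a_parameter * np.random.random() - a_parameter  # Equation (3.3)
#         c_i = 2 * np.random.random()                              # Equation (3.4)
#         d_i = abs(c_i * leader - pos)                             # Equation (3.5)
#         x_parts.append(leader - a_i * d_i)                        # Equation (3.6)
#     new_pos = sum(x_parts) / 3                                    # Equation (3.7)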
|
py | b404f4cee03f40f1e1926b5d2a9db286e2d876a2 | from proxypool.tester import Tester
from proxypool.db import RedisClient
from proxypool.crawler import Crawler
from proxypool.setting import *
import sys
class Getter():
def __init__(self):
self.redis = RedisClient()
self.crawler = Crawler()
def is_over_threshold(self):
"""
Check whether the proxy pool has reached its size limit
"""
if self.redis.count() >= POOL_UPPER_THRESHOLD:
return True
else:
return False
def run(self):
print('获取器开始执行')
if not self.is_over_threshold():
for callback_label in range(self.crawler.__CrawlFuncCount__):
callback = self.crawler.__CrawlFunc__[callback_label]
# fetch proxies from this crawl function
proxies = self.crawler.get_proxies(callback)
sys.stdout.flush()
try:
for proxy in proxies:
self.redis.add(proxy)
except TypeError:
print('请检查部分免费代理网站的响应页面,是否还有代理信息。')
|
py | b404f4d40f342b7ba3412300c694bfc6ac7f7f18 | # Write a procedure, rotate which takes as its input a string of lower case
# letters, a-z, and spaces, and an integer n, and returns the string constructed
# by shifting each of the letters n steps, and leaving the spaces unchanged.
# Note that 'a' follows 'z'. You can use an additional procedure if you
# choose to as long as rotate returns the correct string.
# Note that n can be positive, negative or zero.
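# Worked example of the wrap-around arithmetic used below: shifting 'z' by 2 gives
# ord('z') - 97 = 25, 25 + 2 = 27, 27 % 26 = 1, 1 + 97 = ord('b'), so 'z' -> 'b'.
# A negative n behaves the same way because Python's % always returns a value in [0, 26).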
from ex20_shift import shift_n_letters
from UnitaryTest.test_tools import TestTools
def rotate_improved(string, n):
return ''.join([chr(((ord(i)-97+n)%26)+97) if i != ' ' else ' ' for i in string])
def rotate(words, n):
output = []
for i in words:
if i == ' ':
output.append(i)
else:
shifted = shift_n_letters(i, n)
output.append(shifted)
return ''.join(output)
def main():
t = TestTools()
t.new_test('test 1-->rotate')
t.evaluate_result(rotate('sarah', 13), expected='fnenu')
t.new_test('test 2-->rotate')
t.evaluate_result(rotate('fnenu', 13), expected='sarah')
t.new_test('test 3-->rotate')
t.evaluate_result(rotate('dave', 5), expected='ifaj')
t.new_test('test 4-->rotate')
t.evaluate_result(rotate('ifaj',-5), expected='dave')
t.new_test('test 5-->rotate')
t.evaluate_result(rotate("zw pfli tfuv nfibj tfiivtkcp pfl jyflcu sv rscv kf ivru kyzj",-17),
expected='if your code works correctly you should be able to read this')
t.new_test('test 6-->rotate')
t.evaluate_result(rotate('this course teaches you to code', 7),
expected='aopz jvbyzl alhjolz fvb av jvkl')
if __name__ == '__main__':
main()
|
py | b404f5c538ddf7d51a2ad3b956af8ef578b2538e | from django.contrib.auth.models import AnonymousUser, Group, Permission
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from wagtail.documents.models import Document
from wagtail.models import Collection, GroupCollectionPermission
from wagtail.permission_policies.collections import (
CollectionMangementPermissionPolicy,
CollectionOwnershipPermissionPolicy,
CollectionPermissionPolicy,
)
from wagtail.test.utils import WagtailTestUtils
from wagtail.tests.test_permission_policies import PermissionPolicyTestUtils
class PermissionPolicyTestCase(PermissionPolicyTestUtils, TestCase, WagtailTestUtils):
def setUp(self):
# Permissions
document_content_type = ContentType.objects.get_for_model(Document)
add_doc_permission = Permission.objects.get(
content_type=document_content_type, codename="add_document"
)
change_doc_permission = Permission.objects.get(
content_type=document_content_type, codename="change_document"
)
# Collections
self.root_collection = Collection.get_first_root_node()
self.reports_collection = self.root_collection.add_child(name="Reports")
# Groups
doc_changers_group = Group.objects.create(name="Document changers")
GroupCollectionPermission.objects.create(
group=doc_changers_group,
collection=self.root_collection,
permission=change_doc_permission,
)
report_changers_group = Group.objects.create(name="Report changers")
GroupCollectionPermission.objects.create(
group=report_changers_group,
collection=self.reports_collection,
permission=change_doc_permission,
)
report_adders_group = Group.objects.create(name="Report adders")
GroupCollectionPermission.objects.create(
group=report_adders_group,
collection=self.reports_collection,
permission=add_doc_permission,
)
# Users
self.superuser = self.create_superuser(
"superuser", "[email protected]", "password"
)
self.inactive_superuser = self.create_superuser(
"inactivesuperuser", "[email protected]", "password"
)
self.inactive_superuser.is_active = False
self.inactive_superuser.save()
# a user with change_document permission through the 'Document changers' group
self.doc_changer = self.create_user(
"docchanger", "[email protected]", "password"
)
self.doc_changer.groups.add(doc_changers_group)
# a user that has change_document permission, but is inactive
self.inactive_doc_changer = self.create_user(
"inactivedocchanger", "[email protected]", "password"
)
self.inactive_doc_changer.groups.add(doc_changers_group)
self.inactive_doc_changer.is_active = False
self.inactive_doc_changer.save()
# a user with change_document permission on reports via the report_changers group
self.report_changer = self.create_user(
"reportchanger", "[email protected]", "password"
)
self.report_changer.groups.add(report_changers_group)
# a user with add_document permission on reports via the report_adders group
self.report_adder = self.create_user(
"reportadder", "[email protected]", "password"
)
self.report_adder.groups.add(report_adders_group)
# a user with no permissions
self.useless_user = self.create_user(
"uselessuser", "[email protected]", "password"
)
self.anonymous_user = AnonymousUser()
# Documents
# a document in the root owned by 'reportchanger'
self.changer_doc = Document.objects.create(
title="reportchanger's document",
collection=self.root_collection,
uploaded_by_user=self.report_changer,
)
# a document in reports owned by 'reportchanger'
self.changer_report = Document.objects.create(
title="reportchanger's report",
collection=self.reports_collection,
uploaded_by_user=self.report_changer,
)
# a document in reports owned by 'reportadder'
self.adder_report = Document.objects.create(
title="reportadder's report",
collection=self.reports_collection,
uploaded_by_user=self.report_adder,
)
# a document in reports owned by 'uselessuser'
self.useless_report = Document.objects.create(
title="uselessuser's report",
collection=self.reports_collection,
uploaded_by_user=self.useless_user,
)
# a document with no owner
self.anonymous_report = Document.objects.create(
title="anonymous report", collection=self.reports_collection
)
class TestCollectionPermissionPolicy(PermissionPolicyTestCase):
def setUp(self):
super().setUp()
self.policy = CollectionPermissionPolicy(Document)
def test_user_has_permission(self):
self.assertUserPermissionMatrix(
[
(self.superuser, True, True, True, True),
(self.inactive_superuser, False, False, False, False),
(self.doc_changer, False, True, False, False),
(self.inactive_doc_changer, False, False, False, False),
(self.report_changer, False, True, False, False),
(self.report_adder, True, False, False, False),
(self.useless_user, False, False, False, False),
(self.anonymous_user, False, False, False, False),
]
)
def test_user_has_any_permission(self):
self.assertTrue(
self.policy.user_has_any_permission(self.superuser, ["add", "change"])
)
self.assertFalse(
self.policy.user_has_any_permission(
self.inactive_superuser, ["add", "change"]
)
)
self.assertTrue(
self.policy.user_has_any_permission(self.report_changer, ["add", "change"])
)
self.assertTrue(
self.policy.user_has_any_permission(self.report_adder, ["add", "change"])
)
self.assertFalse(
self.policy.user_has_any_permission(self.anonymous_user, ["add", "change"])
)
def test_users_with_any_permission(self):
users_with_add_or_change_permission = self.policy.users_with_any_permission(
["add", "change"]
)
self.assertResultSetEqual(
users_with_add_or_change_permission,
[
self.superuser,
self.doc_changer,
self.report_changer,
self.report_adder,
],
)
def test_users_with_permission(self):
users_with_change_permission = self.policy.users_with_permission("change")
self.assertResultSetEqual(
users_with_change_permission,
[
self.superuser,
self.doc_changer,
self.report_changer,
],
)
users_with_custom_permission = self.policy.users_with_permission("frobnicate")
self.assertResultSetEqual(
users_with_custom_permission,
[
self.superuser,
],
)
def test_user_has_permission_for_instance(self):
# document in the root is only editable by users with permissions
# on the root collection
self.assertUserInstancePermissionMatrix(
self.changer_doc,
[
(self.superuser, True, True, True),
(self.inactive_superuser, False, False, False),
(self.doc_changer, True, False, False),
(self.inactive_doc_changer, False, False, False),
(self.report_changer, False, False, False),
(self.report_adder, False, False, False),
(self.useless_user, False, False, False),
(self.anonymous_user, False, False, False),
],
)
# document in 'reports' is editable by users with permissions
# on 'reports' or the root collection
self.assertUserInstancePermissionMatrix(
self.useless_report,
[
(self.superuser, True, True, True),
(self.inactive_superuser, False, False, False),
(self.doc_changer, True, False, False),
(self.inactive_doc_changer, False, False, False),
(self.report_changer, True, False, False),
(self.report_adder, False, False, False),
(self.useless_user, False, False, False),
(self.anonymous_user, False, False, False),
],
)
def test_user_has_any_permission_for_instance(self):
self.assertTrue(
self.policy.user_has_any_permission_for_instance(
self.report_changer, ["change", "delete"], self.useless_report
)
)
self.assertFalse(
self.policy.user_has_any_permission_for_instance(
self.report_changer, ["change", "delete"], self.changer_doc
)
)
self.assertFalse(
self.policy.user_has_any_permission_for_instance(
self.anonymous_user, ["change", "delete"], self.changer_doc
)
)
def test_instances_user_has_permission_for(self):
self.assertResultSetEqual(
self.policy.instances_user_has_permission_for(
self.superuser,
"change",
),
[
self.changer_doc,
self.changer_report,
self.adder_report,
self.useless_report,
self.anonymous_report,
],
)
self.assertResultSetEqual(
self.policy.instances_user_has_permission_for(
self.inactive_superuser,
"change",
),
[],
)
self.assertResultSetEqual(
self.policy.instances_user_has_permission_for(
self.doc_changer,
"change",
),
[
self.changer_doc,
self.changer_report,
self.adder_report,
self.useless_report,
self.anonymous_report,
],
)
self.assertResultSetEqual(
self.policy.instances_user_has_permission_for(
self.report_changer,
"change",
),
[
self.changer_report,
self.useless_report,
self.adder_report,
self.anonymous_report,
],
)
self.assertResultSetEqual(
self.policy.instances_user_has_permission_for(
self.useless_user,
"change",
),
[],
)
self.assertResultSetEqual(
self.policy.instances_user_has_permission_for(
self.anonymous_user,
"change",
),
[],
)
def test_instances_user_has_any_permission_for(self):
self.assertResultSetEqual(
self.policy.instances_user_has_any_permission_for(
self.superuser, ["change", "delete"]
),
[
self.changer_doc,
self.changer_report,
self.adder_report,
self.useless_report,
self.anonymous_report,
],
)
self.assertResultSetEqual(
self.policy.instances_user_has_any_permission_for(
self.inactive_superuser, ["change", "delete"]
),
[],
)
self.assertResultSetEqual(
self.policy.instances_user_has_any_permission_for(
self.doc_changer, ["change", "delete"]
),
[
self.changer_doc,
self.changer_report,
self.adder_report,
self.useless_report,
self.anonymous_report,
],
)
self.assertResultSetEqual(
self.policy.instances_user_has_any_permission_for(
self.report_changer, ["change", "delete"]
),
[
self.changer_report,
self.adder_report,
self.useless_report,
self.anonymous_report,
],
)
self.assertResultSetEqual(
self.policy.instances_user_has_any_permission_for(
self.useless_user, ["change", "delete"]
),
[],
)
self.assertResultSetEqual(
self.policy.instances_user_has_any_permission_for(
self.anonymous_user, ["change", "delete"]
),
[],
)
def test_users_with_permission_for_instance(self):
self.assertResultSetEqual(
self.policy.users_with_permission_for_instance("change", self.changer_doc),
[self.superuser, self.doc_changer],
)
self.assertResultSetEqual(
self.policy.users_with_permission_for_instance("change", self.adder_report),
[self.superuser, self.doc_changer, self.report_changer],
)
self.assertResultSetEqual(
self.policy.users_with_permission_for_instance(
"change", self.changer_report
),
[self.superuser, self.doc_changer, self.report_changer],
)
self.assertResultSetEqual(
self.policy.users_with_permission_for_instance(
"change", self.useless_report
),
[self.superuser, self.doc_changer, self.report_changer],
)
self.assertResultSetEqual(
self.policy.users_with_permission_for_instance(
"change", self.anonymous_report
),
[self.superuser, self.doc_changer, self.report_changer],
)
def test_users_with_any_permission_for_instance(self):
self.assertResultSetEqual(
self.policy.users_with_any_permission_for_instance(
["change", "delete"], self.changer_doc
),
[self.superuser, self.doc_changer],
)
self.assertResultSetEqual(
self.policy.users_with_any_permission_for_instance(
["change", "delete"], self.adder_report
),
[self.superuser, self.doc_changer, self.report_changer],
)
self.assertResultSetEqual(
self.policy.users_with_any_permission_for_instance(
["change", "delete"], self.useless_report
),
[self.superuser, self.doc_changer, self.report_changer],
)
self.assertResultSetEqual(
self.policy.users_with_any_permission_for_instance(
["delete", "frobnicate"], self.useless_report
),
[self.superuser],
)
def test_collections_user_has_permission_for(self):
self.assertResultSetEqual(
self.policy.collections_user_has_permission_for(
self.superuser,
"change",
),
[self.root_collection, self.reports_collection],
)
self.assertResultSetEqual(
self.policy.collections_user_has_permission_for(
self.inactive_superuser,
"change",
),
[],
)
self.assertResultSetEqual(
self.policy.collections_user_has_permission_for(
self.doc_changer,
"change",
),
[self.root_collection, self.reports_collection],
)
self.assertResultSetEqual(
self.policy.collections_user_has_permission_for(
self.report_changer,
"change",
),
[self.reports_collection],
)
self.assertResultSetEqual(
self.policy.collections_user_has_permission_for(
self.report_adder,
"change",
),
[],
)
self.assertResultSetEqual(
self.policy.collections_user_has_permission_for(
self.report_adder,
"add",
),
[self.reports_collection],
)
self.assertResultSetEqual(
self.policy.collections_user_has_permission_for(
self.useless_user,
"change",
),
[],
)
self.assertResultSetEqual(
self.policy.collections_user_has_permission_for(
self.anonymous_user,
"change",
),
[],
)
def test_collections_user_has_any_permission_for(self):
self.assertResultSetEqual(
self.policy.collections_user_has_any_permission_for(
self.superuser, ["change", "delete"]
),
[self.root_collection, self.reports_collection],
)
self.assertResultSetEqual(
self.policy.collections_user_has_any_permission_for(
self.inactive_superuser, ["change", "delete"]
),
[],
)
self.assertResultSetEqual(
self.policy.collections_user_has_any_permission_for(
self.doc_changer, ["change", "delete"]
),
[self.root_collection, self.reports_collection],
)
self.assertResultSetEqual(
self.policy.collections_user_has_any_permission_for(
self.report_changer, ["change", "delete"]
),
[self.reports_collection],
)
self.assertResultSetEqual(
self.policy.collections_user_has_any_permission_for(
self.report_adder, ["change", "delete"]
),
[],
)
self.assertResultSetEqual(
self.policy.collections_user_has_any_permission_for(
self.report_adder, ["add", "delete"]
),
[self.reports_collection],
)
self.assertResultSetEqual(
self.policy.collections_user_has_any_permission_for(
self.useless_user, ["change", "delete"]
),
[],
)
self.assertResultSetEqual(
self.policy.collections_user_has_any_permission_for(
self.anonymous_user, ["change", "delete"]
),
[],
)
class TestCollectionOwnershipPermissionPolicy(PermissionPolicyTestCase):
def setUp(self):
super().setUp()
self.policy = CollectionOwnershipPermissionPolicy(
Document,
owner_field_name="uploaded_by_user",
)
def test_user_has_permission(self):
self.assertUserPermissionMatrix(
[
(self.superuser, True, True, True, True),
(self.inactive_superuser, False, False, False, False),
(self.doc_changer, False, True, True, False),
(self.inactive_doc_changer, False, False, False, False),
(self.report_changer, False, True, True, False),
(self.report_adder, True, True, True, False),
(self.useless_user, False, False, False, False),
(self.anonymous_user, False, False, False, False),
]
)
def test_user_has_any_permission(self):
self.assertTrue(
self.policy.user_has_any_permission(self.superuser, ["add", "change"])
)
self.assertFalse(
self.policy.user_has_any_permission(
self.inactive_superuser, ["add", "change"]
)
)
self.assertTrue(
self.policy.user_has_any_permission(self.report_changer, ["add", "delete"])
)
self.assertTrue(
self.policy.user_has_any_permission(self.report_adder, ["add", "change"])
)
self.assertTrue(
self.policy.user_has_any_permission(self.report_adder, ["change", "delete"])
)
self.assertFalse(
self.policy.user_has_any_permission(self.anonymous_user, ["add", "change"])
)
def test_users_with_any_permission(self):
users_with_add_or_change_permission = self.policy.users_with_any_permission(
["add", "change"]
)
self.assertResultSetEqual(
users_with_add_or_change_permission,
[
self.superuser,
self.doc_changer,
self.report_changer,
self.report_adder,
],
)
def test_users_with_permission(self):
users_with_change_permission = self.policy.users_with_permission("change")
self.assertResultSetEqual(
users_with_change_permission,
[
self.superuser,
self.doc_changer,
self.report_changer,
self.report_adder,
],
)
users_with_custom_permission = self.policy.users_with_permission("frobnicate")
self.assertResultSetEqual(
users_with_custom_permission,
[
self.superuser,
],
)
def test_user_has_permission_for_instance(self):
# document in the root is only editable by users with permissions
# on the root collection
self.assertUserInstancePermissionMatrix(
self.changer_doc,
[
(self.superuser, True, True, True),
(self.inactive_superuser, False, False, False),
(self.doc_changer, True, True, False),
(self.inactive_doc_changer, False, False, False),
(self.report_changer, False, False, False),
(self.report_adder, False, False, False),
(self.useless_user, False, False, False),
(self.anonymous_user, False, False, False),
],
)
# document in 'reports' is editable by users with permissions
# on 'reports' or the root collection
self.assertUserInstancePermissionMatrix(
self.useless_report,
[
(self.superuser, True, True, True),
(self.inactive_superuser, False, False, False),
(self.doc_changer, True, True, False),
(self.inactive_doc_changer, False, False, False),
(self.report_changer, True, True, False),
(self.report_adder, False, False, False),
(self.useless_user, False, False, False),
(self.anonymous_user, False, False, False),
],
)
# adder_report is editable by its owner (who only has 'add' permission)
self.assertUserInstancePermissionMatrix(
self.adder_report,
[
(self.superuser, True, True, True),
(self.inactive_superuser, False, False, False),
(self.doc_changer, True, True, False),
(self.inactive_doc_changer, False, False, False),
(self.report_changer, True, True, False),
(self.report_adder, True, True, False),
(self.useless_user, False, False, False),
(self.anonymous_user, False, False, False),
],
)
def test_user_has_any_permission_for_instance(self):
self.assertTrue(
self.policy.user_has_any_permission_for_instance(
self.report_changer, ["change", "delete"], self.useless_report
)
)
self.assertFalse(
self.policy.user_has_any_permission_for_instance(
self.report_changer, ["change", "delete"], self.changer_doc
)
)
self.assertFalse(
self.policy.user_has_any_permission_for_instance(
self.report_adder, ["change", "delete"], self.changer_doc
)
)
self.assertTrue(
self.policy.user_has_any_permission_for_instance(
self.report_adder, ["change", "delete"], self.adder_report
)
)
self.assertFalse(
self.policy.user_has_any_permission_for_instance(
self.anonymous_user, ["change", "delete"], self.changer_doc
)
)
def test_instances_user_has_permission_for(self):
self.assertResultSetEqual(
self.policy.instances_user_has_permission_for(
self.superuser,
"change",
),
[
self.changer_doc,
self.changer_report,
self.adder_report,
self.useless_report,
self.anonymous_report,
],
)
self.assertResultSetEqual(
self.policy.instances_user_has_permission_for(
self.inactive_superuser,
"change",
),
[],
)
self.assertResultSetEqual(
self.policy.instances_user_has_permission_for(
self.doc_changer,
"change",
),
[
self.changer_doc,
self.changer_report,
self.adder_report,
self.useless_report,
self.anonymous_report,
],
)
self.assertResultSetEqual(
self.policy.instances_user_has_permission_for(
self.report_changer,
"change",
),
[
self.changer_report,
self.useless_report,
self.adder_report,
self.anonymous_report,
],
)
self.assertResultSetEqual(
self.policy.instances_user_has_permission_for(
self.useless_user,
"change",
),
[],
)
self.assertResultSetEqual(
self.policy.instances_user_has_permission_for(
self.anonymous_user,
"change",
),
[],
)
self.assertResultSetEqual(
self.policy.instances_user_has_permission_for(
self.report_adder,
"change",
),
[
self.adder_report,
],
)
def test_instances_user_has_any_permission_for(self):
self.assertResultSetEqual(
self.policy.instances_user_has_any_permission_for(
self.superuser, ["change", "delete"]
),
[
self.changer_doc,
self.changer_report,
self.adder_report,
self.useless_report,
self.anonymous_report,
],
)
self.assertResultSetEqual(
self.policy.instances_user_has_any_permission_for(
self.inactive_superuser, ["change", "delete"]
),
[],
)
self.assertResultSetEqual(
self.policy.instances_user_has_any_permission_for(
self.doc_changer, ["change", "delete"]
),
[
self.changer_doc,
self.changer_report,
self.adder_report,
self.useless_report,
self.anonymous_report,
],
)
self.assertResultSetEqual(
self.policy.instances_user_has_any_permission_for(
self.report_changer, ["change", "delete"]
),
[
self.changer_report,
self.adder_report,
self.useless_report,
self.anonymous_report,
],
)
self.assertResultSetEqual(
self.policy.instances_user_has_any_permission_for(
self.useless_user, ["change", "delete"]
),
[],
)
self.assertResultSetEqual(
self.policy.instances_user_has_any_permission_for(
self.anonymous_user, ["change", "delete"]
),
[],
)
self.assertResultSetEqual(
self.policy.instances_user_has_any_permission_for(
self.report_adder,
["change", "delete"],
),
[
self.adder_report,
],
)
def test_users_with_permission_for_instance(self):
self.assertResultSetEqual(
self.policy.users_with_permission_for_instance("change", self.changer_doc),
[self.superuser, self.doc_changer],
)
self.assertResultSetEqual(
self.policy.users_with_permission_for_instance(
"change", self.changer_report
),
[self.superuser, self.doc_changer, self.report_changer],
)
self.assertResultSetEqual(
self.policy.users_with_permission_for_instance("change", self.adder_report),
[self.superuser, self.doc_changer, self.report_changer, self.report_adder],
)
self.assertResultSetEqual(
self.policy.users_with_permission_for_instance(
"change", self.useless_report
),
[self.superuser, self.doc_changer, self.report_changer],
)
self.assertResultSetEqual(
self.policy.users_with_permission_for_instance(
"change", self.anonymous_report
),
[self.superuser, self.doc_changer, self.report_changer],
)
def test_users_with_any_permission_for_instance(self):
self.assertResultSetEqual(
self.policy.users_with_any_permission_for_instance(
["change", "delete"], self.changer_doc
),
[self.superuser, self.doc_changer],
)
self.assertResultSetEqual(
self.policy.users_with_any_permission_for_instance(
["change", "delete"], self.adder_report
),
[self.superuser, self.doc_changer, self.report_changer, self.report_adder],
)
self.assertResultSetEqual(
self.policy.users_with_any_permission_for_instance(
["change", "delete"], self.useless_report
),
[self.superuser, self.doc_changer, self.report_changer],
)
self.assertResultSetEqual(
self.policy.users_with_any_permission_for_instance(
["delete", "frobnicate"], self.useless_report
),
[self.superuser, self.doc_changer, self.report_changer],
)
def test_collections_user_has_permission_for(self):
self.assertResultSetEqual(
self.policy.collections_user_has_permission_for(
self.superuser,
"change",
),
[self.root_collection, self.reports_collection],
)
self.assertResultSetEqual(
self.policy.collections_user_has_permission_for(
self.inactive_superuser,
"change",
),
[],
)
self.assertResultSetEqual(
self.policy.collections_user_has_permission_for(
self.doc_changer,
"change",
),
[self.root_collection, self.reports_collection],
)
self.assertResultSetEqual(
self.policy.collections_user_has_permission_for(
self.report_changer,
"change",
),
[self.reports_collection],
)
self.assertResultSetEqual(
self.policy.collections_user_has_permission_for(
self.report_adder,
"change",
),
[self.reports_collection],
)
self.assertResultSetEqual(
self.policy.collections_user_has_permission_for(
self.report_adder,
"add",
),
[self.reports_collection],
)
self.assertResultSetEqual(
self.policy.collections_user_has_permission_for(
self.useless_user,
"change",
),
[],
)
self.assertResultSetEqual(
self.policy.collections_user_has_permission_for(
self.anonymous_user,
"change",
),
[],
)
def test_collections_user_has_any_permission_for(self):
self.assertResultSetEqual(
self.policy.collections_user_has_any_permission_for(
self.superuser, ["change", "delete"]
),
[self.root_collection, self.reports_collection],
)
self.assertResultSetEqual(
self.policy.collections_user_has_any_permission_for(
self.inactive_superuser, ["change", "delete"]
),
[],
)
self.assertResultSetEqual(
self.policy.collections_user_has_any_permission_for(
self.doc_changer, ["change", "delete"]
),
[self.root_collection, self.reports_collection],
)
self.assertResultSetEqual(
self.policy.collections_user_has_any_permission_for(
self.report_changer, ["change", "delete"]
),
[self.reports_collection],
)
self.assertResultSetEqual(
self.policy.collections_user_has_any_permission_for(
self.report_adder, ["change", "delete"]
),
[self.reports_collection],
)
self.assertResultSetEqual(
self.policy.collections_user_has_any_permission_for(
self.report_adder, ["add", "delete"]
),
[self.reports_collection],
)
self.assertResultSetEqual(
self.policy.collections_user_has_any_permission_for(
self.useless_user, ["change", "delete"]
),
[],
)
self.assertResultSetEqual(
self.policy.collections_user_has_any_permission_for(
self.anonymous_user, ["change", "delete"]
),
[],
)
class TestCollectionManagementPermission(
PermissionPolicyTestUtils, TestCase, WagtailTestUtils
):
def setUp(self):
self.policy = CollectionMangementPermissionPolicy(Collection)
# Permissions
collection_content_type = ContentType.objects.get_for_model(Collection)
add_collection_permission = Permission.objects.get(
content_type=collection_content_type, codename="add_collection"
)
change_collection_permission = Permission.objects.get(
content_type=collection_content_type, codename="change_collection"
)
delete_collection_permission = Permission.objects.get(
content_type=collection_content_type, codename="delete_collection"
)
# Collections
self.root_collection = Collection.get_first_root_node()
self.reports_collection = self.root_collection.add_child(name="Reports")
self.reports_2020_collection = self.reports_collection.add_child(
name="Reports 2020"
)
# Users with their groups/permissions
self.superuser = self.create_superuser(
"superuser", "[email protected]", "password"
)
self.inactive_superuser = self.create_superuser(
"inactivesuperuser", "[email protected]", "password"
)
self.inactive_superuser.is_active = False
self.inactive_superuser.save()
# a user with change collection permission on reports via the report_changers group
report_changers_group = Group.objects.create(name="Report changers")
GroupCollectionPermission.objects.create(
group=report_changers_group,
collection=self.reports_collection,
permission=change_collection_permission,
)
self.report_changer = self.create_user(
"reportchanger", "[email protected]", "password"
)
self.report_changer.groups.add(report_changers_group)
# a user with add collection permission on reports via the report_adders group
report_adders_group = Group.objects.create(name="Report adders")
GroupCollectionPermission.objects.create(
group=report_adders_group,
collection=self.reports_collection,
permission=add_collection_permission,
)
self.report_adder = self.create_user(
"reportadder", "[email protected]", "password"
)
self.report_adder.groups.add(report_adders_group)
# a user with delete collection permission on reports via the report_deleters group
report_deleters_group = Group.objects.create(name="Report deleters")
GroupCollectionPermission.objects.create(
group=report_deleters_group,
collection=self.reports_collection,
permission=delete_collection_permission,
)
self.report_deleter = self.create_user(
"reportdeleter", "[email protected]", "password"
)
self.report_deleter.groups.add(report_deleters_group)
# a user with no permissions
self.useless_user = self.create_user(
"uselessuser", "[email protected]", "password"
)
self.anonymous_user = AnonymousUser()
def test_user_has_permission(self):
self.assertUserPermissionMatrix(
[
(self.superuser, True, True, True, True),
(self.inactive_superuser, False, False, False, False),
(self.report_changer, False, True, False, False),
(self.report_adder, True, False, False, False),
(self.report_deleter, False, False, True, False),
(self.useless_user, False, False, False, False),
(self.anonymous_user, False, False, False, False),
]
)
def test_user_has_any_permission(self):
users_with_permissions = [
self.superuser,
self.report_changer,
self.report_adder,
self.report_deleter,
]
users_without_permissions = [
self.inactive_superuser,
self.useless_user,
self.anonymous_user,
]
for user in users_with_permissions:
self.assertTrue(
self.policy.user_has_any_permission(user, ["add", "change", "delete"])
)
for user in users_without_permissions:
self.assertFalse(
self.policy.user_has_any_permission(user, ["add", "change", "delete"])
)
def test_users_with_any_permission(self):
users_with_add_or_change_or_delete_permission = (
self.policy.users_with_any_permission(["add", "change", "delete"])
)
self.assertResultSetEqual(
users_with_add_or_change_or_delete_permission,
[
self.superuser,
self.report_changer,
self.report_adder,
self.report_deleter,
],
)
def test_users_with_permission(self):
users_with_change_permission = self.policy.users_with_permission("change")
self.assertResultSetEqual(
users_with_change_permission,
[
self.superuser,
self.report_changer,
],
)
users_with_custom_permission = self.policy.users_with_permission("frobnicate")
self.assertResultSetEqual(
users_with_custom_permission,
[
self.superuser,
],
)
def test_only_superuser_has_permission_for_root_collection(self):
self.assertUserInstancePermissionMatrix(
self.root_collection,
[
(self.superuser, True, True, True),
(self.inactive_superuser, False, False, False),
(self.report_changer, False, False, False),
(self.report_adder, False, False, False),
(self.report_deleter, False, False, False),
(self.useless_user, False, False, False),
(self.anonymous_user, False, False, False),
],
)
def test_user_has_permission_for_instance(self):
# Reports collection is editable - as are its children
self.assertUserInstancePermissionMatrix(
self.reports_collection,
[
(self.superuser, True, True, True),
(self.inactive_superuser, False, False, False),
(self.report_changer, True, False, False),
(self.report_deleter, False, True, False),
(self.useless_user, False, False, False),
(self.anonymous_user, False, False, False),
],
)
self.assertUserInstancePermissionMatrix(
self.reports_2020_collection,
[
(self.superuser, True, True, True),
(self.inactive_superuser, False, False, False),
(self.report_changer, True, False, False),
(self.report_deleter, False, True, False),
(self.useless_user, False, False, False),
(self.anonymous_user, False, False, False),
],
)
def test_user_has_any_permission_for_instance(self):
users_with_permissions = [
self.superuser,
self.report_changer,
self.report_adder,
self.report_deleter,
]
for user in users_with_permissions:
self.assertTrue(
self.policy.user_has_any_permission_for_instance(
user, ["add", "change", "delete"], self.reports_collection
)
)
self.assertFalse(
self.policy.user_has_any_permission_for_instance(
self.report_adder, ["add", "change", "delete"], self.root_collection
)
)
self.assertTrue(
self.policy.user_has_any_permission_for_instance(
self.superuser, ["add", "change", "delete"], self.root_collection
)
)
def test_instances_user_has_permission_for(self):
self.assertResultSetEqual(
self.policy.instances_user_has_permission_for(self.superuser, "change"),
[
self.root_collection,
self.reports_collection,
self.reports_2020_collection,
],
)
self.assertResultSetEqual(
self.policy.instances_user_has_permission_for(self.report_adder, "add"),
[self.reports_collection, self.reports_2020_collection],
)
self.assertResultSetEqual(
self.policy.instances_user_has_permission_for(self.report_adder, "change"),
[],
)
self.assertResultSetEqual(
self.policy.instances_user_has_permission_for(
self.inactive_superuser, "change"
),
[],
)
def test_instances_user_has_any_permission_for(self):
self.assertResultSetEqual(
self.policy.instances_user_has_any_permission_for(
self.superuser, ["add", "change"]
),
[
self.root_collection,
self.reports_collection,
self.reports_2020_collection,
],
)
self.assertResultSetEqual(
self.policy.instances_user_has_any_permission_for(
self.report_adder, ["add", "change"]
),
[self.reports_collection, self.reports_2020_collection],
)
self.assertResultSetEqual(
self.policy.instances_user_has_any_permission_for(
self.inactive_superuser, ["add", "change"]
),
[],
)
def test_users_with_permission_for_instance(self):
self.assertResultSetEqual(
self.policy.users_with_permission_for_instance(
"change", self.root_collection
),
[self.superuser],
)
self.assertResultSetEqual(
self.policy.users_with_permission_for_instance(
"change", self.reports_collection
),
[self.superuser, self.report_changer],
)
self.assertResultSetEqual(
self.policy.users_with_permission_for_instance(
"add", self.reports_collection
),
[self.superuser, self.report_adder],
)
def test_users_with_any_permission_for_instance(self):
self.assertResultSetEqual(
self.policy.users_with_any_permission_for_instance(
["add", "change", "delete"], self.reports_2020_collection
),
[
self.superuser,
self.report_adder,
self.report_changer,
self.report_deleter,
],
)
def test_collections_user_has_permission_for(self):
self.assertResultSetEqual(
self.policy.collections_user_has_permission_for(self.superuser, "change"),
[
self.root_collection,
self.reports_collection,
self.reports_2020_collection,
],
)
self.assertResultSetEqual(
self.policy.collections_user_has_permission_for(self.report_adder, "add"),
[self.reports_collection, self.reports_2020_collection],
)
self.assertResultSetEqual(
self.policy.collections_user_has_permission_for(
self.report_adder, "change"
),
[],
)
self.assertResultSetEqual(
self.policy.collections_user_has_permission_for(
self.inactive_superuser, "change"
),
[],
)
def test_collections_user_has_any_permission_for(self):
self.assertResultSetEqual(
self.policy.collections_user_has_any_permission_for(
self.superuser, ["add", "change"]
),
[
self.root_collection,
self.reports_collection,
self.reports_2020_collection,
],
)
self.assertResultSetEqual(
self.policy.collections_user_has_any_permission_for(
self.report_adder, ["add", "change"]
),
[self.reports_collection, self.reports_2020_collection],
)
self.assertResultSetEqual(
self.policy.collections_user_has_any_permission_for(
self.inactive_superuser, ["add", "change"]
),
[],
)
|
py | b404f736ab019724d135df9fcdb32553c090510b | from allauth.account import app_settings
from allauth.account.views import LoginView, SignupView
from allauth.utils import get_form_class
class LoginSignupView(LoginView):
def get_context_data(self, **kwargs):
data = super(LoginSignupView, self).get_context_data(**kwargs)
signup_form_class = get_form_class(
app_settings.FORMS, 'signup', SignupView.form_class)
data['signup_form'] = signup_form_class()
return data
login_signup_view = LoginSignupView.as_view()
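# Illustrative wiring (an assumption, not shown in this module): the combined view can be
# routed in place of allauth's default login view, e.g.
#   urlpatterns = [path("accounts/login/", login_signup_view, name="account_login"), ...]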
|
py | b404f817fb64d0dbdbce0d13b10b504b8a4e3ba7 | #!/usr/bin/env python
# coding: utf-8
# # Primitive Roots
# ## About
# In modular arithmetic, a branch of number theory, a number g is a primitive root modulo n if every number a coprime to n is congruent to a power of g modulo n. That is, g is a primitive root modulo n if for every integer a coprime to n, there is an integer k such that g<sup>k</sup> ≡ a (mod n).
# ## Algorithm
#
# **x** is the **primitive root of q** if,
#
# **{x<sup>1</sup> mod q, x<sup>2</sup> mod q, x<sup>3</sup> mod q, ..., x<sup>q-1</sup> mod q} => {1, 2, 3, ..., q-1}**
#
# 1. Calculate value of **x<sup>i</sup> mod q**, for **x <= q-1 and i <= q-1**
# 2. The value of x, for which the corresponding set(s), **{x<sup>1</sup> mod q, x<sup>2</sup> mod q, x<sup>3</sup> mod q, ..., x<sup>q-1</sup> mod q} => {1, 2, 3, ..., q-1}** , is a a **Primitive Root of q**
# 3. Print the Primitive Root table.
# 4. Print the list of Primitive roots.
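# A quick worked example (added for illustration, not part of the original notebook):
# for q = 7 and x = 3, {3^1, 3^2, ..., 3^6} mod 7 = {3, 2, 6, 4, 5, 1} = {1, ..., 6},
# so 3 is a primitive root of 7; for x = 2 the powers give only {1, 2, 4}, so 2 is not.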
# In[122]:
import random
from termcolor import colored
def primitiveRoot(q):
setOfPR = []
a = 1
print("\nPrimitive Roots table for {} : \n".format(q))
print(colored("a\u2193 i\u2192|".format(q) , 'blue'), end=" ")
for i in range(1, q):
print(colored('a^{} mod {}'.format(i,q), 'blue'), end=" ")
print("\n")
for i in range(1, q+1):
print(colored('------------', 'blue'), end="")
print("\n")
while a<q:
rootSet= []
reqSet = []
for i in range(1, q):
rootSet.append(int((a**i)%q))
#print(rootSet)
print(colored("{} |".format(a) , 'blue'), end=" ")
for j in range(1, q):
reqSet.append(j)
if set(rootSet) == set(reqSet):
setOfPR.append(a)
for k in rootSet:
print(colored("{}".format(k), 'green'), end=" ")
print("\n")
a += 1
else:
for k in rootSet:
print("{}".format(k), end=" ")
print("\n")
a += 1
print("\nPrimitive Roots of {} are ".format(q), setOfPR, end="\n\n")
q = int(input("Enter a prime number: "))
primitiveRoot(q)
# #### Tanmoy Sen Gupta
# [tanmoysg.com](http://tanmoysg.com) | +91 9864809029 | [email protected]
|
py | b404f9c6e84a57bdfd12da44e004168230596af6 | class Solution:
def findKthPositive(self, arr: List[int], k: int) -> int:
'''
Number of missing element at index i =
= arr[0] - 1 + arr[idx] - arr[0] - idx =
= arr[idx] - idx - 1
For more description, see: 1060 Missing Element in Sorted Array
T: O(log n) and O(1)
'''
def countMissing(idx):
return arr[idx] - idx - 1
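        # Worked example (illustrative, not from the original solution):
        # arr = [2, 3, 4, 7, 11], k = 5 -> countMissing per index = [1, 1, 1, 3, 6];
        # the binary search ends with lo = 4, so the answer is
        # arr[3] + k - countMissing(3) = 7 + 5 - 3 = 9
        # (the missing positives are 1, 5, 6, 8, 9, ...).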
if k < arr[0]: return k
lo, hi = 0, len(arr)
while lo < hi:
mid = lo + (hi - lo) // 2
if countMissing(mid) >= k:
hi = mid
else:
lo = mid + 1
return arr[lo - 1] + k - countMissing(lo - 1)
|
py | b404fb0de5aa2cf5e88967c791e3c0bccf34401b | from glob import glob
import pyexcel as p
### Set the following variables for the directory of the files you're working with
convertDirectory = ""  # set this to the directory containing the .xls files, e.g. "/path/to/files/"
###
for convertOldXls in glob(convertDirectory + '*.xls'):
xlsMinusPath = convertOldXls[:-3]
xlsFileNewExtension = xlsMinusPath + "xlsx"
p.save_book_as(file_name=convertOldXls,
                   dest_file_name=xlsFileNewExtension)
print("The workbook processed was: %s" % xlsFileNewExtension)
print("\n###\n" +
"### The .xls TO .xlsx conversion was a success.\n" +
"###\n") |
py | b404fb28b4d93c89142547f9f2914abdcdcc8f6e | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Zenacoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
ZMQ example using python3's asyncio
Zenacoin should be started with the command line arguments:
zenacoind -testnet -daemon \
-zmqpubrawblock=tcp://127.0.0.1:29332 \
-zmqpubrawtx=tcp://127.0.0.1:29332 \
-zmqpubhashtx=tcp://127.0.0.1:29332 \
-zmqpubhashblock=tcp://127.0.0.1:29332
We use the asyncio library here. `self.handle()` installs itself as a
future at the end of the function. Since it never returns with the event
loop having an empty stack of futures, this creates an infinite loop. An
alternative is to wrap the contents of `handle` inside `while True`.
A blocking example using python 2.7 can be obtained from the git history:
https://github.com/zenacoin/zenacoin/blob/37a7fe9e440b83e2364d5498931253937abe9294/contrib/zmq/zmq_sub.py
"""
import binascii
import asyncio
import zmq
import zmq.asyncio
import signal
import struct
import sys
if not (sys.version_info.major >= 3 and sys.version_info.minor >= 5):
print("This example only works with Python 3.5 and greater")
exit(1)
port = 29332
class ZMQHandler():
def __init__(self):
self.loop = zmq.asyncio.install()
self.zmqContext = zmq.asyncio.Context()
self.zmqSubSocket = self.zmqContext.socket(zmq.SUB)
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashblock")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashtx")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawblock")
self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawtx")
self.zmqSubSocket.connect("tcp://127.0.0.1:%i" % port)
async def handle(self) :
msg = await self.zmqSubSocket.recv_multipart()
topic = msg[0]
body = msg[1]
sequence = "Unknown"
if len(msg[-1]) == 4:
msgSequence = struct.unpack('<I', msg[-1])[-1]
sequence = str(msgSequence)
if topic == b"hashblock":
print('- HASH BLOCK ('+sequence+') -')
print(binascii.hexlify(body))
elif topic == b"hashtx":
print('- HASH TX ('+sequence+') -')
print(binascii.hexlify(body))
elif topic == b"rawblock":
print('- RAW BLOCK HEADER ('+sequence+') -')
print(binascii.hexlify(body[:80]))
elif topic == b"rawtx":
print('- RAW TX ('+sequence+') -')
print(binascii.hexlify(body))
# schedule ourselves to receive the next message
asyncio.ensure_future(self.handle())
def start(self):
self.loop.add_signal_handler(signal.SIGINT, self.stop)
self.loop.create_task(self.handle())
self.loop.run_forever()
def stop(self):
self.loop.stop()
self.zmqContext.destroy()
daemon = ZMQHandler()
daemon.start()
|
py | b404fb4a7a36518a4321cd688699a3e626f4c318 | """
OAuth 2.0 Client Credentials Plugin for HTTPie.
"""
import sys
from httpie.plugins import AuthPlugin
from oauthlib.oauth2 import BackendApplicationClient, WebApplicationClient, InsecureTransportError
from requests_oauthlib import OAuth2Session
from requests.auth import HTTPBasicAuth, AuthBase
from httpie.cli.definition import parser
from httpie.context import Environment
__version__ = '0.1.0'
__author__ = 'Brian Demers'
__licence__ = 'BSD'
class OAuth2Plugin(AuthPlugin):
name = 'OAuth 2.0 Client Credentials'
auth_type = 'oauth2'
description = ''
oauth = parser.add_argument_group(title='OAuth 2.0')
oauth.add_argument(
'--issuer-uri',
default=None,
metavar='ISSUER_URI',
help="""
The OAuth 2.0 Issuer URI
""",
)
oauth.add_argument(
'--scope',
default=None,
metavar='SCOPE',
help="""
The OAuth 2.0 Scopes
""",
)
def get_auth(self, username, password):
args = parser.args
auth = HTTPBasicAuth(username, password)
client = BackendApplicationClient(client_id=username)
oauth = OAuth2Session(client=client)
token = oauth.fetch_token(token_url=args.issuer_uri, auth=auth, scope=args.scope)
return BearerAuth(token=token['access_token'])
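# Illustrative command line usage (an assumption based on how httpie auth plugins are
# invoked, not taken from this plugin's documentation):
#   http -A oauth2 -a CLIENT_ID:CLIENT_SECRET \
#        --issuer-uri=https://auth.example.com/oauth2/token --scope="read" \
#        https://api.example.com/resource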
class BearerAuth(AuthBase):
"""Adds proof of authorization (Bearer token) to the request."""
def __init__(self, token):
"""Construct a new Bearer authorization object.
:param token: bearer token to attach to request
"""
self.token = token
def __call__(self, r):
"""Append an Bearer header to the request.
"""
r.headers['Authorization'] = 'Bearer %s' % self.token
return r
|
py | b404fbc7d70f322f87e7711fcbfde63ea355080d | """Gradients for operators defined in math_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import types
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
def _ReductionGradAssist(op):
"""Reduction grads have much in common, so factor the commonality out."""
inp = op.inputs[0] # Example:
input_shape = array_ops.shape(inp) # [2, 3, 5, 7]
input_rank = array_ops.rank(inp) # 4
indices = op.inputs[1] # [1, 2]
indices_shape = array_ops.shape(indices) # [2]
new_output_shape = data_flow_ops.dynamic_stitch( # [2, 1, 1, 7]
[math_ops.range(0, input_rank), # [0, 1, 2, 3]
indices], # [1, 2]
[input_shape, # [2, 3, 5, 7]
array_ops.fill(indices_shape, 1)]) # [1, 1]
return inp, new_output_shape, input_shape
@ops.RegisterGradient("Sum")
def _SumGrad(op, grad):
"""Gradient for Sum."""
_, new_output_shape, input_shape = _ReductionGradAssist(op)
tile_scaling = input_shape // new_output_shape
grad = array_ops.reshape(grad, new_output_shape)
return [array_ops.tile(grad, tile_scaling), None]
def _MinOrMaxGrad(op, grad):
"""Gradient for Max or Max. Amazingly it's precisely the same code."""
inp, new_output_shape, _ = _ReductionGradAssist(op)
y = op.outputs[0]
y = array_ops.reshape(y, new_output_shape)
grad = array_ops.reshape(grad, new_output_shape)
indicators = math_ops.cast(math_ops.equal(y, inp), grad.dtype)
return [indicators * grad, None]
@ops.RegisterGradient("Max")
def _MaxGrad(op, grad):
"""Gradient for Max."""
return _MinOrMaxGrad(op, grad)
@ops.RegisterGradient("Min")
def _MinGrad(op, grad):
return _MinOrMaxGrad(op, grad)
@ops.RegisterGradient("Mean")
def _MeanGrad(op, grad):
"""Gradient for Mean."""
sum_grad = _SumGrad(op, grad)[0]
input_shape = array_ops.shape(op.inputs[0])
output_shape = array_ops.shape(op.outputs[0])
factor = (math_ops.reduce_prod(input_shape) //
math_ops.reduce_prod(output_shape))
return sum_grad / math_ops.cast(factor, sum_grad.dtype), None
@ops.RegisterGradient("Prod")
def _ProdGrad(op, grad):
"""Gradient for Prod."""
# TODO(kearnes): this gives NaNs for 0s in the input tensor
_, new_output_shape, input_shape = _ReductionGradAssist(op)
tile_scaling = input_shape // new_output_shape
grad = array_ops.reshape(grad * op.outputs[0], new_output_shape)
grad = math_ops.div(array_ops.tile(grad, tile_scaling), op.inputs[0])
return grad, None
@ops.RegisterGradient("SegmentSum")
def _SegmentSumGrad(op, grad):
"""Gradient for SegmentSum."""
return array_ops.gather(grad, op.inputs[1]), None
@ops.RegisterGradient("SegmentMean")
def _SegmentMeanGrad(op, grad):
"""Gradient for SegmentMean."""
input_rank = array_ops.rank(op.inputs[0])
ones_shape = array_ops.concat(
0, [array_ops.shape(op.inputs[1]),
array_ops.fill(array_ops.expand_dims(input_rank - 1, 0), 1)])
ones = array_ops.fill(ones_shape,
constant_op.constant(1, dtype=grad.dtype))
scaled_grad = grad * math_ops.inv(math_ops.segment_sum(ones, op.inputs[1]))
return array_ops.gather(scaled_grad, op.inputs[1]), None
@ops.RegisterGradient("SparseSegmentSum")
def _SparseSegmentSumGrad(op, grad):
"""Gradient for SparseSegmentSum."""
input_rows = array_ops.shape(op.inputs[0])[0]
return (math_ops.unsorted_segment_sum(
array_ops.gather(grad, op.inputs[2]),
op.inputs[1], input_rows), None, None)
@ops.RegisterGradient("SparseSegmentMean")
def _SparseSegmentMeanGrad(op, grad):
"""Gradient for SparseSegmentMean."""
dim0 = array_ops.shape(op.inputs[0])[0]
return (math_ops.sparse_segment_mean_grad(grad,
op.inputs[1],
op.inputs[2],
dim0),
None, None)
@ops.RegisterGradient("SegmentMin")
def _SegmentMinGrad(op, grad):
"""Gradient for SegmentMin."""
zeros = array_ops.zeros(array_ops.shape(op.inputs[0]),
dtype=op.inputs[0].dtype)
gathered_grads = array_ops.gather(grad, op.inputs[1])
gathered_outputs = array_ops.gather(op.outputs[0], op.inputs[1])
return math_ops.select(math_ops.greater(op.inputs[0], gathered_outputs),
zeros,
gathered_grads), None
@ops.RegisterGradient("SegmentMax")
def _SegmentMaxGrad(op, grad):
"""Gradient for SegmentMax."""
zeros = array_ops.zeros(array_ops.shape(op.inputs[0]),
dtype=op.inputs[0].dtype)
gathered_grads = array_ops.gather(grad, op.inputs[1])
gathered_outputs = array_ops.gather(op.outputs[0], op.inputs[1])
return math_ops.select(math_ops.less(op.inputs[0], gathered_outputs),
zeros,
gathered_grads), None
@ops.RegisterGradient("UnsortedSegmentSum")
def _UnsortedSegmentSumGrad(op, grad):
"""Gradient for SegmentSum."""
return array_ops.gather(grad, op.inputs[1]), None, None
@ops.RegisterGradient("Abs")
def _AbsGrad(op, grad):
x = op.inputs[0]
return grad * math_ops.sign(x)
@ops.RegisterGradient("Neg")
def _NegGrad(_, grad):
"""Returns -grad."""
return - grad
@ops.RegisterGradient("Inv")
def _InvGrad(op, grad):
"""Returns -grad * (1 / x^2)."""
y = op.outputs[0] # y = 1 / x
return grad * (- math_ops.square(y))
@ops.RegisterGradient("Square")
def _SquareGrad(op, grad):
x = op.inputs[0]
return grad * (2.0 * x)
@ops.RegisterGradient("Sqrt")
def _SqrtGrad(op, grad):
y = op.outputs[0] # y = x^(1/2)
return grad * (.5 * math_ops.inv(y))
@ops.RegisterGradient("Rsqrt")
def _RsqrtGrad(op, grad):
x = op.inputs[0]
y = op.outputs[0] # y = x^(-1/2)
return grad * ((-0.5) * math_ops.inv(x) * y)
@ops.RegisterGradient("Exp")
def _ExpGrad(op, grad):
"""Returns grad * exp(x)."""
y = op.outputs[0] # y = e^x
return grad * y
@ops.RegisterGradient("Log")
def _LogGrad(op, grad):
"""Returns grad * (1/x)."""
x = op.inputs[0]
return grad * math_ops.inv(x)
@ops.RegisterGradient("Tanh")
def _TanhGrad(op, grad):
"""Returns grad * (1 - tanh(x) * tanh(x))."""
y = op.outputs[0] # y = tanh(x)
return grad * (1 - math_ops.square(y))
@ops.RegisterGradient("Sigmoid")
def _SigmoidGrad(op, grad):
"""Returns grad * sigmoid(x) * (1 - sigmoid(x))."""
y = op.outputs[0] # y = sigmoid(x)
return grad * (y * (1 - y))
@ops.RegisterGradient("Sign")
def _SignGrad(op, _):
"""Returns 0."""
x = op.inputs[0]
return array_ops.zeros(array_ops.shape(x), dtype=x.dtype)
@ops.RegisterGradient("Sin")
def _SinGrad(op, grad):
"""Returns grad * cos(x)."""
x = op.inputs[0]
return grad * math_ops.cos(x)
@ops.RegisterGradient("Cos")
def _CosGrad(op, grad):
"""Returns grad * -sin(x)."""
x = op.inputs[0]
return -grad * math_ops.sin(x)
@ops.RegisterGradient("AddN")
def _AddNGrad(op, grad):
"""Copies the gradient to all inputs."""
# Not broadcasting.
return [grad] * len(op.inputs)
@ops.RegisterGradient("Add")
def _AddGrad(op, grad):
x = op.inputs[0]
y = op.inputs[1]
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
return (array_ops.reshape(math_ops.reduce_sum(grad, rx), sx),
array_ops.reshape(math_ops.reduce_sum(grad, ry), sy))
@ops.RegisterGradient("Sub")
def _SubGrad(op, grad):
x = op.inputs[0]
y = op.inputs[1]
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
return (array_ops.reshape(math_ops.reduce_sum(grad, rx), sx),
array_ops.reshape(-math_ops.reduce_sum(grad, ry), sy))
@ops.RegisterGradient("Mul")
def _MulGrad(op, grad):
x = op.inputs[0]
y = op.inputs[1]
assert x.dtype.base_dtype == y.dtype.base_dtype, (x.dtype, " vs. ", y.dtype)
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
if x.dtype.base_dtype == types.complex64:
return (array_ops.reshape(math_ops.reduce_sum(grad * math_ops.conj(y), rx), sx),
array_ops.reshape(math_ops.reduce_sum(math_ops.conj(x) * grad, ry), sy))
else:
return (array_ops.reshape(math_ops.reduce_sum(grad * y, rx), sx),
array_ops.reshape(math_ops.reduce_sum(x * grad, ry), sy))
@ops.RegisterGradient("Div")
def _DivGrad(op, grad):
x = op.inputs[0]
y = op.inputs[1]
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
return (array_ops.reshape(math_ops.reduce_sum(grad / y, rx), sx),
array_ops.reshape(math_ops.reduce_sum(grad *
(-x / math_ops.square(y)), ry), sy))
@ops.RegisterGradient("Pow")
def _PowGrad(op, grad):
"""Returns grad * (y*x^(y-1), z*log(x))."""
x = op.inputs[0]
y = op.inputs[1]
z = op.outputs[0]
sx = array_ops.shape(x)
sy = array_ops.shape(y)
rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
gx = array_ops.reshape(math_ops.reduce_sum(grad * y * math_ops.pow(x, y - 1), rx),
sx)
gy = array_ops.reshape(math_ops.reduce_sum(grad * z * math_ops.log(x), ry), sy)
return gx, gy
def _MaximumMinimumGrad(op, grad, selector_op):
"""Factor out the code for the gradient of Maximum or Minimum."""
x = op.inputs[0]
y = op.inputs[1]
gdtype = grad.dtype
sx = array_ops.shape(x)
sy = array_ops.shape(y)
gradshape = array_ops.shape(grad)
zeros = array_ops.zeros(gradshape, gdtype)
xmask = selector_op(x, y)
rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
xgrad = math_ops.select(xmask, grad, zeros)
ygrad = math_ops.select(math_ops.logical_not(xmask), grad, zeros)
gx = array_ops.reshape(math_ops.reduce_sum(xgrad, rx), sx)
gy = array_ops.reshape(math_ops.reduce_sum(ygrad, ry), sy)
return (gx, gy)
@ops.RegisterGradient("Maximum")
def _MaximumGrad(op, grad):
"""Returns grad*(x > y, x <= y) with type of grad."""
return _MaximumMinimumGrad(op, grad, math_ops.greater_equal)
@ops.RegisterGradient("Minimum")
def _MinimumGrad(op, grad):
"""Returns grad*(x < y, x >= y) with type of grad."""
return _MaximumMinimumGrad(op, grad, math_ops.less_equal)
# Logical operations have no gradients.
ops.NoGradient("Less")
ops.NoGradient("LessEqual")
ops.NoGradient("Greater")
ops.NoGradient("GreaterEqual")
ops.NoGradient("Equal")
ops.NoGradient("NotEqual")
ops.NoGradient("LogicalAnd")
ops.NoGradient("LogicalOr")
ops.NoGradient("LogicalNot")
@ops.RegisterGradient("Select")
def _SelectGrad(op, grad):
c = op.inputs[0]
x = op.inputs[1]
zeros = array_ops.zeros(array_ops.shape(c), dtype=x.dtype)
return (None, math_ops.select(c, grad, zeros),
math_ops.select(c, zeros, grad))
@ops.RegisterGradient("MatMul")
def _MatMulGrad(op, grad):
t_a = op.get_attr("transpose_a")
t_b = op.get_attr("transpose_b")
if not t_a and not t_b:
return (math_ops.matmul(grad, op.inputs[1], transpose_b=True),
math_ops.matmul(op.inputs[0], grad, transpose_a=True))
elif not t_a and t_b:
return (math_ops.matmul(grad, op.inputs[1]),
math_ops.matmul(grad, op.inputs[0], transpose_a=True))
elif t_a and not t_b:
return (math_ops.matmul(op.inputs[1], grad, transpose_b=True),
math_ops.matmul(op.inputs[0], grad))
elif t_a and t_b:
return (math_ops.matmul(op.inputs[1], grad, transpose_a=True,
transpose_b=True),
math_ops.matmul(grad, op.inputs[0], transpose_a=True,
transpose_b=True))
@ops.RegisterGradient("SparseMatMul")
def _SparseMatMulGrad(op, grad):
"""Gradient for SparseMatMul."""
t_a = op.get_attr("transpose_a")
t_b = op.get_attr("transpose_b")
is_sparse = {
op.inputs[0]: op.get_attr("a_is_sparse"),
op.inputs[1]: op.get_attr("b_is_sparse"),
# Use heuristic to figure out if grad might be sparse
grad: (grad.op.type == "ReluGrad")
}
def _SparseMatMul(t1, t2, transpose_a=False, transpose_b=False):
"""Helper function to create SparseMatMul op."""
assert t1 in is_sparse and t2 in is_sparse
t1_sparse = is_sparse[t1]
t2_sparse = is_sparse[t2]
if not t1_sparse and not t2_sparse:
return math_ops.matmul(t1, t2,
transpose_a=transpose_a,
transpose_b=transpose_b)
transpose_out = False
if not t1_sparse:
transpose_out = True
t1, t2 = t2, t1
t1_sparse, t2_sparse = t2_sparse, t1_sparse
assert t1_sparse
transpose_a, transpose_b = not transpose_b, not transpose_a
if transpose_b:
t2 = array_ops.transpose(t2)
transpose_b = False
m = math_ops.matmul(t1, t2,
transpose_a=transpose_a,
transpose_b=transpose_b,
a_is_sparse=t1_sparse,
b_is_sparse=t2_sparse)
if transpose_out:
m = array_ops.transpose(m)
return m
if not t_a and not t_b:
return (_SparseMatMul(grad, op.inputs[1], transpose_b=True),
_SparseMatMul(op.inputs[0], grad, transpose_a=True))
elif not t_a and t_b:
return (_SparseMatMul(grad, op.inputs[1]),
_SparseMatMul(grad, op.inputs[0], transpose_a=True))
elif t_a and not t_b:
return (_SparseMatMul(op.inputs[1], grad, transpose_b=True),
_SparseMatMul(op.inputs[0], grad))
elif t_a and t_b:
return (_SparseMatMul(op.inputs[1], grad,
transpose_a=True, transpose_b=True),
_SparseMatMul(grad, op.inputs[0],
transpose_a=True, transpose_b=True))
@ops.RegisterGradient("Floor")
def _FloorGrad(_, grad):
return grad
@ops.RegisterGradient("BatchMatMul")
def _BatchMatMul(op, grad):
"""Returns the gradient of x and y given the gradient of x * y."""
x = op.inputs[0]
y = op.inputs[1]
adj_x = op.get_attr("adj_x")
adj_y = op.get_attr("adj_y")
if not adj_x:
if not adj_y:
grad_x = math_ops.batch_matmul(grad, y, False, True)
grad_y = math_ops.batch_matmul(x, grad, True, False)
else:
grad_x = math_ops.batch_matmul(grad, y, False, False)
grad_y = math_ops.batch_matmul(grad, x, True, False)
else:
if not adj_y:
grad_x = math_ops.batch_matmul(y, grad, False, True)
grad_y = math_ops.batch_matmul(x, grad, False, False)
else:
grad_x = math_ops.batch_matmul(y, grad, True, True)
grad_y = math_ops.batch_matmul(grad, x, True, True)
return grad_x, grad_y
ops.NoGradient("Range")
ops.NoGradient("LinSpace")
@ops.RegisterGradient("Complex")
def _ComplexGrad(_, grad):
"""Returns the real and imaginary components of 'grad', respectively."""
return math_ops.real(grad), math_ops.imag(grad)
@ops.RegisterGradient("Real")
def _RealGrad(_, grad):
"""Returns 'grad' as the real part and set the imaginary part 0."""
zero = constant_op.constant(0, dtype=grad.dtype)
return math_ops.complex(grad, zero)
@ops.RegisterGradient("Imag")
def _ImagGrad(_, grad):
"""Returns 'grad' as the imaginary part and set the real part 0."""
zero = constant_op.constant(0, dtype=grad.dtype)
return math_ops.complex(zero, grad)
@ops.RegisterGradient("Conj")
def _ConjGrad(_, grad):
"""Returns the complex conjugate of grad."""
return math_ops.conj(grad)
@ops.RegisterGradient("Cast")
def _CastGrad(op, grad):
t = [types.float32, types.float64, types.bfloat16]
src_type = op.inputs[0].dtype.base_dtype
dst_type = grad.dtype.base_dtype
if src_type in t and dst_type in t:
return math_ops.cast(grad, src_type)
else:
return None
|
py | b404fcf6a85196114a2009975e9b2dca38bbd254 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Helio de Jesus and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
# test_records = frappe.get_test_records('Quartos')
class TestQuartos(unittest.TestCase):
pass
|
py | b404fd882ffb6ad64f118d0cf013deb9c2dc83f1 | import os
import joblib
import re
import numpy as np
import tensorflow as tf
from rllab.misc.logger import get_snapshot_dir
class FusionDistrManager(object):
def add_paths(self, paths):
raise NotImplementedError()
def sample_paths(self, n):
raise NotImplementedError()
class PathsReader(object):
ITR_REG = re.compile(r"itr_(?P<itr_count>[0-9]+)\.pkl")
def __init__(self, path_dir):
self.path_dir = path_dir
def get_path_files(self):
itr_files = []
for i, filename in enumerate(os.listdir(self.path_dir)):
m = PathsReader.ITR_REG.match(filename)
if m:
itr_count = m.group('itr_count')
itr_files.append((itr_count, filename))
itr_files = sorted(itr_files, key=lambda x: int(x[0]), reverse=True)
for itr_file_and_count in itr_files:
fname = os.path.join(self.path_dir, itr_file_and_count[1])
yield fname
def __len__(self):
return len(list(self.get_path_files()))
class DiskFusionDistr(FusionDistrManager):
def __init__(self, path_dir=None):
if path_dir is None:
path_dir = get_snapshot_dir()
self.path_dir = path_dir
self.paths_reader = PathsReader(path_dir)
def add_paths(self, paths):
raise NotImplementedError()
def sample_paths(self, n):
# load from disk!
fnames = list(self.paths_reader.get_path_files())
N = len(fnames)
sample_files = np.random.randint(0, N, size=(n))
#sample_hist = np.histogram(sample_files, range=(0, N))
#print(sample_hist)
unique, counts = np.unique(sample_files, return_counts=True)
unique_dict = dict(zip(unique, counts))
all_paths = []
for fidx in unique_dict:
fname = fnames[fidx]
n_samp = unique_dict[fidx]
print(fname, n_samp)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Graph().as_default():
with tf.Session(config=config).as_default():
snapshot_dict = joblib.load(fname)
paths = snapshot_dict['paths']
pidxs = np.random.randint(0, len(paths), size=(n_samp))
all_paths.extend([paths[pidx] for pidx in pidxs])
return all_paths
class RamFusionDistr(FusionDistrManager):
def __init__(self, buf_size, subsample_ratio=0.5):
self.buf_size = buf_size
self.buffer = []
self.subsample_ratio = subsample_ratio
def add_paths(self, paths, subsample=True):
if subsample:
paths = paths[:int(len(paths)*self.subsample_ratio)]
self.buffer.extend(paths)
overflow = len(self.buffer)-self.buf_size
while overflow > 0:
#self.buffer = self.buffer[overflow:]
N = len(self.buffer)
probs = np.arange(N)+1
probs = probs/float(np.sum(probs))
pidx = np.random.choice(np.arange(N), p=probs)
self.buffer.pop(pidx)
overflow -= 1
def sample_paths(self, n):
if len(self.buffer) == 0:
return []
else:
pidxs = np.random.randint(0, len(self.buffer), size=(n))
return [self.buffer[pidx] for pidx in pidxs]
if __name__ == "__main__":
#fm = DiskFusionDistr(path_dir='data_nobs/gridworld_random/gru1')
#paths = fm.sample_paths(10)
fm = RamFusionDistr(10)
fm.add_paths([1,2,3,4,5,6,7,8,9,10,11,12,13])
print(fm.buffer)
print(fm.sample_paths(5))
|
py | b404fe23ec592918754200b96ce306a8c41fdecf | import numpy as np
import pathlib
import cv2
import pandas as pd
import copy
class OpenImagesDataset:
def __init__(self, root,
transform=None, target_transform=None,
dataset_type="train", balance_data=False):
self.root = pathlib.Path(root)
self.transform = transform
self.target_transform = target_transform
self.dataset_type = dataset_type.lower()
self.data, self.class_names, self.class_dict = self._read_data()
self.balance_data = balance_data
self.min_image_num = -1
if self.balance_data:
self.data = self._balance_data()
self.ids = [info['image_id'] for info in self.data]
self.class_stat = None
def _getitem(self, index):
image_info = self.data[index]
image = self._read_image(image_info['image_id'])
        image = cv2.resize(image, (300, 300))  # hard-coded input size; should not be hard-coded
# duplicate boxes to prevent corruption of dataset
boxes = copy.copy(image_info['boxes'])
boxes[:, 0] *= image.shape[1]
boxes[:, 1] *= image.shape[0]
boxes[:, 2] *= image.shape[1]
boxes[:, 3] *= image.shape[0]
# duplicate labels to prevent corruption of dataset
labels = copy.copy(image_info['labels'])
if self.transform:
image, boxes, labels = self.transform(image, boxes, labels)
#print("real!", boxes, labels)
if self.target_transform:
boxes, labels = self.target_transform(boxes, labels)
return image_info['image_id'], image, boxes, labels
def __getitem__(self, index):
_, image, boxes, labels = self._getitem(index)
return image, boxes, labels
def get_annotation(self, index):
"""To conform the eval_ssd implementation that is based on the VOC dataset."""
image_id, image, boxes, labels = self._getitem(index)
is_difficult = np.zeros(boxes.shape[0], dtype=np.uint8)
return image_id, (boxes, labels, is_difficult)
def get_image(self, index):
image_info = self.data[index]
image = self._read_image(image_info['image_id'])
if self.transform:
image, _ = self.transform(image)
return image
    # referenced here (the annotation file is read below)
def _read_data(self):
annotation_file = f"{self.root}/sub-{self.dataset_type}-annotations-bbox.csv"
annotations = pd.read_csv(annotation_file)
annotations = annotations.dropna(how='any')
print(list(annotations['ClassName'].unique()))
class_names = ['BACKGROUND'] + sorted(list(annotations['ClassName'].unique()))
class_dict = {class_name: i for i, class_name in enumerate(class_names)}
data = []
for image_id, group in annotations.groupby("ImageID"):
boxes = group.loc[:, ["XMin", "YMin", "XMax", "YMax"]].values.astype(np.float32)
# make labels 64 bits to satisfy the cross_entropy function
labels = np.array([class_dict[name] for name in group["ClassName"]], dtype='int64')
data.append({
'image_id': image_id,
'boxes': boxes,
'labels': labels
})
return data, class_names, class_dict
def __len__(self):
return len(self.data)
def __repr__(self):
if self.class_stat is None:
self.class_stat = {name: 0 for name in self.class_names[1:]}
for example in self.data:
for class_index in example['labels']:
class_name = self.class_names[class_index]
self.class_stat[class_name] += 1
content = ["Dataset Summary:"
f"Number of Images: {len(self.data)}",
f"Minimum Number of Images for a Class: {self.min_image_num}",
"Label Distribution:"]
for class_name, num in self.class_stat.items():
content.append(f"\t{class_name}: {num}")
return "\n".join(content)
def _read_image(self, image_id):
        # changed: read the image via the relative path stored in the annotation file
#image_file = self.root / self.dataset_type / f"{image_id}.jpg"
image_file = image_id
image = cv2.imread(str(image_file))
if image.shape[2] == 1:
image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
else:
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
return image
def _balance_data(self):
label_image_indexes = [set() for _ in range(len(self.class_names))]
for i, image in enumerate(self.data):
for label_id in image['labels']:
label_image_indexes[label_id].add(i)
label_stat = [len(s) for s in label_image_indexes]
self.min_image_num = min(label_stat[1:])
sample_image_indexes = set()
for image_indexes in label_image_indexes[1:]:
image_indexes = np.array(list(image_indexes))
sub = np.random.permutation(image_indexes)[:self.min_image_num]
sample_image_indexes.update(sub)
sample_data = [self.data[i] for i in sample_image_indexes]
return sample_data
|
py | b404febc96a6eb6098a62eeb350173f35ba239db | from .procedure_dict import SIM_PROCEDURES, SimProcedures
from .definitions import SIM_LIBRARIES
|
py | b404ffeb629bdd099d99244b33e81bf466cfae0b | from io import StringIO
import unittest
from sphinx_action import status_check
class TestStatusChecks(unittest.TestCase):
def test_output_for_warning(self):
output_file = StringIO()
annotation = status_check.CheckAnnotation(
path="index.rst",
start_line=1,
end_line=20,
annotation_level=status_check.AnnotationLevel.WARNING,
message="This is a test warning message",
)
status_check.output_annotation(annotation, where_to_print=output_file)
output_str = output_file.getvalue()
self.assertEqual(
output_str,
"::warning file=index.rst,line=1::This is a test warning message\n",
)
def test_output_for_error(self):
output_file = StringIO()
annotation = status_check.CheckAnnotation(
path="index.rst",
start_line=15,
end_line=20,
annotation_level=status_check.AnnotationLevel.FAILURE,
message="This is a test error message",
)
status_check.output_annotation(annotation, where_to_print=output_file)
output_str = output_file.getvalue()
self.assertEqual(
output_str,
"::error file=index.rst,line=15::This is a test error message\n",
)
if __name__ == "__main__":
unittest.main()
|
py | b405017ad7d2c411e6f05a4b7bba2c6adb6ee21b | # Copyright 2020 Dakewe Biotech Corporation. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from gan_pytorch.models import mnist
model = mnist(pretrained=True)
print(model)
|
py | b4050231f4725457778f61e93ac16f0ab4a2cffd | # coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.7.4, generator: @autorest/[email protected])
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
# fmt: off
def build_exchange_aad_access_token_for_acr_refresh_token_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = kwargs.pop('api_version', "2021-07-01") # type: str
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/oauth2/exchange')
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_exchange_acr_refresh_token_for_acr_access_token_request(
**kwargs # type: Any
):
# type: (...) -> HttpRequest
api_version = kwargs.pop('api_version', "2021-07-01") # type: str
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/oauth2/token')
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
# fmt: on
class AuthenticationOperations(object):
"""AuthenticationOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~container_registry.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def exchange_aad_access_token_for_acr_refresh_token(
self,
grant_type, # type: Union[str, "_models.PostContentSchemaGrantType"]
service, # type: str
tenant=None, # type: Optional[str]
refresh_token=None, # type: Optional[str]
access_token=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.AcrRefreshToken"
"""Exchange AAD tokens for an ACR refresh Token.
:param grant_type: Can take a value of access_token_refresh_token, or access_token, or
refresh_token.
:type grant_type: str or ~container_registry.models.PostContentSchemaGrantType
:param service: Indicates the name of your Azure container registry.
:type service: str
:param tenant: AAD tenant associated to the AAD credentials.
:type tenant: str
:param refresh_token: AAD refresh token, mandatory when grant_type is
access_token_refresh_token or refresh_token.
:type refresh_token: str
:param access_token: AAD access token, mandatory when grant_type is access_token_refresh_token
or access_token.
:type access_token: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AcrRefreshToken, or the result of cls(response)
:rtype: ~container_registry.models.AcrRefreshToken
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AcrRefreshToken"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2021-07-01") # type: str
content_type = kwargs.pop('content_type', "application/x-www-form-urlencoded") # type: Optional[str]
# Construct form data
_data = {
"grant_type": grant_type,
"service": service,
"tenant": tenant,
"refresh_token": refresh_token,
"access_token": access_token,
}
request = build_exchange_aad_access_token_for_acr_refresh_token_request(
api_version=api_version,
content_type=content_type,
data=_data,
template_url=self.exchange_aad_access_token_for_acr_refresh_token.metadata['url'],
)
request = _convert_request(request)
path_format_arguments = {
"url": self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.AcrErrors, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('AcrRefreshToken', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
exchange_aad_access_token_for_acr_refresh_token.metadata = {'url': '/oauth2/exchange'} # type: ignore
@distributed_trace
def exchange_acr_refresh_token_for_acr_access_token(
self,
service, # type: str
scope, # type: str
refresh_token, # type: str
grant_type="refresh_token", # type: Union[str, "_models.TokenGrantType"]
**kwargs # type: Any
):
# type: (...) -> "_models.AcrAccessToken"
"""Exchange ACR Refresh token for an ACR Access Token.
:param service: Indicates the name of your Azure container registry.
:type service: str
:param scope: Which is expected to be a valid scope, and can be specified more than once for
multiple scope requests. You obtained this from the Www-Authenticate response header from the
challenge.
:type scope: str
:param refresh_token: Must be a valid ACR refresh token.
:type refresh_token: str
:param grant_type: Grant type is expected to be refresh_token.
:type grant_type: str or ~container_registry.models.TokenGrantType
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AcrAccessToken, or the result of cls(response)
:rtype: ~container_registry.models.AcrAccessToken
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AcrAccessToken"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2021-07-01") # type: str
content_type = kwargs.pop('content_type', "application/x-www-form-urlencoded") # type: Optional[str]
# Construct form data
_data = {
"service": service,
"scope": scope,
"refresh_token": refresh_token,
"grant_type": grant_type,
}
request = build_exchange_acr_refresh_token_for_acr_access_token_request(
api_version=api_version,
content_type=content_type,
data=_data,
template_url=self.exchange_acr_refresh_token_for_acr_access_token.metadata['url'],
)
request = _convert_request(request)
path_format_arguments = {
"url": self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.AcrErrors, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('AcrAccessToken', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
exchange_acr_refresh_token_for_acr_access_token.metadata = {'url': '/oauth2/token'} # type: ignore
|
py | b405023deed77c7e065166cf31d4f7e2c4066320 | import argparse
from contextlib import closing
import logging
import os
import sqlite3
from indra_db_lite.construction import get_sqlite_tables
from indra_db_lite.construction import import_csv_into_sqlite
from indra_db_lite.construction import query_to_csv
logger = logging.getLogger(__name__)
def agent_text_stmts_to_csv(outpath: str) -> None:
query = """
SELECT
id, db_id, stmt_id
FROM
raw_agents
WHERE
db_name = 'TEXT' AND
stmt_id IS NOT NULL
"""
query_to_csv(query, outpath)
def stmts_readings_to_csv(outpath: str) -> None:
query = """
SELECT
id, reading_id
FROM
raw_statements
WHERE
reading_id IS NOT NULL
"""
query_to_csv(query, outpath)
def readings_content_to_csv(outpath: str) -> None:
query = """
SELECT
id, text_content_id
FROM
reading
"""
query_to_csv(query, outpath)
def content_text_refs_to_csv(outpath: str) -> None:
query = """
SELECT
id, text_ref_id
FROM
text_content
"""
query_to_csv(query, outpath)
def create_temp_agent_text_tables(
agent_stmts_path: str,
stmt_readings_path: str,
reading_content_path: str,
content_text_refs_path: str,
sqlite_db_path: str,
) -> None:
query1 = \
"""--
CREATE TABLE IF NOT EXISTS agent_stmts (
id INTEGER PRIMARY KEY,
agent_text TEXT,
stmt_id INTEGER
);
"""
query2 = \
"""--
CREATE TABLE IF NOT EXISTS stmt_readings (
stmt_id INTEGER PRIMARY KEY,
reading_id INTEGER
);
"""
query3 = \
"""--
CREATE TABLE IF NOT EXISTS reading_content (
reading_id INTEGER PRIMARY KEY,
text_content_id INTEGER
);
"""
query4 = \
"""--
CREATE TABLE IF NOT EXISTS content_text_refs (
text_content_id INTEGER PRIMARY KEY,
text_ref_id INTEGER
);
"""
with closing(sqlite3.connect(sqlite_db_path)) as conn:
with closing(conn.cursor()) as cur:
for query in query1, query2, query3, query4:
cur.execute(query)
conn.commit()
import_csv_into_sqlite(agent_stmts_path, 'agent_stmts', sqlite_db_path)
import_csv_into_sqlite(stmt_readings_path, 'stmt_readings', sqlite_db_path)
import_csv_into_sqlite(
reading_content_path, 'reading_content', sqlite_db_path
)
import_csv_into_sqlite(
content_text_refs_path, 'content_text_refs', sqlite_db_path
)
def add_indices_to_temp_agent_text_tables(sqlite_db_path: str) -> None:
query1 = """--
CREATE INDEX IF NOT EXISTS
agent_stmts_stmt_id_idx
ON
agent_stmts(stmt_id)
"""
query2 = """--
CREATE INDEX IF NOT EXISTS
stmt_readings_reading_id_idx
ON
stmt_readings(reading_id)
"""
query3 = """--
CREATE INDEX IF NOT EXISTS
reading_content_text_content_id_idx
ON
        reading_content(text_content_id)
"""
with closing(sqlite3.connect(sqlite_db_path)) as conn:
with closing(conn.cursor()) as cur:
for query in query1, query2, query3:
cur.execute(query)
conn.commit()
def ensure_agent_texts_table(sqlite_db_path: str) -> None:
query = """--
CREATE TABLE IF NOT EXISTS agent_texts (
id INTEGER PRIMARY KEY,
agent_text TEXT,
text_ref_id INTEGER
);
"""
with closing(sqlite3.connect(sqlite_db_path)) as conn:
with closing(conn.cursor()) as cur:
cur.execute(query)
conn.commit()
def create_agent_texts_table(sqlite_db_path: str) -> None:
all_tables = get_sqlite_tables(sqlite_db_path)
needed_tables = {
'agent_stmts', 'stmt_readings', 'reading_content', 'content_text_refs'
}
try:
assert needed_tables <= set(all_tables)
except AssertionError:
logger.exception('Necessary temporary tables do not exist.')
ensure_agent_texts_table(sqlite_db_path)
query = """--
INSERT OR IGNORE INTO
agent_texts
SELECT
NULL, agent_text, text_ref_id
FROM (
SELECT DISTINCT
ags.agent_text as agent_text, ct.text_ref_id as text_ref_id
FROM
agent_stmts ags
JOIN
stmt_readings sr
ON
ags.stmt_id = sr.stmt_id
JOIN
reading_content rc
ON
sr.reading_id = rc.reading_id
JOIN
content_text_refs ct
ON
rc.text_content_id = ct.text_content_id
)
"""
with closing(sqlite3.connect(sqlite_db_path)) as conn:
with closing(conn.cursor()) as cur:
cur.execute(query)
conn.commit()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('outpath')
args = parser.parse_args()
outpath = args.outpath
logging.basicConfig(
filename=os.path.join(outpath, 'agent_texts.log'),
filemode='a',
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M',
level=logging.DEBUG,
force=True,
)
logger = logging.getLogger(__name__)
logger.info('Constructing agent texts table')
csv_files = [
os.path.join(outpath, csv_file) for csv_file in
[
'agent_stmts.csv',
'stmt_readings.csv',
'reading_content.csv',
'content_text_refs.csv',
]
]
agent_texts_db_path = os.path.join(outpath, 'agent_texts.db')
for filepath, function in zip(
csv_files,
(
agent_text_stmts_to_csv,
stmts_readings_to_csv,
readings_content_to_csv,
content_text_refs_to_csv,
)
):
if os.path.exists(filepath):
continue
logger.info(f'Dumping to csv {function.__name__}')
function(filepath)
if not os.path.exists(agent_texts_db_path):
logger.info('Loading csv files into temporary tables in sqlite.')
create_temp_agent_text_tables(*csv_files, agent_texts_db_path)
logger.info('Adding indices to temporary tables.')
add_indices_to_temp_agent_text_tables(agent_texts_db_path)
for filepath in csv_files:
os.remove(filepath)
if 'agent_texts' not in get_sqlite_tables(agent_texts_db_path):
logger.info('Constructing agent texts table with one big join.')
create_agent_texts_table(agent_texts_db_path)
|
py | b405026ccc4622cdc68ab378986a4bd263a34db8 | # (C) Copyright 2021 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
#
from climetlab.utils.bbox import BoundingBox, to_bounding_box
CONVERT = {
list: lambda x: x.as_list(),
tuple: lambda x: x.as_tuple(),
dict: lambda x: x.as_dict(),
BoundingBox: lambda x: x,
}
class BoundingBoxNormaliser:
def __init__(self, format=BoundingBox):
self.format = format
def normalise(self, bbox):
bbox = to_bounding_box(bbox)
return CONVERT[self.format](bbox)
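# Illustrative usage sketch (not part of the original module): the exact inputs
# accepted by `to_bounding_box` are defined in climetlab.utils.bbox; an existing
# BoundingBox instance is assumed to work, and other coercible forms may too.
#
#     normaliser = BoundingBoxNormaliser(format=list)
#     area = normaliser.normalise(BoundingBox(north=90, west=-180, south=-90, east=180))
#     # `area` is now the bounding box expressed as a list, per CONVERT above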
|
py | b40504753eb92a5893f93f9ea6b671bcbe81e4de | # -*- coding: utf-8 -*-
__version__ = '0.1.0'
from .base import Statement, Name, Str, Num
from .modules import Document
from .callables import FunctionDef, DecoratorDef, ClassDef, Attribute, Call
from .simple_statements import Return, Docstring, Assignment
from .syntaxes import base_syntax, yaml_syntax
from .parser import DefaultProcessor, YAMLProcessor |
py | b40504da067daaede4f23e68404f6d0958a7cb6a | from .datastores import AbstractSynchronisedDatastore
from .questions import AbstractQuestion, Question
from .services import AbstractRegisteredService, RegisteredService
__all__ = (
"AbstractRegisteredService",
"AbstractSynchronisedDatastore",
"AbstractQuestion",
"RegisteredService",
"Question",
)
|
py | b40505483f00f23e0b0e5ebe28c3faf6abd564a9 | # -*- coding: utf-8 -*-
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.md'), 'rb') as f:
long_description = f.read().decode('utf-8')
setup(
name='deployer-lite-core',
version='0.0.1',
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
url='https://github.com/harnash/deployer-lite-core',
license='Apache 2.0',
author='Łukasz Harasimowicz',
author_email='[email protected]',
description='',
long_description=long_description,
test_suite='nose.collector',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: Software Development :: Build Tools',
'Topic :: System :: Clustering',
'Topic :: System :: Software Distribution',
'Topic :: System :: Systems Administration',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
tests_require=[
'nose==1.3.7',
'coverage==4.0b1',
],
install_requires=[
'pyzmq==14.7.0',
'layered-yaml-attrdict-config==15.5.2',
'deployer-lite-core',
]
)
|
py | b40507b05e0b887443fd6d70a1bf0020514bacc1 | from __future__ import absolute_import, division, print_function, unicode_literals
from amaasutils.random_utils import random_string, random_decimal
import random
from amaascore.core.reference import Reference
from amaascore.parties.asset_manager import AssetManager
from amaascore.parties.broker import Broker
from amaascore.parties.children import Address, Email
from amaascore.parties.individual import Individual
from amaascore.parties.party import Party
def generate_common(asset_manager_id, party_id, party_status):
common = {'asset_manager_id': asset_manager_id or random.randint(1, 1000),
'party_id': party_id or str(random.randint(1, 1000)),
'party_status': party_status or 'Active',
'display_name': random_string(10),
'legal_name': random_string(10),
'url': random_string(10)
}
return common
def generate_party(asset_manager_id=None, party_id=None, party_status=None):
references = {'PartyDB': Reference(random_string(10))}
attributes = generate_common(asset_manager_id=asset_manager_id, party_id=party_id, party_status=party_status)
party = Party(**attributes)
# This is ok from a mutability perspective as the references collection doesn't trigger anything
party.references.update(references)
party.upsert_address('Registered', generate_address(address_primary=True))
party.upsert_email('Office', generate_email(email_primary=True))
return party
def generate_asset_manager(asset_manager_id=None, party_id=None, party_status=None):
references = {'LEI': Reference(random_string(10))}
attributes = generate_common(asset_manager_id=asset_manager_id, party_id=party_id, party_status=party_status)
asset_manager = AssetManager(**attributes)
asset_manager.references.update(references)
asset_manager.upsert_address('Registered', generate_address(address_primary=True))
asset_manager.upsert_email('Office', generate_email(email_primary=True))
return asset_manager
def generate_broker(asset_manager_id=None, party_id=None, party_status=None):
references = {'LEI': Reference(random_string(10))}
attributes = generate_common(asset_manager_id=asset_manager_id, party_id=party_id, party_status=party_status)
broker = Broker(**attributes)
broker.references.update(references)
broker.upsert_address('Registered', generate_address(address_primary=True))
broker.upsert_email('Office', generate_email(email_primary=True))
return broker
def generate_individual(asset_manager_id=None, party_id=None, party_status=None):
attributes = generate_common(asset_manager_id=asset_manager_id, party_id=party_id, party_status=party_status)
individual = Individual(given_names=random_string(10), surname=random_string(10), **attributes)
return individual
def generate_address(country_id=None, address_primary=False):
address = Address(line_one=random_string(20),
line_two=random.choice([None, random_string(10)]),
city=random_string(10),
region=random_string(10),
postal_code=random_string(6),
country_id=country_id or random_string(3), # Make this a real country code
address_primary=address_primary)
return address
def generate_email(email=None, email_primary=False):
return Email(email=email or (random_string(10) + '@amaas.com'), email_primary=email_primary)
def generate_parties(asset_manager_ids=[], number=5):
parties = []
for i in range(number):
party = generate_party(asset_manager_id=random.choice(asset_manager_ids))
parties.append(party)
return parties
|
py | b40508141d4a292ef2fbeee27fb82a38e69965ae | import unittest
import time
from pwm_test_support.pwm_receiver_node import PwmReceiverNode
class BasePwmTestFixture(unittest.TestCase):
pwm_receiver = None
def __init__(self, *args):
super(BasePwmTestFixture, self).__init__(*args)
self.pwm_receiver = PwmReceiverNode()
def setUp(self):
# Need to wait so that node connections can be established
time.sleep(1.0)
self.pwm_receiver.reset_data_received()
def wait_and_assert_pwm_value(self, pwm_value):
self.pwm_receiver.wait_for_data_received()
self.assertEqual(self.pwm_receiver.pwm_out_received, pwm_value)
def assert_positive_motor_direction(self):
self.assertEqual(self.pwm_receiver.pwm_direction_1_received, 255)
self.assertEqual(self.pwm_receiver.pwm_direction_2_received, 0)
def assert_negative_motor_direction(self):
self.assertEqual(self.pwm_receiver.pwm_direction_1_received, 0)
self.assertEqual(self.pwm_receiver.pwm_direction_2_received, 255) |
py | b40508bf06fdab03e49320f2e73d41a7cfed417f | #ARC050d
def main():
import sys
input=sys.stdin.readline
sys.setrecursionlimit(10**6)
if __name__ == '__main__':
main() |
py | b40508ee0776176d7458699437d552ef9893e28f | # These represent the parts used within cmake commands, such as Version for cmake_minimum_required
from .IncludeDir import IncludeDir
from .ScopeTypes import ScopeTypes
from .Version import Version
|
py | b405098447eacb99ece8c6f97a0618d2b263e62a | import asyncio
async def print_nums():
num = 1
while True:
print(num)
num += 1
await asyncio.sleep(0.1)
async def print_time():
count = 0
while True:
if count % 3 == 0:
print('{} seconds passed'.format(count))
count += 1
await asyncio.sleep(1)
async def main():
task1 = asyncio.create_task(print_nums())
task2 = asyncio.create_task(print_time())
await asyncio.gather(task1, task2)
if __name__ == '__main__':
asyncio.run(main())
|
py | b40509e4441bc82d9e354e18222f5b22d28168e1 | # Copyright 2020 The MathWorks, Inc.
import os
import shlex
import tempfile
def setup_desktop():
# Get path to noVNC installation through environment variables
NOVNC_PATH = os.getenv('NOVNC_PATH', '/opt/noVNC')
# make a secure temporary directory for sockets
# This is only readable, writeable & searchable by our uid
sockets_dir = tempfile.mkdtemp()
sockets_path = os.path.join(sockets_dir, 'vnc-socket')
vnc_command = ' '.join((shlex.quote(p) for p in [
'vncserver',
'-verbose',
'-xstartup', os.path.join(os.path.dirname(os.path.abspath(__file__)), 'resources', 'xstartup'),
'-geometry', '1680x1050',
'-SecurityTypes', 'None',
'-rfbunixpath', sockets_path,
'-fg',
':1',
]))
return {
'command': [
'websockify', '-v',
'--web', NOVNC_PATH,
'--heartbeat', '30',
'5901',
'--unix-target', sockets_path,
'--',
'/bin/sh', '-c',
f'cd {os.getcwd()} && {vnc_command}'
],
'port': 5901,
'timeout': 30,
'mappath': {'/': '/mw_lite.html'},
'new_browser_window': True,
'launcher_entry': {
'title': 'MATLAB VNC DESKTOP',
'icon_path': os.path.join(os.path.dirname(os.path.abspath(__file__)), 'resources', 'matlab_icon.svg')
}
}
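# Packaging note (assumption, not shown in this file): jupyter-server-proxy
# normally discovers a callable like setup_desktop through the
# 'jupyter_serverproxy_servers' entry point group in the package's setup.py,
# for example:
#
#     entry_points={
#         'jupyter_serverproxy_servers': [
#             'matlab = matlab_desktop_proxy:setup_desktop',
#         ]
#     }
#
# The module path above is a placeholder for wherever this file is packaged.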
|
py | b4050a3de7ecd6916ca9a24649df7898625dff59 | """Test file for numpy tracing"""
import inspect
import networkx as nx
import numpy
import pytest
from concrete.common.data_types.dtypes_helpers import broadcast_shapes
from concrete.common.data_types.floats import Float
from concrete.common.data_types.integers import Integer
from concrete.common.debugging import format_operation_graph
from concrete.common.representation import intermediate as ir
from concrete.common.values import ClearScalar, ClearTensor, EncryptedScalar, EncryptedTensor
from concrete.numpy import tracing
OPERATIONS_TO_TEST = [ir.Add, ir.Sub, ir.Mul]
@pytest.mark.parametrize(
"operation",
OPERATIONS_TO_TEST,
)
@pytest.mark.parametrize(
"x",
[
pytest.param(EncryptedScalar(Integer(64, is_signed=False)), id="x: Encrypted uint"),
pytest.param(
EncryptedScalar(Integer(64, is_signed=True)),
id="x: Encrypted int",
),
pytest.param(
ClearScalar(Integer(64, is_signed=False)),
id="x: Clear uint",
),
pytest.param(
ClearScalar(Integer(64, is_signed=True)),
id="x: Clear int",
),
],
)
@pytest.mark.parametrize(
"y",
[
pytest.param(EncryptedScalar(Integer(64, is_signed=False)), id="y: Encrypted uint"),
pytest.param(
EncryptedScalar(Integer(64, is_signed=True)),
id="y: Encrypted int",
),
pytest.param(
ClearScalar(Integer(64, is_signed=False)),
id="y: Clear uint",
),
pytest.param(
ClearScalar(Integer(64, is_signed=True)),
id="y: Clear int",
),
],
)
def test_numpy_tracing_binary_op(operation, x, y, test_helpers):
"Test numpy tracing a binary operation (in the supported ops)"
# Note that the functions here share a common structure (2x op y),
# which makes it easy to build the reference graph by hand below
def simple_add_function(x, y):
z = x + x
return z + y
def simple_sub_function(x, y):
z = x + x
return z - y
def simple_mul_function(x, y):
z = x + x
return z * y
assert operation in OPERATIONS_TO_TEST, f"unknown operation {operation}"
if operation == ir.Add:
function_to_compile = simple_add_function
elif operation == ir.Sub:
function_to_compile = simple_sub_function
elif operation == ir.Mul:
function_to_compile = simple_mul_function
op_graph = tracing.trace_numpy_function(function_to_compile, {"x": x, "y": y})
ref_graph = nx.MultiDiGraph()
input_x = ir.Input(x, input_name="x", program_input_idx=0)
input_y = ir.Input(y, input_name="y", program_input_idx=1)
add_node_z = ir.Add(
(
input_x.outputs[0],
input_x.outputs[0],
)
)
returned_final_node = operation(
(
add_node_z.outputs[0],
input_y.outputs[0],
)
)
ref_graph.add_node(input_x)
ref_graph.add_node(input_y)
ref_graph.add_node(add_node_z)
ref_graph.add_node(returned_final_node)
ref_graph.add_edge(input_x, add_node_z, input_idx=0, output_idx=0)
ref_graph.add_edge(input_x, add_node_z, input_idx=1, output_idx=0)
ref_graph.add_edge(add_node_z, returned_final_node, input_idx=0, output_idx=0)
ref_graph.add_edge(input_y, returned_final_node, input_idx=1, output_idx=0)
assert test_helpers.digraphs_are_equivalent(ref_graph, op_graph.graph)
def test_numpy_tracing_tensors():
"Test numpy tracing tensors"
def all_operations(x):
intermediate = x + numpy.array([[1, 2], [3, 4]])
intermediate = numpy.array([[5, 6], [7, 8]]) + intermediate
intermediate = numpy.array([[100, 200], [300, 400]]) - intermediate
intermediate = intermediate - numpy.array([[10, 20], [30, 40]])
intermediate = intermediate * numpy.array([[1, 2], [2, 1]])
intermediate = numpy.array([[2, 1], [1, 2]]) * intermediate
return intermediate
op_graph = tracing.trace_numpy_function(
all_operations, {"x": EncryptedTensor(Integer(32, True), shape=(2, 2))}
)
expected = """ %0 = [[2 1] [1 2]] # ClearTensor<uint2, shape=(2, 2)>
%1 = [[1 2] [2 1]] # ClearTensor<uint2, shape=(2, 2)>
%2 = [[10 20] [30 40]] # ClearTensor<uint6, shape=(2, 2)>
%3 = [[100 200] [300 400]] # ClearTensor<uint9, shape=(2, 2)>
%4 = [[5 6] [7 8]] # ClearTensor<uint4, shape=(2, 2)>
%5 = x # EncryptedTensor<int32, shape=(2, 2)>
%6 = [[1 2] [3 4]] # ClearTensor<uint3, shape=(2, 2)>
%7 = add(%5, %6) # EncryptedTensor<int32, shape=(2, 2)>
%8 = add(%4, %7) # EncryptedTensor<int32, shape=(2, 2)>
%9 = sub(%3, %8) # EncryptedTensor<int32, shape=(2, 2)>
%10 = sub(%9, %2) # EncryptedTensor<int32, shape=(2, 2)>
%11 = mul(%10, %1) # EncryptedTensor<int32, shape=(2, 2)>
%12 = mul(%0, %11) # EncryptedTensor<int32, shape=(2, 2)>
return %12""" # noqa: E501
assert format_operation_graph(op_graph) == expected, format_operation_graph(op_graph)
def test_numpy_explicit_tracing_tensors():
"Test numpy tracing tensors using explicit operations"
def all_explicit_operations(x):
intermediate = numpy.add(x, numpy.array([[1, 2], [3, 4]]))
intermediate = numpy.add(numpy.array([[5, 6], [7, 8]]), intermediate)
intermediate = numpy.subtract(numpy.array([[100, 200], [300, 400]]), intermediate)
intermediate = numpy.subtract(intermediate, numpy.array([[10, 20], [30, 40]]))
intermediate = numpy.multiply(intermediate, numpy.array([[1, 2], [2, 1]]))
intermediate = numpy.multiply(numpy.array([[2, 1], [1, 2]]), intermediate)
return intermediate
op_graph = tracing.trace_numpy_function(
all_explicit_operations, {"x": EncryptedTensor(Integer(32, True), shape=(2, 2))}
)
expected = """ %0 = [[2 1] [1 2]] # ClearTensor<uint2, shape=(2, 2)>
%1 = [[1 2] [2 1]] # ClearTensor<uint2, shape=(2, 2)>
%2 = [[10 20] [30 40]] # ClearTensor<uint6, shape=(2, 2)>
%3 = [[100 200] [300 400]] # ClearTensor<uint9, shape=(2, 2)>
%4 = [[5 6] [7 8]] # ClearTensor<uint4, shape=(2, 2)>
%5 = x # EncryptedTensor<int32, shape=(2, 2)>
%6 = [[1 2] [3 4]] # ClearTensor<uint3, shape=(2, 2)>
%7 = add(%5, %6) # EncryptedTensor<int32, shape=(2, 2)>
%8 = add(%4, %7) # EncryptedTensor<int32, shape=(2, 2)>
%9 = sub(%3, %8) # EncryptedTensor<int32, shape=(2, 2)>
%10 = sub(%9, %2) # EncryptedTensor<int32, shape=(2, 2)>
%11 = mul(%10, %1) # EncryptedTensor<int32, shape=(2, 2)>
%12 = mul(%0, %11) # EncryptedTensor<int32, shape=(2, 2)>
return %12""" # noqa: E501
assert format_operation_graph(op_graph) == expected
@pytest.mark.parametrize(
"x_shape,y_shape",
[
pytest.param((), ()),
pytest.param((3,), ()),
pytest.param((3,), (1,)),
pytest.param((3,), (2,), marks=pytest.mark.xfail(raises=AssertionError, strict=True)),
pytest.param((3,), (3,)),
pytest.param((2, 3), ()),
pytest.param((2, 3), (1,)),
pytest.param((2, 3), (2,), marks=pytest.mark.xfail(raises=AssertionError, strict=True)),
pytest.param((2, 3), (3,)),
pytest.param((2, 3), (1, 1)),
pytest.param((2, 3), (2, 1)),
pytest.param((2, 3), (3, 1), marks=pytest.mark.xfail(raises=AssertionError, strict=True)),
pytest.param((2, 3), (1, 2), marks=pytest.mark.xfail(raises=AssertionError, strict=True)),
pytest.param((2, 3), (2, 2), marks=pytest.mark.xfail(raises=AssertionError, strict=True)),
pytest.param((2, 3), (3, 2), marks=pytest.mark.xfail(raises=AssertionError, strict=True)),
pytest.param((2, 3), (1, 3)),
pytest.param((2, 3), (2, 3)),
pytest.param((2, 3), (3, 3), marks=pytest.mark.xfail(raises=AssertionError, strict=True)),
pytest.param((2, 1, 3), (1, 1, 1)),
pytest.param((2, 1, 3), (1, 4, 1)),
pytest.param((2, 1, 3), (2, 4, 3)),
],
)
def test_numpy_tracing_broadcasted_tensors(x_shape, y_shape):
"""Test numpy tracing broadcasted tensors"""
def f(x, y):
return x + y
op_graph = tracing.trace_numpy_function(
f,
{
"x": EncryptedTensor(Integer(3, True), shape=x_shape),
"y": EncryptedTensor(Integer(3, True), shape=y_shape),
},
)
assert op_graph.input_nodes[0].outputs[0].shape == x_shape
assert op_graph.input_nodes[1].outputs[0].shape == y_shape
assert op_graph.output_nodes[0].outputs[0].shape == broadcast_shapes(x_shape, y_shape)
@pytest.mark.parametrize(
"function_to_trace,op_graph_expected_output_type,input_and_expected_output_tuples",
[
(
lambda x: x.astype(numpy.int32),
Integer(32, is_signed=True),
[
(14, numpy.int32(14)),
(1.5, numpy.int32(1)),
(2.0, numpy.int32(2)),
(-1.5, numpy.int32(-1)),
(2 ** 31 - 1, numpy.int32(2 ** 31 - 1)),
(-(2 ** 31), numpy.int32(-(2 ** 31))),
],
),
(
lambda x: x.astype(numpy.uint32),
Integer(32, is_signed=False),
[
(14, numpy.uint32(14)),
(1.5, numpy.uint32(1)),
(2.0, numpy.uint32(2)),
(2 ** 32 - 1, numpy.uint32(2 ** 32 - 1)),
],
),
(
lambda x: x.astype(numpy.int64),
Integer(64, is_signed=True),
[
(14, numpy.int64(14)),
(1.5, numpy.int64(1)),
(2.0, numpy.int64(2)),
(-1.5, numpy.int64(-1)),
(2 ** 63 - 1, numpy.int64(2 ** 63 - 1)),
(-(2 ** 63), numpy.int64(-(2 ** 63))),
],
),
(
lambda x: x.astype(numpy.uint64),
Integer(64, is_signed=False),
[
(14, numpy.uint64(14)),
(1.5, numpy.uint64(1)),
(2.0, numpy.uint64(2)),
(2 ** 64 - 1, numpy.uint64(2 ** 64 - 1)),
],
),
(
lambda x: x.astype(numpy.float64),
Float(64),
[
(14, numpy.float64(14.0)),
(1.5, numpy.float64(1.5)),
(2.0, numpy.float64(2.0)),
(-1.5, numpy.float64(-1.5)),
],
),
(
lambda x: x.astype(numpy.float32),
Float(32),
[
(14, numpy.float32(14.0)),
(1.5, numpy.float32(1.5)),
(2.0, numpy.float32(2.0)),
(-1.5, numpy.float32(-1.5)),
],
),
],
)
def test_tracing_astype(
function_to_trace, op_graph_expected_output_type, input_and_expected_output_tuples
):
"""Test function for NPTracer.astype"""
for input_, expected_output in input_and_expected_output_tuples:
input_value = (
EncryptedScalar(Integer(64, is_signed=True))
if isinstance(input_, int)
else EncryptedScalar(Float(64))
)
op_graph = tracing.trace_numpy_function(function_to_trace, {"x": input_value})
output_node = op_graph.output_nodes[0]
assert op_graph_expected_output_type == output_node.outputs[0].dtype
node_results = op_graph.evaluate({0: numpy.array(input_)})
evaluated_output = node_results[output_node]
assert evaluated_output.dtype == expected_output.dtype
assert expected_output == evaluated_output
def test_tracing_astype_single_element_array_corner_case(check_array_equality):
"""Test corner case where an array could be transformed to its scalar element"""
a = numpy.array([1], dtype=numpy.float64)
op_graph = tracing.trace_numpy_function(
lambda x: x.astype(numpy.int32), {"x": EncryptedTensor(Float(64), (1,))}
)
eval_result = op_graph(a)
check_array_equality(eval_result, numpy.array([1], dtype=numpy.int32))
@pytest.mark.parametrize(
"function_to_trace,inputs,expected_output_node,expected_output_value",
[
pytest.param(
lambda x, y: numpy.dot(x, y),
{
"x": EncryptedTensor(Integer(7, is_signed=False), shape=(10,)),
"y": EncryptedTensor(Integer(7, is_signed=False), shape=(10,)),
},
ir.Dot,
EncryptedScalar(Integer(32, False)),
),
pytest.param(
lambda x, y: numpy.dot(x, y),
{
"x": EncryptedTensor(Float(64), shape=(10,)),
"y": EncryptedTensor(Float(64), shape=(10,)),
},
ir.Dot,
EncryptedScalar(Float(64)),
),
pytest.param(
lambda x, y: numpy.dot(x, y),
{
"x": ClearTensor(Integer(64, is_signed=True), shape=(6,)),
"y": ClearTensor(Integer(64, is_signed=True), shape=(6,)),
},
ir.Dot,
ClearScalar(Integer(64, is_signed=True)),
),
pytest.param(
lambda x: numpy.dot(x, numpy.array([1, 2, 3, 4, 5], dtype=numpy.int64)),
{
"x": EncryptedTensor(Integer(64, is_signed=True), shape=(5,)),
},
ir.Dot,
EncryptedScalar(Integer(64, True)),
),
pytest.param(
lambda x: x.dot(numpy.array([1, 2, 3, 4, 5], dtype=numpy.int64)),
{
"x": EncryptedTensor(Integer(64, is_signed=True), shape=(5,)),
},
ir.Dot,
EncryptedScalar(Integer(64, True)),
),
],
)
def test_trace_numpy_dot(function_to_trace, inputs, expected_output_node, expected_output_value):
"""Function to test dot tracing"""
op_graph = tracing.trace_numpy_function(function_to_trace, inputs)
assert len(op_graph.output_nodes) == 1
assert isinstance(op_graph.output_nodes[0], expected_output_node)
assert len(op_graph.output_nodes[0].outputs) == 1
assert op_graph.output_nodes[0].outputs[0] == expected_output_value
@pytest.mark.parametrize("np_function", tracing.NPTracer.LIST_OF_SUPPORTED_UFUNC)
def test_nptracer_get_tracing_func_for_np_functions(np_function):
"""Test NPTracer get_tracing_func_for_np_function"""
expected_tracing_func = tracing.NPTracer.UFUNC_ROUTING[np_function]
assert tracing.NPTracer.get_tracing_func_for_np_function(np_function) == expected_tracing_func
def test_nptracer_get_tracing_func_for_np_functions_not_implemented():
"""Check NPTracer in case of not-implemented function"""
with pytest.raises(NotImplementedError) as excinfo:
tracing.NPTracer.get_tracing_func_for_np_function(numpy.conjugate)
assert "NPTracer does not yet manage the following func: conjugate" in str(excinfo.value)
@pytest.mark.parametrize(
"operation,exception_type,match",
[
pytest.param(
lambda x: x + "fail",
TypeError,
"unsupported operand type(s) for +: 'NPTracer' and 'str'",
),
pytest.param(
lambda x: "fail" + x,
TypeError,
'can only concatenate str (not "NPTracer") to str',
),
pytest.param(
lambda x: x - "fail",
TypeError,
"unsupported operand type(s) for -: 'NPTracer' and 'str'",
),
pytest.param(
lambda x: "fail" - x,
TypeError,
"unsupported operand type(s) for -: 'str' and 'NPTracer'",
),
pytest.param(
lambda x: x * "fail",
TypeError,
"can't multiply sequence by non-int of type 'NPTracer'",
),
pytest.param(
lambda x: "fail" * x,
TypeError,
"can't multiply sequence by non-int of type 'NPTracer'",
),
pytest.param(
lambda x: x / "fail",
TypeError,
"unsupported operand type(s) for /: 'NPTracer' and 'str'",
),
pytest.param(
lambda x: "fail" / x,
TypeError,
"unsupported operand type(s) for /: 'str' and 'NPTracer'",
),
pytest.param(
lambda x: x // "fail",
TypeError,
"unsupported operand type(s) for //: 'NPTracer' and 'str'",
),
pytest.param(
lambda x: "fail" // x,
TypeError,
"unsupported operand type(s) for //: 'str' and 'NPTracer'",
),
pytest.param(
lambda x, y: x / y, NotImplementedError, "Can't manage binary operator truediv"
),
pytest.param(
lambda x, y: x // y, NotImplementedError, "Can't manage binary operator floordiv"
),
],
)
def test_nptracer_unsupported_operands(operation, exception_type, match):
"""Test cases where NPTracer cannot be used with other operands."""
tracers = [
tracing.NPTracer([], ir.Input(ClearScalar(Integer(32, True)), param_name, idx), 0)
for idx, param_name in enumerate(inspect.signature(operation).parameters.keys())
]
with pytest.raises(exception_type) as exc_info:
_ = operation(*tracers)
assert match in str(exc_info)
def subtest_tracing_calls(
function_to_trace,
input_value_input_and_expected_output_tuples,
check_array_equality,
):
"""Test memory function managed by GenericFunction node of the form numpy.something"""
for input_value, input_, expected_output in input_value_input_and_expected_output_tuples:
op_graph = tracing.trace_numpy_function(function_to_trace, {"x": input_value})
output_node = op_graph.output_nodes[0]
node_results = op_graph.evaluate({0: input_})
evaluated_output = node_results[output_node]
assert isinstance(evaluated_output, type(expected_output)), type(evaluated_output)
check_array_equality(evaluated_output, expected_output)
@pytest.mark.parametrize(
"function_to_trace,input_value_input_and_expected_output_tuples",
[
(
lambda x: numpy.transpose(x),
[
(
EncryptedTensor(Integer(4, is_signed=False), shape=(2, 2)),
numpy.arange(4).reshape(2, 2),
numpy.array([[0, 2], [1, 3]]),
),
(
EncryptedTensor(Integer(4, is_signed=False), shape=(2, 2)),
numpy.arange(4, 8).reshape(2, 2),
numpy.array([[4, 6], [5, 7]]),
),
(
EncryptedTensor(Integer(6, is_signed=False), shape=()),
numpy.int64(42),
numpy.int64(42),
),
],
),
(
lambda x: numpy.transpose(x) + 42,
[
(
EncryptedTensor(Integer(32, is_signed=False), shape=(3, 5)),
numpy.arange(15).reshape(3, 5),
numpy.arange(42, 57).reshape(3, 5).transpose(),
),
(
EncryptedTensor(Integer(6, is_signed=False), shape=()),
numpy.int64(42),
numpy.int64(84),
),
],
),
(
lambda x: numpy.ravel(x),
[
(
EncryptedTensor(Integer(4, is_signed=False), shape=(2, 2)),
numpy.arange(4),
numpy.array([0, 1, 2, 3]),
),
(
EncryptedTensor(Integer(4, is_signed=False), shape=(2, 2)),
numpy.arange(4).reshape(2, 2),
numpy.array([0, 1, 2, 3]),
),
(
EncryptedTensor(Integer(6, is_signed=False), shape=()),
numpy.int64(42),
numpy.array([42], dtype=numpy.int64),
),
],
),
(
lambda x: numpy.reshape(x, (5, 3)) + 42,
[
(
EncryptedTensor(Integer(32, is_signed=False), shape=(3, 5)),
numpy.arange(15).reshape(3, 5),
numpy.arange(42, 57).reshape(5, 3),
),
],
),
],
)
def test_tracing_numpy_calls(
function_to_trace,
input_value_input_and_expected_output_tuples,
check_array_equality,
):
"""Test memory function managed by GenericFunction node of the form numpy.something"""
subtest_tracing_calls(
function_to_trace, input_value_input_and_expected_output_tuples, check_array_equality
)
@pytest.mark.parametrize(
"function_to_trace,input_value_input_and_expected_output_tuples",
[
(
lambda x: x.transpose() + 42,
[
(
EncryptedTensor(Integer(32, is_signed=False), shape=(3, 5)),
numpy.arange(15).reshape(3, 5),
numpy.arange(42, 57).reshape(3, 5).transpose(),
),
(
EncryptedTensor(Integer(6, is_signed=False), shape=()),
numpy.int64(42),
numpy.int64(84),
),
],
),
(
lambda x: x.ravel(),
[
(
EncryptedTensor(Integer(4, is_signed=False), shape=(2, 2)),
numpy.arange(4),
numpy.array([0, 1, 2, 3]),
),
(
EncryptedTensor(Integer(4, is_signed=False), shape=(2, 2)),
numpy.arange(4).reshape(2, 2),
numpy.array([0, 1, 2, 3]),
),
(
EncryptedTensor(Integer(6, is_signed=False), shape=()),
numpy.int64(42),
numpy.array([42], dtype=numpy.int64),
),
],
),
(
lambda x: x.reshape((5, 3)) + 42,
[
(
EncryptedTensor(Integer(32, is_signed=False), shape=(3, 5)),
numpy.arange(15).reshape(3, 5),
numpy.arange(42, 57).reshape(5, 3),
),
],
),
pytest.param(
lambda x: x.reshape((5, 3)),
[
(
EncryptedTensor(Integer(6, is_signed=False), shape=()),
numpy.int64(42),
None,
)
],
marks=pytest.mark.xfail(strict=True, raises=ValueError),
),
pytest.param(
lambda x: x.flatten(),
[
(
EncryptedTensor(Integer(32, is_signed=False), shape=(3, 5)),
numpy.arange(15).reshape(3, 5),
numpy.arange(15),
)
],
),
pytest.param(
lambda x: abs(x),
[
(
EncryptedTensor(Integer(32, is_signed=True), shape=(3, 5)),
numpy.arange(15).reshape(3, 5),
numpy.arange(15).reshape(3, 5),
)
],
),
pytest.param(
lambda x: +x,
[
(
EncryptedTensor(Integer(32, is_signed=True), shape=(3, 5)),
numpy.arange(15).reshape(3, 5),
numpy.arange(15).reshape(3, 5),
)
],
),
pytest.param(
lambda x: -x,
[
(
EncryptedTensor(Integer(32, is_signed=True), shape=(3, 5)),
numpy.arange(15).reshape(3, 5),
(numpy.arange(15).reshape(3, 5)) * (-1),
)
],
),
pytest.param(
lambda x: ~x,
[
(
EncryptedTensor(Integer(32, is_signed=True), shape=(3, 5)),
numpy.arange(15).reshape(3, 5),
numpy.arange(15).reshape(3, 5).__invert__(),
)
],
),
pytest.param(
lambda x: x << 3,
[
(
EncryptedTensor(Integer(32, is_signed=True), shape=(3, 5)),
numpy.arange(15).reshape(3, 5),
numpy.arange(15).reshape(3, 5) * 8,
)
],
),
pytest.param(
lambda x: x >> 1,
[
(
EncryptedTensor(Integer(32, is_signed=True), shape=(3, 5)),
numpy.arange(15).reshape(3, 5),
numpy.arange(15).reshape(3, 5) // 2,
)
],
),
pytest.param(
lambda x: 2 << x,
[
(
EncryptedTensor(Integer(32, is_signed=True), shape=(3, 5)),
numpy.arange(15).reshape(3, 5) % 8,
2 << (numpy.arange(15).reshape(3, 5) % 8),
)
],
),
pytest.param(
lambda x: 256 >> x,
[
(
EncryptedTensor(Integer(32, is_signed=True), shape=(3, 5)),
numpy.arange(15).reshape(3, 5) % 8,
256 >> (numpy.arange(15).reshape(3, 5) % 8),
)
],
),
pytest.param(
lambda x: x > 4,
[
(
EncryptedTensor(Integer(32, is_signed=True), shape=(3, 5)),
numpy.arange(15).reshape(3, 5),
numpy.arange(15).reshape(3, 5) > 4,
)
],
),
pytest.param(
lambda x: x < 5,
[
(
EncryptedTensor(Integer(32, is_signed=True), shape=(3, 5)),
numpy.arange(15).reshape(3, 5),
numpy.arange(15).reshape(3, 5) < 5,
)
],
),
pytest.param(
lambda x: x <= 7,
[
(
EncryptedTensor(Integer(32, is_signed=True), shape=(3, 5)),
numpy.arange(15).reshape(3, 5),
numpy.arange(15).reshape(3, 5) <= 7,
)
],
),
pytest.param(
lambda x: x >= 9,
[
(
EncryptedTensor(Integer(32, is_signed=True), shape=(3, 5)),
numpy.arange(15).reshape(3, 5),
numpy.arange(15).reshape(3, 5) >= 9,
)
],
),
pytest.param(
lambda x: x == 11,
[
(
EncryptedTensor(Integer(32, is_signed=True), shape=(3, 5)),
numpy.arange(15).reshape(3, 5),
numpy.arange(15).reshape(3, 5) == 11,
)
],
),
pytest.param(
lambda x: x != 12,
[
(
EncryptedTensor(Integer(32, is_signed=True), shape=(3, 5)),
numpy.arange(15).reshape(3, 5),
numpy.arange(15).reshape(3, 5) != 12,
)
],
),
# Disable misplaced-comparison-constant because we specifically want to check that constants on the left-hand side work fine
# pylint: disable=misplaced-comparison-constant
pytest.param(
lambda x: 4 > x,
[
(
EncryptedTensor(Integer(32, is_signed=True), shape=(3, 5)),
numpy.arange(15).reshape(3, 5),
4 > numpy.arange(15).reshape(3, 5),
)
],
),
pytest.param(
lambda x: 5 < x,
[
(
EncryptedTensor(Integer(32, is_signed=True), shape=(3, 5)),
numpy.arange(15).reshape(3, 5),
5 < numpy.arange(15).reshape(3, 5),
)
],
),
pytest.param(
lambda x: 7 <= x,
[
(
EncryptedTensor(Integer(32, is_signed=True), shape=(3, 5)),
numpy.arange(15).reshape(3, 5),
7 <= numpy.arange(15).reshape(3, 5),
)
],
),
pytest.param(
lambda x: 9 >= x,
[
(
EncryptedTensor(Integer(32, is_signed=True), shape=(3, 5)),
numpy.arange(15).reshape(3, 5),
9 >= numpy.arange(15).reshape(3, 5),
)
],
),
pytest.param(
lambda x: 11 == x,
[
(
EncryptedTensor(Integer(32, is_signed=True), shape=(3, 5)),
numpy.arange(15).reshape(3, 5),
11 == numpy.arange(15).reshape(3, 5),
)
],
),
pytest.param(
lambda x: 12 != x,
[
(
EncryptedTensor(Integer(32, is_signed=True), shape=(3, 5)),
numpy.arange(15).reshape(3, 5),
12 != numpy.arange(15).reshape(3, 5),
)
],
),
# pylint: enable=misplaced-comparison-constant
(
lambda x: x & 11,
[
(
EncryptedTensor(Integer(32, is_signed=False), shape=(3, 5)),
numpy.arange(15).reshape(3, 5),
numpy.array([i & 11 for i in range(15)]).reshape(3, 5),
),
],
),
(
lambda x: 13 & x,
[
(
EncryptedTensor(Integer(32, is_signed=False), shape=(3, 5)),
numpy.arange(15).reshape(3, 5),
numpy.array([i & 13 for i in range(15)]).reshape(3, 5),
),
],
),
(
lambda x: x | 6,
[
(
EncryptedTensor(Integer(32, is_signed=False), shape=(3, 5)),
numpy.arange(15).reshape(3, 5),
numpy.array([i | 6 for i in range(15)]).reshape(3, 5),
),
],
),
(
lambda x: 30 | x,
[
(
EncryptedTensor(Integer(32, is_signed=False), shape=(3, 5)),
numpy.arange(15).reshape(3, 5),
numpy.array([i | 30 for i in range(15)]).reshape(3, 5),
),
],
),
(
lambda x: x ^ 91,
[
(
EncryptedTensor(Integer(32, is_signed=False), shape=(3, 5)),
numpy.arange(15).reshape(3, 5),
numpy.array([i ^ 91 for i in range(15)]).reshape(3, 5),
),
],
),
(
lambda x: 115 ^ x,
[
(
EncryptedTensor(Integer(32, is_signed=False), shape=(3, 5)),
numpy.arange(15).reshape(3, 5),
numpy.array([i ^ 115 for i in range(15)]).reshape(3, 5),
),
],
),
(
lambda x: x % 11,
[
(
EncryptedTensor(Integer(32, is_signed=False), shape=(3, 5)),
numpy.arange(15).reshape(3, 5),
numpy.array([i % 11 for i in range(15)]).reshape(3, 5),
),
],
),
(
lambda x: 150 % (x + 1),
[
(
EncryptedTensor(Integer(32, is_signed=False), shape=(3, 5)),
numpy.arange(15).reshape(3, 5),
numpy.array([150 % (i + 1) for i in range(15)]).reshape(3, 5),
),
],
),
(
lambda x: x ** 2,
[
(
EncryptedTensor(Integer(32, is_signed=False), shape=(3, 5)),
numpy.arange(15).reshape(3, 5),
numpy.array([i ** 2 for i in range(15)]).reshape(3, 5),
),
],
),
(
lambda x: 2 ** x,
[
(
EncryptedTensor(Integer(32, is_signed=False), shape=(3, 5)),
numpy.arange(15).reshape(3, 5) % 7,
numpy.array([2 ** (i % 7) for i in range(15)]).reshape(3, 5),
),
],
),
],
)
def test_tracing_ndarray_calls(
function_to_trace,
input_value_input_and_expected_output_tuples,
check_array_equality,
):
"""Test memory function managed by GenericFunction node of the form ndarray.something"""
subtest_tracing_calls(
function_to_trace, input_value_input_and_expected_output_tuples, check_array_equality
)
@pytest.mark.parametrize(
"lambda_f,params",
[
(
lambda x: numpy.reshape(x, (5, 3)),
{
"x": EncryptedTensor(Integer(2, is_signed=False), shape=(7, 5)),
},
),
],
)
def test_errors_with_generic_function(lambda_f, params):
"Test some errors with generic function"
with pytest.raises(ValueError) as excinfo:
tracing.trace_numpy_function(lambda_f, params)
assert "shapes are not compatible (old shape (7, 5), new shape (5, 3))" in str(excinfo.value)
|
py | b4050a702ad8aeea87f304ec90fe15da7257390a | # Importing Libraries
import uvicorn
from fastapi import FastAPI
from BankNotes import BankNote
import pickle
# Creating a FastAPI Instance
app = FastAPI()
# Reading in the Pickle File
model = pickle.load(open('classifier.pkl', 'rb'))
# Index Route
@app.get('/')
async def index():
return {'message': 'My First FastAPI ML Deployment Implementation!!'}
# Route Path with Parameter
@app.get('/{name}')
async def hello(name: str):
return {'message': f'Hello, {name}!'}
# Predict Route Path with Parameter
"""
Make a prediction from the posted JSON data and return the predicted class label.
"""
@app.post('/predict')
async def predict(data: BankNote):
data = data.dict()
variance = data['variance']
skewness = data['skewness']
curtosis = data['curtosis']
entropy = data['entropy']
predictedValue = model.predict_proba([[variance, skewness, curtosis, entropy]])[:, 1][0]
if predictedValue < 0.5:
prediction = "Fake Note"
else:
prediction = "Authentic Bank Note"
return {
'prediction': prediction
}
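# Example request (illustrative): the JSON fields mirror the BankNote model
# consumed by /predict; the host and port match the uvicorn.run call below.
#
#   curl -X POST http://127.0.0.1:5000/predict \
#        -H "Content-Type: application/json" \
#        -d '{"variance": 2.3, "skewness": 6.8, "curtosis": -0.5, "entropy": -0.6}'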
# Run the API with `uvicorn`
if __name__ == '__main__':
# CLI: uvicorn app:app --reload --port 5000
uvicorn.run(app, host = '127.0.0.1', port = 5000, reload = True)
|
py | b4050ae3f0a3c202c44bc95f60472c04d14a4912 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import re
def trn_transcript(utterance_id, transcript):
words = []
for token in transcript:
words.append(trn_alternation(token))
words.append('(' + utterance_id + ')')
return ' '.join(words)
def trn_alternation(alternation):
words = []
if type(alternation) is list:
words.append('{')
words.append(' / '.join([' '.join(x) for x in alternation]))
words.append('}')
else:
words.append(alternation)
return ' '.join(words)
class Transcripts:
'''
A transcript database.
'''
def __init__(self):
self.__transcripts = dict()
def __iter__(self):
for utterance_id, transcript in self.__transcripts.items():
yield utterance_id, transcript
def read_trn(self, input_file):
for line in input_file:
utterance_end = line.index('(')
id_start = utterance_end + 1
id_end = line.index(')', id_start)
utterance = line[:utterance_end].strip()
utterance_id = line[id_start:id_end].strip()
self.set_utterance(utterance_id, utterance)
def write_trn(self, output_file):
for utterance_id, transcript in self.__transcripts.items():
line = trn_transcript(utterance_id, transcript)
output_file.write(line.encode('utf-8') + b'\n')
def set_utterance(self, utterance_id, utterance):
transcript = []
pos = 0
while pos < len(utterance):
alt_pos = utterance.find('{', pos)
if alt_pos == -1:
transcript.extend(utterance[pos:].split())
break
transcript.extend(utterance[pos:alt_pos].split())
alt_pos += 1
alt_end = utterance.find('}', alt_pos)
alternation = utterance[alt_pos:alt_end].split('/')
alternation = [x.split() for x in alternation]
transcript.append(alternation)
pos = alt_end + 1
self.__transcripts[utterance_id] = transcript
def set_transcript(self, utterance_id, transcript):
self.__transcripts[utterance_id] = transcript
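if __name__ == '__main__':
    # Illustrative self-test (not part of the original module): parse an
    # utterance containing an alternation block and print it back in trn form.
    demo = Transcripts()
    demo.set_utterance('utt1', 'hello { world / word } again')
    for demo_id, demo_transcript in demo:
        print(trn_transcript(demo_id, demo_transcript))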
|
py | b4050bf37d79716fd6fab488daec238243944fc6 | from local_data_api.settings import setup
def test_setup_mysql(mocker) -> None:
mock_register_secret = mocker.patch('local_data_api.settings.register_secret')
mock_register_resource = mocker.patch('local_data_api.settings.register_resource')
setup()
mock_register_secret.assert_called_with(
'root', 'example', 'arn:aws:secretsmanager:us-east-1:123456789012:secret:dummy'
)
mock_register_resource.assert_called_with(
'arn:aws:rds:us-east-1:123456789012:cluster:dummy',
'MySQLJDBC',
'127.0.0.1',
3306,
'root',
'example',
{'JAR_PATH': '/usr/lib/jvm/mariadb-java-client.jar'},
)
def test_setup_postgres(mocker) -> None:
mock_register_secret = mocker.patch('local_data_api.settings.register_secret')
mock_register_resource = mocker.patch('local_data_api.settings.register_resource')
import os
os.environ['ENGINE'] = 'PostgreSQLJDBC'
setup()
mock_register_secret.assert_called_with(
'postgres',
'example',
'arn:aws:secretsmanager:us-east-1:123456789012:secret:dummy',
)
mock_register_resource.assert_called_with(
'arn:aws:rds:us-east-1:123456789012:cluster:dummy',
'PostgreSQLJDBC',
'127.0.0.1',
5432,
'postgres',
'example',
{'JAR_PATH': '/usr/lib/jvm/postgresql-java-client.jar'},
)
|
py | b4050ca0b590d52b1fb489673192aa5bd03a24ea | import ujson
from configuration import *
from log import *
state = {}
# state entries contain tag info and values
# source.measurement => value
def build_state_entry(time, iteration):
return {
'meta.time': time,
'meta.iteration': iteration,
}
def set_measurement(state_entry, sensor, measurement, value):
state_entry[sensor +'.'+ measurement] = value
def set_meta(state_entry, name, value):
state_entry['meta.'+ name] = value
def append_state_entry(state_entry):
global state
state['entries'].append(state_entry)
def get_state_entries():
return state['entries']
def state_entry_count():
return len(state['entries'])
def init_state():
log_debug('init state')
truncate_state()
def state_entry_to_string(state_entry):
return ujson.dumps(state_entry)
def truncate_state():
global state
state['entries'] = []
# does not obey log setting
def print_state():
global state
print('state')
print(ujson.dumps(state))
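# Usage sketch (illustrative): one measurement cycle using the helpers above.
# The sensor name 'bme280' and the values below are made up for the example.
#
#     init_state()
#     entry = build_state_entry(time=1234567890, iteration=1)
#     set_measurement(entry, 'bme280', 'temperature', 21.4)
#     set_meta(entry, 'battery', 3.7)
#     append_state_entry(entry)
#     print(state_entry_to_string(get_state_entries()[0]))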
|
py | b4050d7b476c9fa57a45d0594322f2b31c672820 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A setup module for the Google Ads API client library."""
from setuptools import setup, find_packages
import io
install_requires = [
"dataclasses >= 0.6, < 1.0.0; python_version < '3.7'",
"google-auth-oauthlib >= 0.3.0, < 1.0.0",
"google-api-core >= 1.21.0, < 2.0.0",
"googleapis-common-protos >= 1.5.8, < 2.0.0",
"grpcio >= 1.33.2, < 2.0.0",
"proto-plus >= 1.18.0, < 2.0.0",
"PyYAML >= 5.1, < 6.0",
"setuptools >= 40.3.0",
"pep562 >= 1.0, < 2.0",
"nox == 2020.12.31",
]
with io.open("README.rst", "r", encoding="utf-8") as readme_file:
long_description = readme_file.read()
setup(
name="google-ads",
version="11.0.2",
author="Google LLC",
author_email="[email protected]",
classifiers=[
"Intended Audience :: Developers",
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
],
description="Client library for the Google Ads API",
include_package_data=True,
python_requires=">=3.6",
long_description=long_description,
install_requires=install_requires,
license="Apache 2.0",
packages=find_packages(
exclude=["examples", "examples.*", "tests", "tests.*"]
),
namespace_packages=["google", "google.ads"],
url="https://github.com/googleads/google-ads-python",
zip_safe=False,
)
|
py | b4050ed883d473a6c6d516ce9abfbf88cbdee0f0 | """pymoku example: Basic IIR Filter Box
This example demonstrates how you can configure the IIR Filter instrument,
configure real-time monitoring of the input and output signals.
(c) 2019 Liquid Instruments Pty. Ltd.
"""
from pymoku import Moku
from pymoku.instruments import IIRFilterBox
# This script provides a basic example showing how to load coefficients from an
# array into the IIRFilterBox.
# The following example array produces an 8th order Direct-form 1 Chebyshev
# type 2 IIR filter with a normalized stopband frequency of 0.2 pi rad/sample
# and a stopband attenuation of 40 dB. Output gain is set to 1.0. See the
# IIRFilterBox documentation for array dimension specifics.
filt_coeff = [
[
1.0
], [
1.0000000000, 0.6413900006, -1.0290561741,
0.6413900006, -1.6378425857, 0.8915664128
], [
1.0000000000, 0.5106751138, -0.7507394931,
0.5106751138, -1.4000444473, 0.6706551819
], [
1.0000000000, 0.3173108134, -0.3111365531,
0.3173108134, -1.0873085012, 0.4107935750
], [
1.0000000000, 0.1301131088, 0.1223154629,
0.1301131088, -0.7955572476, 0.1780989281
]
]
m = Moku.get_by_name('Moku')
try:
i = m.deploy_or_connect(IIRFilterBox)
i.set_frontend(1, fiftyr=True, atten=False, ac=False)
i.set_frontend(2, fiftyr=True, atten=False, ac=False)
# Both filters have the same coefficients, but the different sampling rates
# mean the resultant transfer functions will be different by a factor of
# 128 (the ratio of sampling rates)
i.set_filter(1, sample_rate='high', filter_coefficients=filt_coeff)
i.set_filter(2, sample_rate='low', filter_coefficients=filt_coeff)
# Offset filter channel 1 input by 0.1V
i.set_gains_offsets(1, input_offset=0.1)
# Filter channel 2 acts on sum of input 1 and 2
i.set_control_matrix(2, scale_in1=0.5, scale_in2=0.5)
# Set the monitor timebase to +-1msec
i.set_timebase(-1e-3, 1e-3)
# Set up monitoring of the input and output of the second filter channel.
i.set_monitor('a', 'in2')
i.set_monitor('b', 'out2')
# Capture and print one set of time-domain input and output points
d = i.get_realtime_data()
print(d.ch1, d.ch2)
finally:
m.close()
|
py | b4050f6ff522bdbbf4b7fb09fee6fe8d2f117012 | # Copyright 2018 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""The intent service interface offers a unified wrapper class for the
Intent Service. Including both adapt and padatious.
"""
from os.path import exists
from adapt.intent import Intent
from mycroft.messagebus.message import Message
class IntentServiceInterface:
"""Interface to communicate with the Mycroft intent service.
This class wraps the messagebus interface of the intent service allowing
for easier interaction with the service. It wraps both the Adapt and
Padatious parts of the intent services.
"""
def __init__(self, bus=None):
self.bus = bus
self.registered_intents = []
def set_bus(self, bus):
self.bus = bus
def register_adapt_keyword(self, vocab_type, entity, aliases=None):
"""Send a message to the intent service to add an Adapt keyword.
vocab_type(str): Keyword reference
entity (str): Primary keyword
aliases (list): List of alternative keywords
"""
aliases = aliases or []
self.bus.emit(Message("register_vocab",
{'start': entity, 'end': vocab_type}))
for alias in aliases:
self.bus.emit(Message("register_vocab", {
'start': alias, 'end': vocab_type, 'alias_of': entity
}))
def register_adapt_regex(self, regex):
"""Register a regex with the intent service.
Arguments:
regex (str): Regex to be registered (Adapt extracts the keyword
reference from the named match group).
"""
self.bus.emit(Message("register_vocab", {'regex': regex}))
def register_adapt_intent(self, name, intent_parser):
"""Register an Adapt intent parser object.
Serializes the intent_parser and sends it over the messagebus to be
registered.
"""
self.bus.emit(Message("register_intent", intent_parser.__dict__))
self.registered_intents.append((name, intent_parser))
def detach_intent(self, intent_name):
"""Remove an intent from the intent service.
Arguments:
intent_name(str): Intent reference
"""
self.bus.emit(Message("detach_intent", {"intent_name": intent_name}))
def set_adapt_context(self, context, word, origin):
"""Set an Adapt context.
Arguments:
context (str): context keyword name
word (str): word to register
origin (str): original origin of the context (for cross context)
"""
self.bus.emit(Message('add_context',
{'context': context, 'word': word,
'origin': origin}))
def remove_adapt_context(self, context):
"""Remove an active Adapt context.
Arguments:
context(str): name of context to remove
"""
self.bus.emit(Message('remove_context', {'context': context}))
def register_padatious_intent(self, intent_name, filename):
"""Register a padatious intent file with Padatious.
Arguments:
intent_name(str): intent identifier
filename(str): complete file path for entity file
"""
if not isinstance(filename, str):
raise ValueError('Filename path must be a string')
if not exists(filename):
raise FileNotFoundError('Unable to find "{}"'.format(filename))
data = {"file_name": filename,
"name": intent_name}
self.bus.emit(Message("padatious:register_intent", data))
self.registered_intents.append((intent_name, data))
def register_padatious_entity(self, entity_name, filename):
"""Register a padatious entity file with Padatious.
Arguments:
entity_name(str): entity name
filename(str): complete file path for entity file
"""
if not isinstance(filename, str):
raise ValueError('Filename path must be a string')
if not exists(filename):
raise FileNotFoundError('Unable to find "{}"'.format(filename))
self.bus.emit(Message('padatious:register_entity', {
'file_name': filename,
'name': entity_name
}))
def __iter__(self):
"""Iterator over the registered intents.
Returns an iterator returning name-handler pairs of the registered
intent handlers.
"""
return iter(self.registered_intents)
def __contains__(self, val):
"""Checks if an intent name has been registered."""
return val in [i[0] for i in self.registered_intents]
def get_intent(self, intent_name):
"""Get intent from intent_name.
Arguments:
intent_name (str): name to find.
Returns:
Found intent or None if none were found.
"""
for name, intent in self:
if name == intent_name:
return intent
else:
return None
def open_intent_envelope(message):
"""Convert dictionary received over messagebus to Intent."""
intent_dict = message.data
return Intent(intent_dict.get('name'),
intent_dict.get('requires'),
intent_dict.get('at_least_one'),
intent_dict.get('optional'))
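# Usage sketch (illustrative, not part of the original module): assumes `bus`
# is an already-connected Mycroft messagebus client and that the intent parser
# was built elsewhere (e.g. with adapt's IntentBuilder).
#
#     interface = IntentServiceInterface(bus)
#     interface.register_adapt_keyword('GreetingKeyword', 'hello', aliases=['hi'])
#     interface.register_adapt_intent('HelloIntent', hello_intent_parser)
#     assert 'HelloIntent' in interface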
|
py | b405127739ccc5d8151f0508183a90cda5c91ebf | #!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2016 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import datetime
class WeekDaysFiller(object):
'''Bar Filler to add missing calendar days to trading days'''
# kickstart value for date comparisons
lastdt = datetime.datetime.max.toordinal()
def __init__(self, data, fillclose=False):
self.fillclose = fillclose
self.voidbar = [float('Nan')] * data.size() # init a void bar
def __call__(self, data):
'''Empty bars (NaN) or bars holding the last close price are added for
weekdays with no data
Params:
- data: the data source to filter/process
Returns:
- True (always): bars are removed (even if put back on the stack)
'''
dt = data.datetime.dt() # current date in int format
lastdt = self.lastdt + 1 # move the last seen data once forward
while lastdt < dt: # loop over gap bars
if datetime.date.fromordinal(lastdt).isoweekday() < 6: # Mon-Fri
# Fill in date and add new bar to the stack
if self.fillclose:
self.voidbar = [self.lastclose] * data.size()
self.voidbar[-1] = float(lastdt) + data.sessionend
data._add2stack(self.voidbar[:])
lastdt += 1 # move lastdt forward
self.lastdt = dt # keep a record of the last seen date
self.lastclose = data.close[0]
data._save2stack(erase=True) # dt bar to the stack and out of stream
return True # bars are on the stack (new and original)
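# Usage sketch (illustrative): in backtrader, a filler like this is normally
# attached to a data feed before the feed is added to cerebro. The feed class
# and file name below are assumptions about the caller's setup.
#
#     data = bt.feeds.YahooFinanceCSVData(dataname='prices.csv')
#     data.addfilter(WeekDaysFiller, fillclose=True)
#     cerebro.adddata(data)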
|